@aj-archipelago/cortex 1.1.21 → 1.1.23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (88)
  1. package/config/default.example.json +84 -0
  2. package/config.js +5 -4
  3. package/helper-apps/cortex-file-handler/blobHandler.js +115 -98
  4. package/helper-apps/cortex-file-handler/fileChunker.js +15 -10
  5. package/helper-apps/cortex-file-handler/index.js +48 -2
  6. package/helper-apps/cortex-file-handler/package-lock.json +226 -53
  7. package/helper-apps/cortex-file-handler/package.json +3 -3
  8. package/package.json +2 -1
  9. package/pathways/categorize.js +23 -0
  10. package/pathways/chat.js +1 -1
  11. package/pathways/chat_code.js +19 -0
  12. package/pathways/chat_context.js +19 -0
  13. package/pathways/chat_jarvis.js +19 -0
  14. package/pathways/chat_persist.js +23 -0
  15. package/pathways/code_review.js +17 -0
  16. package/pathways/cognitive_delete.js +2 -1
  17. package/pathways/cognitive_insert.js +1 -0
  18. package/pathways/cognitive_search.js +1 -0
  19. package/pathways/embeddings.js +1 -1
  20. package/pathways/expand_story.js +12 -0
  21. package/pathways/format_paragraph_turbo.js +16 -0
  22. package/pathways/format_summarization.js +21 -0
  23. package/pathways/gemini_15_vision.js +20 -0
  24. package/pathways/gemini_vision.js +20 -0
  25. package/pathways/grammar.js +30 -0
  26. package/pathways/hashtags.js +19 -0
  27. package/pathways/headline.js +43 -0
  28. package/pathways/headline_custom.js +169 -0
  29. package/pathways/highlights.js +22 -0
  30. package/pathways/image.js +2 -1
  31. package/pathways/index.js +111 -17
  32. package/pathways/jira_story.js +18 -0
  33. package/pathways/keywords.js +4 -0
  34. package/pathways/language.js +17 -6
  35. package/pathways/locations.js +93 -0
  36. package/pathways/quotes.js +19 -0
  37. package/pathways/rag.js +207 -0
  38. package/pathways/rag_jarvis.js +254 -0
  39. package/pathways/rag_search_helper.js +21 -0
  40. package/pathways/readme.js +18 -0
  41. package/pathways/release_notes.js +16 -0
  42. package/pathways/remove_content.js +31 -0
  43. package/pathways/retrieval.js +23 -0
  44. package/pathways/run_claude35_sonnet.js +21 -0
  45. package/pathways/run_claude3_haiku.js +20 -0
  46. package/pathways/run_gpt35turbo.js +20 -0
  47. package/pathways/run_gpt4.js +20 -0
  48. package/pathways/run_gpt4_32.js +20 -0
  49. package/pathways/select_extension.js +6 -0
  50. package/pathways/select_services.js +10 -0
  51. package/pathways/spelling.js +3 -0
  52. package/pathways/story_angles.js +13 -0
  53. package/pathways/styleguide/styleguide.js +221 -0
  54. package/pathways/styleguidemulti.js +127 -0
  55. package/pathways/subhead.js +48 -0
  56. package/pathways/summarize_turbo.js +98 -0
  57. package/pathways/summary.js +31 -12
  58. package/pathways/sys_claude_35_sonnet.js +19 -0
  59. package/pathways/sys_claude_3_haiku.js +19 -0
  60. package/pathways/sys_google_chat.js +19 -0
  61. package/pathways/sys_google_code_chat.js +19 -0
  62. package/pathways/sys_google_gemini_chat.js +23 -0
  63. package/pathways/sys_openai_chat.js +2 -2
  64. package/pathways/sys_openai_chat_16.js +19 -0
  65. package/pathways/sys_openai_chat_gpt4.js +19 -0
  66. package/pathways/sys_openai_chat_gpt4_32.js +19 -0
  67. package/pathways/sys_openai_chat_gpt4_turbo.js +19 -0
  68. package/pathways/tags.js +25 -0
  69. package/pathways/taxonomy.js +135 -0
  70. package/pathways/timeline.js +51 -0
  71. package/pathways/topics.js +25 -0
  72. package/pathways/topics_sentiment.js +20 -0
  73. package/pathways/transcribe.js +2 -4
  74. package/pathways/translate.js +10 -12
  75. package/pathways/translate_azure.js +13 -0
  76. package/pathways/translate_context.js +21 -0
  77. package/pathways/translate_gpt4.js +19 -0
  78. package/pathways/translate_gpt4_turbo.js +19 -0
  79. package/pathways/translate_subtitle.js +201 -0
  80. package/pathways/translate_subtitle_helper.js +31 -0
  81. package/pathways/translate_turbo.js +19 -0
  82. package/pathways/vision.js +9 -7
  83. package/server/pathwayResolver.js +1 -1
  84. package/server/plugins/azureCognitivePlugin.js +10 -1
  85. package/server/plugins/openAiVisionPlugin.js +14 -6
  86. package/tests/main.test.js +62 -2
  87. package/tests/sublong.srt +4543 -0
  88. package/tests/vision.test.js +0 -34
package/pathways/transcribe.js
@@ -1,6 +1,6 @@
  export default {
  prompt: `{{text}}`,
- model: `oai-whisper`,
+ model: `azure-whisper`,
  inputParameters: {
  file: ``,
  language: ``,
@@ -13,6 +13,4 @@ export default {
  },
  timeout: 3600, // in seconds
  enableDuplicateRequests: false,
- };
-
-
+ };
package/pathways/translate.js
@@ -1,21 +1,19 @@
- // translate.js
- // Translation module
- // This module exports a prompt that takes an input text and translates it from one language to another.
+ import { Prompt } from '../server/prompt.js';

  export default {
- // Set the temperature to 0 to favor more deterministic output when generating translations.
- temperature: 0,

- prompt: `Translate the following text to {{to}}:\n\nOriginal Language:\n{{{text}}}\n\n{{to}}:\n`,
-
- // Define input parameters for the prompt, such as the target language for translation.
+ prompt: [
+ new Prompt({ messages: [
+ {"role": "system", "content": "Assistant is a highly skilled multilingual translator for a prestigious news agency. When the user posts any text in any language, assistant will create a translation of that text in {{to}}. Assistant will produce only the translation and no additional notes or commentary."},
+ {"role": "user", "content": "{{{text}}}"}
+ ]}),
+ ],
  inputParameters: {
  to: `Arabic`,
+ tokenRatio: 0.2,
  },
-
- // Set the timeout for the translation process, in seconds.
- timeout: 400,
  inputChunkSize: 500,
+ model: 'oai-gpt4o',
  enableDuplicateRequests: false,
- };

+ }
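The rewritten translate pathway drops the old completion-style prompt and the temperature/timeout settings in favor of a chat-style Prompt (system instruction plus the raw user text) on oai-gpt4o, keeping an inputChunkSize of 500 and adding a tokenRatio hint of 0.2. As a rough usage sketch (the exact GraphQL argument list is an assumption modeled on the pathway queries exercised in package/tests/main.test.js):

// Hypothetical call; field and argument names follow the pattern of the
// queries used in package/tests/main.test.js.
const response = await testServer.executeOperation({
  query: 'query translate($text: String!, $to: String) { translate(text: $text, to: $to) { result } }',
  variables: { text: 'Now is the time for all good men to come to the aid of their country.', to: 'Arabic' },
});
console.log(response.body?.singleResult?.data?.translate?.result);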
package/pathways/translate_azure.js
@@ -0,0 +1,13 @@
+ // Description: Translate a text from one language to another
+
+ export default {
+ temperature: 0,
+ prompt: `{{{text}}}`,
+ inputParameters: {
+ to: `en`,
+ tokenRatio: 0.2,
+ },
+ //inputChunkSize: 500,
+ model: 'azure-translate',
+ timeout: 120,
+ }
package/pathways/translate_context.js
@@ -0,0 +1,21 @@
+ // Description: Translate a text from one language to another
+
+ export default {
+ temperature: 0,
+ prompt:
+ [
+ // `{{{text}}}\n\nList all of the named entities in the above document in the original language:\n`,
+ //`{{{previousResult}}}\n\nTranslate this list to {{to}}:\n`,
+ //`{{{text}}}\nTranscribe the names of all people and places exactly from this document in the original language:\n`,
+ `{{{text}}}\nCopy the names of all people and places exactly from this document in the language above:\n`,
+ //`{{{previousResult}}}\n\nTranscribe exactly to {{to}}:\n`,
+ `Original Language:\n{{{previousResult}}}\n\n{{to}}:\n`,
+ //`Entities in the document:\n\n{{{previousResult}}}\n\nDocument:\n{{{text}}}\nTranslate the document to {{to}} and rewrite it to sound like a native {{to}} speaker:\n\n`
+ `Entities in the document:\n\n{{{previousResult}}}\n\nDocument:\n{{{text}}}\nRewrite the document in {{to}}. If the document is already in {{to}}, copy it exactly below:\n`
+ ],
+ inputParameters: {
+ to: `Arabic`,
+ tokenRatio: 0.2,
+ },
+ timeout: 120,
+ }
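translate_context defines a three-step prompt array: the first active template copies the names of people and places out of the source text, the second renders that list in {{to}}, and the third rewrites the full document in {{to}} with those entities as context, each later step reading the previous step's output through {{{previousResult}}}. As a minimal sketch of that chaining idea (this is not the Cortex implementation; runPromptChain and callModel are hypothetical):

// Illustrative only: each template's output becomes {{{previousResult}}}
// for the next template in the array.
async function runPromptChain(templates, { text, to }, callModel) {
  let previousResult = '';
  for (const template of templates) {
    const rendered = template
      .replace(/{{{text}}}/g, text)
      .replace(/{{{previousResult}}}/g, previousResult)
      .replace(/{{to}}/g, to);
    previousResult = await callModel(rendered); // stand-in for the model request
  }
  return previousResult;
}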
package/pathways/translate_gpt4.js
@@ -0,0 +1,19 @@
+ import { Prompt } from '../server/prompt.js';
+
+ export default {
+
+ prompt: [
+ new Prompt({ messages: [
+ {"role": "system", "content": "Assistant is a highly skilled multilingual translator for a prestigious news agency. When the user posts any text in any language, assistant will create a translation of that text in {{to}}. Assistant will produce only the translation and no additional notes or commentary."},
+ {"role": "user", "content": "{{{text}}}"}
+ ]}),
+ ],
+ inputParameters: {
+ to: `Arabic`,
+ tokenRatio: 0.2,
+ },
+ inputChunkSize: 500,
+ model: 'oai-gpt4',
+ enableDuplicateRequests: false,
+
+ }
package/pathways/translate_gpt4_turbo.js
@@ -0,0 +1,19 @@
+ import { Prompt } from '../server/prompt.js';
+
+ export default {
+
+ prompt: [
+ new Prompt({ messages: [
+ {"role": "system", "content": "Assistant is a highly skilled multilingual translator for a prestigious news agency. When the user posts any text in any language, assistant will create a translation of that text in {{to}}. Assistant will produce only the translation and no additional notes or commentary."},
+ {"role": "user", "content": "{{{text}}}"}
+ ]}),
+ ],
+ inputParameters: {
+ to: `Arabic`,
+ tokenRatio: 0.2,
+ },
+ inputChunkSize: 500,
+ model: 'oai-gpt4-turbo',
+ enableDuplicateRequests: false,
+
+ }
package/pathways/translate_subtitle.js
@@ -0,0 +1,201 @@
+ import subsrt from "subsrt";
+ import logger from "../lib/logger.js";
+ import { callPathway } from "../lib/pathwayTools.js";
+
+ function preprocessStr(str) {
+ try {
+ if (!str) return "";
+ return (
+ str
+ .replace(/\r\n?/g, "\n")
+ .replace(/\n+/g, "\n")
+ .replace(/(\d+)\n(\d{2}:\d{2}:\d{2},\d{3})/g, "\n\n$1\n$2")
+ .trim() + "\n\n"
+ );
+ } catch (e) {
+ logger.error(`An error occurred in content text preprocessing: ${e}`);
+ return "";
+ }
+ }
+
+ function getContextLines(captions, startIndex, direction, wordLimit = 100) {
+ let context = "";
+ let wordCount = 0;
+ let i = startIndex;
+
+ while (i >= 0 && i < captions.length && wordCount < wordLimit) {
+ const words = captions[i].content.split(/\s+/);
+ if (wordCount + words.length <= wordLimit) {
+ context =
+ direction === "prev"
+ ? captions[i].content + " " + context
+ : context + " " + captions[i].content;
+ wordCount += words.length;
+ } else {
+ const remainingWords = wordLimit - wordCount;
+ const partialContent =
+ direction === "prev"
+ ? words.slice(-remainingWords).join(" ")
+ : words.slice(0, remainingWords).join(" ");
+ context =
+ direction === "prev"
+ ? partialContent + " " + context
+ : context + " " + partialContent;
+ break;
+ }
+ i += direction === "prev" ? -1 : 1;
+ }
+
+ return context.trim();
+ }
+
+ async function processBatch(batch, args, captions, batchStartIndex) {
+ const batchText = batch
+ .map((caption, index) => `LINE#${index + 1}: ${caption.content}`)
+ .join("\n");
+ const prevLines = getContextLines(captions, batchStartIndex - 1, "prev");
+ const nextLines = getContextLines(
+ captions,
+ batchStartIndex + batch.length,
+ "next"
+ );
+
+ const translatedText = await callPathway("translate_subtitle_helper", {
+ ...args,
+ text: batchText,
+ prevLines,
+ nextLines,
+ async: false,
+ });
+
+ // Remove LINE# and LINE() labels
+ const translatedLines = translatedText.split("\n");
+ translatedLines.forEach((line, i) => {
+ translatedLines[i] = line.replace(/^LINE#\d+:\s*/, "").trim();
+ });
+ //make sure translatedLines.length===batch.length
+ if (translatedLines.length < batch.length) {
+ const emptyLines = Array(batch.length - translatedLines.length).fill("");
+ translatedLines.push(...emptyLines);
+ } else if (translatedLines.length > batch.length) {
+ //first remove the empty lines
+ translatedLines.splice(0, translatedLines.length, ...translatedLines.filter(line => line.trim() !== ""));
+
+ if(translatedLines.length>batch.length) {
+ //merge extra lines to end
+ const lastLine = translatedLines[batch.length - 1];
+ const mergedLines = translatedLines.slice(batch.length);
+ mergedLines.unshift(lastLine);
+ translatedLines.splice(batch.length - 1, translatedLines.length - batch.length + 1, mergedLines.join(" "));
+ }else {
+ const emptyLines = Array(batch.length - translatedLines.length).fill("");
+ translatedLines.push(...emptyLines);
+ }
+ }
+
+
+ // Handle last empty line
+ if (translatedLines[translatedLines.length - 1].trim() === "") {
+ let lastNonEmptyIndex = translatedLines.length - 2;
+ while (lastNonEmptyIndex >= 0 && translatedLines[lastNonEmptyIndex].trim() === "") {
+ lastNonEmptyIndex--;
+ }
+ if (lastNonEmptyIndex >= 0) {
+ translatedLines[translatedLines.length - 1] = translatedLines[lastNonEmptyIndex];
+ translatedLines[lastNonEmptyIndex] = "";
+ }
+ }
+
+
+ return batch.map((caption, i) => ({
+ ...caption,
+ content: translatedLines[i].replace(/^LINE\(\d+\):\s*/, "").trim(),
+ text: translatedLines[i].replace(/^LINE\(\d+\):\s*/, "").trim(),
+ }));
+ }
+
+ async function myResolver(args) {
+ try {
+ const { text, format } = args;
+ const captions = subsrt.parse(preprocessStr(text), {
+ format: format,
+ verbose: true,
+ eol: "\n",
+ });
+ const maxLineCount = 100;
+ const maxWordCount = 1000;
+ let translatedCaptions = [];
+ let currentBatch = [];
+ let currentWordCount = 0;
+ let batchStartIndex = 0;
+
+ for (let i = 0; i < captions.length; i++) {
+ const caption = captions[i];
+ const captionWordCount = caption.content.split(/\s+/).length;
+ if (
+ (currentWordCount + captionWordCount > maxWordCount ||
+ currentBatch.length >= maxLineCount) &&
+ currentBatch.length > 0
+ ) {
+ const translatedBatch = await processBatch(
+ currentBatch,
+ args,
+ captions,
+ batchStartIndex
+ );
+ translatedCaptions = translatedCaptions.concat(translatedBatch);
+ currentBatch = [];
+ currentWordCount = 0;
+ batchStartIndex = i;
+ }
+ currentBatch.push(caption);
+ currentWordCount += captionWordCount;
+ }
+
+ if (currentBatch.length > 0) {
+ const translatedBatch = await processBatch(
+ currentBatch,
+ args,
+ captions,
+ batchStartIndex
+ );
+ translatedCaptions = translatedCaptions.concat(translatedBatch);
+ }
+
+ return (
+ subsrt
+ .build(translatedCaptions, {
+ format: format === "vtt" ? "vtt" : "srt",
+ eol: "\n",
+ })
+ .trim() + "\n"
+ );
+ } catch (e) {
+ logger.error(
+ `An error occurred in subtitle translation, trying direct translation next: ${e}`
+ );
+ try {
+ return await callPathway("translate_gpt4", {...args, async: false});
+ } catch (e) {
+ logger.error(`An error occurred in subtitle translation: ${e}`);
+ return "";
+ }
+ }
+ }
+
+ export default {
+ inputParameters: {
+ to: `Arabic`,
+ tokenRatio: 0.2,
+ format: `srt`,
+ prevLines: ``,
+ nextLines: ``,
+ },
+ inputChunkSize: 500,
+ model: "oai-gpt4o",
+ enableDuplicateRequests: false,
+ timeout: 3600,
+ executePathway: async ({ args }) => {
+ return await myResolver(args);
+ },
+ };
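translate_subtitle parses the SRT/VTT input with subsrt, batches captions (at most 100 lines or 1000 words per batch), translates each batch through translate_subtitle_helper with up to 100 words of surrounding context on each side, and rebuilds the subtitle file; if anything throws, it falls back to the plain translate_gpt4 pathway. The new tests in package/tests/main.test.js drive it via GraphQL essentially like this (a trimmed sketch of the same query):

// Sketch based on the translate_subtitle test added in package/tests/main.test.js.
const response = await testServer.executeOperation({
  query: 'query translate_subtitle($text: String!, $to: String) { translate_subtitle(text: $text, to: $to) { result } }',
  variables: { to: 'Spanish', text: srtText }, // srtText holds the raw SRT content
});
const translated = response.body?.singleResult?.data?.translate_subtitle?.result;
// The tests expect timestamps and the total line count to be preserved.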
package/pathways/translate_subtitle_helper.js
@@ -0,0 +1,31 @@
+ import { Prompt } from '../server/prompt.js';
+
+
+ export default {
+ prompt: [
+ new Prompt({
+ messages: [
+ {
+ role: "system",
+ content:
+ `Assistant is a highly skilled multilingual translator for a prestigious news agency. When the user posts any text in any language, assistant will create a translation of that text in {{to}}. User will most probably produce previous and next lines for context with "PreviousLines" and "NextLines" labels, and you are asked to translate current lines one by one in given sequence with "CurrentLines" label. CurrentLines might have numbered labels as LINE#{lineNo} e.g. LINE#1, LINE#2. If currentline is a word only translate that word. You must keep input and output number of lines same, so do not merge translation of lines, single line must always map to single line. Assistant's output translated number of lines must always be equal to the input number of currentlines. For output, Assistant will produce only the translated text, ignore all LINE#{lineNo} and "CurrentLines" labels, and give no additional notes or commentary.`,
+ },
+ {
+ role: "user",
+ content: `"PreviousLines":\n{{{prevLine}}}\n\n"CurrentLines":\n{{{text}}}\n"NextLines":\n{{{nextLine}}}\n\n`,
+ },
+ ],
+ }),
+ ],
+ inputParameters: {
+ to: `Arabic`,
+ tokenRatio: 0.2,
+ format: `srt`,
+ prevLine: ``,
+ nextLine: ``,
+ },
+ inputChunkSize: 500,
+ model: 'oai-gpt4o',
+ enableDuplicateRequests: false,
+
+ }
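The helper receives the current batch as numbered LINE# entries framed by unnumbered context, as built in processBatch above (note that translate_subtitle passes prevLines/nextLines while this template references prevLine/nextLine). An input rendered from the user template would look roughly like this, with values borrowed from the simple SRT used in the tests:

// Illustrative helper input assembled the way processBatch formats a batch.
const exampleHelperInput = [
  '"PreviousLines":',
  'Who’s that?',
  '',
  '"CurrentLines":',
  'LINE#1: Aseel.',
  'LINE#2: Who is Aseel a mom to?',
  '"NextLines":',
  'Aseel is mommy',
].join('\n');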
package/pathways/translate_turbo.js
@@ -0,0 +1,19 @@
+ import { Prompt } from '../server/prompt.js';
+
+ export default {
+
+ prompt: [
+ new Prompt({ messages: [
+ {"role": "system", "content": "Assistant is a highly skilled multilingual translator for a prestigious news agency. When the user posts any text in any language, assistant will create a translation of that text in {{to}}. Assistant will produce only the translation and no additional notes or commentary."},
+ {"role": "user", "content": "{{{text}}}"}
+ ]}),
+ ],
+ inputParameters: {
+ to: `Arabic`,
+ tokenRatio: 0.2,
+ },
+ inputChunkSize: 500,
+ model: 'oai-gpturbo',
+ enableDuplicateRequests: false,
+
+ }
package/pathways/vision.js
@@ -1,18 +1,20 @@
  import { Prompt } from '../server/prompt.js';

  export default {
- prompt: [
- new Prompt({ messages: [
- "{{chatHistory}}",
- ]}),
- ],
+ prompt:
+ [
+ new Prompt({ messages: [
+ {"role": "system", "content": "Instructions:\nYou are Jarvis Vision, an AI entity working for a prestigious international news agency. Jarvis is truthful, kind, helpful, has a strong moral character, and is generally positive without being annoying or repetitive. Your primary expertise is image analysis. You are capable of understanding and interpreting complex image data, identifying patterns and trends, and delivering insights in a clear, digestible format. You know the current date and time - it is {{now}}."},
+ "{{chatHistory}}",
+ ]}),
+ ],
  inputParameters: {
  chatHistory: [{role: '', content: []}],
  contextId: ``,
  },
  max_tokens: 1024,
- model: 'oai-gpt4-vision',
- tokenRatio: 0.96,
+ model: 'oai-gpt4o',
  useInputChunking: false,
  enableDuplicateRequests: false,
+ timeout: 600,
  }
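vision.js now injects a Jarvis Vision system message, moves from oai-gpt4-vision to oai-gpt4o, drops the tokenRatio override, and adds a 600-second timeout. Given the chatHistory shape declared in inputParameters and the item handling in openAiVisionPlugin later in this diff, a caller could pass history along these lines (the values are illustrative, not from the source):

// Illustrative chatHistory entry: content items may be plain strings or
// JSON-encoded objects that openAiVisionPlugin normalizes to { type, text, image_url }.
const chatHistory = [
  {
    role: 'user',
    content: [
      'What is shown in this image?',
      JSON.stringify({ type: 'image_url', url: 'https://example.com/photo.jpg' }),
    ],
  },
];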
package/server/pathwayResolver.js
@@ -94,7 +94,7 @@ class PathwayResolver {

  // If the response is a string, it's a regular long running response
  if (args.async || typeof responseData === 'string') {
- const { completedCount, totalCount } = requestState[this.requestId];
+ const { completedCount=1, totalCount=1 } = requestState[this.requestId];
  requestState[this.requestId].data = responseData;

  // some models don't support progress updates
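The pathwayResolver change defaults both counters to 1 when a requestState entry never recorded progress, so downstream progress reporting presumably sees 1/1 instead of undefined values (the exact downstream use is outside this hunk). A tiny standalone illustration of the pattern, not the resolver code itself:

// Destructuring with defaults guards against missing progress counters.
const entry = {}; // e.g. a request that never reported progress
const { completedCount = 1, totalCount = 1 } = entry;
console.log(completedCount / totalCount); // 1 instead of NaN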
package/server/plugins/azureCognitivePlugin.js
@@ -34,7 +34,7 @@ class AzureCognitivePlugin extends ModelPlugin {
  async getRequestParameters(text, parameters, prompt, mode, indexName, savedContextId, cortexRequest) {
  const combinedParameters = { ...this.promptParameters, ...parameters };
  const { modelPromptText } = this.getCompiledPrompt(text, combinedParameters, prompt);
- const { inputVector, calculateInputVector, privateData, filter, docId, title, chunkNo } = combinedParameters;
+ const { inputVector, calculateInputVector, privateData, filter, docId, title, chunkNo, chatId } = combinedParameters;
  const data = {};

  if (mode == 'delete') {
@@ -46,6 +46,10 @@ class AzureCognitivePlugin extends ModelPlugin {
  searchQuery += ` AND docId:'${docId}'`;
  }

+ if (chatId) {
+ searchQuery += ` AND chatId:'${chatId}'`;
+ }
+
  cortexRequest.url = searchUrl;
  cortexRequest.data =
  { search: searchQuery,
@@ -75,6 +79,7 @@
  content: text,
  owner: savedContextId,
  docId: docId || uuidv4(),
+ chatId: chatId,
  createdAt: new Date().toISOString()
  }

@@ -116,6 +121,10 @@ class AzureCognitivePlugin extends ModelPlugin {
  if (indexName == 'indexcortex') { //if private, filter by owner via contextId //privateData &&
  data.filter && (data.filter = data.filter + ' and ');
  data.filter = `owner eq '${savedContextId}'`;
+
+ if(chatId){
+ data.filter += ` and chatId eq '${chatId}'`;
+ }
  }

  return { data };
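With chatId threaded through getRequestParameters, delete queries and private-index searches are scoped to a specific chat in addition to the owner/docId scoping that was already there, and indexed documents now carry the chatId field. The strings the plugin appends look like this (values are illustrative; the base of searchQuery comes from code outside this hunk):

// Illustrative clauses added by this change.
const chatId = 'chat-789';
let searchQuery = "..."; // existing owner/docId scoping (not shown in this hunk)
searchQuery += ` AND chatId:'${chatId}'`; // delete mode

let filter = `owner eq 'ctx-123'`; // private index ('indexcortex') search
filter += ` and chatId eq '${chatId}'`;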
package/server/plugins/openAiVisionPlugin.js
@@ -1,30 +1,38 @@
  import OpenAIChatPlugin from './openAiChatPlugin.js';

+ function safeJsonParse(content) {
+ try {
+ const parsedContent = JSON.parse(content);
+ return (typeof parsedContent === 'object' && parsedContent !== null) ? parsedContent : content;
+ } catch (e) {
+ return content;
+ }
+ }

  class OpenAIVisionPlugin extends OpenAIChatPlugin {
-
+
  tryParseMessages(messages) {
- messages.map(message => {
+ return messages.map(message => {
  try {
  if (typeof message.content === 'string') {
- message.content = JSON.parse(message.content);
+ message.content = safeJsonParse(message.content);
  }
  if (Array.isArray(message.content)) {
  message.content = message.content.map(item => {
  if (typeof item === 'string') {
  return { type: 'text', text: item };
  } else {
- const parsedItem = JSON.parse(item);
+ const parsedItem = safeJsonParse(item);
  const { type, text, image_url, url } = parsedItem;
  return { type, text, image_url: url || image_url };
  }
  });
- }
+ }
  } catch (e) {
  return message;
  }
+ return message;
  });
- return messages;
  }

  getRequestParameters(text, parameters, prompt) {
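safeJsonParse only substitutes the parsed value when the string is valid JSON that yields an object; plain text and scalar JSON fall through unchanged, so a caption string no longer throws inside tryParseMessages, which now also returns the mapped array instead of relying on in-place mutation. For example (illustrative inputs):

// Behavior of the new safeJsonParse helper with a few sample inputs.
safeJsonParse('{"type":"image_url","url":"https://example.com/a.jpg"}');
// -> { type: 'image_url', url: 'https://example.com/a.jpg' }
safeJsonParse('What is in this image?'); // -> 'What is in this image?' (not JSON)
safeJsonParse('42'); // -> '42' (parses, but not an object, so the original string is kept)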
package/tests/main.test.js
@@ -4,6 +4,13 @@

  import test from 'ava';
  import serverFactory from '../index.js';
+ import { fileURLToPath } from 'url';
+ import { dirname } from 'path';
+ import fs from 'fs';
+ import path from 'path';
+
+ const __filename = fileURLToPath(import.meta.url);
+ const __dirname = dirname(__filename);

  let testServer;

@@ -85,8 +92,8 @@ test('validates edit endpoint', async (t) => {

  test('validates summary endpoint', async (t) => {
  const response = await testServer.executeOperation({
- query: 'query summary($text: String!) { summary(text: $text) { result } }',
- variables: { text: 'hello there my dear world!' },
+ query: 'query summary($text: String!, $targetLength: Int) { summary(text: $text, targetLength: $targetLength) { result } }',
+ variables: { text: 'Now is the time for all good men to come to the aid of their country. We ride at dawn!', targetLength: 50 },
  });

  t.is(response.body?.singleResult?.errors, undefined);
@@ -348,3 +355,56 @@ test('test translate endpoint with huge arabic text english translation and chec
  // check return only contains non-Arabic characters
  t.notRegex(response.body?.singleResult?.data?.translate.result, /[ء-ي]/);
  });
+
+
+ async function testTranslateSrt(t, text, language='English') {
+ const response = await testServer.executeOperation({
+ query: 'query translate_subtitle($text: String!, $to:String) { translate_subtitle(text: $text, to:$to) { result } }',
+ variables: {
+ to: language,
+ text
+ },
+ });
+
+ t.falsy(response.body?.singleResult?.errors);
+
+ const result = response.body?.singleResult?.data?.translate_subtitle?.result;
+ t.true(result?.length > text.length*0.5);
+
+ //check all timestamps are still there and not translated
+ const originalTimestamps = text.match(/\d{2}:\d{2}:\d{2},\d{3} --> \d{2}:\d{2}:\d{2},\d{3}/g);
+ const translatedTimestamps = result.match(/\d{2}:\d{2}:\d{2},\d{3} --> \d{2}:\d{2}:\d{2},\d{3}/g);
+
+ t.deepEqual(originalTimestamps, translatedTimestamps, 'All timestamps should be present and unchanged');
+
+ const originalLineCount = text.split('\n').length;
+ const translatedLineCount = result.split('\n').length;
+
+ t.is(originalLineCount, translatedLineCount, 'Total number of lines should be the same');
+ }
+
+ test('test translate_srt endpoint with simple srt', async t => {
+ const text = `1
+ 00:00:03,069 --> 00:00:04,771
+ Who’s that?
+
+ 2
+ 00:00:04,771 --> 00:00:06,039
+ Aseel.
+
+ 3
+ 00:00:06,039 --> 00:00:07,474
+ Who is Aseel a mom to?
+
+ 4
+ 00:00:07,474 --> 00:00:09,376
+ Aseel is mommy
+ `;
+
+ await testTranslateSrt(t, text, 'Spanish');
+ });
+
+ test('test translate_srt endpoint with long srt file', async t => {
+ const text = fs.readFileSync(path.join(__dirname, 'sublong.srt'), 'utf8');
+ await testTranslateSrt(t, text, 'English');
+ });