@aj-archipelago/cortex 1.1.21 → 1.1.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. package/config/default.example.json +84 -0
  2. package/config.js +5 -4
  3. package/helper-apps/cortex-file-handler/blobHandler.js +115 -98
  4. package/helper-apps/cortex-file-handler/fileChunker.js +13 -8
  5. package/helper-apps/cortex-file-handler/index.js +48 -2
  6. package/package.json +2 -1
  7. package/pathways/categorize.js +23 -0
  8. package/pathways/chat.js +1 -1
  9. package/pathways/chat_code.js +19 -0
  10. package/pathways/chat_context.js +19 -0
  11. package/pathways/chat_jarvis.js +19 -0
  12. package/pathways/chat_persist.js +23 -0
  13. package/pathways/code_review.js +17 -0
  14. package/pathways/cognitive_delete.js +2 -1
  15. package/pathways/cognitive_insert.js +1 -0
  16. package/pathways/cognitive_search.js +1 -0
  17. package/pathways/embeddings.js +1 -1
  18. package/pathways/expand_story.js +12 -0
  19. package/pathways/format_paragraph_turbo.js +16 -0
  20. package/pathways/format_summarization.js +21 -0
  21. package/pathways/gemini_15_vision.js +20 -0
  22. package/pathways/gemini_vision.js +20 -0
  23. package/pathways/grammar.js +30 -0
  24. package/pathways/hashtags.js +19 -0
  25. package/pathways/headline.js +43 -0
  26. package/pathways/headline_custom.js +169 -0
  27. package/pathways/highlights.js +22 -0
  28. package/pathways/image.js +2 -1
  29. package/pathways/index.js +107 -17
  30. package/pathways/jira_story.js +18 -0
  31. package/pathways/keywords.js +4 -0
  32. package/pathways/language.js +17 -6
  33. package/pathways/locations.js +93 -0
  34. package/pathways/quotes.js +19 -0
  35. package/pathways/rag.js +207 -0
  36. package/pathways/rag_jarvis.js +254 -0
  37. package/pathways/rag_search_helper.js +21 -0
  38. package/pathways/readme.js +18 -0
  39. package/pathways/release_notes.js +16 -0
  40. package/pathways/remove_content.js +31 -0
  41. package/pathways/retrieval.js +23 -0
  42. package/pathways/run_claude35_sonnet.js +21 -0
  43. package/pathways/run_claude3_haiku.js +20 -0
  44. package/pathways/run_gpt35turbo.js +20 -0
  45. package/pathways/run_gpt4.js +20 -0
  46. package/pathways/run_gpt4_32.js +20 -0
  47. package/pathways/select_extension.js +6 -0
  48. package/pathways/select_services.js +10 -0
  49. package/pathways/spelling.js +3 -0
  50. package/pathways/story_angles.js +13 -0
  51. package/pathways/styleguide/styleguide.js +221 -0
  52. package/pathways/styleguidemulti.js +127 -0
  53. package/pathways/subhead.js +48 -0
  54. package/pathways/summarize_turbo.js +98 -0
  55. package/pathways/summary.js +31 -12
  56. package/pathways/sys_claude_35_sonnet.js +19 -0
  57. package/pathways/sys_claude_3_haiku.js +19 -0
  58. package/pathways/sys_google_chat.js +19 -0
  59. package/pathways/sys_google_code_chat.js +19 -0
  60. package/pathways/sys_google_gemini_chat.js +23 -0
  61. package/pathways/sys_openai_chat.js +2 -2
  62. package/pathways/sys_openai_chat_16.js +19 -0
  63. package/pathways/sys_openai_chat_gpt4.js +19 -0
  64. package/pathways/sys_openai_chat_gpt4_32.js +19 -0
  65. package/pathways/sys_openai_chat_gpt4_turbo.js +19 -0
  66. package/pathways/tags.js +25 -0
  67. package/pathways/taxonomy.js +135 -0
  68. package/pathways/timeline.js +51 -0
  69. package/pathways/topics.js +25 -0
  70. package/pathways/topics_sentiment.js +20 -0
  71. package/pathways/transcribe.js +2 -4
  72. package/pathways/translate.js +10 -12
  73. package/pathways/translate_azure.js +13 -0
  74. package/pathways/translate_context.js +21 -0
  75. package/pathways/translate_gpt4.js +19 -0
  76. package/pathways/translate_gpt4_turbo.js +19 -0
  77. package/pathways/translate_turbo.js +19 -0
  78. package/pathways/vision.js +9 -7
  79. package/server/plugins/azureCognitivePlugin.js +10 -1
  80. package/server/plugins/openAiVisionPlugin.js +14 -6
  81. package/tests/main.test.js +2 -2
  82. package/tests/vision.test.js +0 -34
@@ -0,0 +1,98 @@
1
+ // Text summarization module with custom resolver for turbo models
2
+ // This module exports a prompt that takes an input text and generates a summary using a custom resolver.
3
+
4
+ // Import required modules
5
+ import { semanticTruncate } from '../server/chunker.js';
6
+ import { PathwayResolver } from '../server/pathwayResolver.js';
7
+ import { Prompt } from '../server/prompt.js';
8
+ import { callPathway } from '../lib/pathwayTools.js';
9
+
10
+ export default {
11
+ // Define input parameters for the prompt, such as the target length of the summary.
12
+ inputParameters: {
13
+ targetLength: 0,
14
+ targetLanguage: ''
15
+ },
16
+
17
+ model: 'oai-gpt4o',
18
+
19
+ // Custom resolver to generate summaries by reprompting if they are too long or too short.
20
+ resolver: async (parent, args, contextValue, _info) => {
21
+ const { config, pathway } = contextValue;
22
+ const originalTargetLength = args.targetLength || 0;
23
+ const targetLanguage = args.targetLanguage || await callPathway('language', args);
24
+
25
+ const targetLanguagePrompt = targetLanguage ? `language '${targetLanguage}'` : 'same language as the text being summarized';
26
+
27
+ // If targetLength is not provided, execute the prompt once and return the result.
28
+ if (originalTargetLength === 0) {
29
+ let pathwayResolver = new PathwayResolver({ config, pathway, args });
30
+ pathwayResolver.pathwayPrompt = [
31
+ new Prompt({ messages: [
32
+ {"role": "system", "content": `Assistant is a highly skilled multilingual AI writing agent that summarizes text. When the user posts any text in any language, assistant will create a detailed summary of that text. Assistant will produce only the summary text and no additional or other response. The summary must be in the ${targetLanguagePrompt}.`},
33
+ {"role": "user", "content": "Text to summarize:\n{{{text}}}"}
34
+ ]}),
35
+ ];
36
+ return await pathwayResolver.resolve(args);
37
+ }
38
+
39
+ const errorMargin = 0.1;
40
+ const lowTargetLength = originalTargetLength * (1 - errorMargin);
41
+ const targetWords = Math.round(originalTargetLength / 6.6);
42
+
43
+ // If the text is shorter than the summary length, just return the text.
44
+ if (args.text.length <= originalTargetLength) {
45
+ return args.text;
46
+ }
47
+
48
+ const MAX_ITERATIONS = 5;
49
+ let summary = '';
50
+ let pathwayResolver = new PathwayResolver({ config, pathway, args });
51
+
52
+ // Modify the prompt to be words-based instead of characters-based.
53
+ pathwayResolver.pathwayPrompt = [
54
+ new Prompt({ messages: [
55
+ {"role": "system", "content": `Assistant is a highly skilled multilingual AI writing agent that summarizes text. When the user posts any text in any language, assistant will create a detailed summary of that text. The summary should be ${targetWords} words long. Assistant will produce only the summary text and no additional or other response. The summary must be in the ${targetLanguagePrompt}.`},
56
+ {"role": "user", "content": "Text to summarize:\n{{{text}}}"}
57
+ ]}),
58
+ ];
59
+
60
+ let i = 0;
61
+ // Make sure it's long enough to start
62
+ while ((summary.length < lowTargetLength) && i < MAX_ITERATIONS) {
63
+ summary = await pathwayResolver.resolve(args);
64
+ i++;
65
+ }
66
+
67
+ // If it's too long, it could be because the input text was chunked
68
+ // and now we have all the chunks together. We can summarize that
69
+ // to get a comprehensive summary.
70
+ if (summary.length > originalTargetLength) {
71
+ pathwayResolver.pathwayPrompt = [
72
+ new Prompt({ messages: [
73
+ {"role": "system", "content": `Assistant is a highly skilled multilingual AI writing agent that summarizes text. When the user posts any text in any language, assistant will create a detailed summary of that text. The summary should be ${targetWords} words long. Assistant will produce only the summary text and no additional or other response. The summary must be in the ${targetLanguagePrompt}.`},
74
+ {"role": "user", "content": `Text to summarize:\n${summary}`}
75
+ ]}),
76
+ ];
77
+ summary = await pathwayResolver.resolve(args);
78
+ i++;
79
+
80
+ // Now make sure it's not too long
81
+ while ((summary.length > originalTargetLength) && i < MAX_ITERATIONS) {
82
+ // add the summary response from the assistant to the prompt
83
+ pathwayResolver.pathwayPrompt[0].messages.push({"role": "assistant", "content": summary});
84
+ // add the next query to the prompt
85
+ pathwayResolver.pathwayPrompt[0].messages.push({"role": "system", "content": `Is that less than ${targetWords} words long? If not, try again using a length of no more than ${targetWords} words. Generate only the summary text and no apology or other response. The summary must be in the ${targetLanguagePrompt}.`});
86
+ summary = await pathwayResolver.resolve(args);
87
+ i++;
88
+ }
89
+ }
90
+
91
+ // If the summary is still too long, truncate it.
92
+ if (summary.length > originalTargetLength) {
93
+ return semanticTruncate(summary, originalTargetLength);
94
+ } else {
95
+ return summary;
96
+ }
97
+ }
98
+ }
@@ -1,27 +1,35 @@
1
- // summary.js
2
- // Text summarization module with custom resolver
1
+ // Text summarization module with custom resolver for turbo models
3
2
  // This module exports a prompt that takes an input text and generates a summary using a custom resolver.
4
3
 
5
4
  // Import required modules
6
5
  import { semanticTruncate } from '../server/chunker.js';
7
6
  import { PathwayResolver } from '../server/pathwayResolver.js';
7
+ import { Prompt } from '../server/prompt.js';
8
8
 
9
9
  export default {
10
10
  // The main prompt function that takes the input text and asks to generate a summary.
11
- prompt: `{{{text}}}\n\nWrite a summary of the above text. If the text is in a language other than english, make sure the summary is written in the same language:\n\n`,
11
+ prompt:[
12
+ new Prompt({ messages: [
13
+ {"role": "system", "content": "Assistant is a highly skilled multilingual AI writing agent that summarizes text. When the user posts any text in any language, assistant will create a detailed, complete summary of that text. The summary must be in the same language as the posted text. Assistant will produce only the summary text and no additional or other response."},
14
+ {"role": "user", "content": "Text to summarize:\n{{{text}}}"}
15
+ ]}),
16
+ ],
12
17
 
13
18
  // Define input parameters for the prompt, such as the target length of the summary.
14
19
  inputParameters: {
15
20
  targetLength: 0,
16
21
  },
17
22
 
23
+ model: 'oai-gpt4o',
24
+ enableCache: true,
25
+
18
26
  // Custom resolver to generate summaries by reprompting if they are too long or too short.
19
- resolver: async (parent, args, contextValue, _info) => {
27
+ resolver: async (_parent, args, contextValue, _info) => {
20
28
  const { config, pathway } = contextValue;
21
- const originalTargetLength = args.targetLength;
29
+ const originalTargetLength = args.targetLength || pathway.inputParameters.targetLength;
22
30
 
23
31
  // If targetLength is not provided, execute the prompt once and return the result.
24
- if (!originalTargetLength) {
32
+ if (originalTargetLength === 0) {
25
33
  let pathwayResolver = new PathwayResolver({ config, pathway, args });
26
34
  return await pathwayResolver.resolve(args);
27
35
  }
@@ -40,7 +48,12 @@ export default {
40
48
  let pathwayResolver = new PathwayResolver({ config, pathway, args });
41
49
 
42
50
  // Modify the prompt to be words-based instead of characters-based.
43
- pathwayResolver.pathwayPrompt = `Write a summary of all of the text below. If the text is in a language other than english, make sure the summary is written in the same language. Your summary should be ${targetWords} words in length.\n\nText:\n\n{{{text}}}\n\nSummary:\n\n`
51
+ pathwayResolver.pathwayPrompt = [
52
+ new Prompt({ messages: [
53
+ {"role": "system", "content": `Assistant is a highly skilled multilingual AI writing agent that summarizes text. When the user posts any text in any language, assistant will create a detailed summary of that text. The summary must be in the same language as the posted text. The summary should be ${targetWords} words long. Assistant will produce only the summary text and no additional or other response.`},
54
+ {"role": "user", "content": "Text to summarize:\n{{{text}}}"}
55
+ ]}),
56
+ ];
44
57
 
45
58
  let i = 0;
46
59
  // Make sure it's long enough to start
@@ -53,13 +66,21 @@ export default {
53
66
  // and now we have all the chunks together. We can summarize that
54
67
  // to get a comprehensive summary.
55
68
  if (summary.length > originalTargetLength) {
56
- pathwayResolver.pathwayPrompt = `Write a summary of all of the text below. If the text is in a language other than english, make sure the summary is written in the same language. Your summary should be ${targetWords} words in length.\n\nText:\n\n${summary}\n\nSummary:\n\n`
69
+ pathwayResolver.pathwayPrompt = [
70
+ new Prompt({ messages: [
71
+ {"role": "system", "content": `Assistant is a highly skilled multilingual AI writing agent that summarizes text. When the user posts any text in any language, assistant will create a detailed summary of that text. The summary must be in the same language as the posted text. The summary should be ${targetWords} words long. Assistant will produce only the summary text and no additional or other response.`},
72
+ {"role": "user", "content": `Text to summarize:\n${summary}`}
73
+ ]}),
74
+ ];
57
75
  summary = await pathwayResolver.resolve(args);
58
76
  i++;
59
77
 
60
78
  // Now make sure it's not too long
61
79
  while ((summary.length > originalTargetLength) && i < MAX_ITERATIONS) {
62
- pathwayResolver.pathwayPrompt = `${summary}\n\nIs that less than ${targetWords} words long? If not, try again using a length of no more than ${targetWords} words.\n\n`;
80
+ // add the summary response from the assistant to the prompt
81
+ pathwayResolver.pathwayPrompt[0].messages.push({"role": "assistant", "content": summary});
82
+ // add the next query to the prompt
83
+ pathwayResolver.pathwayPrompt[0].messages.push({"role": "system", "content": `Is that less than ${targetWords} words long? If not, try again using a length of no more than ${targetWords} words.`});
63
84
  summary = await pathwayResolver.resolve(args);
64
85
  i++;
65
86
  }
@@ -72,6 +93,4 @@ export default {
72
93
  return summary;
73
94
  }
74
95
  }
75
- };
76
-
77
-
96
+ }
@@ -0,0 +1,19 @@
1
+ // sys_claude_35_sonnet.js
2
+ // override handler for claude-35-sonnet
3
+
4
+ import { Prompt } from '../server/prompt.js';
5
+
6
+ export default {
7
+ prompt:
8
+ [
9
+ new Prompt({ messages: [
10
+ "{{messages}}",
11
+ ]}),
12
+ ],
13
+ inputParameters: {
14
+ messages: [],
15
+ },
16
+ model: 'claude-35-sonnet-vertex',
17
+ useInputChunking: false,
18
+ emulateOpenAIChatModel: 'claude-3.5-sonnet',
19
+ }
@@ -0,0 +1,19 @@
1
+ // sys_claude_3_haiku.js
2
+ // override handler for claude-3-haiku
3
+
4
+ import { Prompt } from '../server/prompt.js';
5
+
6
+ export default {
7
+ prompt:
8
+ [
9
+ new Prompt({ messages: [
10
+ "{{messages}}",
11
+ ]}),
12
+ ],
13
+ inputParameters: {
14
+ messages: [],
15
+ },
16
+ model: 'claude-3-haiku-vertex',
17
+ useInputChunking: false,
18
+ emulateOpenAIChatModel: 'claude-3-haiku',
19
+ }
@@ -0,0 +1,19 @@
1
+ // sys_google_chat.js
2
+ // override handler for palm-chat
3
+
4
+ import { Prompt } from '../server/prompt.js';
5
+
6
+ export default {
7
+ prompt:
8
+ [
9
+ new Prompt({ messages: [
10
+ "{{messages}}",
11
+ ]}),
12
+ ],
13
+ inputParameters: {
14
+ messages: [],
15
+ },
16
+ model: 'palm-chat',
17
+ useInputChunking: false,
18
+ emulateOpenAIChatModel: 'palm-chat',
19
+ }
@@ -0,0 +1,19 @@
1
+ // sys_google_code_chat.js
2
+ // override handler for palm-code-chat
3
+
4
+ import { Prompt } from '../server/prompt.js';
5
+
6
+ export default {
7
+ prompt:
8
+ [
9
+ new Prompt({ messages: [
10
+ "{{messages}}",
11
+ ]}),
12
+ ],
13
+ inputParameters: {
14
+ messages: [],
15
+ },
16
+ model: 'palm-code-chat',
17
+ useInputChunking: false,
18
+ emulateOpenAIChatModel: 'palm-code-chat',
19
+ }
@@ -0,0 +1,23 @@
1
+ // sys_google_gemini_chat.js
2
+ // override handler for gemini-chat
3
+
4
+ import { Prompt } from '../server/prompt.js';
5
+
6
+ export default {
7
+ prompt:
8
+ [
9
+ new Prompt({ messages: [
10
+ "{{messages}}",
11
+ ]}),
12
+ ],
13
+ inputParameters: {
14
+ messages: [],
15
+ },
16
+ model: 'gemini-pro-chat',
17
+ useInputChunking: false,
18
+ emulateOpenAIChatModel: 'gemini-pro-chat',
19
+ geminiSafetySettings: [{category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'BLOCK_ONLY_HIGH'},
20
+ {category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', threshold: 'BLOCK_ONLY_HIGH'},
21
+ {category: 'HARM_CATEGORY_HARASSMENT', threshold: 'BLOCK_ONLY_HIGH'},
22
+ {category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'BLOCK_ONLY_HIGH'}],
23
+ }
@@ -1,5 +1,5 @@
1
1
  // sys_openai_chat.js
2
- // default handler for openAI chat endpoints when REST endpoints are enabled
2
+ // override handler for gpt-3.5-turbo
3
3
 
4
4
  import { Prompt } from '../server/prompt.js';
5
5
 
@@ -15,5 +15,5 @@ export default {
15
15
  },
16
16
  model: 'oai-gpturbo',
17
17
  useInputChunking: false,
18
- emulateOpenAIChatModel: '*',
18
+ emulateOpenAIChatModel: 'gpt-3.5-turbo',
19
19
  }
@@ -0,0 +1,19 @@
1
+ // sys_openai_chat_16.js
2
+ // override handler for gpt-3.5-turbo-16k
3
+
4
+ import { Prompt } from '../server/prompt.js';
5
+
6
+ export default {
7
+ prompt:
8
+ [
9
+ new Prompt({ messages: [
10
+ "{{messages}}",
11
+ ]}),
12
+ ],
13
+ inputParameters: {
14
+ messages: [],
15
+ },
16
+ model: 'azure-turbo-16',
17
+ useInputChunking: false,
18
+ emulateOpenAIChatModel: 'gpt-3.5-turbo-16k',
19
+ }
@@ -0,0 +1,19 @@
1
+ // sys_openai_chat_gpt4.js
2
+ // override handler for gpt-4
3
+
4
+ import { Prompt } from '../server/prompt.js';
5
+
6
+ export default {
7
+ prompt:
8
+ [
9
+ new Prompt({ messages: [
10
+ "{{messages}}",
11
+ ]}),
12
+ ],
13
+ inputParameters: {
14
+ messages: [],
15
+ },
16
+ model: 'oai-gpt4',
17
+ useInputChunking: false,
18
+ emulateOpenAIChatModel: 'gpt-4',
19
+ }
@@ -0,0 +1,19 @@
1
+ // sys_openai_chat_gpt4_32.js
2
+ // override handler for gpt-4-32
3
+
4
+ import { Prompt } from '../server/prompt.js';
5
+
6
+ export default {
7
+ prompt:
8
+ [
9
+ new Prompt({ messages: [
10
+ "{{messages}}",
11
+ ]}),
12
+ ],
13
+ inputParameters: {
14
+ messages: [],
15
+ },
16
+ model: 'oai-gpt4-32',
17
+ useInputChunking: false,
18
+ emulateOpenAIChatModel: 'gpt-4-32k',
19
+ }
@@ -0,0 +1,19 @@
1
+ // sys_openai_chat_gpt4_turbo.js
2
+ // override handler for gpt-4-turbo
3
+
4
+ import { Prompt } from '../server/prompt.js';
5
+
6
+ export default {
7
+ prompt:
8
+ [
9
+ new Prompt({ messages: [
10
+ "{{messages}}",
11
+ ]}),
12
+ ],
13
+ inputParameters: {
14
+ messages: [],
15
+ },
16
+ model: 'oai-gpt4-turbo',
17
+ useInputChunking: false,
18
+ emulateOpenAIChatModel: 'gpt-4-turbo',
19
+ }
@@ -0,0 +1,25 @@
1
+ // tags.js
2
+ // News tags identification module
3
+ // This module exports a prompt that takes an input article text and identifies the top news tags for the article.
4
+
5
+ import { callPathway } from '../lib/pathwayTools.js';
6
+
7
+ export default {
8
+ prompt: [],
9
+ model: 'oai-gpt4o',
10
+
11
+ // Define input parameters for the prompt, such as the number of top news tags to identify and select.
12
+ inputParameters: {
13
+ count: 5,
14
+ tags: '',
15
+ },
16
+
17
+ // Set 'list' to true to indicate that the output is expected to be a list.
18
+ list: true,
19
+ timeout: 240,
20
+ temperature: 0,
21
+
22
+ resolver: async (parent, args, _contextValue, _info) => {
23
+ return await callPathway('taxonomy', { ...args, taxonomyType: 'tag', taxonomyItems: args.tags });
24
+ }
25
+ }
@@ -0,0 +1,135 @@
1
+ // taxonomy.js
2
+ // News taxonomy identification module
3
+ // This module exports a prompt that takes an input article text and taxonomy list and type and identifies the top news taxonomy for the article.
4
+
5
+ import { Prompt } from "../server/prompt.js";
6
+ import { PathwayResolver } from '../server/pathwayResolver.js';
7
+ import { callPathway } from '../lib/pathwayTools.js';
8
+
9
+ function getFilteredTaxonomyItems(taxonomyResult, taxonomySet) {
10
+ // Normalize taxonomy item
11
+ function normalizeTaxonomyItem(item) {
12
+ return item.trim().toLowerCase().replace(/[.,]/g, '');
13
+ }
14
+
15
+ const taxonomyItems = taxonomySet.split(',');
16
+ const filteredTaxonomyResult = taxonomyResult.reduce((acc, item) => {
17
+ const normalizedItem = normalizeTaxonomyItem(item);
18
+ const matchingItemIndex = taxonomyItems
19
+ .map(s => normalizeTaxonomyItem(s))
20
+ .findIndex(normalizedPredefinedItem => normalizedPredefinedItem === normalizedItem);
21
+
22
+ // If a matchingItemIndex is found, add the verbatim item from the predefined set
23
+ if (matchingItemIndex !== -1) {
24
+ acc.push(taxonomyItems[matchingItemIndex]);
25
+ }
26
+
27
+ return acc;
28
+ }, []);
29
+
30
+ // If filteredTaxonomyResult is not empty, push the members of filteredTaxonomyResult into taxonomyResults
31
+ const taxonomyResults = [];
32
+ if (filteredTaxonomyResult.length > 0) {
33
+ taxonomyResults.push(...filteredTaxonomyResult);
34
+ }
35
+
36
+ return taxonomyResults;
37
+ }
38
+
39
+ export default {
40
+ prompt: [],
41
+ model: 'oai-gpt4o',
42
+
43
+ // Define input parameters for the prompt, such as the number of top news taxonomyItems to identify and select.
44
+ inputParameters: {
45
+ count: 5,
46
+ taxonomyItems: '',
47
+ taxonomyType: 'topic',
48
+ },
49
+
50
+ // Set 'list' to true to indicate that the output is expected to be a list.
51
+ list: true,
52
+ timeout: 240,
53
+
54
+ // Custom resolver to find matching taxonomyItems.
55
+ resolver: async (parent, args, contextValue, _info) => {
56
+ const { config, pathway } = contextValue;
57
+ const taxonomyItems = args.taxonomyItems;
58
+ const taxonomyType = args.taxonomyType || 'topic';
59
+ let text = args.text;
60
+
61
+ // Summarize the input text
62
+ text = await callPathway('summary', { ...args, targetLength: 0 });
63
+
64
+ // loop through the comma delimited list of taxonomyItems and create sets of 25 or less
65
+ // to pass into a call to the taxonomyItem picking logic
66
+ const taxonomyItemsArray = taxonomyItems.split(',')
67
+ .map(taxonomyItem => taxonomyItem.trim())
68
+ .filter(taxonomyItem => taxonomyItem.length > 0);
69
+
70
+ const taxonomyItemSets = taxonomyItemsArray.reduce((acc, taxonomyItem, index) => {
71
+ if (index % 25 === 0) {
72
+ acc.push(taxonomyItem);
73
+ } else {
74
+ acc[acc.length - 1] += `, ${taxonomyItem}`;
75
+ }
76
+ return acc;
77
+ }, []);
78
+
79
+ let pathwayResolver = new PathwayResolver({ config, pathway, args });
80
+
81
+ // call the taxonomyItemging logic for each set of taxonomyItems
82
+ const taxonomyItemResults = [];
83
+ for (let taxonomyItemSet of taxonomyItemSets) {
84
+ if (taxonomyItemSet.length === 0) continue;
85
+ pathwayResolver.pathwayPrompt = [
86
+ new Prompt({
87
+ messages: [
88
+ { "role": "system", "content": `Assistant is an AI editorial assistant for an online news agency tasked with identifying ${taxonomyType}s from a pre-determined list that fit a news article summary. When User posts a news article summary and a list of possible ${taxonomyType}s, assistant will carefully examine the ${taxonomyType}s in the list. If any of them are a high confidence match for the article, assistant will return the matching ${taxonomyType}s as a comma separated list. Assistant must only identify a ${taxonomyType} if assistant is sure the ${taxonomyType} is a good match for the article. Any ${taxonomyType}s that assistant returns must be in the list already - assistant cannot add new ${taxonomyType}s. If there are no good matches, assistant will respond with <none>. Assistant will return only the ${taxonomyType}s and no other notes or commentary.`},
89
+ { "role": "user", "content": `Article Summary: {{{text}}}\n\nPossible ${taxonomyType}s: ${taxonomyItemSet}\n\n`},
90
+ ]
91
+ }),
92
+ ];
93
+
94
+ const taxonomyItemResult = await pathwayResolver.resolve({ ...args, text });
95
+
96
+ taxonomyItemResults.push(...getFilteredTaxonomyItems(taxonomyItemResult, taxonomyItemSet));
97
+ }
98
+
99
+ if (taxonomyItemResults.length < 2) {
100
+ return taxonomyItemResults;
101
+ }
102
+
103
+ if (args.count === 1) {
104
+ pathwayResolver.pathwayPrompt = [
105
+ new Prompt({
106
+ messages: [
107
+ { "role": "system", "content": `Assistant is an AI editorial assistant for an online news agency tasked with identifying a single ${taxonomyType} from a list that best fits a news article summary. When User posts a news article summary and a list of possible ${taxonomyType}s, assistant will carefully examine the ${taxonomyType}s in the list and return the one ${taxonomyType} that best represents the news article summary. Assistant will use high judgement when picking the correct ${taxonomyType}. Assistant will return only the ${taxonomyType} and no other notes or commentary.` },
108
+ { "role": "user", "content": `Article Summary: {{{text}}}\n\nPossible ${taxonomyType}s: ${taxonomyItemResults.join(', ')}\n\n`},
109
+ ]
110
+ }),
111
+ ];
112
+ } else {
113
+ pathwayResolver.pathwayPrompt = [
114
+ new Prompt({
115
+ messages: [
116
+ { "role": "system", "content": `Assistant is an AI editorial assistant for an online news agency tasked with identifying ${taxonomyType}s from a list that best fit a news article summary. When User posts a news article summary and a list of possible ${taxonomyType}s, assistant will carefully examine the ${taxonomyType}s in the list and return them in order of relevance to the article summary (best fit first). Assistant will return only the list of ${taxonomyType}s and no other notes or commentary. Assistant will not add ${taxonomyType}s to the list and will select only from User's posted ${taxonomyType}s.` },
117
+ { "role": "user", "content": `Article Summary: {{{text}}}\n\nPossible ${taxonomyType}s: ${taxonomyItemResults.join(', ')}\n\n`},
118
+ ]
119
+ }),
120
+ ];
121
+ }
122
+
123
+ const taxonomyItemResult = await pathwayResolver.resolve({ ...args, text });
124
+
125
+ taxonomyItemResults.length = 0;
126
+ let filteredItems = getFilteredTaxonomyItems(taxonomyItemResult, taxonomyItems);
127
+ if (args.count > 0) {
128
+ filteredItems = filteredItems.slice(0, args.count);
129
+ }
130
+ taxonomyItemResults.push(...filteredItems);
131
+
132
+ return taxonomyItemResults;
133
+
134
+ }
135
+ }
@@ -0,0 +1,51 @@
1
+ import { Prompt } from '../server/prompt.js';
2
+ import * as chrono from 'chrono-node';
3
+ import dayjs from 'dayjs';
4
+
5
+ const getLastOccurrenceOfMonth = (month) => {
6
+ month = month - 1;
7
+ const date = new Date();
8
+
9
+ while (date.getMonth() !== month) {
10
+ date.setMonth(date.getMonth() - 1);
11
+ }
12
+
13
+ return date.toLocaleString('default', { month: 'long', year: 'numeric' });
14
+ }
15
+
16
+ export default {
17
+ prompt: [
18
+ new Prompt({
19
+ messages: [
20
+ {
21
+ "role": "system", "content": `
22
+ Assistant is a news editor at a prestigious international news agency. Assistant's job is to create timelines of events mentioned in news excerpts, covering the past 20 years.
23
+ Assume the excerpt was written on {{date}}.
24
+ Each item in the timeline contains two pieces of information: the event and the time it occurred.
25
+ The time can be a full date, or a partial date, such as month and year or year alone.
26
+ The order in which the events are mentioned in the document may not be the order in which they occurred.
27
+ If only a month can be inferred, use the month preceding {{date}}.
28
+ If a weekday is mentioned, use the date of that weekday immediately preceding {{date}}.
29
+ The event must be news-worthy, not trivial, and not a mere statement of fact. Examples of events to not include: "Biking to work", "The world needs to end poverty", "Climate change is getting worse".
30
+ Assistant will respond with a JSON array contaning each timeline item, defined using two keys: event:string and time:any. Do not wrap the array in a root object.
31
+ Assistant will respond only with the array and no additional notes or commentary.
32
+ For each event title, assistant will use the language that the document is written in.
33
+ ` },
34
+ { "role": "user", "content": "NATO conducted airstrikes in the northern regions of Russia on February 6, 2022. One week later, the south was targeted." },
35
+ {"role": "assistant", "content": `[{"event": "NATO targets Russian north with airstrikes", "time": "2022-02-06"}, {"event": "NATO conducts airstrikes in southern Russia", "time": "2022-02-13"}]"},
36
+ { "role": "user", "content": "Russia annexed Crimea from Ukraine in 2014, a move that most of the world denounced as illegal, and moved in November to officially claim four regions in Ukraine's south and east as Russian territory.` },
37
+ {"role": "assistant", "content": `[{"event": "Russia annexes Crimea from Ukraine", "time": "2014"}, {"event": "Russia claims four regions in Ukraine as Russian territory", "time": "${getLastOccurrenceOfMonth(9)}}"}]`},
38
+ { "role": "user", "content": "The UK on Sunday passed a resolution in favour of all political parties." },
39
+ {"role": "assistant", "content": `[{"event": "UK passes resolution in favour of all political parties", "time": "${dayjs(chrono.parseDate('Last Sunday')).format('YYYY-MM-DD')}"}]`},
40
+ { "role": "user", "content": "As of Tuesday, 24 bodies had been identified." },
41
+ {"role": "assistant", "content": `[{"event": "24 bodies identified", "time": "${dayjs(chrono.parseDate('Last Tuesday')).format('YYYY-MM-DD')}"}]`},
42
+ { "role": "user", "content": "{{text}}" },
43
+ ]
44
+ })],
45
+ model: 'oai-gpt4o',
46
+ inputParameters: {
47
+ date: new Date().toDateString(),
48
+ },
49
+ temperature: 0.0,
50
+ enableDuplicateRequests: false,
51
+ }
@@ -0,0 +1,25 @@
1
+ // topics.js
2
+ // News categories identification module
3
+ // This module exports a prompt that takes an input article text and identifies the top news categories for the article.
4
+
5
+ import { callPathway } from '../lib/pathwayTools.js';
6
+
7
+ export default {
8
+ prompt: [],
9
+ model: 'oai-gpt4o',
10
+
11
+ // Define input parameters for the prompt, such as the number of top news topics to identify and select.
12
+ inputParameters: {
13
+ count: 5,
14
+ topics: '',
15
+ },
16
+
17
+ // Set 'list' to true to indicate that the output is expected to be a list.
18
+ list: true,
19
+ timeout: 240,
20
+
21
+ // Custom resolver to find matching topics.
22
+ resolver: async (parent, args, _contextValue, _info) => {
23
+ return await callPathway('taxonomy', { ...args, taxonomyType: 'topic', taxonomyItems: args.topics });
24
+ }
25
+ }
@@ -0,0 +1,20 @@
1
+ import { Prompt } from '../server/prompt.js';
2
+
3
+ export default {
4
+ prompt: [
5
+ new Prompt({
6
+ messages: [
7
+ { "role": "system", "content": `Assistant is an expert topic and trend analyst AI working for a prestigious international news agency. When a user submits commentary with a video id, Assistant will return the video id being analyzed in a block called "Video ID:", then will summarize the commentary and return it in a block called "Summary:" and then will list the topics contained therein with general sentiments about each of the topics in a block called "Topics:". Each topic line in the block should be of the form <#><topic description> - <sentiment analysis>. The goal of the analysis is to answer the question "What are the viewers interested in and how do they feel about it?" Assistant will return the video id, summary, and topics and sentiments and no other notes or commentary.`},
8
+ { "role": "user", "content": `Video commentary:\n\n{{{text}}}`},
9
+ ]
10
+ })
11
+ ],
12
+ model: 'oai-gpt4o',
13
+ //inputChunkSize: 1000,
14
+ joinChunksWith: '\n',
15
+ tokenRatio: 1,
16
+ enableDuplicateRequests: false,
17
+ timeout: 1800,
18
+ }
19
+
20
+