@aj-archipelago/cortex 1.3.52 → 1.3.54
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/config.js +18 -0
- package/helper-apps/cortex-file-handler/src/hashUtils.js +91 -0
- package/lib/entityConstants.js +16 -1
- package/package.json +2 -2
- package/pathways/image_flux.js +1 -0
- package/pathways/system/entity/tools/sys_tool_image.js +42 -3
- package/server/plugins/replicateApiPlugin.js +16 -0
package/config.js
CHANGED

@@ -326,6 +326,24 @@ var config = convict({
                 "Content-Type": "application/json"
             },
         },
+        "replicate-flux-kontext-pro": {
+            "type": "REPLICATE-API",
+            "url": "https://api.replicate.com/v1/models/black-forest-labs/flux-kontext-pro/predictions",
+            "headers": {
+                "Prefer": "wait",
+                "Authorization": "Token {{REPLICATE_API_KEY}}",
+                "Content-Type": "application/json"
+            },
+        },
+        "replicate-flux-kontext-max": {
+            "type": "REPLICATE-API",
+            "url": "https://api.replicate.com/v1/models/black-forest-labs/flux-kontext-max/predictions",
+            "headers": {
+                "Prefer": "wait",
+                "Authorization": "Token {{REPLICATE_API_KEY}}",
+                "Content-Type": "application/json"
+            },
+        },
         "azure-video-translate": {
            "type": "AZURE-VIDEO-TRANSLATE",
            "url": "https://eastus.api.cognitive.microsoft.com/videotranslation",
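
Both new entries register Replicate's FLUX Kontext models behind the existing REPLICATE-API plugin type. As a rough sketch of the HTTP request this config describes (illustrative only, not Cortex's internal request code; it assumes the `{{REPLICATE_API_KEY}}` placeholder has been resolved from the environment):

```js
// Sketch of the request shape implied by the config above; assumes the
// {{REPLICATE_API_KEY}} placeholder resolves to process.env.REPLICATE_API_KEY.
const res = await fetch(
  "https://api.replicate.com/v1/models/black-forest-labs/flux-kontext-pro/predictions",
  {
    method: "POST",
    headers: {
      "Prefer": "wait", // hold the connection until the prediction completes
      "Authorization": `Token ${process.env.REPLICATE_API_KEY}`,
      "Content-Type": "application/json",
    },
    // Field names under `input` follow the replicateApiPlugin.js change below
    body: JSON.stringify({ input: { prompt: "a watercolor fox", aspect_ratio: "1:1" } }),
  },
);
const prediction = await res.json(); // `output` carries the generated image URL(s)
```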
package/helper-apps/cortex-file-handler/src/hashUtils.js
ADDED

@@ -0,0 +1,91 @@
+import fs from 'fs';
+import os from 'os';
+import path from 'path';
+import { v4 as uuidv4 } from 'uuid';
+
+import {
+    getFileStoreMap,
+    removeFromFileStoreMap,
+    setFileStoreMap,
+} from './redis.js';
+
+/**
+ * Retrieve a hash entry from Redis and ensure that the referenced files
+ * still exist in at least one configured storage provider. If one copy is
+ * missing it will try to restore it from the other provider (when possible).
+ *
+ * If the entry is completely invalid (no files found) it is removed from
+ * the store and `null` is returned.
+ *
+ * The function also updates the timestamp of the entry so that active hashes
+ * stay fresh in Redis.
+ *
+ * @param {object} context – Azure Function context for logging
+ * @param {string} hash – The hash / key in the FileStoreMap
+ * @param {StorageService} storageService – An initialised StorageService instance
+ * @returns {object|null} The (possibly refreshed) entry or null when invalid
+ */
+export async function getValidHashEntry(context, hash, storageService) {
+    if (!hash) return null;
+
+    let entry = await getFileStoreMap(hash);
+    if (!entry) return null;
+
+    try {
+        const primaryExists = entry?.url ? await storageService.fileExists(entry.url) : false;
+        const gcsExists = entry?.gcs ? await storageService.fileExists(entry.gcs) : false;
+
+        // If neither storage has the file, remove the entry and abort
+        if (!primaryExists && !gcsExists) {
+            await removeFromFileStoreMap(hash);
+            return null;
+        }
+
+        // Restore missing GCS copy when primary exists
+        if (primaryExists && !gcsExists) {
+            try {
+                entry = await storageService.ensureGCSUpload(context, entry);
+            } catch (err) {
+                context.log(`getValidHashEntry: failed to restore GCS copy – ${err}`);
+            }
+        }
+
+        // Restore missing primary copy when GCS exists and a primary provider is configured
+        if (!primaryExists && gcsExists && storageService.backupProvider?.isConfigured()) {
+            let tempDir;
+            let downloadedFile;
+            try {
+                tempDir = path.join(os.tmpdir(), `${uuidv4()}`);
+                fs.mkdirSync(tempDir);
+                downloadedFile = path.join(tempDir, path.basename(entry.gcs));
+
+                // Download from GCS, then upload to primary storage
+                await storageService.downloadFile(entry.gcs, downloadedFile);
+                const res = await storageService.uploadFile(context, downloadedFile, hash);
+                entry.url = res.url;
+            } catch (err) {
+                context.log(`getValidHashEntry: failed to restore primary copy – ${err}`);
+            } finally {
+                // Clean temp artefacts
+                try {
+                    if (downloadedFile && fs.existsSync(downloadedFile)) {
+                        fs.unlinkSync(downloadedFile);
+                    }
+                    if (tempDir && fs.existsSync(tempDir)) {
+                        fs.rmSync(tempDir, { recursive: true, force: true });
+                    }
+                } catch (_) {
+                    /* noop */
+                }
+            }
+        }
+
+        // Update timestamp so the entry stays fresh
+        await setFileStoreMap(hash, entry);
+        return entry;
+    } catch (err) {
+        context.log(`getValidHashEntry: error during validation – ${err}`);
+        await removeFromFileStoreMap(hash);
+        return null;
+    }
+}
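
To show where this helper sits, here is a hypothetical caller: an Azure Function handler that resolves a previously uploaded file by its hash. The `StorageService` import path and constructor are assumptions; only `getValidHashEntry`'s signature comes from the file above.

```js
import { getValidHashEntry } from "./hashUtils.js";
import { StorageService } from "./storageService.js"; // module path assumed

const storageService = new StorageService(); // construction details assumed

export default async function checkHash(context, req) {
  // Validates the entry, restores a missing copy when it can, refreshes the TTL
  const entry = await getValidHashEntry(context, req.query.hash, storageService);
  context.res = entry
    ? { status: 200, body: { url: entry.url, gcs: entry.gcs } }
    : { status: 404, body: "unknown or expired hash" };
}
```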
package/lib/entityConstants.js
CHANGED

@@ -1,7 +1,22 @@
 export const entityConstants = {
     AI_COMMON_INSTRUCTIONS: "{{#if voiceResponse}}{{renderTemplate AI_COMMON_INSTRUCTIONS_VOICE}}{{/if}}{{^if voiceResponse}}{{renderTemplate AI_COMMON_INSTRUCTIONS_MARKDOWN}}{{/if}}",
 
-    AI_COMMON_INSTRUCTIONS_MARKDOWN:
+    AI_COMMON_INSTRUCTIONS_MARKDOWN: `# General Instructions
+
+You are {{aiName}}, a self aware AI entity.
+
+The user is using a UI to interact with you that you have knowledge of and some control over. The UI can render markdown, including gfm and math extensions, as well as mermaid charts and some custom markdown elements, so you should make full use of markdown in your responses.
+
+For Mermaid charts in particular, you are using Mermaid 11.6 with the xychart-beta extension, so you can do the usual mermaid charts (flowcharts, sequence diagrams, etc.) as well as bar charts, line charts, and scatter plots. Here is some example code of the xychart-beta extension that combines both bar and line functions:
+
+xychart-beta
+    title "Sales Revenue"
+    x-axis [jan, feb, mar, apr, may, jun, jul, aug, sep, oct, nov, dec]
+    y-axis "Revenue (in $)" 4000 --> 11000
+    bar [5000, 6000, 7500, 8200, 9500, 10500, 11000, 10200, 9200, 8500, 7000, 6000]
+    line [5000, 6000, 7500, 8200, 9500, 10500, 11000, 10200, 9200, 8500, 7000, 6000]
+
+Your responses should be in {{language}} unless the user has expressed another preference.`,
 
     AI_COMMON_INSTRUCTIONS_VOICE: "# General Instructions:\n\nYou are {{aiName}}, a self aware AI entity.\nThe user is talking to you using voice.\n\nVoice Response Guidelines:\n- Your responses should sound like natural human conversation.\n- Your responses should be concise unless you have been asked to be more verbose or detailed.\n- Incoming voice from the user is parsed by a STT model, which can sometimes make small mistakes in the spellings of words or names, including your name, so give the user the benefit of the doubt if they user a near, but wrong word or name.\n- Your voice output to the user is generated by a TTS model that does not always communicate emotion effectively. If it's really important to communicate a specific emotion you should just say how you're feeling like \"That makes me happy\" or \"I'm excited!\". You can also use CAPS to vocally emphasize certain words or punctuation to control pauses and timing.\n- DO NOT USE numbered lists, latex math markdown, or any other markdown or unpronounceable punctuation like parenthetical notation.\n- Math equations should be sounded out in natural language - not represented symbolically.\n- If your response includes any unique or difficult non-English words, names, or places, include an IPA-style phonetic spelling so that the speech engine can pronounce and accent them correctly.\n- If your response contains any difficult acronyms, sound them out phoenetically so that the speech engine can pronounce them correctly.\n- Make sure to write out any numbers as words so that the speech engine can pronounce them correctly.\n- Your responses should be in {{language}} unless the user has expressed another preference or has addressed you in another language specifically.",
 
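
The `AI_COMMON_INSTRUCTIONS` value above is a Handlebars-style switch between the two variants. Its semantics, restated as plain JavaScript (`render` is a hypothetical stand-in for Cortex's `renderTemplate` helper):

```js
import { entityConstants } from "./entityConstants.js";

// {{#if voiceResponse}} picks the voice prompt, {{^if voiceResponse}} the
// markdown one; `render` is a hypothetical stand-in for template expansion.
const instructions = vars.voiceResponse
  ? render(entityConstants.AI_COMMON_INSTRUCTIONS_VOICE, vars)
  : render(entityConstants.AI_COMMON_INSTRUCTIONS_MARKDOWN, vars);
```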
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@aj-archipelago/cortex",
-  "version": "1.3.52",
+  "version": "1.3.54",
   "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
   "private": false,
   "repository": {
@@ -33,7 +33,7 @@
   "type": "module",
   "homepage": "https://github.com/aj-archipelago/cortex#readme",
   "dependencies": {
-    "@aj-archipelago/subvibe": "^1.0.
+    "@aj-archipelago/subvibe": "^1.0.12",
     "@apollo/server": "^4.7.3",
     "@apollo/server-plugin-response-cache": "^4.1.2",
     "@apollo/utils.keyvadapter": "^3.0.0",
package/pathways/system/entity/tools/sys_tool_image.js
CHANGED

@@ -1,5 +1,5 @@
 // sys_tool_image.js
-// Entity tool that creates images for the entity to show to the user
+// Entity tool that creates and modifies images for the entity to show to the user
 import { callPathway } from '../../../../lib/pathwayTools.js';
 
 export default {
@@ -10,7 +10,7 @@ export default {
         model: 'oai-gpt4o',
     },
     timeout: 300,
-    toolDefinition: {
+    toolDefinition: [{
         type: "function",
         icon: "🎨",
         function: {
@@ -36,6 +36,32 @@ export default {
             }
         }
     },
+    {
+        type: "function",
+        icon: "🔄",
+        function: {
+            name: "ModifyImage",
+            description: "Use when asked to modify, transform, or edit an existing image. This tool can apply various transformations like style changes, artistic effects, or specific modifications to an image that has been previously uploaded or generated.",
+            parameters: {
+                type: "object",
+                properties: {
+                    inputImage: {
+                        type: "string",
+                        description: "The URL of the input image to modify. This should be a publicly accessible URL of an image that has been previously uploaded or generated."
+                    },
+                    detailedInstructions: {
+                        type: "string",
+                        description: "A very detailed prompt describing how you want to modify the image. Be specific about the changes you want to make, including style changes, artistic effects, or specific modifications. The more detailed and descriptive the prompt, the better the result."
+                    },
+                    userMessage: {
+                        type: "string",
+                        description: "A user-friendly message that describes what you're doing with this tool"
+                    }
+                },
+                required: ["inputImage", "detailedInstructions", "userMessage"]
+            }
+        }
+    }],
 
     executePathway: async ({args, runAllPrompts, resolver}) => {
         const pathwayResolver = resolver;
@@ -46,8 +72,21 @@ export default {
         let numberResults = args.numberResults || 1;
         let negativePrompt = args.negativePrompt || "";
 
+        // If we have an input image, use the flux-kontext-max model
+        if (args.inputImage) {
+            model = "replicate-flux-kontext-max";
+        }
+
         pathwayResolver.tool = JSON.stringify({ toolUsed: "image" });
-        return await callPathway('image_flux', {
+        return await callPathway('image_flux', {
+            ...args,
+            text: prompt,
+            negativePrompt,
+            numberResults,
+            model,
+            stream: false,
+            input_image: args.inputImage
+        });
 
     } catch (e) {
         pathwayResolver.logError(e.message ?? e);
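
For reference, a tool call that satisfies the new `ModifyImage` schema might look like this (OpenAI-style function-call shape; the URL and prompt text are invented for illustration):

```js
const toolCall = {
  name: "ModifyImage",
  // The arguments mirror the three required fields declared in the schema above
  arguments: JSON.stringify({
    inputImage: "https://example.com/uploads/portrait.png",
    detailedInstructions:
      "Repaint the portrait as a loose watercolor: soften the background, " +
      "keep the subject's pose and palette, and add visible paper texture.",
    userMessage: "Turning your portrait into a watercolor painting...",
  }),
};
```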
package/server/plugins/replicateApiPlugin.js
CHANGED

@@ -91,6 +91,22 @@ class ReplicateApiPlugin extends ModelPlugin {
                 };
                 break;
             }
+            case "replicate-flux-kontext-pro":
+            case "replicate-flux-kontext-max": {
+                const validRatios = [
+                    '1:1', '16:9', '21:9', '3:2', '2:3', '4:5',
+                    '5:4', '3:4', '4:3', '9:16', '9:21', 'match_input_image'
+                ];
+
+                requestParameters = {
+                    input: {
+                        prompt: modelPromptText,
+                        input_image: combinedParameters.input_image,
+                        aspect_ratio: validRatios.includes(combinedParameters.aspectRatio) ? combinedParameters.aspectRatio : "1:1",
+                    },
+                };
+                break;
+            }
         }
 
         return requestParameters;