apexify.js 4.4.36 → 4.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +26 -199
- package/dist/ai/ApexAI.d.ts +30 -18
- package/dist/ai/ApexAI.d.ts.map +1 -1
- package/dist/ai/ApexAI.js +34 -171
- package/dist/ai/ApexAI.js.map +1 -1
- package/dist/ai/ApexModules.d.ts.map +1 -1
- package/dist/ai/ApexModules.js +42 -127
- package/dist/ai/ApexModules.js.map +1 -1
- package/dist/ai/functions/draw.d.ts +6 -1
- package/dist/ai/functions/draw.d.ts.map +1 -1
- package/dist/ai/functions/draw.js +550 -722
- package/dist/ai/functions/draw.js.map +1 -1
- package/dist/ai/functions/generateVoiceResponse.d.ts +6 -1
- package/dist/ai/functions/generateVoiceResponse.d.ts.map +1 -1
- package/dist/ai/functions/generateVoiceResponse.js +2 -2
- package/dist/ai/functions/generateVoiceResponse.js.map +1 -1
- package/dist/ai/functions/validOptions.d.ts +14 -7
- package/dist/ai/functions/validOptions.d.ts.map +1 -1
- package/dist/ai/functions/validOptions.js +23 -22
- package/dist/ai/functions/validOptions.js.map +1 -1
- package/dist/ai/modals-chat/electronHub/imageModels.d.ts +7 -0
- package/dist/ai/modals-chat/electronHub/imageModels.d.ts.map +1 -0
- package/dist/ai/modals-chat/electronHub/imageModels.js +28 -0
- package/dist/ai/modals-chat/electronHub/imageModels.js.map +1 -0
- package/dist/ai/modals-chat/freesedgpt/cartoon.d.ts +2 -0
- package/dist/ai/modals-chat/freesedgpt/cartoon.d.ts.map +1 -0
- package/dist/ai/modals-chat/freesedgpt/cartoon.js +26 -0
- package/dist/ai/modals-chat/freesedgpt/cartoon.js.map +1 -0
- package/dist/ai/modals-chat/freesedgpt/chat.d.ts +5 -0
- package/dist/ai/modals-chat/freesedgpt/chat.d.ts.map +1 -0
- package/dist/ai/modals-chat/freesedgpt/chat.js +30 -0
- package/dist/ai/modals-chat/freesedgpt/chat.js.map +1 -0
- package/dist/ai/modals-chat/freesedgpt/flux.d.ts +2 -0
- package/dist/ai/modals-chat/freesedgpt/flux.d.ts.map +1 -0
- package/dist/ai/modals-chat/freesedgpt/flux.js +26 -0
- package/dist/ai/modals-chat/freesedgpt/flux.js.map +1 -0
- package/dist/ai/modals-chat/gemini/Gemini-flash.d.ts.map +1 -0
- package/dist/ai/modals-chat/{Gemini-flash.js → gemini/Gemini-flash.js} +2 -2
- package/dist/ai/modals-chat/gemini/Gemini-flash.js.map +1 -0
- package/dist/ai/modals-chat/gemini/Gemini-pro.d.ts.map +1 -0
- package/dist/ai/modals-chat/{Gemini-pro.js → gemini/Gemini-pro.js} +2 -2
- package/dist/ai/modals-chat/gemini/Gemini-pro.js.map +1 -0
- package/dist/ai/modals-chat/gemini/config.d.ts.map +1 -0
- package/dist/ai/modals-chat/gemini/config.js.map +1 -0
- package/dist/ai/modals-chat/gemini/geminiFast.d.ts.map +1 -0
- package/dist/ai/modals-chat/gemini/geminiFast.js.map +1 -0
- package/dist/ai/modals-chat/groq/chatgroq.d.ts +9 -0
- package/dist/ai/modals-chat/groq/chatgroq.d.ts.map +1 -0
- package/dist/ai/modals-chat/groq/chatgroq.js +58 -0
- package/dist/ai/modals-chat/groq/chatgroq.js.map +1 -0
- package/dist/ai/modals-chat/groq/imageAnalyzer.d.ts +8 -0
- package/dist/ai/modals-chat/groq/imageAnalyzer.d.ts.map +1 -0
- package/dist/ai/modals-chat/groq/imageAnalyzer.js +64 -0
- package/dist/ai/modals-chat/groq/imageAnalyzer.js.map +1 -0
- package/dist/ai/modals-chat/groq/whisper.d.ts.map +1 -0
- package/dist/ai/modals-chat/{whisper.js → groq/whisper.js} +2 -2
- package/dist/ai/modals-chat/groq/whisper.js.map +1 -0
- package/dist/ai/modals-chat/hercai/chatModals.d.ts +7 -0
- package/dist/ai/modals-chat/hercai/chatModals.d.ts.map +1 -0
- package/dist/ai/modals-chat/hercai/chatModals.js +23 -0
- package/dist/ai/modals-chat/hercai/chatModals.js.map +1 -0
- package/dist/ai/modals-chat/hercai/chatModels.d.ts +7 -0
- package/dist/ai/modals-chat/hercai/chatModels.d.ts.map +1 -0
- package/dist/ai/modals-chat/hercai/chatModels.js +23 -0
- package/dist/ai/modals-chat/hercai/chatModels.js.map +1 -0
- package/dist/ai/modals-chat/others/otherModels.d.ts +7 -0
- package/dist/ai/modals-chat/others/otherModels.d.ts.map +1 -0
- package/dist/ai/modals-chat/others/otherModels.js +88 -0
- package/dist/ai/modals-chat/others/otherModels.js.map +1 -0
- package/dist/ai/modals-chat/rsn/rsnChat.d.ts +8 -0
- package/dist/ai/modals-chat/rsn/rsnChat.d.ts.map +1 -0
- package/dist/ai/modals-chat/{bing.js → rsn/rsnChat.js} +29 -8
- package/dist/ai/modals-chat/rsn/rsnChat.js.map +1 -0
- package/dist/ai/modals-images/cartoon.js +1 -1
- package/dist/ai/modals-images/cartoon.js.map +1 -1
- package/dist/ai/modals-images/flux.js +1 -1
- package/dist/ai/modals-images/flux.js.map +1 -1
- package/dist/ai/utils.d.ts +4 -7
- package/dist/ai/utils.d.ts.map +1 -1
- package/dist/ai/utils.js +6 -12
- package/dist/ai/utils.js.map +1 -1
- package/dist/index.d.ts +14 -9
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +6 -3
- package/dist/index.js.map +1 -1
- package/lib/ai/ApexAI.ts +90 -191
- package/lib/ai/ApexModules.ts +45 -135
- package/lib/ai/functions/draw.ts +572 -788
- package/lib/ai/functions/generateVoiceResponse.ts +2 -2
- package/lib/ai/functions/validOptions.ts +24 -23
- package/lib/ai/modals-chat/electronHub/imageModels.ts +26 -0
- package/lib/ai/{modals-images → modals-chat/freesedgpt}/cartoon.ts +3 -3
- package/lib/ai/modals-chat/freesedgpt/chat.ts +31 -0
- package/lib/ai/{modals-images → modals-chat/freesedgpt}/flux.ts +3 -3
- package/lib/ai/modals-chat/{Gemini-flash.ts → gemini/Gemini-flash.ts} +2 -2
- package/lib/ai/modals-chat/{Gemini-pro.ts → gemini/Gemini-pro.ts} +2 -2
- package/lib/ai/modals-chat/groq/chatgroq.ts +68 -0
- package/lib/ai/modals-chat/groq/imageAnalyzer.ts +68 -0
- package/lib/ai/modals-chat/{whisper.ts → groq/whisper.ts} +2 -2
- package/lib/ai/modals-chat/hercai/chatModels.ts +20 -0
- package/lib/ai/modals-chat/others/otherModels.ts +99 -0
- package/lib/ai/modals-chat/rsn/rsnChat.ts +74 -0
- package/lib/ai/utils.ts +7 -12
- package/lib/index.ts +5 -3
- package/package.json +1 -1
- package/dist/ai/functions/imageAnalysis.d.ts +0 -2
- package/dist/ai/functions/imageAnalysis.d.ts.map +0 -1
- package/dist/ai/functions/imageAnalysis.js +0 -45
- package/dist/ai/functions/imageAnalysis.js.map +0 -1
- package/dist/ai/functions/readImagess.d.ts +0 -2
- package/dist/ai/functions/readImagess.d.ts.map +0 -1
- package/dist/ai/functions/readImagess.js +0 -45
- package/dist/ai/functions/readImagess.js.map +0 -1
- package/dist/ai/modals-chat/Gemini-flash.d.ts.map +0 -1
- package/dist/ai/modals-chat/Gemini-flash.js.map +0 -1
- package/dist/ai/modals-chat/Gemini-pro.d.ts.map +0 -1
- package/dist/ai/modals-chat/Gemini-pro.js.map +0 -1
- package/dist/ai/modals-chat/apexChat.d.ts +0 -2
- package/dist/ai/modals-chat/apexChat.d.ts.map +0 -1
- package/dist/ai/modals-chat/apexChat.js +0 -32
- package/dist/ai/modals-chat/apexChat.js.map +0 -1
- package/dist/ai/modals-chat/bard.d.ts +0 -7
- package/dist/ai/modals-chat/bard.d.ts.map +0 -1
- package/dist/ai/modals-chat/bard.js +0 -48
- package/dist/ai/modals-chat/bard.js.map +0 -1
- package/dist/ai/modals-chat/bing.d.ts +0 -7
- package/dist/ai/modals-chat/bing.d.ts.map +0 -1
- package/dist/ai/modals-chat/bing.js.map +0 -1
- package/dist/ai/modals-chat/codellama.d.ts +0 -7
- package/dist/ai/modals-chat/codellama.d.ts.map +0 -1
- package/dist/ai/modals-chat/codellama.js +0 -48
- package/dist/ai/modals-chat/codellama.js.map +0 -1
- package/dist/ai/modals-chat/config.d.ts.map +0 -1
- package/dist/ai/modals-chat/config.js.map +0 -1
- package/dist/ai/modals-chat/facebook-ai.d.ts +0 -2
- package/dist/ai/modals-chat/facebook-ai.d.ts.map +0 -1
- package/dist/ai/modals-chat/facebook-ai.js +0 -20
- package/dist/ai/modals-chat/facebook-ai.js.map +0 -1
- package/dist/ai/modals-chat/geminiFast.d.ts.map +0 -1
- package/dist/ai/modals-chat/geminiFast.js.map +0 -1
- package/dist/ai/modals-chat/geminiV2.d.ts +0 -7
- package/dist/ai/modals-chat/geminiV2.d.ts.map +0 -1
- package/dist/ai/modals-chat/geminiV2.js +0 -48
- package/dist/ai/modals-chat/geminiV2.js.map +0 -1
- package/dist/ai/modals-chat/gemma.d.ts +0 -2
- package/dist/ai/modals-chat/gemma.d.ts.map +0 -1
- package/dist/ai/modals-chat/gemma.js +0 -43
- package/dist/ai/modals-chat/gemma.js.map +0 -1
- package/dist/ai/modals-chat/llama.d.ts +0 -7
- package/dist/ai/modals-chat/llama.d.ts.map +0 -1
- package/dist/ai/modals-chat/llama.js +0 -48
- package/dist/ai/modals-chat/llama.js.map +0 -1
- package/dist/ai/modals-chat/llamav2.d.ts +0 -2
- package/dist/ai/modals-chat/llamav2.d.ts.map +0 -1
- package/dist/ai/modals-chat/llamav2.js +0 -43
- package/dist/ai/modals-chat/llamav2.js.map +0 -1
- package/dist/ai/modals-chat/llamav3.d.ts +0 -2
- package/dist/ai/modals-chat/llamav3.d.ts.map +0 -1
- package/dist/ai/modals-chat/llamav3.js +0 -43
- package/dist/ai/modals-chat/llamav3.js.map +0 -1
- package/dist/ai/modals-chat/mixtral.d.ts +0 -7
- package/dist/ai/modals-chat/mixtral.d.ts.map +0 -1
- package/dist/ai/modals-chat/mixtral.js +0 -48
- package/dist/ai/modals-chat/mixtral.js.map +0 -1
- package/dist/ai/modals-chat/mixtralv2.d.ts +0 -2
- package/dist/ai/modals-chat/mixtralv2.d.ts.map +0 -1
- package/dist/ai/modals-chat/mixtralv2.js +0 -43
- package/dist/ai/modals-chat/mixtralv2.js.map +0 -1
- package/dist/ai/modals-chat/modals.d.ts +0 -8
- package/dist/ai/modals-chat/modals.d.ts.map +0 -1
- package/dist/ai/modals-chat/modals.js +0 -16
- package/dist/ai/modals-chat/modals.js.map +0 -1
- package/dist/ai/modals-chat/openChat.d.ts +0 -7
- package/dist/ai/modals-chat/openChat.d.ts.map +0 -1
- package/dist/ai/modals-chat/openChat.js +0 -48
- package/dist/ai/modals-chat/openChat.js.map +0 -1
- package/dist/ai/modals-chat/starChat.d.ts +0 -2
- package/dist/ai/modals-chat/starChat.d.ts.map +0 -1
- package/dist/ai/modals-chat/starChat.js +0 -31
- package/dist/ai/modals-chat/starChat.js.map +0 -1
- package/dist/ai/modals-chat/v4.d.ts +0 -7
- package/dist/ai/modals-chat/v4.d.ts.map +0 -1
- package/dist/ai/modals-chat/v4.js +0 -48
- package/dist/ai/modals-chat/v4.js.map +0 -1
- package/dist/ai/modals-chat/whisper.d.ts.map +0 -1
- package/dist/ai/modals-chat/whisper.js.map +0 -1
- package/dist/ai/modals-chat/yi-ai.d.ts +0 -2
- package/dist/ai/modals-chat/yi-ai.d.ts.map +0 -1
- package/dist/ai/modals-chat/yi-ai.js +0 -40
- package/dist/ai/modals-chat/yi-ai.js.map +0 -1
- package/lib/ai/functions/imageAnalysis.ts +0 -41
- package/lib/ai/modals-chat/apexChat.ts +0 -31
- package/lib/ai/modals-chat/bard.ts +0 -44
- package/lib/ai/modals-chat/bing.ts +0 -44
- package/lib/ai/modals-chat/codellama.ts +0 -44
- package/lib/ai/modals-chat/facebook-ai.ts +0 -14
- package/lib/ai/modals-chat/geminiV2.ts +0 -44
- package/lib/ai/modals-chat/gemma.ts +0 -35
- package/lib/ai/modals-chat/llama.ts +0 -44
- package/lib/ai/modals-chat/llamav2.ts +0 -35
- package/lib/ai/modals-chat/llamav3.ts +0 -35
- package/lib/ai/modals-chat/mixtral.ts +0 -45
- package/lib/ai/modals-chat/mixtralv2.ts +0 -35
- package/lib/ai/modals-chat/modals.ts +0 -8
- package/lib/ai/modals-chat/openChat.ts +0 -44
- package/lib/ai/modals-chat/starChat.ts +0 -31
- package/lib/ai/modals-chat/v4.ts +0 -44
- package/lib/ai/modals-chat/yi-ai.ts +0 -40
- /package/dist/ai/modals-chat/{Gemini-flash.d.ts → gemini/Gemini-flash.d.ts} +0 -0
- /package/dist/ai/modals-chat/{Gemini-pro.d.ts → gemini/Gemini-pro.d.ts} +0 -0
- /package/dist/ai/modals-chat/{config.d.ts → gemini/config.d.ts} +0 -0
- /package/dist/ai/modals-chat/{config.js → gemini/config.js} +0 -0
- /package/dist/ai/modals-chat/{geminiFast.d.ts → gemini/geminiFast.d.ts} +0 -0
- /package/dist/ai/modals-chat/{geminiFast.js → gemini/geminiFast.js} +0 -0
- /package/dist/ai/modals-chat/{whisper.d.ts → groq/whisper.d.ts} +0 -0
- /package/lib/ai/modals-chat/{config.ts → gemini/config.ts} +0 -0
- /package/lib/ai/modals-chat/{geminiFast.ts → gemini/geminiFast.ts} +0 -0
@@ -28,7 +28,7 @@ async function aiVoice(
     enhancer: any,
     buttons: any[],
     RespondMessage: any,
-
+    imageAPIS?: { groqAPI?: string, rsnAPI?: string, prodiaAPI?: string, freesedGPTApi?: string }
 ) {
     if (message.author.bot || isProcessing || !message.guild) {
         return;
@@ -40,7 +40,7 @@ async function aiVoice(
     let msg = message.content;

     if (drawValid) {
-        return await aiImagine(message, numOfImages, finalText, hercai, imageModal, nsfw, nsfwKeyWords, deepCheck, enhancer, buttons, RespondMessage,
+        return await aiImagine(message, numOfImages, finalText, hercai, imageModal, nsfw, nsfwKeyWords, deepCheck, enhancer, buttons, RespondMessage, { rsnAPI: imageAPIS?.rsnAPI, groqAPI: imageAPIS?.groqAPI, prodiaAPI: imageAPIS?.prodiaAPI, freesedGPTApi: imageAPIS?.freesedGPTApi as string});
     }

     if (message.attachments.size > 0) {
@@ -3,20 +3,20 @@ import api from "api";
 const sdk = api("@prodia/v1.3.0#be019b2kls0gqss3");
 sdk.auth('43435e1c-cab1-493f-a224-f51e4b97ce8d');

-
-
-
-
+export const hercChatModels = ["v3" , "v3-32k" , "turbo" , "turbo-16k" , "gemini" , "llama3-70b" , "llama3-8b" , "mixtral-8x7b" , "gemma-7b" , "gemma2-9b"];
+export const groqChatModels = ['gemma-7b-it', 'gemma2-9b-it', 'llama3-groq-70b-8192-tool-use-preview', 'llama3-groq-8b-8192-tool-use-preview', 'llama-3.1-70b-versatile', 'llama-3.1-8b-instant', 'llama-guard-3-8b', 'llama3-70b-8192', 'llama3-8b-8192', 'mixtral-8x7b-32768'];
+export const rsnChatModels = ['bard', 'bing', 'codellama', 'gemini', 'llama', 'mixtral', 'openchat', 'gpt4', 'dalle'];
+export const otherChatModel = ['apexai', 'facebook_ai', 'yi_34b', 'starChat'];
+export const fresedgptModels = ['real-cartoon-xl-v6', 'flux-schnell', 'gpt-4o'];
+
+async function initializeValidOptions() {
+    const [SDModels, SDXLModels, samplers] = await Promise.all([
+        sdModels(),
+        sdxlModels(),
         sampler()
     ]);

-    const
-    const ApexChatModals = ['v4', 'gemini-v2', 'gemini', 'v3', 'v3-32k', 'turbo', 'turbo-16k', 'llama', 'apexChat', 'openChat', 'yi-ai', 'facebook-ai', 'starChat', 'mixtral', 'codellama', 'bing', 'bard'];
-
-    const HercModals = [
-        "v1",
-        "v2",
-        "v2-beta",
+    const HercImageModels = [
         "v3",
         "lexica",
         "prodia",
@@ -24,41 +24,42 @@ async function initializeValidOptions() {
         "simurg",
         "animefy",
         "raava",
-        "shonin"
+        "shonin",
     ];

-    const others = ['cartoon', 'flux'];

     return {
-
-
-
-
-
+        validHercChatModels: hercChatModels,
+        validgroqChatModels: groqChatModels,
+        validRSNChatModels: rsnChatModels,
+        validHercaiModels: HercImageModels,
+        validProdiaModels: SDModels,
+        validotherChatModels: otherChatModel,
+        validfresedgptModels: fresedgptModels,
         validEnhancers: [
             "ESRGAN_4x", "Lanczos", "Nearest", "LDSR", "R-ESRGAN 4x+",
             "R-ESRGAN 4x+ Anime6B", "ScuNET GAN", "ScuNET PSNR", "SwinIR 4x"
         ],
         validSamplers: samplers,
-        validSXDL:
+        validSXDL: SDXLModels,
         validImgStyle: [
             "3d-model", "analog-film", "anime", "cinematic", "comic-book",
             "digital-art", "enhance", "isometric", "fantasy-art", "isometric",
             "line-art", "low-poly", "neon-punk", "origami", "photographic",
             "pixel-art", "texture", "craft-clay"
         ],
-
+        allModels: [...SDModels, ...SDXLModels, ...HercImageModels, ...fresedgptModels, ...hercChatModels, ...otherChatModel, ...groqChatModels, ...rsnChatModels]
     };
 }

-export const
+export const validateModels = initializeValidOptions();

-async function
+async function sdModels(): Promise<string[]> {
     const SDModals = await sdk.listModels();
     return SDModals.data;
 }

-async function
+async function sdxlModels(): Promise<string[]> {
     const SDXLModals = await sdk.listSdxlModels();
     return SDXLModals.data;
 }
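The hunk above reworks lib/ai/functions/validOptions.ts (per the file list): the hard-coded model arrays are replaced by exported lists, and the option object is now built asynchronously and exposed through the `validateModels` promise. A minimal consumer-side sketch, assuming the package-level `validateModels` export added in lib/index.ts further down in this diff; the property names come straight from the returned object:

```ts
import { validateModels } from "apexify.js";

// validateModels is the pending promise returned by initializeValidOptions(),
// so await it before reading any of the lists.
const options = await validateModels;

console.log(options.validHercChatModels); // ["v3", "v3-32k", "turbo", ...]
console.log(options.allModels.length);    // Prodia + SDXL + Hercai + fresedGPT + chat lists combined
```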
@@ -0,0 +1,26 @@
+import axios from 'axios';
+
+export async function electronImagine({ ApiKey, prompt, modelName, count = 1 }: { ApiKey?: string, prompt: string, modelName: string, count?: number }) {
+    try {
+        const response = await axios.post(
+            'https://api.electronhub.top/v1/images/generate',
+            {
+                model: modelName,
+                prompt: prompt,
+                n: count
+            },
+            {
+                headers: {
+                    'Authorization': `Bearer ${ApiKey || 'ek-nFO8tz6qiu5cJ31lwCfPZNNrxFZLsJYou6yx4X1FS2Jyr2dm0a'}`,
+                    'Content-Type': 'application/json'
+                }
+            }
+        );
+
+        const imagesUrl = response.data;
+        return imagesUrl;
+    } catch (e: any) {
+        console.error('Error generating images:', e.response ? e.response.data : e.message);
+        throw e;
+    }
+}
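This new file (lib/ai/modals-chat/electronHub/imageModels.ts in the file list) wraps the Electron Hub image endpoint. A hedged usage sketch: the import path is written relative to lib/ai, and both the API key and the model id below are illustrative placeholders rather than values taken from this diff:

```ts
import { electronImagine } from "./modals-chat/electronHub/imageModels";

// ApiKey is optional -- omitting it falls back to the key bundled in the module.
const result = await electronImagine({
  ApiKey: "ek-your-own-key",        // placeholder
  prompt: "a lighthouse at sunset, oil painting",
  modelName: "some-image-model-id", // illustrative; use a model id Electron Hub actually serves
  count: 2,
});

console.log(result); // raw response body from the endpoint
```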
@@ -1,6 +1,6 @@
 import axios from 'axios';

-export async function cartoon(prompt: string) {
+export async function cartoon(prompt: string, apiKey: string) {
     try {
         const response = await axios.post(
             'https://fresedgpt.space/v1/images/generations',
@@ -11,13 +11,13 @@ export async function cartoon(prompt: string) {
             },
             {
                 headers: {
-                    'Authorization':
+                    'Authorization': `Bearer ${ apiKey || 'fresed-Dtm2TBDA9vXcaHFdrDL1apbF2fnOIQ' }`,
                     'Content-Type': 'application/json'
                 }
             }
         );

-        return response.data.data[0].url;
+        return response.data.data[0].url || null;
     } catch (e: any) {
         return null
     }
@@ -0,0 +1,31 @@
+import axios from 'axios';
+const API_URL = 'https://fresedgpt.space/v1/chat/completions';
+
+export async function gpt4o({ prompt, ApiKey}: { prompt: string, ApiKey?: string}) {
+    try {
+        const API_KEY = ApiKey || 'fresed-Dtm2TBDA9vXcaHFdrDL1apbF2fnOIQ';
+        const response = await axios.post(
+            API_URL,
+            {
+                messages: [{ role: 'user', content: `${prompt}` }],
+                model: 'chatgpt-4o-latest',
+                stream: false
+            },
+            {
+                headers: {
+                    'Authorization': `Bearer ${API_KEY}`,
+                    'Content-Type': 'application/json'
+                }
+            }
+        );
+
+        const responseData = response.data;
+        const assistantContent = responseData.choices[0]?.message?.content || 'No response content available';
+
+        return assistantContent || null;
+
+    } catch (error) {
+        console.error('Error creating chat completion:', error);
+    }
+}
+
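The new lib/ai/modals-chat/freesedgpt/chat.ts adds a `gpt4o` helper over the fresedGPT chat endpoint. A usage sketch based only on the signature above; the import path is relative to lib/ai and the key is an optional placeholder:

```ts
import { gpt4o } from "./modals-chat/freesedgpt/chat";

const reply = await gpt4o({
  prompt: "Summarise the difference between SD and SDXL in one sentence.",
  ApiKey: "fresed-your-own-key", // placeholder; omit to use the bundled default
});

console.log(reply); // assistant text, or the fallback string when the response is empty
```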
@@ -1,6 +1,6 @@
 import axios from 'axios';

-export async function flux(prompt: string) {
+export async function flux(prompt: string, apiKey: string) {
     try {
         const response = await axios.post(
             'https://fresedgpt.space/v1/images/generations',
@@ -11,13 +11,13 @@ export async function flux(prompt: string) {
             },
             {
                 headers: {
-                    'Authorization':
+                    'Authorization': `Bearer ${ apiKey || 'fresed-Dtm2TBDA9vXcaHFdrDL1apbF2fnOIQ' }`,
                     'Content-Type': 'application/json'
                 }
             }
         );

-        return response.data.data[0].url;
+        return response.data.data[0].url || null;
     } catch (e: any) {
         return null
     }
@@ -1,11 +1,11 @@
 import { GoogleGenerativeAI, HarmCategory, HarmBlockThreshold } from "@google/generative-ai";
-import { converter } from "
+import { converter } from "../../../canvas/utils/general functions";
 import { connect } from "verse.db";
 import config from './config';
 import axios from "axios";
 import path from 'path';
 import fs from 'fs';
-import { readFile } from "
+import { readFile } from "../../utils";

 export async function geminiFlash(message: { userId: string, serverName: string, serverId: string, channelName: string, attachment: any, db: boolean }, AI: { AiPersonality: string | null, userMsg: string, API_KEY: string | null }): Promise<any> {

@@ -1,5 +1,5 @@
 import { GoogleGenerativeAI, HarmCategory, HarmBlockThreshold } from "@google/generative-ai";
-import { converter } from "
+import { converter } from "../../../canvas/utils/general functions";
 import { GoogleAIFileManager } from "@google/generative-ai/server";

 import { connect } from "verse.db";
@@ -7,7 +7,7 @@ import config from './config';
 import axios from "axios";
 import path from 'path';
 import fs from 'fs';
-import { readFile } from "
+import { readFile } from "../../utils";

 export async function geminiPro(message: { userId: string, serverName: string, serverId: string, channelName: string, attachment: any, db: boolean }, AI: { AiPersonality: string | null, userMsg: string, API_KEY: string | null }): Promise<any> {

@@ -0,0 +1,68 @@
+import Groq from 'groq-sdk';
+
+interface groqOptions {
+    API_KEY?: string;
+    prompt: string;
+    apiName: string;
+    instruction?: string;
+}
+
+// Define your own type for message parameters
+interface ChatCompletionMessageParam {
+    role: 'system' | 'user' | 'assistant';
+    content: string;
+}
+
+const validGroqApiNames = ['gemma-7b-it', 'llama3-70b-8192', 'llama3-8b-8192', 'mixtral-8x7b-32768'];
+
+export async function chatGroq({ API_KEY, prompt, apiName, instruction }: groqOptions): Promise<string> {
+    if (!validGroqApiNames.includes(apiName)) {
+        return `Invalid API name: ${apiName}. Please provide one of the following: ${validGroqApiNames.join(', ')}`;
+    }
+
+    try {
+        const groq = new Groq({
+            apiKey: API_KEY || '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',
+        });
+
+        const messages: ChatCompletionMessageParam[] = [];
+
+        if (instruction) {
+            messages.push({
+                role: 'system',
+                content: instruction,
+            });
+        }
+
+        messages.push({
+            role: 'user',
+            content: prompt,
+        });
+
+        const chatCompletion = await groq.chat.completions.create({
+            messages: messages,
+            model: apiName,
+            max_tokens: 8192,
+        });
+
+        const response = chatCompletion.choices[0]?.message?.content || 'Rate limit';
+        return response;
+
+    } catch (err: any) {
+        if (err instanceof Groq.APIError) {
+            if (err.status === 400) {
+                return 'Bad request. Try again after a minute please.';
+            } else if (err.status === 429) {
+                return 'Rate limit. Try again after one minute or provide your own API key.';
+            } else if (err.status === 401 || !err.status) {
+                throw new Error('Invalid API key provided.');
+            } else {
+                console.error(err);
+                return 'Unknown error occurred.';
+            }
+        } else {
+            console.error(err);
+            return 'Unknown error occurred.';
+        }
+    }
+}
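lib/ai/modals-chat/groq/chatgroq.ts introduces `chatGroq` on top of groq-sdk. A usage sketch with a placeholder key and one of the model ids from `validGroqApiNames` above; the import path is relative to lib/ai:

```ts
import { chatGroq } from "./modals-chat/groq/chatgroq";

const answer = await chatGroq({
  API_KEY: "gsk_your-own-key",             // placeholder; optional
  prompt: "Explain what a sampler does in Stable Diffusion.",
  apiName: "llama3-70b-8192",              // must be in validGroqApiNames
  instruction: "Answer in two sentences.", // optional system message
});

console.log(answer);
```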
@@ -0,0 +1,68 @@
+import Groq from 'groq-sdk';
+
+interface GroqAnalyzerOptions {
+    imgURL: string;
+    ApiKey?: string;
+    prompt?: string;
+}
+
+async function convertUrlToDataUrl(url: string): Promise<string> {
+    try {
+        const response = await fetch(url);
+        const blob = await response.blob();
+        const reader = new FileReader();
+
+        return new Promise<string>((resolve, reject) => {
+            reader.onloadend = () => {
+                resolve(reader.result as string);
+            };
+            reader.onerror = reject;
+            reader.readAsDataURL(blob);
+        });
+    } catch (error) {
+        console.error('Error converting URL to Data URL:', error);
+        throw new Error('Failed to convert image URL to data URL');
+    }
+}
+
+export async function groqAnalyzer({ imgURL, ApiKey, prompt }: GroqAnalyzerOptions): Promise<string> {
+    try {
+        const groq = new Groq({
+            apiKey: ApiKey || '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',
+        });
+
+        const imageDataUrl = await convertUrlToDataUrl(imgURL);
+
+        // @ts-ignore
+        const chatCompletion = await groq.chat.completions.create({
+            messages: [
+                {
+                    role: 'user',
+                    content: {
+                        type: 'text',
+                        text: prompt || '',
+                    },
+                },
+                {
+                    role: 'user',
+                    content: {
+                        type: 'image_url',
+                        image_url: imageDataUrl,
+                    },
+                },
+            ],
+            model: 'llava-v1.5-7b-4096-preview',
+            temperature: 0,
+            max_tokens: 1024,
+            top_p: 1,
+            stream: false,
+            stop: null,
+        });
+
+        return chatCompletion.choices[0].message.content as string;
+    } catch (error) {
+        console.error('Error in groqAnalyzer:', error);
+        return 'Error occurred at the api'
+    }
+}
+
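lib/ai/modals-chat/groq/imageAnalyzer.ts adds `groqAnalyzer`, which the lib/index.ts hunk later in this diff also re-exports as `ApexImageAnalyzer`. A usage sketch; the image URL and key are illustrative placeholders:

```ts
import { groqAnalyzer } from "./modals-chat/groq/imageAnalyzer";

const description = await groqAnalyzer({
  imgURL: "https://example.com/cat.png",   // must be a fetchable image URL
  prompt: "Describe what is in this image.",
  ApiKey: "gsk_your-own-key",              // placeholder; optional
});

console.log(description);
```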
@@ -10,7 +10,7 @@ function getFileSize(filePath: string): number {
 }

 async function createReadableStream(filepathOrUrl: string): Promise<NodeJS.ReadableStream | string> {
-    const maxFileSizeBytes = 25 * 1024 * 1024;
+    const maxFileSizeBytes = 25 * 1024 * 1024;

     if (filepathOrUrl.startsWith('http')) {
         const parsedUrl = new URL(filepathOrUrl);
@@ -66,7 +66,7 @@ export async function whisper(prompt: string, filepath: string, lang?: string, A
         apiKey: API_KEY || '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',
     });

-    const fileStream = await createReadableStream(filepath);
+    const fileStream = await createReadableStream(path.join(process.cwd(), filepath));

     if (typeof fileStream === 'string') {
         return fileStream;
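With the change above, `whisper` now joins the given path onto `process.cwd()`, so local files should be passed relative to the directory the process was started from. A hedged sketch, assuming the trailing API-key parameter (truncated in the hunk header) stays optional; the file name is illustrative:

```ts
import { whisper } from "./modals-chat/groq/whisper";

// Resolved as path.join(process.cwd(), "assets/voice-note.mp3") inside the module.
const transcript = await whisper("Transcribe this recording", "assets/voice-note.mp3", "en");

console.log(transcript);
```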
@@ -0,0 +1,20 @@
+import { Hercai } from "hercai";
+const hercai = new Hercai('6eZZOdDwm6Epdzn8mnhcX9SBDkxvoNYcNj9ILS0P44=');
+type hercaiModels = "v3" | "v3-32k" | "turbo" | "turbo-16k" | "gemini" | "llama3-70b" | "llama3-8b" | "mixtral-8x7b" | "gemma-7b" | "gemma2-9b"
+
+export async function chatHercai(prompt: string, model: hercaiModels, instructions?: string, memory?: { enable?: boolean, id?: string}): Promise<string> {
+    try {
+
+        if (memory && memory.enable && memory.id) {
+            return (await hercai.betaQuestion({ content: prompt, user: memory.id })).reply;
+        } else if (prompt === ( "v3" || "v3-32k" || "turbo" || "turbo-16k" || "gemini" || "llama3-70b" || "llama3-8b" || "mixtral-8x7b" || "gemma-7b" || "gemma2-9b" )) {
+
+            return (await hercai.question({ model, content: prompt, personality: instructions })).reply;
+        } else {
+            throw new Error('Invalid model name.');
+        }
+    } catch (e: any) {
+        console.error(e.message);
+        return 'An Error has occurred in the API';
+    }
+}
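lib/ai/modals-chat/hercai/chatModels.ts wraps the Hercai client. A usage sketch that exercises the memory branch; the conversation id is illustrative and the import path is relative to lib/ai:

```ts
import { chatHercai } from "./modals-chat/hercai/chatModels";

const reply = await chatHercai(
  "Write a haiku about npm releases.",
  "v3",                              // one of the hercaiModels union members
  "You are a concise assistant.",    // optional personality
  { enable: true, id: "user-1234" }  // illustrative memory id; routes through betaQuestion
);

console.log(reply);
```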
@@ -0,0 +1,99 @@
+import axios from 'axios';
+
+interface AiOptions {
+    modelName: 'apexai' | 'facebook_ai' | 'starChat' | 'yi_34b';
+    prompt: string;
+}
+
+const apiUrls = {
+    apexai: 'https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1',
+    facebook_ai: 'https://api-inference.huggingface.co/models/facebook/blenderbot-400M-distill',
+    starChat: 'https://api-inference.huggingface.co/models/HuggingFaceH4/starchat2-15b-v0.1',
+    yi_34b: 'https://api-inference.huggingface.co/models/01-ai/Yi-1.5-34B-Chat',
+};
+
+const apiKey = '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';
+
+export async function otherChatModels({ modelName, prompt }: AiOptions): Promise<string> {
+    try {
+        switch (modelName) {
+            case 'apexai': {
+                const messages = [{ role: 'user', content: prompt }];
+                const formattedMessages = messages.map(message => `[${message.role}] ${message.content}`).join('\n');
+
+                const response = await axios.post(apiUrls.apexai, {
+                    inputs: formattedMessages,
+                }, {
+                    headers: {
+                        'Authorization': `Bearer ${apiKey}`,
+                        'Content-Type': 'application/json',
+                    },
+                });
+
+                const generatedText = response.data[0].generated_text;
+                const lines = generatedText.split('\n').slice(1);
+                return lines.map((line: string) => line.replace(/^\[.*?\]\s*/, '')).join('\n');
+            }
+
+            case 'facebook_ai': {
+                const response = await axios.post(apiUrls.facebook_ai, { inputs: prompt }, {
+                    headers: { 'Authorization': `Bearer ${apiKey}` },
+                });
+
+                return response.data[0].generated_text;
+            }
+
+            case 'starChat': {
+                const messages = [{ role: 'assistant', content: prompt }];
+                const response = await axios.post(apiUrls.starChat, {
+                    inputs: JSON.stringify(messages),
+                }, {
+                    headers: {
+                        'Authorization': `Bearer ${apiKey}`,
+                    },
+                });
+
+                let chatbotReply = response.data[0].generated_text.replace(/^.*?\n.*?\n/, '');
+                if (chatbotReply.startsWith('[' || '{')) {
+                    chatbotReply = JSON.parse(chatbotReply).join(' ');
+                }
+
+                return chatbotReply;
+            }
+
+            case 'yi_34b': {
+                const messages = [{ role: 'user', content: prompt }];
+                const formattedMessages = messages.map(message => `[${message.role}] ${message.content}`).join('\n');
+
+                const response = await axios.post(apiUrls.yi_34b, {
+                    inputs: formattedMessages,
+                }, {
+                    headers: {
+                        'Authorization': `Bearer ${apiKey}`,
+                        'Content-Type': 'application/json',
+                    },
+                });
+
+                const generatedText = response.data[0].generated_text;
+                const botIndex = generatedText.indexOf('[bot]', generatedText.indexOf('[user]'));
+
+                if (botIndex !== -1) {
+                    let botResponse = generatedText.substring(botIndex);
+                    const nextBotIndex = botResponse.indexOf('[bot]', 1);
+                    const nextUserIndex = botResponse.indexOf('[user]', 1);
+                    const endIndex = Math.min(nextBotIndex !== -1 ? nextBotIndex : botResponse.length, nextUserIndex !== -1 ? nextUserIndex : botResponse.length);
+                    botResponse = botResponse.substring(0, endIndex).substring(botResponse.indexOf('[bot]') + 5).trim();
+                    return botResponse;
+                } else {
+                    return generatedText;
+                }
+            }
+
+            default:
+                return 'Invalid model name provided.';
+        }
+    } catch (error: any) {
+        console.error(`Error fetching response for ${modelName}:`, error.message || error.response?.data);
+        return 'Please wait, I am on cooldown.';
+    }
+}
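lib/ai/modals-chat/others/otherModels.ts funnels the remaining chat options through Hugging Face inference endpoints. A minimal call sketch; the import path is relative to lib/ai:

```ts
import { otherChatModels } from "./modals-chat/others/otherModels";

const reply = await otherChatModels({
  modelName: "yi_34b", // 'apexai' | 'facebook_ai' | 'starChat' | 'yi_34b'
  prompt: "What is apexify.js used for?",
});

console.log(reply); // parsed assistant text, or the cooldown message on failure
```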
@@ -0,0 +1,74 @@
+import axios, { AxiosResponse } from 'axios';
+
+interface aiOptions {
+    API_KEY?: string | null;
+    prompt: string;
+    apiName: string;
+}
+
+const validApiNames = [
+    'bard',
+    'bing',
+    'codellama',
+    'gemini',
+    'llama',
+    'mixtral',
+    'openchat',
+    'gpt4',
+    'dalle'
+];
+
+export async function rsnAPI({ API_KEY, prompt, apiName }: aiOptions): Promise<string> {
+
+    if (!validApiNames.includes(apiName)) {
+        return `Invalid API name: ${apiName}. Please provide one of the following: ${validApiNames.join(', ')}`;
+    }
+
+    const apiKey: string = API_KEY || 'rsnai_SbLbFcwdT2h2KoYet2LS0F34';
+    const apiUrl = `https://api.rnilaweera.lk/api/v1/user/${apiName}`;
+
+    try {
+        const payload = { prompt };
+        const response: AxiosResponse = await axios.post(apiUrl, payload, {
+            headers: { Authorization: `Bearer ${apiKey}` },
+        });
+
+        if (apiName === 'dalle') return response.data?.image.url;
+
+        return response.data.message;
+    } catch (e: any) {
+        if (e.response && e.response.data.message === 'Invalid API key.') {
+            try {
+                const defaultResponse = await axios.post(apiUrl, { prompt }, {
+                    headers: { Authorization: `Bearer rsnai_SbLbFcwdT2h2KoYet2LS0F34` },
+                });
+
+                if (apiName === 'dalle') return defaultResponse.data?.image.url;
+
+                return defaultResponse.data.message;
+            } catch (err) {
+                console.log(err);
+                return 'An error occurred while using the default API key.';
+            }
+        }
+        else if (e.response && e.response.status === 500) {
+            try {
+                const backupResponse = await axios.post(apiUrl, { prompt }, {
+                    headers: { Authorization: `Bearer rsnai_lvIch9Z7apBHqfXYqOiXwzm7` },
+                });
+
+                if (apiName === 'dalle') return backupResponse.data?.image.url;
+
+                return backupResponse.data.message;
+            } catch (err) {
+                console.log(err);
+                return 'An error occurred while using the backup API key.';
+            }
+        } else {
+            console.log(e);
+            return 'The API is on cooldown for 50 seconds.';
+        }
+    }
+}
+
+
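lib/ai/modals-chat/rsn/rsnChat.ts adds `rsnAPI` with bundled default and backup keys. A usage sketch; the key shown is a placeholder, and 'dalle' is the one model name that returns an image URL instead of text:

```ts
import { rsnAPI } from "./modals-chat/rsn/rsnChat";

const text = await rsnAPI({
  API_KEY: "rsnai_your-own-key", // placeholder; omit to use the bundled default
  prompt: "Suggest a name for a Discord art bot.",
  apiName: "gpt4",               // must be in validApiNames
});

const imageUrl = await rsnAPI({ prompt: "a neon city skyline", apiName: "dalle" });

console.log(text, imageUrl);
```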
package/lib/ai/utils.ts
CHANGED
@@ -6,17 +6,14 @@ import { aiImagine } from "./functions/draw";
 import { readFile } from "./functions/readFiles";
 import { ApexChat, ApexImagine, ApexListener } from "./ApexModules";
 import { typeWriter } from "./functions/typeWriter" ;
-import {
-import {
-import {
-
-import { llamaChat } from "./modals-chat/llama";
-import { v4 } from "./modals-chat/v4";
+import { groqAnalyzer } from "./modals-chat/groq/imageAnalyzer";
+import { geminiFlash } from "./modals-chat/gemini/Gemini-flash";
+import { geminiPro } from "./modals-chat/gemini/Gemini-pro";
+

 export {
     chunkString,
     imageReader,
-    readImage,
     toDraw,
     aiVoice,
     aiImagine,
@@ -25,9 +22,7 @@ export {
     ApexImagine,
     ApexListener,
     typeWriter,
-
-
-
-    mixtral,
-    openChat
+    groqAnalyzer,
+    geminiFlash,
+    geminiPro
 };
package/lib/index.ts
CHANGED
@@ -29,16 +29,18 @@ fetch("https://registry.npmjs.com/-/v1/search?text=apexify.js")
     .catch(function(error: any) {});

 import { ApexAI, ApexChat, ApexImagine, ApexPainter, ApexListener, readFile, imageReader } from "./utils";
-import {
+import { validateModels } from "./ai/functions/validOptions";
+import { groqAnalyzer } from "./ai/utils";
 export * from './canvas/utils/utils';
 export * from './canvas/utils/types';

 const ApexFileReader = readFile;
 const ApexImageReader = imageReader
-
+const ApexImageAnalyzer = groqAnalyzer
+export { ApexPainter, ApexAI, ApexImagine, ApexChat, validateModels, ApexListener, ApexFileReader, ApexImageReader, ApexImageAnalyzer };

 const Apexify = {
-    ApexPainter, ApexAI, ApexImagine, ApexChat,
+    ApexPainter, ApexAI, ApexImagine, ApexChat, validateModels, ApexListener, ApexFileReader, ApexImageReader, ApexImageAnalyzer
 };

 export default Apexify;
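After the lib/index.ts change above, the new helpers are reachable both as named exports and via the default Apexify object. A consumer-side sketch of the 4.5.1 surface; the equality check simply illustrates that both paths point at the same function:

```ts
import Apexify, { ApexImageAnalyzer, validateModels } from "apexify.js";

console.log(ApexImageAnalyzer === Apexify.ApexImageAnalyzer); // true

// validateModels resolves to the option lists built in validOptions.ts.
const { allModels, validfresedgptModels } = await validateModels;
console.log(validfresedgptModels);               // ['real-cartoon-xl-v6', 'flux-schnell', 'gpt-4o']
console.log(allModels.includes("flux-schnell")); // true
```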
package/package.json
CHANGED