apexify.js 4.4.36 → 4.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +26 -199
- package/dist/ai/ApexAI.d.ts +18 -6
- package/dist/ai/ApexAI.d.ts.map +1 -1
- package/dist/ai/ApexAI.js +33 -170
- package/dist/ai/ApexAI.js.map +1 -1
- package/dist/ai/ApexModules.d.ts.map +1 -1
- package/dist/ai/ApexModules.js +37 -125
- package/dist/ai/ApexModules.js.map +1 -1
- package/dist/ai/functions/draw.d.ts +6 -1
- package/dist/ai/functions/draw.d.ts.map +1 -1
- package/dist/ai/functions/draw.js +18 -59
- package/dist/ai/functions/draw.js.map +1 -1
- package/dist/ai/functions/generateVoiceResponse.d.ts +6 -1
- package/dist/ai/functions/generateVoiceResponse.d.ts.map +1 -1
- package/dist/ai/functions/generateVoiceResponse.js +2 -2
- package/dist/ai/functions/generateVoiceResponse.js.map +1 -1
- package/dist/ai/functions/validOptions.d.ts +11 -4
- package/dist/ai/functions/validOptions.d.ts.map +1 -1
- package/dist/ai/functions/validOptions.js +15 -14
- package/dist/ai/functions/validOptions.js.map +1 -1
- package/dist/ai/modals-chat/electronHub/imageModels.d.ts +7 -0
- package/dist/ai/modals-chat/electronHub/imageModels.d.ts.map +1 -0
- package/dist/ai/modals-chat/electronHub/imageModels.js +28 -0
- package/dist/ai/modals-chat/electronHub/imageModels.js.map +1 -0
- package/dist/ai/modals-chat/freesedgpt/cartoon.d.ts +2 -0
- package/dist/ai/modals-chat/freesedgpt/cartoon.d.ts.map +1 -0
- package/dist/ai/modals-chat/freesedgpt/cartoon.js +26 -0
- package/dist/ai/modals-chat/freesedgpt/cartoon.js.map +1 -0
- package/dist/ai/modals-chat/freesedgpt/chat.d.ts +5 -0
- package/dist/ai/modals-chat/freesedgpt/chat.d.ts.map +1 -0
- package/dist/ai/modals-chat/freesedgpt/chat.js +30 -0
- package/dist/ai/modals-chat/freesedgpt/chat.js.map +1 -0
- package/dist/ai/modals-chat/freesedgpt/flux.d.ts +2 -0
- package/dist/ai/modals-chat/freesedgpt/flux.d.ts.map +1 -0
- package/dist/ai/modals-chat/freesedgpt/flux.js +26 -0
- package/dist/ai/modals-chat/freesedgpt/flux.js.map +1 -0
- package/dist/ai/modals-chat/gemini/Gemini-flash.d.ts.map +1 -0
- package/dist/ai/modals-chat/{Gemini-flash.js → gemini/Gemini-flash.js} +2 -2
- package/dist/ai/modals-chat/gemini/Gemini-flash.js.map +1 -0
- package/dist/ai/modals-chat/gemini/Gemini-pro.d.ts.map +1 -0
- package/dist/ai/modals-chat/{Gemini-pro.js → gemini/Gemini-pro.js} +2 -2
- package/dist/ai/modals-chat/gemini/Gemini-pro.js.map +1 -0
- package/dist/ai/modals-chat/gemini/config.d.ts.map +1 -0
- package/dist/ai/modals-chat/gemini/config.js.map +1 -0
- package/dist/ai/modals-chat/gemini/geminiFast.d.ts.map +1 -0
- package/dist/ai/modals-chat/gemini/geminiFast.js.map +1 -0
- package/dist/ai/modals-chat/groq/chatgroq.d.ts +9 -0
- package/dist/ai/modals-chat/groq/chatgroq.d.ts.map +1 -0
- package/dist/ai/modals-chat/groq/chatgroq.js +58 -0
- package/dist/ai/modals-chat/groq/chatgroq.js.map +1 -0
- package/dist/ai/modals-chat/groq/imageAnalyzer.d.ts +8 -0
- package/dist/ai/modals-chat/groq/imageAnalyzer.d.ts.map +1 -0
- package/dist/ai/modals-chat/groq/imageAnalyzer.js +64 -0
- package/dist/ai/modals-chat/groq/imageAnalyzer.js.map +1 -0
- package/dist/ai/modals-chat/groq/whisper.d.ts.map +1 -0
- package/dist/ai/modals-chat/{whisper.js → groq/whisper.js} +2 -2
- package/dist/ai/modals-chat/groq/whisper.js.map +1 -0
- package/dist/ai/modals-chat/hercai/chatModals.d.ts +7 -0
- package/dist/ai/modals-chat/hercai/chatModals.d.ts.map +1 -0
- package/dist/ai/modals-chat/hercai/chatModals.js +23 -0
- package/dist/ai/modals-chat/hercai/chatModals.js.map +1 -0
- package/dist/ai/modals-chat/hercai/chatModels.d.ts +7 -0
- package/dist/ai/modals-chat/hercai/chatModels.d.ts.map +1 -0
- package/dist/ai/modals-chat/hercai/chatModels.js +23 -0
- package/dist/ai/modals-chat/hercai/chatModels.js.map +1 -0
- package/dist/ai/modals-chat/others/otherModels.d.ts +7 -0
- package/dist/ai/modals-chat/others/otherModels.d.ts.map +1 -0
- package/dist/ai/modals-chat/others/otherModels.js +88 -0
- package/dist/ai/modals-chat/others/otherModels.js.map +1 -0
- package/dist/ai/modals-chat/rsn/rsnChat.d.ts +8 -0
- package/dist/ai/modals-chat/rsn/rsnChat.d.ts.map +1 -0
- package/dist/ai/modals-chat/{bing.js → rsn/rsnChat.js} +22 -8
- package/dist/ai/modals-chat/rsn/rsnChat.js.map +1 -0
- package/dist/ai/modals-images/cartoon.js +1 -1
- package/dist/ai/modals-images/cartoon.js.map +1 -1
- package/dist/ai/modals-images/flux.js +1 -1
- package/dist/ai/modals-images/flux.js.map +1 -1
- package/dist/ai/utils.d.ts +4 -7
- package/dist/ai/utils.d.ts.map +1 -1
- package/dist/ai/utils.js +6 -12
- package/dist/ai/utils.js.map +1 -1
- package/dist/index.d.ts +11 -6
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +6 -3
- package/dist/index.js.map +1 -1
- package/lib/ai/ApexAI.ts +90 -191
- package/lib/ai/ApexModules.ts +43 -135
- package/lib/ai/functions/draw.ts +23 -68
- package/lib/ai/functions/generateVoiceResponse.ts +2 -2
- package/lib/ai/functions/validOptions.ts +23 -22
- package/lib/ai/modals-chat/electronHub/imageModels.ts +26 -0
- package/lib/ai/{modals-images → modals-chat/freesedgpt}/cartoon.ts +3 -3
- package/lib/ai/modals-chat/freesedgpt/chat.ts +31 -0
- package/lib/ai/{modals-images → modals-chat/freesedgpt}/flux.ts +3 -3
- package/lib/ai/modals-chat/{Gemini-flash.ts → gemini/Gemini-flash.ts} +2 -2
- package/lib/ai/modals-chat/{Gemini-pro.ts → gemini/Gemini-pro.ts} +2 -2
- package/lib/ai/modals-chat/groq/chatgroq.ts +68 -0
- package/lib/ai/modals-chat/groq/imageAnalyzer.ts +68 -0
- package/lib/ai/modals-chat/{whisper.ts → groq/whisper.ts} +2 -2
- package/lib/ai/modals-chat/hercai/chatModels.ts +20 -0
- package/lib/ai/modals-chat/others/otherModels.ts +99 -0
- package/lib/ai/modals-chat/{mixtral.ts → rsn/rsnChat.ts} +26 -8
- package/lib/ai/utils.ts +7 -12
- package/lib/index.ts +5 -3
- package/package.json +1 -1
- package/dist/ai/functions/imageAnalysis.d.ts +0 -2
- package/dist/ai/functions/imageAnalysis.d.ts.map +0 -1
- package/dist/ai/functions/imageAnalysis.js +0 -45
- package/dist/ai/functions/imageAnalysis.js.map +0 -1
- package/dist/ai/functions/readImagess.d.ts +0 -2
- package/dist/ai/functions/readImagess.d.ts.map +0 -1
- package/dist/ai/functions/readImagess.js +0 -45
- package/dist/ai/functions/readImagess.js.map +0 -1
- package/dist/ai/modals-chat/Gemini-flash.d.ts.map +0 -1
- package/dist/ai/modals-chat/Gemini-flash.js.map +0 -1
- package/dist/ai/modals-chat/Gemini-pro.d.ts.map +0 -1
- package/dist/ai/modals-chat/Gemini-pro.js.map +0 -1
- package/dist/ai/modals-chat/apexChat.d.ts +0 -2
- package/dist/ai/modals-chat/apexChat.d.ts.map +0 -1
- package/dist/ai/modals-chat/apexChat.js +0 -32
- package/dist/ai/modals-chat/apexChat.js.map +0 -1
- package/dist/ai/modals-chat/bard.d.ts +0 -7
- package/dist/ai/modals-chat/bard.d.ts.map +0 -1
- package/dist/ai/modals-chat/bard.js +0 -48
- package/dist/ai/modals-chat/bard.js.map +0 -1
- package/dist/ai/modals-chat/bing.d.ts +0 -7
- package/dist/ai/modals-chat/bing.d.ts.map +0 -1
- package/dist/ai/modals-chat/bing.js.map +0 -1
- package/dist/ai/modals-chat/codellama.d.ts +0 -7
- package/dist/ai/modals-chat/codellama.d.ts.map +0 -1
- package/dist/ai/modals-chat/codellama.js +0 -48
- package/dist/ai/modals-chat/codellama.js.map +0 -1
- package/dist/ai/modals-chat/config.d.ts.map +0 -1
- package/dist/ai/modals-chat/config.js.map +0 -1
- package/dist/ai/modals-chat/facebook-ai.d.ts +0 -2
- package/dist/ai/modals-chat/facebook-ai.d.ts.map +0 -1
- package/dist/ai/modals-chat/facebook-ai.js +0 -20
- package/dist/ai/modals-chat/facebook-ai.js.map +0 -1
- package/dist/ai/modals-chat/geminiFast.d.ts.map +0 -1
- package/dist/ai/modals-chat/geminiFast.js.map +0 -1
- package/dist/ai/modals-chat/geminiV2.d.ts +0 -7
- package/dist/ai/modals-chat/geminiV2.d.ts.map +0 -1
- package/dist/ai/modals-chat/geminiV2.js +0 -48
- package/dist/ai/modals-chat/geminiV2.js.map +0 -1
- package/dist/ai/modals-chat/gemma.d.ts +0 -2
- package/dist/ai/modals-chat/gemma.d.ts.map +0 -1
- package/dist/ai/modals-chat/gemma.js +0 -43
- package/dist/ai/modals-chat/gemma.js.map +0 -1
- package/dist/ai/modals-chat/llama.d.ts +0 -7
- package/dist/ai/modals-chat/llama.d.ts.map +0 -1
- package/dist/ai/modals-chat/llama.js +0 -48
- package/dist/ai/modals-chat/llama.js.map +0 -1
- package/dist/ai/modals-chat/llamav2.d.ts +0 -2
- package/dist/ai/modals-chat/llamav2.d.ts.map +0 -1
- package/dist/ai/modals-chat/llamav2.js +0 -43
- package/dist/ai/modals-chat/llamav2.js.map +0 -1
- package/dist/ai/modals-chat/llamav3.d.ts +0 -2
- package/dist/ai/modals-chat/llamav3.d.ts.map +0 -1
- package/dist/ai/modals-chat/llamav3.js +0 -43
- package/dist/ai/modals-chat/llamav3.js.map +0 -1
- package/dist/ai/modals-chat/mixtral.d.ts +0 -7
- package/dist/ai/modals-chat/mixtral.d.ts.map +0 -1
- package/dist/ai/modals-chat/mixtral.js +0 -48
- package/dist/ai/modals-chat/mixtral.js.map +0 -1
- package/dist/ai/modals-chat/mixtralv2.d.ts +0 -2
- package/dist/ai/modals-chat/mixtralv2.d.ts.map +0 -1
- package/dist/ai/modals-chat/mixtralv2.js +0 -43
- package/dist/ai/modals-chat/mixtralv2.js.map +0 -1
- package/dist/ai/modals-chat/modals.d.ts +0 -8
- package/dist/ai/modals-chat/modals.d.ts.map +0 -1
- package/dist/ai/modals-chat/modals.js +0 -16
- package/dist/ai/modals-chat/modals.js.map +0 -1
- package/dist/ai/modals-chat/openChat.d.ts +0 -7
- package/dist/ai/modals-chat/openChat.d.ts.map +0 -1
- package/dist/ai/modals-chat/openChat.js +0 -48
- package/dist/ai/modals-chat/openChat.js.map +0 -1
- package/dist/ai/modals-chat/starChat.d.ts +0 -2
- package/dist/ai/modals-chat/starChat.d.ts.map +0 -1
- package/dist/ai/modals-chat/starChat.js +0 -31
- package/dist/ai/modals-chat/starChat.js.map +0 -1
- package/dist/ai/modals-chat/v4.d.ts +0 -7
- package/dist/ai/modals-chat/v4.d.ts.map +0 -1
- package/dist/ai/modals-chat/v4.js +0 -48
- package/dist/ai/modals-chat/v4.js.map +0 -1
- package/dist/ai/modals-chat/whisper.d.ts.map +0 -1
- package/dist/ai/modals-chat/whisper.js.map +0 -1
- package/dist/ai/modals-chat/yi-ai.d.ts +0 -2
- package/dist/ai/modals-chat/yi-ai.d.ts.map +0 -1
- package/dist/ai/modals-chat/yi-ai.js +0 -40
- package/dist/ai/modals-chat/yi-ai.js.map +0 -1
- package/lib/ai/functions/imageAnalysis.ts +0 -41
- package/lib/ai/modals-chat/apexChat.ts +0 -31
- package/lib/ai/modals-chat/bard.ts +0 -44
- package/lib/ai/modals-chat/bing.ts +0 -44
- package/lib/ai/modals-chat/codellama.ts +0 -44
- package/lib/ai/modals-chat/facebook-ai.ts +0 -14
- package/lib/ai/modals-chat/geminiV2.ts +0 -44
- package/lib/ai/modals-chat/gemma.ts +0 -35
- package/lib/ai/modals-chat/llama.ts +0 -44
- package/lib/ai/modals-chat/llamav2.ts +0 -35
- package/lib/ai/modals-chat/llamav3.ts +0 -35
- package/lib/ai/modals-chat/mixtralv2.ts +0 -35
- package/lib/ai/modals-chat/modals.ts +0 -8
- package/lib/ai/modals-chat/openChat.ts +0 -44
- package/lib/ai/modals-chat/starChat.ts +0 -31
- package/lib/ai/modals-chat/v4.ts +0 -44
- package/lib/ai/modals-chat/yi-ai.ts +0 -40
- /package/dist/ai/modals-chat/{Gemini-flash.d.ts → gemini/Gemini-flash.d.ts} +0 -0
- /package/dist/ai/modals-chat/{Gemini-pro.d.ts → gemini/Gemini-pro.d.ts} +0 -0
- /package/dist/ai/modals-chat/{config.d.ts → gemini/config.d.ts} +0 -0
- /package/dist/ai/modals-chat/{config.js → gemini/config.js} +0 -0
- /package/dist/ai/modals-chat/{geminiFast.d.ts → gemini/geminiFast.d.ts} +0 -0
- /package/dist/ai/modals-chat/{geminiFast.js → gemini/geminiFast.js} +0 -0
- /package/dist/ai/modals-chat/{whisper.d.ts → groq/whisper.d.ts} +0 -0
- /package/lib/ai/modals-chat/{config.ts → gemini/config.ts} +0 -0
- /package/lib/ai/modals-chat/{geminiFast.ts → gemini/geminiFast.ts} +0 -0
package/lib/ai/ApexModules.ts
CHANGED
@@ -3,22 +3,18 @@ import axios from 'axios';
 import api from "api";
 import translate from "@iamtraction/google-translate";
 const sdk = api("@prodia/v1.3.0#be019b2kls0gqss3");
-import {
-import {
-import { llamaChat, mixtral, openChat, v4 } from './utils';
-import { codellama } from './modals-chat/codellama';
-import { bard } from './modals-chat/bard';
-import { bing } from './modals-chat/bing';
-import { gemini_v2 } from './modals-chat/geminiV2';
-import { whisper } from './modals-chat/whisper';
-import { llama2 } from './modals-chat/llamav2';
-import { llama3 } from './modals-chat/llamav3';
-import { gemma } from './modals-chat/gemma';
-import { mixtral2 } from './modals-chat/mixtralv2';
+import { groqChatModels, hercChatModels, otherChatModel, rsnChatModels, validateModels } from "./functions/validOptions";
+import { whisper } from './modals-chat/groq/whisper';
 import { connect } from 'verse.db';
-import {
-import {
-import {
+import { otherChatModels } from './modals-chat/others/otherModels';
+import { rsnAPI } from './modals-chat/rsn/rsnChat';
+import { chatGroq } from './modals-chat/groq/chatgroq';
+import { geminiFast } from './modals-chat/gemini/geminiFast';
+import { cartoon } from './modals-chat/freesedgpt/cartoon';
+import { flux } from './modals-chat/freesedgpt/flux';
+import { gpt4o } from './modals-chat/freesedgpt/chat';
+import { groqAnalyzer } from './utils';
+

 const dbConfig = {
 adapter: 'json',
@@ -50,21 +46,19 @@ type hercmodals = "v1" | "v2" | "v2-beta" | "v3" | "lexica" | "prodia" | "animef
 async function ApexImagine(model: string, prompt: string, options: ApexImagineOptions): Promise<string[] | undefined> {
 let imageURL: string | null = '';
 let response: string[] = [];
-const imageType = await
-const { nsfw = false, deepCheck = false, nsfwWords = [], count = 2, negative_prompt = "", sampler = "DPM++ 2M Karras", height = 512, width = 512, cfg_scale = 9, steps = 20, seed = -1, image_style = "cinematic" } = options;
-
-sdk.auth(options.Api_Key || 'eaebff6e-c7b2-477c-8edd-9aa91becf1e3');
+const imageType = await validateModels;
+const { Api_Key = 'eaebff6e-c7b2-477c-8edd-9aa91becf1e3', nsfw = false, deepCheck = false, nsfwWords = [], count = 2, negative_prompt = "", sampler = "DPM++ 2M Karras", height = 512, width = 512, cfg_scale = 9, steps = 20, seed = -1, image_style = "cinematic" } = options;

 const translatedText = await translate(prompt, {
 from: "auto",
-to: "en"
-
+to: "en"
+});

 if (count > 4 || count <= 0) throw new Error("Inavlid usage. Count can't be less than 0 or more than 4.");
-if ((width || height)
+if ((width || height) <= 0) throw new Error("Inavlid usage. Image width/height can't be less than 0 or more than 1024.");

 for (let i = 0; i < count; i++) {
-if (imageType.
+if (imageType.validHercaiModels.includes(model)) {
 if (model === 'prodia-v2') {
 imageURL = (await hercai.betaDrawImage({
 prompt: translatedText.text,
@@ -83,7 +77,8 @@ async function ApexImagine(model: string, prompt: string, options: ApexImagineOp
 negative_prompt: negative_prompt
 })).url;
 }
-} else if (imageType.
+} else if (imageType.validProdiaModels.includes(model)) {
+sdk.auth(Api_Key);
 const generating = await sdk.generate({
 model: model,
 prompt: translatedText.text,
@@ -100,6 +95,7 @@ async function ApexImagine(model: string, prompt: string, options: ApexImagineOp
 const generatedJobId = generating.data.job;
 imageURL = await checkJobStatus(generatedJobId);
 } else if (imageType.validSXDL.includes(model)) {
+sdk.auth(Api_Key);
 const generating = await sdk.sdxlGenerate({
 model: model,
 prompt: translatedText.text,
@@ -115,15 +111,16 @@ async function ApexImagine(model: string, prompt: string, options: ApexImagineOp

 const generatedJobId = generating.data.job;
 imageURL = await checkJobStatus(generatedJobId);
-} else if (model === 'flux') {
-const
-if (
-
-
-
-
-if (
-
+} else if (model === 'flux-schnell') {
+const gen = await flux(translatedText.text, options.Api_Key as string);
+if (gen === null) throw new Error('this model reached rate limit. Provide your own api key from: "https://discord.gg/94qUZWhwFE"');
+response = gen
+
+} else if (model === 'real-cartoon-xl-v6') {
+const gen = await cartoon(translatedText.text, options.Api_Key as string);
+if (gen === null) throw new Error('this model reached rate limit. Provide your own api key from: "https://discord.gg/94qUZWhwFE"');
+response = gen
+} else {
 throw new Error("Invalid model provided. Please check docs/npm page for valid models.");
 }

@@ -618,7 +615,7 @@ async function ApexImagine(model: string, prompt: string, options: ApexImagineOp
 let shouldExclude = false;

 if (nsfw === true) {
-const caption = await
+const caption = await groqAnalyzer({ imgURL: imageURL, prompt });
 if (!caption) return;
 shouldExclude = NSFWWORDS.some(word => caption.includes(word));
 shouldExclude = nsfwWords.some(word => caption.includes(word));
@@ -678,7 +675,7 @@ async function ApexChat(model: string, prompt: string, { userId, memory, limit,
 }

 const responses = await Promise.all(chunks.map(async (chunk) => {
-return await processChunk(model, chunk, {});
+return await processChunk(model, chunk, { ApiKey: Api_key, personality: instruction });
 }));

 response = responses.join('');
@@ -699,116 +696,27 @@ async function ApexChat(model: string, prompt: string, { userId, memory, limit,

 async function processChunk(model: string, prompt: string, { ApiKey, personality }: { ApiKey?: string, personality?: string }): Promise<string> {
 let response: string;
-switch (
-case
-case 'v3-32k':
-case 'turbo':
-case 'turbo-16k':
-case 'gemini':
+switch (true) {
+case hercChatModels.includes(model):
 response = (await hercai.question({ model: model as ChatModelOption, content: prompt, personality })).reply;
 break;
-case
-response = await
-break;
-case 'starChat':
-response = await starChat(prompt);
-break;
-case 'facebook-ai':
-response = await facebook_ai(prompt);
-break;
-case 'yi-ai':
-response = await yi_34b(prompt);
-break;
-case 'v4':
-response = await v4({ API_KEY: ApiKey, prompt });
-break;
-case 'openChat':
-response = await openChat({ API_KEY: ApiKey, prompt });
-break;
-case 'mixtral':
-response = await mixtral({ API_KEY: ApiKey, prompt });
-break;
-case 'llama':
-response = await llamaChat({ API_KEY: ApiKey, prompt });
-break;
-case 'llama-v2':
-response = await llama2(prompt, ApiKey);
-break;
-case 'llama-v3':
-response = await llama3(prompt, ApiKey);
-break;
-case 'gemma':
-response = await gemma(prompt, ApiKey);
-break;
-case 'mixtral-v2':
-response = await mixtral2(prompt, ApiKey);
+case otherChatModel.includes(model):
+response = await otherChatModels({ modelName: model as any, prompt });
 break;
-case
-response = await
+case rsnChatModels.includes(model):
+response = await rsnAPI({ API_KEY: ApiKey, apiName: model, prompt });
 break;
-case
-response = await
-break;
-case 'bing':
-response = await bing({ API_KEY: ApiKey, prompt });
-break;
-case 'gemini-v2':
-response = await gemini_v2({ API_KEY: ApiKey, prompt });
+case groqChatModels.includes(model):
+response = await chatGroq({ API_KEY: ApiKey, apiName: model, prompt, instruction: personality });
 break;
+case model === 'gpt-4o':
+response = await gpt4o({ ApiKey, prompt });
+break;
 default:
 throw new Error('Invalid model.');
 }
 return response;
 }
-async function apexChecker(urls: any) {
-try {
-let retryCount = 0;
-const maxRetries = 3;
-
-const fetchData = async () => {
-try {
-const response = await axios.post(
-`https://api-inference.huggingface.co/models/Salesforce/blip-image-captioning-base`,
-{ image: urls },
-{
-headers: {
-"Content-Type": "application/json",
-Authorization: `Bearer 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`,
-},
-},
-);
-
-if (response.status === 200) {
-return response.data[0].generated_text;
-} else {
-console.error(
-`Failed to fetch image captioning API: ${response.statusText}`,
-);
-return null;
-}
-} catch (e: any) {
-console.error(`Error fetching data: ${e.message}`);
-throw e;
-}
-};
-
-while (retryCount < maxRetries) {
-try {
-return await fetchData();
-} catch (e: any) {
-console.error(
-`Error fetching data (Retry ${retryCount + 1}): ${e.message}`,
-);
-retryCount++;
-}
-}
-
-return null;
-} catch (e: any) {
-console.error(`Error in attemptImageCaptioning: ${e.message}`);
-return null;
-}
-}

 async function checkJobStatus(jobId: string): Promise<string | null> {
 try {
package/lib/ai/functions/draw.ts
CHANGED
@@ -2,10 +2,12 @@ import translate from "@iamtraction/google-translate";
 import sharp from "sharp";
 import { ButtonBuilder, StringSelectMenuBuilder, StringSelectMenuOptionBuilder, ButtonStyle, ActionRowBuilder, AttachmentBuilder, AttachmentData } from "discord.js";
 import axios from "axios";
-import {
+import { validateModels } from './validOptions';
 import api from "api";
-import {
-import { flux } from "../modals-
+import { groqAnalyzer } from "../utils";
+import { flux } from "../modals-chat/freesedgpt/flux";
+import { cartoon } from "../modals-chat/freesedgpt/cartoon";
+

 const sdk: any = api("@prodia/v1.3.0#be019b2kls0gqss3");
 async function aiImagine(
@@ -20,14 +22,12 @@ async function aiImagine(
 enhancer: any,
 buttons: any[],
 RespondMessage: any,
-
+imageAPIS?: { groqAPI?: string, rsnAPI?: string, prodiaAPI?: string, freesedGPTApi?: string }
 ) {
-sdk.auth(imgKey);
-
 const maxRetryAttempts = 4;
 const retryInterval = 5000;
 let response: any;
-const imageType = await
+const imageType = await validateModels;

 async function retry(fn: any, retriesLeft = maxRetryAttempts) {

@@ -80,14 +80,16 @@ async function aiImagine(
 );
 if (response.url === 'This Modal is Currently Under Maintenance.') throw new Error('this model is under Maintenance for a while.')

-} else if (imageType.
+} else if (imageType.validHercaiModels.includes(imageModal)) {
 response = await retry(() =>
 hercai.drawImage({
 model: imageModal,
 prompt: translatedText.text,
 }),
 );
-} else if (imageType.
+} else if (imageType.validProdiaModels.includes(imageModal)) {
+sdk.auth(imageAPIS?.prodiaAPI);
+
 const generateResponse = await sdk.generate({
 model: imageModal,
 prompt: translatedText.text,
@@ -106,6 +108,7 @@ async function aiImagine(
 const generatedJobId = generateResponse.data.job;
 response = await checkJobStatus(generatedJobId);
 } else if (imageType.validSXDL.includes(imageModal)) {
+sdk.auth(imageAPIS?.prodiaAPI);

 const generateResponse = await sdk.sdxlGenerate({
 model: imageModal,
@@ -124,15 +127,16 @@ async function aiImagine(

 const generatedJobId = generateResponse.data.job;
 response = await checkJobStatus(generatedJobId);
-} else if (imageModal === 'flux') {
-const
-if (
-
-
-
-
-if (
-
+} else if (imageModal === 'flux-schnell') {
+const gen = await flux(translatedText.text, imageAPIS?.freesedGPTApi as string);
+if (gen === null) return message.reply(`Please wait i am in a cool down for a minute`);
+response = gen
+
+} else if (imageModal === 'real-cartoon-xl-v6') {
+const gen = await cartoon(translatedText.text, imageAPIS?.freesedGPTApi as string);
+if (gen === null) return message.reply(`Please wait i am in a cool down for a minute`);
+response = gen
+} else {
 throw new Error("Invalid modal name.");
 }
 } catch (error: any) {
@@ -691,7 +695,7 @@ async function aiImagine(
 }

 if (nsfw) {
-const textToCheck = await
+const textToCheck = await groqAnalyzer({ imgURL: imageUrl, prompt: textToDraw });

 if (textToCheck && nsfwWords.some(word => textToCheck.includes(word))) {
 return message.reply("Warning ⚠️. Your prompt contains **`NSFW/Illegal/Prohibited`** words. Please refrain from doing this.");
@@ -832,54 +836,5 @@ async function checkJobStatus(jobId: number | string | any) {
 }
 }

-async function attemptImageCaptioning(imageUrl: string) {
-try {
-let retryCount = 0;
-const maxRetries = 3;
-
-const fetchData = async () => {
-try {
-const response = await axios.post(
-`https://api-inference.huggingface.co/models/Salesforce/blip-image-captioning-base`,
-{ image: imageUrl },
-{
-headers: {
-"Content-Type": "application/json",
-Authorization: `Bearer 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`,
-},
-},
-);
-
-if (response.status === 200) {
-return response.data[0].generated_text;
-} else {
-console.error(
-`Failed to fetch image captioning API: ${response.statusText}`,
-);
-return null;
-}
-} catch (error: any) {
-console.error(`Error fetching data: ${error.message}`);
-throw error;
-}
-};
-
-while (retryCount < maxRetries) {
-try {
-return await fetchData();
-} catch (error: any) {
-console.error(
-`Error fetching data (Retry ${retryCount + 1}): ${error.message}`,
-);
-retryCount++;
-}
-}
-
-return null;
-} catch (error: any) {
-console.error(`Error in attemptImageCaptioning: ${error.message}`);
-return null;
-}
-}

 export { aiImagine };
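
The old single `imgKey` argument (and the unconditional `sdk.auth(imgKey)` call) is replaced by an optional per-provider key bag, with authentication moved into the Prodia/SDXL branches. A rough sketch of that shape, with a hypothetical `pickKey` helper that is not part of the package:

```ts
// Per-provider key bag that replaces the old single imgKey argument.
// Field names mirror the new aiImagine/aiVoice parameter; pickKey is a hypothetical helper.
interface ImageAPIS {
  groqAPI?: string;
  rsnAPI?: string;
  prodiaAPI?: string;
  freesedGPTApi?: string;
}

function pickKey(branch: "prodia" | "sdxl" | "fresed", keys?: ImageAPIS): string | undefined {
  // Prodia and SDXL branches call sdk.auth(keys?.prodiaAPI) right before generating;
  // the flux-schnell / real-cartoon-xl-v6 branches pass keys?.freesedGPTApi to flux()/cartoon().
  return branch === "fresed" ? keys?.freesedGPTApi : keys?.prodiaAPI;
}

// Example: pickKey("prodia", { prodiaAPI: "your-prodia-key" }) === "your-prodia-key"
```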
package/lib/ai/functions/generateVoiceResponse.ts
CHANGED
@@ -28,7 +28,7 @@ async function aiVoice(
 enhancer: any,
 buttons: any[],
 RespondMessage: any,
-
+imageAPIS?: { groqAPI?: string, rsnAPI?: string, prodiaAPI?: string, freesedGPTApi?: string }
 ) {
 if (message.author.bot || isProcessing || !message.guild) {
 return;
@@ -40,7 +40,7 @@ async function aiVoice(
 let msg = message.content;

 if (drawValid) {
-return await aiImagine(message, numOfImages, finalText, hercai, imageModal, nsfw, nsfwKeyWords, deepCheck, enhancer, buttons, RespondMessage,
+return await aiImagine(message, numOfImages, finalText, hercai, imageModal, nsfw, nsfwKeyWords, deepCheck, enhancer, buttons, RespondMessage, { rsnAPI: imageAPIS?.rsnAPI, groqAPI: imageAPIS?.groqAPI, prodiaAPI: imageAPIS?.prodiaAPI, freesedGPTApi: imageAPIS?.freesedGPTApi as string});
 }

 if (message.attachments.size > 0) {
package/lib/ai/functions/validOptions.ts
CHANGED
@@ -3,20 +3,20 @@ import api from "api";
 const sdk = api("@prodia/v1.3.0#be019b2kls0gqss3");
 sdk.auth('43435e1c-cab1-493f-a224-f51e4b97ce8d');

-
-
-
-
+export const hercChatModels = ["v3" , "v3-32k" , "turbo" , "turbo-16k" , "gemini" , "llama3-70b" , "llama3-8b" , "mixtral-8x7b" , "gemma-7b" , "gemma2-9b"];
+export const groqChatModels = ['gemma-7b-it', 'gemma2-9b-it', 'llama3-groq-70b-8192-tool-use-preview', 'llama3-groq-8b-8192-tool-use-preview', 'llama-3.1-70b-versatile', 'llama-3.1-8b-instant', 'llama-guard-3-8b', 'llama3-70b-8192', 'llama3-8b-8192', 'mixtral-8x7b-32768'];
+export const rsnChatModels = ['bard', 'bing', 'codellama', 'gemini', 'llama', 'mixtral', 'openchat', 'gpt4'];
+export const otherChatModel = ['apexai', 'facebook_ai', 'yi_34b', 'starChat'];
+export const fresedgptModels = ['real-cartoon-xl-v6', 'flux-schnell', 'gpt-4o'];
+
+async function initializeValidOptions() {
+const [SDModels, SDXLModels, samplers] = await Promise.all([
+sdModels(),
+sdxlModels(),
 sampler()
 ]);

-const
-const ApexChatModals = ['v4', 'gemini-v2', 'gemini', 'v3', 'v3-32k', 'turbo', 'turbo-16k', 'llama', 'apexChat', 'openChat', 'yi-ai', 'facebook-ai', 'starChat', 'mixtral', 'codellama', 'bing', 'bard'];
-
-const HercModals = [
-"v1",
-"v2",
-"v2-beta",
+const HercImageModels = [
 "v3",
 "lexica",
 "prodia",
@@ -27,38 +27,39 @@ async function initializeValidOptions() {
 "shonin"
 ];

-const others = ['cartoon', 'flux'];

 return {
-
-
-
-
-
+validHercChatModels: hercChatModels,
+validgroqChatModels: groqChatModels,
+validRSNChatModels: rsnChatModels,
+validHercaiModels: HercImageModels,
+validProdiaModels: SDModels,
+validotherChatModels: otherChatModel,
+validfresedgptModels: fresedgptModels,
 validEnhancers: [
 "ESRGAN_4x", "Lanczos", "Nearest", "LDSR", "R-ESRGAN 4x+",
 "R-ESRGAN 4x+ Anime6B", "ScuNET GAN", "ScuNET PSNR", "SwinIR 4x"
 ],
 validSamplers: samplers,
-validSXDL:
+validSXDL: SDXLModels,
 validImgStyle: [
 "3d-model", "analog-film", "anime", "cinematic", "comic-book",
 "digital-art", "enhance", "isometric", "fantasy-art", "isometric",
 "line-art", "low-poly", "neon-punk", "origami", "photographic",
 "pixel-art", "texture", "craft-clay"
 ],
-
+allModels: [...SDModels, ...SDXLModels, ...HercImageModels, ...fresedgptModels, ...hercChatModels, ...otherChatModel, ...groqChatModels, ...rsnChatModels]
 };
 }

-export const
+export const validateModels = initializeValidOptions();

-async function
+async function sdModels(): Promise<string[]> {
 const SDModals = await sdk.listModels();
 return SDModals.data;
 }

-async function
+async function sdxlModels(): Promise<string[]> {
 const SDXLModals = await sdk.listSdxlModels();
 return SDXLModals.data;
 }
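
`validateModels` is now a module-level promise created once by `initializeValidOptions()`; every caller awaits the same promise and reads the resolved lists. A stubbed sketch of the pattern, with the remote `sdModels()`/`sdxlModels()` calls replaced by placeholder data:

```ts
// Stubbed version of the validOptions pattern: a single promise created at module load,
// awaited by every caller, so the remote model lists are fetched only once.
async function initializeValidOptions() {
  const [SDModels, SDXLModels] = await Promise.all([
    Promise.resolve(["sd-model-a", "sd-model-b"]), // stand-in for sdModels()
    Promise.resolve(["sdxl-model-a"]),             // stand-in for sdxlModels()
  ]);
  return {
    validProdiaModels: SDModels,
    validSXDL: SDXLModels,
    allModels: [...SDModels, ...SDXLModels, "flux-schnell", "real-cartoon-xl-v6"],
  };
}

export const validateModels = initializeValidOptions();

// Callers await the shared promise instead of re-fetching:
export async function isKnownModel(name: string): Promise<boolean> {
  const lists = await validateModels;
  return lists.allModels.includes(name);
}
```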
package/lib/ai/modals-chat/electronHub/imageModels.ts
ADDED
@@ -0,0 +1,26 @@
+import axios from 'axios';
+
+export async function electronImagine({ ApiKey, prompt, modelName, count = 1 }: { ApiKey?: string, prompt: string, modelName: string, count?: number }) {
+try {
+const response = await axios.post(
+'https://api.electronhub.top/v1/images/generate',
+{
+model: modelName,
+prompt: prompt,
+n: count
+},
+{
+headers: {
+'Authorization': `Bearer ${ApiKey || 'ek-nFO8tz6qiu5cJ31lwCfPZNNrxFZLsJYou6yx4X1FS2Jyr2dm0a'}`,
+'Content-Type': 'application/json'
+}
+}
+);
+
+const imagesUrl = response.data;
+return imagesUrl;
+} catch (e: any) {
+console.error('Error generating images:', e.response ? e.response.data : e.message);
+throw e;
+}
+}
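
A hedged usage sketch for the new `electronImagine` helper; the import path mirrors the new file's location under `lib/ai`, while the model name and environment variable are placeholders:

```ts
import { electronImagine } from "./modals-chat/electronHub/imageModels"; // path relative to lib/ai

async function demo() {
  // modelName and the environment variable are placeholders, not values from the package.
  const images = await electronImagine({
    prompt: "a watercolor fox",
    modelName: "some-image-model",
    count: 2,
    ApiKey: process.env.ELECTRONHUB_KEY, // falls back to the bundled key when omitted
  });
  console.log(images); // the helper returns response.data from /v1/images/generate unmodified
}
```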
package/lib/ai/{modals-images → modals-chat/freesedgpt}/cartoon.ts
CHANGED
@@ -1,6 +1,6 @@
 import axios from 'axios';

-export async function cartoon(prompt: string) {
+export async function cartoon(prompt: string, apiKey: string) {
 try {
 const response = await axios.post(
 'https://fresedgpt.space/v1/images/generations',
@@ -11,13 +11,13 @@ export async function cartoon(prompt: string) {
 },
 {
 headers: {
-'Authorization':
+'Authorization': `Bearer ${ apiKey || 'fresed-Dtm2TBDA9vXcaHFdrDL1apbF2fnOIQ' }`,
 'Content-Type': 'application/json'
 }
 }
 );

-return response.data.data[0].url;
+return response.data.data[0].url || null;
 } catch (e: any) {
 return null
 }
package/lib/ai/modals-chat/freesedgpt/chat.ts
ADDED
@@ -0,0 +1,31 @@
+import axios from 'axios';
+const API_URL = 'https://fresedgpt.space/v1/chat/completions';
+
+export async function gpt4o({ prompt, ApiKey}: { prompt: string, ApiKey?: string}) {
+try {
+const API_KEY = ApiKey || 'fresed-Dtm2TBDA9vXcaHFdrDL1apbF2fnOIQ';
+const response = await axios.post(
+API_URL,
+{
+messages: [{ role: 'user', content: `${prompt}` }],
+model: 'chatgpt-4o-latest',
+stream: false
+},
+{
+headers: {
+'Authorization': `Bearer ${API_KEY}`,
+'Content-Type': 'application/json'
+}
+}
+);
+
+const responseData = response.data;
+const assistantContent = responseData.choices[0]?.message?.content || 'No response content available';
+
+return assistantContent || null;
+
+} catch (error) {
+console.error('Error creating chat completion:', error);
+}
+}
+
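
A usage sketch for the new `gpt4o` helper, assuming it is imported from the new module path; `ApiKey` is optional and the helper falls back to the key bundled in the source:

```ts
import { gpt4o } from "./modals-chat/freesedgpt/chat"; // path relative to lib/ai

async function demo() {
  const reply = await gpt4o({ prompt: "Summarise apexify.js in one sentence." });
  console.log(reply); // string content, or undefined if the request failed and was logged
}
```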
package/lib/ai/{modals-images → modals-chat/freesedgpt}/flux.ts
CHANGED
@@ -1,6 +1,6 @@
 import axios from 'axios';

-export async function flux(prompt: string) {
+export async function flux(prompt: string, apiKey: string) {
 try {
 const response = await axios.post(
 'https://fresedgpt.space/v1/images/generations',
@@ -11,13 +11,13 @@ export async function flux(prompt: string) {
 },
 {
 headers: {
-'Authorization':
+'Authorization': `Bearer ${ apiKey || 'fresed-Dtm2TBDA9vXcaHFdrDL1apbF2fnOIQ' }`,
 'Content-Type': 'application/json'
 }
 }
 );

-return response.data.data[0].url;
+return response.data.data[0].url || null;
 } catch (e: any) {
 return null
 }
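
Both fresedGPT image helpers now take an `apiKey` as their second argument and resolve to a URL string or `null` on failure, which callers treat as a rate limit on the shared key. A usage sketch with placeholder key handling:

```ts
import { flux } from "./modals-chat/freesedgpt/flux";       // paths relative to lib/ai
import { cartoon } from "./modals-chat/freesedgpt/cartoon";

async function demo() {
  const key = process.env.FRESEDGPT_KEY ?? ""; // placeholder; an empty key falls back to the bundled one
  const fluxUrl = await flux("a foggy mountain road", key);
  const cartoonUrl = await cartoon("a cat astronaut", key);
  if (fluxUrl === null || cartoonUrl === null) {
    console.log("Rate limited - provide your own fresedGPT key.");
  }
}
```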
package/lib/ai/modals-chat/{Gemini-flash.ts → gemini/Gemini-flash.ts}
CHANGED
@@ -1,11 +1,11 @@
 import { GoogleGenerativeAI, HarmCategory, HarmBlockThreshold } from "@google/generative-ai";
-import { converter } from "
+import { converter } from "../../../canvas/utils/general functions";
 import { connect } from "verse.db";
 import config from './config';
 import axios from "axios";
 import path from 'path';
 import fs from 'fs';
-import { readFile } from "
+import { readFile } from "../../utils";

 export async function geminiFlash(message: { userId: string, serverName: string, serverId: string, channelName: string, attachment: any, db: boolean }, AI: { AiPersonality: string | null, userMsg: string, API_KEY: string | null }): Promise<any> {

package/lib/ai/modals-chat/{Gemini-pro.ts → gemini/Gemini-pro.ts}
CHANGED
@@ -1,5 +1,5 @@
 import { GoogleGenerativeAI, HarmCategory, HarmBlockThreshold } from "@google/generative-ai";
-import { converter } from "
+import { converter } from "../../../canvas/utils/general functions";
 import { GoogleAIFileManager } from "@google/generative-ai/server";

 import { connect } from "verse.db";
@@ -7,7 +7,7 @@ import config from './config';
 import axios from "axios";
 import path from 'path';
 import fs from 'fs';
-import { readFile } from "
+import { readFile } from "../../utils";

 export async function geminiPro(message: { userId: string, serverName: string, serverId: string, channelName: string, attachment: any, db: boolean }, AI: { AiPersonality: string | null, userMsg: string, API_KEY: string | null }): Promise<any> {

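
For completeness, a call sketch for the relocated Gemini helpers; the argument shapes are copied from the exported `geminiFlash` signature above, and all values are placeholders:

```ts
import { geminiFlash } from "./modals-chat/gemini/Gemini-flash"; // path relative to lib/ai

async function demo() {
  const reply = await geminiFlash(
    {
      userId: "123",
      serverName: "My Server",
      serverId: "456",
      channelName: "general",
      attachment: null, // no image attached in this sketch
      db: false,
    },
    {
      AiPersonality: null,
      userMsg: "Hello there!",
      API_KEY: process.env.GEMINI_API_KEY ?? null,
    },
  );
  console.log(reply);
}
```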