apexify.js 4.1.6 → 4.1.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/ai/modals-chat/Gemini-flash.d.ts.map +1 -1
- package/dist/ai/modals-chat/Gemini-flash.js +56 -32
- package/dist/ai/modals-chat/Gemini-flash.js.map +1 -1
- package/dist/ai/modals-chat/Gemini-pro.d.ts.map +1 -1
- package/dist/ai/modals-chat/Gemini-pro.js +55 -31
- package/dist/ai/modals-chat/Gemini-pro.js.map +1 -1
- package/dist/ai/modals-chat/llama.d.ts.map +1 -1
- package/dist/ai/modals-chat/llama.js +10 -2
- package/dist/ai/modals-chat/llama.js.map +1 -1
- package/dist/ai/modals-chat/mixtral.d.ts.map +1 -1
- package/dist/ai/modals-chat/mixtral.js +10 -2
- package/dist/ai/modals-chat/mixtral.js.map +1 -1
- package/dist/ai/modals-chat/openChat.d.ts.map +1 -1
- package/dist/ai/modals-chat/openChat.js +11 -3
- package/dist/ai/modals-chat/openChat.js.map +1 -1
- package/dist/ai/modals-chat/v4.d.ts.map +1 -1
- package/dist/ai/modals-chat/v4.js +10 -2
- package/dist/ai/modals-chat/v4.js.map +1 -1
- package/lib/ai/modals-chat/Gemini-flash.ts +155 -133
- package/lib/ai/modals-chat/Gemini-pro.ts +154 -132
- package/lib/ai/modals-chat/llama.ts +11 -6
- package/lib/ai/modals-chat/mixtral.ts +12 -7
- package/lib/ai/modals-chat/openChat.ts +11 -6
- package/lib/ai/modals-chat/v4.ts +11 -6
- package/package.json +1 -1
package/lib/ai/modals-chat/Gemini-flash.ts

@@ -3,185 +3,207 @@ import path from 'path';
 import fs from 'fs';
 import config from './config';
 import { converter } from "../../canvas/utils/general functions";
-import …
+import { connect } from "verse.db";
 import axios from "axios";
 
 let currentApiKeyIndex = 0;
 
-export async function geminiFlash(message: { userId: string, serverName: string, serverId: string, channelName: string, attachment: any, db: boolean …
+export async function geminiFlash(message: { userId: string, serverName: string, serverId: string, channelName: string, attachment: any, db: boolean }, AI: { AiPersonality: string | null, userMsg: string, API_KEY: string | null }): Promise<any> {
 
-… (13 lines not shown in the source diff)
+    async function validateKey(apiKey: string) {
+        try {
+            const validateResponse = await axios.get(`https://generativelanguage.googleapis.com/v1beta/models?key=${apiKey}`);
+            return validateResponse.status === 200;
+        } catch (error) {
+            console.error(`Error validating API key: ${apiKey}`, error);
+            return false;
+        }
+    }
+
+    async function getValidGenAI(apiKeys: string[], initialIndex: number): Promise<any> {
+        for (let i = initialIndex; i < apiKeys.length; i++) {
+            const apiKey = apiKeys[i];
+            if (await validateKey(apiKey)) {
+                currentApiKeyIndex = i;
+                return new GoogleGenerativeAI(apiKey);
+            }
+        }
+        return null;
+    }
+
+    async function processRequest(genAI: any): Promise<any> {
+        let personalityString = '';
 
-    if (apiKeyIndex === config.apiKeys.length) {
-        return 'All provided API keys are invalid.';
-    }
-    let personalityString: string = '';
-
         if (AI.AiPersonality) {
-… (3 lines not shown in the source diff)
+            const personalityFilePath = path.join(process.cwd(), AI.AiPersonality);
+            const personalityContent = fs.readFileSync(personalityFilePath, 'utf-8');
+            personalityString = personalityContent.split('\n').join(' ');
         }
 
         const generationConfig = {
-… (1 line not shown in the source diff)
+            maxOutputTokens: 750,
         };
 
         const safetySettings = [
-… (16 lines not shown in the source diff)
+            {
+                category: HarmCategory.HARM_CATEGORY_HARASSMENT,
+                threshold: HarmBlockThreshold.BLOCK_ONLY_HIGH,
+            },
+            {
+                category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
+                threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
+            },
+            {
+                category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
+                threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
+            },
+            {
+                category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
+                threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
+            },
         ];
 
         const systemInstruction = `${personalityString}\n And working on discord in serverName: ${message.serverName} at channelName: ${message.channelName} and responding to the userName: <@${message.userId}>`;
-        const model = genAI.getGenerativeModel({ model: "gemini-1.5-…
+        const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash-latest", systemInstruction, generationConfig, safetySettings });
 
         const defaultHistory = [
-… (24 lines not shown in the source diff)
+            {
+                role: "user",
+                parts: [{ text: `Hey who are you?` }],
+            },
+            {
+                role: "model",
+                parts: [{ text: `I am an AI assistant built/developed/created by Apexify.js` }],
+            },
+            {
+                role: "user",
+                parts: [{ text: `You have chat Memory?` }],
+            },
+            {
+                role: "model",
+                parts: [{ text: `Yes, I save chat history for each user if it is enabled in my configuration otherwise no data being saved.` }],
+            },
+            {
+                role: "user",
+                parts: [{ text: `How is your data/memory/chat history managed?` }],
+            },
+            {
+                role: "model",
+                parts: [{ text: `It is managed by verse.db. A database used to manage JSON/SQL/YAML/SQOL database with ease and supports schema/schemaless data.` }],
+            }
         ];
 
         let historyData: any[] | undefined = undefined;
 
         let db: any;
         if (message.db) {
-… (3 lines not shown in the source diff)
+            db = new connect({ adapter: 'json', dataPath: `${message.serverId}_ChatHistory` });
+            const data = await db.find(`${message.userId}_chatHistory`, { userId: message.userId });
+            historyData = data.results?.history || undefined;
         }
 
         const chat = model.startChat({
            history: historyData || defaultHistory,
            generationConfig
         });
-
+
         const imgURL = message.attachment?.url || null;
         let result: any;
 
         if (imgURL) {
-… (8 lines not shown in the source diff)
+            const imageData = await urlToBase64(imgURL);
+            const image = {
+                inlineData: {
+                    data: imageData,
+                    mimeType: "image/png",
+                },
+            };
+            result = await chat.sendMessage([AI.userMsg, image]);
         } else {
-… (1 line not shown in the source diff)
+            result = await chat.sendMessage(AI.userMsg);
         }
 
-        const response = …
+        const response = await result.response.text();
 
         if (message.db) {
-… (29 lines not shown in the source diff)
-        );
+            const userHistory = {
+                $push: {
+                    "history": {
+                        role: "user",
+                        parts: [{ text: `${AI.userMsg}` }]
+                    }
+                },
+            };
+
+            historyData = await db.update(`${message.userId}_chatHistory`,
+                { userId: message.userId },
+                userHistory,
+                true
+            );
+
+            const modelHistory = {
+                $push: {
+                    "history": {
+                        role: "model",
+                        parts: [{ text: `${response}` }]
+                    }
+                },
+            };
+
+            historyData = await db.update(`${message.userId}_chatHistory`,
+                { userId: message.userId },
+                modelHistory,
+                true
+            );
         }
 
         return response;
-}
+    }
+
+    async function urlToBase64(imageURL: string) {
+        try {
+            const convertedBuffer = await converter(imageURL, 'png');
+            const base64String = convertedBuffer.toString('base64');
+            return base64String;
+        } catch (error: any) {
+            throw new Error(`Failed to fetch and convert the image: ${error.message}`);
+        }
+    }
+
+    try {
+        let genAI: any;
+
+        if (AI.API_KEY && await validateKey(AI.API_KEY)) {
+            genAI = new GoogleGenerativeAI(AI.API_KEY);
+        } else {
+            genAI = await getValidGenAI(config.apiKeys, currentApiKeyIndex);
+            if (!genAI) {
+                return 'All provided API keys are invalid.';
+            }
+        }
+
+        return await processRequest(genAI);
+    } catch (e: any) {
         if (e.message) {
-… (4 lines not shown in the source diff)
-        } else if (e.message === '[GoogleGenerativeAI Error]: Candidate was blocked due to SAFETY') {
-            console.error(e);
-            return `Due to safety enabled by gemini you have been blocked.`;
+            if (e.status === 429 || e.status === 403) {
+                currentApiKeyIndex++;
+                if (currentApiKeyIndex < config.apiKeys.length) {
+                    return await geminiFlash(message, AI);
                 } else {
-
-            return `Try again later please... Either API is on a cooldown or an internal server error has occurred. If issue persists please contact the bot developer or owner of the npm package.`;
+                    return 'Ai is on a cooldown for the rest of the day. Either provide your own API key or wait for tomorrow. Check ai.google.dev for free apikeys';
                 }
-
+            } else if (e.status === 400) {
+                return `The hoster/bot owner/the used host isn't supported by gemini.`;
+            } else if (e.message === '[GoogleGenerativeAI Error]: Candidate was blocked due to SAFETY') {
+                console.error(e);
+                return `Due to safety enabled by gemini you have been blocked.`;
+            } else {
                console.error(e);
-                return …
+                return `Try again later please... Either API is on a cooldown or an internal server error has occurred. If issue persists please contact the bot developer or owner of the npm package.`;
+            }
+        } else {
+            console.error(e);
+            return 'An unknown error has occurred.';
        }
    }
 }
-
-async function urlToBase64(imageURL: string) {
-    try {
-        const convertedBuffer = await converter(imageURL, 'png');
-
-        const base64String = convertedBuffer.toString('base64');
-        return base64String;
-    } catch (error: any) {
-        throw new Error(`Failed to fetch and convert the image: ${error.message}`);
-    }
-}
-
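Taken together, this release's changes to geminiFlash add per-key validation (validateKey), rotation across config.apiKeys (getValidGenAI plus the module-level currentApiKeyIndex), support for a caller-supplied API_KEY, verse.db-backed per-user chat history, and status-aware error handling (429/403 advance the key index and retry; 400 reports an unsupported host). Below is a minimal, hypothetical usage sketch based only on the new signature shown above; the import path and all Discord-flavoured values are assumptions, not taken from the package documentation.

    // Hypothetical usage of the new geminiFlash signature (a sketch, not package docs).
    import { geminiFlash } from "apexify.js"; // assumed export path

    async function main(): Promise<void> {
      const reply = await geminiFlash(
        {
          userId: "123456789012345678",   // hypothetical Discord user ID
          serverName: "My Server",        // hypothetical server name
          serverId: "987654321098765432", // hypothetical; also names the verse.db dataPath (`${serverId}_ChatHistory`)
          channelName: "general",         // hypothetical channel name
          attachment: null,               // or { url: "https://example.com/image.png" } to exercise the image branch
          db: true,                       // true enables the verse.db-backed chat memory added in this release
        },
        {
          AiPersonality: null,            // or a file path, read relative to process.cwd()
          userMsg: "Hello!",
          API_KEY: null,                  // null falls back to config.apiKeys with validation and rotation
        }
      );
      console.log(reply);
    }

    main().catch(console.error);

When db is true, each exchange is appended via verse.db $push updates to a per-user history array of { role, parts: [{ text }] } entries, stored in the `${userId}_chatHistory` collection under the `${serverId}_ChatHistory` data path, so the same shape feeds straight back into model.startChat({ history }) on the next call.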