apexify.js 4.1.6 → 4.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,185 +3,207 @@ import path from 'path';
  import fs from 'fs';
  import config from './config';
  import { converter } from "../../canvas/utils/general functions";
- import { connect } from "verse.db";
+ import { connect } from "verse.db";
  import axios from "axios";

  let currentApiKeyIndex = 0;

- export async function geminiPro(message: { userId: string, serverName: string, serverId: string, channelName: string, attachment: any, db: boolean }, AI: { AiPersonality: string | null, userMsg: string, API_KEY: string | null }): Promise<any> {
+ export async function geminiPro(message: { userId: string, serverName: string, serverId: string, channelName: string, attachment: any, db: boolean }, AI: { AiPersonality: string | null, userMsg: string, API_KEY: string | null }): Promise<any> {

- try {
- let apiKeyIndex = currentApiKeyIndex;
- let genAI: any;
- while (apiKeyIndex < config.apiKeys.length) {
- const validateKey = await axios.get(`https://generativelanguage.googleapis.com/v1beta/models?key=${config.apiKeys[apiKeyIndex]}`);
- if (validateKey.status === 200) {
- genAI = new GoogleGenerativeAI(config.apiKeys[apiKeyIndex]);
-
- break;
- } else {
- apiKeyIndex++;
- }
- }
+ async function validateKey(apiKey: string) {
+ try {
+ const validateResponse = await axios.get(`https://generativelanguage.googleapis.com/v1beta/models?key=${apiKey}`);
+ return validateResponse.status === 200;
+ } catch (error) {
+ console.error(`Error validating API key: ${apiKey}`, error);
+ return false;
+ }
+ }
+
+ async function getValidGenAI(apiKeys: string[], initialIndex: number): Promise<any> {
+ for (let i = initialIndex; i < apiKeys.length; i++) {
+ const apiKey = apiKeys[i];
+ if (await validateKey(apiKey)) {
+ currentApiKeyIndex = i;
+ return new GoogleGenerativeAI(apiKey);
+ }
+ }
+ return null;
+ }
+
+ async function processRequest(genAI: any): Promise<any> {
+ let personalityString = '';

- if (apiKeyIndex === config.apiKeys.length) {
- return 'All provided API keys are invalid.';
- }
- let personalityString: string = '';
-
  if (AI.AiPersonality) {
- const personalityFilePath = path.join(process.cwd(), AI.AiPersonality);
- const personalityContent = fs.readFileSync(personalityFilePath, 'utf-8');
- personalityString = personalityContent.split('\n').join(' ');
+ const personalityFilePath = path.join(process.cwd(), AI.AiPersonality);
+ const personalityContent = fs.readFileSync(personalityFilePath, 'utf-8');
+ personalityString = personalityContent.split('\n').join(' ');
  }

  const generationConfig = {
- maxOutputTokens: 750,
+ maxOutputTokens: 750,
  };

  const safetySettings = [
- {
- category: HarmCategory.HARM_CATEGORY_HARASSMENT,
- threshold: HarmBlockThreshold.BLOCK_ONLY_HIGH,
- },
- {
- category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
- threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
- },
- {
- category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
- threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
- },
- {
- category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
- threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
- },
+ {
+ category: HarmCategory.HARM_CATEGORY_HARASSMENT,
+ threshold: HarmBlockThreshold.BLOCK_ONLY_HIGH,
+ },
+ {
+ category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
+ threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
+ },
+ {
+ category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
+ threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
+ },
+ {
+ category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
+ threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
+ },
  ];

  const systemInstruction = `${personalityString}\n And working on discord in serverName: ${message.serverName} at channelName: ${message.channelName} and responding to the userName: <@${message.userId}>`;
  const model = genAI.getGenerativeModel({ model: "gemini-1.5-pro-latest", systemInstruction, generationConfig, safetySettings });

  const defaultHistory = [
- {
- role: "user",
- parts: [{ text: `Hey who are you?` }],
- },
- {
- role: "model",
- parts: [{ text: `I am an AI assistant built/developed/created by Apexify.js` }],
- },
- {
- role: "user",
- parts: [{ text: `You have chat Memory?` }],
- },
- {
- role: "model",
- parts: [{ text: `Yes, i save chat history for each user if it is enabled in my configuration otherwise no data being saved.` }],
- },
- {
- role: "user",
- parts: [{ text: `How is your data/memory/chat history is managed` }],
- },
- {
- role: "model",
- parts: [{ text: `It is managed by verse.db. A database used to manage JSON/SQL/YAML/SQOL database with ease and supports schema/schemaless data.` }],
- }
+ {
+ role: "user",
+ parts: [{ text: `Hey who are you?` }],
+ },
+ {
+ role: "model",
+ parts: [{ text: `I am an AI assistant built/developed/created by Apexify.js` }],
+ },
+ {
+ role: "user",
+ parts: [{ text: `You have chat Memory?` }],
+ },
+ {
+ role: "model",
+ parts: [{ text: `Yes, I save chat history for each user if it is enabled in my configuration otherwise no data being saved.` }],
+ },
+ {
+ role: "user",
+ parts: [{ text: `How is your data/memory/chat history managed?` }],
+ },
+ {
+ role: "model",
+ parts: [{ text: `It is managed by verse.db. A database used to manage JSON/SQL/YAML/SQOL database with ease and supports schema/schemaless data.` }],
+ }
  ];

  let historyData: any[] | undefined = undefined;

  let db: any;
  if (message.db) {
- db = new connect({ adapter: 'json', dataPath: `${message.serverId}_ChatHistory` });
- const data = await db.find(`${message.userId}_chatHistory`, { userId: message.userId });
- historyData = data.results?.history || undefined;
+ db = new connect({ adapter: 'json', dataPath: `${message.serverId}_ChatHistory` });
+ const data = await db.find(`${message.userId}_chatHistory`, { userId: message.userId });
+ historyData = data.results?.history || undefined;
  }

  const chat = model.startChat({
  history: historyData || defaultHistory,
  generationConfig
  });
-
+
  const imgURL = message.attachment?.url || null;
  let result: any;

  if (imgURL) {
- const imageData = await urlToBase64(imgURL)
- const image = {
- inlineData: {
- data: imageData,
- mimeType: "image/png",
- },
- };
- result = await chat.sendMessage([AI.userMsg, image]);
+ const imageData = await urlToBase64(imgURL);
+ const image = {
+ inlineData: {
+ data: imageData,
+ mimeType: "image/png",
+ },
+ };
+ result = await chat.sendMessage([AI.userMsg, image]);
  } else {
- result = await chat.sendMessage(AI.userMsg);
+ result = await chat.sendMessage(AI.userMsg);
  }

- const response = await result.response.text();
+ const response = await result.response.text();

  if (message.db) {
-
- const userHistory = {
- $push: {
- "history": {
- role: "user",
- parts: [{ text: `${AI.userMsg}` }]
- }
- },
- };
-
- historyData = await db.update(`${message.userId}_chatHistory`,
- { userId: message.userId },
- userHistory,
- true
- );
-
- const modelHistory = {
- $push: {
- "history": {
- role: "model",
- parts: [{ text: `${response}` }]
- }
- },
- };
-
- historyData = await db.update(`${message.userId}_chatHistory`,
- { userId: message.userId },
- modelHistory,
- true
- );
+ const userHistory = {
+ $push: {
+ "history": {
+ role: "user",
+ parts: [{ text: `${AI.userMsg}` }]
+ }
+ },
+ };
+
+ historyData = await db.update(`${message.userId}_chatHistory`,
+ { userId: message.userId },
+ userHistory,
+ true
+ );
+
+ const modelHistory = {
+ $push: {
+ "history": {
+ role: "model",
+ parts: [{ text: `${response}` }]
+ }
+ },
+ };
+
+ historyData = await db.update(`${message.userId}_chatHistory`,
+ { userId: message.userId },
+ modelHistory,
+ true
+ );
  }

  return response;
- } catch (e: any) {
+ }
+
+ async function urlToBase64(imageURL: string) {
+ try {
+ const convertedBuffer = await converter(imageURL, 'png');
+ const base64String = convertedBuffer.toString('base64');
+ return base64String;
+ } catch (error: any) {
+ throw new Error(`Failed to fetch and convert the image: ${error.message}`);
+ }
+ }
+
+ try {
+ let genAI: any;
+
+ if (AI.API_KEY && await validateKey(AI.API_KEY)) {
+ genAI = new GoogleGenerativeAI(AI.API_KEY);
+ } else {
+ genAI = await getValidGenAI(config.apiKeys, currentApiKeyIndex);
+ if (!genAI) {
+ return 'All provided API keys are invalid.';
+ }
+ }
+
+ return await processRequest(genAI);
+ } catch (e: any) {
  if (e.message) {
- if (e.message === '[GoogleGenerativeAI Error]: Error fetching from https://generativelanguage.googleapis.com/v1/models/gemini-pro:generateContent: [400 Bad Request] User location is not supported for the API use.') {
- return `The hoster/bot owner/the used host isn't supported by gemini.`;
- } else if (e.response && (e.response.status === 429 || e.response.status === 403)) {
- return 'Ai is on a cooldown for the rest of the day. Either provide your own API key or wait for tomorrow. Check ai.google.dev for free apikeys';
- } else if (e.message === '[GoogleGenerativeAI Error]: Candidate was blocked due to SAFETY') {
- console.error(e);
- return `Due to safety enabled by gemini you have been blocked.`;
+ if (e.status === 429 || e.status === 403) {
+ currentApiKeyIndex++;
+ if (currentApiKeyIndex < config.apiKeys.length) {
+ return await geminiPro(message, AI);
  } else {
- console.error(e);
- return `Try again later please... Either API is on a cooldown or an internal server error has occurred. If issue persists please contact the bot developer or owner of the npm package.`;
+ return 'Ai is on a cooldown for the rest of the day. Either provide your own API key or wait for tomorrow. Check ai.google.dev for free apikeys';
  }
- } else {
+ } else if (e.status === 400) {
+ return `The hoster/bot owner/the used host isn't supported by gemini.`;
+ } else if (e.message === '[GoogleGenerativeAI Error]: Candidate was blocked due to SAFETY') {
+ console.error(e);
+ return `Due to safety enabled by gemini you have been blocked.`;
+ } else {
  console.error(e);
- return 'An unknown error has occurred.';
+ return `Try again later please... Either API is on a cooldown or an internal server error has occurred. If issue persists please contact the bot developer or owner of the npm package.`;
+ }
+ } else {
+ console.error(e);
+ return 'An unknown error has occurred.';
  }
  }
  }
-
- async function urlToBase64(imageURL: string) {
- try {
- const convertedBuffer = await converter(imageURL, 'png');
-
- const base64String = convertedBuffer.toString('base64');
- return base64String;
- } catch (error: any) {
- throw new Error(`Failed to fetch and convert the image: ${error.message}`);
- }
- }
-
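
For orientation, here is a minimal usage sketch of the reworked geminiPro export above. It is not part of the diff: the import path from the package root, the environment-variable name, and the sample ids are assumptions for illustration, while the two object shapes follow the signature shown in this hunk.

// Hypothetical usage sketch (not part of the released code); assumes geminiPro
// is re-exported from the package root and a Gemini key is available in the env.
import { geminiPro } from 'apexify.js';

async function replyWithGemini(): Promise<void> {
  const reply = await geminiPro(
    {
      userId: '123456789012345678',   // Discord user the bot is answering
      serverName: 'My Server',
      serverId: '987654321098765432',
      channelName: 'general',
      attachment: null,               // or { url: 'https://example.com/image.png' } to attach an image
      db: false,                      // true persists chat history through verse.db
    },
    {
      AiPersonality: null,            // or a path to a personality text file, resolved from process.cwd()
      userMsg: 'Hello there!',
      API_KEY: process.env.GEMINI_API_KEY ?? null, // falls back to config.apiKeys when missing or invalid
    },
  );
  console.log(reply);                 // model text, or one of the error strings returned above
}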
@@ -1,4 +1,4 @@
- import axios, { AxiosResponse } from 'axios';
+ import axios, { AxiosResponse, AxiosError } from 'axios';

  interface aiOptions {
  API_KEY?: string | null;
@@ -10,14 +10,19 @@ export async function llamaChat({ API_KEY, prompt }: aiOptions): Promise<string>

  try {
  const payload = { prompt: prompt };
- const response: AxiosResponse = await axios.post(`https://api.rsnai.org/api/v1/user/codellama`,
- payload,
- {
+ const response: AxiosResponse = await axios.post(`https://api.rsnai.org/api/v1/user/codellama`, payload, {
  headers: { Authorization: `Bearer ${apiKey}` },
  });

  return response.data.message;
- } catch {
- return 'The Api is on a cool down for a 50 seconds.'
+ } catch (e: any) {
+ if (e.response && e.response.data.message === 'Invalid API key.') {
+ const defaultResponse = await axios.post(`https://api.rsnai.org/api/v1/user/codellama`, { prompt }, {
+ headers: { Authorization: `Bearer rsnai_SbLbFcwdT2h2KoYet2LS0F34` },
+ });
+ return defaultResponse.data.message;
+ } else {
+ return 'The API is on cooldown for 50 seconds.';
+ }
  }
  }
@@ -1,4 +1,4 @@
- import axios, { AxiosResponse } from 'axios';
+ import axios, { AxiosResponse, AxiosError } from 'axios';

  interface aiOptions {
  API_KEY?: string | null;
@@ -10,14 +10,19 @@ export async function mixtral({ API_KEY, prompt }: aiOptions): Promise<string> {

  try {
  const payload = { prompt: prompt };
- const response: AxiosResponse = await axios.post(`https://api.rsnai.org/api/v1/user/mixtral`,
- payload,
- {
+ const response: AxiosResponse = await axios.post(`https://api.rsnai.org/api/v1/user/mixtral`, payload, {
  headers: { Authorization: `Bearer ${apiKey}` },
  });
-
+
  return response.data.message;
- } catch {
- return 'The Api is on a cool down for a 50 seconds.'
+ } catch (e: any) {
+ if (e.response && e.response.data.message === 'Invalid API key.') {
+ const defaultResponse = await axios.post(`https://api.rsnai.org/api/v1/user/mixtral`, { prompt }, {
+ headers: { Authorization: `Bearer rsnai_SbLbFcwdT2h2KoYet2LS0F34` },
+ });
+ return defaultResponse.data.message;
+ } else {
+ return 'The API is on cooldown for 50 seconds.';
+ }
  }
  }
@@ -1,4 +1,4 @@
- import axios, { AxiosResponse } from 'axios';
+ import axios, { AxiosResponse, AxiosError } from 'axios';

  interface aiOptions {
  API_KEY?: string | null;
@@ -10,14 +10,19 @@ export async function openChat({ API_KEY, prompt }: aiOptions): Promise<string>

  try {
  const payload = { prompt: prompt };
- const response: AxiosResponse = await axios.post(`https://api.rsnai.org/api/v1/user/openchat`,
- payload,
- {
+ const response: AxiosResponse = await axios.post(`https://api.rsnai.org/api/v1/user/openChat`, payload, {
  headers: { Authorization: `Bearer ${apiKey}` },
  });

  return response.data.message;
- } catch {
- return 'The Api is on a cool down for a 50 seconds.'
+ } catch (e: any) {
+ if (e.response && e.response.data.message === 'Invalid API key.') {
+ const defaultResponse = await axios.post(`https://api.rsnai.org/api/v1/user/openChat`, { prompt }, {
+ headers: { Authorization: `Bearer rsnai_SbLbFcwdT2h2KoYet2LS0F34` },
+ });
+ return defaultResponse.data.message;
+ } else {
+ return 'The API is on cooldown for 50 seconds.';
+ }
  }
  }
@@ -1,4 +1,4 @@
- import axios, { AxiosResponse } from 'axios';
+ import axios, { AxiosResponse, AxiosError } from 'axios';

  interface aiOptions {
  API_KEY?: string | null;
@@ -10,14 +10,19 @@ export async function v4({ API_KEY, prompt }: aiOptions): Promise<string> {

  try {
  const payload = { prompt: prompt };
- const response: AxiosResponse = await axios.post(`https://api.rsnai.org/api/v1/user/gpt4`,
- payload,
- {
+ const response: AxiosResponse = await axios.post(`https://api.rsnai.org/api/v1/user/gpt4`, payload, {
  headers: { Authorization: `Bearer ${apiKey}` },
  });

  return response.data.message;
- } catch {
- return 'The Api is on a cool down for a 50 seconds.'
+ } catch (e: any) {
+ if (e.response && e.response.data.message === 'Invalid API key.') {
+ const defaultResponse = await axios.post(`https://api.rsnai.org/api/v1/user/gpt4`, { prompt }, {
+ headers: { Authorization: `Bearer rsnai_SbLbFcwdT2h2KoYet2LS0F34` },
+ });
+ return defaultResponse.data.message;
+ } else {
+ return 'The API is on cooldown for 50 seconds.';
+ }
  }
  }
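
The four rsnai.org wrappers above (llamaChat, mixtral, openChat, v4) share the same call shape: each takes an aiOptions object with API_KEY and prompt, resolves to the provider's message text, and in 4.1.8 retries with the bundled default key when the supplied key is rejected. A minimal call sketch follows; the import path and environment-variable name are assumptions, not part of the diff.

// Hypothetical usage sketch (not part of the released code).
import { llamaChat, mixtral, openChat, v4 } from 'apexify.js';

async function compareModels(prompt: string): Promise<void> {
  const apiKey = process.env.RSNAI_API_KEY ?? null; // env variable name is an assumption

  const code = await llamaChat({ API_KEY: apiKey, prompt }); // codellama endpoint
  const mix  = await mixtral({ API_KEY: apiKey, prompt });   // mixtral endpoint
  const chat = await openChat({ API_KEY: apiKey, prompt });  // openChat endpoint
  const gpt  = await v4({ API_KEY: apiKey, prompt });        // gpt4 endpoint

  console.log({ code, mix, chat, gpt });                     // each is the reply text or a cooldown notice
}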
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "apexify.js",
- "version": "4.1.6",
+ "version": "4.1.8",
  "description": "Ai and Canvas library. Supports typescript and javascript",
  "main": "./dist/index.js",
  "types": "./dist/index.d.ts",