apexify.js 2.3.0 → 2.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,6 +1,18 @@
1
1
  ### Jedi-Studio Introduce
2
2
  Greetings everyone! 🌟 This marks the release of a beta version for our package. If you encounter any issues or need assistance, feel free to reach out. Direct message me on Discord at `jedi.79` (ID: 1130583393176920095).
3
3
 
4
+
5
+ # Added new 🆕
6
+
7
+ ### Check our server for the code and usage for leveling system
8
+ <details>
9
+ - New chat models have been added (starChat, zephyr-beta, gemma-v3 and gemma-v4)
10
+ - Leveling system (customizable with new features and themes using canvas)
11
+
12
+ - Our [Server Support](https://discord.gg/FsmKSmxJjD)
13
+ </details>
14
+
15
+
4
16
  # 🚀 Getting Started
5
17
  <details>
6
18
 
@@ -73,11 +85,13 @@ starter(client, options);
73
85
  </details>
74
86
 
75
87
 
76
- ## 🚀 Usage Guidelines 🤖
88
+ # 🚀 Usage Guidelines
77
89
 
78
- # Ai Chat & Imagine
90
+ ## 🤖 Ai Chat & Imagine
79
91
  <details>
80
92
 
93
+ - **Added new chat models**
94
+
81
95
  For a standard reply without AI images or voice messages, set both `draw` and `voice` to `false`. Activate voice replies and AI images by setting them to `true`. Trigger the bot to draw AI images with specific words or phrases using `drawTrigger`.
82
96
 
83
97
  ### Usage as prefix
@@ -118,6 +132,7 @@ const options = {
118
132
  },
119
133
  readFiles: false, // Whether to read attached files (true/false)
120
134
  enhancer: false, // Whether to enhance text before processing (true/false)
135
+ nsfw: false, // Whether to prevent nsfw generated images (true/false)
121
136
  };
122
137
 
123
138
  await apexAI(message, options)
@@ -237,6 +252,111 @@ module.exports = {
237
252
  </details>
238
253
  </details>
239
254
 
255
+ ## 📈 Leveling-System 🌠
256
+ - Note: **This system uses MongoDB as its database; you need to ensure a connection is established before using it.** You can make your own connection to MongoDB or use the mongoConnect function which is exported from apexify.js.
257
+ <details>
258
+
259
+ ```js
260
+ const LevelingSystem = require('apexify.js');
261
+ const level_system = new LevelingSystem({
262
+ XpCount: 15, // How many XP each message is equal to (1 message = 15 XP)
263
+ rate: 2, // Rate message multiplier 1 message * 2 so 30xp per each msg
264
+ channelId: '1212829696358875196', // Channel Id to send in it level up messages
265
+ guildId: '1206054171657375754', // Your serverId
266
+ levelingMessage:'Congrates {user} you level up to level {level}.', // Leveling up message
267
+ levelsArray: [ // You can use your own levels system, or leave it as an empty array and it will use the default one built into the package
268
+ { level: 0, xpCount: 0 },
269
+ { level: 1, xpCount: 10 },
270
+ { level: 2, xpCount: 50 },
271
+ { level: 3, xpCount: 75 },
272
+ { level: 4, xpCount: 100 },
273
+ { level: 5, xpCount: 150 },
274
+ { level: 6, xpCount: 200 },
275
+ { level: 7, xpCount: 300 },
276
+ { level: 8, xpCount: 400 },
277
+ { level: 9, xpCount: 500 },
278
+ { level: 10, xpCount: 700 },
279
+ { level: 11, xpCount: 900 },
280
+ { level: 12, xpCount: 1200 },
281
+ { level: 13, xpCount: 1500 },
282
+ { level: 14, xpCount: 1800 },
283
+ { level: 15, xpCount: 2200 },
284
+ { level: 16, xpCount: 2600 },
285
+ { level: 17, xpCount: 3000 },
286
+ { level: 18, xpCount: 3500 },
287
+ { level: 19, xpCount: 4000 },
288
+ { level: 20, xpCount: 4500 },
289
+ { level: 21, xpCount: 5000 },
290
+ { level: 22, xpCount: 6000 },
291
+ { level: 23, xpCount: 7000 },
292
+ { level: 24, xpCount: 8000 },
293
+ { level: 25, xpCount: 9000 },
294
+ { level: 26, xpCount: 10000 },
295
+ { level: 27, xpCount: 12000 },
296
+ { level: 28, xpCount: 14000 },
297
+ { level: 29, xpCount: 16000 },
298
+ { level: 30, xpCount: 20000 },
299
+ ],
300
+ });
301
+
302
+
303
+ /// Using level system inside messageCreate event
304
+
305
+ client.on('messageCreate', async (m) => {
306
+ const userId = m.author.id;
307
+ const guild = m.guild;
308
+ try {
309
+ await level_system.setupConfig() /// saving data above into mongodb
310
+
311
+ await level_system.addXp(userId, guild.id); /// adding xp to the user on each msg sent
312
+
313
+ await level_system.disableLevel(guild.id); /// remove/disable leveling system for the current server (note this will delete server data)
314
+
315
+ const card = await level_system.xpCard(m, guild.id, userId);// display a leveling card user rank and his current xp
316
+ m.reply({ files: [card] });
317
+
318
+ await level_system.setUserBanner(guild.id, userId); // set user banner in the xpCard
319
+
320
+ const userData = await level_system.userInfo(guild.id, userId); // returns user data (xp, level, userId, bannerURL)
321
+ console.log(userData);
322
+
323
+ const serverData = await level_system.checkConfig(guild.id);
324
+ console.log(serverData); // returns (serverid, channelId, xpCount, rate, levelsArray, levelUpmessage)
325
+
326
+
327
+ const topusers = await level_system.serverLeaderBoard(guild.id, '20'); // 20 is the number of users to be displayed, for example the top 20
328
+ console.log(topusers); // returns top 20 users in order from highest to lowest
329
+
330
+ const topGlobal = await level_system.topGlobal('10'); // top 10
331
+ console.log(topGlobal); // Returns each user in order with data displayment for each
332
+
333
+ /// levelup event to send level up card once user moves to the next level
334
+ level_system.on('levelup', async (levelupmsg, userId, userlevel, userXp, channelId, guildId) => {
335
+ const guild = message.client.guilds.cache.get(guildId);
336
+ const channel = guild.channels.cache.get(channelId);
337
+ const card = await level_system.levelUpCard(message, userId, guildId);
338
+ await channel.send({ files: [card] })
339
+ });
340
+
341
+ const editXp = await level_system.editXp(guild.id, userId, xpAmount); // The xpAmount to add to or remove from the user
342
+ console.log(editXp); // returns new xp and level of the user
343
+
344
+
345
+ const removeUser = await level_system.removeUser(guild.id, userId); // remove the user from data
346
+
347
+ const serverleaderboard = await level_system.serverLeaderBoardCard(m, guildId, version = 1 or 2, 'set usernames color or put word random');
348
+ m.reply({ files: [card] });
349
+
350
+ await level_system.liveServerLeaderboard(m, guild.id, 'channel id to set the live leaderboard at', 20000, 1); /// 20000 is the update timer for the live leaderboard — keep it above 10 seconds; the final argument is the version of the live board, use either 1 or 2
351
+ } catch (error) {
352
+ console.error(error.message);
353
+ }
354
+
355
+ });
356
+
357
+ ```
358
+
359
+ </details>
240
360
 
241
361
  ## 📊 Database MongoDb (Online) 📁
242
362
  <details>
@@ -896,7 +1016,7 @@ client.on('interactionCreate', async (i) => {
896
1016
  ## 📚 More Info & Documentation 📖
897
1017
  <details>
898
1018
 
899
- - Explore a detailed list of apexify.js and their usage at our [Support Server](https://discord.gg/2YsyePDB).
1019
+ - Explore a detailed list of apexify.js and their usage at our [Support Server](https://discord.gg/FsmKSmxJjD).
900
1020
 
901
1021
  ## 🚨 Important Notes 📌
902
1022
 
@@ -991,6 +1111,11 @@ Keep experimenting, and feel free to contact me for assistance! Suggestions and
991
1111
  - turbo
992
1112
  - turbo-16k
993
1113
  - gemini
1114
+ - apexChat
1115
+ - gemma-v3
1116
+ - gemma-v4
1117
+ - starChat
1118
+ - zephyr-beta
994
1119
  </details>
995
1120
 
996
1121
 
package/index.js CHANGED
@@ -26,7 +26,7 @@ var checkLibraryVersion = function (library, version) {
26
26
  }
27
27
  };
28
28
  checkLibraryVersion("discord.js", "14");
29
- axios_1.default
29
+ axios_1
30
30
  .get("https://registry.npmjs.com/-/v1/search?text=apexify.js")
31
31
  .then(function (response) {
32
32
  var _a, _b;
@@ -106,11 +106,4 @@ axios_1.default
106
106
  Object.defineProperty(exports, "resumeCommand", { enumerable: true, get: function () { return utils_1.resumeCommand; } });
107
107
  Object.defineProperty(exports, "previousCommand", { enumerable: true, get: function () { return utils_1.previousCommand; } });
108
108
  Object.defineProperty(exports, "starter", { enumerable: true, get: function () { return utils_1.starter; } });
109
- Object.defineProperty(exports, "typeWriter", { enumerable: true, get: function () { return utils_1.typeWriter; } });
110
-
111
- const { apexAI } = require('apexify.js');
112
-
113
-
114
- (async () => {
115
-
116
- })();
109
+ Object.defineProperty(exports, "typeWriter", { enumerable: true, get: function () { return utils_1.typeWriter; } });
package/lib/ai/apexAI.js CHANGED
@@ -36,7 +36,7 @@ var __generator = (this && this.__generator) || function (thisArg, body) {
36
36
  }
37
37
  };
38
38
  Object.defineProperty(exports, "__esModule", { value: true });
39
- exports.apexai = exports.apexAI = void 0;
39
+ exports.zephyr_beta = exports.starChat = exports.gemmaAi = exports.apexai = exports.apexAI = void 0;
40
40
  var hercai_1 = require("hercai");
41
41
  var utils_1 = require("./utils");
42
42
  var axios_1 = require("axios");
@@ -63,7 +63,7 @@ function apexAI(message, options) {
63
63
  _y.sent();
64
64
  _y.label = 3;
65
65
  case 3:
66
- _y.trys.push([3, 50, , 51]);
66
+ _y.trys.push([3, 58, , 59]);
67
67
  _c = options.voice, voice = _c === void 0 ? false : _c, _d = options.voiceModel, voiceModel = _d === void 0 ? "google" : _d, _e = options.voice_code, voice_code = _e === void 0 ? "en-US-3" : _e, _f = options.apiKey, apiKey = _f === void 0 ? "" : _f, _g = options.type, type = _g === void 0 ? "b" : _g, _h = options.draw, draw = _h === void 0 ? false : _h, _j = options.drawTrigger, drawTrigger = _j === void 0 ? ["create", "رسم"] : _j, _k = options.imageModel, imageModel = _k === void 0 ? "prodia" : _k, _l = options.numOfImages, numOfImages = _l === void 0 ? 2 : _l, _m = options.chatModel, chatModel = _m === void 0 ? "v3" : _m, _o = options.keywords, keywords = _o === void 0 ? [] : _o, _p = options.keywordResponses, keywordResponses = _p === void 0 ? {} : _p, _q = options.loader, loader = _q === void 0 ? {
68
68
  loadingMessage: 'loading...',
69
69
  loadingTimer: 3000
@@ -167,114 +167,182 @@ function apexAI(message, options) {
167
167
  return [4 /*yield*/, (0, utils_1.generateVoiceResponse)(message, numOfImages, textToDraw, hercai, response, imageModel, draw, drawValid, chatModel, voiceModel, voice_code, type, apiKey, nsfw)];
168
168
  case 24: return [2 /*return*/, _y.sent()];
169
169
  case 25:
170
- _y.trys.push([25, 31, , 36]);
170
+ _y.trys.push([25, 39, , 44]);
171
171
  if (!(chatModel === 'apexChat')) return [3 /*break*/, 27];
172
172
  return [4 /*yield*/, apexai(finalText)];
173
173
  case 26:
174
174
  response = _y.sent();
175
- return [3 /*break*/, 30];
175
+ return [3 /*break*/, 38];
176
176
  case 27:
177
- if (!(chatModel === 'v3' || chatModel === 'v3-32k' || chatModel === 'turbo' || chatModel === 'turbo-16k' || chatModel === 'gemini')) return [3 /*break*/, 29];
177
+ if (!(chatModel === 'gemma-v3')) return [3 /*break*/, 29];
178
+ return [4 /*yield*/, gemmaAi_3(finalText)];
179
+ case 28:
180
+ response = _y.sent();
181
+ return [3 /*break*/, 38];
182
+ case 29:
183
+ if (!(chatModel === 'gemma-v4')) return [3 /*break*/, 31];
184
+ return [4 /*yield*/, gemmaAi_4(finalText)];
185
+ case 30:
186
+ response = _y.sent();
187
+ return [3 /*break*/, 38];
188
+ case 31:
189
+ if (!(chatModel === 'starChat')) return [3 /*break*/, 33];
190
+ return [4 /*yield*/, starChat(finalText)];
191
+ case 32:
192
+ response = _y.sent();
193
+ return [3 /*break*/, 38];
194
+ case 33:
195
+ if (!(chatModel === 'zephyr-beta')) return [3 /*break*/, 35];
196
+ return [4 /*yield*/, zephyr_beta(finalText)];
197
+ case 34:
198
+ response = _y.sent();
199
+ return [3 /*break*/, 38];
200
+ case 35:
201
+ if (!(chatModel === 'v3' || chatModel === 'v3-32k' || chatModel === 'turbo' || chatModel === 'turbo-16k' || chatModel === 'gemini')) return [3 /*break*/, 37];
178
202
  return [4 /*yield*/, hercai.question({
179
203
  model: chatModel,
180
204
  content: finalText,
181
205
  })];
182
- case 28:
206
+ case 36:
183
207
  response = _y.sent();
184
- return [3 /*break*/, 30];
185
- case 29: throw new Error('Invalid chat model');
186
- case 30: return [3 /*break*/, 36];
187
- case 31:
208
+ return [3 /*break*/, 38];
209
+ case 37: throw new Error('Invalid chat model');
210
+ case 38: return [3 /*break*/, 44];
211
+ case 39:
188
212
  error_1 = _y.sent();
189
- if (!(error_1.response && error_1.response.status === 429)) return [3 /*break*/, 32];
213
+ if (!(error_1.response && error_1.response.status === 429)) return [3 /*break*/, 40];
190
214
  console.error("Too many requests. Please try again later.");
191
215
  return [2 /*return*/, message.reply("Please wait i am in a cool down for a minute")];
192
- case 32:
193
- if (!(error_1.response && error_1.response.status === 500)) return [3 /*break*/, 33];
216
+ case 40:
217
+ if (!(error_1.response && error_1.response.status === 500)) return [3 /*break*/, 41];
194
218
  console.error("Internal server error. Please try again later.");
195
219
  return [2 /*return*/, message.reply("Please wait i am in a cool down for a minute")];
196
- case 33: return [4 /*yield*/, message.reply("Please wait i am in a cool down for a minute")];
197
- case 34:
220
+ case 41: return [4 /*yield*/, message.reply("Please wait i am in a cool down for a minute")];
221
+ case 42:
198
222
  _y.sent();
199
223
  throw new Error("The Api is on a cool down for 10 seconds");
200
- case 35: return [3 /*break*/, 36];
201
- case 36:
202
- if (!(typeof response === 'string')) return [3 /*break*/, 43];
203
- if (!(response.length <= 2000)) return [3 /*break*/, 38];
224
+ case 43: return [3 /*break*/, 44];
225
+ case 44:
226
+ if (!(typeof response === 'string')) return [3 /*break*/, 51];
227
+ if (!(response.length <= 2000)) return [3 /*break*/, 46];
204
228
  return [4 /*yield*/, message.reply({
205
229
  content: response,
206
230
  allowedMentions: { repliedUser: false },
207
231
  })];
208
- case 37:
232
+ case 45:
209
233
  _y.sent();
210
- return [3 /*break*/, 42];
211
- case 38:
234
+ return [3 /*break*/, 50];
235
+ case 46:
212
236
  parts = [];
213
237
  while (response.length > 0) {
214
238
  parts.push(response.substring(0, 1999));
215
239
  response = response.substring(1999);
216
240
  }
217
241
  _w = 0, parts_1 = parts;
218
- _y.label = 39;
219
- case 39:
220
- if (!(_w < parts_1.length)) return [3 /*break*/, 42];
242
+ _y.label = 47;
243
+ case 47:
244
+ if (!(_w < parts_1.length)) return [3 /*break*/, 50];
221
245
  part = parts_1[_w];
222
246
  return [4 /*yield*/, message.reply({
223
247
  content: part,
224
248
  allowedMentions: { repliedUser: false },
225
249
  })];
226
- case 40:
250
+ case 48:
227
251
  _y.sent();
228
- _y.label = 41;
229
- case 41:
252
+ _y.label = 49;
253
+ case 49:
230
254
  _w++;
231
- return [3 /*break*/, 39];
232
- case 42: return [3 /*break*/, 49];
233
- case 43:
234
- if (!(typeof response === 'object' && response.reply)) return [3 /*break*/, 49];
235
- if (!(response.reply.length <= 2000)) return [3 /*break*/, 45];
255
+ return [3 /*break*/, 47];
256
+ case 50: return [3 /*break*/, 57];
257
+ case 51:
258
+ if (!(typeof response === 'object' && response.reply)) return [3 /*break*/, 57];
259
+ if (!(response.reply.length <= 2000)) return [3 /*break*/, 53];
236
260
  return [4 /*yield*/, message.reply({
237
261
  content: response.reply,
238
262
  allowedMentions: { repliedUser: false },
239
263
  })];
240
- case 44:
264
+ case 52:
241
265
  _y.sent();
242
- return [3 /*break*/, 49];
243
- case 45:
266
+ return [3 /*break*/, 57];
267
+ case 53:
244
268
  parts = [];
245
269
  while (response.reply.length > 0) {
246
270
  parts.push(response.reply.substring(0, 1999));
247
271
  response.reply = response.reply.substring(1999);
248
272
  }
249
273
  _x = 0, parts_2 = parts;
250
- _y.label = 46;
251
- case 46:
252
- if (!(_x < parts_2.length)) return [3 /*break*/, 49];
274
+ _y.label = 54;
275
+ case 54:
276
+ if (!(_x < parts_2.length)) return [3 /*break*/, 57];
253
277
  part = parts_2[_x];
254
278
  return [4 /*yield*/, message.reply({
255
279
  content: part,
256
280
  allowedMentions: { repliedUser: false },
257
281
  })];
258
- case 47:
282
+ case 55:
259
283
  _y.sent();
260
- _y.label = 48;
261
- case 48:
284
+ _y.label = 56;
285
+ case 56:
262
286
  _x++;
263
- return [3 /*break*/, 46];
264
- case 49: return [3 /*break*/, 51];
265
- case 50:
287
+ return [3 /*break*/, 54];
288
+ case 57: return [3 /*break*/, 59];
289
+ case 58:
266
290
  error_2 = _y.sent();
267
291
  console.error("Error processing message in file");
268
292
  throw error_2;
269
- case 51: return [2 /*return*/];
293
+ case 59: return [2 /*return*/];
270
294
  }
271
295
  });
272
296
  });
273
297
  }
274
298
  exports.apexAI = apexAI;
299
+ function gemmaAi_4(prompt) {
300
+ return __awaiter(this, void 0, void 0, function () {
301
+ var response, error_3;
302
+ return __generator(this, function (_a) {
303
+ switch (_a.label) {
304
+ case 0:
305
+ _a.trys.push([0, 2, , 3]);
306
+ return [4 /*yield*/, axios_1.default.post('https://api-inference.huggingface.co/models/google/gemma-7b-it', { inputs: prompt }, {
307
+ headers: { 'Authorization': "Bearer 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" }
308
+ })];
309
+ case 1:
310
+ response = _a.sent();
311
+ return [2 /*return*/, response.data[0].generated_text];
312
+ case 2:
313
+ error_3 = _a.sent();
314
+ console.error('Error fetching response:', error_3);
315
+ return [2 /*return*/, null];
316
+ case 3: return [2 /*return*/];
317
+ }
318
+ });
319
+ });
320
+ }
321
+ function gemmaAi_3(prompt) {
322
+ return __awaiter(this, void 0, void 0, function () {
323
+ var response, error_4;
324
+ return __generator(this, function (_a) {
325
+ switch (_a.label) {
326
+ case 0:
327
+ _a.trys.push([0, 2, , 3]);
328
+ return [4 /*yield*/, axios_1.default.post('https://api-inference.huggingface.co/models/google/gemma-2b-it', { inputs: prompt }, {
329
+ headers: { 'Authorization': "Bearer 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" }
330
+ })];
331
+ case 1:
332
+ response = _a.sent();
333
+ return [2 /*return*/, response.data[0].generated_text];
334
+ case 2:
335
+ error_4 = _a.sent();
336
+ console.error('Error fetching response:', error_4);
337
+ return [2 /*return*/, null];
338
+ case 3: return [2 /*return*/];
339
+ }
340
+ });
341
+ });
342
+ }
275
343
  function apexai(prompt) {
276
344
  return __awaiter(this, void 0, void 0, function () {
277
- var messages, formattedMessages, response, generatedText, lines, output, error_3;
345
+ var messages, formattedMessages, response, generatedText, lines, output, error_5;
278
346
  return __generator(this, function (_a) {
279
347
  switch (_a.label) {
280
348
  case 0:
@@ -283,7 +351,7 @@ function apexai(prompt) {
283
351
  { "role": "user", "content": "".concat(prompt) }
284
352
  ];
285
353
  formattedMessages = messages.map(function (message) { return "[".concat(message.role, "] ").concat(message.content); }).join('\n');
286
- return [4 /*yield*/, axios_1.post("https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1", {
354
+ return [4 /*yield*/, axios_1.default.post("https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1", {
287
355
  inputs: formattedMessages
288
356
  }, {
289
357
  headers: {
@@ -298,12 +366,81 @@ function apexai(prompt) {
298
366
  output = lines.join('\n');
299
367
  return [2 /*return*/, output];
300
368
  case 2:
301
- error_3 = _a.sent();
302
- console.error('Error:', error_3.response.data);
369
+ error_5 = _a.sent();
370
+ console.error('Error:', error_5.response.data);
303
371
  return [2 /*return*/, 'Please wait i am on cooldown.'];
304
372
  case 3: return [2 /*return*/];
305
373
  }
306
374
  });
307
375
  });
308
376
  }
309
- exports.apexai = apexai;
377
+ exports.apexai = apexai;
378
+ function starChat(prompt) {
379
+ return __awaiter(this, void 0, void 0, function () {
380
+ var messages, response, chatbotReply, chatbotResponseText, chatbotResponseArray, chatbotResponseString, error_6;
381
+ return __generator(this, function (_a) {
382
+ switch (_a.label) {
383
+ case 0:
384
+ messages = [{ "role": "user", "content": "".concat(prompt) }];
385
+ _a.label = 1;
386
+ case 1:
387
+ _a.trys.push([1, 3, , 4]);
388
+ return [4 /*yield*/, axios_1.default.post('https://api-inference.huggingface.co/models/HuggingFaceH4/starchat2-15b-v0.1', {
389
+ inputs: JSON.stringify(messages),
390
+ }, {
391
+ headers: {
392
+ 'Authorization': "Bearer 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",
393
+ },
394
+ })];
395
+ case 2:
396
+ response = _a.sent();
397
+ chatbotReply = response.data[0];
398
+ chatbotResponseText = chatbotReply.generated_text.replace(/^.*?\n.*?\n/, '');
399
+ chatbotResponseArray = JSON.parse(chatbotResponseText);
400
+ chatbotResponseString = chatbotResponseArray.join(' ');
401
+ return [2 /*return*/, chatbotResponseString];
402
+ case 3:
403
+ error_6 = _a.sent();
404
+ console.error('Error fetching response:', error_6);
405
+ return [2 /*return*/, null];
406
+ case 4: return [2 /*return*/];
407
+ }
408
+ });
409
+ });
410
+ }
411
+ exports.starChat = starChat;
412
+ function zephyr_beta(prompt) {
413
+ return __awaiter(this, void 0, void 0, function () {
414
+ var messages, response, chatbotReply, textParts, secondArrayString, chatbotResponseArray, chatbotResponseString, error_7;
415
+ return __generator(this, function (_a) {
416
+ switch (_a.label) {
417
+ case 0:
418
+ messages = [{ "role": "user", "content": "".concat(prompt) }];
419
+ _a.label = 1;
420
+ case 1:
421
+ _a.trys.push([1, 3, , 4]);
422
+ return [4 /*yield*/, axios_1.default.post('https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta', {
423
+ inputs: JSON.stringify(messages),
424
+ }, {
425
+ headers: {
426
+ 'Authorization': "Bearer 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",
427
+ },
428
+ })];
429
+ case 2:
430
+ response = _a.sent();
431
+ chatbotReply = response.data[0];
432
+ textParts = chatbotReply.generated_text.split('\n');
433
+ secondArrayString = textParts[2];
434
+ chatbotResponseArray = JSON.parse(secondArrayString);
435
+ chatbotResponseString = chatbotResponseArray.map(function (obj) { return obj.content; }).join(' ');
436
+ return [2 /*return*/, chatbotResponseString];
437
+ case 3:
438
+ error_7 = _a.sent();
439
+ console.error('Error fetching response:', error_7);
440
+ return [2 /*return*/, null];
441
+ case 4: return [2 /*return*/];
442
+ }
443
+ });
444
+ });
445
+ }
446
+ exports.zephyr_beta = zephyr_beta;
package/lib/ai/models.js CHANGED
@@ -46,10 +46,11 @@ var herc = new hercai_1.Hercai();
46
46
  var apexAI_1 = require("./apexAI");
47
47
  function ApexImagine(model, prompt, options) {
48
48
  return __awaiter(this, void 0, void 0, function () {
49
- var negative, number, nsfw, neg, count, resultUrls, allowedModelsH, validProdiaModalsP, i, result, error_1, i, generateResponse, generatedJobId, result, checkedURLs, nsfwWords, _loop_1, _i, checkedURLs_1, text, state_1;
49
+ var negative, number, nsfw, neg, count, resultUrls, allowedModelsH, validProdiaModalsP, i, result, error_1, i, generateResponse, generatedJobId, result, checkedURLs, nsfwWords, _loop_1, _i, checkedURLs_1, text, state_1, e_1;
50
50
  return __generator(this, function (_a) {
51
51
  switch (_a.label) {
52
52
  case 0:
53
+ _a.trys.push([0, 15, , 16]);
53
54
  negative = options.negative, number = options.number, nsfw = options.nsfw;
54
55
  neg = '';
55
56
  count = 1;
@@ -497,6 +498,22 @@ function ApexImagine(model, prompt, options) {
497
498
  }
498
499
  _a.label = 14;
499
500
  case 14: return [2 /*return*/, resultUrls];
501
+ case 15:
502
+ e_1 = _a.sent();
503
+ if (e_1.response && e_1.response.status === 429) {
504
+ console.error("Too many requests. Please try again later.");
505
+ return [2 /*return*/, "Please wait i am in a cool down for a minute"];
506
+ }
507
+ else if (e_1.response && e_1.response.status === 500) {
508
+ console.error("Internal server error. Please try again later.");
509
+ return [2 /*return*/, "Please wait i am in a cool down for a minute"];
510
+ }
511
+ else {
512
+ console.error("Please wait i am in a cool down for a minute");
513
+ return [2 /*return*/, "Please wait i am in a cool down for a minute"];
514
+ }
515
+ return [3 /*break*/, 16];
516
+ case 16: return [2 /*return*/];
500
517
  }
501
518
  });
502
519
  });
@@ -545,7 +562,7 @@ function checkJobStatus(jobId, retryCount) {
545
562
  }
546
563
  function ApexChat(model, prompt) {
547
564
  return __awaiter(this, void 0, void 0, function () {
548
- var response, e_1;
565
+ var response, e_2;
549
566
  return __generator(this, function (_a) {
550
567
  switch (_a.label) {
551
568
  case 0:
@@ -562,8 +579,8 @@ function ApexChat(model, prompt) {
562
579
  case 4: throw new Error('Invalid model.');
563
580
  case 5: return [3 /*break*/, 7];
564
581
  case 6:
565
- e_1 = _a.sent();
566
- console.error(e_1.message);
582
+ e_2 = _a.sent();
583
+ console.error(e_2.message);
567
584
  return [2 /*return*/, ''];
568
585
  case 7: return [2 /*return*/];
569
586
  }
@@ -634,4 +651,4 @@ function apexChecker(urls) {
634
651
  }
635
652
  });
636
653
  });
637
- }
654
+ }