apexify.js 4.8.2 → 4.8.3

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (128)
  1. package/dist/tsconfig.cjs.tsbuildinfo +1 -1
  2. package/dist/tsconfig.esm.tsbuildinfo +1 -1
  3. package/lib/ai/ApexAI.ts +4 -4
  4. package/lib/ai/ApexModules.ts +8 -8
  5. package/lib/ai/modals/electronHub/chatmodels.ts +1 -1
  6. package/lib/ai/modals/electronHub/imageModels.ts +2 -2
  7. package/lib/ai/modals/electronHub/speechModels.ts +1 -1
  8. package/lib/ai/utils.ts +3 -3
  9. package/lib/canvas/ApexPainter.ts +1 -1
  10. package/lib/canvas/utils/Background/bg.ts +1 -1
  11. package/lib/canvas/utils/Charts/charts.ts +1 -1
  12. package/lib/canvas/utils/Custom/customLines.ts +2 -2
  13. package/lib/canvas/utils/General/general functions.ts +1 -1
  14. package/lib/canvas/utils/Image/imageProperties.ts +1 -1
  15. package/lib/canvas/utils/Texts/textProperties.ts +1 -1
  16. package/lib/canvas/utils/utils.ts +8 -8
  17. package/lib/index.ts +5 -5
  18. package/lib/utils.ts +3 -3
  19. package/package.json +5 -5
  20. package/dist/cjs/ai/ApexAI.d.ts +0 -144
  21. package/dist/cjs/ai/ApexAI.js +0 -486
  22. package/dist/cjs/ai/ApexModules.d.ts +0 -52
  23. package/dist/cjs/ai/ApexModules.js +0 -811
  24. package/dist/cjs/ai/functions/readFiles.d.ts +0 -1
  25. package/dist/cjs/ai/functions/readFiles.js +0 -56
  26. package/dist/cjs/ai/functions/tokenizer.d.ts +0 -9
  27. package/dist/cjs/ai/functions/tokenizer.js +0 -60
  28. package/dist/cjs/ai/functions/validOptions.d.ts +0 -21
  29. package/dist/cjs/ai/functions/validOptions.js +0 -93
  30. package/dist/cjs/ai/modals/electronHub/chatmodels.d.ts +0 -6
  31. package/dist/cjs/ai/modals/electronHub/chatmodels.js +0 -44
  32. package/dist/cjs/ai/modals/electronHub/imageModels.d.ts +0 -11
  33. package/dist/cjs/ai/modals/electronHub/imageModels.js +0 -85
  34. package/dist/cjs/ai/modals/electronHub/songModels.d.ts +0 -1
  35. package/dist/cjs/ai/modals/electronHub/songModels.js +0 -1
  36. package/dist/cjs/ai/modals/electronHub/speechModels.d.ts +0 -6
  37. package/dist/cjs/ai/modals/electronHub/speechModels.js +0 -56
  38. package/dist/cjs/ai/modals/electronHub/videoModels.d.ts +0 -5
  39. package/dist/cjs/ai/modals/electronHub/videoModels.js +0 -56
  40. package/dist/cjs/ai/modals/groq/chatgroq.d.ts +0 -8
  41. package/dist/cjs/ai/modals/groq/chatgroq.js +0 -57
  42. package/dist/cjs/ai/modals/groq/imageAnalyzer.d.ts +0 -7
  43. package/dist/cjs/ai/modals/groq/imageAnalyzer.js +0 -75
  44. package/dist/cjs/ai/modals/groq/whisper.d.ts +0 -4
  45. package/dist/cjs/ai/modals/groq/whisper.js +0 -101
  46. package/dist/cjs/ai/modals/hercai/chatModels.d.ts +0 -6
  47. package/dist/cjs/ai/modals/hercai/chatModels.js +0 -19
  48. package/dist/cjs/ai/utils.d.ts +0 -4
  49. package/dist/cjs/ai/utils.js +0 -4
  50. package/dist/cjs/canvas/ApexPainter.d.ts +0 -144
  51. package/dist/cjs/canvas/ApexPainter.js +0 -900
  52. package/dist/cjs/canvas/utils/Background/bg.d.ts +0 -30
  53. package/dist/cjs/canvas/utils/Background/bg.js +0 -151
  54. package/dist/cjs/canvas/utils/Charts/charts.d.ts +0 -7
  55. package/dist/cjs/canvas/utils/Charts/charts.js +0 -455
  56. package/dist/cjs/canvas/utils/Custom/customLines.d.ts +0 -2
  57. package/dist/cjs/canvas/utils/Custom/customLines.js +0 -105
  58. package/dist/cjs/canvas/utils/General/conversion.d.ts +0 -5
  59. package/dist/cjs/canvas/utils/General/conversion.js +0 -26
  60. package/dist/cjs/canvas/utils/General/general functions.d.ts +0 -38
  61. package/dist/cjs/canvas/utils/General/general functions.js +0 -590
  62. package/dist/cjs/canvas/utils/Image/imageProperties.d.ts +0 -114
  63. package/dist/cjs/canvas/utils/Image/imageProperties.js +0 -590
  64. package/dist/cjs/canvas/utils/Texts/textProperties.d.ts +0 -16
  65. package/dist/cjs/canvas/utils/Texts/textProperties.js +0 -154
  66. package/dist/cjs/canvas/utils/types.d.ts +0 -621
  67. package/dist/cjs/canvas/utils/types.js +0 -5
  68. package/dist/cjs/canvas/utils/utils.d.ts +0 -18
  69. package/dist/cjs/canvas/utils/utils.js +0 -17
  70. package/dist/cjs/index.d.ts +0 -28
  71. package/dist/cjs/index.js +0 -67
  72. package/dist/cjs/utils.d.ts +0 -4
  73. package/dist/cjs/utils.js +0 -4
  74. package/dist/esm/ai/ApexAI.d.ts +0 -144
  75. package/dist/esm/ai/ApexAI.js +0 -486
  76. package/dist/esm/ai/ApexModules.d.ts +0 -52
  77. package/dist/esm/ai/ApexModules.js +0 -811
  78. package/dist/esm/ai/functions/readFiles.d.ts +0 -1
  79. package/dist/esm/ai/functions/readFiles.js +0 -56
  80. package/dist/esm/ai/functions/tokenizer.d.ts +0 -9
  81. package/dist/esm/ai/functions/tokenizer.js +0 -60
  82. package/dist/esm/ai/functions/validOptions.d.ts +0 -21
  83. package/dist/esm/ai/functions/validOptions.js +0 -93
  84. package/dist/esm/ai/modals/electronHub/chatmodels.d.ts +0 -6
  85. package/dist/esm/ai/modals/electronHub/chatmodels.js +0 -44
  86. package/dist/esm/ai/modals/electronHub/imageModels.d.ts +0 -11
  87. package/dist/esm/ai/modals/electronHub/imageModels.js +0 -85
  88. package/dist/esm/ai/modals/electronHub/songModels.d.ts +0 -1
  89. package/dist/esm/ai/modals/electronHub/songModels.js +0 -1
  90. package/dist/esm/ai/modals/electronHub/speechModels.d.ts +0 -6
  91. package/dist/esm/ai/modals/electronHub/speechModels.js +0 -56
  92. package/dist/esm/ai/modals/electronHub/videoModels.d.ts +0 -5
  93. package/dist/esm/ai/modals/electronHub/videoModels.js +0 -56
  94. package/dist/esm/ai/modals/groq/chatgroq.d.ts +0 -8
  95. package/dist/esm/ai/modals/groq/chatgroq.js +0 -57
  96. package/dist/esm/ai/modals/groq/imageAnalyzer.d.ts +0 -7
  97. package/dist/esm/ai/modals/groq/imageAnalyzer.js +0 -75
  98. package/dist/esm/ai/modals/groq/whisper.d.ts +0 -4
  99. package/dist/esm/ai/modals/groq/whisper.js +0 -101
  100. package/dist/esm/ai/modals/hercai/chatModels.d.ts +0 -6
  101. package/dist/esm/ai/modals/hercai/chatModels.js +0 -19
  102. package/dist/esm/ai/utils.d.ts +0 -4
  103. package/dist/esm/ai/utils.js +0 -4
  104. package/dist/esm/canvas/ApexPainter.d.ts +0 -144
  105. package/dist/esm/canvas/ApexPainter.js +0 -900
  106. package/dist/esm/canvas/utils/Background/bg.d.ts +0 -30
  107. package/dist/esm/canvas/utils/Background/bg.js +0 -151
  108. package/dist/esm/canvas/utils/Charts/charts.d.ts +0 -7
  109. package/dist/esm/canvas/utils/Charts/charts.js +0 -455
  110. package/dist/esm/canvas/utils/Custom/customLines.d.ts +0 -2
  111. package/dist/esm/canvas/utils/Custom/customLines.js +0 -105
  112. package/dist/esm/canvas/utils/General/conversion.d.ts +0 -5
  113. package/dist/esm/canvas/utils/General/conversion.js +0 -26
  114. package/dist/esm/canvas/utils/General/general functions.d.ts +0 -38
  115. package/dist/esm/canvas/utils/General/general functions.js +0 -590
  116. package/dist/esm/canvas/utils/Image/imageProperties.d.ts +0 -114
  117. package/dist/esm/canvas/utils/Image/imageProperties.js +0 -590
  118. package/dist/esm/canvas/utils/Texts/textProperties.d.ts +0 -16
  119. package/dist/esm/canvas/utils/Texts/textProperties.js +0 -154
  120. package/dist/esm/canvas/utils/types.d.ts +0 -621
  121. package/dist/esm/canvas/utils/types.js +0 -5
  122. package/dist/esm/canvas/utils/utils.d.ts +0 -18
  123. package/dist/esm/canvas/utils/utils.js +0 -17
  124. package/dist/esm/index.d.ts +0 -28
  125. package/dist/esm/index.js +0 -67
  126. package/dist/esm/utils.d.ts +0 -4
  127. package/dist/esm/utils.js +0 -4
  128. package/lib/ai/modals/electronHub/songModels.ts +0 -0
@@ -1 +0,0 @@
- export declare function readFile(pathOrUrl: string, type?: string): Promise<string>;
@@ -1,56 +0,0 @@
- import pdf from "pdf-parse";
- import * as fs from "fs";
- import * as https from "https";
- import { createWriteStream } from "fs";
- import { promisify } from "util";
- import path from "path";
- const readFileAsync = promisify(fs.readFile);
- export async function readFile(pathOrUrl, type) {
-     const buffer = await loadFile(pathOrUrl);
-     const ext = type || getFileExtension(pathOrUrl);
-     switch (ext) {
-         case "pdf":
-             return readPdf(buffer);
-         default:
-             return buffer.toString("utf-8");
-     }
- }
- function getFileExtension(pathOrUrl) {
-     const parsedPath = new URL(pathOrUrl, "file://");
-     const pathname = parsedPath.pathname || pathOrUrl;
-     const ext = path.extname(pathname).toLowerCase();
-     return ext ? ext.slice(1) : undefined;
- }
- async function loadFile(pathOrUrl) {
-     return isUrl(pathOrUrl) ? downloadFile(pathOrUrl) : readFileAsync(pathOrUrl);
- }
- function isUrl(pathOrUrl) {
-     return /^https?:\/\//.test(pathOrUrl);
- }
- async function downloadFile(url) {
-     const tempFilePath = "temp";
-     const file = createWriteStream(tempFilePath);
-     await new Promise((resolve, reject) => {
-         https.get(url, (response) => {
-             // @ts-ignore
-             response.pipe(file);
-             file.on("finish", resolve);
-             file.on("error", reject);
-         }).on("error", reject);
-     });
-     const buffer = await readFileAsync(tempFilePath);
-     await fs.promises.unlink(tempFilePath);
-     return buffer;
- }
- async function readPdf(buffer) {
-     try {
-         const data = await pdf(buffer);
-         console.log("PDF Metadata:", data.metadata);
-         console.log("PDF Text:", data.text);
-         return data.text || "No readable text found in PDF.";
-     }
-     catch (err) {
-         console.error("Error parsing PDF:", err);
-         return "Failed to extract text from PDF.";
-     }
- }
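For context, a minimal usage sketch of the removed readFile helper. The deep import path targets the 4.8.2 dist layout shown above and is illustrative only; the file names are hypothetical:

    import { readFile } from "apexify.js/dist/cjs/ai/functions/readFiles.js"; // internal module, 4.8.2 only

    // Extract text from a local PDF; the type is inferred from the extension.
    const pdfText = await readFile("./report.pdf");

    // Read a remote plain-text file, passing the type explicitly.
    const notes = await readFile("https://example.com/notes.txt", "txt");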
@@ -1,9 +0,0 @@
- /**
-  * Trims the user’s prompt to ensure total tokens do not exceed 3964.
-  * - Keeps the full instruction intact.
-  * - Trims only the user’s prompt if needed.
-  */
- export declare function tokenLimit(instruction: string, prompt: string, maxTokens?: number): {
-     instruction: string;
-     prompt: string;
- };
@@ -1,60 +0,0 @@
- /**
-  * Optimized tokenization function that processes characters in a single pass.
-  * - Splits words & punctuation efficiently.
-  * - Handles contractions (e.g., "can't", "it's").
-  */
- function optimizedTokenize(text) {
-     const tokens = [];
-     let currentToken = '';
-     for (let i = 0; i < text.length; i++) {
-         const char = text[i];
-         if (char.match(/\w/)) {
-             currentToken += char;
-         }
-         else {
-             if (currentToken) {
-                 tokens.push(currentToken);
-                 currentToken = '';
-             }
-             if (char.match(/[^\s]/)) {
-                 tokens.push(char);
-             }
-         }
-     }
-     if (currentToken) {
-         tokens.push(currentToken);
-     }
-     return tokens;
- }
- /**
-  * Returns the token count using the optimized tokenizer.
-  */
- function optimizedTokenCount(text) {
-     return optimizedTokenize(text).length;
- }
- /**
-  * Trims the user’s prompt to ensure total tokens do not exceed 3964.
-  * - Keeps the full instruction intact.
-  * - Trims only the user’s prompt if needed.
-  */
- export function tokenLimit(instruction, prompt, maxTokens = 3964) {
-     const instructionTokens = optimizedTokenCount(instruction);
-     const remainingTokens = maxTokens - instructionTokens;
-     if (remainingTokens <= 0) {
-         throw new Error("Instruction alone exceeds max token limit! This should never happen.");
-     }
-     return {
-         instruction,
-         prompt: optimizedTrimText(prompt, remainingTokens)
-     };
- }
- /**
-  * Trims the input text to fit within the token limit.
-  */
- function optimizedTrimText(text, maxTokens) {
-     const tokens = optimizedTokenize(text);
-     if (tokens.length <= maxTokens) {
-         return text;
-     }
-     return tokens.slice(0, maxTokens).join(' ');
- }
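As a rough illustration of how the removed tokenLimit behaves (a sketch with hypothetical inputs; the import path is illustrative and the default budget is 3964 tokens):

    import { tokenLimit } from "apexify.js/dist/cjs/ai/functions/tokenizer.js"; // internal module, 4.8.2 only

    const instruction = "You are a concise assistant.";
    const hugePrompt = "lorem ipsum ".repeat(5000); // far larger than the token budget

    // The instruction is returned untouched; only the prompt is trimmed to the remaining budget.
    const { instruction: kept, prompt: trimmed } = tokenLimit(instruction, hugePrompt, 3964);
    console.log(kept === instruction); // true
    console.log(trimmed.length < hugePrompt.length); // true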
@@ -1,21 +0,0 @@
- import Groq from "groq-sdk";
- export declare const HERC_CHAT_MODELS: readonly string[];
- export declare const HERC_IMAGE_MODELS: readonly string[];
- /**
-  * Fetches available models from ElectronHub API.
-  */
- export declare function getElectronHubModels(apiKey?: string): Promise<string[]>;
- /**
-  * Fetches available chat models from Groq API.
-  */
- export declare function getGroqChatModels(groqInstance: Groq): Promise<string[]>;
- /**
-  * Returns all valid model lists asynchronously.
-  */
- export declare const validateModels: () => Promise<{
-     validHercChatModels: readonly string[];
-     validGroqChatModels: string[];
-     validElectronModels: string[];
-     validHercImageModels: readonly string[];
-     allModels: string[];
- }>;
@@ -1,93 +0,0 @@
- import Groq from "groq-sdk";
- import OpenAI from "openai";
- // Define static models as readonly to prevent accidental modifications
- export const HERC_CHAT_MODELS = Object.freeze([
-     "v3", "v3-32k", "turbo", "turbo-16k", "gemini",
-     "llama3-70b", "llama3-8b", "mixtral-8x7b",
-     "gemma-7b", "gemma2-9b",
- ]);
- export const HERC_IMAGE_MODELS = Object.freeze([
-     "v3", "lexica", "prodia", "prodia-v2", "simurg",
-     "animefy", "raava", "shonin",
- ]);
- // API Keys (ensure they are securely stored, do not hardcode in production)
- const GROQ_API_KEY = "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";
- const ELECTRON_HUB_API_KEY = "ek-3gmOPmvuljmrl4NQrohpnp1ryNXQG5bNn08zNuzhX6bcxBrndR";
- /**
-  * Initializes the Groq SDK instance.
-  */
- const initializeGroqInstance = () => new Groq({ apiKey: GROQ_API_KEY });
- /**
-  * Fetches available models from ElectronHub API.
-  */
- export async function getElectronHubModels(apiKey) {
-     try {
-         const openai = new OpenAI({
-             apiKey: apiKey ?? ELECTRON_HUB_API_KEY,
-             baseURL: "https://api.electronhub.top/v1",
-         });
-         const models = await openai.models.list();
-         // Ensure models are valid and correctly formatted
-         if (!models?.data || !Array.isArray(models.data)) {
-             console.warn("Warning: Unexpected ElectronHub models response", models);
-             return [];
-         }
-         return models.data.map((model) => model.id).filter(Boolean); // Filter out any undefined/null values
-     }
-     catch (error) {
-         console.error("Error fetching ElectronHub models:", error);
-         return [];
-     }
- }
- /**
-  * Fetches available chat models from Groq API.
-  */
- export async function getGroqChatModels(groqInstance) {
-     try {
-         const response = await groqInstance.models.list();
-         // Ensure response is valid
-         if (!response?.data || !Array.isArray(response.data)) {
-             console.warn("Warning: Unexpected Groq models response", response);
-             return [];
-         }
-         return response.data
-             .map((model) => model.id)
-             .filter((id) => id &&
-             !id.startsWith("whisper") &&
-             !id.startsWith("llava") &&
-             !id.startsWith("distil-whisper")); // Remove unwanted prefixes
-     }
-     catch (error) {
-         console.error("Error fetching Groq models:", error);
-         return [];
-     }
- }
- /**
-  * Fetches and compiles all valid model options.
-  */
- async function initializeValidOptions() {
-     const groq = initializeGroqInstance();
-     // Fetch models in parallel for better performance
-     const [groqModels, electronModels] = await Promise.all([
-         getGroqChatModels(groq),
-         getElectronHubModels(),
-     ]);
-     // Use Set to prevent duplicate entries in `allModels`
-     const allModelsSet = new Set([
-         ...HERC_IMAGE_MODELS,
-         ...HERC_CHAT_MODELS,
-         ...groqModels,
-         ...electronModels,
-     ]);
-     return {
-         validHercChatModels: HERC_CHAT_MODELS,
-         validGroqChatModels: groqModels,
-         validElectronModels: electronModels,
-         validHercImageModels: HERC_IMAGE_MODELS,
-         allModels: Array.from(allModelsSet), // Convert Set to array for uniqueness
-     };
- }
- /**
-  * Returns all valid model lists asynchronously.
-  */
- export const validateModels = async () => await initializeValidOptions();
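A short sketch of how the removed validateModels could be consumed (the deep import is illustrative; the Groq and ElectronHub entries in the result depend on the live model listings):

    import { validateModels } from "apexify.js/dist/cjs/ai/functions/validOptions.js"; // internal module, 4.8.2 only

    const { validHercChatModels, validGroqChatModels, allModels } = await validateModels();
    console.log(validHercChatModels.includes("v3")); // true, from the static Hercai list
    console.log(allModels.length);                   // deduplicated union across all providers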
@@ -1,6 +0,0 @@
- export declare function electronChat({ ApiKey, prompt, modelName, instruction, }: {
-     ApiKey?: string;
-     prompt: string;
-     modelName: string;
-     instruction?: string;
- }): Promise<string | null>;
@@ -1,44 +0,0 @@
- import OpenAI from "openai";
- import { tokenLimit } from "../../functions/tokenizer.js";
- export async function electronChat({ ApiKey, prompt, modelName, instruction, }) {
-     try {
-         const apiKey = ApiKey || "ek-3gmOPmvuljmrl4NQrohpnp1ryNXQG5bNn08zNuzhX6bcxBrndR";
-         const openai = new OpenAI({
-             apiKey: apiKey,
-             baseURL: "https://api.electronhub.top/v1",
-         });
-         const models = await openai.models.list();
-         const modelExists = models.data.some((model) => model.id === modelName);
-         if (!modelExists) {
-             throw new Error('Invalid model name. Please check out Electron hub models for more info.');
-         }
-         const { instruction: finalInstruction, prompt: finalPrompt } = tokenLimit(instruction || "", prompt, 10000);
-         const messages = [
-             { role: "system", content: finalInstruction },
-             { role: "user", content: finalPrompt }
-         ];
-         const completion = await openai.chat.completions.create({
-             model: modelName,
-             messages: messages
-         });
-         return completion.choices[0]?.message?.content;
-     }
-     catch (e) {
-         if (e.response) {
-             if (e.response.status === 429) {
-                 throw new Error("Rate limit exceeded. Please join the server at https://discord.gg/83XcjD8vgW for an API key.");
-             }
-             else if (e.response.status === 500) {
-                 throw new Error("Server error. Please try again later.");
-             }
-             else {
-                 console.error("Error generating response:", e.response.data);
-                 throw e;
-             }
-         }
-         else {
-             console.error("Error generating response:", e.message);
-             throw e;
-         }
-     }
- }
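A hedged usage sketch of the removed electronChat wrapper (import path and model name are placeholders; supplying your own ElectronHub key avoids the bundled fallback key):

    import { electronChat } from "apexify.js/dist/cjs/ai/modals/electronHub/chatmodels.js"; // internal module, 4.8.2 only

    const reply = await electronChat({
        ApiKey: process.env.ELECTRON_HUB_API_KEY, // your own key; otherwise the hardcoded fallback is used
        modelName: "gpt-4o-mini",                 // placeholder; must exist in the ElectronHub model list
        instruction: "Answer in one sentence.",
        prompt: "What does apexify.js do?",
    });
    console.log(reply);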
@@ -1,11 +0,0 @@
- export declare function electronImagine({ ApiKey, prompt, modelName, resizeOptions, }: {
-     ApiKey?: string;
-     prompt: string;
-     modelName: string;
-     resizeOptions?: {
-         width?: number;
-         height?: number;
-         format?: "jpeg" | "png";
-         quality?: number;
-     };
- }): Promise<string | undefined>;
@@ -1,85 +0,0 @@
- import OpenAI from "openai";
- import sharp from "sharp";
- import { ApexPainter } from "../../../utils.js";
- import { tokenLimit } from "../../functions/tokenizer.js";
- const paint = new ApexPainter({ type: "url" });
- export async function electronImagine({ ApiKey, prompt, modelName, resizeOptions, }) {
-     let apiKey = ApiKey || "ek-3gmOPmvuljmrl4NQrohpnp1ryNXQG5bNn08zNuzhX6bcxBrndR";
-     if (ApiKey === "eaebff6e-c7b2-477c-8edd-9aa91becf1e3") {
-         apiKey = "ek-3gmOPmvuljmrl4NQrohpnp1ryNXQG5bNn08zNuzhX6bcxBrndR";
-     }
-     const openai = new OpenAI({
-         apiKey: apiKey,
-         baseURL: "https://api.electronhub.top/v1",
-     });
-     const models = await openai.models.list();
-     const modelExists = models.data.some((model) => model.id === modelName);
-     if (!modelExists) {
-         throw new Error("Invalid model name. Please check out Electron Hub models for more info.");
-     }
-     try {
-         const { prompt: trimmedPrompt } = tokenLimit("", prompt, 32000);
-         const response = await openai.images.generate({
-             model: modelName,
-             prompt: trimmedPrompt,
-             n: 1,
-         });
-         const imagesUrl = response.data;
-         const imageUrl = imagesUrl[0].url;
-         if (!resizeOptions ||
-             ((resizeOptions.width == null &&
-                 resizeOptions.height == null &&
-                 resizeOptions.format == null) ||
-                 (resizeOptions.width === 1024 &&
-                     resizeOptions.height === 1024 &&
-                     resizeOptions.format == null))) {
-             return imageUrl;
-         }
-         const responseFetch = await fetch(imageUrl);
-         const arrayBuffer = await responseFetch.arrayBuffer();
-         const imageBuffer = Buffer.from(arrayBuffer);
-         let imageProcessor = sharp(imageBuffer);
-         if (resizeOptions.width || resizeOptions.height) {
-             imageProcessor = imageProcessor.resize({
-                 width: resizeOptions.width,
-                 height: resizeOptions.height,
-                 kernel: sharp.kernel.lanczos3,
-                 withoutEnlargement: true,
-             });
-         }
-         if (resizeOptions.format) {
-             const quality = resizeOptions.quality || 90;
-             switch (resizeOptions.format) {
-                 case "jpeg":
-                     imageProcessor = imageProcessor.jpeg({ quality });
-                     break;
-                 case "png":
-                     imageProcessor = imageProcessor.png({ quality });
-                     break;
-                 default:
-                     throw Error("We don't support this format, only png and jpeg.");
-             }
-         }
-         const buffer = await imageProcessor.toBuffer();
-         const output = (await paint.outPut(buffer));
-         return output;
-     }
-     catch (e) {
-         if (e.response) {
-             if (e.response.status === 429) {
-                 throw new Error("Rate limit exceeded. Please join the server at https://discord.gg/83XcjD8vgW for an API key.");
-             }
-             else if (e.response.status === 500) {
-                 throw new Error("Server error. Please try again later.");
-             }
-             else {
-                 console.error("Error generating response:", e.response.data);
-                 throw e;
-             }
-         }
-         else {
-             console.error("Error generating response:", e.message);
-             throw e;
-         }
-     }
- }
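A minimal sketch of calling the removed electronImagine (import path and model id are placeholders; per the code above, omitting resizeOptions returns the provider URL, while passing them routes the image through sharp and ApexPainter's outPut):

    import { electronImagine } from "apexify.js/dist/cjs/ai/modals/electronHub/imageModels.js"; // internal module, 4.8.2 only

    // No resizeOptions: the generated image URL is returned as-is.
    const url = await electronImagine({
        prompt: "a lighthouse at dusk, oil painting",
        modelName: "dall-e-3", // placeholder; must exist in the ElectronHub model list
    });

    // With resizeOptions: the image is fetched, resized with sharp, then re-emitted via ApexPainter.
    const resized = await electronImagine({
        prompt: "a lighthouse at dusk, oil painting",
        modelName: "dall-e-3",
        resizeOptions: { width: 512, height: 512, format: "png" },
    });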
@@ -1 +0,0 @@
- export {};
@@ -1 +0,0 @@
- export {};
@@ -1,6 +0,0 @@
- export declare function electronSpeech({ ApiKey, inputText, modelName, personality, }: {
-     ApiKey?: string;
-     inputText: string;
-     modelName?: "elevenlabs" | "myshell-tts" | "deepinfra-tts" | "whisper-large-v3" | "distil-large-v3" | string;
-     personality?: string;
- }): Promise<Buffer>;
@@ -1,56 +0,0 @@
- import OpenAI from 'openai';
- import axios from 'axios';
- import { tokenLimit } from '../../functions/tokenizer.js'; // ✅ Import token limiter
- export async function electronSpeech({ ApiKey, inputText, modelName = 'elevenlabs', personality, }) {
-     try {
-         const apiKey = ApiKey || "ek-3gmOPmvuljmrl4NQrohpnp1ryNXQG5bNn08zNuzhX6bcxBrndR";
-         const validPersonalities = [
-             'will', 'maltida', 'liam', 'jessica', 'george', 'lily', 'sana',
-             'wahab', 'martin', 'darine', 'guillaume', 'leonie', 'kurt', 'leo',
-             'shakuntala', 'maciej', 'aneta', 'gabriela', 'juan'
-         ];
-         if (personality && !validPersonalities.includes(personality)) {
-             throw new Error(`Invalid personality. Please choose from the following: ${validPersonalities.join(', ')}`);
-         }
-         const openai = new OpenAI({
-             apiKey: apiKey,
-             baseURL: "https://api.electronhub.top/v1",
-         });
-         const models = await openai.models.list();
-         const modelExists = models.data.some((model) => model.id === modelName);
-         if (!modelExists) {
-             throw new Error('Invalid model name. Please check out Electron hub models for more info.');
-         }
-         // ✅ Limit input text to max 3964 tokens
-         const { prompt: trimmedInputText } = tokenLimit("", inputText, 2500);
-         const response = await axios.post('https://api.electronhub.top/v1/audio/speech', {
-             model: modelName,
-             voice: personality || 'will',
-             input: trimmedInputText,
-         }, {
-             headers: {
-                 Authorization: `Bearer ${apiKey}`,
-             },
-             responseType: 'arraybuffer',
-         });
-         return response.data;
-     }
-     catch (error) {
-         if (error.response) {
-             if (error.response.status === 429) {
-                 throw new Error("Rate limit exceeded. Please join the server at https://discord.gg/83XcjD8vgW for an API key.");
-             }
-             else if (error.response.status === 500) {
-                 throw new Error("Server error. Please try again later.");
-             }
-             else {
-                 console.error("Error generating speech:", error.response.data);
-                 throw error;
-             }
-         }
-         else {
-             console.error("Error generating speech:", error.message);
-             throw error;
-         }
-     }
- }
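A sketch of the removed electronSpeech in use (import path and output file name are hypothetical; the function resolves to an audio Buffer, and the voice must be one of the validPersonalities listed above):

    import { writeFile } from "fs/promises";
    import { electronSpeech } from "apexify.js/dist/cjs/ai/modals/electronHub/speechModels.js"; // internal module, 4.8.2 only

    const audio = await electronSpeech({
        inputText: "Welcome to apexify.js.",
        modelName: "elevenlabs", // default model
        personality: "will",     // must appear in validPersonalities
    });
    await writeFile("output.mp3", audio); // actual audio format depends on the provider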
@@ -1,5 +0,0 @@
- export declare function electronVideo({ ApiKey, prompt, modelName, }: {
-     ApiKey?: string;
-     prompt: string;
-     modelName?: string;
- }): Promise<string>;
@@ -1,56 +0,0 @@
- import axios from 'axios';
- import OpenAI from 'openai';
- export async function electronVideo({ ApiKey, prompt, modelName = 't2v-turbo', }) {
-     const apiKey = ApiKey || "ek-3gmOPmvuljmrl4NQrohpnp1ryNXQG5bNn08zNuzhX6bcxBrndR";
-     const headers = {
-         Authorization: `Bearer ${apiKey}`,
-     };
-     const openai = new OpenAI({
-         apiKey: apiKey,
-         baseURL: "https://api.electronhub.top/v1",
-     });
-     try {
-         const models = await openai.models.list();
-         const modelExists = models.data.some((model) => model.id === modelName);
-         if (!modelExists) {
-             throw new Error(`Invalid model name: ${modelName}. Please check available models.`);
-         }
-         const payload = {
-             model: modelName,
-             prompt: prompt,
-         };
-         const response = await axios.post('https://api.electronhub.top/v1/videos/generations', payload, {
-             headers,
-             responseType: 'stream',
-         });
-         return new Promise((resolve, reject) => {
-             response.data.on('data', (chunk) => {
-                 const line = chunk.toString();
-                 try {
-                     const data = JSON.parse(line);
-                     if (data.heartbeat) {
-                         console.log('Pending...');
-                     }
-                     else if (data[0]?.url) {
-                         console.log('Video generation completed.');
-                         resolve(data[0].url);
-                     }
-                 }
-                 catch (e) {
-                     console.error('Error parsing chunk:', e);
-                     reject(new Error('Error parsing response from video generation.'));
-                 }
-             });
-             response.data.on('end', () => {
-                 console.log('Video generation stream ended.');
-             });
-             response.data.on('error', (error) => {
-                 reject(new Error('Error streaming video generation: ' + error.message));
-             });
-         });
-     }
-     catch (error) {
-         console.error('Error generating video:', error.message);
-         throw new Error('Error generating video: ' + error.message);
-     }
- }
@@ -1,8 +0,0 @@
- interface groqOptions {
-     API_KEY?: string;
-     prompt: string;
-     apiName: string;
-     instruction?: string;
- }
- export declare function chatGroq({ API_KEY, prompt, apiName, instruction }: groqOptions): Promise<string>;
- export {};
@@ -1,57 +0,0 @@
- import Groq from 'groq-sdk';
- const getModels = async (groqInstance) => {
-     const allModels = await groqInstance.models.list();
-     const filteredModels = allModels.data.filter((model) => !model.id.startsWith('whisper') && !model.id.startsWith('llava') && !model.id.startsWith('distil-whisper'));
-     return filteredModels;
- };
- export async function chatGroq({ API_KEY, prompt, apiName, instruction }) {
-     try {
-         const groq = new Groq({
-             apiKey: API_KEY || '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',
-         });
-         const models = await getModels(groq);
-         const modelExists = models.some((model) => model.id === apiName);
-         if (!modelExists) {
-             throw new Error('Invalid model name provided. Please check the available models on Groq.');
-         }
-         const messages = [];
-         if (instruction) {
-             messages.push({
-                 role: 'system',
-                 content: instruction,
-             });
-         }
-         messages.push({
-             role: 'user',
-             content: prompt,
-         });
-         const chatCompletion = await groq.chat.completions.create({
-             messages: messages,
-             model: apiName,
-             max_tokens: 8192,
-         });
-         const response = chatCompletion.choices[0]?.message?.content || 'Rate limit';
-         return response;
-     }
-     catch (err) {
-         if (err instanceof Groq.APIError) {
-             if (err.status === 400) {
-                 return 'Bad request. Try again after a minute please.';
-             }
-             else if (err.status === 429) {
-                 return 'Rate limit. Try again after one minute or provide your own API key.';
-             }
-             else if (err.status === 401 || !err.status) {
-                 throw new Error('Invalid API key provided.');
-             }
-             else {
-                 console.error(err);
-                 return 'Unknown error occurred.';
-             }
-         }
-         else {
-             console.error(err);
-             return 'Unknown error occurred.';
-         }
-     }
- }
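Finally, a hedged sketch of the removed chatGroq wrapper (import path and model id are placeholders; as shown above, 400/429 errors are returned as explanatory strings rather than thrown):

    import { chatGroq } from "apexify.js/dist/cjs/ai/modals/groq/chatgroq.js"; // internal module, 4.8.2 only

    const answer = await chatGroq({
        API_KEY: process.env.GROQ_API_KEY, // your own key; otherwise the hardcoded fallback is used
        apiName: "llama3-70b-8192",        // placeholder; must appear in Groq's model list
        instruction: "Reply briefly.",
        prompt: "Summarize what changed in apexify.js 4.8.3.",
    });
    console.log(answer);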
@@ -1,7 +0,0 @@
- interface GroqAnalyzerOptions {
-     img: string | Buffer;
-     ApiKey?: string;
-     prompt?: string;
- }
- export declare function groqAnalyzer({ img, ApiKey, prompt }: GroqAnalyzerOptions): Promise<string>;
- export {};