apexify.js 4.9.23 → 4.9.25

This diff shows the changes between publicly released versions of the package as published to the supported registries. It is provided for informational purposes only.
Files changed (152)
  1. package/dist/cjs/canvas/ApexPainter.js +1 -1
  2. package/dist/cjs/canvas/ApexPainter.js.map +1 -1
  3. package/dist/cjs/index.d.ts +2 -22
  4. package/dist/cjs/index.d.ts.map +1 -1
  5. package/dist/cjs/index.js +2 -17
  6. package/dist/cjs/index.js.map +1 -1
  7. package/dist/cjs/tsconfig.cjs.tsbuildinfo +1 -1
  8. package/dist/cjs/utils.d.ts +1 -3
  9. package/dist/cjs/utils.d.ts.map +1 -1
  10. package/dist/cjs/utils.js +1 -11
  11. package/dist/cjs/utils.js.map +1 -1
  12. package/dist/esm/canvas/ApexPainter.js +1 -1
  13. package/dist/esm/canvas/ApexPainter.js.map +1 -1
  14. package/dist/esm/index.d.ts +2 -22
  15. package/dist/esm/index.d.ts.map +1 -1
  16. package/dist/esm/index.js +2 -17
  17. package/dist/esm/index.js.map +1 -1
  18. package/dist/esm/tsconfig.esm.tsbuildinfo +1 -1
  19. package/dist/esm/utils.d.ts +1 -3
  20. package/dist/esm/utils.d.ts.map +1 -1
  21. package/dist/esm/utils.js +1 -11
  22. package/dist/esm/utils.js.map +1 -1
  23. package/lib/canvas/ApexPainter.ts +3 -3
  24. package/lib/index.ts +4 -11
  25. package/lib/utils.ts +1 -3
  26. package/package.json +3 -12
  27. package/dist/cjs/ai/ApexAI.d.ts +0 -145
  28. package/dist/cjs/ai/ApexAI.d.ts.map +0 -1
  29. package/dist/cjs/ai/ApexAI.js +0 -494
  30. package/dist/cjs/ai/ApexAI.js.map +0 -1
  31. package/dist/cjs/ai/ApexModules.d.ts +0 -53
  32. package/dist/cjs/ai/ApexModules.d.ts.map +0 -1
  33. package/dist/cjs/ai/ApexModules.js +0 -824
  34. package/dist/cjs/ai/ApexModules.js.map +0 -1
  35. package/dist/cjs/ai/functions/readFiles.d.ts +0 -2
  36. package/dist/cjs/ai/functions/readFiles.d.ts.map +0 -1
  37. package/dist/cjs/ai/functions/readFiles.js +0 -96
  38. package/dist/cjs/ai/functions/readFiles.js.map +0 -1
  39. package/dist/cjs/ai/functions/tokenizer.d.ts +0 -10
  40. package/dist/cjs/ai/functions/tokenizer.d.ts.map +0 -1
  41. package/dist/cjs/ai/functions/tokenizer.js +0 -64
  42. package/dist/cjs/ai/functions/tokenizer.js.map +0 -1
  43. package/dist/cjs/ai/functions/validOptions.d.ts +0 -22
  44. package/dist/cjs/ai/functions/validOptions.d.ts.map +0 -1
  45. package/dist/cjs/ai/functions/validOptions.js +0 -103
  46. package/dist/cjs/ai/functions/validOptions.js.map +0 -1
  47. package/dist/cjs/ai/modals/electronHub/chatmodels.d.ts +0 -7
  48. package/dist/cjs/ai/modals/electronHub/chatmodels.d.ts.map +0 -1
  49. package/dist/cjs/ai/modals/electronHub/chatmodels.js +0 -51
  50. package/dist/cjs/ai/modals/electronHub/chatmodels.js.map +0 -1
  51. package/dist/cjs/ai/modals/electronHub/imageModels.d.ts +0 -12
  52. package/dist/cjs/ai/modals/electronHub/imageModels.d.ts.map +0 -1
  53. package/dist/cjs/ai/modals/electronHub/imageModels.js +0 -92
  54. package/dist/cjs/ai/modals/electronHub/imageModels.js.map +0 -1
  55. package/dist/cjs/ai/modals/electronHub/speechModels.d.ts +0 -7
  56. package/dist/cjs/ai/modals/electronHub/speechModels.d.ts.map +0 -1
  57. package/dist/cjs/ai/modals/electronHub/speechModels.js +0 -63
  58. package/dist/cjs/ai/modals/electronHub/speechModels.js.map +0 -1
  59. package/dist/cjs/ai/modals/electronHub/videoModels.d.ts +0 -6
  60. package/dist/cjs/ai/modals/electronHub/videoModels.d.ts.map +0 -1
  61. package/dist/cjs/ai/modals/electronHub/videoModels.js +0 -63
  62. package/dist/cjs/ai/modals/electronHub/videoModels.js.map +0 -1
  63. package/dist/cjs/ai/modals/groq/chatgroq.d.ts +0 -9
  64. package/dist/cjs/ai/modals/groq/chatgroq.d.ts.map +0 -1
  65. package/dist/cjs/ai/modals/groq/chatgroq.js +0 -64
  66. package/dist/cjs/ai/modals/groq/chatgroq.js.map +0 -1
  67. package/dist/cjs/ai/modals/groq/imageAnalyzer.d.ts +0 -8
  68. package/dist/cjs/ai/modals/groq/imageAnalyzer.d.ts.map +0 -1
  69. package/dist/cjs/ai/modals/groq/imageAnalyzer.js +0 -82
  70. package/dist/cjs/ai/modals/groq/imageAnalyzer.js.map +0 -1
  71. package/dist/cjs/ai/modals/groq/whisper.d.ts +0 -5
  72. package/dist/cjs/ai/modals/groq/whisper.d.ts.map +0 -1
  73. package/dist/cjs/ai/modals/groq/whisper.js +0 -108
  74. package/dist/cjs/ai/modals/groq/whisper.js.map +0 -1
  75. package/dist/cjs/ai/modals/hercai/chatModels.d.ts +0 -7
  76. package/dist/cjs/ai/modals/hercai/chatModels.d.ts.map +0 -1
  77. package/dist/cjs/ai/modals/hercai/chatModels.js +0 -23
  78. package/dist/cjs/ai/modals/hercai/chatModels.js.map +0 -1
  79. package/dist/cjs/ai/utils.d.ts +0 -5
  80. package/dist/cjs/ai/utils.d.ts.map +0 -1
  81. package/dist/cjs/ai/utils.js +0 -15
  82. package/dist/cjs/ai/utils.js.map +0 -1
  83. package/dist/esm/ai/ApexAI.d.ts +0 -145
  84. package/dist/esm/ai/ApexAI.d.ts.map +0 -1
  85. package/dist/esm/ai/ApexAI.js +0 -494
  86. package/dist/esm/ai/ApexAI.js.map +0 -1
  87. package/dist/esm/ai/ApexModules.d.ts +0 -53
  88. package/dist/esm/ai/ApexModules.d.ts.map +0 -1
  89. package/dist/esm/ai/ApexModules.js +0 -824
  90. package/dist/esm/ai/ApexModules.js.map +0 -1
  91. package/dist/esm/ai/functions/readFiles.d.ts +0 -2
  92. package/dist/esm/ai/functions/readFiles.d.ts.map +0 -1
  93. package/dist/esm/ai/functions/readFiles.js +0 -96
  94. package/dist/esm/ai/functions/readFiles.js.map +0 -1
  95. package/dist/esm/ai/functions/tokenizer.d.ts +0 -10
  96. package/dist/esm/ai/functions/tokenizer.d.ts.map +0 -1
  97. package/dist/esm/ai/functions/tokenizer.js +0 -64
  98. package/dist/esm/ai/functions/tokenizer.js.map +0 -1
  99. package/dist/esm/ai/functions/validOptions.d.ts +0 -22
  100. package/dist/esm/ai/functions/validOptions.d.ts.map +0 -1
  101. package/dist/esm/ai/functions/validOptions.js +0 -103
  102. package/dist/esm/ai/functions/validOptions.js.map +0 -1
  103. package/dist/esm/ai/modals/electronHub/chatmodels.d.ts +0 -7
  104. package/dist/esm/ai/modals/electronHub/chatmodels.d.ts.map +0 -1
  105. package/dist/esm/ai/modals/electronHub/chatmodels.js +0 -51
  106. package/dist/esm/ai/modals/electronHub/chatmodels.js.map +0 -1
  107. package/dist/esm/ai/modals/electronHub/imageModels.d.ts +0 -12
  108. package/dist/esm/ai/modals/electronHub/imageModels.d.ts.map +0 -1
  109. package/dist/esm/ai/modals/electronHub/imageModels.js +0 -92
  110. package/dist/esm/ai/modals/electronHub/imageModels.js.map +0 -1
  111. package/dist/esm/ai/modals/electronHub/speechModels.d.ts +0 -7
  112. package/dist/esm/ai/modals/electronHub/speechModels.d.ts.map +0 -1
  113. package/dist/esm/ai/modals/electronHub/speechModels.js +0 -63
  114. package/dist/esm/ai/modals/electronHub/speechModels.js.map +0 -1
  115. package/dist/esm/ai/modals/electronHub/videoModels.d.ts +0 -6
  116. package/dist/esm/ai/modals/electronHub/videoModels.d.ts.map +0 -1
  117. package/dist/esm/ai/modals/electronHub/videoModels.js +0 -63
  118. package/dist/esm/ai/modals/electronHub/videoModels.js.map +0 -1
  119. package/dist/esm/ai/modals/groq/chatgroq.d.ts +0 -9
  120. package/dist/esm/ai/modals/groq/chatgroq.d.ts.map +0 -1
  121. package/dist/esm/ai/modals/groq/chatgroq.js +0 -64
  122. package/dist/esm/ai/modals/groq/chatgroq.js.map +0 -1
  123. package/dist/esm/ai/modals/groq/imageAnalyzer.d.ts +0 -8
  124. package/dist/esm/ai/modals/groq/imageAnalyzer.d.ts.map +0 -1
  125. package/dist/esm/ai/modals/groq/imageAnalyzer.js +0 -82
  126. package/dist/esm/ai/modals/groq/imageAnalyzer.js.map +0 -1
  127. package/dist/esm/ai/modals/groq/whisper.d.ts +0 -5
  128. package/dist/esm/ai/modals/groq/whisper.d.ts.map +0 -1
  129. package/dist/esm/ai/modals/groq/whisper.js +0 -108
  130. package/dist/esm/ai/modals/groq/whisper.js.map +0 -1
  131. package/dist/esm/ai/modals/hercai/chatModels.d.ts +0 -7
  132. package/dist/esm/ai/modals/hercai/chatModels.d.ts.map +0 -1
  133. package/dist/esm/ai/modals/hercai/chatModels.js +0 -23
  134. package/dist/esm/ai/modals/hercai/chatModels.js.map +0 -1
  135. package/dist/esm/ai/utils.d.ts +0 -5
  136. package/dist/esm/ai/utils.d.ts.map +0 -1
  137. package/dist/esm/ai/utils.js +0 -15
  138. package/dist/esm/ai/utils.js.map +0 -1
  139. package/lib/ai/ApexAI.ts +0 -758
  140. package/lib/ai/ApexModules.ts +0 -916
  141. package/lib/ai/functions/readFiles.ts +0 -66
  142. package/lib/ai/functions/tokenizer.ts +0 -69
  143. package/lib/ai/functions/validOptions.ts +0 -116
  144. package/lib/ai/modals/electronHub/chatmodels.ts +0 -57
  145. package/lib/ai/modals/electronHub/imageModels.ts +0 -116
  146. package/lib/ai/modals/electronHub/speechModels.ts +0 -75
  147. package/lib/ai/modals/electronHub/videoModels.ts +0 -75
  148. package/lib/ai/modals/groq/chatgroq.ts +0 -78
  149. package/lib/ai/modals/groq/imageAnalyzer.ts +0 -83
  150. package/lib/ai/modals/groq/whisper.ts +0 -114
  151. package/lib/ai/modals/hercai/chatModels.ts +0 -20
  152. package/lib/ai/utils.ts +0 -15
package/lib/ai/functions/readFiles.ts
@@ -1,66 +0,0 @@
- import pdf from "pdf-parse";
- import * as fs from "fs";
- import * as https from "https";
- import { createWriteStream } from "fs";
- import { promisify } from "util";
- import path from "path";
-
- const readFileAsync = promisify(fs.readFile);
-
- export async function readFile(pathOrUrl: string, type?: string): Promise<string> {
-   const buffer = await loadFile(pathOrUrl);
-   const ext = type || getFileExtension(pathOrUrl);
-
-   switch (ext) {
-     case "pdf":
-       return readPdf(buffer);
-     default:
-       return buffer.toString("utf-8");
-   }
- }
-
- function getFileExtension(pathOrUrl: string): string | undefined {
-   const parsedPath = new URL(pathOrUrl, "file://");
-   const pathname = parsedPath.pathname || pathOrUrl;
-   const ext = path.extname(pathname).toLowerCase();
-
-   return ext ? ext.slice(1) : undefined;
- }
-
- async function loadFile(pathOrUrl: string): Promise<Buffer> {
-   return isUrl(pathOrUrl) ? downloadFile(pathOrUrl) : readFileAsync(pathOrUrl);
- }
-
- function isUrl(pathOrUrl: string): boolean {
-   return /^https?:\/\//.test(pathOrUrl);
- }
-
- async function downloadFile(url: string): Promise<Buffer> {
-   const tempFilePath = "temp";
-   const file = createWriteStream(tempFilePath);
-
-   await new Promise<void>((resolve, reject) => {
-     https.get(url, (response) => {
-       // @ts-ignore
-       response.pipe(file);
-       file.on("finish", resolve);
-       file.on("error", reject);
-     }).on("error", reject);
-   });
-
-   const buffer = await readFileAsync(tempFilePath);
-   await fs.promises.unlink(tempFilePath);
-   return buffer;
- }
-
- async function readPdf(buffer: Buffer): Promise<string> {
-   try {
-     const data = await pdf(buffer);
-     console.log("PDF Metadata:", data.metadata);
-     console.log("PDF Text:", data.text);
-     return data.text || "No readable text found in PDF.";
-   } catch (err) {
-     console.error("Error parsing PDF:", err);
-     return "Failed to extract text from PDF.";
-   }
- }
package/lib/ai/functions/tokenizer.ts
@@ -1,69 +0,0 @@
- /**
-  * Optimized tokenization function that processes characters in a single pass.
-  * - Splits words & punctuation efficiently.
-  * - Handles contractions (e.g., "can't", "it's").
-  */
- function optimizedTokenize(text: string): string[] {
-   const tokens: string[] = [];
-   let currentToken = '';
-
-   for (let i = 0; i < text.length; i++) {
-     const char = text[i];
-
-     if (char.match(/\w/)) {
-       currentToken += char;
-     } else {
-       if (currentToken) {
-         tokens.push(currentToken);
-         currentToken = '';
-       }
-       if (char.match(/[^\s]/)) {
-         tokens.push(char);
-       }
-     }
-   }
-
-   if (currentToken) {
-     tokens.push(currentToken);
-   }
-
-   return tokens;
- }
-
- /**
-  * Returns the token count using the optimized tokenizer.
-  */
- function optimizedTokenCount(text: string): number {
-   return optimizedTokenize(text).length;
- }
-
- /**
-  * Trims the user’s prompt to ensure total tokens do not exceed 3964.
-  * - Keeps the full instruction intact.
-  * - Trims only the user’s prompt if needed.
-  */
- export function tokenLimit(instruction: string, prompt: string, maxTokens = 3964): { instruction: string, prompt: string } {
-   const instructionTokens = optimizedTokenCount(instruction);
-
-   const remainingTokens = maxTokens - instructionTokens;
-
-   if (remainingTokens <= 0) {
-     throw new Error("Instruction alone exceeds max token limit! This should never happen.");
-   }
-
-   return {
-     instruction,
-     prompt: optimizedTrimText(prompt, remainingTokens)
-   };
- }
-
- /**
-  * Trims the input text to fit within the token limit.
-  */
- function optimizedTrimText(text: string, maxTokens: number): string {
-   const tokens = optimizedTokenize(text);
-   if (tokens.length <= maxTokens) {
-     return text;
-   }
-   return tokens.slice(0, maxTokens).join(' ');
- }
package/lib/ai/functions/validOptions.ts
@@ -1,116 +0,0 @@
- import Groq from "groq-sdk";
- import OpenAI from "openai";
-
- // Define static models as readonly to prevent accidental modifications
- export const HERC_CHAT_MODELS = Object.freeze([
-   "v3", "v3-32k", "turbo", "turbo-16k", "gemini",
-   "llama3-70b", "llama3-8b", "mixtral-8x7b",
-   "gemma-7b", "gemma2-9b",
- ]);
-
- export const HERC_IMAGE_MODELS = Object.freeze([
-   "v3", "lexica", "prodia", "prodia-v2", "simurg",
-   "animefy", "raava", "shonin",
- ]);
-
- // API Keys (ensure they are securely stored, do not hardcode in production)
- const GROQ_API_KEY = "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";
- const ELECTRON_HUB_API_KEY = "ek-3gmOPmvuljmrl4NQrohpnp1ryNXQG5bNn08zNuzhX6bcxBrndR";
-
- /**
-  * Initializes the Groq SDK instance.
-  */
- const initializeGroqInstance = (): Groq => new Groq({ apiKey: GROQ_API_KEY });
-
- /**
-  * Fetches available models from ElectronHub API.
-  */
- export async function getElectronHubModels(apiKey?: string): Promise<string[]> {
-   try {
-     const openai = new OpenAI({
-       apiKey: apiKey ?? ELECTRON_HUB_API_KEY,
-       baseURL: "https://api.electronhub.top/v1",
-     });
-
-     const models = await openai.models.list();
-
-     // Ensure models are valid and correctly formatted
-     if (!models?.data || !Array.isArray(models.data)) {
-       console.warn("Warning: Unexpected ElectronHub models response", models);
-       return [];
-     }
-
-     return models.data.map((model) => model.id).filter(Boolean); // Filter out any undefined/null values
-   } catch (error) {
-     console.error("Error fetching ElectronHub models:", error);
-     return [];
-   }
- }
-
- /**
-  * Fetches available chat models from Groq API.
-  */
- export async function getGroqChatModels(groqInstance: Groq): Promise<string[]> {
-   try {
-     const response = await groqInstance.models.list();
-
-     // Ensure response is valid
-     if (!response?.data || !Array.isArray(response.data)) {
-       console.warn("Warning: Unexpected Groq models response", response);
-       return [];
-     }
-
-     return response.data
-       .map((model) => model.id)
-       .filter(
-         (id) =>
-           id &&
-           !id.startsWith("whisper") &&
-           !id.startsWith("llava") &&
-           !id.startsWith("distil-whisper")
-       ); // Remove unwanted prefixes
-   } catch (error) {
-     console.error("Error fetching Groq models:", error);
-     return [];
-   }
- }
-
- /**
-  * Fetches and compiles all valid model options.
-  */
- async function initializeValidOptions(): Promise<{
-   validHercChatModels: readonly string[];
-   validGroqChatModels: string[];
-   validElectronModels: string[];
-   validHercImageModels: readonly string[];
-   allModels: string[];
- }> {
-   const groq = initializeGroqInstance();
-
-   // Fetch models in parallel for better performance
-   const [groqModels, electronModels] = await Promise.all([
-     getGroqChatModels(groq),
-     getElectronHubModels(),
-   ]);
-
-   // Use Set to prevent duplicate entries in `allModels`
-   const allModelsSet = new Set([
-     ...HERC_IMAGE_MODELS,
-     ...HERC_CHAT_MODELS,
-     ...groqModels,
-     ...electronModels,
-   ]);
-
-   return {
-     validHercChatModels: HERC_CHAT_MODELS,
-     validGroqChatModels: groqModels,
-     validElectronModels: electronModels,
-     validHercImageModels: HERC_IMAGE_MODELS,
-     allModels: Array.from(allModelsSet), // Convert Set to array for uniqueness
-   };
- }
-
- /**
-  * Returns all valid model lists asynchronously.
-  */
- export const validateModels = async () => await initializeValidOptions();
package/lib/ai/modals/electronHub/chatmodels.ts
@@ -1,57 +0,0 @@
- import OpenAI from "openai";
- import { tokenLimit } from "../../functions/tokenizer";
- export async function electronChat({
-   ApiKey,
-   prompt,
-   modelName,
-   instruction,
- }: {
-   ApiKey?: string;
-   prompt: string;
-   modelName: string;
-   instruction?: string;
- }) {
-   try {
-     const apiKey = ApiKey || "ek-3gmOPmvuljmrl4NQrohpnp1ryNXQG5bNn08zNuzhX6bcxBrndR";
-     const openai = new OpenAI({
-       apiKey: apiKey,
-       baseURL: "https://api.electronhub.top/v1",
-     });
-
-     const models = await openai.models.list();
-     const modelExists = models.data.some((model: any) => model.id === modelName);
-     if (!modelExists) {
-       throw new Error('Invalid model name. Please check out Electron hub models for more info.');
-     }
-
-     const { instruction: finalInstruction, prompt: finalPrompt } = tokenLimit(instruction || "", prompt, 10000);
-
-     const messages: OpenAI.ChatCompletionMessageParam[] = [
-       { role: "system", content: finalInstruction },
-       { role: "user", content: finalPrompt }
-     ];
-
-     const completion = await openai.chat.completions.create({
-       model: modelName,
-       messages: messages
-     });
-
-     return completion.choices[0]?.message?.content;
-   } catch (e: any) {
-     if (e.response) {
-       if (e.response.status === 429) {
-         throw new Error(
-           "Rate limit exceeded. Please join the server at https://discord.gg/83XcjD8vgW for an API key."
-         );
-       } else if (e.response.status === 500) {
-         throw new Error("Server error. Please try again later.");
-       } else {
-         console.error("Error generating response:", e.response.data);
-         throw e;
-       }
-     } else {
-       console.error("Error generating response:", e.message);
-       throw e;
-     }
-   }
- }
package/lib/ai/modals/electronHub/imageModels.ts
@@ -1,116 +0,0 @@
- import OpenAI from "openai";
- import sharp from "sharp";
- import { ApexPainter } from "../../../utils";
- import { tokenLimit } from "../../functions/tokenizer";
-
- const paint = new ApexPainter({ type: "url" });
-
- export async function electronImagine({
-   ApiKey,
-   prompt,
-   modelName,
-   resizeOptions,
- }: {
-   ApiKey?: string;
-   prompt: string;
-   modelName: string;
-   resizeOptions?: {
-     width?: number;
-     height?: number;
-     format?: "jpeg" | "png";
-     quality?: number;
-   };
- }): Promise<string | undefined> {
-   let apiKey =
-     ApiKey || "ek-3gmOPmvuljmrl4NQrohpnp1ryNXQG5bNn08zNuzhX6bcxBrndR";
-   if (ApiKey === "eaebff6e-c7b2-477c-8edd-9aa91becf1e3") {
-     apiKey = "ek-3gmOPmvuljmrl4NQrohpnp1ryNXQG5bNn08zNuzhX6bcxBrndR";
-   }
-
-   const openai = new OpenAI({
-     apiKey: apiKey,
-     baseURL: "https://api.electronhub.top/v1",
-   });
-
-   const models = await openai.models.list();
-   const modelExists = models.data.some((model: any) => model.id === modelName);
-   if (!modelExists) {
-     throw new Error(
-       "Invalid model name. Please check out Electron Hub models for more info."
-     );
-   }
-
-   try {
-     const { prompt: trimmedPrompt } = tokenLimit("", prompt, 32000);
-
-     const response = await openai.images.generate({
-       model: modelName,
-       prompt: trimmedPrompt,
-       n: 1,
-     });
-
-     const imagesUrl = response.data;
-     const imageUrl = imagesUrl[0].url;
-
-     if (
-       !resizeOptions ||
-       ((resizeOptions.width == null &&
-         resizeOptions.height == null &&
-         resizeOptions.format == null) ||
-         (resizeOptions.width === 1024 &&
-           resizeOptions.height === 1024 &&
-           resizeOptions.format == null))
-     ) {
-       return imageUrl;
-     }
-
-     const responseFetch = await fetch(imageUrl as string);
-     const arrayBuffer = await responseFetch.arrayBuffer();
-     const imageBuffer = Buffer.from(arrayBuffer);
-
-     let imageProcessor = sharp(imageBuffer);
-
-     if (resizeOptions.width || resizeOptions.height) {
-       imageProcessor = imageProcessor.resize({
-         width: resizeOptions.width,
-         height: resizeOptions.height,
-         kernel: sharp.kernel.lanczos3,
-         withoutEnlargement: true,
-       });
-     }
-
-     if (resizeOptions.format) {
-       const quality = resizeOptions.quality || 90;
-       switch (resizeOptions.format) {
-         case "jpeg":
-           imageProcessor = imageProcessor.jpeg({ quality });
-           break;
-         case "png":
-           imageProcessor = imageProcessor.png({ quality });
-           break;
-         default:
-           throw Error("We don't support this format, only png and jpeg.");
-       }
-     }
-
-     const buffer = await imageProcessor.toBuffer();
-     const output = (await paint.outPut(buffer)) as string;
-     return output;
-   } catch (e: any) {
-     if (e.response) {
-       if (e.response.status === 429) {
-         throw new Error(
-           "Rate limit exceeded. Please join the server at https://discord.gg/83XcjD8vgW for an API key."
-         );
-       } else if (e.response.status === 500) {
-         throw new Error("Server error. Please try again later.");
-       } else {
-         console.error("Error generating response:", e.response.data);
-         throw e;
-       }
-     } else {
-       console.error("Error generating response:", e.message);
-       throw e;
-     }
-   }
- }
package/lib/ai/modals/electronHub/speechModels.ts
@@ -1,75 +0,0 @@
- import OpenAI from 'openai';
- import axios from 'axios';
- import { tokenLimit } from '../../functions/tokenizer'; // ✅ Import token limiter
-
- export async function electronSpeech({
-   ApiKey,
-   inputText,
-   modelName = 'elevenlabs',
-   personality,
- }: {
-   ApiKey?: string;
-   inputText: string;
-   modelName?: "elevenlabs" | "myshell-tts" | "deepinfra-tts" | "whisper-large-v3" | "distil-large-v3" | string;
-   personality?: string;
- }): Promise<Buffer> {
-   try {
-     const apiKey = ApiKey || "ek-3gmOPmvuljmrl4NQrohpnp1ryNXQG5bNn08zNuzhX6bcxBrndR";
-
-     const validPersonalities = [
-       'will', 'maltida', 'liam', 'jessica', 'george', 'lily', 'sana',
-       'wahab', 'martin', 'darine', 'guillaume', 'leonie', 'kurt', 'leo',
-       'shakuntala', 'maciej', 'aneta', 'gabriela', 'juan'
-     ];
-
-     if (personality && !validPersonalities.includes(personality)) {
-       throw new Error(`Invalid personality. Please choose from the following: ${validPersonalities.join(', ')}`);
-     }
-
-     const openai = new OpenAI({
-       apiKey: apiKey,
-       baseURL: "https://api.electronhub.top/v1",
-     });
-
-     const models = await openai.models.list();
-
-     const modelExists = models.data.some((model: any) => model.id === modelName);
-     if (!modelExists) {
-       throw new Error('Invalid model name. Please check out Electron hub models for more info.');
-     }
-
-     // ✅ Limit input text to max 3964 tokens
-     const { prompt: trimmedInputText } = tokenLimit("", inputText, 2500);
-
-     const response = await axios.post(
-       'https://api.electronhub.top/v1/audio/speech',
-       {
-         model: modelName,
-         voice: personality || 'will',
-         input: trimmedInputText,
-       },
-       {
-         headers: {
-           Authorization: `Bearer ${apiKey}`,
-         },
-         responseType: 'arraybuffer',
-       }
-     );
-
-     return response.data;
-   } catch (error: any) {
-     if (error.response) {
-       if (error.response.status === 429) {
-         throw new Error("Rate limit exceeded. Please join the server at https://discord.gg/83XcjD8vgW for an API key.");
-       } else if (error.response.status === 500) {
-         throw new Error("Server error. Please try again later.");
-       } else {
-         console.error("Error generating speech:", error.response.data);
-         throw error;
-       }
-     } else {
-       console.error("Error generating speech:", error.message);
-       throw error;
-     }
-   }
- }
package/lib/ai/modals/electronHub/videoModels.ts
@@ -1,75 +0,0 @@
- import axios from 'axios';
- import OpenAI from 'openai';
-
- export async function electronVideo({
-   ApiKey,
-   prompt,
-   modelName = 't2v-turbo',
- }: {
-   ApiKey?: string;
-   prompt: string;
-   modelName?: string;
- }): Promise<string> {
-   const apiKey = ApiKey || "ek-3gmOPmvuljmrl4NQrohpnp1ryNXQG5bNn08zNuzhX6bcxBrndR";
-   const headers = {
-     Authorization: `Bearer ${apiKey}`,
-   };
-
-   const openai = new OpenAI({
-     apiKey: apiKey,
-     baseURL: "https://api.electronhub.top/v1",
-   });
-
-   try {
-     const models = await openai.models.list();
-     const modelExists = models.data.some((model: any) => model.id === modelName);
-
-     if (!modelExists) {
-       throw new Error(`Invalid model name: ${modelName}. Please check available models.`);
-     }
-
-     const payload = {
-       model: modelName,
-       prompt: prompt,
-     };
-
-     const response = await axios.post(
-       'https://api.electronhub.top/v1/videos/generations',
-       payload,
-       {
-         headers,
-         responseType: 'stream',
-       }
-     );
-
-     return new Promise((resolve, reject) => {
-       response.data.on('data', (chunk: Buffer) => {
-         const line = chunk.toString();
-         try {
-           const data = JSON.parse(line);
-           if (data.heartbeat) {
-             console.log('Pending...');
-           } else if (data[0]?.url) {
-             console.log('Video generation completed.');
-             resolve(data[0].url);
-           }
-         } catch (e) {
-           console.error('Error parsing chunk:', e);
-           reject(new Error('Error parsing response from video generation.'));
-         }
-       });
-
-       response.data.on('end', () => {
-         console.log('Video generation stream ended.');
-       });
-
-       response.data.on('error', (error: any) => {
-         reject(new Error('Error streaming video generation: ' + error.message));
-       });
-     });
-
-   } catch (error: any) {
-     console.error('Error generating video:', error.message);
-     throw new Error('Error generating video: ' + error.message);
-   }
- }
package/lib/ai/modals/groq/chatgroq.ts
@@ -1,78 +0,0 @@
- import Groq from 'groq-sdk';
-
- interface groqOptions {
-   API_KEY?: string;
-   prompt: string;
-   apiName: string;
-   instruction?: string;
- }
-
- interface ChatCompletionMessageParam {
-   role: 'system' | 'user' | 'assistant';
-   content: string;
- }
-
- const getModels = async (groqInstance: any) => {
-   const allModels = await groqInstance.models.list();
-
-   const filteredModels = allModels.data.filter((model: any) =>
-     !model.id.startsWith('whisper') && !model.id.startsWith('llava') && !model.id.startsWith('distil-whisper')
-   );
-
-   return filteredModels;
- };
-
- export async function chatGroq({ API_KEY, prompt, apiName, instruction }: groqOptions): Promise<string> {
-   try {
-     const groq = new Groq({
-       apiKey: API_KEY || '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',
-     });
-
-     const models = await getModels(groq);
-     const modelExists = models.some((model: any) => model.id === apiName);
-
-     if (!modelExists) {
-       throw new Error('Invalid model name provided. Please check the available models on Groq.');
-     }
-
-     const messages: ChatCompletionMessageParam[] = [];
-
-     if (instruction) {
-       messages.push({
-         role: 'system',
-         content: instruction,
-       });
-     }
-
-     messages.push({
-       role: 'user',
-       content: prompt,
-     });
-
-     const chatCompletion = await groq.chat.completions.create({
-       messages: messages,
-       model: apiName,
-       max_tokens: 8192,
-     });
-
-     const response = chatCompletion.choices[0]?.message?.content || 'Rate limit';
-     return response;
-
-   } catch (err: any) {
-     if (err instanceof Groq.APIError) {
-       if (err.status === 400) {
-         return 'Bad request. Try again after a minute please.';
-       } else if (err.status === 429) {
-         return 'Rate limit. Try again after one minute or provide your own API key.';
-       } else if (err.status === 401 || !err.status) {
-         throw new Error('Invalid API key provided.');
-       } else {
-         console.error(err);
-         return 'Unknown error occurred.';
-       }
-     } else {
-       console.error(err);
-       return 'Unknown error occurred.';
-     }
-   }
- }