@mindstudio-ai/local-model-tunnel 0.5.7 → 0.5.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +183 -11
- package/dist/chunk-C3JPRLSS.js +1485 -0
- package/dist/chunk-C3JPRLSS.js.map +1 -0
- package/dist/{chunk-KLOTDVWL.js → chunk-QALGC7T7.js} +56 -310
- package/dist/chunk-QALGC7T7.js.map +1 -0
- package/dist/chunk-WFQXIMTS.js +378 -0
- package/dist/chunk-WFQXIMTS.js.map +1 -0
- package/dist/cli.js +18 -2
- package/dist/cli.js.map +1 -1
- package/dist/headless.d.ts +113 -0
- package/dist/headless.js +8 -0
- package/dist/headless.js.map +1 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +7 -2
- package/dist/{tui-UNFZSO7R.js → tui-4PJCFILV.js} +1467 -318
- package/dist/tui-4PJCFILV.js.map +1 -0
- package/package.json +3 -1
- package/dist/chunk-KLOTDVWL.js.map +0 -1
- package/dist/tui-UNFZSO7R.js.map +0 -1
package/dist/{chunk-KLOTDVWL.js → chunk-QALGC7T7.js}

@@ -1,250 +1,13 @@
-
-
-
-
-
-
-
-
-
-
-    providerBaseUrls: {},
-    providerInstallPaths: {},
-    localInterfaces: {},
-    environments: {
-      prod: {
-        apiBaseUrl: "https://api.mindstudio.ai"
-      },
-      local: {
-        apiBaseUrl: "http://localhost:3129"
-      }
-    }
-  }
-});
-function getEnvironment() {
-  return config.get("environment");
-}
-function getEnvConfig() {
-  const env = getEnvironment();
-  return config.get(`environments.${env}`);
-}
-function setEnvConfig(key, value) {
-  const env = getEnvironment();
-  config.set(`environments.${env}.${key}`, value);
-}
-function getApiKey() {
-  return getEnvConfig().apiKey;
-}
-function setApiKey(key) {
-  setEnvConfig("apiKey", key);
-}
-function getUserId() {
-  return getEnvConfig().userId;
-}
-function setUserId(id) {
-  setEnvConfig("userId", id);
-}
-function getApiBaseUrl() {
-  return getEnvConfig().apiBaseUrl;
-}
-function getConfigPath() {
-  return config.path;
-}
-function getProviderBaseUrl(name, defaultUrl) {
-  const urls = config.get("providerBaseUrls");
-  return urls[name] ?? defaultUrl;
-}
-function setProviderBaseUrl(name, url) {
-  const urls = config.get("providerBaseUrls");
-  urls[name] = url;
-  config.set("providerBaseUrls", urls);
-}
-function getProviderInstallPath(name) {
-  const paths = config.get("providerInstallPaths");
-  return paths[name];
-}
-function setProviderInstallPath(name, installPath) {
-  const paths = config.get("providerInstallPaths");
-  paths[name] = installPath;
-  config.set("providerInstallPaths", paths);
-}
-function getLocalInterfacesDir() {
-  return path.join(os.homedir(), ".mindstudio-local-tunnel", "interfaces");
-}
-function getLocalInterfacePath(key) {
-  const interfaces = config.get("localInterfaces");
-  return interfaces[key];
-}
-function setLocalInterfacePath(key, dirPath) {
-  const interfaces = config.get("localInterfaces");
-  interfaces[key] = dirPath;
-  config.set("localInterfaces", interfaces);
-}
-function deleteLocalInterfacePath(key) {
-  const interfaces = config.get("localInterfaces");
-  delete interfaces[key];
-  config.set("localInterfaces", interfaces);
-}
-
-// src/api.ts
-function getHeaders() {
-  const apiKey = getApiKey();
-  if (!apiKey) {
-    throw new Error("Not authenticated. Run: mindstudio-local auth");
-  }
-  const headers = {
-    Authorization: `Bearer ${apiKey}`,
-    "Content-Type": "application/json"
-  };
-  const userId = getUserId();
-  if (userId) {
-    headers["x-user-id"] = userId;
-  }
-  return headers;
-}
-async function pollForRequest(modelIds) {
-  const baseUrl = getApiBaseUrl();
-  const modelIdsParam = modelIds.join(",");
-  const response = await fetch(
-    `${baseUrl}/v1/local-models/poll?modelIds=${encodeURIComponent(modelIdsParam)}`,
-    {
-      method: "GET",
-      headers: getHeaders()
-    }
-  );
-  if (response.status === 204) {
-    return null;
-  }
-  if (!response.ok) {
-    const error = await response.text();
-    throw new Error(`Poll failed: ${response.status} ${error}`);
-  }
-  const data = await response.json();
-  return data.request;
-}
-async function submitProgress(requestId, content, type = "chunk") {
-  const baseUrl = getApiBaseUrl();
-  const response = await fetch(
-    `${baseUrl}/v1/local-models/requests/${requestId}/progress`,
-    {
-      method: "POST",
-      headers: getHeaders(),
-      body: JSON.stringify({ type, content })
-    }
-  );
-  if (!response.ok) {
-    console.warn(`Progress update failed: ${response.status}`);
-  }
-}
-async function submitResult(requestId, success, result, error) {
-  const baseUrl = getApiBaseUrl();
-  const response = await fetch(
-    `${baseUrl}/v1/local-models/requests/${requestId}/result`,
-    {
-      method: "POST",
-      headers: getHeaders(),
-      body: JSON.stringify({ success, result, error })
-    }
-  );
-  if (!response.ok) {
-    const errorText = await response.text();
-    throw new Error(
-      `Result submission failed: ${response.status} ${errorText}`
-    );
-  }
-}
-async function verifyApiKey() {
-  const baseUrl = getApiBaseUrl();
-  try {
-    const response = await fetch(`${baseUrl}/v1/local-models/verify-api-key`, {
-      method: "GET",
-      headers: getHeaders()
-    });
-    return response.status === 204 || response.ok;
-  } catch {
-    return false;
-  }
-}
-async function syncModels(models) {
-  const baseUrl = getApiBaseUrl();
-  const response = await fetch(`${baseUrl}/v1/local-models/models/sync`, {
-    method: "POST",
-    headers: getHeaders(),
-    body: JSON.stringify({ models })
-  });
-  if (!response.ok) {
-    const errorText = await response.text();
-    throw new Error(`Sync failed: ${response.status} ${errorText}`);
-  }
-}
-async function getSyncedModels() {
-  const baseUrl = getApiBaseUrl();
-  const response = await fetch(`${baseUrl}/v1/local-models/models`, {
-    method: "GET",
-    headers: getHeaders()
-  });
-  if (!response.ok) {
-    const errorText = await response.text();
-    throw new Error(
-      `Failed to fetch synced models: ${response.status} ${errorText}`
-    );
-  }
-  const data = await response.json();
-  return data.models;
-}
-async function requestDeviceAuth() {
-  const baseUrl = getApiBaseUrl();
-  const response = await fetch(`${baseUrl}/developer/v2/request-auth-url`, {
-    method: "GET",
-    headers: { "Content-Type": "application/json" }
-  });
-  if (!response.ok) {
-    const error = await response.text();
-    throw new Error(`Device auth request failed: ${response.status} ${error}`);
-  }
-  const data = await response.json();
-  return data;
-}
-async function pollDeviceAuth(token) {
-  const baseUrl = getApiBaseUrl();
-  const response = await fetch(`${baseUrl}/developer/v2/poll-auth-url`, {
-    method: "POST",
-    headers: { "Content-Type": "application/json" },
-    body: JSON.stringify({ token })
-  });
-  if (!response.ok) {
-    const error = await response.text();
-    throw new Error(`Device auth poll failed: ${response.status} ${error}`);
-  }
-  const data = await response.json();
-  return data;
-}
-async function getEditorSessions() {
-  const baseUrl = getApiBaseUrl();
-  const response = await fetch(`${baseUrl}/v1/local-editor/sessions`, {
-    method: "GET",
-    headers: getHeaders()
-  });
-  if (!response.ok) {
-    const errorText = await response.text();
-    throw new Error(
-      `Failed to fetch editor sessions: ${response.status} ${errorText}`
-    );
-  }
-  const data = await response.json();
-  return data.editors;
-}
-async function disconnectHeartbeat() {
-  const baseUrl = getApiBaseUrl();
-  const response = await fetch(`${baseUrl}/v1/local-models/disconnect`, {
-    method: "POST",
-    headers: getHeaders()
-  });
-  if (!response.ok) {
-    const error = await response.text();
-    throw new Error(`Heartbeat disconnect failed: ${response.status} ${error}`);
-  }
-}
+import {
+  disconnectHeartbeat,
+  getProviderBaseUrl,
+  getProviderInstallPath,
+  pollForRequest,
+  setProviderBaseUrl,
+  setProviderInstallPath,
+  submitProgress,
+  submitResult
+} from "./chunk-C3JPRLSS.js";
 
 // src/providers/ollama/index.ts
 import { Ollama } from "ollama";
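The helpers removed here (and now re-imported from chunk-C3JPRLSS.js) spell out the tunnel's request protocol: poll `/v1/local-models/poll`, stream interim output to `/requests/:id/progress`, and post the final outcome to `/requests/:id/result`. A minimal sketch of how a runner loop might combine them — the `runOnce` / `handleRequest` names, the `request.id` field, and the caller-managed retry delay are assumptions, not code from the package:

```js
import {
  pollForRequest,
  submitProgress,
  submitResult
} from "./chunk-C3JPRLSS.js";

// Hypothetical runner step built from the helpers shown in the diff above.
// `modelIds` is the list of local model ids; `handleRequest` is a caller-supplied executor.
async function runOnce(modelIds, handleRequest) {
  const request = await pollForRequest(modelIds); // null when the server answers 204 (nothing queued)
  if (!request) {
    return false; // caller decides how long to wait before polling again
  }
  try {
    const result = await handleRequest(request, async (chunk) => {
      await submitProgress(request.id, chunk); // type defaults to "chunk"
    });
    await submitResult(request.id, true, result);
  } catch (err) {
    await submitResult(request.id, false, void 0, String(err));
  }
  return true;
}
```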
@@ -339,8 +102,8 @@ var ollama_default = new OllamaProvider();
 
 // src/providers/lmstudio/index.ts
 import * as fs from "fs";
-import * as
-import * as
+import * as path from "path";
+import * as os from "os";
 
 // src/providers/lmstudio/readme.md
 var readme_default2 = '# LM Studio\n\nLM Studio is a desktop app for running text generation models locally. No terminal needed -- everything is done through the app. Once its server is running, MindStudio will detect it automatically.\n\n**Default port:** 1234\n**Website:** https://lmstudio.ai\n**GitHub:** https://github.com/lmstudio-ai\n\n## Step 1: Install LM Studio\n\n1. Go to https://lmstudio.ai\n2. Click the download button for your operating system\n3. Open the downloaded file and install it like any other app\n\n## Step 2: Download a Model\n\n1. Open LM Studio\n2. Click the **Discover** tab on the left sidebar\n3. Search for a model (see suggestions below)\n4. Click the download button next to the model you want\n5. Wait for the download to finish\n\nGood starter models:\n\n- **Llama 3.2** -- great all-around model, fast\n- **Mistral** -- efficient and capable\n- **Phi-3** -- compact, runs well on most machines\n\n## Step 3: Start the Server\n\nThis is the key step -- LM Studio needs to be running its local server for MindStudio to connect.\n\n1. In LM Studio, click the **Developer** tab on the left sidebar\n2. Select a model from the dropdown at the top if one isn\'t already loaded\n3. Click **Start Server**\n\nYou should see a green indicator showing the server is running on `http://localhost:1234`.\n\n**Important:** Just opening LM Studio is not enough. You must start the server from the Developer tab.\n\nLeave LM Studio open with the server running while you use MindStudio. Go back to the tunnel and select **Refresh Providers** -- your models should appear.\n\n## Troubleshooting\n\n- **MindStudio says LM Studio is "not running"** -- Make sure you started the server in the Developer tab. The green indicator should be visible.\n\n- **Server is running but no models show up** -- You need to load a model in the Developer tab. Select one from the dropdown at the top of the Developer tab before starting the server.\n\n- **Port conflict** -- If something else is using port 1234, you can change the port in the Developer tab settings.\n';
@@ -391,12 +154,12 @@ var LMStudioProvider = class {
     const possiblePaths = {
       darwin: ["/Applications/LM Studio.app"],
       linux: [
-
+        path.join(os.homedir(), ".local/share/LM Studio"),
         "/opt/lm-studio"
       ],
       win32: [
-
-
+        path.join(process.env.LOCALAPPDATA || "", "LM Studio"),
+        path.join(process.env.PROGRAMFILES || "", "LM Studio")
       ]
     };
     const paths = possiblePaths[process.platform] || [];
@@ -486,8 +249,8 @@ var lmstudio_default = new LMStudioProvider();
 
 // src/providers/stable-diffusion/index.ts
 import * as fs2 from "fs";
-import * as
-import * as
+import * as path2 from "path";
+import * as os2 from "os";
 
 // src/providers/stable-diffusion/readme.md
 var readme_default3 = '# Stable Diffusion WebUI\n\nAUTOMATIC1111\'s Stable Diffusion WebUI runs image generation models locally. Once the server is running with at least one model, MindStudio will detect it automatically.\n\n**Default port:** 7860\n**GitHub:** https://github.com/AUTOMATIC1111/stable-diffusion-webui\n\n## What You\'ll Need\n\n- **Python 3.10 or newer** -- Check by opening a terminal and typing `python3 --version`. If you don\'t have it, download from https://www.python.org/downloads/\n\n- **Git** -- Check by typing `git --version`. If you don\'t have it, download from https://git-scm.com/downloads\n\n## Step 1: Install the WebUI\n\nOpen a terminal and paste this command:\n\n```\ngit clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git ~/stable-diffusion-webui\n```\n\n**Windows users**, use this instead:\n\n```\ngit clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git %USERPROFILE%\\stable-diffusion-webui\n```\n\n## Step 2: Download a Model\n\nYou need at least one model file for MindStudio to use. Model files have the `.safetensors` extension (typically 2-7 GB).\n\n1. Browse models at https://civitai.com or https://huggingface.co\n2. Download a `.safetensors` checkpoint file\n3. Move the file into this folder:\n\n```\n~/stable-diffusion-webui/models/Stable-diffusion/\n```\n\nGood starter models:\n\n- **Stable Diffusion XL (SDXL)** -- high quality, 1024x1024\n- **Stable Diffusion 1.5** -- fast, widely supported\n\n## Step 3: Start the Server\n\nOpen a terminal and run:\n\n```\ncd ~/stable-diffusion-webui && ./webui.sh --api\n```\n\n**Windows users:**\n\n```\ncd %USERPROFILE%\\stable-diffusion-webui && webui-user.bat --api\n```\n\nThe first time you run this it will take several minutes to install dependencies. This is normal -- let it finish.\n\n**Important:** The `--api` flag is required. Without it, MindStudio cannot send requests to the server.\n\nLeave this terminal window open while using MindStudio. Once the server is ready, go back to the tunnel and select **Refresh Providers** -- your models should appear.\n\n## Troubleshooting\n\n- **MindStudio says WebUI is "not running"** -- Make sure you included `--api` when launching. The terminal should show the server at `http://127.0.0.1:7860`.\n\n- **Server is running but no models show up** -- Make sure your `.safetensors` file is directly in the `models/Stable-diffusion/` folder, not inside a subfolder. Restart the server after adding new model files.\n\n- **"Python not found"** -- Python 3.10+ is required. Download from https://www.python.org/downloads/. On Windows, check "Add Python to PATH" during installation.\n\n- **Errors during first launch** -- Delete the `venv` folder inside `stable-diffusion-webui` and run the launch command again to reinstall dependencies from scratch.\n\n- **"CUDA out of memory"** -- Your GPU doesn\'t have enough memory. Add `--medvram` or `--lowvram` to the launch command: `./webui.sh --api --medvram`\n';
@@ -537,13 +300,13 @@ var StableDiffusionProvider = class {
     const savedPath = getProviderInstallPath(this.name);
     const possiblePaths = [
       ...savedPath ? [savedPath] : [],
-
-
-
+      path2.join(os2.homedir(), "stable-diffusion-webui"),
+      path2.join(os2.homedir(), "Projects", "stable-diffusion-webui"),
+      path2.join(os2.homedir(), "Code", "stable-diffusion-webui")
     ];
     let installed = false;
     for (const p of possiblePaths) {
-      if (fs2.existsSync(
+      if (fs2.existsSync(path2.join(p, "launch.py")) || fs2.existsSync(path2.join(p, "webui.sh")) || fs2.existsSync(path2.join(p, "webui.bat"))) {
         installed = true;
         break;
       }
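The detection pattern visible in this hunk — a list of candidate install directories probed with `fs.existsSync` for known launcher files — is easy to read in isolation. A standalone sketch, with the candidate paths and marker filenames copied from the diff (the `findWebuiInstall` function name, the `extraDirs` parameter, and the return shape are assumptions):

```js
import * as fs from "fs";
import * as os from "os";
import * as path from "path";

// Candidate install locations and marker files, as listed in the hunk above.
const CANDIDATE_DIRS = [
  path.join(os.homedir(), "stable-diffusion-webui"),
  path.join(os.homedir(), "Projects", "stable-diffusion-webui"),
  path.join(os.homedir(), "Code", "stable-diffusion-webui")
];
const MARKER_FILES = ["launch.py", "webui.sh", "webui.bat"];

// Hypothetical helper: returns the first directory that looks like a WebUI checkout, or null.
function findWebuiInstall(extraDirs = []) {
  for (const dir of [...extraDirs, ...CANDIDATE_DIRS]) {
    if (MARKER_FILES.some((f) => fs.existsSync(path.join(dir, f)))) {
      return dir;
    }
  }
  return null;
}

console.log(findWebuiInstall() ?? "stable-diffusion-webui not found");
```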
@@ -774,23 +537,23 @@ var StableDiffusionProvider = class {
 var stable_diffusion_default = new StableDiffusionProvider();
 
 // src/providers/comfyui/index.ts
-import * as
+import * as path6 from "path";
 
 // src/providers/comfyui/workflow-discovery.ts
 import * as fs4 from "fs";
-import * as
+import * as path4 from "path";
 
 // src/providers/comfyui/converter-install.ts
 import * as fs3 from "fs";
-import * as
+import * as path3 from "path";
 var CONVERTER_DIR = "comfyui-workflow-to-api-converter-endpoint";
 var GITHUB_RAW_BASE = "https://raw.githubusercontent.com/SethRobinson/comfyui-workflow-to-api-converter-endpoint/main";
 var FILES_TO_DOWNLOAD = ["__init__.py", "workflow_converter.py"];
 async function ensureConverterInstalled(installPath) {
-  const customNodesDir =
-  const converterDir =
+  const customNodesDir = path3.join(installPath, "custom_nodes");
+  const converterDir = path3.join(customNodesDir, CONVERTER_DIR);
   const allFilesExist = FILES_TO_DOWNLOAD.every(
-    (f) => fs3.existsSync(
+    (f) => fs3.existsSync(path3.join(converterDir, f))
   );
   if (allFilesExist) {
     return true;
@@ -811,7 +574,7 @@ async function ensureConverterInstalled(installPath) {
        throw new Error(`Failed to download ${filename}: ${response.status}`);
      }
      const content = await response.text();
-     fs3.writeFileSync(
+     fs3.writeFileSync(path3.join(converterDir, filename), content, "utf-8");
    }
    return true;
  } catch {
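Between this hunk and the previous one, most of `ensureConverterInstalled` is visible: it checks `custom_nodes/<CONVERTER_DIR>` for the two converter files and otherwise fetches them from `GITHUB_RAW_BASE`. A sketch of that download branch under those assumptions — the fetch URL construction and the `mkdirSync` call are inferred; only the constants, the error message, and the text/write calls appear in the diff:

```js
import * as fs from "fs";
import * as path from "path";

const CONVERTER_DIR = "comfyui-workflow-to-api-converter-endpoint";
const GITHUB_RAW_BASE =
  "https://raw.githubusercontent.com/SethRobinson/comfyui-workflow-to-api-converter-endpoint/main";
const FILES_TO_DOWNLOAD = ["__init__.py", "workflow_converter.py"];

// Hypothetical reconstruction of the download branch of ensureConverterInstalled().
async function downloadConverter(installPath) {
  const converterDir = path.join(installPath, "custom_nodes", CONVERTER_DIR);
  fs.mkdirSync(converterDir, { recursive: true }); // assumed; not shown in the diff
  for (const filename of FILES_TO_DOWNLOAD) {
    const response = await fetch(`${GITHUB_RAW_BASE}/${filename}`); // URL shape assumed
    if (!response.ok) {
      throw new Error(`Failed to download ${filename}: ${response.status}`);
    }
    const content = await response.text();
    fs.writeFileSync(path.join(converterDir, filename), content, "utf-8");
  }
  return true;
}
```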
@@ -891,7 +654,7 @@ async function discoverWorkflows(baseUrl, installPath) {
        continue;
      }
      const capability = detectCapability(apiWorkflow);
-     const name =
+     const name = path4.basename(file, path4.extname(file));
      converted[capability].push({ name, workflow: apiWorkflow });
    } catch {
    }
@@ -939,7 +702,7 @@ async function listWorkflowFiles(baseUrl, installPath) {
  } catch {
  }
  if (installPath) {
-   const workflowsDir =
+   const workflowsDir = path4.join(installPath, "user", "default", "workflows");
    return scanDirectory(workflowsDir);
  }
  return [];
@@ -950,10 +713,10 @@ function scanDirectory(dir) {
  try {
    const entries = fs4.readdirSync(dir, { withFileTypes: true });
    for (const entry of entries) {
-     const fullPath =
+     const fullPath = path4.join(dir, entry.name);
      if (entry.isDirectory()) {
        results.push(
-         ...scanDirectory(fullPath).map((f) =>
+         ...scanDirectory(fullPath).map((f) => path4.join(entry.name, f))
        );
      } else if (entry.name.endsWith(".json")) {
        results.push(entry.name);
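`scanDirectory` is almost fully visible in this hunk: it walks a directory recursively and returns `.json` filenames relative to the starting directory. A self-contained version for reference — the `results` declaration and the bare catch around the whole walk are assumed from context; the rest mirrors the diff:

```js
import * as fs from "fs";
import * as path from "path";

// Recursively collect .json files under `dir` as paths relative to `dir`
// (mirrors scanDirectory in the chunk shown above).
function scanDirectory(dir) {
  const results = []; // assumed; the declaration sits outside the visible hunk
  try {
    const entries = fs.readdirSync(dir, { withFileTypes: true });
    for (const entry of entries) {
      const fullPath = path.join(dir, entry.name);
      if (entry.isDirectory()) {
        results.push(
          ...scanDirectory(fullPath).map((f) => path.join(entry.name, f))
        );
      } else if (entry.name.endsWith(".json")) {
        results.push(entry.name);
      }
    }
  } catch {
    // unreadable directories are silently skipped (matches the bare catch in the chunk)
  }
  return results;
}
```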
@@ -976,7 +739,7 @@ async function fetchWorkflowJson(baseUrl, installPath, filePath) {
  } catch {
  }
  if (installPath) {
-   const fullPath =
+   const fullPath = path4.join(
      installPath,
      "user",
      "default",
@@ -1021,7 +784,7 @@ function detectCapability(apiWorkflow) {
 }
 
 // src/providers/comfyui/workflow-executor.ts
-import * as
+import * as path5 from "path";
 async function executeWorkflow(options) {
   const { baseUrl, workflow, onProgress } = options;
   const clientId = `mindstudio_${Date.now()}_${Math.random().toString(36).slice(2)}`;
@@ -1086,7 +849,7 @@ async function executeWorkflow(options) {
  }
  const fileBuffer = await fileResponse.arrayBuffer();
  const dataBase64 = Buffer.from(fileBuffer).toString("base64");
- const ext =
+ const ext = path5.extname(outputFile.filename).toLowerCase();
  const mimeType = getMimeType(ext);
  return { dataBase64, mimeType, filename: outputFile.filename };
 }
@@ -1240,7 +1003,7 @@ var ComfyUIProvider = class {
      const data = await response.json();
      const customNodesPaths = data.custom_nodes;
      if (!customNodesPaths || customNodesPaths.length === 0) return null;
-     const installPath =
+     const installPath = path6.dirname(customNodesPaths[0]);
      setProviderInstallPath(this.name, installPath);
      return installPath;
    } catch {
@@ -1533,19 +1296,19 @@ var TunnelRunner = class {
      throw new Error(`Provider does not support image generation`);
    }
    const prompt = request.payload.prompt || "";
-   const
+   const config = request.payload.config || {};
    const result = await provider.generateImage(
      localModelName,
      prompt,
      {
-       negativePrompt:
-       width:
-       height:
-       steps:
-       cfgScale:
-       seed:
-       sampler:
-       workflow:
+       negativePrompt: config.negativePrompt,
+       width: config.width,
+       height: config.height,
+       steps: config.steps,
+       cfgScale: config.cfgScale,
+       seed: config.seed,
+       sampler: config.sampler,
+       workflow: config.workflow
      },
      async (progress) => {
        await submitProgress(
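This hunk (and the matching video hunk below) shows TunnelRunner reading everything from `request.payload.config`. The request shape that implies, written out as an example — only `payload.prompt` and the `config` keys are confirmed by the diff; the `id` field name and the concrete values are assumptions:

```js
// Hypothetical image-generation request as TunnelRunner appears to expect it.
const exampleImageRequest = {
  id: "req_123",            // assumed field name; submitProgress/submitResult need a request id
  payload: {
    prompt: "a lighthouse at dusk, oil painting",
    config: {
      negativePrompt: "blurry, low quality",
      width: 1024,
      height: 1024,
      steps: 30,
      cfgScale: 7,
      seed: 42,
      sampler: "Euler a"    // image-only; the video path swaps sampler for numFrames/fps
      // workflow: optional ComfyUI workflow override, per the keys visible in the diff
    }
  }
};
```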
@@ -1578,20 +1341,20 @@ var TunnelRunner = class {
      throw new Error(`Provider does not support video generation`);
    }
    const prompt = request.payload.prompt || "";
-   const
+   const config = request.payload.config || {};
    const result = await provider.generateVideo(
      localModelName,
      prompt,
      {
-       negativePrompt:
-       width:
-       height:
-       numFrames:
-       fps:
-       steps:
-       cfgScale:
-       seed:
-       workflow:
+       negativePrompt: config.negativePrompt,
+       width: config.width,
+       height: config.height,
+       numFrames: config.numFrames,
+       fps: config.fps,
+       steps: config.steps,
+       cfgScale: config.cfgScale,
+       seed: config.seed,
+       workflow: config.workflow
      },
      async (progress) => {
        await submitProgress(
@@ -1627,26 +1390,9 @@ var TunnelRunner = class {
 };
 
 export {
-  getEnvironment,
-  getApiKey,
-  setApiKey,
-  getUserId,
-  setUserId,
-  getApiBaseUrl,
-  getConfigPath,
-  getLocalInterfacesDir,
-  getLocalInterfacePath,
-  setLocalInterfacePath,
-  deleteLocalInterfacePath,
-  verifyApiKey,
-  syncModels,
-  getSyncedModels,
-  requestDeviceAuth,
-  pollDeviceAuth,
-  getEditorSessions,
   detectAllProviderStatuses,
   discoverAllModelsWithParameters,
   requestEvents,
   TunnelRunner
 };
-//# sourceMappingURL=chunk-
+//# sourceMappingURL=chunk-QALGC7T7.js.map