@mindstudio-ai/local-model-tunnel 0.2.0 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +30 -115
- package/dist/chunk-PTK4SJQK.js +1768 -0
- package/dist/chunk-PTK4SJQK.js.map +1 -0
- package/dist/cli.d.ts +0 -2
- package/dist/cli.js +8 -517
- package/dist/cli.js.map +1 -1
- package/dist/index.d.ts +24 -5
- package/dist/index.js +6 -13
- package/dist/index.js.map +1 -1
- package/dist/tui-56JFPKBP.js +1561 -0
- package/dist/tui-56JFPKBP.js.map +1 -0
- package/package.json +11 -4
- package/dist/api.d.ts +0 -88
- package/dist/api.d.ts.map +0 -1
- package/dist/api.js +0 -168
- package/dist/api.js.map +0 -1
- package/dist/cli.d.ts.map +0 -1
- package/dist/config.d.ts +0 -27
- package/dist/config.d.ts.map +0 -1
- package/dist/config.js +0 -109
- package/dist/config.js.map +0 -1
- package/dist/helpers.d.ts +0 -4
- package/dist/helpers.d.ts.map +0 -1
- package/dist/helpers.js +0 -33
- package/dist/helpers.js.map +0 -1
- package/dist/index.d.ts.map +0 -1
- package/dist/ollama.d.ts +0 -11
- package/dist/ollama.d.ts.map +0 -1
- package/dist/ollama.js +0 -36
- package/dist/ollama.js.map +0 -1
- package/dist/providers/comfyui.d.ts +0 -29
- package/dist/providers/comfyui.d.ts.map +0 -1
- package/dist/providers/comfyui.js +0 -359
- package/dist/providers/comfyui.js.map +0 -1
- package/dist/providers/index.d.ts +0 -63
- package/dist/providers/index.d.ts.map +0 -1
- package/dist/providers/index.js +0 -126
- package/dist/providers/index.js.map +0 -1
- package/dist/providers/lmstudio.d.ts +0 -11
- package/dist/providers/lmstudio.d.ts.map +0 -1
- package/dist/providers/lmstudio.js +0 -106
- package/dist/providers/lmstudio.js.map +0 -1
- package/dist/providers/ollama.d.ts +0 -11
- package/dist/providers/ollama.d.ts.map +0 -1
- package/dist/providers/ollama.js +0 -59
- package/dist/providers/ollama.js.map +0 -1
- package/dist/providers/stable-diffusion.d.ts +0 -41
- package/dist/providers/stable-diffusion.d.ts.map +0 -1
- package/dist/providers/stable-diffusion.js +0 -283
- package/dist/providers/stable-diffusion.js.map +0 -1
- package/dist/providers/types.d.ts +0 -196
- package/dist/providers/types.d.ts.map +0 -1
- package/dist/providers/types.js +0 -19
- package/dist/providers/types.js.map +0 -1
- package/dist/quickstart/QuickstartScreen.d.ts +0 -5
- package/dist/quickstart/QuickstartScreen.d.ts.map +0 -1
- package/dist/quickstart/QuickstartScreen.js +0 -617
- package/dist/quickstart/QuickstartScreen.js.map +0 -1
- package/dist/quickstart/detect.d.ts +0 -22
- package/dist/quickstart/detect.d.ts.map +0 -1
- package/dist/quickstart/detect.js +0 -243
- package/dist/quickstart/detect.js.map +0 -1
- package/dist/quickstart/index.d.ts +0 -4
- package/dist/quickstart/index.d.ts.map +0 -1
- package/dist/quickstart/index.js +0 -274
- package/dist/quickstart/index.js.map +0 -1
- package/dist/quickstart/installers.d.ts +0 -109
- package/dist/quickstart/installers.d.ts.map +0 -1
- package/dist/quickstart/installers.js +0 -1296
- package/dist/quickstart/installers.js.map +0 -1
- package/dist/runner.d.ts +0 -19
- package/dist/runner.d.ts.map +0 -1
- package/dist/runner.js +0 -314
- package/dist/runner.js.map +0 -1
- package/dist/tui/App.d.ts +0 -7
- package/dist/tui/App.d.ts.map +0 -1
- package/dist/tui/App.js +0 -53
- package/dist/tui/App.js.map +0 -1
- package/dist/tui/TunnelRunner.d.ts +0 -19
- package/dist/tui/TunnelRunner.d.ts.map +0 -1
- package/dist/tui/TunnelRunner.js +0 -228
- package/dist/tui/TunnelRunner.js.map +0 -1
- package/dist/tui/components/Header.d.ts +0 -9
- package/dist/tui/components/Header.d.ts.map +0 -1
- package/dist/tui/components/Header.js +0 -21
- package/dist/tui/components/Header.js.map +0 -1
- package/dist/tui/components/ModelsPanel.d.ts +0 -7
- package/dist/tui/components/ModelsPanel.d.ts.map +0 -1
- package/dist/tui/components/ModelsPanel.js +0 -28
- package/dist/tui/components/ModelsPanel.js.map +0 -1
- package/dist/tui/components/ProvidersPanel.d.ts +0 -7
- package/dist/tui/components/ProvidersPanel.d.ts.map +0 -1
- package/dist/tui/components/ProvidersPanel.js +0 -6
- package/dist/tui/components/ProvidersPanel.js.map +0 -1
- package/dist/tui/components/RequestLog.d.ts +0 -8
- package/dist/tui/components/RequestLog.d.ts.map +0 -1
- package/dist/tui/components/RequestLog.js +0 -60
- package/dist/tui/components/RequestLog.js.map +0 -1
- package/dist/tui/components/StatusBar.d.ts +0 -10
- package/dist/tui/components/StatusBar.d.ts.map +0 -1
- package/dist/tui/components/StatusBar.js +0 -7
- package/dist/tui/components/StatusBar.js.map +0 -1
- package/dist/tui/components/index.d.ts +0 -6
- package/dist/tui/components/index.d.ts.map +0 -1
- package/dist/tui/components/index.js +0 -6
- package/dist/tui/components/index.js.map +0 -1
- package/dist/tui/events.d.ts +0 -35
- package/dist/tui/events.d.ts.map +0 -1
- package/dist/tui/events.js +0 -26
- package/dist/tui/events.js.map +0 -1
- package/dist/tui/hooks/index.d.ts +0 -5
- package/dist/tui/hooks/index.d.ts.map +0 -1
- package/dist/tui/hooks/index.js +0 -5
- package/dist/tui/hooks/index.js.map +0 -1
- package/dist/tui/hooks/useConnection.d.ts +0 -10
- package/dist/tui/hooks/useConnection.d.ts.map +0 -1
- package/dist/tui/hooks/useConnection.js +0 -42
- package/dist/tui/hooks/useConnection.js.map +0 -1
- package/dist/tui/hooks/useModels.d.ts +0 -9
- package/dist/tui/hooks/useModels.d.ts.map +0 -1
- package/dist/tui/hooks/useModels.js +0 -28
- package/dist/tui/hooks/useModels.js.map +0 -1
- package/dist/tui/hooks/useProviders.d.ts +0 -9
- package/dist/tui/hooks/useProviders.d.ts.map +0 -1
- package/dist/tui/hooks/useProviders.js +0 -30
- package/dist/tui/hooks/useProviders.js.map +0 -1
- package/dist/tui/hooks/useRequests.d.ts +0 -9
- package/dist/tui/hooks/useRequests.d.ts.map +0 -1
- package/dist/tui/hooks/useRequests.js +0 -60
- package/dist/tui/hooks/useRequests.js.map +0 -1
- package/dist/tui/index.d.ts +0 -2
- package/dist/tui/index.d.ts.map +0 -1
- package/dist/tui/index.js +0 -19
- package/dist/tui/index.js.map +0 -1
- package/dist/tui/screens/ConfigScreen.d.ts +0 -2
- package/dist/tui/screens/ConfigScreen.d.ts.map +0 -1
- package/dist/tui/screens/ConfigScreen.js +0 -18
- package/dist/tui/screens/ConfigScreen.js.map +0 -1
- package/dist/tui/screens/HomeScreen.d.ts +0 -2
- package/dist/tui/screens/HomeScreen.d.ts.map +0 -1
- package/dist/tui/screens/HomeScreen.js +0 -156
- package/dist/tui/screens/HomeScreen.js.map +0 -1
- package/dist/tui/screens/ModelsScreen.d.ts +0 -2
- package/dist/tui/screens/ModelsScreen.d.ts.map +0 -1
- package/dist/tui/screens/ModelsScreen.js +0 -59
- package/dist/tui/screens/ModelsScreen.js.map +0 -1
- package/dist/tui/screens/StatusScreen.d.ts +0 -2
- package/dist/tui/screens/StatusScreen.d.ts.map +0 -1
- package/dist/tui/screens/StatusScreen.js +0 -53
- package/dist/tui/screens/StatusScreen.js.map +0 -1
- package/dist/tui/screens/index.d.ts +0 -9
- package/dist/tui/screens/index.d.ts.map +0 -1
- package/dist/tui/screens/index.js +0 -38
- package/dist/tui/screens/index.js.map +0 -1
- package/dist/tui/types.d.ts +0 -30
- package/dist/tui/types.d.ts.map +0 -1
- package/dist/tui/types.js +0 -2
- package/dist/tui/types.js.map +0 -1
- package/dist/workflows/index.d.ts +0 -47
- package/dist/workflows/index.d.ts.map +0 -1
- package/dist/workflows/index.js +0 -95
- package/dist/workflows/index.js.map +0 -1
- package/dist/workflows/ltx-video.d.ts +0 -45
- package/dist/workflows/ltx-video.d.ts.map +0 -1
- package/dist/workflows/ltx-video.js +0 -114
- package/dist/workflows/ltx-video.js.map +0 -1
- package/dist/workflows/wan2.1.d.ts +0 -44
- package/dist/workflows/wan2.1.d.ts.map +0 -1
- package/dist/workflows/wan2.1.js +0 -119
- package/dist/workflows/wan2.1.js.map +0 -1

@@ -0,0 +1,1768 @@
+// src/config.ts
+import Conf from "conf";
+import os from "os";
+import path from "path";
+var config = new Conf({
+  projectName: "mindstudio-local",
+  cwd: path.join(os.homedir(), ".mindstudio-local-tunnel"),
+  configName: "config",
+  defaults: {
+    environment: "prod",
+    providerBaseUrls: {},
+    providerInstallPaths: {},
+    environments: {
+      prod: {
+        apiBaseUrl: "https://api.mindstudio.ai"
+      },
+      local: {
+        apiBaseUrl: "http://localhost:3129"
+      }
+    }
+  }
+});
+function getEnvironment() {
+  return config.get("environment");
+}
+function getEnvConfig() {
+  const env = getEnvironment();
+  return config.get(`environments.${env}`);
+}
+function setEnvConfig(key, value) {
+  const env = getEnvironment();
+  config.set(`environments.${env}.${key}`, value);
+}
+function getApiKey() {
+  return getEnvConfig().apiKey;
+}
+function setApiKey(key) {
+  setEnvConfig("apiKey", key);
+}
+function getApiBaseUrl() {
+  return getEnvConfig().apiBaseUrl;
+}
+function getConfigPath() {
+  return config.path;
+}
+function getProviderBaseUrl(name, defaultUrl) {
+  const urls = config.get("providerBaseUrls");
+  return urls[name] ?? defaultUrl;
+}
+function getProviderInstallPath(name) {
+  const paths = config.get("providerInstallPaths");
+  return paths[name];
+}
+
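
The config helpers above give every environment its own credential and API host. A minimal sketch of how they compose (illustrative only; these are module-internal functions, and the values follow the defaults defined above):

// Switch to the local environment, then read environment-scoped values.
config.set("environment", "local");
setApiKey("sk-example");                 // stored at environments.local.apiKey
console.log(getApiBaseUrl());            // -> "http://localhost:3129"
console.log(getProviderBaseUrl("ollama", "http://localhost:11434")); // default until overridden
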
+// src/api.ts
+function getHeaders() {
+  const apiKey = getApiKey();
+  if (!apiKey) {
+    throw new Error("Not authenticated. Run: mindstudio-local auth");
+  }
+  return {
+    Authorization: `Bearer ${apiKey}`,
+    "Content-Type": "application/json"
+  };
+}
+async function pollForRequest(models) {
+  const baseUrl = getApiBaseUrl();
+  const modelsParam = models.join(",");
+  const response = await fetch(
+    `${baseUrl}/v1/local-models/poll?models=${encodeURIComponent(modelsParam)}`,
+    {
+      method: "GET",
+      headers: getHeaders()
+    }
+  );
+  if (response.status === 204) {
+    return null;
+  }
+  if (!response.ok) {
+    const error = await response.text();
+    throw new Error(`Poll failed: ${response.status} ${error}`);
+  }
+  const data = await response.json();
+  return data.request;
+}
+async function submitProgress(requestId, content) {
+  const baseUrl = getApiBaseUrl();
+  const response = await fetch(
+    `${baseUrl}/v1/local-models/requests/${requestId}/progress`,
+    {
+      method: "POST",
+      headers: getHeaders(),
+      body: JSON.stringify({ content })
+    }
+  );
+  if (!response.ok) {
+    console.warn(`Progress update failed: ${response.status}`);
+  }
+}
+async function submitGenerationProgress(requestId, step, totalSteps, preview) {
+  const baseUrl = getApiBaseUrl();
+  const response = await fetch(
+    `${baseUrl}/v1/local-models/requests/${requestId}/progress`,
+    {
+      method: "POST",
+      headers: getHeaders(),
+      body: JSON.stringify({
+        type: "generation",
+        step,
+        totalSteps,
+        preview
+      })
+    }
+  );
+  if (!response.ok) {
+    console.warn(`Generation progress update failed: ${response.status}`);
+  }
+}
+async function submitResult(requestId, success, result, error) {
+  const baseUrl = getApiBaseUrl();
+  const response = await fetch(
+    `${baseUrl}/v1/local-models/requests/${requestId}/result`,
+    {
+      method: "POST",
+      headers: getHeaders(),
+      body: JSON.stringify({ success, result, error })
+    }
+  );
+  if (!response.ok) {
+    const errorText = await response.text();
+    throw new Error(
+      `Result submission failed: ${response.status} ${errorText}`
+    );
+  }
+}
+async function verifyApiKey() {
+  const baseUrl = getApiBaseUrl();
+  try {
+    const response = await fetch(`${baseUrl}/v1/local-models/verify-api-key`, {
+      method: "GET",
+      headers: getHeaders()
+    });
+    return response.status === 204 || response.ok;
+  } catch {
+    return false;
+  }
+}
+async function registerLocalModel(modelNameOrOptions, provider = "ollama", modelType = "llm_chat") {
+  const baseUrl = getApiBaseUrl();
+  let payload;
+  if (typeof modelNameOrOptions === "string") {
+    payload = {
+      modelName: modelNameOrOptions,
+      provider,
+      modelType
+    };
+  } else {
+    payload = {
+      modelName: modelNameOrOptions.modelName,
+      provider: modelNameOrOptions.provider,
+      modelType: modelNameOrOptions.modelType || "llm_chat",
+      parameters: modelNameOrOptions.parameters
+    };
+  }
+  const response = await fetch(`${baseUrl}/v1/local-models/models/create`, {
+    method: "POST",
+    headers: getHeaders(),
+    body: JSON.stringify(payload)
+  });
+  if (!response.ok) {
+    const errorText = await response.text();
+    throw new Error(`Register failed: ${response.status} ${errorText}`);
+  }
+}
+async function getRegisteredModels() {
+  const baseUrl = getApiBaseUrl();
+  const response = await fetch(`${baseUrl}/v1/local-models/models`, {
+    method: "GET",
+    headers: getHeaders()
+  });
+  if (!response.ok) {
+    const errorText = await response.text();
+    throw new Error(
+      `Failed to fetch registered models: ${response.status} ${errorText}`
+    );
+  }
+  const data = await response.json();
+  return data.models;
+}
+async function requestDeviceAuth() {
+  const baseUrl = getApiBaseUrl();
+  const response = await fetch(`${baseUrl}/developer/v2/request-auth-url`, {
+    method: "GET",
+    headers: { "Content-Type": "application/json" }
+  });
+  if (!response.ok) {
+    const error = await response.text();
+    throw new Error(`Device auth request failed: ${response.status} ${error}`);
+  }
+  const data = await response.json();
+  return data;
+}
+async function pollDeviceAuth(token) {
+  const baseUrl = getApiBaseUrl();
+  const response = await fetch(`${baseUrl}/developer/v2/poll-auth-url`, {
+    method: "POST",
+    headers: { "Content-Type": "application/json" },
+    body: JSON.stringify({ token })
+  });
+  if (!response.ok) {
+    const error = await response.text();
+    throw new Error(`Device auth poll failed: ${response.status} ${error}`);
+  }
+  const data = await response.json();
+  return data;
+}
+async function disconnectHeartbeat() {
+  const baseUrl = getApiBaseUrl();
+  const response = await fetch(`${baseUrl}/v1/local-models/disconnect`, {
+    method: "POST",
+    headers: getHeaders()
+  });
+  if (!response.ok) {
+    const error = await response.text();
+    throw new Error(`Heartbeat disconnect failed: ${response.status} ${error}`);
+  }
+}
+
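
Taken together, these endpoints imply a long-poll worker: fetch one pending request, run it locally, and post the outcome back. A minimal sketch under assumptions: handleLocally() is a hypothetical dispatcher, and the request.id field name and the fixed one-second idle delay are not confirmed by this chunk:

// Hypothetical worker loop over the API helpers above.
async function runTunnel(models, handleLocally) {
  while (true) {
    const request = await pollForRequest(models); // null on HTTP 204 (no work)
    if (!request) {
      await new Promise((r) => setTimeout(r, 1000)); // assumed idle backoff
      continue;
    }
    try {
      const result = await handleLocally(request);
      await submitResult(request.id, true, result);
    } catch (err) {
      await submitResult(request.id, false, void 0, String(err));
    }
  }
}
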
+// src/providers/ollama/index.ts
+import { Ollama } from "ollama";
+
+// src/providers/utils.ts
+import { exec } from "child_process";
+import { promisify } from "util";
+var execAsync = promisify(exec);
+async function commandExists(command) {
+  try {
+    const checkCmd = process.platform === "win32" ? "where" : "which";
+    await execAsync(`${checkCmd} ${command}`);
+    return true;
+  } catch {
+    return false;
+  }
+}
+
+// src/providers/ollama/readme.md
+var readme_default = '# Ollama\n\nOllama lets you run text generation models (Llama, Mistral, Gemma, etc.) locally. Once it\'s running with at least one model downloaded, MindStudio will detect it automatically.\n\n**Default port:** 11434\n**Website:** https://ollama.com\n**GitHub:** https://github.com/ollama/ollama\n\n## Step 1: Install Ollama\n\n### macOS / Linux\n\nOpen a terminal and paste this command:\n\n```\ncurl -fsSL https://ollama.com/install.sh | sh\n```\n\n### macOS (alternative)\n\nDownload the app from https://ollama.com/download, open the file, and drag it into your Applications folder.\n\n### Windows\n\nDownload the installer from https://ollama.com/download and run it. Follow the on-screen instructions.\n\n## Step 2: Start the Server\n\nOpen a terminal and run:\n\n```\nollama serve\n```\n\nLeave this terminal window open -- the server needs to keep running for MindStudio to connect to it.\n\n**macOS tip:** If you installed Ollama as a desktop app, the server starts automatically when you open it. Look for the Ollama icon in your menu bar -- if it\'s there, you can skip this step.\n\n## Step 3: Download a Model\n\nOpen a **new** terminal window (keep the server running in the other one) and download a model:\n\n```\nollama pull llama3.2\n```\n\nSome good models to start with:\n\n- **llama3.2** -- fast, great all-around model (2 GB download)\n- **mistral** -- efficient for most tasks (4 GB)\n- **gemma2** -- Google\'s open model (5 GB)\n\nBrowse more models at https://ollama.com/library\n\nOnce the download finishes, go back to the MindStudio tunnel and select **Refresh Providers**. Your models should appear.\n\n## Troubleshooting\n\n- **MindStudio says Ollama is "not running"** -- Make sure `ollama serve` is running in a terminal window. You should see "Listening on 127.0.0.1:11434" in the output.\n\n- **Ollama is running but no models show up** -- You need to download at least one model first. Run `ollama pull llama3.2` in a separate terminal window.\n\n- **"address already in use"** -- Ollama is probably already running. On macOS, check for the Ollama icon in your menu bar. On Linux, run `pkill ollama` and try `ollama serve` again.\n\n- **"out of memory" errors** -- Your machine doesn\'t have enough RAM for that model. Try a smaller one like `llama3.2` (2 GB).\n';
+
+// src/providers/ollama/index.ts
+var OllamaProvider = class {
+  name = "ollama";
+  displayName = "Ollama";
+  description = "Run open-source LLMs locally via CLI. Supports Llama, Mistral, Gemma, and more.";
+  capabilities = ["text"];
+  readme = readme_default;
+  defaultBaseUrl = "http://localhost:11434";
+  get baseUrl() {
+    return getProviderBaseUrl(this.name, this.defaultBaseUrl);
+  }
+  createClient() {
+    return new Ollama({ host: this.baseUrl });
+  }
+  async isRunning() {
+    try {
+      const client = this.createClient();
+      await client.list();
+      return true;
+    } catch {
+      return false;
+    }
+  }
+  async discoverModels() {
+    try {
+      const client = this.createClient();
+      const response = await client.list();
+      return response.models.map((m) => ({
+        name: m.name,
+        provider: this.name,
+        capability: "text",
+        size: m.size,
+        parameterSize: m.details?.parameter_size,
+        quantization: m.details?.quantization_level
+      }));
+    } catch {
+      return [];
+    }
+  }
+  async detect() {
+    const installed = await commandExists("ollama");
+    let running = false;
+    if (installed) {
+      running = await this.isRunning();
+    }
+    return { installed, running };
+  }
+  async *chat(model, messages, options) {
+    const client = this.createClient();
+    const stream = await client.chat({
+      model,
+      messages: messages.map((m) => ({
+        role: m.role,
+        content: m.content
+      })),
+      stream: true,
+      options: {
+        temperature: options?.temperature,
+        num_predict: options?.maxTokens
+      }
+    });
+    for await (const chunk of stream) {
+      yield {
+        content: chunk.message.content,
+        done: chunk.done
+      };
+    }
+  }
+};
+var ollama_default = new OllamaProvider();
+
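
chat() is an async generator, so callers stream tokens with for await. A small consumption sketch (model and prompt are illustrative; run inside an async context):

const stream = ollama_default.chat(
  "llama3.2",
  [{ role: "user", content: "Say hello" }],
  { temperature: 0.7 }
);
for await (const chunk of stream) {
  process.stdout.write(chunk.content); // print tokens as they arrive
  if (chunk.done) break;
}
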
+// src/providers/lmstudio/index.ts
+import * as fs from "fs";
+import * as path2 from "path";
+import * as os2 from "os";
+
+// src/providers/lmstudio/readme.md
+var readme_default2 = '# LM Studio\n\nLM Studio is a desktop app for running text generation models locally. No terminal needed -- everything is done through the app. Once its server is running, MindStudio will detect it automatically.\n\n**Default port:** 1234\n**Website:** https://lmstudio.ai\n**GitHub:** https://github.com/lmstudio-ai\n\n## Step 1: Install LM Studio\n\n1. Go to https://lmstudio.ai\n2. Click the download button for your operating system\n3. Open the downloaded file and install it like any other app\n\n## Step 2: Download a Model\n\n1. Open LM Studio\n2. Click the **Discover** tab on the left sidebar\n3. Search for a model (see suggestions below)\n4. Click the download button next to the model you want\n5. Wait for the download to finish\n\nGood starter models:\n\n- **Llama 3.2** -- great all-around model, fast\n- **Mistral** -- efficient and capable\n- **Phi-3** -- compact, runs well on most machines\n\n## Step 3: Start the Server\n\nThis is the key step -- LM Studio needs to be running its local server for MindStudio to connect.\n\n1. In LM Studio, click the **Developer** tab on the left sidebar\n2. Select a model from the dropdown at the top if one isn\'t already loaded\n3. Click **Start Server**\n\nYou should see a green indicator showing the server is running on `http://localhost:1234`.\n\n**Important:** Just opening LM Studio is not enough. You must start the server from the Developer tab.\n\nLeave LM Studio open with the server running while you use MindStudio. Go back to the tunnel and select **Refresh Providers** -- your models should appear.\n\n## Troubleshooting\n\n- **MindStudio says LM Studio is "not running"** -- Make sure you started the server in the Developer tab. The green indicator should be visible.\n\n- **Server is running but no models show up** -- You need to load a model in the Developer tab. Select one from the dropdown at the top of the Developer tab before starting the server.\n\n- **Port conflict** -- If something else is using port 1234, you can change the port in the Developer tab settings.\n';
+
+// src/providers/lmstudio/index.ts
+var LMStudioProvider = class {
+  name = "lmstudio";
+  displayName = "LM Studio";
+  description = "Desktop app for running LLMs locally with a visual interface. No terminal required.";
+  capabilities = ["text"];
+  readme = readme_default2;
+  defaultBaseUrl = "http://localhost:1234/v1";
+  get baseUrl() {
+    return getProviderBaseUrl(this.name, this.defaultBaseUrl);
+  }
+  getBaseUrl() {
+    return this.baseUrl;
+  }
+  async isRunning() {
+    try {
+      const response = await fetch(`${this.getBaseUrl()}/models`, {
+        method: "GET",
+        signal: AbortSignal.timeout(3e3)
+      });
+      return response.ok;
+    } catch {
+      return false;
+    }
+  }
+  async discoverModels() {
+    try {
+      const response = await fetch(`${this.getBaseUrl()}/models`);
+      if (!response.ok) {
+        return [];
+      }
+      const data = await response.json();
+      return data.data.map((m) => ({
+        name: m.id,
+        provider: this.name,
+        capability: "text"
+      }));
+    } catch {
+      return [];
+    }
+  }
+  async detect() {
+    let installed = false;
+    const possiblePaths = {
+      darwin: ["/Applications/LM Studio.app"],
+      linux: [
+        path2.join(os2.homedir(), ".local/share/LM Studio"),
+        "/opt/lm-studio"
+      ],
+      win32: [
+        path2.join(process.env.LOCALAPPDATA || "", "LM Studio"),
+        path2.join(process.env.PROGRAMFILES || "", "LM Studio")
+      ]
+    };
+    const paths = possiblePaths[process.platform] || [];
+    for (const p of paths) {
+      if (fs.existsSync(p)) {
+        installed = true;
+        break;
+      }
+    }
+    let running = false;
+    try {
+      const response = await fetch("http://localhost:1234/v1/models", {
+        signal: AbortSignal.timeout(1e3)
+      });
+      running = response.ok;
+      if (running) installed = true;
+    } catch {
+      running = false;
+    }
+    return { installed, running };
+  }
+  async *chat(model, messages, options) {
+    const response = await fetch(`${this.getBaseUrl()}/chat/completions`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json"
+      },
+      body: JSON.stringify({
+        model,
+        messages: messages.map((m) => ({
+          role: m.role,
+          content: m.content
+        })),
+        stream: true,
+        temperature: options?.temperature,
+        max_tokens: options?.maxTokens
+      })
+    });
+    if (!response.ok) {
+      const error = await response.text();
+      throw new Error(`LM Studio request failed: ${response.status} ${error}`);
+    }
+    if (!response.body) {
+      throw new Error("No response body from LM Studio");
+    }
+    const reader = response.body.getReader();
+    const decoder = new TextDecoder();
+    let buffer = "";
+    try {
+      while (true) {
+        const { done, value } = await reader.read();
+        if (done) {
+          yield { content: "", done: true };
+          break;
+        }
+        buffer += decoder.decode(value, { stream: true });
+        const lines = buffer.split("\n");
+        buffer = lines.pop() || "";
+        for (const line of lines) {
+          const trimmed = line.trim();
+          if (!trimmed || !trimmed.startsWith("data: ")) {
+            continue;
+          }
+          const data = trimmed.slice(6);
+          if (data === "[DONE]") {
+            yield { content: "", done: true };
+            return;
+          }
+          try {
+            const parsed = JSON.parse(data);
+            const choice = parsed.choices[0];
+            const content = choice?.delta?.content || "";
+            const isDone = choice?.finish_reason !== null;
+            if (content) {
+              yield { content, done: isDone };
+            }
+          } catch {
+          }
+        }
+      }
+    } finally {
+      reader.releaseLock();
+    }
+  }
+};
+var lmstudio_default = new LMStudioProvider();
+
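
The chat() parser above consumes OpenAI-style server-sent events from LM Studio's /v1/chat/completions endpoint: it buffers partial lines, keeps only "data: " frames, and stops at "[DONE]". For reference, the stream it splits looks roughly like this (payload values illustrative):

data: {"choices":[{"delta":{"content":"Hel"},"finish_reason":null}]}
data: {"choices":[{"delta":{"content":"lo"},"finish_reason":null}]}
data: {"choices":[{"delta":{},"finish_reason":"stop"}]}
data: [DONE]
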
+// src/providers/stable-diffusion/index.ts
+import * as fs2 from "fs";
+import * as path3 from "path";
+import * as os3 from "os";
+
+// src/providers/stable-diffusion/readme.md
+var readme_default3 = '# Stable Diffusion WebUI\n\nAUTOMATIC1111\'s Stable Diffusion WebUI runs image generation models locally. Once the server is running with at least one model, MindStudio will detect it automatically.\n\n**Default port:** 7860\n**GitHub:** https://github.com/AUTOMATIC1111/stable-diffusion-webui\n\n## What You\'ll Need\n\n- **Python 3.10 or newer** -- Check by opening a terminal and typing `python3 --version`. If you don\'t have it, download from https://www.python.org/downloads/\n\n- **Git** -- Check by typing `git --version`. If you don\'t have it, download from https://git-scm.com/downloads\n\n## Step 1: Install the WebUI\n\nOpen a terminal and paste this command:\n\n```\ngit clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git ~/stable-diffusion-webui\n```\n\n**Windows users**, use this instead:\n\n```\ngit clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git %USERPROFILE%\\stable-diffusion-webui\n```\n\n## Step 2: Download a Model\n\nYou need at least one model file for MindStudio to use. Model files have the `.safetensors` extension (typically 2-7 GB).\n\n1. Browse models at https://civitai.com or https://huggingface.co\n2. Download a `.safetensors` checkpoint file\n3. Move the file into this folder:\n\n```\n~/stable-diffusion-webui/models/Stable-diffusion/\n```\n\nGood starter models:\n\n- **Stable Diffusion XL (SDXL)** -- high quality, 1024x1024\n- **Stable Diffusion 1.5** -- fast, widely supported\n\n## Step 3: Start the Server\n\nOpen a terminal and run:\n\n```\ncd ~/stable-diffusion-webui && ./webui.sh --api\n```\n\n**Windows users:**\n\n```\ncd %USERPROFILE%\\stable-diffusion-webui && webui-user.bat --api\n```\n\nThe first time you run this it will take several minutes to install dependencies. This is normal -- let it finish.\n\n**Important:** The `--api` flag is required. Without it, MindStudio cannot send requests to the server.\n\nLeave this terminal window open while using MindStudio. Once the server is ready, go back to the tunnel and select **Refresh Providers** -- your models should appear.\n\n## Troubleshooting\n\n- **MindStudio says WebUI is "not running"** -- Make sure you included `--api` when launching. The terminal should show the server at `http://127.0.0.1:7860`.\n\n- **Server is running but no models show up** -- Make sure your `.safetensors` file is directly in the `models/Stable-diffusion/` folder, not inside a subfolder. Restart the server after adding new model files.\n\n- **"Python not found"** -- Python 3.10+ is required. Download from https://www.python.org/downloads/. On Windows, check "Add Python to PATH" during installation.\n\n- **Errors during first launch** -- Delete the `venv` folder inside `stable-diffusion-webui` and run the launch command again to reinstall dependencies from scratch.\n\n- **"CUDA out of memory"** -- Your GPU doesn\'t have enough memory. Add `--medvram` or `--lowvram` to the launch command: `./webui.sh --api --medvram`\n';
+
+// src/providers/stable-diffusion/index.ts
+var StableDiffusionProvider = class {
+  name = "stable-diffusion";
+  displayName = "Stable Diffusion WebUI";
+  description = "Generate images locally using Stable Diffusion checkpoints. Runs as a local web UI.";
+  capabilities = ["image"];
+  readme = readme_default3;
+  defaultBaseUrl = "http://127.0.0.1:7860";
+  get baseUrl() {
+    return getProviderBaseUrl(this.name, this.defaultBaseUrl);
+  }
+  getBaseUrl() {
+    return this.baseUrl;
+  }
+  async isRunning() {
+    try {
+      const response = await fetch(`${this.getBaseUrl()}/sdapi/v1/sd-models`, {
+        method: "GET",
+        signal: AbortSignal.timeout(5e3)
+      });
+      return response.ok;
+    } catch {
+      return false;
+    }
+  }
+  async discoverModels() {
+    try {
+      const response = await fetch(`${this.getBaseUrl()}/sdapi/v1/sd-models`);
+      if (!response.ok) {
+        return [];
+      }
+      const models = await response.json();
+      return models.map((m) => ({
+        name: m.model_name,
+        provider: this.name,
+        capability: "image"
+      }));
+    } catch {
+      return [];
+    }
+  }
+  async detect() {
+    const savedPath = getProviderInstallPath(this.name);
+    const possiblePaths = [
+      ...savedPath ? [savedPath] : [],
+      path3.join(os3.homedir(), "stable-diffusion-webui"),
+      path3.join(os3.homedir(), "Projects", "stable-diffusion-webui"),
+      path3.join(os3.homedir(), "Code", "stable-diffusion-webui")
+    ];
+    let installed = false;
+    for (const p of possiblePaths) {
+      if (fs2.existsSync(path3.join(p, "launch.py")) || fs2.existsSync(path3.join(p, "webui.sh")) || fs2.existsSync(path3.join(p, "webui.bat"))) {
+        installed = true;
+        break;
+      }
+    }
+    let running = false;
+    try {
+      const response = await fetch("http://127.0.0.1:7860/sdapi/v1/sd-models", {
+        signal: AbortSignal.timeout(1e3)
+      });
+      running = response.ok;
+      if (running) installed = true;
+    } catch {
+      running = false;
+    }
+    return { installed, running };
+  }
+  /**
+   * Get the currently loaded model
+   */
+  async getCurrentModel() {
+    try {
+      const response = await fetch(`${this.getBaseUrl()}/sdapi/v1/options`);
+      if (!response.ok) return null;
+      const options = await response.json();
+      return options.sd_model_checkpoint || null;
+    } catch {
+      return null;
+    }
+  }
+  /**
+   * Switch to a different model
+   */
+  async setModel(modelName) {
+    const response = await fetch(`${this.getBaseUrl()}/sdapi/v1/options`, {
+      method: "POST",
+      headers: { "Content-Type": "application/json" },
+      body: JSON.stringify({ sd_model_checkpoint: modelName })
+    });
+    if (!response.ok) {
+      const error = await response.text();
+      throw new Error(`Failed to switch model: ${error}`);
+    }
+  }
+  async generateImage(model, prompt, options) {
+    const currentModel = await this.getCurrentModel();
+    if (currentModel && !currentModel.includes(model)) {
+      await this.setModel(model);
+    }
+    const payload = {
+      prompt,
+      negative_prompt: options?.negativePrompt || "",
+      steps: options?.steps || 20,
+      width: options?.width || 512,
+      height: options?.height || 512,
+      cfg_scale: options?.cfgScale || 7,
+      seed: options?.seed ?? -1,
+      sampler_name: options?.sampler || "Euler a"
+    };
+    const response = await fetch(`${this.getBaseUrl()}/sdapi/v1/txt2img`, {
+      method: "POST",
+      headers: { "Content-Type": "application/json" },
+      body: JSON.stringify(payload)
+    });
+    if (!response.ok) {
+      const error = await response.text();
+      throw new Error(`Image generation failed: ${response.status} ${error}`);
+    }
+    const result = await response.json();
+    if (!result.images || result.images.length === 0) {
+      throw new Error("No images returned from Stable Diffusion");
+    }
+    let info = {};
+    let seed;
+    try {
+      info = JSON.parse(result.info);
+      seed = typeof info.seed === "number" ? info.seed : void 0;
+    } catch {
+    }
+    return {
+      imageBase64: result.images[0],
+      mimeType: "image/png",
+      seed,
+      info
+    };
+  }
+  async generateImageWithProgress(model, prompt, options, onProgress) {
+    const generatePromise = this.generateImage(model, prompt, options);
+    if (onProgress) {
+      const pollProgress = async () => {
+        while (true) {
+          try {
+            const response = await fetch(
+              `${this.getBaseUrl()}/sdapi/v1/progress`
+            );
+            if (!response.ok) break;
+            const progress = await response.json();
+            onProgress({
+              step: progress.state.sampling_step,
+              totalSteps: progress.state.sampling_steps,
+              preview: progress.current_image
+            });
+            if (progress.progress >= 1) break;
+            await new Promise((resolve) => setTimeout(resolve, 500));
+          } catch {
+            break;
+          }
+        }
+      };
+      pollProgress().catch(() => {
+      });
+    }
+    return generatePromise;
+  }
+  /**
+   * Fetch available samplers from the backend
+   */
+  async getSamplers() {
+    try {
+      const response = await fetch(`${this.getBaseUrl()}/sdapi/v1/samplers`);
+      if (!response.ok) return this.getDefaultSamplers();
+      const samplers = await response.json();
+      return samplers.map((s) => s.name);
+    } catch {
+      return this.getDefaultSamplers();
+    }
+  }
+  getDefaultSamplers() {
+    return [
+      "Euler a",
+      "Euler",
+      "LMS",
+      "Heun",
+      "DPM2",
+      "DPM2 a",
+      "DPM++ 2S a",
+      "DPM++ 2M",
+      "DPM++ SDE",
+      "DPM fast",
+      "DPM adaptive",
+      "LMS Karras",
+      "DPM2 Karras",
+      "DPM2 a Karras",
+      "DPM++ 2S a Karras",
+      "DPM++ 2M Karras",
+      "DPM++ SDE Karras",
+      "DDIM",
+      "PLMS",
+      "UniPC"
+    ];
+  }
+  generateDimensionOptions() {
+    const options = [];
+    for (let size = 256; size <= 2048; size += 64) {
+      options.push({
+        label: `${size}px`,
+        value: String(size)
+      });
+    }
+    return options;
+  }
+  async getParameterSchemas() {
+    const samplers = await this.getSamplers();
+    const dimensionOptions = this.generateDimensionOptions();
+    return [
+      {
+        type: "select",
+        label: "Sampler",
+        variable: "sampler",
+        helpText: "The sampling method used for image generation",
+        defaultValue: "Euler a",
+        selectOptions: samplers.map((name) => ({
+          label: name,
+          value: name
+        }))
+      },
+      {
+        type: "select",
+        label: "Width",
+        variable: "width",
+        defaultValue: "512",
+        selectOptions: dimensionOptions
+      },
+      {
+        type: "select",
+        label: "Height",
+        variable: "height",
+        defaultValue: "512",
+        selectOptions: dimensionOptions
+      },
+      {
+        type: "number",
+        label: "Steps",
+        variable: "steps",
+        helpText: "Number of denoising steps. More steps = higher quality but slower.",
+        defaultValue: 20,
+        numberOptions: {
+          min: 1,
+          max: 150,
+          step: 1
+        }
+      },
+      {
+        type: "number",
+        label: "CFG Scale",
+        variable: "cfgScale",
+        helpText: "How strongly the image should follow the prompt. Higher = more literal.",
+        defaultValue: 7,
+        numberOptions: {
+          min: 1,
+          max: 30,
+          step: 0.5
+        }
+      },
+      {
+        type: "number",
+        label: "Seed",
+        variable: "seed",
+        helpText: "A specific value used to guide the 'randomness' of generation. Use -1 for random.",
+        defaultValue: -1,
+        numberOptions: {
+          min: -1,
+          max: 2147483647
+        }
+      },
+      {
+        type: "text",
+        label: "Negative Prompt",
+        variable: "negativePrompt",
+        helpText: "Things you don't want in the image",
+        placeholder: "blurry, low quality, distorted"
+      }
+    ];
+  }
+};
+var stable_diffusion_default = new StableDiffusionProvider();
+
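
A sketch of driving the image pipeline above end to end; the checkpoint name, prompt, and output path are illustrative:

import { writeFileSync } from "fs";

// Generate one image, logging sampler progress, then write it to disk.
const result = await stable_diffusion_default.generateImageWithProgress(
  "sd_xl_base_1.0",                       // must match an installed checkpoint (assumed)
  "a lighthouse at dusk, oil painting",
  { steps: 30, width: 1024, height: 1024 },
  ({ step, totalSteps }) => console.log(`step ${step}/${totalSteps}`)
);
writeFileSync("out.png", Buffer.from(result.imageBase64, "base64"));
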
+// src/providers/comfyui/index.ts
+import * as fs3 from "fs";
+import * as path4 from "path";
+import * as os4 from "os";
+
+// src/providers/comfyui/workflows/ltx-video.ts
+var LTX_VIDEO_DEFAULTS = {
+  model: "ltx-video-2b-v0.9.5.safetensors",
+  textEncoder: "t5xxl_fp16.safetensors",
+  prompt: "",
+  negativePrompt: "worst quality, blurry, distorted, disfigured, motion smear, motion artifacts",
+  width: 512,
+  height: 320,
+  numFrames: 41,
+  fps: 8,
+  steps: 20,
+  cfgScale: 3,
+  seed: -1
+};
+function buildLtxVideoWorkflow(params) {
+  const p = { ...LTX_VIDEO_DEFAULTS, ...params };
+  const seed = p.seed === -1 ? Math.floor(Math.random() * 2 ** 32) : p.seed;
+  return {
+    // Node 1: Load checkpoint (MODEL + VAE, CLIP output unused)
+    "1": {
+      class_type: "CheckpointLoaderSimple",
+      inputs: {
+        ckpt_name: p.model
+      }
+    },
+    // Node 2: Load text encoder (T5-XXL) separately
+    "2": {
+      class_type: "CLIPLoader",
+      inputs: {
+        clip_name: p.textEncoder,
+        type: "ltxv"
+      }
+    },
+    // Node 3: Positive prompt encoding (CLIP from CLIPLoader, NOT from checkpoint)
+    "3": {
+      class_type: "CLIPTextEncode",
+      inputs: {
+        text: p.prompt,
+        clip: ["2", 0]
+      }
+    },
+    // Node 4: Negative prompt encoding
+    "4": {
+      class_type: "CLIPTextEncode",
+      inputs: {
+        text: p.negativePrompt,
+        clip: ["2", 0]
+      }
+    },
+    // Node 5: Empty latent video
+    "5": {
+      class_type: "EmptyLTXVLatentVideo",
+      inputs: {
+        width: p.width,
+        height: p.height,
+        length: p.numFrames,
+        batch_size: 1
+      }
+    },
+    // Node 6: KSampler
+    "6": {
+      class_type: "KSampler",
+      inputs: {
+        model: ["1", 0],
+        positive: ["3", 0],
+        negative: ["4", 0],
+        latent_image: ["5", 0],
+        seed,
+        steps: p.steps,
+        cfg: p.cfgScale,
+        sampler_name: "euler",
+        scheduler: "normal",
+        denoise: 1
+      }
+    },
+    // Node 7: VAE Decode (VAE from checkpoint, slot 2)
+    "7": {
+      class_type: "VAEDecode",
+      inputs: {
+        samples: ["6", 0],
+        vae: ["1", 2]
+      }
+    },
+    // Node 8: Save as MP4 via VideoHelperSuite
+    "8": {
+      class_type: "VHS_VideoCombine",
+      inputs: {
+        images: ["7", 0],
+        frame_rate: p.fps,
+        loop_count: 0,
+        filename_prefix: "ltxv_output",
+        format: "video/h264-mp4",
+        pingpong: false,
+        save_output: true
+      }
+    }
+  };
+}
+var LTX_VIDEO_OUTPUT_NODE = "8";
+
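
buildLtxVideoWorkflow() emits a node graph in ComfyUI's API-prompt format. A minimal sketch of queuing it against a running server (ComfyUI's POST /prompt accepts { prompt } and returns a prompt_id; polling /history for the finished file is omitted):

// Hypothetical: queue the generated graph on a local ComfyUI server.
const graph = buildLtxVideoWorkflow({ prompt: "a drone shot over a glacier" });
const res = await fetch("http://127.0.0.1:8188/prompt", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ prompt: graph })
});
const { prompt_id } = await res.json();
console.log("queued as", prompt_id);
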
+// src/providers/comfyui/workflows/wan2.1.ts
+var WAN21_DEFAULTS = {
+  model: "wan2.1_t2v_1.3B_fp16.safetensors",
+  textEncoder: "umt5_xxl_fp8_e4m3fn_scaled.safetensors",
+  vae: "wan_2.1_vae.safetensors",
+  prompt: "",
+  negativePrompt: "worst quality, blurry, distorted",
+  width: 480,
+  height: 320,
+  numFrames: 25,
+  fps: 8,
+  steps: 20,
+  cfgScale: 5,
+  seed: -1
+};
+function buildWan21Workflow(params) {
+  const p = { ...WAN21_DEFAULTS, ...params };
+  const seed = p.seed === -1 ? Math.floor(Math.random() * 2 ** 32) : p.seed;
+  return {
+    // Node 1: Load diffusion model (UNET)
+    "1": {
+      class_type: "UNETLoader",
+      inputs: {
+        unet_name: p.model,
+        weight_dtype: "default"
+      }
+    },
+    // Node 2: Load text encoder (UMT5-XXL)
+    "2": {
+      class_type: "CLIPLoader",
+      inputs: {
+        clip_name: p.textEncoder,
+        type: "wan"
+      }
+    },
+    // Node 3: Load VAE
+    "3": {
+      class_type: "VAELoader",
+      inputs: {
+        vae_name: p.vae
+      }
+    },
+    // Node 4: Positive prompt encoding
+    "4": {
+      class_type: "CLIPTextEncode",
+      inputs: {
+        text: p.prompt,
+        clip: ["2", 0]
+      }
+    },
+    // Node 5: Negative prompt encoding
+    "5": {
+      class_type: "CLIPTextEncode",
+      inputs: {
+        text: p.negativePrompt,
+        clip: ["2", 0]
+      }
+    },
+    // Node 6: Empty latent image (for video frames)
+    "6": {
+      class_type: "EmptySD3LatentImage",
+      inputs: {
+        width: p.width,
+        height: p.height,
+        batch_size: p.numFrames
+      }
+    },
+    // Node 7: KSampler
+    "7": {
+      class_type: "KSampler",
+      inputs: {
+        model: ["1", 0],
+        positive: ["4", 0],
+        negative: ["5", 0],
+        latent_image: ["6", 0],
+        seed,
+        steps: p.steps,
+        cfg: p.cfgScale,
+        sampler_name: "euler",
+        scheduler: "normal",
+        denoise: 1
+      }
+    },
+    // Node 8: VAE Decode
+    "8": {
+      class_type: "VAEDecode",
+      inputs: {
+        samples: ["7", 0],
+        vae: ["3", 0]
+      }
+    },
+    // Node 9: Save as MP4 via VideoHelperSuite
+    "9": {
+      class_type: "VHS_VideoCombine",
+      inputs: {
+        images: ["8", 0],
+        frame_rate: p.fps,
+        loop_count: 0,
+        filename_prefix: "wan21_output",
+        format: "video/h264-mp4",
+        pingpong: false,
+        save_output: true
+      }
+    }
+  };
+}
+var WAN21_OUTPUT_NODE = "9";
+
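
One structural difference worth noting: the LTX graph allocates frames through EmptyLTXVLatentVideo's length input, while the Wan 2.1 graph batches one latent per frame, so numFrames becomes batch_size on node "6". A quick check against the builder above:

const graph = buildWan21Workflow({ prompt: "waves on a beach", numFrames: 33 });
console.log(graph["6"].inputs.batch_size); // -> 33
console.log(graph["9"].inputs.frame_rate); // -> 8, from WAN21_DEFAULTS.fps
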
+// src/providers/comfyui/workflows/index.ts
+var MODEL_REGISTRY = [
+  // LTX-Video models
+  {
+    pattern: /ltx[_-]?video/i,
+    config: {
+      family: "ltx-video",
+      displayName: "LTX-Video",
+      buildWorkflow: (params) => buildLtxVideoWorkflow({
+        model: params.model,
+        prompt: params.prompt,
+        negativePrompt: params.negativePrompt,
+        width: params.width,
+        height: params.height,
+        numFrames: params.numFrames,
+        fps: params.fps,
+        steps: params.steps,
+        cfgScale: params.cfgScale,
+        seed: params.seed
+      }),
+      outputNodeId: LTX_VIDEO_OUTPUT_NODE,
+      defaults: {
+        width: LTX_VIDEO_DEFAULTS.width,
+        height: LTX_VIDEO_DEFAULTS.height,
+        numFrames: LTX_VIDEO_DEFAULTS.numFrames,
+        fps: LTX_VIDEO_DEFAULTS.fps,
+        steps: LTX_VIDEO_DEFAULTS.steps,
+        cfgScale: LTX_VIDEO_DEFAULTS.cfgScale
+      }
+    }
+  },
+  // Wan 2.1 models
+  {
+    pattern: /wan2[\._]?1/i,
+    config: {
+      family: "wan2.1",
+      displayName: "Wan 2.1",
+      buildWorkflow: (params) => buildWan21Workflow({
+        model: params.model,
+        prompt: params.prompt,
+        negativePrompt: params.negativePrompt,
+        width: params.width,
+        height: params.height,
+        numFrames: params.numFrames,
+        fps: params.fps,
+        steps: params.steps,
+        cfgScale: params.cfgScale,
+        seed: params.seed
+      }),
+      outputNodeId: WAN21_OUTPUT_NODE,
+      defaults: {
+        width: WAN21_DEFAULTS.width,
+        height: WAN21_DEFAULTS.height,
+        numFrames: WAN21_DEFAULTS.numFrames,
+        fps: WAN21_DEFAULTS.fps,
+        steps: WAN21_DEFAULTS.steps,
+        cfgScale: WAN21_DEFAULTS.cfgScale
+      }
+    }
+  }
+];
+function getWorkflowForModel(modelFilename) {
+  for (const entry of MODEL_REGISTRY) {
+    if (entry.pattern.test(modelFilename)) {
+      return entry.config;
+    }
+  }
+  return null;
+}
+function isKnownVideoModel(modelFilename) {
+  return getWorkflowForModel(modelFilename) !== null;
+}
+
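
The registry dispatches on filename alone, so the default checkpoint names map straight to workflow families:

console.log(getWorkflowForModel("ltx-video-2b-v0.9.5.safetensors")?.family); // -> "ltx-video"
console.log(getWorkflowForModel("wan2.1_t2v_1.3B_fp16.safetensors")?.family); // -> "wan2.1"
console.log(isKnownVideoModel("sd_xl_base_1.0.safetensors"));                 // -> false
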
|
|
1049
|
+
// src/providers/comfyui/readme.md
|
|
1050
|
+
var readme_default4 = '# ComfyUI\n\nComfyUI runs video generation models (LTX-Video, Wan2.1) locally. MindStudio handles all the workflow complexity for you -- you just need to install ComfyUI and download a model.\n\n**Default port:** 8188\n**Website:** https://www.comfy.org\n**GitHub:** https://github.com/comfyanonymous/ComfyUI\n\n## What You\'ll Need\n\n- **Python 3.10 or newer** -- Check by opening a terminal and typing `python3 --version`. If you don\'t have it, download from https://www.python.org/downloads/\n\n- **Git** -- Check by typing `git --version`. If you don\'t have it, download from https://git-scm.com/downloads\n\n- **A GPU with 8+ GB of VRAM** -- Video generation is demanding. Without enough GPU memory, generation will fail or be extremely slow.\n\n## Step 1: Install ComfyUI\n\nOpen a terminal and run these commands one at a time, waiting for each to finish before running the next.\n\nDownload ComfyUI:\n\n```\ngit clone https://github.com/comfyanonymous/ComfyUI.git ~/ComfyUI\n```\n\nGo into the folder:\n\n```\ncd ~/ComfyUI\n```\n\nCreate an isolated Python environment:\n\n```\npython3 -m venv venv\n```\n\nActivate the environment:\n\n```\nsource venv/bin/activate\n```\n\n**Windows users:** use `venv\\Scripts\\activate` instead.\n\nInstall dependencies (may take a few minutes):\n\n```\npip install -r requirements.txt\n```\n\n## Step 2: Download a Video Model\n\nYou need at least one video model for MindStudio to use.\n\n### LTX-Video (recommended to start)\n\nFastest option, good for getting up and running quickly.\n\n1. Go to https://huggingface.co/Lightricks/LTX-Video\n2. Download `ltx-video-2b-v0.9.5.safetensors`\n3. Move the file into:\n\n```\n~/ComfyUI/models/checkpoints/\n```\n\n### Wan2.1\n\nHigher quality but slower and needs more VRAM. Requires multiple files -- make sure you download all of them or it won\'t work.\n\n1. Go to https://huggingface.co/Comfy-Org/Wan2.1_ComfyUI_repackaged\n2. Place UNET files in `~/ComfyUI/models/diffusion_models/`\n3. Place text encoder files in `~/ComfyUI/models/text_encoders/`\n4. Place VAE files in `~/ComfyUI/models/vae/`\n\n## Step 3: Start the Server\n\nEvery time you want to use ComfyUI with MindStudio, open a terminal and run:\n\n```\ncd ~/ComfyUI && source venv/bin/activate && python main.py --listen\n```\n\n**Windows users:**\n\n```\ncd %USERPROFILE%\\ComfyUI && venv\\Scripts\\activate && python main.py --listen\n```\n\n**Important:** The `--listen` flag is required. Without it, MindStudio cannot connect to the server.\n\nLeave this terminal window open. When you see "To see the GUI go to: http://0.0.0.0:8188", the server is ready. Go back to the tunnel and select **Refresh Providers** -- your models should appear.\n\n## Troubleshooting\n\n- **MindStudio says ComfyUI is "not running"** -- Make sure you started with the `--listen` flag. Without it, the server won\'t accept connections from MindStudio.\n\n- **Server is running but no models show up** -- Check that your model files are in the right folders under `~/ComfyUI/models/`. Checkpoint files go in `checkpoints/`, UNET files go in `diffusion_models/`.\n\n- **Generation fails with workflow errors** -- For Wan2.1, you need all three files (UNET, text encoder, VAE). If any are missing, generation will fail.\n\n- **"CUDA out of memory"** -- Video generation needs a lot of GPU memory. 
Try reducing the resolution or number of frames in your generation settings, or use LTX-Video which is lighter.\n\n- **Server crashes mid-generation** -- Press Ctrl+C in the terminal and run the start command again.\n';

// src/providers/comfyui/index.ts
var ComfyUIProvider = class {
  name = "comfyui";
  displayName = "ComfyUI";
  description = "Generate videos locally using node-based workflows. Supports LTX-Video and Wan2.1.";
  capabilities = ["video"];
  readme = readme_default4;
  defaultBaseUrl = "http://127.0.0.1:8188";
  get baseUrl() {
    return getProviderBaseUrl(this.name, this.defaultBaseUrl);
  }
  getBaseUrl() {
    return this.baseUrl;
  }
  async isRunning() {
    try {
      const response = await fetch(`${this.getBaseUrl()}/system_stats`, {
        method: "GET",
        signal: AbortSignal.timeout(5e3)
      });
      return response.ok;
    } catch {
      return false;
    }
  }
  async detect() {
    const savedPath = getProviderInstallPath(this.name);
    const possiblePaths = [
      ...savedPath ? [savedPath] : [],
      path4.join(os4.homedir(), "ComfyUI"),
      path4.join(os4.homedir(), "comfyui"),
      path4.join(os4.homedir(), "Projects", "ComfyUI"),
      path4.join(os4.homedir(), "Code", "ComfyUI")
    ];
    let installed = false;
    for (const p of possiblePaths) {
      if (fs3.existsSync(path4.join(p, "main.py")) && fs3.existsSync(path4.join(p, "requirements.txt"))) {
        installed = true;
        break;
      }
    }
    let running = false;
    try {
      const response = await fetch("http://127.0.0.1:8188/system_stats", {
        signal: AbortSignal.timeout(1e3)
      });
      running = response.ok;
      if (running) installed = true;
    } catch {
      running = false;
    }
    return { installed, running };
  }
  /**
   * Discover video models by scanning ComfyUI's model directories.
   */
  async discoverModels() {
    const models = [];
    try {
      const response = await fetch(
        `${this.getBaseUrl()}/object_info/CheckpointLoaderSimple`,
        { signal: AbortSignal.timeout(5e3) }
      );
      if (response.ok) {
        const data = await response.json();
        const nodeInfo = data.CheckpointLoaderSimple;
        const checkpoints = nodeInfo?.input?.required?.ckpt_name?.[0] || [];
        for (const name of checkpoints) {
          if (isKnownVideoModel(name)) {
            const workflow = getWorkflowForModel(name);
            models.push({
              name,
              provider: this.name,
              capability: "video",
              parameterSize: workflow?.displayName
            });
          }
        }
      }
    } catch {
    }
    try {
      const response = await fetch(
        `${this.getBaseUrl()}/object_info/UNETLoader`,
        { signal: AbortSignal.timeout(5e3) }
      );
      if (response.ok) {
        const data = await response.json();
        const nodeInfo = data.UNETLoader;
        const unetModels = nodeInfo?.input?.required?.unet_name?.[0] || [];
        for (const name of unetModels) {
          if (isKnownVideoModel(name) && !models.some((m) => m.name === name)) {
            const workflow = getWorkflowForModel(name);
            models.push({
              name,
              provider: this.name,
              capability: "video",
              parameterSize: workflow?.displayName
            });
          }
        }
      }
    } catch {
    }
    if (models.length === 0) {
      const installPath = getProviderInstallPath(this.name);
      if (installPath) {
        const dirs = [
          path4.join(installPath, "models", "checkpoints"),
          path4.join(installPath, "models", "diffusion_models")
        ];
        for (const dir of dirs) {
          if (fs3.existsSync(dir)) {
            try {
              const files = fs3.readdirSync(dir);
              for (const file of files) {
                if (isKnownVideoModel(file) && !models.some((m) => m.name === file)) {
                  const workflow = getWorkflowForModel(file);
                  models.push({
                    name: file,
                    provider: this.name,
                    capability: "video",
                    parameterSize: workflow?.displayName
                  });
                }
              }
            } catch {
            }
          }
        }
      }
    }
    return models;
  }
  /**
   * Generate a video using ComfyUI.
   */
  async generateVideo(model, prompt, options, onProgress) {
    const baseUrl = this.getBaseUrl();
    const workflowConfig = getWorkflowForModel(model);
    if (!workflowConfig) {
      throw new Error(
        `No workflow template found for model: ${model}. Supported families: LTX-Video, Wan2.1`
      );
    }
    const defaults = workflowConfig.defaults;
    const seed = options?.seed !== void 0 && options.seed !== -1 ? options.seed : Math.floor(Math.random() * 2 ** 32);
    const workflow = workflowConfig.buildWorkflow({
      model,
      prompt,
      negativePrompt: options?.negativePrompt || "worst quality, blurry, distorted",
      width: options?.width || defaults.width,
      height: options?.height || defaults.height,
      numFrames: options?.numFrames || defaults.numFrames,
      fps: options?.fps || defaults.fps,
      steps: options?.steps || defaults.steps,
      cfgScale: options?.cfgScale || defaults.cfgScale,
      seed
    });
    const clientId = `mindstudio_${Date.now()}_${Math.random().toString(36).slice(2)}`;
    const wsUrl = baseUrl.replace(/^http/, "ws") + `/ws?clientId=${clientId}`;
    const submitResponse = await fetch(`${baseUrl}/prompt`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        prompt: workflow,
        client_id: clientId
      })
    });
    if (!submitResponse.ok) {
      const errorText = await submitResponse.text();
      throw new Error(
        `ComfyUI prompt submission failed: ${submitResponse.status} ${errorText}`
      );
    }
    const submitResult2 = await submitResponse.json();
    if (submitResult2.node_errors && Object.keys(submitResult2.node_errors).length > 0) {
      throw new Error(
        `ComfyUI workflow validation failed: ${JSON.stringify(submitResult2.node_errors)}`
      );
    }
    const promptId = submitResult2.prompt_id;
    await this.waitForCompletion(wsUrl, promptId, onProgress);
    const historyResponse = await fetch(`${baseUrl}/history/${promptId}`, {
      signal: AbortSignal.timeout(3e4)
    });
    if (!historyResponse.ok) {
      throw new Error(
        `Failed to fetch result history: ${historyResponse.status}`
      );
    }
    const history = await historyResponse.json();
    const promptHistory = history[promptId];
    if (!promptHistory) {
      throw new Error("No result found in ComfyUI history");
    }
    const outputNodeId = workflowConfig.outputNodeId;
    const outputData = promptHistory.outputs[outputNodeId];
    const outputFiles = outputData?.gifs || outputData?.images;
    if (!outputFiles || outputFiles.length === 0) {
      throw new Error("No output files found in ComfyUI result");
    }
    const outputFile = outputFiles[0];
    const fileUrl = new URL(`${baseUrl}/view`);
    fileUrl.searchParams.set("filename", outputFile.filename);
    fileUrl.searchParams.set("subfolder", outputFile.subfolder || "");
    fileUrl.searchParams.set("type", outputFile.type || "output");
    const fileResponse = await fetch(fileUrl.toString(), {
      signal: AbortSignal.timeout(6e4)
    });
    if (!fileResponse.ok) {
      throw new Error(`Failed to download output file: ${fileResponse.status}`);
    }
    const fileBuffer = await fileResponse.arrayBuffer();
    const videoBase64 = Buffer.from(fileBuffer).toString("base64");
    const ext = path4.extname(outputFile.filename).toLowerCase();
    const mimeType = ext === ".mp4" ? "video/mp4" : ext === ".webm" ? "video/webm" : ext === ".webp" ? "image/webp" : ext === ".gif" ? "image/gif" : "video/mp4";
    const fps = options?.fps || defaults.fps;
    const numFrames = options?.numFrames || defaults.numFrames;
    return {
      videoBase64,
      mimeType,
      duration: numFrames / fps,
      fps,
      seed
    };
  }
  /**
   * Wait for a ComfyUI prompt to finish execution via WebSocket.
   */
  waitForCompletion(wsUrl, promptId, onProgress) {
    return new Promise((resolve, reject) => {
      const timeoutMs = 30 * 60 * 1e3;
      let ws;
      const timeout = setTimeout(() => {
        try {
          ws?.close();
        } catch {
        }
        reject(new Error("Video generation timed out after 30 minutes"));
      }, timeoutMs);
      try {
        ws = new WebSocket(wsUrl);
      } catch (err) {
        clearTimeout(timeout);
        reject(
          new Error(
            `Failed to connect to ComfyUI WebSocket: ${err instanceof Error ? err.message : err}`
          )
        );
        return;
      }
      ws.onmessage = (event) => {
        try {
          const message = JSON.parse(
            typeof event.data === "string" ? event.data : ""
          );
          if (message.type === "progress") {
            const data = message.data;
            if (!data.prompt_id || data.prompt_id === promptId) {
              onProgress?.({
                step: data.value,
                totalSteps: data.max,
                currentNode: data.node
              });
            }
          }
          if (message.type === "execution_success") {
            const data = message.data;
            if (data.prompt_id === promptId) {
              clearTimeout(timeout);
              ws.close();
              resolve();
            }
          }
          if (message.type === "execution_error") {
            const data = message.data;
            if (data.prompt_id === promptId) {
              clearTimeout(timeout);
              ws.close();
              reject(
                new Error(
                  `ComfyUI execution error${data.node_type ? ` in ${data.node_type}` : ""}: ${data.exception_message || "Unknown error"}`
                )
              );
            }
          }
        } catch {
        }
      };
      ws.onerror = () => {
        clearTimeout(timeout);
        reject(new Error("ComfyUI WebSocket error: connection failed"));
      };
      ws.onclose = (event) => {
        if (!event.wasClean) {
          clearTimeout(timeout);
          reject(new Error("ComfyUI WebSocket connection closed unexpectedly"));
        }
      };
    });
  }
  /**
   * Get parameter schemas for video generation UI configuration.
   */
  async getParameterSchemas() {
    return [
      {
        type: "number",
        label: "Width",
        variable: "width",
        helpText: "Video width in pixels. Larger = better quality but bigger file.",
        defaultValue: 512,
        numberOptions: { min: 256, max: 1280, step: 64 }
      },
      {
        type: "number",
        label: "Height",
        variable: "height",
        helpText: "Video height in pixels. Larger = better quality but bigger file.",
        defaultValue: 320,
        numberOptions: { min: 256, max: 1280, step: 64 }
      },
      {
        type: "number",
        label: "Frames",
        variable: "numFrames",
        helpText: "Number of frames to generate. More frames = longer video but bigger file. Keep low to avoid upload limits.",
        defaultValue: 41,
        numberOptions: { min: 9, max: 97, step: 8 }
      },
      {
        type: "number",
        label: "FPS",
        variable: "fps",
        helpText: "Frames per second for the output video.",
        defaultValue: 8,
        numberOptions: { min: 4, max: 30, step: 1 }
      },
      {
        type: "number",
        label: "Steps",
        variable: "steps",
        helpText: "Number of denoising steps. More steps = higher quality but slower.",
        defaultValue: 20,
        numberOptions: { min: 10, max: 100, step: 1 }
      },
      {
        type: "number",
        label: "CFG Scale",
        variable: "cfgScale",
        helpText: "How strongly the video should follow the prompt. Higher = more literal.",
        defaultValue: 7,
        numberOptions: { min: 1, max: 20, step: 0.5 }
      },
      {
        type: "number",
        label: "Seed",
        variable: "seed",
        helpText: "A specific value used to guide randomness. Use -1 for random.",
        defaultValue: -1,
        numberOptions: { min: -1, max: 2147483647 }
      },
      {
        type: "text",
        label: "Negative Prompt",
        variable: "negativePrompt",
        helpText: "Things you don't want in the video",
        placeholder: "worst quality, blurry, distorted"
      }
    ];
  }
};
var comfyui_default = new ComfyUIProvider();
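
// [Illustrative sketch, not part of the published chunk.] Given the class
// above, a caller probes install/server state and then enumerates video
// models, assuming a ComfyUI server on the default port:
//
//   const { installed, running } = await comfyui_default.detect();
//   if (running) {
//     const models = await comfyui_default.discoverModels();
//     // e.g. [{ name: "ltx-video-2b-v0.9.5.safetensors", provider: "comfyui",
//     //         capability: "video", parameterSize: ... }]
//   }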

// src/providers/index.ts
var allProviders = [
  ollama_default,
  lmstudio_default,
  stable_diffusion_default,
  comfyui_default
];
function getProvider(name) {
  return allProviders.find((p) => p.name === name);
}
async function discoverRunningProviders() {
  const results = await Promise.all(
    allProviders.map(async (provider) => ({
      provider,
      running: await provider.isRunning()
    }))
  );
  return results.filter((r) => r.running).map((r) => r.provider);
}
async function discoverAllModels() {
  const runningProviders = await discoverRunningProviders();
  const modelArrays = await Promise.all(
    runningProviders.map((p) => p.discoverModels())
  );
  return modelArrays.flat();
}
async function getProviderStatuses() {
  return Promise.all(
    allProviders.map(async (provider) => ({
      provider,
      running: await provider.isRunning()
    }))
  );
}
async function detectAllProviderStatuses() {
  return Promise.all(
    allProviders.map(async (provider) => ({
      provider,
      status: await provider.detect()
    }))
  );
}
async function discoverAllModelsWithParameters() {
  const runningProviders = await discoverRunningProviders();
  const modelsWithParams = await Promise.all(
    runningProviders.map(async (provider) => {
      const models = await provider.discoverModels();
      if (typeof provider.getParameterSchemas === "function") {
        const parameters = await provider.getParameterSchemas();
        return models.map((model) => ({
          ...model,
          parameters
        }));
      }
      return models;
    })
  );
  return modelsWithParams.flat();
}
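
// [Illustrative sketch, not part of the published chunk.] The discovery
// helpers compose: every provider is probed concurrently, only running ones
// are asked for models, and parameter schemas are attached when available:
//
//   const models = await discoverAllModelsWithParameters();
//   // each entry: { name, provider, capability, parameterSize?, parameters? }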

// src/events.ts
import { EventEmitter } from "events";
var RequestEventEmitter = class extends EventEmitter {
  emitStart(event) {
    this.emit("request:start", event);
  }
  emitProgress(event) {
    this.emit("request:progress", event);
  }
  emitComplete(event) {
    this.emit("request:complete", event);
  }
  onStart(handler) {
    this.on("request:start", handler);
    return () => this.off("request:start", handler);
  }
  onProgress(handler) {
    this.on("request:progress", handler);
    return () => this.off("request:progress", handler);
  }
  onComplete(handler) {
    this.on("request:complete", handler);
    return () => this.off("request:complete", handler);
  }
};
var requestEvents = new RequestEventEmitter();
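
// [Illustrative sketch, not part of the published chunk.] Each on* helper
// returns an unsubscribe function, so a consumer (such as the TUI) can listen
// and clean up without keeping a reference to the handler:
//
//   const off = requestEvents.onComplete((e) => console.log(e.id, e.success));
//   // ...later, on teardown:
//   off();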

// src/runner.ts
var TunnelRunner = class {
  isRunning = false;
  modelProviderMap = /* @__PURE__ */ new Map();
  models = [];
  /**
   * Start with a pre-discovered list of model names.
   * Used by the TUI, which discovers models itself.
   */
  async start(models) {
    if (this.isRunning) return;
    this.models = models;
    this.isRunning = true;
    const allModels = await discoverAllModels();
    this.buildModelProviderMap(allModels);
    this.pollLoop();
  }
  stop() {
    this.isRunning = false;
    disconnectHeartbeat().catch(() => {
    });
  }
  buildModelProviderMap(models) {
    this.modelProviderMap.clear();
    for (const model of models) {
      const provider = getProvider(model.provider);
      if (provider) {
        this.modelProviderMap.set(model.name, provider);
      }
    }
  }
  async pollLoop() {
    while (this.isRunning) {
      try {
        const request = await pollForRequest(this.models);
        if (request) {
          this.processRequest(request);
        }
      } catch (error) {
        await this.sleep(5e3);
      }
    }
  }
  async processRequest(request) {
    const startTime = Date.now();
    requestEvents.emitStart({
      id: request.id,
      modelId: request.modelId,
      requestType: request.requestType,
      timestamp: startTime
    });
    const provider = this.modelProviderMap.get(request.modelId);
    if (!provider) {
      const error = `Model ${request.modelId} not found`;
      await submitResult(request.id, false, void 0, error);
      requestEvents.emitComplete({
        id: request.id,
        success: false,
        duration: Date.now() - startTime,
        error
      });
      return;
    }
    try {
      switch (request.requestType) {
        case "llm_chat":
          await this.handleTextRequest(request, provider, startTime);
          break;
        case "image_generation":
          await this.handleImageRequest(request, provider, startTime);
          break;
        case "video_generation":
          await this.handleVideoRequest(request, provider, startTime);
          break;
        default:
          throw new Error(`Unsupported request type: ${request.requestType}`);
      }
    } catch (error) {
      const message = error instanceof Error ? error.message : "Unknown error";
      await submitResult(request.id, false, void 0, message);
      requestEvents.emitComplete({
        id: request.id,
        success: false,
        duration: Date.now() - startTime,
        error: message
      });
    }
  }
  async handleTextRequest(request, provider, startTime) {
    if (!provider.chat) {
      throw new Error(`Provider does not support text generation`);
    }
    const messages = (request.payload.messages || []).map((m) => ({
      role: m.role,
      content: m.content
    }));
    const stream = provider.chat(request.modelId, messages, {
      temperature: request.payload.temperature,
      maxTokens: request.payload.maxTokens
    });
    let fullContent = "";
    let lastProgressUpdate = 0;
    const progressInterval = 100;
    for await (const chunk of stream) {
      fullContent += chunk.content;
      const now = Date.now();
      if (now - lastProgressUpdate > progressInterval) {
        await submitProgress(request.id, fullContent);
        requestEvents.emitProgress({
          id: request.id,
          content: fullContent
        });
        lastProgressUpdate = now;
      }
    }
    await submitProgress(request.id, fullContent);
    await submitResult(request.id, true, {
      content: fullContent,
      usage: { promptTokens: 0, completionTokens: 0 }
    });
    requestEvents.emitComplete({
      id: request.id,
      success: true,
      duration: Date.now() - startTime,
      result: { chars: fullContent.length }
    });
  }
  async handleImageRequest(request, provider, startTime) {
    if (!provider.generateImage) {
      throw new Error(`Provider does not support image generation`);
    }
    const prompt = request.payload.prompt || "";
    const config2 = request.payload.config || {};
    let result;
    if (provider.generateImageWithProgress) {
      result = await provider.generateImageWithProgress(
        request.modelId,
        prompt,
        {
          negativePrompt: config2.negativePrompt,
          width: config2.width,
          height: config2.height,
          steps: config2.steps,
          cfgScale: config2.cfgScale,
          seed: config2.seed,
          sampler: config2.sampler
        },
        async (progress) => {
          await submitGenerationProgress(
            request.id,
            progress.step,
            progress.totalSteps,
            progress.preview
          );
          requestEvents.emitProgress({
            id: request.id,
            step: progress.step,
            totalSteps: progress.totalSteps
          });
        }
      );
    } else {
      result = await provider.generateImage(request.modelId, prompt, {
        negativePrompt: config2.negativePrompt,
        width: config2.width,
        height: config2.height,
        steps: config2.steps,
        cfgScale: config2.cfgScale,
        seed: config2.seed,
        sampler: config2.sampler
      });
    }
    await submitResult(request.id, true, {
      imageBase64: result.imageBase64,
      mimeType: result.mimeType,
      seed: result.seed
    });
    const imageSize = Math.round(result.imageBase64.length * 3 / 4);
    requestEvents.emitComplete({
      id: request.id,
      success: true,
      duration: Date.now() - startTime,
      result: { imageSize }
    });
  }
  async handleVideoRequest(request, provider, startTime) {
    if (!provider.generateVideo) {
      throw new Error(`Provider does not support video generation`);
    }
    const prompt = request.payload.prompt || "";
    const config2 = request.payload.config || {};
    const result = await provider.generateVideo(
      request.modelId,
      prompt,
      {
        negativePrompt: config2.negativePrompt,
        width: config2.width,
        height: config2.height,
        numFrames: config2.numFrames,
        fps: config2.fps,
        steps: config2.steps,
        cfgScale: config2.cfgScale,
        seed: config2.seed
      },
      async (progress) => {
        await submitGenerationProgress(
          request.id,
          progress.step,
          progress.totalSteps
        );
        requestEvents.emitProgress({
          id: request.id,
          step: progress.step,
          totalSteps: progress.totalSteps
        });
      }
    );
    await submitResult(request.id, true, {
      videoBase64: result.videoBase64,
      mimeType: result.mimeType,
      duration: result.duration,
      fps: result.fps,
      seed: result.seed
    });
    const videoSize = Math.round(result.videoBase64.length * 3 / 4);
    requestEvents.emitComplete({
      id: request.id,
      success: true,
      duration: Date.now() - startTime,
      result: { videoSize }
    });
  }
  sleep(ms) {
    return new Promise((resolve) => setTimeout(resolve, ms));
  }
};
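
// [Illustrative sketch, not part of the published chunk.] A host embeds the
// runner by handing it the model names to advertise; stop() ends the poll
// loop and disconnects the heartbeat:
//
//   const runner = new TunnelRunner();
//   await runner.start(["ltx-video-2b-v0.9.5.safetensors"]);
//   // ...on shutdown:
//   runner.stop();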

export {
  getEnvironment,
  getApiKey,
  setApiKey,
  getConfigPath,
  verifyApiKey,
  registerLocalModel,
  getRegisteredModels,
  requestDeviceAuth,
  pollDeviceAuth,
  discoverAllModels,
  getProviderStatuses,
  detectAllProviderStatuses,
  discoverAllModelsWithParameters,
  requestEvents,
  TunnelRunner
};
//# sourceMappingURL=chunk-PTK4SJQK.js.map