@edgible-team/cli 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +136 -0
- package/README.md +450 -0
- package/dist/client/api-client.js +1057 -0
- package/dist/client/index.js +21 -0
- package/dist/commands/agent.js +1280 -0
- package/dist/commands/ai.js +608 -0
- package/dist/commands/application.js +885 -0
- package/dist/commands/auth.js +570 -0
- package/dist/commands/base/BaseCommand.js +93 -0
- package/dist/commands/base/CommandHandler.js +7 -0
- package/dist/commands/base/command-wrapper.js +58 -0
- package/dist/commands/base/middleware.js +77 -0
- package/dist/commands/config.js +116 -0
- package/dist/commands/connectivity.js +59 -0
- package/dist/commands/debug.js +98 -0
- package/dist/commands/discover.js +144 -0
- package/dist/commands/examples/migrated-command-example.js +180 -0
- package/dist/commands/gateway.js +494 -0
- package/dist/commands/managedGateway.js +787 -0
- package/dist/commands/utils/config-validator.js +76 -0
- package/dist/commands/utils/gateway-prompt.js +79 -0
- package/dist/commands/utils/input-parser.js +120 -0
- package/dist/commands/utils/output-formatter.js +109 -0
- package/dist/config/app-config.js +99 -0
- package/dist/detection/SystemCapabilityDetector.js +1244 -0
- package/dist/detection/ToolDetector.js +305 -0
- package/dist/detection/WorkloadDetector.js +314 -0
- package/dist/di/bindings.js +99 -0
- package/dist/di/container.js +88 -0
- package/dist/di/types.js +32 -0
- package/dist/index.js +52 -0
- package/dist/interfaces/IDaemonManager.js +3 -0
- package/dist/repositories/config-repository.js +62 -0
- package/dist/repositories/gateway-repository.js +35 -0
- package/dist/scripts/postinstall.js +101 -0
- package/dist/services/AgentStatusManager.js +299 -0
- package/dist/services/ConnectivityTester.js +271 -0
- package/dist/services/DependencyInstaller.js +475 -0
- package/dist/services/LocalAgentManager.js +2216 -0
- package/dist/services/application/ApplicationService.js +299 -0
- package/dist/services/auth/AuthService.js +214 -0
- package/dist/services/aws.js +644 -0
- package/dist/services/daemon/DaemonManagerFactory.js +65 -0
- package/dist/services/daemon/DockerDaemonManager.js +395 -0
- package/dist/services/daemon/LaunchdDaemonManager.js +257 -0
- package/dist/services/daemon/PodmanDaemonManager.js +369 -0
- package/dist/services/daemon/SystemdDaemonManager.js +221 -0
- package/dist/services/daemon/WindowsServiceDaemonManager.js +210 -0
- package/dist/services/daemon/index.js +16 -0
- package/dist/services/edgible.js +3060 -0
- package/dist/services/gateway/GatewayService.js +334 -0
- package/dist/state/config.js +146 -0
- package/dist/types/AgentConfig.js +5 -0
- package/dist/types/AgentStatus.js +5 -0
- package/dist/types/ApiClient.js +5 -0
- package/dist/types/ApiRequests.js +5 -0
- package/dist/types/ApiResponses.js +5 -0
- package/dist/types/Application.js +5 -0
- package/dist/types/CaddyJson.js +5 -0
- package/dist/types/UnifiedAgentStatus.js +56 -0
- package/dist/types/WireGuard.js +5 -0
- package/dist/types/Workload.js +5 -0
- package/dist/types/agent.js +5 -0
- package/dist/types/command-options.js +5 -0
- package/dist/types/connectivity.js +5 -0
- package/dist/types/errors.js +250 -0
- package/dist/types/gateway-types.js +5 -0
- package/dist/types/index.js +48 -0
- package/dist/types/models/ApplicationData.js +5 -0
- package/dist/types/models/CertificateData.js +5 -0
- package/dist/types/models/DeviceData.js +5 -0
- package/dist/types/models/DevicePoolData.js +5 -0
- package/dist/types/models/OrganizationData.js +5 -0
- package/dist/types/models/OrganizationInviteData.js +5 -0
- package/dist/types/models/ProviderConfiguration.js +5 -0
- package/dist/types/models/ResourceData.js +5 -0
- package/dist/types/models/ServiceResourceData.js +5 -0
- package/dist/types/models/UserData.js +5 -0
- package/dist/types/route.js +5 -0
- package/dist/types/validation/schemas.js +218 -0
- package/dist/types/validation.js +5 -0
- package/dist/utils/FileIntegrityManager.js +256 -0
- package/dist/utils/PathMigration.js +219 -0
- package/dist/utils/PathResolver.js +235 -0
- package/dist/utils/PlatformDetector.js +277 -0
- package/dist/utils/console-logger.js +130 -0
- package/dist/utils/docker-compose-parser.js +179 -0
- package/dist/utils/errors.js +130 -0
- package/dist/utils/health-checker.js +155 -0
- package/dist/utils/json-logger.js +72 -0
- package/dist/utils/log-formatter.js +293 -0
- package/dist/utils/logger.js +59 -0
- package/dist/utils/network-utils.js +217 -0
- package/dist/utils/output.js +182 -0
- package/dist/utils/passwordValidation.js +91 -0
- package/dist/utils/progress.js +167 -0
- package/dist/utils/sudo-checker.js +22 -0
- package/dist/utils/urls.js +32 -0
- package/dist/utils/validation.js +31 -0
- package/dist/validation/schemas.js +175 -0
- package/dist/validation/validator.js +67 -0
- package/package.json +83 -0
|
@@ -0,0 +1,608 @@
|
|
|
1
|
+
"use strict";
// ---------------------------------------------------------------------------
// CommonJS interop helpers emitted by the TypeScript compiler (tslib-style).
// They re-export ES-module bindings and wrap default imports; generated code,
// do not edit by hand.
// ---------------------------------------------------------------------------
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    var desc = Object.getOwnPropertyDescriptor(m, k);
    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
        desc = { enumerable: true, get: function() { return m[k]; } };
    }
    Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    o[k2] = m[k];
}));
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
    Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
    o["default"] = v;
});
// Emulates `import * as ns from '...'`: copies every own key except 'default'.
var __importStar = (this && this.__importStar) || (function () {
    var ownKeys = function(o) {
        ownKeys = Object.getOwnPropertyNames || function (o) {
            var ar = [];
            for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
            return ar;
        };
        return ownKeys(o);
    };
    return function (mod) {
        if (mod && mod.__esModule) return mod;
        var result = {};
        if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
        __setModuleDefault(result, mod);
        return result;
    };
})();
// Emulates `import x from '...'` for plain CommonJS modules.
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
// Public entry point: registers the `ai` command group (defined below;
// function declarations are hoisted, so this assignment is safe here).
exports.setupAiCommands = setupAiCommands;
// Third-party dependencies.
const chalk_1 = __importDefault(require("chalk"));
const child_process_1 = require("child_process");
const os = __importStar(require("os"));
const inquirer_1 = __importDefault(require("inquirer"));
const node_fetch_1 = __importDefault(require("node-fetch"));
// Project-local dependencies: command wrapper, DI container/types, and the
// system capability detector used for model recommendations.
const command_wrapper_1 = require("./base/command-wrapper");
const container_1 = require("../di/container");
const types_1 = require("../di/types");
const SystemCapabilityDetector_1 = require("../detection/SystemCapabilityDetector");
|
|
49
|
+
/**
 * Inquirer choice that lets the user type an arbitrary Ollama model name.
 */
const CUSTOM_MODEL_CHOICE = { name: 'Enter custom model name', value: '__custom__' };
/**
 * Build the slug used as an inquirer choice value for a recommended model
 * (lowercase, spaces -> dashes, parentheses stripped).
 */
function modelChoiceValue(m) {
    return m.modelName.toLowerCase().replace(/\s+/g, '-').replace(/[()]/g, '');
}
/**
 * Prompt for a free-form Ollama model name (e.g. deepseek-r1:8b).
 * Returns the trimmed input; empty input is rejected at the prompt.
 */
async function promptCustomModelName() {
    const customAnswer = await inquirer_1.default.prompt([
        {
            type: 'input',
            name: 'model',
            message: 'Enter Ollama model name (e.g., deepseek-r1:8b, llama3.2:3b):',
            validate: (input) => {
                if (!input || input.trim().length === 0) {
                    return 'Model name cannot be empty';
                }
                return true;
            },
        },
    ]);
    return customAnswer.model.trim();
}
/**
 * Show a list prompt over the given choices and resolve the selected value.
 * The '__custom__' sentinel is handled here by chaining into the free-form
 * prompt; any other sentinel value (e.g. '__all__') is returned to the caller.
 */
async function promptModelFromChoices(choices) {
    const answer = await inquirer_1.default.prompt([
        {
            type: 'list',
            name: 'model',
            message: 'Select a model to use:',
            choices,
        },
    ]);
    if (answer.model === '__custom__') {
        return promptCustomModelName();
    }
    return answer.model;
}
/**
 * Register the `ai` command group (Ollama service management) on the given
 * Commander program. Subcommands: `ai setup` (install/select/pull/start) and
 * `ai stop`.
 *
 * @param program - Commander program instance to attach the commands to.
 */
function setupAiCommands(program) {
    const aiCommand = program
        .command('ai')
        .description('Manage Ollama AI service');
    aiCommand
        .command('setup')
        .description('Setup Ollama: install, discover capabilities, and start with selected model')
        .option('--model <model>', 'Model to use (skips interactive selection)')
        .option('--auto-install', 'Automatically install Ollama without prompting')
        .action((0, command_wrapper_1.wrapCommand)(async (options) => {
        const container = (0, container_1.getContainer)();
        const logger = container.get(types_1.TYPES.Logger);
        logger.info('Setting up Ollama AI service', { model: options.model, autoInstall: options.autoInstall });
        console.log(chalk_1.default.blue('\n🤖 Ollama AI Setup'));
        console.log(chalk_1.default.gray('This will install Ollama, check system capabilities, and start with a selected model.\n'));
        // Step 1: Check if Ollama is installed; offer to install when missing.
        console.log(chalk_1.default.blue('Step 1: Checking Ollama installation...\n'));
        const isOllamaInstalled = await checkOllamaInstalled();
        if (!isOllamaInstalled) {
            let shouldInstall = options.autoInstall || false;
            if (!shouldInstall) {
                const answer = await inquirer_1.default.prompt([
                    {
                        type: 'confirm',
                        name: 'install',
                        message: 'Ollama is not installed. Would you like to install it now?',
                        default: true,
                    },
                ]);
                shouldInstall = answer.install;
            }
            if (shouldInstall) {
                console.log(chalk_1.default.yellow('Installing Ollama...\n'));
                await installOllama();
                console.log(chalk_1.default.green('✓ Ollama installed successfully\n'));
            }
            else {
                throw new Error('Ollama is required but not installed. Please install it manually from https://ollama.com');
            }
        }
        else {
            console.log(chalk_1.default.green('✓ Ollama is already installed\n'));
        }
        // Step 2: Discover system capabilities (GPU readiness + model recommendations).
        console.log(chalk_1.default.blue('Step 2: Discovering system capabilities...\n'));
        const capabilities = await SystemCapabilityDetector_1.SystemCapabilityDetector.detectCapabilities();
        const { ollamaGpuReady, ollamaGpuReason } = capabilities.gpuDriverSupport;
        if (ollamaGpuReady) {
            console.log(chalk_1.default.green('✓ GPU acceleration is ready for Ollama'));
        }
        else {
            console.log(chalk_1.default.yellow('⚠ GPU acceleration not available - Ollama will run in CPU mode'));
        }
        if (ollamaGpuReason) {
            console.log(chalk_1.default.gray(` ${ollamaGpuReason}\n`));
        }
        // Step 3: Select a model. The --model flag wins; otherwise prompt,
        // preferring 'excellent'/'good' recommendations.
        console.log(chalk_1.default.blue('Step 3: Selecting model...\n'));
        let selectedModel = options.model;
        if (!selectedModel) {
            const suitableModels = capabilities.recommendedModels.filter((m) => m.suitability === 'excellent' || m.suitability === 'good');
            // Full recommendation list, used when nothing fits well or on request.
            const allModelChoices = capabilities.recommendedModels.map((m) => ({
                name: `${m.modelName} (${m.size}) - ${m.suitability}`,
                value: modelChoiceValue(m),
            }));
            allModelChoices.push(CUSTOM_MODEL_CHOICE);
            if (suitableModels.length === 0) {
                console.log(chalk_1.default.yellow('⚠ No models are well-suited for your system.'));
                console.log(chalk_1.default.yellow('You can still run smaller models, but performance may be limited.\n'));
                selectedModel = await promptModelFromChoices(allModelChoices);
            }
            else {
                console.log(chalk_1.default.green('Recommended models for your system:\n'));
                suitableModels.forEach((model) => {
                    const icon = model.suitability === 'excellent' ? '✓' : '•';
                    const color = model.suitability === 'excellent' ? chalk_1.default.green : chalk_1.default.cyan;
                    console.log(color(` ${icon} ${model.modelName} (${model.size})`));
                    console.log(chalk_1.default.gray(` ${model.reasoning}\n`));
                });
                const modelChoices = suitableModels.map((m) => ({
                    name: `${m.modelName} (${m.size}) - ${m.suitability === 'excellent' ? 'Recommended' : 'Good fit'}`,
                    value: modelChoiceValue(m),
                }));
                modelChoices.push({
                    name: 'Show all models (including marginal/insufficient)',
                    value: '__all__',
                });
                modelChoices.push(CUSTOM_MODEL_CHOICE);
                const picked = await promptModelFromChoices(modelChoices);
                // '__all__' re-prompts over the unfiltered list.
                selectedModel = picked === '__all__'
                    ? await promptModelFromChoices(allModelChoices)
                    : picked;
            }
        }
        if (!selectedModel) {
            throw new Error('No model selected');
        }
        // Names that already look like Ollama refs (contain ':' or '/') pass
        // through untouched; recommendation slugs are mapped to Ollama format
        // (e.g. deepseek-r1-14b -> deepseek-r1:14b).
        const ollamaModelName = selectedModel.includes(':') || selectedModel.includes('/')
            ? selectedModel
            : normalizeModelName(selectedModel);
        console.log(chalk_1.default.blue(`\nSelected model: ${ollamaModelName}\n`));
        // Step 4: Ensure the model is available locally; offer to pull it.
        console.log(chalk_1.default.blue('Step 4: Checking if model is available...\n'));
        const isModelAvailable = await checkModelAvailable(ollamaModelName);
        if (!isModelAvailable) {
            console.log(chalk_1.default.yellow(`Model ${ollamaModelName} is not available locally.`));
            const answer = await inquirer_1.default.prompt([
                {
                    type: 'confirm',
                    name: 'pull',
                    message: `Would you like to download ${ollamaModelName} now? (This may take a while)`,
                    default: true,
                },
            ]);
            if (!answer.pull) {
                throw new Error(`Model ${ollamaModelName} is not available. Please pull it manually with: ollama pull ${ollamaModelName}`);
            }
            console.log(chalk_1.default.yellow(`\nDownloading ${ollamaModelName}...\n`));
            await pullModel(ollamaModelName);
            console.log(chalk_1.default.green(`✓ Model ${ollamaModelName} downloaded successfully\n`));
        }
        else {
            console.log(chalk_1.default.green(`✓ Model ${ollamaModelName} is available\n`));
        }
        // Step 5: Start the Ollama service (best-effort, platform-specific).
        console.log(chalk_1.default.blue('Step 5: Starting Ollama service...\n'));
        await startOllama();
        // Step 6: Verify the service responds before declaring success.
        console.log(chalk_1.default.blue('Step 6: Verifying Ollama is running...\n'));
        const isRunning = await checkOllamaRunning();
        if (!isRunning) {
            throw new Error('Ollama service failed to start. Please check the logs.');
        }
        console.log(chalk_1.default.green('✓ Ollama is running\n'));
        console.log(chalk_1.default.blue('Ollama Setup Complete!\n'));
        console.log(chalk_1.default.white('You can now use Ollama with the following commands:'));
        console.log(chalk_1.default.gray(` ollama run ${ollamaModelName}`));
        console.log(chalk_1.default.gray(` ollama list`));
        console.log(chalk_1.default.gray(` edgible ai stop # Stop Ollama service\n`));
    }, {
        configRepository: (0, container_1.getContainer)().get(types_1.TYPES.ConfigRepository),
    }));
    aiCommand
        .command('stop')
        .description('Stop Ollama service')
        .action((0, command_wrapper_1.wrapCommand)(async () => {
        const container = (0, container_1.getContainer)();
        const logger = container.get(types_1.TYPES.Logger);
        logger.info('Stopping Ollama service');
        console.log(chalk_1.default.blue('\n🛑 Stopping Ollama service...\n'));
        const isRunning = await checkOllamaRunning();
        if (!isRunning) {
            console.log(chalk_1.default.yellow('Ollama is not running\n'));
            return;
        }
        await stopOllama();
        console.log(chalk_1.default.green('✓ Ollama service stopped\n'));
    }, {
        configRepository: (0, container_1.getContainer)().get(types_1.TYPES.ConfigRepository),
    }));
}
|
|
323
|
+
/**
 * Probe for the Ollama CLI by invoking `ollama --version` (output discarded,
 * 2s timeout).
 *
 * @returns {Promise<boolean>} true when the command exits successfully,
 *   false on any failure (missing binary, timeout, non-zero exit).
 */
async function checkOllamaInstalled() {
    let installed = true;
    try {
        (0, child_process_1.execSync)('ollama --version', { encoding: 'utf8', timeout: 2000, stdio: 'ignore' });
    }
    catch {
        installed = false;
    }
    return installed;
}
|
|
335
|
+
/**
 * Install Ollama using the platform's native mechanism:
 *   - linux:  official install script piped to sh
 *   - darwin: Homebrew (instructions printed if brew is absent)
 *   - win32:  winget (instructions printed if winget is absent)
 *
 * @throws {Error} when the platform is unsupported, the required package
 *   manager is missing, or the installer itself fails.
 */
async function installOllama() {
    const platform = os.platform();
    try {
        switch (platform) {
            case 'linux':
                // Official Ollama install script.
                (0, child_process_1.execSync)('curl -fsSL https://ollama.com/install.sh | sh', {
                    encoding: 'utf8',
                    stdio: 'inherit',
                });
                break;
            case 'darwin':
                // macOS: require Homebrew; any brew failure falls into the
                // "manual install" branch below.
                try {
                    (0, child_process_1.execSync)('brew --version', { encoding: 'utf8', timeout: 2000, stdio: 'ignore' });
                    (0, child_process_1.execSync)('brew install ollama', {
                        encoding: 'utf8',
                        stdio: 'inherit',
                    });
                }
                catch {
                    console.log(chalk_1.default.yellow('Homebrew not found. Please install Ollama manually:'));
                    console.log(chalk_1.default.gray(' Visit: https://ollama.com/download\n'));
                    throw new Error('Ollama installation requires Homebrew or manual installation');
                }
                break;
            case 'win32':
                // Windows: require winget; any winget failure falls into the
                // "manual install" branch below.
                try {
                    (0, child_process_1.execSync)('winget --version', { encoding: 'utf8', timeout: 2000, stdio: 'ignore' });
                    (0, child_process_1.execSync)('winget install Ollama.Ollama', {
                        encoding: 'utf8',
                        stdio: 'inherit',
                    });
                }
                catch {
                    console.log(chalk_1.default.yellow('winget not found. Please install Ollama manually:'));
                    console.log(chalk_1.default.gray(' Visit: https://ollama.com/download\n'));
                    throw new Error('Ollama installation requires winget or manual installation');
                }
                break;
            default:
                throw new Error(`Unsupported platform: ${platform}`);
        }
    }
    catch (error) {
        console.error(chalk_1.default.red('Failed to install Ollama:'), error);
        throw error;
    }
}
|
|
388
|
+
/**
 * Decide whether `ollama list` output contains the given model.
 * Compares the NAME column of each row exactly (case-insensitive); a query
 * without a tag also matches any tag of that model (e.g. 'llama3.2' matches
 * 'llama3.2:3b'). This replaces a whole-output substring test that could
 * false-positive (e.g. 'r1:14b' matched 'deepseek-r1:14b').
 *
 * @param {string} listOutput - raw stdout of `ollama list`.
 * @param {string} modelName - model name, optionally with ':tag'.
 * @returns {boolean}
 */
function modelListIncludes(listOutput, modelName) {
    const wanted = modelName.toLowerCase();
    return listOutput.split('\n').some((line) => {
        // First whitespace-separated token is the model name; the header row
        // starts with 'NAME' and never matches a real model name.
        const name = (line.trim().split(/\s+/)[0] || '').toLowerCase();
        if (!name) {
            return false;
        }
        return name === wanted || (!wanted.includes(':') && name.startsWith(`${wanted}:`));
    });
}
/**
 * Check whether a model is already pulled locally.
 *
 * @param {string} modelName - Ollama model name (e.g. 'llama3.2:3b').
 * @returns {Promise<boolean>} false when `ollama list` fails (e.g. Ollama
 *   not installed) or the model is absent.
 */
async function checkModelAvailable(modelName) {
    try {
        const output = (0, child_process_1.execSync)('ollama list', { encoding: 'utf8', timeout: 5000 });
        return modelListIncludes(output, modelName);
    }
    catch {
        return false;
    }
}
|
|
401
|
+
/**
 * Pull a model from the Ollama registry, streaming progress to the terminal.
 *
 * Security fix: the model name can be arbitrary user input (custom-model
 * prompt), so it must never be interpolated into a shell command string.
 * execFileSync with an argv array passes it as a literal argument without
 * shell interpretation.
 *
 * @param {string} modelName - Ollama model name (e.g. 'deepseek-r1:8b').
 * @throws rethrows any failure from the `ollama pull` invocation.
 */
async function pullModel(modelName) {
    try {
        (0, child_process_1.execFileSync)('ollama', ['pull', modelName], {
            encoding: 'utf8',
            stdio: 'inherit',
        });
    }
    catch (error) {
        console.error(chalk_1.default.red(`Failed to pull model ${modelName}:`), error);
        throw error;
    }
}
|
|
416
|
+
/**
 * Start the Ollama service in a platform-appropriate way.
 *
 * Strategy per platform (each step falls through to the next on failure):
 *   - linux:  user systemd unit, then background `ollama serve`
 *   - darwin: launchctl job, then background `ollama serve`
 *   - win32:  Windows service via `net start`, then background `ollama serve`
 *
 * Best-effort: never throws. A failure here is assumed to mean the service
 * is already running; the caller verifies via checkOllamaRunning().
 */
async function startOllama() {
    const platform = os.platform();
    try {
        if (platform === 'linux') {
            // Try systemd service first
            try {
                (0, child_process_1.execSync)('systemctl --user start ollama', { encoding: 'utf8', timeout: 3000 });
                return;
            }
            catch {
                // Fallback to running ollama serve in background
            }
            // Fallback: start ollama serve in background
            // NOTE(review): the shell backgrounds the process via '&', so
            // execSync returns immediately; the short timeout only bounds
            // the shell's own startup.
            try {
                (0, child_process_1.execSync)('ollama serve > /dev/null 2>&1 &', { encoding: 'utf8', timeout: 1000 });
                // Wait a bit for it to start
                await new Promise((resolve) => setTimeout(resolve, 2000));
            }
            catch {
                // Ignore - may already be running
            }
        }
        else if (platform === 'darwin') {
            // macOS - try launchctl or start directly
            // (label com.ollama.ollama is assumed — TODO confirm against the
            // installed launchd plist)
            try {
                (0, child_process_1.execSync)('launchctl start com.ollama.ollama', { encoding: 'utf8', timeout: 3000 });
            }
            catch {
                // Fallback: start ollama serve
                try {
                    (0, child_process_1.execSync)('ollama serve > /dev/null 2>&1 &', { encoding: 'utf8', timeout: 1000 });
                    await new Promise((resolve) => setTimeout(resolve, 2000));
                }
                catch {
                    // Ignore
                }
            }
        }
        else if (platform === 'win32') {
            // Windows - Ollama typically runs as a service
            try {
                (0, child_process_1.execSync)('net start Ollama', { encoding: 'utf8', timeout: 3000 });
            }
            catch {
                // Service might already be running or not installed as service
                // Try to start it directly
                try {
                    (0, child_process_1.execSync)('start /B ollama serve', { encoding: 'utf8', timeout: 1000 });
                    await new Promise((resolve) => setTimeout(resolve, 2000));
                }
                catch {
                    // Ignore
                }
            }
        }
    }
    catch (error) {
        // Ollama might already be running, which is fine
        console.log(chalk_1.default.gray('Note: Ollama service may already be running\n'));
    }
}
|
|
480
|
+
/**
 * Determine whether the Ollama service is reachable.
 *
 * Primary probe: GET the local HTTP API (http://localhost:11434/api/tags)
 * with a 3s abort timeout. If that fails for any reason, fall back to a
 * process-table check (pgrep on linux/darwin, tasklist on win32).
 *
 * @returns {Promise<boolean>} true when either probe succeeds.
 */
async function checkOllamaRunning() {
    // Primary probe: query the Ollama HTTP API.
    const controller = new AbortController();
    const timeout = setTimeout(() => controller.abort(), 3000);
    try {
        const response = await (0, node_fetch_1.default)('http://localhost:11434/api/tags', {
            method: 'GET',
            signal: controller.signal,
        });
        clearTimeout(timeout);
        return response.ok;
    }
    catch {
        clearTimeout(timeout);
    }
    // Fallback probe: look for a running ollama process.
    try {
        const platform = os.platform();
        if (platform === 'linux' || platform === 'darwin') {
            (0, child_process_1.execSync)('pgrep -f ollama', { encoding: 'utf8', timeout: 2000, stdio: 'ignore' });
            return true;
        }
        if (platform === 'win32') {
            (0, child_process_1.execSync)('tasklist /FI "IMAGENAME eq ollama.exe"', {
                encoding: 'utf8',
                timeout: 2000,
                stdio: 'ignore',
            });
            return true;
        }
    }
    catch {
        return false;
    }
    return false;
}
|
|
524
|
+
/**
 * Stop the Ollama service in a platform-appropriate way.
 *
 * Strategy per platform (each step falls through to the next on failure):
 *   - linux:  user systemd unit, then `pkill -f ollama`
 *   - darwin: launchctl job, then `pkill -f ollama`
 *   - win32:  Windows service via `net stop`, then `taskkill /F`
 *
 * Best-effort: never throws; unexpected outer failures are logged as a
 * warning and swallowed.
 */
async function stopOllama() {
    const platform = os.platform();
    try {
        if (platform === 'linux') {
            // Try systemd service first
            try {
                (0, child_process_1.execSync)('systemctl --user stop ollama', { encoding: 'utf8', timeout: 3000 });
                return;
            }
            catch {
                // Fallback to killing process
            }
            // Fallback: kill ollama processes
            // NOTE(review): pkill -f matches the full command line, so this
            // can also hit unrelated processes whose args contain 'ollama'.
            try {
                (0, child_process_1.execSync)('pkill -f ollama', { encoding: 'utf8', timeout: 2000 });
            }
            catch {
                // Process might not be running
            }
        }
        else if (platform === 'darwin') {
            // macOS
            try {
                (0, child_process_1.execSync)('launchctl stop com.ollama.ollama', { encoding: 'utf8', timeout: 3000 });
            }
            catch {
                // Fallback
                try {
                    (0, child_process_1.execSync)('pkill -f ollama', { encoding: 'utf8', timeout: 2000 });
                }
                catch {
                    // Ignore
                }
            }
        }
        else if (platform === 'win32') {
            // Windows
            try {
                (0, child_process_1.execSync)('net stop Ollama', { encoding: 'utf8', timeout: 3000 });
            }
            catch {
                // Fallback
                try {
                    (0, child_process_1.execSync)('taskkill /F /IM ollama.exe', { encoding: 'utf8', timeout: 2000 });
                }
                catch {
                    // Ignore
                }
            }
        }
    }
    catch (error) {
        console.error(chalk_1.default.yellow('Warning: Error stopping Ollama service:'), error);
    }
}
|
|
582
|
+
/**
 * Normalize a recommended-model display name or selection slug to its Ollama
 * registry name (e.g. 'Llama 3.2 1B' / 'llama-3.2-1b' -> 'llama3.2:1b').
 * Unknown names fall back to the lowercased, dash-separated slug.
 *
 * Bug fix: the previous implementation stripped '.' from the input BEFORE
 * the table lookup, so dotted keys ('llama-3.2-1b', 'phi-3-mini-3.8b',
 * 'qwen2.5-7b', 'deepseek-r1-1.5b') could never match and those selections
 * fell through to invalid names like 'llama-32-1b'. Dots are now retained.
 *
 * @param {string} modelName - display name or slug (no ':' / '/').
 * @returns {string} Ollama model name suitable for `ollama pull/run`.
 */
function normalizeModelName(modelName) {
    // Lowercase, spaces -> dashes, drop parentheses; KEEP dots so the dotted
    // lookup keys below can match.
    const normalized = modelName.toLowerCase().replace(/\s+/g, '-').replace(/[()]/g, '');
    // Map normalized slugs to Ollama registry names.
    const modelMap = {
        'llama-3.2-1b': 'llama3.2:1b',
        'llama-3.2-3b': 'llama3.2:3b',
        'llama-3.1-8b': 'llama3.1:8b',
        'llama-3.1-70b': 'llama3.1:70b',
        'mistral-7b': 'mistral:7b',
        'phi-3-mini-3.8b': 'phi3:mini',
        'qwen2.5-0.5b': 'qwen2.5:0.5b',
        'qwen2.5-7b': 'qwen2.5:7b',
        'deepseek-r1-1.5b': 'deepseek-r1:1.5b',
        'deepseek-r1-7b': 'deepseek-r1:7b',
        'deepseek-r1-8b': 'deepseek-r1:8b',
        'deepseek-r1-14b': 'deepseek-r1:14b',
        'deepseek-r1-32b': 'deepseek-r1:32b',
        'deepseek-r1-70b': 'deepseek-r1:70b',
        'deepseek-r1-671b': 'deepseek-r1:671b',
    };
    return modelMap[normalized] || normalized;
}
|
|
608
|
+
//# sourceMappingURL=ai.js.map
|