@edgible-team/cli 1.0.1 → 1.2.0

This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (232)
  1. package/LICENSE +16 -0
  2. package/README.md +114 -1
  3. package/dist/client/api-client.d.ts +575 -0
  4. package/dist/client/api-client.d.ts.map +1 -0
  5. package/dist/client/api-client.js +196 -2
  6. package/dist/client/index.d.ts +10 -0
  7. package/dist/client/index.d.ts.map +1 -0
  8. package/dist/commands/agent.d.ts +3 -0
  9. package/dist/commands/agent.d.ts.map +1 -0
  10. package/dist/commands/agent.js +18 -34
  11. package/dist/commands/ai.d.ts +3 -0
  12. package/dist/commands/ai.d.ts.map +1 -0
  13. package/dist/commands/ai.js +1661 -275
  14. package/dist/commands/application.d.ts +6 -0
  15. package/dist/commands/application.d.ts.map +1 -0
  16. package/dist/commands/application.js +377 -0
  17. package/dist/commands/auth.d.ts +3 -0
  18. package/dist/commands/auth.d.ts.map +1 -0
  19. package/dist/commands/base/BaseCommand.d.ts +53 -0
  20. package/dist/commands/base/BaseCommand.d.ts.map +1 -0
  21. package/dist/commands/base/CommandHandler.d.ts +28 -0
  22. package/dist/commands/base/CommandHandler.d.ts.map +1 -0
  23. package/dist/commands/base/command-wrapper.d.ts +21 -0
  24. package/dist/commands/base/command-wrapper.d.ts.map +1 -0
  25. package/dist/commands/base/middleware.d.ts +34 -0
  26. package/dist/commands/base/middleware.d.ts.map +1 -0
  27. package/dist/commands/config.d.ts +3 -0
  28. package/dist/commands/config.d.ts.map +1 -0
  29. package/dist/commands/connectivity.d.ts +3 -0
  30. package/dist/commands/connectivity.d.ts.map +1 -0
  31. package/dist/commands/debug.d.ts +3 -0
  32. package/dist/commands/debug.d.ts.map +1 -0
  33. package/dist/commands/debug.js +336 -0
  34. package/dist/commands/discover.d.ts +3 -0
  35. package/dist/commands/discover.d.ts.map +1 -0
  36. package/dist/commands/examples/migrated-command-example.d.ts +31 -0
  37. package/dist/commands/examples/migrated-command-example.d.ts.map +1 -0
  38. package/dist/commands/gateway.d.ts +6 -0
  39. package/dist/commands/gateway.d.ts.map +1 -0
  40. package/dist/commands/managedGateway.d.ts +6 -0
  41. package/dist/commands/managedGateway.d.ts.map +1 -0
  42. package/dist/commands/managedGateway.js +132 -0
  43. package/dist/commands/utils/config-validator.d.ts +29 -0
  44. package/dist/commands/utils/config-validator.d.ts.map +1 -0
  45. package/dist/commands/utils/gateway-prompt.d.ts +23 -0
  46. package/dist/commands/utils/gateway-prompt.d.ts.map +1 -0
  47. package/dist/commands/utils/input-parser.d.ts +34 -0
  48. package/dist/commands/utils/input-parser.d.ts.map +1 -0
  49. package/dist/commands/utils/output-formatter.d.ts +62 -0
  50. package/dist/commands/utils/output-formatter.d.ts.map +1 -0
  51. package/dist/config/app-config.d.ts +44 -0
  52. package/dist/config/app-config.d.ts.map +1 -0
  53. package/dist/detection/SystemCapabilityDetector.d.ts +139 -0
  54. package/dist/detection/SystemCapabilityDetector.d.ts.map +1 -0
  55. package/dist/detection/ToolDetector.d.ts +16 -0
  56. package/dist/detection/ToolDetector.d.ts.map +1 -0
  57. package/dist/detection/WorkloadDetector.d.ts +62 -0
  58. package/dist/detection/WorkloadDetector.d.ts.map +1 -0
  59. package/dist/detection/tools.d.ts +16 -0
  60. package/dist/detection/tools.d.ts.map +1 -0
  61. package/dist/detection/tools.js +305 -0
  62. package/dist/di/bindings.d.ts +15 -0
  63. package/dist/di/bindings.d.ts.map +1 -0
  64. package/dist/di/container.d.ts +44 -0
  65. package/dist/di/container.d.ts.map +1 -0
  66. package/dist/di/types.d.ts +23 -0
  67. package/dist/di/types.d.ts.map +1 -0
  68. package/dist/index.d.ts +3 -0
  69. package/dist/index.d.ts.map +1 -0
  70. package/dist/index.js +8 -1
  71. package/dist/interfaces/IDaemonManager.d.ts +67 -0
  72. package/dist/interfaces/IDaemonManager.d.ts.map +1 -0
  73. package/dist/repositories/config-repository.d.ts +46 -0
  74. package/dist/repositories/config-repository.d.ts.map +1 -0
  75. package/dist/repositories/gateway-repository.d.ts +37 -0
  76. package/dist/repositories/gateway-repository.d.ts.map +1 -0
  77. package/dist/services/AgentStatusManager.d.ts +30 -0
  78. package/dist/services/AgentStatusManager.d.ts.map +1 -0
  79. package/dist/services/ConnectivityTester.d.ts +30 -0
  80. package/dist/services/ConnectivityTester.d.ts.map +1 -0
  81. package/dist/services/DependencyInstaller.d.ts +32 -0
  82. package/dist/services/DependencyInstaller.d.ts.map +1 -0
  83. package/dist/services/LocalAgentManager.d.ts +220 -0
  84. package/dist/services/LocalAgentManager.d.ts.map +1 -0
  85. package/dist/services/LocalAgentManager.js +3 -5
  86. package/dist/services/application/ApplicationService.d.ts +54 -0
  87. package/dist/services/application/ApplicationService.d.ts.map +1 -0
  88. package/dist/services/application/ApplicationService.js +10 -3
  89. package/dist/services/auth/AuthService.d.ts +42 -0
  90. package/dist/services/auth/AuthService.d.ts.map +1 -0
  91. package/dist/services/aws.d.ts +136 -0
  92. package/dist/services/aws.d.ts.map +1 -0
  93. package/dist/services/aws.js +2 -2
  94. package/dist/services/daemon/DaemonManagerFactory.d.ts +17 -0
  95. package/dist/services/daemon/DaemonManagerFactory.d.ts.map +1 -0
  96. package/dist/services/daemon/DockerDaemonManager.d.ts +26 -0
  97. package/dist/services/daemon/DockerDaemonManager.d.ts.map +1 -0
  98. package/dist/services/daemon/LaunchdDaemonManager.d.ts +20 -0
  99. package/dist/services/daemon/LaunchdDaemonManager.d.ts.map +1 -0
  100. package/dist/services/daemon/LaunchdDaemonManager.js +54 -6
  101. package/dist/services/daemon/PodmanDaemonManager.d.ts +24 -0
  102. package/dist/services/daemon/PodmanDaemonManager.d.ts.map +1 -0
  103. package/dist/services/daemon/SystemdDaemonManager.d.ts +20 -0
  104. package/dist/services/daemon/SystemdDaemonManager.d.ts.map +1 -0
  105. package/dist/services/daemon/WindowsServiceDaemonManager.d.ts +19 -0
  106. package/dist/services/daemon/WindowsServiceDaemonManager.d.ts.map +1 -0
  107. package/dist/services/daemon/index.d.ts +7 -0
  108. package/dist/services/daemon/index.d.ts.map +1 -0
  109. package/dist/services/edgible.d.ts +304 -0
  110. package/dist/services/edgible.d.ts.map +1 -0
  111. package/dist/services/edgible.js +53 -4
  112. package/dist/services/gateway/GatewayService.d.ts +88 -0
  113. package/dist/services/gateway/GatewayService.d.ts.map +1 -0
  114. package/dist/state/config.d.ts +96 -0
  115. package/dist/state/config.d.ts.map +1 -0
  116. package/dist/types/AgentConfig.d.ts +126 -0
  117. package/dist/types/AgentConfig.d.ts.map +1 -0
  118. package/dist/types/AgentStatus.d.ts +30 -0
  119. package/dist/types/AgentStatus.d.ts.map +1 -0
  120. package/dist/types/ApiClient.d.ts +36 -0
  121. package/dist/types/ApiClient.d.ts.map +1 -0
  122. package/dist/types/ApiRequests.d.ts +269 -0
  123. package/dist/types/ApiRequests.d.ts.map +1 -0
  124. package/dist/types/ApiResponses.d.ts +348 -0
  125. package/dist/types/ApiResponses.d.ts.map +1 -0
  126. package/dist/types/Application.d.ts +13 -0
  127. package/dist/types/Application.d.ts.map +1 -0
  128. package/dist/types/CaddyJson.d.ts +231 -0
  129. package/dist/types/CaddyJson.d.ts.map +1 -0
  130. package/dist/types/DeviceMetrics.d.ts +95 -0
  131. package/dist/types/DeviceMetrics.d.ts.map +1 -0
  132. package/dist/types/DeviceMetrics.js +5 -0
  133. package/dist/types/LogAggregation.d.ts +106 -0
  134. package/dist/types/LogAggregation.d.ts.map +1 -0
  135. package/dist/types/LogAggregation.js +5 -0
  136. package/dist/types/LogEntry.d.ts +60 -0
  137. package/dist/types/LogEntry.d.ts.map +1 -0
  138. package/dist/types/LogEntry.js +5 -0
  139. package/dist/types/UnifiedAgentStatus.d.ts +28 -0
  140. package/dist/types/UnifiedAgentStatus.d.ts.map +1 -0
  141. package/dist/types/WireGuard.d.ts +36 -0
  142. package/dist/types/WireGuard.d.ts.map +1 -0
  143. package/dist/types/Workload.d.ts +9 -0
  144. package/dist/types/Workload.d.ts.map +1 -0
  145. package/dist/types/agent.d.ts +120 -0
  146. package/dist/types/agent.d.ts.map +1 -0
  147. package/dist/types/command-options.d.ts +115 -0
  148. package/dist/types/command-options.d.ts.map +1 -0
  149. package/dist/types/connectivity.d.ts +80 -0
  150. package/dist/types/connectivity.d.ts.map +1 -0
  151. package/dist/types/errors.d.ts +97 -0
  152. package/dist/types/errors.d.ts.map +1 -0
  153. package/dist/types/gateway-types.d.ts +46 -0
  154. package/dist/types/gateway-types.d.ts.map +1 -0
  155. package/dist/types/index.d.ts +28 -0
  156. package/dist/types/index.d.ts.map +1 -0
  157. package/dist/types/models/ApplicationData.d.ts +78 -0
  158. package/dist/types/models/ApplicationData.d.ts.map +1 -0
  159. package/dist/types/models/CertificateData.d.ts +44 -0
  160. package/dist/types/models/CertificateData.d.ts.map +1 -0
  161. package/dist/types/models/DeviceData.d.ts +29 -0
  162. package/dist/types/models/DeviceData.d.ts.map +1 -0
  163. package/dist/types/models/DevicePoolData.d.ts +47 -0
  164. package/dist/types/models/DevicePoolData.d.ts.map +1 -0
  165. package/dist/types/models/LifecycleEvent.d.ts +27 -0
  166. package/dist/types/models/LifecycleEvent.d.ts.map +1 -0
  167. package/dist/types/models/LifecycleEvent.js +5 -0
  168. package/dist/types/models/OrganizationData.d.ts +53 -0
  169. package/dist/types/models/OrganizationData.d.ts.map +1 -0
  170. package/dist/types/models/OrganizationInviteData.d.ts +39 -0
  171. package/dist/types/models/OrganizationInviteData.d.ts.map +1 -0
  172. package/dist/types/models/ProviderConfiguration.d.ts +37 -0
  173. package/dist/types/models/ProviderConfiguration.d.ts.map +1 -0
  174. package/dist/types/models/ResourceData.d.ts +18 -0
  175. package/dist/types/models/ResourceData.d.ts.map +1 -0
  176. package/dist/types/models/ServiceResourceData.d.ts +5 -0
  177. package/dist/types/models/ServiceResourceData.d.ts.map +1 -0
  178. package/dist/types/models/UserData.d.ts +12 -0
  179. package/dist/types/models/UserData.d.ts.map +1 -0
  180. package/dist/types/route.d.ts +67 -0
  181. package/dist/types/route.d.ts.map +1 -0
  182. package/dist/types/validation/schemas.d.ts +606 -0
  183. package/dist/types/validation/schemas.d.ts.map +1 -0
  184. package/dist/types/validation/schemas.js +46 -4
  185. package/dist/types/validation.d.ts +68 -0
  186. package/dist/types/validation.d.ts.map +1 -0
  187. package/dist/utils/FileIntegrityManager.d.ts +37 -0
  188. package/dist/utils/FileIntegrityManager.d.ts.map +1 -0
  189. package/dist/utils/PathMigration.d.ts +45 -0
  190. package/dist/utils/PathMigration.d.ts.map +1 -0
  191. package/dist/utils/PathResolver.d.ts +76 -0
  192. package/dist/utils/PathResolver.d.ts.map +1 -0
  193. package/dist/utils/PlatformDetector.d.ts +60 -0
  194. package/dist/utils/PlatformDetector.d.ts.map +1 -0
  195. package/dist/utils/console-logger.d.ts +37 -0
  196. package/dist/utils/console-logger.d.ts.map +1 -0
  197. package/dist/utils/docker-compose-parser.d.ts +28 -0
  198. package/dist/utils/docker-compose-parser.d.ts.map +1 -0
  199. package/dist/utils/errors.d.ts +63 -0
  200. package/dist/utils/errors.d.ts.map +1 -0
  201. package/dist/utils/health-checker.d.ts +34 -0
  202. package/dist/utils/health-checker.d.ts.map +1 -0
  203. package/dist/utils/json-logger.d.ts +23 -0
  204. package/dist/utils/json-logger.d.ts.map +1 -0
  205. package/dist/utils/log-formatter.d.ts +85 -0
  206. package/dist/utils/log-formatter.d.ts.map +1 -0
  207. package/dist/utils/log-formatter.js +39 -11
  208. package/dist/utils/logger.d.ts +34 -0
  209. package/dist/utils/logger.d.ts.map +1 -0
  210. package/dist/utils/network-utils.d.ts +56 -0
  211. package/dist/utils/network-utils.d.ts.map +1 -0
  212. package/dist/utils/output.d.ts +73 -0
  213. package/dist/utils/output.d.ts.map +1 -0
  214. package/dist/utils/passwordValidation.d.ts +32 -0
  215. package/dist/utils/passwordValidation.d.ts.map +1 -0
  216. package/dist/utils/progress.d.ts +74 -0
  217. package/dist/utils/progress.d.ts.map +1 -0
  218. package/dist/utils/sudo-checker.d.ts +9 -0
  219. package/dist/utils/sudo-checker.d.ts.map +1 -0
  220. package/dist/utils/urls.d.ts +19 -0
  221. package/dist/utils/urls.d.ts.map +1 -0
  222. package/dist/utils/urls.js +3 -3
  223. package/dist/utils/validation.d.ts +19 -0
  224. package/dist/utils/validation.d.ts.map +1 -0
  225. package/dist/validation/schemas.d.ts +197 -0
  226. package/dist/validation/schemas.d.ts.map +1 -0
  227. package/dist/validation/schemas.js +1 -1
  228. package/dist/validation/validator.d.ts +22 -0
  229. package/dist/validation/validator.d.ts.map +1 -0
  230. package/package.json +9 -4
  231. package/recipes/compose/open-webui/.env +1 -0
  232. package/recipes/compose/open-webui/docker-compose.yml +17 -0
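
The bulk of this release is the rewritten `ai` command module (package/dist/commands/ai.js, shown below), which grows from a single `setup` flow into a command group: setup, stop, serve, teardown, status, and test. As a rough sketch of how the exported `setupAiCommands` hook is consumed (the `commander` dependency, the require path, and the `edgible` program name are assumptions inferred from the `.command()`/`.option()` chains in the diff, not published usage):

    // Hypothetical wiring sketch; the 'commander' dependency and require path are assumptions.
    const { Command } = require('commander');
    const { setupAiCommands } = require('@edgible-team/cli/dist/commands/ai');

    const program = new Command('edgible');
    setupAiCommands(program); // registers: ai setup | stop | serve | teardown | status | test
    program.parse(process.argv);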
@@ -40,148 +40,671 @@ exports.setupAiCommands = setupAiCommands;
  const chalk_1 = __importDefault(require("chalk"));
  const child_process_1 = require("child_process");
  const os = __importStar(require("os"));
+ const path = __importStar(require("path"));
+ const fs = __importStar(require("fs"));
  const inquirer_1 = __importDefault(require("inquirer"));
  const node_fetch_1 = __importDefault(require("node-fetch"));
  const command_wrapper_1 = require("./base/command-wrapper");
  const container_1 = require("../di/container");
  const types_1 = require("../di/types");
  const SystemCapabilityDetector_1 = require("../detection/SystemCapabilityDetector");
+ const DaemonManagerFactory_1 = require("../services/daemon/DaemonManagerFactory");
+ const config_validator_1 = require("./utils/config-validator");
  function setupAiCommands(program) {
  const aiCommand = program
  .command('ai')
  .description('Manage Ollama AI service');
  aiCommand
  .command('setup')
- .description('Setup Ollama: install, discover capabilities, and start with selected model')
+ .description('Setup Ollama AI and optionally expose via Edgible platform')
  .option('--model <model>', 'Model to use (skips interactive selection)')
  .option('--auto-install', 'Automatically install Ollama without prompting')
+ .option('--local-only', 'Skip platform integration (local setup only)')
+ .option('--expose-ollama', 'Expose Ollama API publicly')
+ .option('--setup-webui', 'Setup Open WebUI interface')
+ .option('--device-id <id>', 'Serving device ID for both apps')
+ .option('--ollama-device-id <id>', 'Serving device ID for Ollama')
+ .option('--webui-device-id <id>', 'Serving device ID for Open WebUI')
+ .option('--gateway-ids <ids>', 'Comma-separated gateway device IDs')
+ .option('--webui-deployment <type>', 'WebUI deployment: local or remote')
+ .option('--no-interactive', 'Run in non-interactive mode')
  .action((0, command_wrapper_1.wrapCommand)(async (options) => {
  const container = (0, container_1.getContainer)();
  const logger = container.get(types_1.TYPES.Logger);
  logger.info('Setting up Ollama AI service', { model: options.model, autoInstall: options.autoInstall });
  console.log(chalk_1.default.blue('\n🤖 Ollama AI Setup'));
  console.log(chalk_1.default.gray('This will install Ollama, check system capabilities, and start with a selected model.\n'));
+ // Check if daemon is running (required before setup)
+ console.log(chalk_1.default.blue('Prerequisite: Checking daemon status...\n'));
+ const configRepository = container.get(types_1.TYPES.ConfigRepository);
+ const config = configRepository.getConfig();
+ let daemonRunning = false;
+ // Check daemon status if installation type is configured
+ if (config.agentInstallationType) {
+ try {
+ const daemonManager = DaemonManagerFactory_1.DaemonManagerFactory.fromConfig(config.agentInstallationType);
+ if (daemonManager) {
+ const daemonStatus = await daemonManager.status();
+ daemonRunning = daemonStatus.running;
+ if (daemonRunning) {
+ console.log(chalk_1.default.green('✓ Daemon is running\n'));
+ }
+ else {
+ console.log(chalk_1.default.red('✗ Daemon is not running\n'));
+ }
+ }
+ }
+ catch (error) {
+ console.log(chalk_1.default.yellow('⚠ Could not check daemon status\n'));
+ console.log(chalk_1.default.gray(` Error: ${error instanceof Error ? error.message : String(error)}\n`));
+ }
+ }
+ else {
+ // Fallback: check agent status file if no daemon type is configured
+ console.log(chalk_1.default.gray(' No daemon installation type configured, checking agent status file...\n'));
+ const agentManager = container.get(types_1.TYPES.LocalAgentManager);
+ const agentStatus = await agentManager.checkLocalAgentStatus();
+ daemonRunning = agentStatus.running;
+ if (daemonRunning) {
+ console.log(chalk_1.default.green('✓ Agent is running\n'));
+ }
+ else {
+ console.log(chalk_1.default.red('✗ Agent is not running\n'));
+ }
+ }
+ if (!daemonRunning) {
+ console.log(chalk_1.default.yellow('The device agent daemon must be running before setting up AI services.\n'));
+ console.log(chalk_1.default.blue('Please start the agent first:'));
+ console.log(chalk_1.default.gray(' edgible agent start\n'));
+ console.log(chalk_1.default.gray('Then run this setup command again.\n'));
+ throw new Error('Device agent daemon is not running. Please start it with: edgible agent start');
+ }
+ // Phase 1: Local Ollama Setup
+ const { capabilities } = await setupLocalOllama({
+ autoInstall: options.autoInstall,
+ model: options.model,
+ });
+ // Phase 2: Platform Integration
+ const { ollamaUrl, webUIUrl, createdOllamaApp, createdWebUIApp, deviceName, deviceId, ollamaModelName } = await setupPlatformIntegration({
+ model: options.model,
+ capabilities,
+ container,
+ logger,
+ });
+ // Phase 3: Display Summary
+ displaySetupSummary({
+ ollamaModelName,
+ ollamaUrl,
+ webUIUrl,
+ deviceName,
+ deviceId,
+ createdOllamaApp,
+ createdWebUIApp,
+ });
+ }, {
+ configRepository: (0, container_1.getContainer)().get(types_1.TYPES.ConfigRepository),
+ requireAuth: false,
+ requireOrganization: false,
+ }));
+ aiCommand
+ .command('stop')
+ .description('Stop Ollama service')
+ .action((0, command_wrapper_1.wrapCommand)(async () => {
+ const container = (0, container_1.getContainer)();
+ const logger = container.get(types_1.TYPES.Logger);
+ logger.info('Stopping Ollama service');
+ console.log(chalk_1.default.blue('\n🛑 Stopping Ollama service...\n'));
+ const isRunning = await checkOllamaRunning();
+ if (!isRunning) {
+ console.log(chalk_1.default.yellow('Ollama is not running\n'));
+ return;
+ }
+ await stopOllama();
+ console.log(chalk_1.default.green('✓ Ollama service stopped\n'));
+ }, {
+ configRepository: (0, container_1.getContainer)().get(types_1.TYPES.ConfigRepository),
+ }));
+ aiCommand
+ .command('serve')
+ .description('Start Open WebUI connected to local Ollama service')
+ .option('--port <port>', 'WebUI port (default: 3200)', '3200')
+ .option('--ollama-url <url>', 'Override Ollama URL (default: auto-detect host)')
+ .option('-d, --detached', 'Run in detached mode (default: true)', true)
+ .action((0, command_wrapper_1.wrapCommand)(async (options) => {
+ const container = (0, container_1.getContainer)();
+ const logger = container.get(types_1.TYPES.Logger);
+ const configRepository = container.get(types_1.TYPES.ConfigRepository);
+ logger.info('Starting Open WebUI with Ollama');
+ console.log(chalk_1.default.blue('\n🌐 Starting Open WebUI'));
+ console.log(chalk_1.default.gray('This will start the Open WebUI interface connected to your local Ollama service.\n'));
  // Step 1: Check if Ollama is installed
  console.log(chalk_1.default.blue('Step 1: Checking Ollama installation...\n'));
  const isOllamaInstalled = await checkOllamaInstalled();
  if (!isOllamaInstalled) {
- let shouldInstall = options.autoInstall || false;
- if (!shouldInstall) {
- const answer = await inquirer_1.default.prompt([
- {
- type: 'confirm',
- name: 'install',
- message: 'Ollama is not installed. Would you like to install it now?',
- default: true,
- },
- ]);
- shouldInstall = answer.install;
+ console.log(chalk_1.default.red('✗ Ollama is not installed'));
+ console.log(chalk_1.default.yellow('\nPlease run: edgible ai setup\n'));
+ throw new Error('Ollama is required but not installed');
+ }
+ console.log(chalk_1.default.green('✓ Ollama is installed\n'));
+ // Step 2: Check if Ollama is running
+ console.log(chalk_1.default.blue('Step 2: Checking if Ollama is running...\n'));
+ const isOllamaRunning = await checkOllamaRunning();
+ if (!isOllamaRunning) {
+ console.log(chalk_1.default.yellow('⚠ Ollama is not running'));
+ const answer = await inquirer_1.default.prompt([
+ {
+ type: 'confirm',
+ name: 'start',
+ message: 'Would you like to start Ollama now?',
+ default: true,
+ },
+ ]);
+ if (answer.start) {
+ console.log(chalk_1.default.yellow('Starting Ollama...\n'));
+ await startOllama();
+ // Wait and verify
+ await new Promise(resolve => setTimeout(resolve, 2000));
+ const isRunningNow = await checkOllamaRunning();
+ if (!isRunningNow) {
+ throw new Error('Failed to start Ollama. Please start it manually.');
+ }
+ console.log(chalk_1.default.green('✓ Ollama started\n'));
  }
- if (shouldInstall) {
- console.log(chalk_1.default.yellow('Installing Ollama...\n'));
- await installOllama();
- console.log(chalk_1.default.green('✓ Ollama installed successfully\n'));
+ else {
+ throw new Error('Ollama must be running. Please start it with: edgible ai setup');
+ }
+ }
+ else {
+ console.log(chalk_1.default.green('✓ Ollama is running\n'));
+ }
+ // Step 2.5: Check and fix Ollama binding if needed (for Docker access)
+ console.log(chalk_1.default.blue('Step 2.5: Checking Ollama network binding...\n'));
+ const listeningAddress = await checkOllamaListeningAddress();
+ if (listeningAddress === '127.0.0.1') {
+ console.log(chalk_1.default.yellow('⚠ Ollama is listening on localhost only (127.0.0.1:11434)'));
+ console.log(chalk_1.default.gray(' Docker containers cannot access Ollama on localhost.\n'));
+ console.log(chalk_1.default.yellow(' Reconfiguring Ollama to listen on all interfaces (0.0.0.0:11434)...\n'));
+ const fixed = await fixOllamaBinding();
+ if (fixed) {
+ // Wait a moment and verify it's now on 0.0.0.0
+ await new Promise((resolve) => setTimeout(resolve, 2000));
+ const newAddress = await checkOllamaListeningAddress();
+ if (newAddress === '0.0.0.0') {
+ console.log(chalk_1.default.green('✓ Ollama reconfigured to listen on 0.0.0.0:11434\n'));
+ }
+ else {
+ console.log(chalk_1.default.yellow('⚠ Could not verify binding change. Continuing anyway...\n'));
+ }
  }
  else {
- throw new Error('Ollama is required but not installed. Please install it manually from https://ollama.com');
+ console.log(chalk_1.default.yellow(' Could not automatically reconfigure Ollama binding.\n'));
+ console.log(chalk_1.default.gray(' Docker may not be able to connect. Please manually set OLLAMA_HOST=0.0.0.0:11434 and restart Ollama.\n'));
  }
  }
+ else if (listeningAddress === '0.0.0.0') {
+ console.log(chalk_1.default.green('✓ Ollama is listening on all interfaces (accessible to Docker)\n'));
+ }
+ else {
+ console.log(chalk_1.default.gray(' Could not determine Ollama binding address (assuming accessible)\n'));
+ }
+ // Step 3: Check if any models are available
+ console.log(chalk_1.default.blue('Step 3: Checking available models...\n'));
+ const hasModels = await checkHasModels();
+ if (!hasModels) {
+ console.log(chalk_1.default.yellow('⚠ No Ollama models found'));
+ console.log(chalk_1.default.yellow('Please pull a model first with: edgible ai setup\n'));
+ throw new Error('At least one Ollama model is required');
+ }
+ console.log(chalk_1.default.green('✓ Models available\n'));
+ // Step 4: Check if Docker is installed
+ console.log(chalk_1.default.blue('Step 4: Checking Docker installation...\n'));
+ const isDockerInstalled = await checkDockerInstalled();
+ if (!isDockerInstalled) {
+ console.log(chalk_1.default.red('✗ Docker is not installed'));
+ console.log(chalk_1.default.yellow('Please install Docker from: https://docs.docker.com/get-docker/\n'));
+ throw new Error('Docker is required to run Open WebUI');
+ }
+ console.log(chalk_1.default.green('✓ Docker is installed\n'));
+ // Step 5: Determine Ollama URL for Docker
+ console.log(chalk_1.default.blue('Step 5: Configuring connection...\n'));
+ const ollamaUrl = options.ollamaUrl || await detectOllamaUrlForDocker();
+ console.log(chalk_1.default.gray(` Using Ollama URL: ${ollamaUrl}\n`));
+ // Step 6: Start Docker Compose
+ console.log(chalk_1.default.blue('Step 6: Starting Open WebUI...\n'));
+ const composeDir = getComposeDirectory();
+ const port = parseInt(options.port || '3200', 10);
+ await startOpenWebUI(composeDir, {
+ OLLAMA_BASE_URL: ollamaUrl,
+ OPEN_WEBUI_PORT: port.toString(),
+ });
+ console.log(chalk_1.default.green('✓ Open WebUI started successfully!\n'));
+ console.log(chalk_1.default.blue('🎉 Setup Complete!\n'));
+ console.log(chalk_1.default.white('Access Open WebUI at:'));
+ console.log(chalk_1.default.cyan(` http://localhost:${port}\n`));
+ console.log(chalk_1.default.gray('Useful commands:'));
+ console.log(chalk_1.default.gray(` edgible ai status # Check service status`));
+ console.log(chalk_1.default.gray(` edgible ai teardown # Stop Open WebUI`));
+ console.log(chalk_1.default.gray(` docker logs open-webui # View logs\n`));
+ }, {
+ configRepository: (0, container_1.getContainer)().get(types_1.TYPES.ConfigRepository),
+ }));
+ aiCommand
+ .command('teardown')
+ .description('Stop AI services and optionally remove platform applications')
+ .option('--stop-ollama', 'Also stop the Ollama service')
+ .option('--remove-volumes', 'Remove data volumes (deletes all data)')
+ .option('--remove-apps', 'Remove platform applications (Ollama API, Open WebUI)')
+ .action((0, command_wrapper_1.wrapCommand)(async (options) => {
+ const container = (0, container_1.getContainer)();
+ const logger = container.get(types_1.TYPES.Logger);
+ const configRepository = container.get(types_1.TYPES.ConfigRepository);
+ logger.info('Tearing down AI services');
+ console.log(chalk_1.default.blue('\n🛑 AI Services Teardown\n'));
+ // Stop local Open WebUI if running
+ const composeDir = getComposeDirectory();
+ const isRunning = await checkOpenWebUIRunning();
+ if (!isRunning) {
+ console.log(chalk_1.default.gray(' Local Open WebUI is not running\n'));
+ }
  else {
- console.log(chalk_1.default.green(' Ollama is already installed\n'));
+ console.log(chalk_1.default.yellow('Stopping local Open WebUI...\n'));
+ await stopOpenWebUI(composeDir, options.removeVolumes || false);
+ console.log(chalk_1.default.green('✓ Local Open WebUI stopped\n'));
  }
- // Step 2: Discover system capabilities
- console.log(chalk_1.default.blue('Step 2: Discovering system capabilities...\n'));
- const capabilities = await SystemCapabilityDetector_1.SystemCapabilityDetector.detectCapabilities();
- // Show GPU driver support status
- if (capabilities.gpuDriverSupport.ollamaGpuReady) {
- console.log(chalk_1.default.green('✓ GPU acceleration is ready for Ollama'));
- if (capabilities.gpuDriverSupport.ollamaGpuReason) {
- console.log(chalk_1.default.gray(` ${capabilities.gpuDriverSupport.ollamaGpuReason}\n`));
+ // Optionally stop Ollama
+ if (options.stopOllama) {
+ console.log(chalk_1.default.yellow('Stopping Ollama service...\n'));
+ const isOllamaRunning = await checkOllamaRunning();
+ if (isOllamaRunning) {
+ await stopOllama();
+ console.log(chalk_1.default.green('✓ Ollama stopped\n'));
+ }
+ else {
+ console.log(chalk_1.default.gray('Ollama is not running\n'));
+ }
+ }
+ // Optionally remove platform applications
+ if (options.removeApps) {
+ console.log(chalk_1.default.yellow('\nRemoving platform applications...\n'));
+ try {
+ (0, config_validator_1.validateConfig)(configRepository, {
+ requireAuth: true,
+ requireOrganization: true,
+ });
+ const applicationService = container.get(types_1.TYPES.ApplicationService);
+ const applications = await applicationService.getApplications();
+ // Find AI-related applications
+ const ollamaApp = applications.find(app => app.name === 'ollama-api');
+ const webUIApp = applications.find(app => app.name === 'open-webui');
+ if (ollamaApp) {
+ console.log(chalk_1.default.yellow(` Removing ollama-api (${ollamaApp.id})...`));
+ await applicationService.deleteApplication(ollamaApp.id);
+ console.log(chalk_1.default.green(' ✓ Removed ollama-api'));
+ }
+ if (webUIApp) {
+ console.log(chalk_1.default.yellow(` Removing open-webui (${webUIApp.id})...`));
+ await applicationService.deleteApplication(webUIApp.id);
+ console.log(chalk_1.default.green(' ✓ Removed open-webui'));
+ }
+ if (!ollamaApp && !webUIApp) {
+ console.log(chalk_1.default.gray(' No AI applications found\n'));
+ }
+ else {
+ console.log(chalk_1.default.green('\n✓ Platform applications removed\n'));
+ }
+ }
+ catch (error) {
+ if (error instanceof Error && error.message.includes('auth')) {
+ console.log(chalk_1.default.yellow(' ⚠ Not logged in - skipping platform application removal\n'));
+ }
+ else {
+ console.log(chalk_1.default.red(` ✗ Error removing applications: ${error instanceof Error ? error.message : 'Unknown error'}\n`));
+ }
+ }
+ }
+ console.log(chalk_1.default.green('✅ Teardown complete!\n'));
+ if (options.removeVolumes) {
+ console.log(chalk_1.default.yellow('⚠ Data volumes were removed. All local WebUI data has been deleted.\n'));
+ }
+ }, {
+ configRepository: (0, container_1.getContainer)().get(types_1.TYPES.ConfigRepository),
+ }));
+ aiCommand
+ .command('status')
+ .description('Check status of AI services (Ollama and Open WebUI)')
+ .action((0, command_wrapper_1.wrapCommand)(async () => {
+ const container = (0, container_1.getContainer)();
+ const logger = container.get(types_1.TYPES.Logger);
+ const configRepository = container.get(types_1.TYPES.ConfigRepository);
+ logger.info('Checking AI services status');
+ console.log(chalk_1.default.blue('\n📊 AI Services Status\n'));
+ // Check Ollama Local
+ console.log(chalk_1.default.white('Ollama (Local):'));
+ const isOllamaInstalled = await checkOllamaInstalled();
+ const isOllamaRunning = isOllamaInstalled ? await checkOllamaRunning() : false;
+ if (!isOllamaInstalled) {
+ console.log(chalk_1.default.red(' ✗ Not installed'));
+ }
+ else if (isOllamaRunning) {
+ console.log(chalk_1.default.green(' ✓ Running'));
+ // Check what address it's listening on
+ try {
+ const output = (0, child_process_1.execSync)('ss -tlnp 2>/dev/null | grep 11434 || netstat -tlnp 2>/dev/null | grep 11434', {
+ encoding: 'utf8',
+ timeout: 2000
+ });
+ if (output.includes('0.0.0.0:11434') || output.includes('*:11434')) {
+ console.log(chalk_1.default.gray(' Listening on: 0.0.0.0:11434 (accessible from network)'));
+ }
+ else if (output.includes('127.0.0.1:11434')) {
+ console.log(chalk_1.default.yellow(' Listening on: 127.0.0.1:11434 (localhost only)'));
+ }
+ }
+ catch {
+ // Ignore if ss/netstat fails
+ }
+ // Show available models
+ try {
+ const output = (0, child_process_1.execSync)('ollama list', { encoding: 'utf8', timeout: 5000 });
+ const lines = output.trim().split('\n');
+ if (lines.length > 1) {
+ console.log(chalk_1.default.gray(` Models: ${lines.length - 1} available`));
+ }
+ }
+ catch {
+ // Ignore
  }
  }
  else {
- console.log(chalk_1.default.yellow('⚠ GPU acceleration not available - Ollama will run in CPU mode'));
- if (capabilities.gpuDriverSupport.ollamaGpuReason) {
- console.log(chalk_1.default.gray(` ${capabilities.gpuDriverSupport.ollamaGpuReason}\n`));
- }
- }
- // Step 3: Select model based on recommendations
- console.log(chalk_1.default.blue('Step 3: Selecting model...\n'));
- let selectedModel = options.model;
- if (!selectedModel) {
- // Filter to only excellent and good recommendations
- const suitableModels = capabilities.recommendedModels.filter((m) => m.suitability === 'excellent' || m.suitability === 'good');
- if (suitableModels.length === 0) {
- console.log(chalk_1.default.yellow('⚠ No models are well-suited for your system.'));
- console.log(chalk_1.default.yellow('You can still run smaller models, but performance may be limited.\n'));
- const allModels = capabilities.recommendedModels.map((m) => ({
- name: `${m.modelName} (${m.size}) - ${m.suitability}`,
- value: m.modelName.toLowerCase().replace(/\s+/g, '-').replace(/[()]/g, ''),
- }));
- // Add custom model option
- allModels.push({
- name: 'Enter custom model name',
- value: '__custom__',
+ console.log(chalk_1.default.yellow(' Installed but not running'));
+ }
+ console.log('');
+ // Check Ollama Platform Application
+ console.log(chalk_1.default.white('Ollama (Platform):'));
+ try {
+ (0, config_validator_1.validateConfig)(configRepository, {
+ requireAuth: true,
+ requireOrganization: true,
+ });
+ const applicationService = container.get(types_1.TYPES.ApplicationService);
+ const applications = await applicationService.getApplications();
+ const ollamaApp = applications.find(app => app.name === 'ollama-api');
+ if (ollamaApp) {
+ console.log(chalk_1.default.green(' ✓ Application exists'));
+ console.log(chalk_1.default.gray(` ID: ${ollamaApp.id}`));
+ // Extract URL from application
+ let ollamaUrl;
+ if (ollamaApp.url) {
+ // Use URL directly if available
+ ollamaUrl = ollamaApp.url;
+ }
+ else if (ollamaApp.servingIp && ollamaApp.servingIp !== 'unknown') {
+ // Construct URL from servingIp
+ const protocol = ollamaApp.protocol === 'https' ? 'https' : 'http';
+ const port = ollamaApp.port === 443 || ollamaApp.port === 80 ? '' : `:${ollamaApp.port}`;
+ ollamaUrl = `${protocol}://${ollamaApp.servingIp}${port}`;
+ }
+ if (ollamaUrl) {
+ console.log(chalk_1.default.cyan(` URL: ${ollamaUrl}`));
+ // Test if it's reachable
+ const isReachable = await checkOllamaUrlReachable(ollamaUrl);
+ if (isReachable) {
+ console.log(chalk_1.default.green(' ✓ Reachable and responding'));
+ }
+ else {
+ console.log(chalk_1.default.red(' ✗ Not reachable or not responding'));
+ }
+ }
+ else {
+ console.log(chalk_1.default.yellow(' ⚠ URL not available'));
+ }
+ }
+ else {
+ console.log(chalk_1.default.gray(' ○ No platform application found'));
+ }
+ }
+ catch (error) {
+ if (error instanceof Error && error.message.includes('auth')) {
+ console.log(chalk_1.default.gray(' ○ Not logged in (cannot check platform apps)'));
+ }
+ else {
+ console.log(chalk_1.default.yellow(` ⚠ Error checking platform: ${error instanceof Error ? error.message : 'Unknown'}`));
+ }
+ }
+ console.log('');
+ // Check Open WebUI
+ console.log(chalk_1.default.white('Open WebUI:'));
+ const webUIInfo = await getOpenWebUIInfo();
+ if (webUIInfo) {
+ console.log(chalk_1.default.green(' ✓ Running (Local)'));
+ console.log(chalk_1.default.cyan(` URL: http://localhost:${webUIInfo.port}`));
+ }
+ else {
+ // Check for platform application
+ try {
+ (0, config_validator_1.validateConfig)(configRepository, {
+ requireAuth: true,
+ requireOrganization: true,
  });
- const answer = await inquirer_1.default.prompt([
+ const applicationService = container.get(types_1.TYPES.ApplicationService);
+ const applications = await applicationService.getApplications();
+ const webUIApp = applications.find(app => app.name === 'open-webui');
+ if (webUIApp) {
+ console.log(chalk_1.default.green(' ✓ Application exists (Platform)'));
+ console.log(chalk_1.default.gray(` ID: ${webUIApp.id}`));
+ let webUIUrl;
+ if (webUIApp.url) {
+ // Use URL directly if available
+ webUIUrl = webUIApp.url;
+ }
+ else if (webUIApp.servingIp && webUIApp.servingIp !== 'unknown') {
+ // Construct URL from servingIp
+ const protocol = webUIApp.protocol === 'https' ? 'https' : 'http';
+ const port = webUIApp.port === 443 || webUIApp.port === 80 ? '' : `:${webUIApp.port}`;
+ webUIUrl = `${protocol}://${webUIApp.servingIp}${port}`;
+ }
+ if (webUIUrl) {
+ console.log(chalk_1.default.cyan(` URL: ${webUIUrl}`));
+ // Test if it's reachable
+ const isReachable = await checkUrlReachable(webUIUrl);
+ if (isReachable) {
+ console.log(chalk_1.default.green(' ✓ Reachable'));
+ }
+ else {
+ console.log(chalk_1.default.red(' ✗ Not reachable'));
+ }
+ }
+ }
+ else {
+ console.log(chalk_1.default.gray(' ○ Not running (local or platform)'));
+ }
+ }
+ catch {
+ console.log(chalk_1.default.gray(' ○ Not running'));
+ }
+ }
+ console.log('');
+ }, {
+ configRepository: (0, container_1.getContainer)().get(types_1.TYPES.ConfigRepository),
+ requireAuth: false,
+ requireOrganization: false,
+ }));
+ aiCommand
+ .command('test')
+ .description('Test Ollama model connectivity and response')
+ .option('--model <model>', 'Model name to test (optional, tests first available if not specified)')
+ .action((0, command_wrapper_1.wrapCommand)(async (options) => {
+ const container = (0, container_1.getContainer)();
+ const logger = container.get(types_1.TYPES.Logger);
+ logger.info('Testing Ollama model connectivity', { model: options.model });
+ console.log(chalk_1.default.blue('\n🧪 Testing Ollama Model\n'));
+ // Check if Ollama is running
+ console.log(chalk_1.default.blue('Step 1: Checking Ollama service...\n'));
+ const isRunning = await checkOllamaRunning();
+ if (!isRunning) {
+ console.log(chalk_1.default.red('✗ Ollama is not running'));
+ console.log(chalk_1.default.yellow('\nPlease start Ollama first with: edgible ai setup\n'));
+ throw new Error('Ollama service is not running');
+ }
+ console.log(chalk_1.default.green('✓ Ollama is running\n'));
+ // Determine which model to test
+ let testModel = options.model;
+ if (!testModel) {
+ console.log(chalk_1.default.blue('Step 2: Finding available models...\n'));
+ try {
+ const output = (0, child_process_1.execSync)('ollama list', { encoding: 'utf8', timeout: 5000 });
+ const lines = output.trim().split('\n');
+ if (lines.length > 1) {
+ // Parse first model from list (skip header)
+ const modelLine = lines[1];
+ testModel = modelLine.split(/\s+/)[0];
+ console.log(chalk_1.default.gray(` Using first available model: ${testModel}\n`));
+ }
+ else {
+ console.log(chalk_1.default.red('✗ No models found'));
+ console.log(chalk_1.default.yellow('\nPlease pull a model first with: edgible ai setup\n'));
+ throw new Error('No Ollama models available');
+ }
+ }
+ catch (error) {
+ console.log(chalk_1.default.red('✗ Failed to list models\n'));
+ throw error;
+ }
+ }
+ // Test model connectivity
+ console.log(chalk_1.default.blue(`Step 3: Testing model "${testModel}"...\n`));
+ const modelWorks = await testModelConnectivity(testModel, true);
+ if (modelWorks) {
+ console.log(chalk_1.default.green(`\n✅ Model "${testModel}" is working correctly!\n`));
+ console.log(chalk_1.default.white('The model is ready to use with:'));
+ console.log(chalk_1.default.gray(` edgible ai serve # Start Open WebUI`));
+ console.log(chalk_1.default.gray(` ollama run ${testModel} # Use in terminal\n`));
+ }
+ else {
+ console.log(chalk_1.default.red(`\n✗ Model "${testModel}" failed to respond\n`));
+ console.log(chalk_1.default.yellow('Troubleshooting tips:'));
+ console.log(chalk_1.default.gray(' 1. Check if the model exists: ollama list'));
+ console.log(chalk_1.default.gray(' 2. Try pulling the model again: ollama pull ' + testModel));
+ console.log(chalk_1.default.gray(' 3. Check Ollama logs for errors'));
+ console.log(chalk_1.default.gray(' 4. Restart Ollama: edgible ai stop && edgible ai setup\n'));
+ }
+ }, {
+ configRepository: (0, container_1.getContainer)().get(types_1.TYPES.ConfigRepository),
+ }));
+ }
+ /**
+ * Select a model based on capabilities and user input
+ */
+ async function selectModel(providedModel, capabilities) {
+ let selectedModel = providedModel;
+ if (!selectedModel) {
+ // Filter to only excellent and good recommendations
+ const suitableModels = capabilities.recommendedModels.filter((m) => m.suitability === 'excellent' || m.suitability === 'good');
+ if (suitableModels.length === 0) {
+ console.log(chalk_1.default.yellow('⚠ No models are well-suited for your system.'));
+ console.log(chalk_1.default.yellow('You can still run smaller models, but performance may be limited.\n'));
+ const allModels = capabilities.recommendedModels.map((m) => ({
+ name: `${m.modelName} (${m.size}) - ${m.suitability}`,
+ value: m.modelName.toLowerCase().replace(/\s+/g, '-').replace(/[()]/g, ''),
+ }));
+ // Add custom model option
+ allModels.push({
+ name: 'Enter custom model name',
+ value: '__custom__',
+ });
+ const answer = await inquirer_1.default.prompt([
+ {
+ type: 'list',
+ name: 'model',
+ message: 'Select a model to use:',
+ choices: allModels,
+ },
+ ]);
+ if (answer.model === '__custom__') {
+ const customAnswer = await inquirer_1.default.prompt([
  {
- type: 'list',
+ type: 'input',
  name: 'model',
- message: 'Select a model to use:',
- choices: allModels,
+ message: 'Enter Ollama model name (e.g., deepseek-r1:8b, llama3.2:3b):',
+ validate: (input) => {
+ if (!input || input.trim().length === 0) {
+ return 'Model name cannot be empty';
+ }
+ return true;
+ },
  },
  ]);
- if (answer.model === '__custom__') {
- const customAnswer = await inquirer_1.default.prompt([
- {
- type: 'input',
- name: 'model',
- message: 'Enter Ollama model name (e.g., deepseek-r1:8b, llama3.2:3b):',
- validate: (input) => {
- if (!input || input.trim().length === 0) {
- return 'Model name cannot be empty';
- }
- return true;
- },
- },
- ]);
- selectedModel = customAnswer.model.trim();
- }
- else {
- selectedModel = answer.model;
- }
+ selectedModel = customAnswer.model.trim();
  }
  else {
- console.log(chalk_1.default.green('Recommended models for your system:\n'));
- suitableModels.forEach((model, index) => {
- const icon = model.suitability === 'excellent' ? '✓' : '•';
- const color = model.suitability === 'excellent' ? chalk_1.default.green : chalk_1.default.cyan;
- console.log(color(` ${icon} ${model.modelName} (${model.size})`));
- console.log(chalk_1.default.gray(` ${model.reasoning}\n`));
- });
- const modelChoices = suitableModels.map((m) => ({
- name: `${m.modelName} (${m.size}) - ${m.suitability === 'excellent' ? 'Recommended' : 'Good fit'}`,
+ selectedModel = answer.model;
+ }
+ }
+ else {
+ console.log(chalk_1.default.green('Recommended models for your system:\n'));
+ suitableModels.forEach((model) => {
+ const icon = model.suitability === 'excellent' ? '✓' : '•';
+ const color = model.suitability === 'excellent' ? chalk_1.default.green : chalk_1.default.cyan;
+ console.log(color(` ${icon} ${model.modelName} (${model.size})`));
+ console.log(chalk_1.default.gray(` ${model.reasoning}\n`));
+ });
+ const modelChoices = suitableModels.map((m) => ({
+ name: `${m.modelName} (${m.size}) - ${m.suitability === 'excellent' ? 'Recommended' : 'Good fit'}`,
+ value: m.modelName.toLowerCase().replace(/\s+/g, '-').replace(/[()]/g, ''),
+ }));
+ // Add option to see all models
+ modelChoices.push({
+ name: 'Show all models (including marginal/insufficient)',
+ value: '__all__',
+ });
+ // Add custom model option
+ modelChoices.push({
+ name: 'Enter custom model name',
+ value: '__custom__',
+ });
+ const answer = await inquirer_1.default.prompt([
+ {
+ type: 'list',
+ name: 'model',
+ message: 'Select a model to use:',
+ choices: modelChoices,
+ },
+ ]);
+ if (answer.model === '__custom__') {
+ const customAnswer = await inquirer_1.default.prompt([
+ {
+ type: 'input',
+ name: 'model',
+ message: 'Enter Ollama model name (e.g., deepseek-r1:8b, llama3.2:3b):',
+ validate: (input) => {
+ if (!input || input.trim().length === 0) {
+ return 'Model name cannot be empty';
+ }
+ return true;
+ },
+ },
+ ]);
+ selectedModel = customAnswer.model.trim();
+ }
+ else if (answer.model === '__all__') {
+ const allModels = capabilities.recommendedModels.map((m) => ({
+ name: `${m.modelName} (${m.size}) - ${m.suitability}`,
  value: m.modelName.toLowerCase().replace(/\s+/g, '-').replace(/[()]/g, ''),
  }));
- // Add option to see all models
- modelChoices.push({
- name: 'Show all models (including marginal/insufficient)',
- value: '__all__',
- });
- // Add custom model option
- modelChoices.push({
+ // Add custom model option to all models list too
+ allModels.push({
  name: 'Enter custom model name',
  value: '__custom__',
  });
- const answer = await inquirer_1.default.prompt([
+ const allAnswer = await inquirer_1.default.prompt([
  {
  type: 'list',
  name: 'model',
  message: 'Select a model to use:',
- choices: modelChoices,
+ choices: allModels,
  },
  ]);
- if (answer.model === '__custom__') {
+ if (allAnswer.model === '__custom__') {
  const customAnswer = await inquirer_1.default.prompt([
  {
  type: 'input',
@@ -197,128 +720,310 @@ function setupAiCommands(program) {
  ]);
  selectedModel = customAnswer.model.trim();
  }
- else if (answer.model === '__all__') {
- const allModels = capabilities.recommendedModels.map((m) => ({
- name: `${m.modelName} (${m.size}) - ${m.suitability}`,
- value: m.modelName.toLowerCase().replace(/\s+/g, '-').replace(/[()]/g, ''),
- }));
- // Add custom model option to all models list too
- allModels.push({
- name: 'Enter custom model name',
- value: '__custom__',
- });
- const allAnswer = await inquirer_1.default.prompt([
- {
- type: 'list',
- name: 'model',
- message: 'Select a model to use:',
- choices: allModels,
- },
- ]);
- if (allAnswer.model === '__custom__') {
- const customAnswer = await inquirer_1.default.prompt([
- {
- type: 'input',
- name: 'model',
- message: 'Enter Ollama model name (e.g., deepseek-r1:8b, llama3.2:3b):',
- validate: (input) => {
- if (!input || input.trim().length === 0) {
- return 'Model name cannot be empty';
- }
- return true;
- },
- },
- ]);
- selectedModel = customAnswer.model.trim();
- }
- else {
- selectedModel = allAnswer.model;
- }
- }
  else {
- selectedModel = answer.model;
+ selectedModel = allAnswer.model;
  }
  }
+ else {
+ selectedModel = answer.model;
+ }
  }
- // Normalize model name (Ollama uses lowercase with dashes)
- if (!selectedModel) {
- throw new Error('No model selected');
- }
- // If it's already a custom model (contains : or /), use as-is
- // Otherwise normalize it (recommended models need to be mapped to Ollama format)
- let ollamaModelName;
- if (selectedModel.includes(':') || selectedModel.includes('/')) {
- // Custom model name format (e.g., deepseek-r1:14b, llama3.2:3b, or org/model:tag)
- ollamaModelName = selectedModel;
- }
- else {
- // Normalize recommended model names to Ollama format (e.g., deepseek-r1-14b -> deepseek-r1:14b)
- ollamaModelName = normalizeModelName(selectedModel);
- }
- console.log(chalk_1.default.blue(`\nSelected model: ${ollamaModelName}\n`));
- // Step 4: Check if model is already pulled
- console.log(chalk_1.default.blue('Step 4: Checking if model is available...\n'));
- const isModelAvailable = await checkModelAvailable(ollamaModelName);
- if (!isModelAvailable) {
- console.log(chalk_1.default.yellow(`Model ${ollamaModelName} is not available locally.`));
+ }
+ // Normalize model name (Ollama uses lowercase with dashes)
+ if (!selectedModel) {
+ throw new Error('No model selected');
+ }
+ // If it's already a custom model (contains : or /), use as-is
+ // Otherwise normalize it (recommended models need to be mapped to Ollama format)
+ let ollamaModelName;
+ if (selectedModel.includes(':') || selectedModel.includes('/')) {
+ // Custom model name format (e.g., deepseek-r1:14b, llama3.2:3b, or org/model:tag)
+ ollamaModelName = selectedModel;
+ }
+ else {
+ // Normalize recommended model names to Ollama format (e.g., deepseek-r1-14b -> deepseek-r1:14b)
+ ollamaModelName = normalizeModelName(selectedModel);
+ }
+ return ollamaModelName;
+ }
+ /**
+ * Setup local Ollama installation and configuration
+ */
+ async function setupLocalOllama(options) {
+ // Step 1: Check if Ollama is installed
+ console.log(chalk_1.default.blue('Step 1: Checking Ollama installation...\n'));
+ const isOllamaInstalled = await checkOllamaInstalled();
+ if (!isOllamaInstalled) {
+ let shouldInstall = options.autoInstall || false;
+ if (!shouldInstall) {
  const answer = await inquirer_1.default.prompt([
  {
  type: 'confirm',
- name: 'pull',
- message: `Would you like to download ${ollamaModelName} now? (This may take a while)`,
+ name: 'install',
+ message: 'Ollama is not installed. Would you like to install it now?',
  default: true,
  },
  ]);
- if (answer.pull) {
- console.log(chalk_1.default.yellow(`\nDownloading ${ollamaModelName}...\n`));
- await pullModel(ollamaModelName);
- console.log(chalk_1.default.green(`✓ Model ${ollamaModelName} downloaded successfully\n`));
- }
- else {
- throw new Error(`Model ${ollamaModelName} is not available. Please pull it manually with: ollama pull ${ollamaModelName}`);
- }
+ shouldInstall = answer.install;
+ }
+ if (shouldInstall) {
+ console.log(chalk_1.default.yellow('Installing Ollama...\n'));
+ await installOllama();
+ console.log(chalk_1.default.green('✓ Ollama installed successfully\n'));
  }
  else {
- console.log(chalk_1.default.green(`✓ Model ${ollamaModelName} is available\n`));
+ throw new Error('Ollama is required but not installed. Please install it manually from https://ollama.com');
  }
- // Step 5: Start Ollama service
- console.log(chalk_1.default.blue('Step 5: Starting Ollama service...\n'));
- await startOllama();
- // Step 6: Verify Ollama is running
- console.log(chalk_1.default.blue('Step 6: Verifying Ollama is running...\n'));
- const isRunning = await checkOllamaRunning();
- if (isRunning) {
- console.log(chalk_1.default.green('✓ Ollama is running\n'));
- console.log(chalk_1.default.blue('Ollama Setup Complete!\n'));
- console.log(chalk_1.default.white('You can now use Ollama with the following commands:'));
- console.log(chalk_1.default.gray(` ollama run ${ollamaModelName}`));
- console.log(chalk_1.default.gray(` ollama list`));
- console.log(chalk_1.default.gray(` edgible ai stop # Stop Ollama service\n`));
+ }
+ else {
+ console.log(chalk_1.default.green('✓ Ollama is already installed\n'));
+ }
+ // Step 2: Discover system capabilities
+ console.log(chalk_1.default.blue('Step 2: Discovering system capabilities...\n'));
+ const capabilities = await SystemCapabilityDetector_1.SystemCapabilityDetector.detectCapabilities();
+ // Show GPU driver support status
+ if (capabilities.gpuDriverSupport.ollamaGpuReady) {
+ console.log(chalk_1.default.green(' GPU acceleration is ready for Ollama'));
+ if (capabilities.gpuDriverSupport.ollamaGpuReason) {
+ console.log(chalk_1.default.gray(` ${capabilities.gpuDriverSupport.ollamaGpuReason}\n`));
+ }
+ }
+ else {
+ console.log(chalk_1.default.yellow('⚠ GPU acceleration not detected - Ollama may run in CPU mode'));
+ if (capabilities.gpuDriverSupport.ollamaGpuReason) {
+ console.log(chalk_1.default.gray(` ${capabilities.gpuDriverSupport.ollamaGpuReason}\n`));
+ }
+ }
+ // Step 5: Start Ollama service
+ // console.log(chalk.blue('Step 5: Starting Ollama service...\n'));
+ // await startOllama();
+ //
+ // // Step 6: Verify Ollama is running
+ // console.log(chalk.blue('Step 6: Verifying Ollama is running...\n'));
+ // const isRunning = await checkOllamaRunning();
+ //
+ // if (!isRunning) {
+ // throw new Error('Ollama service failed to start. Please check the logs.');
+ // }
+ //
+ // console.log(chalk.green('✓ Ollama is running\n'));
+ //
+ // // Step 6.5: Check and fix Ollama binding if needed
+ // console.log(chalk.blue('Step 6.5: Checking Ollama network binding...\n'));
+ // const listeningAddress = await checkOllamaListeningAddress();
+ //
+ // if (listeningAddress === '127.0.0.1') {
+ // console.log(chalk.yellow('⚠ Ollama is listening on localhost only (127.0.0.1:11434)'));
+ // console.log(chalk.gray(' This will prevent Docker containers and network access from reaching Ollama.\n'));
+ // console.log(chalk.yellow(' Reconfiguring Ollama to listen on all interfaces (0.0.0.0:11434)...\n'));
+ //
+ // const fixed = await fixOllamaBinding();
+ // if (fixed) {
+ // // Wait a moment and verify it's now on 0.0.0.0
+ // await new Promise((resolve) => setTimeout(resolve, 2000));
+ // const newAddress = await checkOllamaListeningAddress();
+ // if (newAddress === '0.0.0.0') {
+ // console.log(chalk.green('✓ Ollama reconfigured to listen on 0.0.0.0:11434\n'));
+ // } else {
+ // console.log(chalk.yellow('⚠ Could not verify binding change. Ollama may need manual configuration.\n'));
+ // }
+ // } else {
+ // console.log(chalk.yellow('⚠ Could not automatically reconfigure Ollama binding.\n'));
+ // console.log(chalk.gray(' Please manually set OLLAMA_HOST=0.0.0.0:11434 and restart Ollama.\n'));
+ // }
+ // } else if (listeningAddress === '0.0.0.0') {
+ // console.log(chalk.green('✓ Ollama is listening on all interfaces (0.0.0.0:11434)\n'));
+ // } else {
+ // console.log(chalk.gray(' Could not determine Ollama binding address (this is usually fine)\n'));
+ // }
+ //
+ // Step 7: Test model connectivity
+ // console.log(chalk.blue('Step 7: Testing model connectivity...\n'));
+ // const modelWorks = await testModelConnectivity(ollamaModelName);
+ //
+ // if (modelWorks) {
+ // console.log(chalk.green('✓ Model is accessible and ready to use\n'));
+ // } else {
+ // console.log(chalk.yellow('⚠ Model may not be fully loaded yet\n'));
+ // console.log(chalk.gray(` This is normal for large models. The model will load on first use.\n`));
+ // }
+ //
+ console.log(chalk_1.default.green('✅ Phase 1: Local Ollama Setup Complete!\n'));
+ return { capabilities };
+ }
+ /**
+ * Setup platform integration (create applications)
+ */
+ async function setupPlatformIntegration(options) {
+ const { model, capabilities, container, logger } = options;
+ // Always require auth and organization for platform integration
+ const configRepository = container.get(types_1.TYPES.ConfigRepository);
+ try {
+ (0, config_validator_1.validateConfig)(configRepository, {
+ requireAuth: true,
+ requireOrganization: true,
+ requireDeviceId: true,
+ });
+ }
+ catch (error) {
+ console.log(chalk_1.default.yellow('\n⚠ Platform integration requires authentication'));
+ console.log(chalk_1.default.blue('\nPlease login first:'));
+ console.log(chalk_1.default.gray(' edgible auth login\n'));
+ console.log(chalk_1.default.gray('Then run setup again to continue.\n'));
+ throw error;
+ }
+ // Get device ID from config (same device as agent)
+ const deviceId = (0, config_validator_1.requireDeviceId)(configRepository);
+ const deviceInfo = await container.get(types_1.TYPES.EdgibleService).getDevice(deviceId);
+ const deviceName = deviceInfo.device?.name || deviceId;
+ console.log(chalk_1.default.blue('\n📡 Creating Platform Applications\n'));
+ const applicationService = container.get(types_1.TYPES.ApplicationService);
+ const gatewayService = container.get(types_1.TYPES.GatewayService);
+ const edgibleService = container.get(types_1.TYPES.EdgibleService);
+ // Step 3: Select model based on recommendations (before creating application)
+ console.log(chalk_1.default.blue('Step 3: Selecting model...\n'));
+ const ollamaModelName = await selectModel(model, capabilities);
+ console.log(chalk_1.default.blue(`\nSelected model: ${ollamaModelName}\n`));
+ // Step 7: Create Ollama API Application
+ console.log(chalk_1.default.blue('Step 7: Creating Ollama API application...\n'));
+ console.log(chalk_1.default.gray(` Device: ${deviceName} (${deviceId.substring(0, 8)}...)\n`));
+ console.log(chalk_1.default.gray(` Gateway: Managed Gateway\n`));
+ console.log(chalk_1.default.gray(` Protocol: HTTPS\n`));
+ const ollamaResult = await createOllamaApplication({
+ modelName: ollamaModelName,
+ deviceId: deviceId,
+ configRepository,
+ applicationService,
+ gatewayService,
+ edgibleService,
+ logger,
+ });
+ const createdOllamaApp = ollamaResult.app;
+ // Ensure URL has https:// protocol
+ const ollamaUrl = ollamaResult.url.startsWith('http://') || ollamaResult.url.startsWith('https://')
+ ? ollamaResult.url
+ : `https://${ollamaResult.url}`;
+ console.log(chalk_1.default.green('✓ Ollama API application created'));
+ console.log(chalk_1.default.cyan(` URL: ${ollamaUrl}\n`));
+ // Step 8: Verify endpoint is accessible (retry mechanism)
+ console.log(chalk_1.default.blue('Step 8: Verifying endpoint is accessible...\n'));
910
+ console.log(chalk_1.default.gray(` Checking ${ollamaUrl}/api/tags...\n`));
911
+ let endpointAccessible = false;
912
+ const maxRetries = 20;
913
+ const retryDelay = 5000; // 5 seconds
914
+ for (let attempt = 1; attempt <= maxRetries; attempt++) {
915
+ try {
916
+ const controller = new AbortController();
917
+ const timeout = setTimeout(() => controller.abort(), 5000);
918
+ const response = await (0, node_fetch_1.default)(`${ollamaUrl}/api/tags`, {
919
+ method: 'GET',
920
+ signal: controller.signal,
921
+ });
922
+ clearTimeout(timeout);
923
+ if (response.ok) {
924
+ endpointAccessible = true;
925
+ console.log(chalk_1.default.green(`✓ Endpoint is accessible (attempt ${attempt}/${maxRetries})\n`));
926
+ break;
927
+ }
928
+ }
929
+ catch (error) {
930
+ // Endpoint not ready yet, continue retrying
931
+ if (attempt < maxRetries) {
932
+ console.log(chalk_1.default.gray(` Attempt ${attempt}/${maxRetries} failed, retrying in ${retryDelay / 1000} seconds...\n`));
933
+ await new Promise((resolve) => setTimeout(resolve, retryDelay));
934
+ }
935
+ }
936
+ }
937
+ if (!endpointAccessible) {
938
+ console.log(chalk_1.default.yellow(`⚠ Endpoint not accessible after ${maxRetries} attempts\n`));
939
+ console.log(chalk_1.default.gray(` The application may still be starting. This can take several minutes.\n`));
940
+ console.log(chalk_1.default.gray(` You can check status with: curl ${ollamaUrl}/api/tags\n`));
941
+ }
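For reference, a successful probe here is any HTTP 200 from the `/api/tags` endpoint; the body is not inspected at this step. A healthy Ollama instance answers with a JSON document listing its installed models, roughly of this shape (illustrative values, not captured from this package):

  { "models": [ { "name": "llama3.2:3b", "size": 2019393189, "modified_at": "2025-01-01T00:00:00Z" } ] }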
+ // Step 4: Check if model is already pulled
+ console.log(chalk_1.default.blue('Step 4: Checking if model is available...\n'));
+ const isModelAvailable = await checkModelAvailable(ollamaModelName);
+ if (!isModelAvailable) {
+ console.log(chalk_1.default.yellow(`Model ${ollamaModelName} is not available locally.`));
+ const answer = await inquirer_1.default.prompt([
+ {
+ type: 'confirm',
+ name: 'pull',
+ message: `Would you like to download ${ollamaModelName} now? (This may take a while)`,
+ default: true,
+ },
+ ]);
+ if (answer.pull) {
+ console.log(chalk_1.default.yellow(`\nDownloading ${ollamaModelName}...\n`));
+ await pullModel(ollamaModelName);
+ console.log(chalk_1.default.green(`✓ Model ${ollamaModelName} downloaded successfully\n`));
  }
  else {
- throw new Error('Ollama service failed to start. Please check the logs.');
+ throw new Error(`Model ${ollamaModelName} is not available. Please pull it manually with: ollama pull ${ollamaModelName}`);
  }
- }, {
- configRepository: (0, container_1.getContainer)().get(types_1.TYPES.ConfigRepository),
- }));
- aiCommand
- .command('stop')
- .description('Stop Ollama service')
- .action((0, command_wrapper_1.wrapCommand)(async () => {
- const container = (0, container_1.getContainer)();
- const logger = container.get(types_1.TYPES.Logger);
- logger.info('Stopping Ollama service');
- console.log(chalk_1.default.blue('\n🛑 Stopping Ollama service...\n'));
- const isRunning = await checkOllamaRunning();
- if (!isRunning) {
- console.log(chalk_1.default.yellow('Ollama is not running\n'));
- return;
+ }
+ else {
+ console.log(chalk_1.default.green(`✓ Model ${ollamaModelName} is available\n`));
+ }
+ // Step 9: Create Open WebUI Application
+ console.log(chalk_1.default.blue('Step 9: Creating Open WebUI application...\n'));
+ console.log(chalk_1.default.gray(` Device: ${deviceName} (${deviceId.substring(0, 8)}...)\n`));
+ console.log(chalk_1.default.gray(` Gateway: Managed Gateway\n`));
+ console.log(chalk_1.default.gray(` Protocol: HTTPS\n`));
+ console.log(chalk_1.default.gray(` Connected to Ollama: ${ollamaUrl}\n`));
+ const webUIResult = await createOpenWebUIApplication({
+ ollamaUrl: ollamaUrl,
+ deviceId: deviceId,
+ configRepository: configRepository,
+ applicationService,
+ gatewayService,
+ edgibleService,
+ logger,
+ });
+ const createdWebUIApp = webUIResult.app;
+ const webUIUrl = webUIResult.url;
+ console.log(chalk_1.default.green('✓ Open WebUI application created'));
+ console.log(chalk_1.default.cyan(` URL: ${webUIUrl}\n`));
+ return {
+ ollamaUrl,
+ webUIUrl,
+ createdOllamaApp,
+ createdWebUIApp,
+ deviceName,
+ deviceId,
+ ollamaModelName,
+ };
+ }
+ /**
+ * Display setup summary
+ */
+ function displaySetupSummary(options) {
+ const { ollamaModelName, ollamaUrl, webUIUrl, deviceName, deviceId, createdOllamaApp, createdWebUIApp } = options;
+ console.log(chalk_1.default.blue('\n🎉 AI Setup Complete!\n'));
+ console.log(chalk_1.default.white('📋 Summary:\n'));
+ console.log(chalk_1.default.gray(` Model: ${ollamaModelName}`));
+ console.log(chalk_1.default.gray(` Ollama API: ${ollamaUrl}`));
+ if (webUIUrl) {
+ console.log(chalk_1.default.gray(` Open WebUI: ${webUIUrl}`));
+ }
+ console.log(chalk_1.default.gray(` Device: ${deviceName} (${deviceId.substring(0, 8)}...)`));
+ console.log(chalk_1.default.gray(` Gateway: Managed Gateway`));
+ if (createdOllamaApp || createdWebUIApp) {
+ console.log(chalk_1.default.white('\n📱 Created Applications:\n'));
+ if (createdOllamaApp) {
+ console.log(chalk_1.default.gray(` • ollama-api (${createdOllamaApp.id})`));
  }
- await stopOllama();
- console.log(chalk_1.default.green('✓ Ollama service stopped\n'));
- }, {
- configRepository: (0, container_1.getContainer)().get(types_1.TYPES.ConfigRepository),
- }));
+ if (createdWebUIApp) {
+ console.log(chalk_1.default.gray(` • open-webui (${createdWebUIApp.id})`));
+ }
+ }
+ console.log(chalk_1.default.white('\n🔧 Next Steps:\n'));
+ console.log(chalk_1.default.gray(` • Test endpoint: curl ${ollamaUrl}/api/tags`));
+ console.log(chalk_1.default.gray(` • Test model: edgible ai test --model ${ollamaModelName}`));
+ console.log(chalk_1.default.gray(' • Status: edgible ai status'));
+ console.log(chalk_1.default.gray(' • List apps: edgible application list'));
+ if (createdOllamaApp) {
+ console.log(chalk_1.default.gray(' • Teardown: edgible ai teardown --remove-apps'));
+ }
  }
  /**
  * Check if Ollama is installed
@@ -403,7 +1108,7 @@ async function checkModelAvailable(modelName) {
  */
  async function pullModel(modelName) {
  try {
- (0, child_process_1.execSync)(`ollama pull ${modelName}`, {
+ (0, child_process_1.execSync)(`OLLAMA_HOST=127.0.0.1:11435 ollama pull ${modelName}`, {
  encoding: 'utf8',
  stdio: 'inherit',
  });
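The change above points `ollama pull` at the managed-process instance rather than the default daemon: the Ollama CLI talks to whatever server the OLLAMA_HOST variable designates (default 127.0.0.1:11434), and the managed-process application created later in this file runs `ollama serve` on port 11435. The manual equivalent would be roughly (model name illustrative):

  OLLAMA_HOST=127.0.0.1:11435 ollama pull llama3.2:3b

Note that the env-prefix form assumes a POSIX shell; on Windows the variable would have to be set separately before invoking the CLI.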
@@ -415,109 +1120,363 @@ async function pullModel(modelName) {
  }
  /**
  * Start Ollama service
+ * Configures Ollama to listen on 0.0.0.0:11434 so Docker containers can access it
  */
  async function startOllama() {
  const platform = os.platform();
  try {
  if (platform === 'linux') {
- // Try systemd service first
+ // Check for systemd service (user or system)
+ let serviceType = null;
+ let serviceName = 'ollama';
+ // Check user service first
  try {
- (0, child_process_1.execSync)('systemctl --user start ollama', { encoding: 'utf8', timeout: 3000 });
- return;
+ (0, child_process_1.execSync)('systemctl --user is-enabled ollama > /dev/null 2>&1', { encoding: 'utf8', timeout: 2000 });
+ serviceType = 'user';
  }
  catch {
- // Fallback to running ollama serve in background
+ // Check system service
+ try {
+ (0, child_process_1.execSync)('systemctl is-enabled ollama > /dev/null 2>&1', { encoding: 'utf8', timeout: 2000 });
+ serviceType = 'system';
+ }
+ catch {
+ // No systemd service found
+ }
  }
- // Fallback: start ollama serve in background
+ if (serviceType) {
+ // Configure systemd service to listen on all interfaces
+ const systemctlCmd = serviceType === 'user' ? 'systemctl --user' : 'sudo systemctl';
+ const serviceFile = serviceType === 'user'
+ ? `${os.homedir()}/.config/systemd/user/ollama.service.d/override.conf`
+ : '/etc/systemd/system/ollama.service.d/override.conf';
+ const serviceDir = path.dirname(serviceFile);
+ try {
+ // Create override directory if it doesn't exist
+ if (!fs.existsSync(serviceDir)) {
+ (0, child_process_1.execSync)(`mkdir -p "${serviceDir}"`, { encoding: 'utf8' });
+ }
+ // Check if override already has OLLAMA_HOST
+ let needsUpdate = true;
+ if (fs.existsSync(serviceFile)) {
+ const content = fs.readFileSync(serviceFile, 'utf8');
+ if (content.includes('OLLAMA_HOST=0.0.0.0:11434')) {
+ needsUpdate = false;
+ }
+ }
+ if (needsUpdate) {
+ // Write override file
+ const overrideContent = `[Service]
+ Environment="OLLAMA_HOST=0.0.0.0:11434"
+ `;
+ fs.writeFileSync(serviceFile, overrideContent);
+ console.log(chalk_1.default.gray(`Configured systemd service to listen on 0.0.0.0:11434\n`));
+ }
+ // Reload and restart
+ (0, child_process_1.execSync)(`${systemctlCmd} daemon-reload`, { encoding: 'utf8', timeout: 3000 });
+ (0, child_process_1.execSync)(`${systemctlCmd} restart ollama`, { encoding: 'utf8', timeout: 5000 });
+ await new Promise((resolve) => setTimeout(resolve, 2000));
+ console.log(chalk_1.default.gray('Started Ollama via systemd listening on 0.0.0.0:11434 (accessible to Docker)\n'));
+ return;
+ }
+ catch (error) {
+ console.log(chalk_1.default.yellow(`⚠ Could not configure systemd service: ${error instanceof Error ? error.message : 'Unknown error'}`));
+ console.log(chalk_1.default.gray('Falling back to manual start...\n'));
+ // Fall through to manual start
+ }
+ }
+ // Fallback: start ollama serve in background with OLLAMA_HOST set
  try {
- (0, child_process_1.execSync)('ollama serve > /dev/null 2>&1 &', { encoding: 'utf8', timeout: 1000 });
+ // Kill any existing ollama processes first (if not managed by systemd)
+ try {
+ (0, child_process_1.execSync)('pkill -f "ollama serve"', { encoding: 'utf8', timeout: 2000, stdio: 'ignore' });
+ await new Promise((resolve) => setTimeout(resolve, 1000));
+ }
+ catch {
+ // Ignore if no process to kill
+ }
+ // Start Ollama with host binding to all interfaces
+ (0, child_process_1.execSync)('OLLAMA_HOST=0.0.0.0:11434 nohup ollama serve > /dev/null 2>&1 &', {
+ encoding: 'utf8',
+ timeout: 1000,
+ shell: '/bin/bash'
+ });
  // Wait a bit for it to start
  await new Promise((resolve) => setTimeout(resolve, 2000));
+ console.log(chalk_1.default.gray('Started Ollama listening on 0.0.0.0:11434 (accessible to Docker)\n'));
  }
  catch {
  // Ignore - may already be running
  }
  }
  else if (platform === 'darwin') {
- // macOS - try launchctl or start directly
+ // macOS - try launchctl or start directly
+ try {
+ (0, child_process_1.execSync)('launchctl start com.ollama.ollama', { encoding: 'utf8', timeout: 3000 });
+ console.log(chalk_1.default.gray('Note: Started via launchd. To allow Docker access, set OLLAMA_HOST in launchd config\n'));
+ }
+ catch {
+ // Fallback: start ollama serve with host binding
+ try {
+ // Kill any existing ollama processes first
+ try {
+ (0, child_process_1.execSync)('pkill -f "ollama serve"', { encoding: 'utf8', timeout: 2000, stdio: 'ignore' });
+ await new Promise((resolve) => setTimeout(resolve, 1000));
+ }
+ catch {
+ // Ignore if no process to kill
+ }
+ (0, child_process_1.execSync)('OLLAMA_HOST=0.0.0.0:11434 nohup ollama serve > /dev/null 2>&1 &', {
+ encoding: 'utf8',
+ timeout: 1000,
+ shell: '/bin/bash'
+ });
+ await new Promise((resolve) => setTimeout(resolve, 2000));
+ console.log(chalk_1.default.gray('Started Ollama listening on 0.0.0.0:11434 (accessible to Docker)\n'));
+ }
+ catch {
+ // Ignore
+ }
+ }
+ }
+ else if (platform === 'win32') {
+ // Windows - Ollama typically runs as a service
+ try {
+ (0, child_process_1.execSync)('net start Ollama', { encoding: 'utf8', timeout: 3000 });
+ console.log(chalk_1.default.gray('Note: Started as Windows service. To allow Docker access, set OLLAMA_HOST environment variable\n'));
+ }
+ catch {
+ // Service might already be running or not installed as service
+ // Try to start it directly with host binding
+ try {
+ (0, child_process_1.execSync)('set OLLAMA_HOST=0.0.0.0:11434 && start /B ollama serve', {
+ encoding: 'utf8',
+ timeout: 1000
+ });
+ await new Promise((resolve) => setTimeout(resolve, 2000));
+ console.log(chalk_1.default.gray('Started Ollama listening on 0.0.0.0:11434 (accessible to Docker)\n'));
+ }
+ catch {
+ // Ignore
+ }
+ }
+ }
+ }
+ catch (error) {
+ // Ollama might already be running, which is fine
+ console.log(chalk_1.default.gray('Note: Ollama service may already be running\n'));
+ }
+ }
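The systemd path above boils down to writing a drop-in override and restarting the unit. Sketched as shell steps for the user-service case (the system-service case swaps in `sudo systemctl` and /etc/systemd/system):

  mkdir -p ~/.config/systemd/user/ollama.service.d
  # override.conf written by the code:
  #   [Service]
  #   Environment="OLLAMA_HOST=0.0.0.0:11434"
  systemctl --user daemon-reload
  systemctl --user restart ollama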
+ /**
+ * Check if Ollama is running
+ */
+ async function checkOllamaRunning() {
+ try {
+ // Try to query Ollama API
+ const controller = new AbortController();
+ const timeout = setTimeout(() => controller.abort(), 3000);
+ try {
+ const response = await (0, node_fetch_1.default)('http://localhost:11434/api/tags', {
+ method: 'GET',
+ signal: controller.signal,
+ });
+ clearTimeout(timeout);
+ return response.ok;
+ }
+ catch {
+ clearTimeout(timeout);
+ throw new Error('Fetch failed');
+ }
+ }
+ catch {
+ // Try alternative: check if ollama process is running
+ try {
+ const platform = os.platform();
+ if (platform === 'linux' || platform === 'darwin') {
+ (0, child_process_1.execSync)('pgrep -f ollama', { encoding: 'utf8', timeout: 2000, stdio: 'ignore' });
+ return true;
+ }
+ else if (platform === 'win32') {
+ (0, child_process_1.execSync)('tasklist /FI "IMAGENAME eq ollama.exe"', {
+ encoding: 'utf8',
+ timeout: 2000,
+ stdio: 'ignore',
+ });
+ return true;
+ }
+ }
+ catch {
+ return false;
+ }
+ return false;
+ }
+ }
+ /**
+ * Check what address Ollama is listening on
+ * @returns '0.0.0.0' if listening on all interfaces, '127.0.0.1' if localhost only, null if unknown
+ */
+ async function checkOllamaListeningAddress() {
+ try {
+ const platform = os.platform();
+ let output;
+ if (platform === 'linux' || platform === 'darwin') {
+ // Try ss first (modern), fallback to netstat
+ try {
+ output = (0, child_process_1.execSync)('ss -tlnp 2>/dev/null | grep 11434', {
+ encoding: 'utf8',
+ timeout: 2000
+ });
+ }
+ catch {
+ try {
+ output = (0, child_process_1.execSync)('netstat -tlnp 2>/dev/null | grep 11434', {
+ encoding: 'utf8',
+ timeout: 2000
+ });
+ }
+ catch {
+ return null;
+ }
+ }
+ }
+ else if (platform === 'win32') {
+ try {
+ output = (0, child_process_1.execSync)('netstat -an | findstr :11434', {
+ encoding: 'utf8',
+ timeout: 2000,
+ });
+ }
+ catch {
+ return null;
+ }
+ }
+ else {
+ return null;
+ }
+ if (output.includes('0.0.0.0:11434') || output.includes('*:11434') || output.includes('[::]:11434')) {
+ return '0.0.0.0';
+ }
+ else if (output.includes('127.0.0.1:11434') || output.includes('::1:11434')) {
+ return '127.0.0.1';
+ }
+ return null;
+ }
+ catch {
+ return null;
+ }
+ }
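As an illustration of what the substring checks above match against, `ss -tlnp | grep 11434` on Linux prints lines like the following (sample output, not from this package):

  LISTEN 0 4096   127.0.0.1:11434   0.0.0.0:*   users:(("ollama",pid=1234,fd=3))
  LISTEN 0 4096     0.0.0.0:11434   0.0.0.0:*   users:(("ollama",pid=1234,fd=3))

The first form is localhost-only; the second is all interfaces. The `*:11434` and `[::]:11434` variants cover netstat's wildcard notation and IPv6 binds.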
+ /**
+ * Fix Ollama binding if it's listening on localhost only
+ * Attempts to reconfigure Ollama to listen on 0.0.0.0:11434
+ */
+ async function fixOllamaBinding() {
+ const platform = os.platform();
+ try {
+ if (platform === 'linux') {
+ // Check for systemd service first
+ let serviceType = null;
+ try {
+ (0, child_process_1.execSync)('systemctl --user is-enabled ollama > /dev/null 2>&1', { encoding: 'utf8', timeout: 2000 });
+ serviceType = 'user';
+ }
+ catch {
+ try {
+ (0, child_process_1.execSync)('systemctl is-enabled ollama > /dev/null 2>&1', { encoding: 'utf8', timeout: 2000 });
+ serviceType = 'system';
+ }
+ catch {
+ // No systemd service
+ }
+ }
+ if (serviceType) {
+ // Configure systemd service
+ const systemctlCmd = serviceType === 'user' ? 'systemctl --user' : 'sudo systemctl';
+ const serviceFile = serviceType === 'user'
+ ? `${os.homedir()}/.config/systemd/user/ollama.service.d/override.conf`
+ : '/etc/systemd/system/ollama.service.d/override.conf';
+ const serviceDir = path.dirname(serviceFile);
+ try {
+ // Create override directory if it doesn't exist
+ if (!fs.existsSync(serviceDir)) {
+ (0, child_process_1.execSync)(`mkdir -p "${serviceDir}"`, { encoding: 'utf8' });
+ }
+ // Check if override already has OLLAMA_HOST
+ let needsUpdate = true;
+ if (fs.existsSync(serviceFile)) {
+ const content = fs.readFileSync(serviceFile, 'utf8');
+ if (content.includes('OLLAMA_HOST=0.0.0.0:11434')) {
+ needsUpdate = false;
+ }
+ }
+ if (needsUpdate) {
+ // Write override file
+ const overrideContent = `[Service]
+ Environment="OLLAMA_HOST=0.0.0.0:11434"
+ `;
+ fs.writeFileSync(serviceFile, overrideContent);
+ // Reload and restart
+ (0, child_process_1.execSync)(`${systemctlCmd} daemon-reload`, { encoding: 'utf8', timeout: 3000 });
+ (0, child_process_1.execSync)(`${systemctlCmd} restart ollama`, { encoding: 'utf8', timeout: 5000 });
+ await new Promise((resolve) => setTimeout(resolve, 2000));
+ return true;
+ }
+ }
+ catch (error) {
+ // Fall through to manual restart
+ }
+ }
+ // Fallback: kill and restart with OLLAMA_HOST
+ try {
+ (0, child_process_1.execSync)('pkill -f "ollama serve"', { encoding: 'utf8', timeout: 2000, stdio: 'ignore' });
+ await new Promise((resolve) => setTimeout(resolve, 1000));
+ }
+ catch {
+ // Ignore if no process to kill
+ }
+ (0, child_process_1.execSync)('OLLAMA_HOST=0.0.0.0:11434 nohup ollama serve > /dev/null 2>&1 &', {
+ encoding: 'utf8',
+ timeout: 1000,
+ shell: '/bin/bash'
+ });
+ await new Promise((resolve) => setTimeout(resolve, 2000));
+ return true;
+ }
+ else if (platform === 'darwin') {
+ // macOS - try to restart with OLLAMA_HOST
  try {
- (0, child_process_1.execSync)('launchctl start com.ollama.ollama', { encoding: 'utf8', timeout: 3000 });
+ (0, child_process_1.execSync)('pkill -f "ollama serve"', { encoding: 'utf8', timeout: 2000, stdio: 'ignore' });
+ await new Promise((resolve) => setTimeout(resolve, 1000));
  }
  catch {
- // Fallback: start ollama serve
- try {
- (0, child_process_1.execSync)('ollama serve > /dev/null 2>&1 &', { encoding: 'utf8', timeout: 1000 });
- await new Promise((resolve) => setTimeout(resolve, 2000));
- }
- catch {
- // Ignore
- }
+ // Ignore
  }
+ (0, child_process_1.execSync)('OLLAMA_HOST=0.0.0.0:11434 nohup ollama serve > /dev/null 2>&1 &', {
+ encoding: 'utf8',
+ timeout: 1000,
+ shell: '/bin/bash'
+ });
+ await new Promise((resolve) => setTimeout(resolve, 2000));
+ return true;
  }
  else if (platform === 'win32') {
- // Windows - Ollama typically runs as a service
+ // Windows - try to restart with OLLAMA_HOST
  try {
- (0, child_process_1.execSync)('net start Ollama', { encoding: 'utf8', timeout: 3000 });
+ (0, child_process_1.execSync)('taskkill /F /IM ollama.exe', { encoding: 'utf8', timeout: 2000, stdio: 'ignore' });
+ await new Promise((resolve) => setTimeout(resolve, 1000));
  }
  catch {
- // Service might already be running or not installed as service
- // Try to start it directly
- try {
- (0, child_process_1.execSync)('start /B ollama serve', { encoding: 'utf8', timeout: 1000 });
- await new Promise((resolve) => setTimeout(resolve, 2000));
- }
- catch {
- // Ignore
- }
+ // Ignore
  }
- }
- }
- catch (error) {
- // Ollama might already be running, which is fine
- console.log(chalk_1.default.gray('Note: Ollama service may already be running\n'));
- }
- }
- /**
- * Check if Ollama is running
- */
- async function checkOllamaRunning() {
- try {
- // Try to query Ollama API
- const controller = new AbortController();
- const timeout = setTimeout(() => controller.abort(), 3000);
- try {
- const response = await (0, node_fetch_1.default)('http://localhost:11434/api/tags', {
- method: 'GET',
- signal: controller.signal,
+ (0, child_process_1.execSync)('set OLLAMA_HOST=0.0.0.0:11434 && start /B ollama serve', {
+ encoding: 'utf8',
+ timeout: 1000
  });
- clearTimeout(timeout);
- return response.ok;
- }
- catch {
- clearTimeout(timeout);
- throw new Error('Fetch failed');
+ await new Promise((resolve) => setTimeout(resolve, 2000));
+ return true;
  }
+ return false;
  }
- catch {
- // Try alternative: check if ollama process is running
- try {
- const platform = os.platform();
- if (platform === 'linux' || platform === 'darwin') {
- (0, child_process_1.execSync)('pgrep -f ollama', { encoding: 'utf8', timeout: 2000, stdio: 'ignore' });
- return true;
- }
- else if (platform === 'win32') {
- (0, child_process_1.execSync)('tasklist /FI "IMAGENAME eq ollama.exe"', {
- encoding: 'utf8',
- timeout: 2000,
- stdio: 'ignore',
- });
- return true;
- }
- }
- catch {
- return false;
- }
+ catch (error) {
  return false;
  }
  }
@@ -582,27 +1541,454 @@ async function stopOllama() {
  /**
  * Normalize model name for Ollama (lowercase, dashes, no special chars)
  * Maps recommended model names to their Ollama model names
+ * Converts last dash before size to colon (e.g., llama-3.2-3b -> llama3.2:3b)
  */
  function normalizeModelName(modelName) {
- const normalized = modelName.toLowerCase().replace(/\s+/g, '-').replace(/[()]/g, '').replace(/\./g, '');
+ // First normalize: lowercase, replace spaces with dashes, remove parentheses
+ // Keep dots for version numbers (e.g., 3.2, 2.5)
+ const normalized = modelName.toLowerCase().replace(/\s+/g, '-').replace(/[()]/g, '');
  // Map model names to Ollama model names
  const modelMap = {
- 'llama-3.2-1b': 'llama3.2:1b',
- 'llama-3.2-3b': 'llama3.2:3b',
- 'llama-3.1-8b': 'llama3.1:8b',
- 'llama-3.1-70b': 'llama3.1:70b',
+ 'llama-32-1b': 'llama3.2:1b',
+ 'llama-32-3b': 'llama3.2:3b',
+ 'llama-31-8b': 'llama3.1:8b',
+ 'llama-31-70b': 'llama3.1:70b',
+ 'llama3.2-1b': 'llama3.2:1b',
+ 'llama3.2-3b': 'llama3.2:3b',
+ 'llama3.1-8b': 'llama3.1:8b',
+ 'llama3.1-70b': 'llama3.1:70b',
  'mistral-7b': 'mistral:7b',
+ 'phi-3-mini-38b': 'phi3:mini',
  'phi-3-mini-3.8b': 'phi3:mini',
+ 'qwen25-05b': 'qwen2.5:0.5b',
+ 'qwen25-7b': 'qwen2.5:7b',
  'qwen2.5-0.5b': 'qwen2.5:0.5b',
  'qwen2.5-7b': 'qwen2.5:7b',
- 'deepseek-r1-1.5b': 'deepseek-r1:1.5b',
+ 'deepseek-r1-15b': 'deepseek-r1:1.5b',
  'deepseek-r1-7b': 'deepseek-r1:7b',
  'deepseek-r1-8b': 'deepseek-r1:8b',
  'deepseek-r1-14b': 'deepseek-r1:14b',
  'deepseek-r1-32b': 'deepseek-r1:32b',
  'deepseek-r1-70b': 'deepseek-r1:70b',
  'deepseek-r1-671b': 'deepseek-r1:671b',
+ 'deepseek-r1-1.5b': 'deepseek-r1:1.5b',
+ };
+ // Check if we have a direct mapping
+ if (modelMap[normalized]) {
+ return modelMap[normalized];
+ }
+ // Fallback: try to convert last dash before size indicator to colon
+ // Matches patterns like: model-name-XXb or model-name-X.Xb
+ const match = normalized.match(/^(.+)-(\d+(?:\.\d+)?b)$/);
+ if (match) {
+ return `${match[1]}:${match[2]}`;
+ }
+ // Return as-is if no pattern matches
+ return normalized;
+ }
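A few illustrative inputs through the new logic (hypothetical calls, shown only to trace the three branches):

  normalizeModelName('Qwen2.5 7B');  // 'qwen2.5-7b' -> direct map hit -> 'qwen2.5:7b'
  normalizeModelName('gemma2-9b');   // no map entry -> regex fallback -> 'gemma2:9b'
  normalizeModelName('mistral');     // no trailing size suffix -> returned unchanged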
+ /**
+ * Check if Docker is installed
+ */
+ async function checkDockerInstalled() {
+ try {
+ (0, child_process_1.execSync)('docker --version', { encoding: 'utf8', timeout: 2000, stdio: 'ignore' });
+ return true;
+ }
+ catch {
+ return false;
+ }
+ }
+ /**
+ * Check if any Ollama models are available
+ */
+ async function checkHasModels() {
+ try {
+ const output = (0, child_process_1.execSync)('ollama list', { encoding: 'utf8', timeout: 5000 });
+ const lines = output.trim().split('\n');
+ // First line is header, so check if there's more than one line
+ return lines.length > 1;
+ }
+ catch {
+ return false;
+ }
+ }
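`ollama list` prints a header row followed by one row per installed model, so any output beyond the first line means at least one model is present. Illustrative output:

  NAME           ID              SIZE      MODIFIED
  llama3.2:3b    a80c4f17acd5    2.0 GB    3 days ago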
+ /**
+ * Detect the appropriate Ollama URL for Docker containers to use
+ */
+ async function detectOllamaUrlForDocker() {
+ const platform = os.platform();
+ if (platform === 'darwin' || platform === 'win32') {
+ // macOS and Windows: Docker Desktop provides host.docker.internal
+ return 'http://host.docker.internal:11434';
+ }
+ // Linux: host.docker.internal may not work, need to detect host IP
+ try {
+ // Try to get the docker0 bridge IP (typically 172.17.0.1)
+ const output = (0, child_process_1.execSync)("ip -4 addr show docker0 | grep -oP '(?<=inet\\s)\\d+(\\.\\d+){3}'", { encoding: 'utf8', timeout: 2000 });
+ const ip = output.trim();
+ if (ip) {
+ return `http://${ip}:11434`;
+ }
+ }
+ catch {
+ // Fallback: try to get the default gateway IP
+ try {
+ const output = (0, child_process_1.execSync)("ip route | grep default | awk '{print $3}'", { encoding: 'utf8', timeout: 2000 });
+ const ip = output.trim();
+ if (ip) {
+ return `http://${ip}:11434`;
+ }
+ }
+ catch {
+ // Last resort
+ }
+ }
+ // Fallback to host.docker.internal (works with newer Docker versions on Linux)
+ return 'http://host.docker.internal:11434';
+ }
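On a stock Linux install the first probe reads the docker0 bridge address, which containers on the default bridge can use to reach the host. Sample session (illustrative):

  $ ip -4 addr show docker0 | grep -oP '(?<=inet\s)\d+(\.\d+){3}'
  172.17.0.1

giving http://172.17.0.1:11434 as the URL containers would use, provided Ollama is bound to 0.0.0.0 as startOllama() arranges.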
+ /**
+ * Get the path to the docker-compose directory
+ */
+ function getComposeDirectory() {
+ // When packaged as npm module, recipes are in package root
+ const packageRecipes = path.join(__dirname, '..', '..', 'recipes', 'compose', 'open-webui');
+ // If copied to dist during build (optional)
+ const distRecipes = path.join(__dirname, '..', 'recipes', 'compose', 'open-webui');
+ // Development location (root level)
+ const devRecipes = path.join(process.cwd(), 'recipes', 'compose', 'open-webui');
+ // Check in order of likelihood
+ if (fs.existsSync(packageRecipes)) {
+ return packageRecipes;
+ }
+ if (fs.existsSync(distRecipes)) {
+ return distRecipes;
+ }
+ if (fs.existsSync(devRecipes)) {
+ return devRecipes;
+ }
+ throw new Error('Could not locate Open WebUI compose directory');
+ }
+ /**
+ * Start Open WebUI with docker-compose
+ */
+ async function startOpenWebUI(composeDir, env) {
+ const composeFile = path.join(composeDir, 'docker-compose.yml');
+ if (!fs.existsSync(composeFile)) {
+ throw new Error(`Docker compose file not found: ${composeFile}`);
+ }
+ try {
+ // Set environment variables
+ const envVars = Object.entries(env)
+ .map(([key, value]) => `${key}=${value}`)
+ .join(' ');
+ // Run docker compose up
+ (0, child_process_1.execSync)(`${envVars} docker compose -f "${composeFile}" up -d`, {
+ encoding: 'utf8',
+ stdio: 'inherit',
+ cwd: composeDir,
+ });
+ }
+ catch (error) {
+ console.error(chalk_1.default.red('Failed to start Open WebUI:'), error);
+ throw error;
+ }
+ }
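Concretely, the assembled command looks roughly like this (path and URL illustrative):

  OLLAMA_BASE_URL=http://172.17.0.1:11434 OPEN_WEBUI_PORT=3200 docker compose -f "<composeDir>/docker-compose.yml" up -d

Because the variables are prefixed onto the command string rather than passed through execSync's env option, the invocation assumes a POSIX shell.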
+ /**
+ * Stop Open WebUI
+ */
+ async function stopOpenWebUI(composeDir, removeVolumes) {
+ const composeFile = path.join(composeDir, 'docker-compose.yml');
+ try {
+ const volumeFlag = removeVolumes ? '-v' : '';
+ (0, child_process_1.execSync)(`docker compose -f "${composeFile}" down ${volumeFlag}`, {
+ encoding: 'utf8',
+ stdio: 'inherit',
+ cwd: composeDir,
+ });
+ }
+ catch (error) {
+ console.error(chalk_1.default.red('Failed to stop Open WebUI:'), error);
+ throw error;
+ }
+ }
+ /**
+ * Check if Open WebUI is running
+ */
+ async function checkOpenWebUIRunning() {
+ try {
+ const output = (0, child_process_1.execSync)('docker ps --format "{{.Names}}"', {
+ encoding: 'utf8',
+ timeout: 2000,
+ });
+ return output.includes('open-webui');
+ }
+ catch {
+ return false;
+ }
+ }
+ /**
+ * Get Open WebUI information if running
+ */
+ async function getOpenWebUIInfo() {
+ try {
+ const output = (0, child_process_1.execSync)('docker ps --filter "name=open-webui" --format "{{.Ports}}"', {
+ encoding: 'utf8',
+ timeout: 2000,
+ });
+ if (!output) {
+ return null;
+ }
+ // Parse port from output like "0.0.0.0:3200->8080/tcp"
+ const portMatch = output.match(/0\.0\.0\.0:(\d+)->/);
+ if (portMatch) {
+ return { port: parseInt(portMatch[1], 10) };
+ }
+ // Default port if we can't parse
+ return { port: 3200 };
+ }
+ catch {
+ return null;
+ }
+ }
+ /**
+ * Test if a model can be accessed and generates responses
+ * @param modelName - The Ollama model name to test
+ * @param verbose - Show detailed output
+ * @returns true if model responds successfully
+ */
+ async function testModelConnectivity(modelName, verbose = false) {
+ try {
+ if (verbose) {
+ console.log(chalk_1.default.gray(` Sending test prompt to ${modelName}...`));
+ }
+ const controller = new AbortController();
+ const timeout = setTimeout(() => controller.abort(), 30000); // 30 second timeout for model loading
+ try {
+ const response = await (0, node_fetch_1.default)('http://localhost:11434/api/generate', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ body: JSON.stringify({
+ model: modelName,
+ prompt: 'Hello',
+ stream: false,
+ }),
+ signal: controller.signal,
+ });
+ clearTimeout(timeout);
+ if (response.ok) {
+ const data = await response.json();
+ if (data.error) {
+ if (verbose) {
+ console.log(chalk_1.default.red(` ✗ Model error: ${data.error}`));
+ }
+ return false;
+ }
+ if (data.response) {
+ if (verbose) {
+ console.log(chalk_1.default.green(' ✓ Model responded successfully'));
+ console.log(chalk_1.default.gray(` Response: "${data.response.substring(0, 50)}${data.response.length > 50 ? '...' : ''}"`));
+ }
+ return true;
+ }
+ }
+ else {
+ if (verbose) {
+ console.log(chalk_1.default.red(` ✗ HTTP ${response.status}: ${response.statusText}`));
+ }
+ return false;
+ }
+ }
+ catch (error) {
+ clearTimeout(timeout);
+ if (verbose) {
+ if (error instanceof Error && error.name === 'AbortError') {
+ console.log(chalk_1.default.yellow(' ⚠ Request timed out (model may be loading)'));
+ }
+ else {
+ console.log(chalk_1.default.red(` ✗ Connection error: ${error instanceof Error ? error.message : 'Unknown error'}`));
+ }
+ }
+ return false;
+ }
+ return false;
+ }
+ catch (error) {
+ if (verbose) {
+ console.log(chalk_1.default.red(` ✗ Test failed: ${error instanceof Error ? error.message : 'Unknown error'}`));
+ }
+ return false;
+ }
+ }
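For reference, the test above is a single non-streaming generate call; a loaded model answers with the completion in the response field (illustrative exchange, not captured output):

  POST http://localhost:11434/api/generate
  {"model": "llama3.2:3b", "prompt": "Hello", "stream": false}

  {"model": "llama3.2:3b", "response": "Hello! How can I help you?", "done": true}

An {"error": ...} body, a non-2xx status, or an abort after the 30-second timeout all map to false, which is expected right after a large model is pulled but not yet loaded.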
+ /**
+ * Verify model is available on endpoint
+ */
+ async function verifyModelOnEndpoint(endpointUrl, modelName) {
+ try {
+ const controller = new AbortController();
+ const timeout = setTimeout(() => controller.abort(), 10000);
+ const response = await (0, node_fetch_1.default)(`${endpointUrl}/api/tags`, {
+ method: 'GET',
+ signal: controller.signal,
+ });
+ clearTimeout(timeout);
+ if (response.ok) {
+ const data = await response.json();
+ if (data.models) {
+ // Check if model exists (exact match or starts with model name)
+ const modelExists = data.models.some(m => {
+ const modelFullName = m.name;
+ return modelFullName === modelName ||
+ modelFullName.startsWith(modelName + ':') ||
+ modelFullName === modelName.split(':')[0];
+ });
+ return modelExists;
+ }
+ }
+ return false;
+ }
+ catch (error) {
+ return false;
+ }
+ }
+ /**
+ * Helper to parse gateway IDs from comma-separated string
+ */
+ function parseGatewayIds(ids) {
+ if (!ids)
+ return [];
+ return ids.split(',').map(s => s.trim()).filter(Boolean);
+ }
+ /**
+ * Create Ollama application on Edgible platform
+ */
+ async function createOllamaApplication(config) {
+ // Device ID is already provided from config (same device as agent)
+ const ollamaDeviceId = config.deviceId;
+ // Always use managed gateway
+ const useManagedGateway = true;
+ // Build configuration for managed-process
+ // Ollama should be installed and available in PATH on the device
+ const configuration = {
+ command: 'ollama serve',
+ env: {
+ OLLAMA_HOST: '0.0.0.0:11435',
+ },
+ };
+ // Create application as managed-process
+ const result = await config.applicationService.createApplicationProgrammatically({
+ name: 'ollama-api',
+ description: `Ollama AI API (${config.modelName}) - Managed Process`,
+ port: 11435,
+ protocol: 'https',
+ deviceIds: [ollamaDeviceId],
+ gatewayIds: undefined, // Managed gateway
+ useManagedGateway: true,
+ subtype: 'managed-process',
+ configuration,
+ });
+ return {
+ app: result,
+ url: result.url || 'https://ollama-api.your-domain.com',
+ };
+ }
+ /**
+ * Create Open WebUI application on Edgible platform
+ */
+ async function createOpenWebUIApplication(config) {
+ // Always use the same device as Ollama (same device as agent)
+ const webuiDeviceId = config.deviceId;
+ // Always use managed gateway
+ const useManagedGateway = true;
+ // Create application
+ // Note: Backend should configure OLLAMA_BASE_URL environment variable to ${config.ollamaUrl}
+ // This URL should be the platform Ollama application URL (not localhost) for remote deployments
+ const composeDir = getComposeDirectory();
+ const composeFilePath = path.join(composeDir, 'docker-compose.yml');
+ const result = await config.applicationService.createApplicationProgrammatically({
+ name: 'open-webui',
+ description: `Open WebUI - AI Chat Interface (OLLAMA_BASE_URL: ${config.ollamaUrl})`,
+ port: 3200,
+ protocol: 'https',
+ deviceIds: [webuiDeviceId],
+ gatewayIds: undefined, // Managed gateway
+ useManagedGateway: true,
+ subtype: 'docker-compose',
+ configuration: {
+ 'dockerComposePath': composeFilePath,
+ 'env': {
+ 'OLLAMA_BASE_URL': config.ollamaUrl
+ },
+ 'isWorking': true
+ }
+ });
+ return {
+ app: result,
+ url: result.url || 'https://open-webui.your-domain.com',
  };
- return modelMap[normalized] || normalized;
+ }
+ /**
+ * Start Open WebUI locally with docker-compose
+ */
+ async function startOpenWebUILocal(ollamaUrl) {
+ // Check if Docker is installed
+ const isDockerInstalled = await checkDockerInstalled();
+ if (!isDockerInstalled) {
+ throw new Error('Docker is required to run Open WebUI locally. Please install Docker first.');
+ }
+ const composeDir = getComposeDirectory();
+ await startOpenWebUI(composeDir, {
+ OLLAMA_BASE_URL: ollamaUrl,
+ OPEN_WEBUI_PORT: '3200',
+ });
+ }
+ /**
+ * Check if an Ollama URL is reachable and responding
+ */
+ async function checkOllamaUrlReachable(url) {
+ try {
+ const controller = new AbortController();
+ const timeout = setTimeout(() => controller.abort(), 5000); // 5 second timeout
+ try {
+ const response = await (0, node_fetch_1.default)(`${url}/api/tags`, {
+ method: 'GET',
+ signal: controller.signal,
+ });
+ clearTimeout(timeout);
+ return response.ok;
+ }
+ catch (error) {
+ clearTimeout(timeout);
+ return false;
+ }
+ }
+ catch {
+ return false;
+ }
+ }
+ /**
+ * Check if a URL is reachable (general purpose)
+ */
+ async function checkUrlReachable(url) {
+ try {
+ const controller = new AbortController();
+ const timeout = setTimeout(() => controller.abort(), 5000); // 5 second timeout
+ try {
+ const response = await (0, node_fetch_1.default)(url, {
+ method: 'GET',
+ signal: controller.signal,
+ });
+ clearTimeout(timeout);
+ return response.ok || response.status < 500; // Accept redirects and client errors as "reachable"
+ }
+ catch (error) {
+ clearTimeout(timeout);
+ return false;
+ }
+ }
+ catch {
+ return false;
+ }
+ }
  }
  //# sourceMappingURL=ai.js.map