groove-dev 0.27.30 → 0.27.32

This diff shows the content of publicly available package versions as released to their respective public registries. It is provided for informational purposes only.
@@ -1,6 +1,6 @@
 {
   "name": "@groove-dev/cli",
-  "version": "0.27.30",
+  "version": "0.27.32",
   "description": "GROOVE CLI — manage AI coding agents from your terminal",
   "license": "FSL-1.1-Apache-2.0",
   "type": "module",
@@ -1,6 +1,6 @@
 {
   "name": "@groove-dev/daemon",
-  "version": "0.27.30",
+  "version": "0.27.32",
   "description": "GROOVE daemon — agent orchestration engine",
   "license": "FSL-1.1-Apache-2.0",
   "type": "module",
@@ -426,7 +426,10 @@ export function createApi(app, daemon) {
   app.post('/api/providers/ollama/pull', async (req, res) => {
     const { model } = req.body;
     if (!model) return res.status(400).json({ error: 'model is required' });
-    if (!OllamaProvider.isInstalled()) return res.status(400).json({ error: 'Ollama is not installed. Install with: brew install ollama' });
+    if (!OllamaProvider.isInstalled()) {
+      const install = OllamaProvider.installCommand();
+      return res.status(400).json({ error: `Ollama is not installed. Install with: ${install.command}` });
+    }
     const broadcast = daemon.broadcast.bind(daemon);
     try {
       // Auto-start Ollama server if not running
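
The previous error always suggested 'brew install ollama', which is only correct on macOS; the route now defers to OllamaProvider.installCommand() for a platform-appropriate hint. A sketch of exercising the new error path over HTTP (the daemon host/port and the model name are assumptions, not values from this diff):

// Sketch only: POST to the pull route shown above.
// http://127.0.0.1:3000 and 'llama3' are placeholders, not from this diff.
const res = await fetch('http://127.0.0.1:3000/api/providers/ollama/pull', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ model: 'llama3' }),
});
// With Ollama missing, expect 400 and a platform-specific message, e.g. on Windows:
// { error: 'Ollama is not installed. Install with: winget install Ollama.Ollama' }
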
@@ -93,14 +93,16 @@ export class LocalProvider extends Provider {
 
   static _hasOllama() {
     try {
-      execSync('which ollama', { stdio: 'ignore' });
+      const cmd = process.platform === 'win32' ? 'where ollama' : 'which ollama';
+      execSync(cmd, { stdio: 'ignore' });
       return true;
     } catch { return false; }
   }
 
   static _hasLlamaServer() {
     try {
-      execSync('which llama-server', { stdio: 'ignore' });
+      const cmd = process.platform === 'win32' ? 'where llama-server' : 'which llama-server';
+      execSync(cmd, { stdio: 'ignore' });
       return true;
     } catch { return false; }
   }
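
The which/where probe introduced here is repeated verbatim in _hasOllama(), _hasLlamaServer(), and in OllamaProvider.isInstalled() below. A shared helper along these lines would cover all three (a sketch, not code from the package):

import { execSync } from 'node:child_process';

// Hypothetical helper: true if `bin` resolves on PATH, using `where` on
// Windows and `which` elsewhere, mirroring the pattern in this diff.
function onPath(bin) {
  const lookup = process.platform === 'win32' ? 'where' : 'which';
  try {
    execSync(`${lookup} ${bin}`, { stdio: 'ignore' });
    return true;
  } catch {
    return false;
  }
}
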
@@ -52,7 +52,8 @@ export class OllamaProvider extends Provider {
 
   static isInstalled() {
     try {
-      execSync('which ollama', { stdio: 'ignore' });
+      const cmd = process.platform === 'win32' ? 'where ollama' : 'which ollama';
+      execSync(cmd, { stdio: 'ignore' });
       return true;
     } catch {
       return false;
@@ -67,6 +68,9 @@ export class OllamaProvider extends Provider {
     if (platform === 'linux') {
       return { command: 'curl -fsSL https://ollama.ai/install.sh | sh', platform: 'Linux' };
     }
+    if (platform === 'win32') {
+      return { command: 'winget install Ollama.Ollama', alt: 'Or download from https://ollama.ai/download', platform: 'Windows' };
+    }
     return { command: 'Download from https://ollama.ai/download', platform: 'other' };
   }
 
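The Windows branch is the only one that returns the extra `alt` field. A sketch of consuming these shapes (formatInstallHint is hypothetical, not part of the package):

// Sketch: render an install hint from the shapes returned above.
function formatInstallHint(install) {
  return install.alt ? `${install.command} (${install.alt})` : install.command;
}

// formatInstallHint(OllamaProvider.installCommand()) on Windows:
//   'winget install Ollama.Ollama (Or download from https://ollama.ai/download)'
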
@@ -98,6 +102,22 @@ export class OllamaProvider extends Provider {
         }
       }
     }
+    if (platform === 'win32') {
+      try {
+        let cmd = 'ollama';
+        try {
+          execSync('where ollama', { stdio: 'ignore' });
+        } catch {
+          const localAppData = process.env.LOCALAPPDATA || '';
+          const fallback = localAppData + '\\Programs\\Ollama\\ollama.exe';
+          cmd = fallback;
+        }
+        execFile(cmd, ['serve'], { stdio: 'ignore', detached: true, shell: true }).unref();
+        return { started: true, method: 'ollama serve' };
+      } catch {
+        return { started: false, command: 'ollama serve' };
+      }
+    }
     // Linux / other
     try {
       execFile('ollama', ['serve'], { stdio: 'ignore', detached: true }).unref();
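
When 'where ollama' fails, the Windows branch falls back to %LOCALAPPDATA%\Programs\Ollama\ollama.exe and passes shell: true so the command is resolved through the shell. Node's documented pattern for a long-lived detached child uses spawn; an equivalent sketch (assuming ollama is on PATH):

import { spawn } from 'node:child_process';

// Sketch: start a background server the way the Node.js docs describe,
// detached from the parent and with no inherited stdio.
const child = spawn('ollama', ['serve'], {
  detached: true,
  stdio: 'ignore',
  windowsHide: true, // do not flash a console window on Windows
});
child.unref(); // allow the parent process to exit independently
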
@@ -109,6 +129,14 @@ export class OllamaProvider extends Provider {
 
   static stopServer() {
     const platform = process.platform;
+    if (platform === 'win32') {
+      try {
+        execSync('taskkill /IM ollama.exe /F', { stdio: 'ignore', timeout: 5000 });
+        return { stopped: true, method: 'taskkill' };
+      } catch {
+        return { stopped: false };
+      }
+    }
     if (platform === 'darwin') {
       try {
         execSync('brew services stop ollama', { stdio: 'ignore', timeout: 10000 });
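
taskkill /IM ollama.exe /F force-terminates the process immediately. A gentler variant would try a normal close first and only then force, since console processes often reject the polite close (a sketch, not the package's behavior):

import { execSync } from 'node:child_process';

// Sketch: polite close first, forced kill as the fallback.
function stopOllamaOnWindows() {
  for (const cmd of ['taskkill /IM ollama.exe', 'taskkill /IM ollama.exe /F']) {
    try {
      execSync(cmd, { stdio: 'ignore', timeout: 5000 });
      return true;
    } catch {
      // fall through to the forced variant
    }
  }
  return false;
}
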
@@ -135,7 +163,11 @@ export class OllamaProvider extends Provider {
       minRAM: 4,
       recommendedRAM: 16,
       gpuRecommended: true,
-      note: 'Apple Silicon Macs use unified memory — all RAM is GPU RAM. NVIDIA GPUs recommended on Linux.',
+      note: process.platform === 'win32'
+        ? 'NVIDIA or AMD GPUs recommended. Ensure GPU drivers are up to date.'
+        : process.platform === 'darwin'
+          ? 'Apple Silicon Macs use unified memory — all RAM is GPU RAM.'
+          : 'NVIDIA GPUs recommended on Linux.',
     };
   }
 
@@ -151,7 +183,7 @@ export class OllamaProvider extends Provider {
     let gpu = null;
     if (isAppleSilicon) {
       gpu = { type: 'apple-silicon', name: cpuModel.replace(/Apple /g, ''), vram: totalRamGb, note: 'Unified memory — all RAM available to GPU' };
-    } else if (platform === 'linux') {
+    } else if (platform === 'linux' || platform === 'win32') {
       try {
         const out = execSync('nvidia-smi --query-gpu=name,memory.total --format=csv,noheader,nounits', { encoding: 'utf8', timeout: 5000 });
         const [name, vram] = out.trim().split(', ');
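
nvidia-smi prints one CSV row per GPU, so the single split(', ') over the whole output only parses cleanly on single-GPU machines; on multi-GPU hosts the rows run together. A per-line parse of the same query (a sketch, same flags as in the diff):

import { execSync } from 'node:child_process';

// Sketch: parse every GPU row. With --nounits, memory.total is a bare MiB number.
const out = execSync(
  'nvidia-smi --query-gpu=name,memory.total --format=csv,noheader,nounits',
  { encoding: 'utf8', timeout: 5000 },
);
const gpus = out.trim().split('\n').map((line) => {
  const [name, vram] = line.split(', ');
  return { name, vramMiB: Number(vram) };
});
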
@@ -1,6 +1,6 @@
 {
   "name": "@groove-dev/gui",
-  "version": "0.27.30",
+  "version": "0.27.32",
   "description": "GROOVE GUI — visual agent control plane",
   "license": "FSL-1.1-Apache-2.0",
   "type": "module",
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "groove-dev",
-  "version": "0.27.30",
+  "version": "0.27.32",
   "description": "Open-source agent orchestration layer — the AI company OS. Local model agent engine (GGUF/Ollama/llama-server), HuggingFace model browser, MCP integrations (Slack, Gmail, Stripe, 15+), agent scheduling (cron), business roles (CMO, CFO, EA). GUI dashboard, multi-agent coordination, zero cold-start, infinite sessions. Works with Claude Code, Codex, Gemini CLI, Ollama, any local model.",
   "license": "FSL-1.1-Apache-2.0",
   "author": "Groove Dev <hello@groovedev.ai> (https://groovedev.ai)",