scai 0.1.1 → 0.1.2

This diff shows the content of publicly available package versions as released to one of the supported registries, and is provided for informational purposes only.
package/dist/index.js CHANGED
@@ -4,8 +4,19 @@ import { checkEnv } from "./commands/EnvCmd.js";
  import { checkGit } from "./commands/GitCmd.js";
  import { suggestCommitMessage } from "./commands/CommitSuggesterCmd.js";
  import { handleRefactor } from "./commands/RefactorCmd.js";
- const cmd = new Command('devcheck')
+ // Import the model check and initialization logic
+ import { bootstrap } from './modelSetup.js';
+ // Create the CLI instance
+ const cmd = new Command('scai')
      .version('0.1.0');
+ // Define CLI commands
+ cmd
+     .command('init')
+     .description('Initialize the model and download required models')
+     .action(async () => {
+         await bootstrap();
+         console.log('✅ Model initialization completed!');
+     });
  cmd
      .command('env')
      .description('Check environment variables')
@@ -23,4 +34,5 @@ cmd
      .command('refactor <file>')
      .description('Suggest a refactor for the given JS file')
      .action((file) => handleRefactor(file));
+ // Parse CLI arguments
  cmd.parse(process.argv);
package/dist/modelSetup.js ADDED
@@ -0,0 +1,96 @@
+ import { spawn, execSync } from 'child_process';
+ import * as readline from 'readline';
+ // Port and models
+ const MODEL_PORT = 11434;
+ const REQUIRED_MODELS = ['llama3', 'mistral']; // Add more if needed
+ // Ensure Ollama is running
+ async function ensureOllamaRunning() {
+     try {
+         const res = await fetch(`http://localhost:${MODEL_PORT}`);
+         if (res.ok) {
+             console.log('✅ Ollama is already running.');
+         }
+     }
+     catch (error) {
+         console.error('🟡 Ollama is not running. Starting it in the background...');
+         if (error instanceof Error) {
+             console.error('❌ Error during Ollama health check:', error.message);
+         }
+         else {
+             console.error('❌ Unexpected error during Ollama health check:', error);
+         }
+         const child = spawn('ollama', ['serve'], {
+             detached: true,
+             stdio: 'ignore',
+             windowsHide: true,
+         });
+         child.unref();
+         await new Promise((res) => setTimeout(res, 3000)); // Wait a bit for server to be ready
+     }
+ }
+ // Get installed models via ollama list
+ async function getInstalledModels() {
+     try {
+         const result = execSync('ollama list', { encoding: 'utf-8' });
+         const installedModels = result
+             .split('\n')
+             .map((line) => line.split(/\s+/)[0].split(':')[0]) // Get model name, ignore version (e.g., 'llama3:latest' becomes 'llama3')
+             .filter((model) => REQUIRED_MODELS.includes(model)); // Filter based on required models
+         return installedModels;
+     }
+     catch (error) {
+         console.error('❌ Failed to fetch installed models:', error instanceof Error ? error.message : error);
+         return [];
+     }
+ }
+ // Prompt user for input
+ function promptUser(question) {
+     const rl = readline.createInterface({
+         input: process.stdin,
+         output: process.stdout,
+     });
+     return new Promise((resolve) => rl.question(question, (answer) => {
+         rl.close();
+         resolve(answer.trim());
+     }));
+ }
+ // Ensure all required models are downloaded
+ async function ensureModelsDownloaded() {
+     const installedModels = await getInstalledModels();
+     const missingModels = REQUIRED_MODELS.filter((model) => !installedModels.includes(model));
+     if (missingModels.length === 0) {
+         console.log('✅ All required models are already installed.');
+         return;
+     }
+     console.log(`🟡 Missing models: ${missingModels.join(', ')}`);
+     const answer = await promptUser('Do you want to download the missing models now? (y/N): ');
+     if (answer.toLowerCase() !== 'y') {
+         console.log('🚫 Missing models not downloaded. Exiting.');
+         process.exit(1);
+     }
+     for (const model of missingModels) {
+         try {
+             console.log(`⬇️ Pulling model: ${model} ...`);
+             execSync(`ollama pull ${model}`, { stdio: 'inherit' });
+             console.log(`✅ Successfully pulled ${model}.`);
+         }
+         catch (err) {
+             console.error(`❌ Failed to pull ${model}:`, err instanceof Error ? err.message : err);
+             process.exit(1);
+         }
+     }
+ }
+ // Initialize the application
+ export async function bootstrap() {
+     try {
+         // Ensure Ollama is running only once at the start
+         await ensureOllamaRunning();
+         // Ensure models are downloaded once
+         await ensureModelsDownloaded();
+         // Now your CLI logic can proceed here...
+     }
+     catch (error) {
+         console.error('❌ Error during initialization:', error instanceof Error ? error.message : error);
+         process.exit(1);
+     }
+ }
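A side note on the getInstalledModels logic added above: it assumes `ollama list` prints a whitespace-separated table whose first column is a model tag such as `llama3:latest`. A minimal standalone sketch of that parsing step, using made-up sample output (the exact table layout and model IDs are assumptions, not part of this package):

// Hypothetical `ollama list` output; the real column layout may differ.
const sampleOutput = [
    'NAME             ID              SIZE    MODIFIED',
    'llama3:latest    365c0bd3c000    4.7 GB  2 days ago',
    'codellama:7b     8fdf8f752f6e    3.8 GB  5 weeks ago',
    '',
].join('\n');
const REQUIRED_MODELS = ['llama3', 'mistral'];
const installed = sampleOutput
    .split('\n')
    .map((line) => line.split(/\s+/)[0].split(':')[0]) // 'llama3:latest' -> 'llama3'
    .filter((model) => REQUIRED_MODELS.includes(model));
console.log(installed); // ['llama3']; 'mistral' would then be reported as missing and offered for download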
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "scai",
-   "version": "0.1.1",
+   "version": "0.1.2",
    "type": "module",
    "bin": {
      "scai": "./dist/index.js"