@alia-codea/cli 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json ADDED
@@ -0,0 +1,52 @@
+ {
+   "name": "@alia-codea/cli",
+   "version": "1.0.0",
+   "description": "Codea CLI - AI coding assistant for your terminal by Alia",
+   "main": "dist/index.js",
+   "bin": {
+     "codea": "./dist/index.js"
+   },
+   "type": "module",
+   "scripts": {
+     "build": "tsup src/index.ts --format esm --dts --clean",
+     "dev": "tsx src/index.ts",
+     "start": "node dist/index.js",
+     "lint": "eslint src",
+     "typecheck": "tsc --noEmit"
+   },
+   "keywords": [
+     "ai",
+     "cli",
+     "coding",
+     "assistant",
+     "terminal",
+     "alia",
+     "codea"
+   ],
+   "author": "Alia",
+   "license": "MIT",
+   "dependencies": {
+     "chalk": "^5.3.0",
+     "commander": "^12.1.0",
+     "conf": "^13.0.1",
+     "ink": "^5.0.1",
+     "ink-spinner": "^5.0.0",
+     "ink-text-input": "^6.0.0",
+     "marked": "^15.0.4",
+     "marked-terminal": "^7.2.1",
+     "openai": "^6.16.0",
+     "ora": "^8.1.1",
+     "react": "^18.3.1",
+     "simple-git": "^3.27.0"
+   },
+   "devDependencies": {
+     "@types/node": "^22.10.5",
+     "@types/react": "^18.3.12",
+     "tsup": "^8.3.5",
+     "tsx": "^4.19.2",
+     "typescript": "^5.7.2"
+   },
+   "engines": {
+     "node": ">=18"
+   }
+ }
@@ -0,0 +1,66 @@
+ import * as readline from 'readline';
+ import chalk from 'chalk';
+ import { config } from '../utils/config.js';
+ import { printSuccess, printError, printInfo } from '../utils/ui.js';
+
+ export async function login(): Promise<void> {
+   console.log();
+   console.log(chalk.bold('Codea CLI Login'));
+   console.log(chalk.gray('Enter your Alia API key to get started.'));
+   console.log(chalk.gray('Get your API key at: ') + chalk.cyan('https://alia.onl/settings/api'));
+   console.log();
+
+   const rl = readline.createInterface({
+     input: process.stdin,
+     output: process.stdout
+   });
+
+   return new Promise((resolve) => {
+     rl.question(chalk.cyan('API Key: '), async (apiKey) => {
+       rl.close();
+
+       const trimmedKey = apiKey.trim();
+
+       if (!trimmedKey) {
+         printError('No API key provided.');
+         resolve();
+         return;
+       }
+
+       // Validate the API key by making a test request
+       printInfo('Validating API key...');
+
+       try {
+         const baseUrl = config.get('apiBaseUrl') || 'https://api.alia.onl';
+         const response = await fetch(`${baseUrl}/codea/me`, {
+           headers: {
+             'Authorization': `Bearer ${trimmedKey}`
+           }
+         });
+
+         if (response.ok) {
+           const data = await response.json();
+           config.set('apiKey', trimmedKey);
+           console.log();
+           printSuccess(`Logged in successfully!`);
+           if (data.name) {
+             console.log(chalk.gray(`Welcome, ${data.name}!`));
+           }
+           console.log();
+           console.log(chalk.gray('Run ') + chalk.cyan('codea') + chalk.gray(' to start coding.'));
+         } else {
+           printError('Invalid API key. Please check and try again.');
+         }
+       } catch (error: any) {
+         printError(`Could not validate API key: ${error.message}`);
+       }
+
+       resolve();
+     });
+   });
+ }
+
+ export function logout(): void {
+   config.delete('apiKey');
+   printSuccess('Logged out successfully.');
+ }
@@ -0,0 +1,309 @@
+ import * as readline from 'readline';
+ import chalk from 'chalk';
+ import { config, createSession, saveSession } from '../utils/config.js';
+ import { streamChat } from '../utils/api.js';
+ import { executeTool, formatToolCall } from '../tools/executor.js';
+ import {
+   printBanner,
+   printTips,
+   printPrompt,
+   printToolExecution,
+   printToolResult,
+   showThinkingStatus,
+   hideThinkingStatus,
+   printStatusBar,
+   printAssistantPrefix,
+   printError,
+   printInfo
+ } from '../utils/ui.js';
+ import { buildSystemMessage, getCodebaseContext } from '../utils/context.js';
+
+ interface Message {
+   role: 'user' | 'assistant' | 'system' | 'tool';
+   content: string;
+   tool_calls?: any[];
+   tool_call_id?: string;
+ }
+
+ interface ReplOptions {
+   model: string;
+   context: boolean;
+ }
+
+ export async function startRepl(options: ReplOptions): Promise<void> {
+   const session = createSession();
+   const messages: Message[] = [];
+   let isProcessing = false;
+   let contextUsed = 0;
+   const maxContext = 128000;
+
+   // Print welcome UI
+   printTips();
+
+   // Get initial codebase context
+   let codebaseContext = '';
+   if (options.context !== false) {
+     printInfo('Analyzing codebase...');
+     codebaseContext = await getCodebaseContext();
+     if (codebaseContext) {
+       printInfo(`Loaded context from ${codebaseContext.split('\n').length} files`);
+     }
+   }
+
+   // Setup readline
+   const rl = readline.createInterface({
+     input: process.stdin,
+     output: process.stdout,
+     terminal: true
+   });
+
+   // Handle Ctrl+C
+   rl.on('SIGINT', () => {
+     if (isProcessing) {
+       isProcessing = false;
+       hideThinkingStatus();
+       console.log(chalk.yellow('\nCancelled.'));
+       printPrompt();
+     } else {
+       console.log(chalk.gray('\nGoodbye!'));
+       process.exit(0);
+     }
+   });
+
+   const askQuestion = (): void => {
+     printPrompt();
+     rl.question('', async (input) => {
+       const trimmed = input.trim();
+
+       if (!trimmed) {
+         askQuestion();
+         return;
+       }
+
+       // Handle slash commands
+       if (trimmed.startsWith('/')) {
+         await handleSlashCommand(trimmed, messages, session, options);
+         askQuestion();
+         return;
+       }
+
+       // Add user message
+       messages.push({ role: 'user', content: trimmed });
+       isProcessing = true;
+
+       // Build system message
+       const systemMessage = buildSystemMessage(options.model, codebaseContext);
+
+       // Process conversation with tool loop
+       await processConversation(messages, systemMessage, options.model, () => isProcessing);
+
+       isProcessing = false;
+
+       // Update session
+       session.messages = messages.map(m => ({ role: m.role, content: m.content }));
+       session.title = messages[0]?.content.slice(0, 50) || 'New conversation';
+       session.updatedAt = Date.now();
+       saveSession(session);
+
+       // Update context usage estimate
+       contextUsed = Math.min(95, Math.floor(messages.reduce((acc, m) => acc + m.content.length, 0) / maxContext * 100));
+
+       // Print status bar
+       printStatusBar(process.cwd(), getModelDisplayName(options.model), 100 - contextUsed);
+
+       askQuestion();
+     });
+   };
+
+   askQuestion();
+ }
+
+ async function processConversation(
+   messages: Message[],
+   systemMessage: string,
+   model: string,
+   isActive: () => boolean
+ ): Promise<void> {
+   while (isActive()) {
+     console.log();
+     printAssistantPrefix();
+
+     let fullContent = '';
+     let toolCalls: any[] | undefined;
+
+     showThinkingStatus('Thinking');
+
+     try {
+       await streamChat(messages, systemMessage, model, {
+         onContent: (content) => {
+           if (!isActive()) return;
+           hideThinkingStatus();
+           process.stdout.write(content);
+           fullContent += content;
+         },
+         onToolCall: (tc) => {
+           // Tool calls are accumulated
+         },
+         onDone: (content, tcs) => {
+           hideThinkingStatus();
+           toolCalls = tcs;
+         },
+         onError: (error) => {
+           hideThinkingStatus();
+           printError(error.message);
+         }
+       });
+     } catch (error: any) {
+       hideThinkingStatus();
+       printError(error.message);
+       break;
+     }
+
+     if (!isActive()) break;
+
+     // Handle tool calls
+     if (toolCalls && toolCalls.length > 0) {
+       // Add assistant message with tool calls
+       messages.push({
+         role: 'assistant',
+         content: fullContent,
+         tool_calls: toolCalls
+       });
+
+       if (fullContent) {
+         console.log(); // New line after content
+       }
+
+       // Execute each tool
+       for (const tc of toolCalls) {
+         if (!isActive()) break;
+
+         const args = JSON.parse(tc.function.arguments);
+         printToolExecution(tc.function.name, formatToolArgs(tc.function.name, args));
+
+         showThinkingStatus(`Executing ${tc.function.name}`);
+         const result = await executeTool(tc.function.name, args);
+         hideThinkingStatus();
+
+         printToolResult(result.success, result.result);
+
+         // Add tool result
+         messages.push({
+           role: 'tool',
+           tool_call_id: tc.id,
+           content: result.result
+         });
+       }
+
+       // Continue loop for next response
+       continue;
+     } else {
+       // No tool calls, conversation turn complete
+       if (fullContent) {
+         messages.push({ role: 'assistant', content: fullContent });
+         console.log(); // New line after response
+       }
+       break;
+     }
+   }
+ }
+
+ async function handleSlashCommand(
+   command: string,
+   messages: Message[],
+   session: any,
+   options: ReplOptions
+ ): Promise<void> {
+   const [cmd, ...args] = command.slice(1).split(' ');
+
+   switch (cmd.toLowerCase()) {
+     case 'help':
+       console.log();
+       console.log(chalk.bold('Available commands:'));
+       console.log(chalk.cyan(' /help') + chalk.gray(' - Show this help'));
+       console.log(chalk.cyan(' /clear') + chalk.gray(' - Clear conversation'));
+       console.log(chalk.cyan(' /model') + chalk.gray(' - Switch model'));
+       console.log(chalk.cyan(' /context') + chalk.gray(' - Show current context'));
+       console.log(chalk.cyan(' /save') + chalk.gray(' - Save conversation'));
+       console.log(chalk.cyan(' /exit') + chalk.gray(' - Exit Codea'));
+       console.log();
+       break;
+
+     case 'clear':
+       messages.length = 0;
+       console.log(chalk.green('Conversation cleared.'));
+       break;
+
+     case 'model':
+       const modelArg = args[0];
+       if (modelArg) {
+         options.model = modelArg.startsWith('alia-') ? modelArg : `alia-v1-${modelArg}`;
+         console.log(chalk.green(`Model switched to ${options.model}`));
+       } else {
+         console.log(chalk.gray('Current model: ') + chalk.cyan(options.model));
+         try {
+           const { fetchModels } = await import('../utils/api.js');
+           const apiModels = await fetchModels();
+           if (apiModels.length > 0) {
+             console.log(chalk.gray('Available models:'));
+             for (const m of apiModels) {
+               console.log(chalk.gray(' ') + chalk.cyan(m.id) + chalk.gray(` - ${m.name}`));
+             }
+           } else {
+             console.log(chalk.gray('Available: codea, codea-pro, codea-thinking'));
+           }
+         } catch {
+           console.log(chalk.gray('Available: codea, codea-pro, codea-thinking'));
+         }
+       }
+       break;
+
+     case 'context':
+       console.log(chalk.gray(`Messages in context: ${messages.length}`));
+       console.log(chalk.gray(`Working directory: ${process.cwd()}`));
+       break;
+
+     case 'save':
+       session.messages = messages.map(m => ({ role: m.role, content: m.content }));
+       session.updatedAt = Date.now();
+       saveSession(session);
+       console.log(chalk.green('Conversation saved.'));
+       break;
+
+     case 'exit':
+     case 'quit':
+       console.log(chalk.gray('Goodbye!'));
+       process.exit(0);
+       break;
+
+     default:
+       console.log(chalk.yellow(`Unknown command: /${cmd}`));
+       console.log(chalk.gray('Type /help for available commands.'));
+   }
+ }
+
+ function formatToolArgs(name: string, args: Record<string, any>): string {
+   switch (name) {
+     case 'read_file':
+     case 'write_file':
+     case 'edit_file':
+       return args.path || '';
+     case 'list_files':
+       return args.path || '.';
+     case 'search_files':
+       return `"${args.pattern}" in ${args.path || '.'}`;
+     case 'run_command':
+       return args.command || '';
+     default:
+       return JSON.stringify(args).slice(0, 50);
+   }
+ }
+
+ function getModelDisplayName(model: string): string {
+   const names: Record<string, string> = {
+     'alia-v1-codea': 'codea',
+     'alia-v1-pro': 'codea-pro',
+     'alia-v1-thinking': 'codea-thinking'
+   };
+   return names[model] || model;
+ }
@@ -0,0 +1,177 @@
+ import chalk from 'chalk';
+ import { config } from '../utils/config.js';
+ import { streamChat } from '../utils/api.js';
+ import { executeTool, formatToolCall } from '../tools/executor.js';
+ import { buildSystemMessage, getCodebaseContext } from '../utils/context.js';
+ import {
+   printToolExecution,
+   printToolResult,
+   showThinkingStatus,
+   hideThinkingStatus,
+   printAssistantPrefix,
+   printError,
+   printInfo
+ } from '../utils/ui.js';
+
+ interface Message {
+   role: 'user' | 'assistant' | 'system' | 'tool';
+   content: string;
+   tool_calls?: any[];
+   tool_call_id?: string;
+ }
+
+ interface RunOptions {
+   model: string;
+   yes: boolean;
+   context: boolean;
+ }
+
+ export async function runPrompt(prompt: string, options: RunOptions): Promise<void> {
+   const messages: Message[] = [];
+
+   // Get codebase context
+   let codebaseContext = '';
+   if (options.context !== false) {
+     codebaseContext = await getCodebaseContext();
+   }
+
+   // Add user message
+   messages.push({ role: 'user', content: prompt });
+
+   // Build system message
+   const systemMessage = buildSystemMessage(options.model, codebaseContext);
+
+   // Process with tool loop
+   await processConversation(messages, systemMessage, options.model, options.yes);
+ }
+
+ async function processConversation(
+   messages: Message[],
+   systemMessage: string,
+   model: string,
+   autoApprove: boolean
+ ): Promise<void> {
+   let continueProcessing = true;
+
+   while (continueProcessing) {
+     printAssistantPrefix();
+
+     let fullContent = '';
+     let toolCalls: any[] | undefined;
+
+     showThinkingStatus('Thinking');
+
+     try {
+       await streamChat(messages, systemMessage, model, {
+         onContent: (content) => {
+           hideThinkingStatus();
+           process.stdout.write(content);
+           fullContent += content;
+         },
+         onToolCall: () => {},
+         onDone: (content, tcs) => {
+           hideThinkingStatus();
+           toolCalls = tcs;
+         },
+         onError: (error) => {
+           hideThinkingStatus();
+           printError(error.message);
+           continueProcessing = false;
+         }
+       });
+     } catch (error: any) {
+       hideThinkingStatus();
+       printError(error.message);
+       break;
+     }
+
+     // Handle tool calls
+     if (toolCalls && toolCalls.length > 0) {
+       messages.push({
+         role: 'assistant',
+         content: fullContent,
+         tool_calls: toolCalls
+       });
+
+       if (fullContent) console.log();
+
+       for (const tc of toolCalls) {
+         const args = JSON.parse(tc.function.arguments);
+
+         // Check if we need approval for file writes
+         const isDestructive = ['write_file', 'edit_file', 'run_command'].includes(tc.function.name);
+
+         if (isDestructive && !autoApprove) {
+           console.log();
+           console.log(chalk.yellow('⚠ ') + chalk.bold('Approval required:'));
+           console.log(formatToolCall(tc.function.name, args));
+           console.log();
+
+           const approved = await askApproval();
+           if (!approved) {
+             messages.push({
+               role: 'tool',
+               tool_call_id: tc.id,
+               content: 'User declined this action.'
+             });
+             continue;
+           }
+         }
+
+         printToolExecution(tc.function.name, formatToolArgs(tc.function.name, args));
+
+         showThinkingStatus(`Executing ${tc.function.name}`);
+         const result = await executeTool(tc.function.name, args);
+         hideThinkingStatus();
+
+         printToolResult(result.success, result.result);
+
+         messages.push({
+           role: 'tool',
+           tool_call_id: tc.id,
+           content: result.result
+         });
+       }
+
+       continue;
+     } else {
+       if (fullContent) {
+         messages.push({ role: 'assistant', content: fullContent });
+         console.log();
+       }
+       break;
+     }
+   }
+ }
+
+ async function askApproval(): Promise<boolean> {
+   const readline = await import('readline');
+   const rl = readline.createInterface({
+     input: process.stdin,
+     output: process.stdout
+   });
+
+   return new Promise((resolve) => {
+     rl.question(chalk.cyan('Allow? [y/N] '), (answer) => {
+       rl.close();
+       resolve(answer.toLowerCase() === 'y' || answer.toLowerCase() === 'yes');
+     });
+   });
+ }
+
+ function formatToolArgs(name: string, args: Record<string, any>): string {
+   switch (name) {
+     case 'read_file':
+     case 'write_file':
+     case 'edit_file':
+       return args.path || '';
+     case 'list_files':
+       return args.path || '.';
+     case 'search_files':
+       return `"${args.pattern}" in ${args.path || '.'}`;
+     case 'run_command':
+       return args.command || '';
+     default:
+       return JSON.stringify(args).slice(0, 50);
+   }
+ }
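
The diff does not include the package entry point (dist/index.js is built from src/index.ts, which is not shown). As a rough sketch only, the commands above could be wired to the `codea` bin via commander (a declared dependency); the file path, option names, and defaults below are assumptions, not the published source.

#!/usr/bin/env node
// Hypothetical src/index.ts wiring -- illustrative only, not part of this package diff.
import { Command } from 'commander';
import { login, logout } from './commands/auth.js';   // assumed module path
import { startRepl } from './commands/repl.js';       // assumed module path
import { runPrompt } from './commands/run.js';        // assumed module path

const program = new Command();

program
  .name('codea')
  .description('Codea CLI - AI coding assistant for your terminal by Alia')
  .version('1.0.0')
  .argument('[prompt]', 'one-shot prompt; omit to start the interactive REPL')
  .option('-m, --model <model>', 'model to use', 'alia-v1-codea')
  .option('-y, --yes', 'auto-approve file writes and shell commands', false)
  .option('--no-context', 'skip codebase analysis')
  .action(async (prompt, opts) => {
    if (prompt) {
      // Non-interactive mode: single prompt with the approval-gated tool loop.
      await runPrompt(prompt, { model: opts.model, yes: opts.yes, context: opts.context });
    } else {
      // Interactive mode: REPL with slash commands and session persistence.
      await startRepl({ model: opts.model, context: opts.context });
    }
  });

program.command('login').description('Store and validate an Alia API key').action(login);
program.command('logout').description('Remove the stored API key').action(logout);

await program.parseAsync(process.argv);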