corex-cli 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,211 @@
1
+ import * as fs from 'fs';
2
+ import * as path from 'path';
3
+ import { fileURLToPath } from 'url';
4
+
5
// Recreate CommonJS-style __filename/__dirname, which are unavailable in ES modules.
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);

// Network events are appended to ../logs/network.log relative to this module.
const LOG_DIR = path.join(__dirname, '..', 'logs');
const LOG_FILE = path.join(LOG_DIR, 'network.log');
10
+
11
+ function ensureLogDir(): void {
12
+ if (!fs.existsSync(LOG_DIR)) {
13
+ fs.mkdirSync(LOG_DIR, { recursive: true });
14
+ }
15
+ }
16
+
17
+ function getTimestamp(): string {
18
+ return new Date().toISOString();
19
+ }
20
+
21
+ export function logNetworkEvent(
22
+ provider: string,
23
+ statusCode: number | null,
24
+ errorMessage: string | null,
25
+ details?: string
26
+ ): void {
27
+ ensureLogDir();
28
+ const logEntry = [
29
+ getTimestamp(),
30
+ `provider=${provider}`,
31
+ statusCode !== null ? `status=${statusCode}` : 'status=null',
32
+ errorMessage ? `error=${errorMessage}` : 'error=null',
33
+ details ? `details=${details}` : '',
34
+ ].filter(Boolean).join(' | ') + '\n';
35
+
36
+ fs.appendFileSync(LOG_FILE, logEntry);
37
+ }
38
+
39
+ export async function verifyInternet(): Promise<boolean> {
40
+ try {
41
+ const controller = new AbortController();
42
+ const timeoutId = setTimeout(() => controller.abort(), 5000);
43
+
44
+ const response = await fetch('https://example.com', {
45
+ method: 'HEAD',
46
+ signal: controller.signal,
47
+ } as RequestInit);
48
+
49
+ clearTimeout(timeoutId);
50
+ return response.ok;
51
+ } catch {
52
+ return false;
53
+ }
54
+ }
55
+
56
/** Options accepted by corexRequest: fetch's RequestInit plus a timeout. */
export interface CorexRequestOptions extends RequestInit {
  /** Milliseconds before the request is aborted (corexRequest defaults to 30000). */
  timeout?: number;
}
59
+
60
+ export class NetworkError extends Error {
61
+ constructor(
62
+ message: string,
63
+ public readonly code: 'NETWORK_OFFLINE' | 'NETWORK_FAILURE' | 'API_ERROR' | 'UNKNOWN',
64
+ public readonly statusCode?: number,
65
+ public readonly rawResponse?: string
66
+ ) {
67
+ super(message);
68
+ this.name = 'NetworkError';
69
+ }
70
+ }
71
+
72
+ export async function corexRequest(
73
+ url: string,
74
+ options: CorexRequestOptions,
75
+ provider: string
76
+ ): Promise<any> {
77
+ const { timeout = 30000, ...fetchOptions } = options;
78
+
79
+ let response: Response;
80
+
81
+ try {
82
+ const controller = new AbortController();
83
+ const timeoutId = setTimeout(() => controller.abort(), timeout);
84
+
85
+ response = await fetch(url, {
86
+ ...fetchOptions,
87
+ signal: controller.signal,
88
+ } as RequestInit);
89
+
90
+ clearTimeout(timeoutId);
91
+ } catch (networkError: any) {
92
+ const isOnline = await verifyInternet();
93
+
94
+ if (!isOnline) {
95
+ logNetworkEvent(provider, null, 'NETWORK_OFFLINE', networkError.message);
96
+ throw new NetworkError(
97
+ 'Internet connection unavailable.',
98
+ 'NETWORK_OFFLINE'
99
+ );
100
+ }
101
+
102
+ const errorMessage = networkError.code === 'AbortError'
103
+ ? 'Request timeout'
104
+ : `Network request failed: ${networkError.message}`;
105
+
106
+ logNetworkEvent(provider, null, 'NETWORK_FAILURE', errorMessage);
107
+ throw new NetworkError(
108
+ errorMessage,
109
+ 'NETWORK_FAILURE'
110
+ );
111
+ }
112
+
113
+ let rawResponse: string;
114
+ try {
115
+ rawResponse = await response.text();
116
+ } catch (e: any) {
117
+ logNetworkEvent(provider, response.status, 'READ_RESPONSE_FAILED', e.message);
118
+ throw new NetworkError(
119
+ `Failed to read response: ${e.message}`,
120
+ 'NETWORK_FAILURE',
121
+ response.status
122
+ );
123
+ }
124
+
125
+ if (!response.ok) {
126
+ let errorDetails = '';
127
+
128
+ try {
129
+ const jsonError = JSON.parse(rawResponse);
130
+ errorDetails = jsonError.error?.message || jsonError.message || rawResponse;
131
+ } catch {
132
+ errorDetails = rawResponse.substring(0, 500);
133
+ }
134
+
135
+ logNetworkEvent(provider, response.status, 'API_ERROR', errorDetails);
136
+
137
+ throw new NetworkError(
138
+ formatApiError(response.status, errorDetails),
139
+ 'API_ERROR',
140
+ response.status,
141
+ rawResponse
142
+ );
143
+ }
144
+
145
+ logNetworkEvent(provider, response.status, null);
146
+
147
+ try {
148
+ return JSON.parse(rawResponse);
149
+ } catch (e: any) {
150
+ if (rawResponse.trim() === '') {
151
+ return {};
152
+ }
153
+ throw new NetworkError(
154
+ `Invalid JSON response: ${e.message}`,
155
+ 'UNKNOWN',
156
+ response.status,
157
+ rawResponse
158
+ );
159
+ }
160
+ }
161
+
162
+ function formatApiError(status: number, errorDetails: string): string {
163
+ switch (status) {
164
+ case 401:
165
+ return 'Invalid API key.';
166
+ case 403:
167
+ return 'Access forbidden. Check API key permissions.';
168
+ case 404:
169
+ return 'Endpoint not found.';
170
+ case 429:
171
+ return 'Rate limit exceeded. Please wait and try again.';
172
+ case 500:
173
+ return 'Provider server error. Please try again later.';
174
+ case 502:
175
+ case 503:
176
+ case 504:
177
+ return 'Provider service unavailable.';
178
+ default:
179
+ if (status >= 500) {
180
+ return `Provider server error (${status}).`;
181
+ }
182
+ if (status >= 400) {
183
+ return `API error (${status}): ${errorDetails}`;
184
+ }
185
+ return errorDetails;
186
+ }
187
+ }
188
+
189
+ export function parseApiError(error: any, defaultMessage: string): string {
190
+ if (error instanceof NetworkError) {
191
+ return error.message;
192
+ }
193
+
194
+ if (error?.status === 401) {
195
+ return 'Invalid API key. Run /config to update.';
196
+ }
197
+
198
+ if (error?.status === 429) {
199
+ return 'Rate limit exceeded. Please wait and try again.';
200
+ }
201
+
202
+ if (error?.status >= 500) {
203
+ return 'Provider service unavailable.';
204
+ }
205
+
206
+ if (error?.message) {
207
+ return error.message;
208
+ }
209
+
210
+ return defaultMessage;
211
+ }
@@ -0,0 +1,107 @@
1
+ import { corexRequest, NetworkError } from '../network/request.js';
2
+ import { Message } from '../../types.js';
3
+
4
/** Settings shared by every provider's chat function. */
export interface ProviderConfig {
  /** Provider API key, sent in each request's auth header. */
  apiKey: string;
  /** Model identifier forwarded verbatim to the provider. */
  model: string;
  /** System prompt prepended to every conversation. */
  systemPrompt: string;
  /** Sampling temperature forwarded to the provider. */
  temperature: number;
  /** Maximum number of tokens the provider may generate. */
  maxTokens: number;
}
11
+
12
/** Result of one chat round-trip. */
export interface ChatResponse {
  /** Full assistant reply text. */
  content: string;
  /** Token accounting, present only when the provider reports it. */
  usage?: {
    inputTokens: number;
    outputTokens: number;
    totalTokens: number;
  };
}
20
+
21
+ export async function chatAnthropic(
22
+ messages: Message[],
23
+ userMessage: string,
24
+ config: ProviderConfig,
25
+ onToken: (token: string) => void
26
+ ): Promise<ChatResponse> {
27
+ const endpoint = 'https://api.anthropic.com/v1/messages';
28
+
29
+ const formattedMessages = [
30
+ ...messages.map(m => ({
31
+ role: m.role as 'user' | 'assistant',
32
+ content: m.content
33
+ })),
34
+ { role: 'user' as const, content: userMessage }
35
+ ];
36
+
37
+ const requestBody: any = {
38
+ model: config.model,
39
+ max_tokens: config.maxTokens,
40
+ temperature: config.temperature,
41
+ system: config.systemPrompt,
42
+ messages: formattedMessages,
43
+ stream: true,
44
+ };
45
+
46
+ const response = await corexRequest(
47
+ endpoint,
48
+ {
49
+ method: 'POST',
50
+ headers: {
51
+ 'x-api-key': config.apiKey,
52
+ 'anthropic-version': '2023-06-01',
53
+ 'content-type': 'application/json',
54
+ },
55
+ body: JSON.stringify(requestBody),
56
+ timeout: 60000,
57
+ },
58
+ 'anthropic'
59
+ );
60
+
61
+ let fullContent = '';
62
+ let usage = { inputTokens: 0, outputTokens: 0, totalTokens: 0 };
63
+
64
+ if (response && response.forEach) {
65
+ for (const event of response) {
66
+ if (event.type === 'content_block_delta' && event.delta?.type === 'text_delta') {
67
+ const text = event.delta.text;
68
+ fullContent += text;
69
+ onToken(text);
70
+ }
71
+ if (event.type === 'message_delta' && event.usage) {
72
+ usage = {
73
+ inputTokens: event.usage.input_tokens || 0,
74
+ outputTokens: event.usage.output_tokens || 0,
75
+ totalTokens: (event.usage.input_tokens || 0) + (event.usage.output_tokens || 0),
76
+ };
77
+ }
78
+ }
79
+ }
80
+
81
+ return { content: fullContent, usage };
82
+ }
83
+
84
+ export async function detectProvider(apiKey: string): Promise<string> {
85
+ const key = apiKey.trim();
86
+
87
+ if (key.startsWith('sk-ant-')) {
88
+ return 'anthropic';
89
+ }
90
+ if (key.startsWith('AIza')) {
91
+ return 'gemini';
92
+ }
93
+ if (key.startsWith('sk-or-v1-') || key.startsWith('sk-or-')) {
94
+ return 'openrouter';
95
+ }
96
+ if (key.startsWith('sk-proj-')) {
97
+ return 'openai';
98
+ }
99
+ if (key.startsWith('sk-') && !key.startsWith('sk-or-')) {
100
+ return 'openai';
101
+ }
102
+ if (key.startsWith('ds-') || key.toLowerCase().includes('deepseek')) {
103
+ return 'deepseek';
104
+ }
105
+
106
+ throw new Error(`Cannot detect provider from API key. Please check your key format.`);
107
+ }
@@ -0,0 +1,56 @@
1
+ import { corexRequest } from '../network/request.js';
2
+ import { Message } from '../../types.js';
3
+ import { ProviderConfig, ChatResponse } from './anthropic.js';
4
+
5
+ export async function chatGemini(
6
+ messages: Message[],
7
+ userMessage: string,
8
+ config: ProviderConfig,
9
+ onToken: (token: string) => void
10
+ ): Promise<ChatResponse> {
11
+ const endpoint = `https://generativelanguage.googleapis.com/v1beta/models/${config.model}:streamGenerateContent?alt=sse`;
12
+
13
+ const contents = [
14
+ ...messages.map(m => ({
15
+ role: m.role === 'assistant' ? 'model' : 'user',
16
+ parts: [{ text: m.content }]
17
+ })),
18
+ { role: 'user', parts: [{ text: userMessage }] }
19
+ ];
20
+
21
+ const response = await corexRequest(
22
+ endpoint,
23
+ {
24
+ method: 'POST',
25
+ headers: {
26
+ 'Content-Type': 'application/json',
27
+ },
28
+ body: JSON.stringify({
29
+ contents,
30
+ generationConfig: {
31
+ temperature: config.temperature,
32
+ maxOutputTokens: config.maxTokens,
33
+ },
34
+ systemInstruction: {
35
+ parts: [{ text: config.systemPrompt }]
36
+ }
37
+ }),
38
+ timeout: 60000,
39
+ },
40
+ 'gemini'
41
+ );
42
+
43
+ let fullContent = '';
44
+
45
+ if (response && response.forEach) {
46
+ for (const chunk of response) {
47
+ if (chunk.candidates?.[0]?.content?.parts?.[0]?.text) {
48
+ const text = chunk.candidates[0].content.parts[0].text;
49
+ fullContent += text;
50
+ onToken(text);
51
+ }
52
+ }
53
+ }
54
+
55
+ return { content: fullContent };
56
+ }
@@ -0,0 +1,4 @@
1
// Barrel module: re-export each provider's chat function and the shared types.
export { chatAnthropic, detectProvider } from './anthropic.js';
export { chatOpenAI } from './openai.js';
export { chatGemini } from './gemini.js';
export type { ProviderConfig, ChatResponse } from './anthropic.js';
@@ -0,0 +1,64 @@
1
+ import { corexRequest } from '../network/request.js';
2
+ import { Message } from '../../types.js';
3
+ import { ProviderConfig, ChatResponse } from './anthropic.js';
4
+
5
+ export async function chatOpenAI(
6
+ messages: Message[],
7
+ userMessage: string,
8
+ config: ProviderConfig,
9
+ onToken: (token: string) => void,
10
+ baseURL: string = 'https://api.openai.com/v1'
11
+ ): Promise<ChatResponse> {
12
+ const endpoint = `${baseURL}/chat/completions`;
13
+
14
+ const formattedMessages = [
15
+ { role: 'system', content: config.systemPrompt },
16
+ ...messages.map(m => ({
17
+ role: m.role as 'user' | 'assistant',
18
+ content: m.content
19
+ })),
20
+ { role: 'user' as const, content: userMessage }
21
+ ];
22
+
23
+ const response = await corexRequest(
24
+ endpoint,
25
+ {
26
+ method: 'POST',
27
+ headers: {
28
+ 'Authorization': `Bearer ${config.apiKey}`,
29
+ 'Content-Type': 'application/json',
30
+ },
31
+ body: JSON.stringify({
32
+ model: config.model,
33
+ messages: formattedMessages,
34
+ temperature: config.temperature,
35
+ max_tokens: config.maxTokens,
36
+ stream: true,
37
+ }),
38
+ timeout: 60000,
39
+ },
40
+ baseURL.includes('openrouter') ? 'openrouter' : baseURL.includes('deepseek') ? 'deepseek' : 'openai'
41
+ );
42
+
43
+ let fullContent = '';
44
+ let usage = { inputTokens: 0, outputTokens: 0, totalTokens: 0 };
45
+
46
+ if (response && response.forEach) {
47
+ for (const chunk of response) {
48
+ if (chunk.choices?.[0]?.delta?.content) {
49
+ const text = chunk.choices[0].delta.content;
50
+ fullContent += text;
51
+ onToken(text);
52
+ }
53
+ if (chunk.usage) {
54
+ usage = {
55
+ inputTokens: chunk.usage.prompt_tokens || 0,
56
+ outputTokens: chunk.usage.completion_tokens || 0,
57
+ totalTokens: chunk.usage.total_tokens || 0,
58
+ };
59
+ }
60
+ }
61
+ }
62
+
63
+ return { content: fullContent, usage };
64
+ }
package/src/index.ts ADDED
@@ -0,0 +1,62 @@
1
+ import React from 'react';
2
+ import { render } from 'ink';
3
+ import dotenv from 'dotenv';
4
+ import { loadConfig } from './lib/config.js';
5
+ import App from './app.js';
6
+
7
+ dotenv.config();
8
+
9
// Fail fast on old runtimes. process.version looks like "v18.x.y": strip
// the leading "v" and parse the major component.
const nodeVersion = parseInt(process.version.slice(1).split('.')[0], 10);
if (nodeVersion < 18) {
  console.error('COREX requires Node.js 18 or higher.');
  process.exit(1);
}
14
+
15
+ const isRawModeSupported = () => {
16
+ return process.stdin.isTTY;
17
+ };
18
+
19
/**
 * CLI entry point: handle the `logout` subcommand, resolve configuration
 * (saved config wins over the built-in defaults), warn when the terminal
 * cannot support raw-mode input, then mount the Ink app and wait for it
 * to exit.
 */
async function main(): Promise<void> {
  const args = process.argv.slice(2);
  if (args[0] === 'logout') {
    // NOTE(review): this only prints a message — no saved credentials or
    // config are cleared here; confirm whether stored state should be
    // deleted on logout.
    console.log('Logged out. Run \'corex\' to set up again.');
    process.exit(0);
  }

  // Defaults used when no saved config exists; the API key can come from
  // the COREX_API_KEY environment variable (loaded via dotenv above).
  const defaultConfig = {
    apiKey: process.env.COREX_API_KEY || '',
    provider: 'anthropic' as const,
    model: 'claude-3-5-sonnet-20241022',
    theme: 'default' as const,
    systemPrompt: 'You are COREX, an elite AI assistant.',
    maxTokens: 4096,
    temperature: 0.7,
    saveHistory: false,
    userName: 'You',
  };

  const savedConfig = loadConfig();
  const config = savedConfig || defaultConfig;

  if (!isRawModeSupported()) {
    // \x1b[33m ... \x1b[0m = yellow warning text.
    console.log('\n\x1b[33mWarning: Terminal does not support raw mode.\x1b[0m');
    console.log('For best experience, run in a proper terminal emulator.\n');
  }

  const { waitUntilExit } = render(React.createElement(App, { config }));

  await waitUntilExit();
  process.exit(0);
}
51
+
52
+ process.on('SIGINT', () => {
53
+ try {
54
+ if (process.stdin.isTTY && process.stdin.isRaw) {
55
+ process.stdin.setRawMode(false);
56
+ }
57
+ } catch (e) {}
58
+ process.stdout.write('\x1b[?25h\n');
59
+ process.exit(0);
60
+ });
61
+
62
+ main();
package/src/lib/ai.ts ADDED
@@ -0,0 +1,167 @@
1
+ import Anthropic from '@anthropic-ai/sdk';
2
+ import { GoogleGenerativeAI } from '@google/generative-ai';
3
+ import OpenAI from 'openai';
4
+ import { Message, TokenUsage, CorexConfig } from '../types.js';
5
+
6
// Module-level SDK clients. initAI() resets all three and then instantiates
// at most one, matching the configured provider.
let anthropic: Anthropic | null = null;
let gemini: GoogleGenerativeAI | null = null;
let openai: OpenAI | null = null;
9
+
10
+ export function initAI(apiKey: string, config: CorexConfig): void {
11
+ const { provider } = config;
12
+ // Reset clients
13
+ anthropic = null;
14
+ gemini = null;
15
+ openai = null;
16
+
17
+ if (provider === 'anthropic') {
18
+ anthropic = new Anthropic({ apiKey });
19
+ } else if (provider === 'gemini') {
20
+ gemini = new GoogleGenerativeAI(apiKey);
21
+ } else if (provider === 'openai') {
22
+ openai = new OpenAI({ apiKey });
23
+ } else if (provider === 'openrouter') {
24
+ openai = new OpenAI({
25
+ apiKey,
26
+ baseURL: 'https://openrouter.ai/api/v1',
27
+ defaultHeaders: {
28
+ "HTTP-Referer": "https://github.com/corex-ai",
29
+ "X-Title": "COREX CLI",
30
+ }
31
+ });
32
+ } else if (provider === 'deepseek') {
33
+ openai = new OpenAI({
34
+ apiKey,
35
+ baseURL: 'https://api.deepseek.com'
36
+ });
37
+ }
38
+ }
39
+
40
/**
 * Stream one chat turn through the SDK client selected by `config.provider`,
 * delivering text increments via `onToken` and the final text plus token
 * usage via `onComplete`. All failures are routed to `onError`; this
 * function itself never throws.
 *
 * @param history      Prior conversation turns.
 * @param userMessage  The new user turn.
 * @param config       Provider/model/prompt/sampling settings.
 * @param onToken      Called for each streamed text chunk.
 * @param onComplete   Called once with the full text and token usage.
 *                     (Gemini and OpenAI paths report zeroed usage — the
 *                     streaming responses here don't surface counts.)
 * @param onError      Called with a user-presentable Error on any failure.
 * @param imageContent Optional base64 image payload, assumed to be PNG.
 */
export async function sendMessage(
  history: Message[],
  userMessage: string,
  config: CorexConfig,
  onToken: (token: string) => void,
  onComplete: (fullText: string, usage: TokenUsage) => void,
  onError: (error: Error) => void,
  imageContent?: string | null
): Promise<void> {
  const { provider, model, systemPrompt, temperature, maxTokens } = config;

  try {
    if (provider === 'anthropic' && anthropic) {
      // Anthropic content is a list of blocks: text first, then the image.
      const currentMessageContent: any[] = [{ type: 'text', text: userMessage }];
      if (imageContent) {
        currentMessageContent.push({
          type: 'image',
          source: {
            type: 'base64',
            media_type: 'image/png', // Assuming png for simplicity or detect?
            data: imageContent,
          }
        });
      }

      const stream = anthropic.messages.stream({
        model,
        max_tokens: maxTokens,
        temperature,
        system: systemPrompt,
        messages: [
          ...history.map(m => ({
            role: m.role as 'user' | 'assistant',
            content: m.content
          })),
          { role: 'user', content: currentMessageContent as any } // Cast because SDK types might be strict
        ],
      });

      // Accumulate the full reply while forwarding each chunk.
      let fullText = '';
      stream.on('text', (text) => {
        fullText += text;
        onToken(text);
      });

      // finalMessage() resolves when the stream ends and carries usage.
      const finalMessage = await stream.finalMessage();
      onComplete(fullText, {
        inputTokens: finalMessage.usage?.input_tokens || 0,
        outputTokens: finalMessage.usage?.output_tokens || 0,
        totalTokens: (finalMessage.usage?.input_tokens || 0) + (finalMessage.usage?.output_tokens || 0),
      });

    } else if (provider === 'gemini' && gemini) {
      // Gemini uses 'model' (not 'assistant') for reply turns.
      const genModel = gemini.getGenerativeModel({ model });
      const chat = genModel.startChat({
        history: history.map(m => ({
          role: m.role === 'user' ? 'user' : 'model',
          parts: [{ text: m.content }]
        })),
        generationConfig: { maxOutputTokens: maxTokens, temperature },
      });

      const parts: any[] = [{ text: userMessage }];
      if (imageContent) {
        parts.push({
          inlineData: {
            mimeType: 'image/png', // assumes PNG — see imageContent note above
            data: imageContent
          }
        });
      }

      const result = await chat.sendMessageStream(parts);
      let fullText = '';
      for await (const chunk of result.stream) {
        const chunkText = chunk.text();
        fullText += chunkText;
        onToken(chunkText);
      }
      // Usage counts are not read from this streaming path.
      onComplete(fullText, { inputTokens: 0, outputTokens: 0, totalTokens: 0 });

    } else if ((provider === 'openai' || provider === 'openrouter' || provider === 'deepseek') && openai) {
      // OpenRouter/DeepSeek share the OpenAI client (configured in initAI).
      let messages: any[] = [
        { role: 'system', content: systemPrompt },
        ...history.map(m => ({ role: m.role as 'user' | 'assistant', content: m.content }))
      ];

      const userMsgObj: any = { role: 'user', content: userMessage };
      if (imageContent) {
        // With an image, content becomes a multi-part array (text + image URL).
        userMsgObj.content = [
          { type: 'text', text: userMessage },
          {
            type: 'image_url',
            image_url: {
              url: `data:image/png;base64,${imageContent}`
            }
          }
        ];
      }
      messages.push(userMsgObj);

      const stream = await openai.chat.completions.create({
        model,
        messages,
        stream: true,
        temperature,
        max_tokens: maxTokens,
      });

      let fullText = '';
      for await (const chunk of stream) {
        const content = chunk.choices[0]?.delta?.content || '';
        if (content) {
          fullText += content;
          onToken(content);
        }
      }
      // Usage counts are not read from this streaming path.
      onComplete(fullText, { inputTokens: 0, outputTokens: 0, totalTokens: 0 });

    } else {
      // initAI() was not called, or it was called with a different provider.
      throw new Error(`Provider ${provider} not initialized.`);
    }
  } catch (err: any) {
    // Normalize every failure into a single user-presentable Error.
    let message = err.message || 'An unexpected error occurred.';
    if (err.status === 401) message = 'Invalid API key. Run /config to update.';
    onError(new Error(message));
  }
}