carto-cli 0.1.0-rc.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,489 @@
+ "use strict";
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     var desc = Object.getOwnPropertyDescriptor(m, k);
+     if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+         desc = { enumerable: true, get: function() { return m[k]; } };
+     }
+     Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     o[k2] = m[k];
+ }));
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+     Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+     o["default"] = v;
+ });
+ var __importStar = (this && this.__importStar) || (function () {
+     var ownKeys = function(o) {
+         ownKeys = Object.getOwnPropertyNames || function (o) {
+             var ar = [];
+             for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
+             return ar;
+         };
+         return ownKeys(o);
+     };
+     return function (mod) {
+         if (mod && mod.__esModule) return mod;
+         var result = {};
+         if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
+         __setModuleDefault(result, mod);
+         return result;
+     };
+ })();
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.handleAiFeature = handleAiFeature;
+ exports.handleAiProxy = handleAiProxy;
+ const api_1 = require("../api");
+ const colors_1 = require("../colors");
+ const fs_1 = require("fs");
+ const readline = __importStar(require("readline"));
+ const AI_FEATURES = {
+     'aiagent': {
+         buildQueryParams: (args) => ({ mapId: args[0] }),
+         buildFirstMessageVars: () => ({
+             conversation_start_time: new Date().toISOString(),
+             user_timezone: Intl.DateTimeFormat().resolvedOptions().timeZone || 'UTC'
+         }),
+         requiredArg: 'map ID',
+         argName: '<map-id>'
+     }
+     // Future features can be added here:
+     // 'workflowsCreatorAgent': {
+     //     buildQueryParams: (args: string[]) => ({ workflowId: args[0] }),
+     //     requiredArg: 'workflow ID',
+     //     argName: '<workflow-id>'
+     // }
+ };
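+ // Illustrative only (not part of the module): a feature entry maps positional
+ // CLI args to the query params sent with each request, e.g.
+ //   AI_FEATURES['aiagent'].buildQueryParams(['b6a3-map-id']);
+ //   // => { mapId: 'b6a3-map-id' }   ('b6a3-map-id' is a made-up placeholder)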
+ /**
+  * Handle AI Feature commands (aiagent, workflowsCreatorAgent, etc.)
+  */
+ async function handleAiFeature(featureName, args, options, token, baseUrl, jsonOutput, debug = false, profile) {
+     try {
+         const client = await api_1.ApiClient.create(token, baseUrl, debug, profile);
+         const feature = AI_FEATURES[featureName];
+         if (!feature) {
+             throw new Error(`Unknown AI feature: ${featureName}`);
+         }
+         // Validate required argument
+         if (args.length === 0) {
+             throw new Error(`${featureName} requires ${feature.requiredArg}. Usage: carto ai ${featureName} ${feature.argName} [message]`);
+         }
+         // Fetch agent token if we're using aiagent feature
+         let agentClient = client;
+         if (featureName === 'aiagent') {
+             const mapId = args[0];
+             if (!jsonOutput) {
+                 console.log((0, colors_1.dim)('Fetching agent configuration...'));
+             }
+             try {
+                 const mapData = await client.getWorkspace(`/maps/${mapId}`);
+                 if (!mapData.agent || !mapData.agent.config) {
+                     throw new Error(`Map does not have an AI agent configured. Enable an agent in the map settings first.`);
+                 }
+                 if (!mapData.agent.token) {
+                     throw new Error(`Map agent is missing authentication token. Please reconfigure the agent.`);
+                 }
+                 // Create new client with agent-specific token
+                 agentClient = await api_1.ApiClient.create(mapData.agent.token, baseUrl, debug, profile);
+                 if (!jsonOutput) {
+                     console.log((0, colors_1.dim)('Agent configured with model: ' + (mapData.agent.config?.model || 'unknown')));
+                 }
+             }
+             catch (err) {
+                 if (err.message.includes('does not have') || err.message.includes('is missing')) {
+                     throw err;
+                 }
+                 throw new Error(`Failed to fetch agent configuration: ${err.message}`);
+             }
+         }
+         const message = args[1];
+         // Interactive mode: no message provided
+         if (!message && !options.file && process.stdin.isTTY) {
+             await interactiveMode(agentClient, featureName, feature, args, options, jsonOutput);
+             return;
+         }
+         // One-shot mode: message provided
+         const userMessage = await getMessageFromInput(message, options);
+         if (!userMessage) {
+             throw new Error('No message provided. Use: carto ai ' + featureName + ' ' + feature.argName + ' "message" or --file <path>');
+         }
+         await oneShotMode(agentClient, featureName, feature, args, userMessage, options, jsonOutput);
+     }
+     catch (err) {
+         if (jsonOutput) {
+             console.log(JSON.stringify({ success: false, error: err.message }));
+         }
+         else {
+             if (err.message.includes('401') || err.message.includes('Token not defined')) {
+                 console.log((0, colors_1.error)('✗ Authentication required'));
+                 console.log('Please run: carto auth login');
+             }
+             else {
+                 console.log((0, colors_1.error)('✗ ' + err.message));
+             }
+         }
+         process.exit(1);
+     }
+ }
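+ // Hypothetical invocations, assuming the CLI wires this handler under
+ // `carto ai` (the map ID, message, and file name are placeholders):
+ //   carto ai aiagent <map-id>                      # interactive chat
+ //   carto ai aiagent <map-id> "Describe this map"  # one-shot message
+ //   carto ai aiagent <map-id> --file prompt.txt    # message read from a file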
+ /**
+  * Interactive mode - multi-turn conversation with readline
+  */
+ async function interactiveMode(client, featureName, feature, args, options, jsonOutput) {
+     const conversationState = {
+         lastMessageId: options.conversationId || null,
+         conversationId: null,
+         isFirstMessage: !options.conversationId
+     };
+     if (!jsonOutput) {
+         console.log((0, colors_1.bold)(`\n🤖 Interactive AI Chat (${featureName})`));
+         console.log((0, colors_1.dim)('Type your message and press Enter. Type "exit" or "quit" to end.\n'));
+     }
+     const rl = readline.createInterface({
+         input: process.stdin,
+         output: process.stdout,
+         prompt: (0, colors_1.bold)('You: ')
+     });
+     rl.prompt();
+     rl.on('line', async (line) => {
+         const input = line.trim();
+         if (input === 'exit' || input === 'quit') {
+             rl.close();
+             return;
+         }
+         if (!input) {
+             rl.prompt();
+             return;
+         }
+         try {
+             // Disable prompt while processing
+             rl.pause();
+             if (!jsonOutput) {
+                 console.log((0, colors_1.bold)('\nAssistant: '));
+             }
+             await sendMessage(client, featureName, feature, args, input, conversationState, jsonOutput);
+             if (!jsonOutput) {
+                 console.log(''); // Empty line before next prompt
+             }
+             // Re-enable prompt
+             rl.resume();
+             rl.prompt();
+         }
+         catch (err) {
+             console.log((0, colors_1.error)('\n✗ Error: ' + err.message));
+             rl.resume();
+             rl.prompt();
+         }
+     });
+     rl.on('close', () => {
+         if (!jsonOutput) {
+             console.log((0, colors_1.dim)('\n\nGoodbye!'));
+         }
+         process.exit(0);
+     });
+ }
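+ // Sketch of an interactive session, assuming a map with a configured agent
+ // (the exchange below is invented and abbreviated for illustration):
+ //   🤖 Interactive AI Chat (aiagent)
+ //   You: what layers does this map have?
+ //   Assistant: ...streamed reply...
+ //   You: exit
+ //   Goodbye!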
+ /**
+  * One-shot mode - single message
+  */
+ async function oneShotMode(client, featureName, feature, args, message, options, jsonOutput) {
+     const conversationState = {
+         lastMessageId: options.conversationId || null,
+         conversationId: null,
+         isFirstMessage: !options.conversationId
+     };
+     if (!jsonOutput) {
+         console.log((0, colors_1.bold)('Assistant: '));
+     }
+     await sendMessage(client, featureName, feature, args, message, conversationState, jsonOutput);
+     if (jsonOutput && conversationState.lastMessageId) {
+         // Output conversation ID for continuation
+         console.log(JSON.stringify({
+             success: true,
+             conversationId: conversationState.conversationId,
+             messageId: conversationState.lastMessageId
+         }));
+     }
+ }
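+ // In JSON mode, a successful one-shot run ends with a line shaped like the
+ // following (IDs invented for illustration):
+ //   {"success":true,"conversationId":"conv_123","messageId":"msg_456"}
+ // Feeding messageId back in as options.conversationId continues the thread,
+ // since it seeds lastMessageId (the parent of the next message).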
+ /**
+  * Send a message to AI Feature and handle streaming response
+  */
+ async function sendMessage(client, featureName, feature, args, message, conversationState, jsonOutput) {
+     const queryParams = feature.buildQueryParams(args);
+     const body = {
+         input: message
+     };
+     // Add parent for conversation continuation
+     if (conversationState.lastMessageId) {
+         body.parent = conversationState.lastMessageId;
+     }
+     // Add vars for first message
+     if (conversationState.isFirstMessage && feature.buildFirstMessageVars) {
+         body.vars = feature.buildFirstMessageVars();
+     }
+     let responseText = '';
+     await client.streamAiFeature(featureName, queryParams, body, async (chunk) => {
+         if (chunk.type === 'delta') {
+             // Streaming text chunk
+             if (jsonOutput) {
+                 console.log(JSON.stringify({ type: 'delta', text: chunk.delta }));
+             }
+             else {
+                 process.stdout.write(chunk.delta);
+             }
+             responseText += chunk.delta;
+         }
+         else if (chunk.type === 'response') {
+             // Complete response with IDs
+             conversationState.lastMessageId = chunk.id;
+             conversationState.conversationId = chunk.conversationId;
+             conversationState.isFirstMessage = false;
+             if (chunk.message && chunk.message !== responseText) {
+                 // Full message provided (fallback if no deltas)
+                 if (jsonOutput) {
+                     console.log(JSON.stringify({ type: 'response', message: chunk.message, id: chunk.id }));
+                 }
+                 else {
+                     process.stdout.write(chunk.message);
+                 }
+             }
+             if (chunk.actions && chunk.actions.length > 0) {
+                 // Frontend actions requested
+                 if (jsonOutput) {
+                     console.log(JSON.stringify({ type: 'actions', actions: chunk.actions }));
+                 }
+                 else {
+                     console.log((0, colors_1.dim)(`\n[Agent requested ${chunk.actions.length} action(s)]`));
+                 }
+             }
+         }
+         else if (chunk.type === 'action_execution') {
+             // Backend action started
+             if (jsonOutput) {
+                 console.log(JSON.stringify({ type: 'action_execution', action: chunk.action, action_id: chunk.action_id }));
+             }
+             else {
+                 console.log((0, colors_1.dim)(`\n[Executing: ${chunk.action}...]`));
+             }
+         }
+         else if (chunk.type === 'action_execution_finished') {
+             // Backend action completed
+             if (jsonOutput) {
+                 console.log(JSON.stringify({ type: 'action_finished', action: chunk.action, action_id: chunk.action_id }));
+             }
+         }
+         else if (chunk.type === 'error') {
+             // Error from API
+             if (jsonOutput) {
+                 console.log(JSON.stringify({ type: 'error', message: chunk.message }));
+             }
+             else {
+                 console.log((0, colors_1.error)('\n✗ Error: ' + (chunk.message || 'Unknown error')));
+             }
+         }
+         else if (chunk.type === 'end') {
+             // Stream ended
+             if (jsonOutput) {
+                 console.log(JSON.stringify({ type: 'end', conversationId: chunk.conversationId }));
+             }
+         }
+         else if (chunk.type === 'debug') {
+             // Debug information
+             if (jsonOutput) {
+                 console.log(JSON.stringify({ type: 'debug', message: chunk.message, details: chunk.details }));
+             }
+         }
+     });
+ }
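+ // The callback above consumes a stream of typed chunks; a minimal happy-path
+ // sequence, inferred from the branches it handles (field values invented),
+ // might look like:
+ //   { type: 'delta', delta: 'Hello' }
+ //   { type: 'delta', delta: ' world' }
+ //   { type: 'response', id: 'msg_1', conversationId: 'conv_1', message: 'Hello world', actions: [] }
+ //   { type: 'end', conversationId: 'conv_1' }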
+ /**
+  * Handle AI Proxy commands (chat, models, etc.)
+  */
+ async function handleAiProxy(args, options, token, baseUrl, jsonOutput, debug = false, profile) {
+     try {
+         const client = await api_1.ApiClient.create(token, baseUrl, debug, profile);
+         if (args.length === 0) {
+             throw new Error('AI proxy requires a subcommand (chat, models, info)');
+         }
+         const proxyCommand = args[0];
+         switch (proxyCommand) {
+             case 'chat':
+                 await handleProxyChat(args.slice(1), options, client, jsonOutput);
+                 break;
+             case 'models':
+                 await handleProxyModels(options, client, jsonOutput);
+                 break;
+             case 'info':
+                 await handleProxyInfo(options, client, jsonOutput);
+                 break;
+             default:
+                 throw new Error(`Unknown proxy command: ${proxyCommand}. Available: chat, models, info`);
+         }
+     }
+     catch (err) {
+         if (jsonOutput) {
+             console.log(JSON.stringify({ success: false, error: err.message }));
+         }
+         else {
+             if (err.message.includes('401') || err.message.includes('Token not defined')) {
+                 console.log((0, colors_1.error)('✗ Authentication required'));
+                 console.log('Please run: carto auth login');
+             }
+             else {
+                 console.log((0, colors_1.error)('✗ ' + err.message));
+             }
+         }
+         process.exit(1);
+     }
+ }
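+ // Hypothetical subcommand usage (placeholders; model name and message are
+ // invented, command shape follows the error strings above):
+ //   carto ai proxy models                       # list available models
+ //   carto ai proxy info                         # show connection details
+ //   carto ai proxy chat "Hi" --model <name>     # one-shot chat completion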
+ /**
+  * Handle proxy chat command (OpenAI-compatible chat completions)
+  */
+ async function handleProxyChat(args, options, client, jsonOutput) {
+     // Get message from args, stdin, or file
+     const message = await getMessageFromInput(args[0], options);
+     if (!message) {
+         throw new Error('No message provided. Use: carto ai proxy chat "message" or --file <path> or pipe via stdin');
+     }
+     // Validate model is provided
+     if (!options.model) {
+         throw new Error('Model is required. Use --model <name>. See available models with: carto ai proxy models');
+     }
+     // Build OpenAI chat completion request
+     const requestBody = {
+         model: options.model,
+         messages: []
+     };
+     // Add system message if provided
+     if (options.system) {
+         requestBody.messages.push({
+             role: 'system',
+             content: options.system
+         });
+     }
+     // Add user message
+     requestBody.messages.push({
+         role: 'user',
+         content: message
+     });
+     // Add optional parameters
+     if (options.temperature !== undefined) {
+         requestBody.temperature = parseFloat(options.temperature);
+     }
+     if (options.maxTokens !== undefined) {
+         requestBody.max_tokens = parseInt(options.maxTokens, 10);
+     }
+     if (options.topP !== undefined) {
+         requestBody.top_p = parseFloat(options.topP);
+     }
+     if (!jsonOutput) {
+         console.log((0, colors_1.dim)(`Model: ${options.model}`));
+         console.log((0, colors_1.bold)('\nAssistant:\n'));
+     }
+     // Call LiteLLM proxy
+     const response = await client.postLiteLLMProxy('/v1/chat/completions', requestBody);
+     if (jsonOutput) {
+         console.log(JSON.stringify(response));
+     }
+     else {
+         // Pretty-print response
+         if (response.choices && response.choices.length > 0) {
+             const content = response.choices[0].message?.content || '';
+             console.log(content);
+             console.log();
+             console.log((0, colors_1.dim)(`Tokens: ${response.usage?.total_tokens || 'N/A'} (prompt: ${response.usage?.prompt_tokens || 'N/A'}, completion: ${response.usage?.completion_tokens || 'N/A'})`));
+         }
+         else {
+             console.log((0, colors_1.error)('No response from model'));
+         }
+     }
+ }
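+ // For a run with options { model, system, temperature: '0.2' } (CLI flag names
+ // are assumed to mirror these option fields), the body posted to
+ // /v1/chat/completions would be shaped like (values are placeholders):
+ //   {
+ //     "model": "<name>",
+ //     "messages": [
+ //       { "role": "system", "content": "<system prompt>" },
+ //       { "role": "user", "content": "<message>" }
+ //     ],
+ //     "temperature": 0.2
+ //   }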
+ /**
+  * Handle proxy models command (list available models)
+  */
+ async function handleProxyModels(options, client, jsonOutput) {
+     const response = await client.getLiteLLMProxy('/v1/models');
+     if (jsonOutput) {
+         console.log(JSON.stringify(response));
+     }
+     else {
+         // Pretty-print models list
+         if (response.data && Array.isArray(response.data)) {
+             console.log((0, colors_1.bold)('\nAvailable Models:\n'));
+             response.data.forEach((model) => {
+                 console.log((0, colors_1.bold)(' • ' + model.id));
+                 if (model.owned_by) {
+                     console.log((0, colors_1.dim)(` Owner: ${model.owned_by}`));
+                 }
+                 if (model.created) {
+                     const date = new Date(model.created * 1000).toLocaleString();
+                     console.log((0, colors_1.dim)(` Created: ${date}`));
+                 }
+                 console.log();
+             });
+             console.log((0, colors_1.dim)(`Total: ${response.data.length} model(s)`));
+         }
+         else {
+             console.log((0, colors_1.error)('No models found'));
+         }
+     }
+ }
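+ // The pretty-printer above assumes the OpenAI-compatible model list shape,
+ // e.g. (entries invented for illustration):
+ //   { "object": "list", "data": [
+ //       { "id": "<model-id>", "owned_by": "<provider>", "created": 1715367049 } ] }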
+ /**
+  * Handle proxy info command (show connection information)
+  */
+ async function handleProxyInfo(options, client, jsonOutput) {
+     if (jsonOutput) {
+         console.log(JSON.stringify({
+             name: 'CARTO LiteLLM',
+             apiMode: 'openai-compatible',
+             apiHost: client.litellmUrl,
+             apiBase: `${client.litellmUrl}/v1`,
+             apiKey: client['token'] || '(not set)',
+             endpoints: {
+                 chat: '/v1/chat/completions',
+                 completions: '/v1/completions',
+                 embeddings: '/v1/embeddings',
+                 models: '/v1/models'
+             }
+         }));
+     }
+     else {
+         console.log((0, colors_1.bold)('\nCARTO LiteLLM Proxy Configuration\n'));
+         console.log((0, colors_1.bold)('API Mode: ') + 'OpenAI Compatible');
+         console.log((0, colors_1.bold)('API Host: ') + client.litellmUrl);
+         console.log((0, colors_1.bold)('API Base URL: ') + `${client.litellmUrl}/v1`);
+         console.log((0, colors_1.bold)('API Key: ') + (client['token'] || '(not set)'));
+         console.log();
+         console.log((0, colors_1.bold)('Endpoints:'));
+         console.log(' Chat completions: ' + (0, colors_1.dim)('/v1/chat/completions'));
+         console.log(' Completions: ' + (0, colors_1.dim)('/v1/completions'));
+         console.log(' Embeddings: ' + (0, colors_1.dim)('/v1/embeddings'));
+         console.log(' Models: ' + (0, colors_1.dim)('/v1/models'));
+         console.log();
+         console.log((0, colors_1.dim)('Use this information to configure OpenAI-compatible tools and MCP servers.'));
+         console.log((0, colors_1.dim)('To list available models, run: carto ai proxy models'));
+     }
+ }
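+ // The printed apiBase/apiKey can configure any OpenAI-compatible client; a
+ // minimal sketch with the official `openai` npm package (not a dependency of
+ // this module; litellmUrl and token are placeholders) would be:
+ //   const OpenAI = require('openai');
+ //   const openai = new OpenAI({ baseURL: `${litellmUrl}/v1`, apiKey: token });
+ //   await openai.chat.completions.create({ model: '<name>', messages: [{ role: 'user', content: 'Hi' }] });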
+ /**
+  * Get message from various input sources (arg, file, stdin)
+  */
+ async function getMessageFromInput(message, options) {
+     let msg = message;
+     // Priority 1: --file flag (overrides a positional message)
+     if (options.file) {
+         try {
+             msg = (0, fs_1.readFileSync)(options.file, 'utf-8').trim();
+         }
+         catch (err) {
+             throw new Error(`Failed to read file: ${err.message}`);
+         }
+     }
+     // Priority 2: the positional message argument (already in msg)
+     // Priority 3: piped stdin, only when neither a file nor a message was given
+     if (!msg && !process.stdin.isTTY) {
+         const chunks = [];
+         for await (const chunk of process.stdin) {
+             chunks.push(chunk);
+         }
+         msg = Buffer.concat(chunks).toString('utf-8').trim();
+     }
+     return msg;
+ }
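+ // Input precedence in practice (illustrative shell usage; file and model
+ // names are placeholders):
+ //   carto ai proxy chat --file prompt.txt --model <name>        # file wins
+ //   carto ai proxy chat "inline message" --model <name>         # then the argument
+ //   echo "piped message" | carto ai proxy chat --model <name>   # stdin as fallback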