pokt-cli 1.0.11 → 1.0.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/bin/pokt.js CHANGED
@@ -124,7 +124,7 @@ async function showMenu() {
124
124
  }
125
125
  }
126
126
  async function handleModelsMenu(providerFilter) {
127
- const { config, getEffectiveActiveModel, PROVIDER_LABELS, ALL_PROVIDERS } = await import('../config.js');
127
+ const { config, getEffectiveActiveModel, PROVIDER_LABELS, ALL_PROVIDERS, getOllamaCloudApiKey } = await import('../config.js');
128
128
  let allModels = config.get('registeredModels');
129
129
  if (!Array.isArray(allModels)) {
130
130
  const defaults = [
@@ -157,8 +157,20 @@ async function handleModelsMenu(providerFilter) {
157
157
  return handleAddModelsMenu();
158
158
  return handleModelsMenu(cat.category);
159
159
  }
160
- // Segunda tela: listar modelos da categoria escolhida
161
- const providerModels = allModels.filter((m) => m.provider === providerFilter);
160
+ // Segunda tela: listar modelos da categoria escolhida — sincroniza da API se a lista estiver vazia e houver credencial (ou API pública OpenRouter)
161
+ let providerModels = allModels.filter((m) => m.provider === providerFilter);
162
+ if (providerModels.length === 0 && providerFilter === 'openrouter') {
163
+ const { modelsCommand } = await import('../commands/models.js');
164
+ await modelsCommand.handler({ action: 'fetch-openrouter' });
165
+ allModels = config.get('registeredModels');
166
+ providerModels = allModels.filter((m) => m.provider === providerFilter);
167
+ }
168
+ if (providerModels.length === 0 && providerFilter === 'ollama-cloud' && getOllamaCloudApiKey()) {
169
+ const { modelsCommand } = await import('../commands/models.js');
170
+ await modelsCommand.handler({ action: 'fetch-ollama-cloud' });
171
+ allModels = config.get('registeredModels');
172
+ providerModels = allModels.filter((m) => m.provider === providerFilter);
173
+ }
162
174
  const label = PROVIDER_LABELS[providerFilter] || providerFilter;
163
175
  const choices = [
164
176
  ...providerModels.map((m, i) => ({
@@ -1,12 +1,12 @@
1
1
  import OpenAI from 'openai';
2
- import { getPoktApiBaseUrl, getProPortalBaseUrl, getOpenAIApiKey, getGrokApiKey, getOpenRouterToken, getGeminiApiKey, getOllamaCloudApiKey, getOllamaBaseUrl, getPoktToken, } from '../config.js';
2
+ import { getPoktApiBaseUrl, getOpenAIApiKey, getGrokApiKey, getOpenRouterToken, getGeminiApiKey, getOllamaCloudApiKey, getOllamaBaseUrl, getPoktToken, } from '../config.js';
3
3
  export async function getClient(modelConfig) {
4
4
  // openai / grok / … → hosts oficiais abaixo. Só `controller` usa getPoktApiBaseUrl (token Pokt, não é api.openai.com).
5
5
  if (modelConfig.provider === 'controller') {
6
6
  const baseUrl = getPoktApiBaseUrl();
7
7
  const token = getPoktToken();
8
8
  if (!token) {
9
- throw new Error(`Token Pokt não configurado. Painel: ${getProPortalBaseUrl()} — pokt config set-pokt-token -v <token>`);
9
+ throw new Error('Token Pokt não configurado. Use: pokt config set-pokt-token -v <token>');
10
10
  }
11
11
  return new OpenAI({
12
12
  baseURL: `${baseUrl}/api/v1`,
package/dist/chat/loop.js CHANGED
@@ -11,6 +11,7 @@ import { connectMcpServer, getAllMcpToolsOpenAI, callMcpTool, isMcpTool, disconn
11
11
  import { getMergedMcpServers } from '../mcp/project-mcp.js';
12
12
  import { runMcpFromBashMarkdown, stripExecutedStyleMcpBashBlocks, tryAutoMcpForListDatabases, } from './mcp-from-text.js';
13
13
  import { slimToolsForUpstreamPayload } from './slim-tools.js';
14
+ import { emptyUsageAccumulator, mergeCompletionUsage, sendCliUsageTelemetryFireAndForget, } from './telemetry.js';
14
15
  /** Base do system prompt; a lista de ferramentas MCP é anexada em runtime quando houver servidores. */
15
16
  const SYSTEM_PROMPT_BASE = `You are Pokt CLI, an elite AI Software Engineer.
16
17
  Your goal is to help the user build, fix, and maintain software projects with high quality.
@@ -21,7 +22,7 @@ CORE CAPABILITIES:
21
22
  3. **Problem Solving**: You analyze errors and propose/apply fixes.
22
23
 
23
24
  FUNCTION CALLING (native tools — USE THEM):
24
- - This chat uses OpenAI-style **tool_calls**. You MUST use the provided functions for actions: \`read_file\`, \`search_replace\`, \`write_file\`, \`run_command\`, \`list_files\`, etc., and any tool whose name starts with \`mcp_\`.
25
+ - This chat uses OpenAI-style **tool_calls**. You MUST use the provided functions for actions: \`read_file\`, \`search_replace\`, \`write_file\`, \`run_command\`, \`list_files\`, \`delete_file\`, \`delete_directory\`, etc., and any tool whose name starts with \`mcp_\`.
25
26
  - **Edits vs rewrites**: For modifying existing files, prefer \`search_replace\` (old_string, new_string, path) — targeted, minimal changes. Use \`write_file\` only for new files or full rewrites. Always call \`read_file\` first to get exact content before \`search_replace\`.
26
27
  - **Avoid** shell lines like \`mcp_Something_tool "..."\` in markdown — the CLI may run them as **fallback** if they match a registered tool, but **native tool_calls are always better** (correct args, one round-trip).
27
28
  - For databases/APIs exposed via MCP, call the real \`mcp_*\` tools with the correct JSON arguments (e.g. Neon: run SQL via the server's SQL tool, not a invented command name).
@@ -41,6 +42,7 @@ GUIDELINES:
41
42
  - You have full access to the current terminal via \`run_command\` for \`npm install\`, \`tsc\`, etc. You may also emit **scripts executáveis** (Node, Python, npx, \`psql\`, etc.) via \`run_command\` when MCP não estiver disponível ou o usuário pedir código para rodar localmente.
42
43
  - **MCP tools**: Tools named \`mcp_<ServerName>_<toolName>\` connect to external services. Prefer them when they match the task.
43
44
  - **Never** return a completely empty assistant message: always include a short natural-language answer and/or use tool_calls. After tools run, summarize results for the user in Portuguese.
45
+ - **Next.js app/pages conflict**: When build fails with "Conflicting app and page file" (pages/index.js vs app/page.tsx), resolve by calling \`delete_directory("app")\` to remove the app folder and keep only pages. Prefer tool_calls over shell blocks. On Windows, \`delete_directory\` is cross-platform and does not require rmdir/cmd.
44
46
  - **After MCP/SQL succeeds**: give a **short** confirmation plus a **markdown table** (or bullet list) for rows/columns — do **not** repeat bash blocks with mcp_* lines, raw tool JSON, or invented shell commands; the CLI already executed native tool_calls.
45
47
  - Be extremely concise in your explanations.
46
48
  - The current working directory is: ${process.cwd()}
@@ -412,7 +414,7 @@ Atalhos:
412
414
  content: `Current Project Structure:\n${projectStructure}\n\n${structureHint}`,
413
415
  });
414
416
  }
415
- await processLLMResponse(client, activeModel.id, messages, toolsForApi, sessionFileCtx);
417
+ await processLLMResponse(client, activeModel, messages, toolsForApi, sessionFileCtx);
416
418
  // Atualiza auto-save após resposta
417
419
  saveAuto(messages);
418
420
  // Captura última resposta do assistente para /copy (melhor esforço)
@@ -458,6 +460,61 @@ const CODE_EXT = /\.(py|js|ts|tsx|jsx|html|css|json|md|txt|java|go|rs|c|cpp|rb|p
458
460
  function isShellLikeBlock(lang) {
459
461
  return /^(bash|sh|shell|zsh|powershell|ps1|cmd|console)$/i.test(lang);
460
462
  }
463
/** Heuristic: true when a shell block deletes the `app` or `.next` directory — i.e. it looks like a Next.js app/pages conflict fix (rmdir / Remove-Item / rm -rf). */
function looksLikeNextJsConflictFix(code) {
    const text = code.trim().toLowerCase();
    // MCP invocations are handled by a dedicated fallback path, never here.
    if (/mcp_[a-z0-9_-]+/i.test(text))
        return false;
    // `text` is already lowercased, so plain (case-sensitive) patterns suffice.
    const removesAppDir = /\brmdir\b.*\bapp\b/.test(text)
        || /\bremove-item\b.*\bapp\b/.test(text)
        || /\brm\s+-rf?\s+app\b/.test(text);
    const removesNextCache = /\brmdir\b.*\.next\b/.test(text)
        || /\bremove-item\b.*\.next\b/.test(text)
        || /\brm\s+-rf?\s+\.next\b/.test(text);
    return removesAppDir || removesNextCache;
}
471
/**
 * Executes shell blocks (cmd, powershell, bash, sh) in the assistant's markdown
 * that look like a Next.js app/pages conflict fix (see looksLikeNextJsConflictFix).
 * Fallback for when the AI emits rmdir/Remove-Item in markdown instead of calling
 * the delete_directory tool.
 *
 * @param {string} content - Raw assistant markdown possibly containing fenced shell blocks.
 * @returns {Promise<{executed: boolean, displayContent: string}>} whether anything ran,
 *   plus the content with executed blocks replaced by a short placeholder for display.
 */
async function executeShellBlocksFromContent(content) {
    // Stateful /g regex: codeBlockRe.exec advances lastIndex across the while loop.
    const codeBlockRe = /```(cmd|powershell|ps1|bash|sh)\s*\n([\s\S]*?)```/gi;
    const executedBlocks = [];
    let executed = false;
    let m;
    const isWin = process.platform === 'win32';
    while ((m = codeBlockRe.exec(content)) !== null) {
        const lang = (m[1] || '').toLowerCase();
        const code = (m[2] || '').replace(/\r\n/g, '\n').trim();
        // Only blocks matching the Next.js-conflict heuristic are eligible.
        if (!code || !looksLikeNextJsConflictFix(code))
            continue;
        const lines = code.split('\n').map((l) => l.trim()).filter(Boolean);
        if (lines.length === 0)
            continue;
        for (const line of lines) {
            // MCP-style lines are handled by a separate fallback — skip them here.
            if (/mcp_[a-z0-9_-]+/i.test(line))
                continue;
            // On Windows, translate `rm -rf X` from bash/sh blocks into `cmd /c rmdir /s /q X`.
            const cmd = isWin && /^(bash|sh)$/.test(lang)
                ? `cmd /c ${line.replace(/rm\s+-rf?\s+(\S+)/g, 'rmdir /s /q $1')}`
                : line;
            try {
                if (toolsVerbose())
                    console.log(ui.warn(`\n[Fallback] Executando: ${cmd}`));
                else
                    console.log(ui.dim(`\n[Fallback] Executando comando: ${cmd.slice(0, 60)}${cmd.length > 60 ? '…' : ''}`));
                await executeTool('run_command', JSON.stringify({ command: cmd }));
                executed = true;
                // Record the block's span in the ORIGINAL content for later splicing.
                executedBlocks.push({ start: m.index, end: m.index + m[0].length, command: cmd });
                // NOTE(review): at most one command is run per block — presumably intentional
                // (the first eligible line wins); confirm if multi-line blocks should run fully.
                break;
            }
            catch {
                // Best-effort: a failed command is ignored and the next line is tried.
            }
        }
    }
    // Replace executed blocks with placeholders, iterating backwards so earlier
    // indices recorded against the original content stay valid.
    let displayContent = content;
    for (let i = executedBlocks.length - 1; i >= 0; i--) {
        const { start, end, command } = executedBlocks[i];
        const placeholder = `\n${ui.dim('[Comando executado: ' + command.slice(0, 50) + (command.length > 50 ? '…' : '') + ']')}\n`;
        displayContent = displayContent.substring(0, start) + placeholder + displayContent.substring(end);
    }
    return { executed, displayContent };
}
461
518
  /**
462
519
  * Caminhos que o fallback nunca deve sobrescrever (config MCP, env, lockfile).
463
520
  */
@@ -665,16 +722,19 @@ async function applyCodeBlocksFromContent(content, sessionFileCtx) {
665
722
  }
666
723
  return { applied, displayContent };
667
724
  }
668
- async function createCompletionWithRetry(client, modelId, messages, toolsList, toolChoice = 'auto') {
725
+ async function createCompletionWithRetry(client, modelId, messages, toolsList, toolChoice = 'auto', usageAcc) {
669
726
  let lastError;
670
727
  for (let attempt = 0; attempt <= MAX_RETRIES; attempt++) {
671
728
  try {
672
- return await client.chat.completions.create({
729
+ const out = await client.chat.completions.create({
673
730
  model: modelId,
674
731
  messages,
675
732
  tools: toolsList,
676
733
  tool_choice: toolChoice,
677
734
  });
735
+ if (usageAcc)
736
+ mergeCompletionUsage(usageAcc, out);
737
+ return out;
678
738
  }
679
739
  catch (err) {
680
740
  lastError = err;
@@ -692,7 +752,7 @@ async function createCompletionWithRetry(client, modelId, messages, toolsList, t
692
752
  /**
693
753
  * Executa todas as rodadas de tool_calls até o modelo devolver mensagem sem ferramentas.
694
754
  */
695
- async function drainToolCalls(client, modelId, messages, toolsList, startMessage, spinner, sessionFileCtx) {
755
+ async function drainToolCalls(client, modelId, messages, toolsList, startMessage, spinner, sessionFileCtx, usageAcc) {
696
756
  let message = startMessage;
697
757
  let writeFileExecuted = false;
698
758
  let anyToolExecuted = false;
@@ -737,7 +797,7 @@ async function drainToolCalls(client, modelId, messages, toolsList, startMessage
737
797
  });
738
798
  }
739
799
  spinner.start('Thinking...');
740
- const completion = await createCompletionWithRetry(client, modelId, messages, toolsList, 'auto');
800
+ const completion = await createCompletionWithRetry(client, modelId, messages, toolsList, 'auto', usageAcc);
741
801
  spinner.stop();
742
802
  const nextMsg = getFirstChoiceMessage(completion);
743
803
  if (!nextMsg) {
@@ -747,20 +807,36 @@ async function drainToolCalls(client, modelId, messages, toolsList, startMessage
747
807
  }
748
808
  return { message, writeFileExecuted, anyToolExecuted, mcpToolExecuted };
749
809
  }
750
- async function processLLMResponse(client, modelId, messages, toolsList, sessionFileCtx) {
810
/** Emits one usage-telemetry event for a finished chat turn; skipped entirely when no tokens were consumed. */
function emitCliTelemetryForTurn(modelConfig, acc) {
    const totalTokens = acc.prompt + acc.completion;
    if (totalTokens <= 0)
        return;
    sendCliUsageTelemetryFireAndForget({
        provider: modelConfig.provider,
        model: modelConfig.id,
        promptTokens: acc.prompt,
        completionTokens: acc.completion,
        totalTokens,
        cost: acc.cost,
    });
}
823
+ async function processLLMResponse(client, modelConfig, messages, toolsList, sessionFileCtx) {
751
824
  const spinner = ora('Thinking...').start();
825
+ const modelId = modelConfig.id;
826
+ const usageAcc = emptyUsageAccumulator();
752
827
  try {
753
- let completion = await createCompletionWithRetry(client, modelId, messages, toolsList);
828
+ let completion = await createCompletionWithRetry(client, modelId, messages, toolsList, 'auto', usageAcc);
754
829
  const first = getFirstChoiceMessage(completion);
755
830
  if (!first) {
756
831
  spinner.stop();
757
832
  console.log(ui.error('\nResposta da API sem choices/mensagem. Tente novamente ou outro modelo.'));
758
833
  messages.pop();
834
+ emitCliTelemetryForTurn(modelConfig, usageAcc);
759
835
  return;
760
836
  }
761
837
  let message = first;
762
838
  spinner.stop();
763
- const drained = await drainToolCalls(client, modelId, messages, toolsList, message, spinner, sessionFileCtx);
839
+ const drained = await drainToolCalls(client, modelId, messages, toolsList, message, spinner, sessionFileCtx, usageAcc);
764
840
  message = drained.message;
765
841
  let writeFileExecutedThisTurn = drained.writeFileExecuted;
766
842
  let anyToolExecutedThisTurn = drained.anyToolExecuted;
@@ -796,7 +872,7 @@ async function processLLMResponse(client, modelId, messages, toolsList, sessionF
796
872
  let recoveryHint = '[Pokt — recuperação] A última resposta veio vazia. Responda em português. Use tool_calls. Nunca devolva corpo vazio. ';
797
873
  if (isModificationRequest) {
798
874
  recoveryHint +=
799
- 'O usuário pediu modificação em projeto existente: chame list_files para ver arquivos, read_file no arquivo relevante, depois search_replace ou write_file no MESMO caminho. NÃO crie projeto novo. ';
875
+ 'O usuário pediu modificação em projeto existente: chame list_files para ver arquivos, read_file no arquivo relevante, depois search_replace ou write_file no MESMO caminho. NÃO crie projeto novo. Para conflito Next.js app/pages: delete_directory("app"). ';
800
876
  }
801
877
  recoveryHint +=
802
878
  'Para bancos PostgreSQL: mcp_*_run_sql com {"sql":"SELECT datname FROM pg_database WHERE datistemplate = false ORDER BY 1;"}. ' +
@@ -805,19 +881,20 @@ async function processLLMResponse(client, modelId, messages, toolsList, sessionF
805
881
  spinner.start('Recuperando resposta vazia…');
806
882
  let recovery;
807
883
  try {
808
- recovery = await createCompletionWithRetry(client, modelId, messages, toolsList, 'required');
884
+ recovery = await createCompletionWithRetry(client, modelId, messages, toolsList, 'required', usageAcc);
809
885
  }
810
886
  catch {
811
- recovery = await createCompletionWithRetry(client, modelId, messages, toolsList, 'auto');
887
+ recovery = await createCompletionWithRetry(client, modelId, messages, toolsList, 'auto', usageAcc);
812
888
  }
813
889
  spinner.stop();
814
890
  const recoveryFirst = getFirstChoiceMessage(recovery);
815
891
  if (!recoveryFirst) {
816
892
  console.log(ui.error('\nResposta da API sem choices na recuperação. Tente outro modelo.'));
817
893
  messages.pop();
894
+ emitCliTelemetryForTurn(modelConfig, usageAcc);
818
895
  return;
819
896
  }
820
- const drained2 = await drainToolCalls(client, modelId, messages, toolsList, recoveryFirst, spinner, sessionFileCtx);
897
+ const drained2 = await drainToolCalls(client, modelId, messages, toolsList, recoveryFirst, spinner, sessionFileCtx, usageAcc);
821
898
  message = drained2.message;
822
899
  writeFileExecutedThisTurn = writeFileExecutedThisTurn || drained2.writeFileExecuted;
823
900
  anyToolExecutedThisTurn = anyToolExecutedThisTurn || drained2.anyToolExecuted;
@@ -851,6 +928,10 @@ async function processLLMResponse(client, modelId, messages, toolsList, sessionF
851
928
  finalContent = autoDb;
852
929
  }
853
930
  }
931
+ // Executar blocos shell que resolvem conflito Next.js (rmdir app, etc.)
932
+ const shellResult = await executeShellBlocksFromContent(contentStr);
933
+ if (shellResult.executed)
934
+ contentStr = shellResult.displayContent;
854
935
  // Quando a API não executa tools, tentar aplicar blocos de código da resposta
855
936
  if (!writeFileExecutedThisTurn) {
856
937
  let result = await applyCodeBlocksFromContent(contentStr, sessionFileCtx);
@@ -859,10 +940,10 @@ async function processLLMResponse(client, modelId, messages, toolsList, sessionF
859
940
  || (/call\s+(read_file|search_replace|write_file)/i.test(contentStr) && contentStr.length < 400);
860
941
  if (!result.applied && looksLikeToolIntentOnly) {
861
942
  messages.push({ role: 'assistant', content: rawContent ?? contentStr });
862
- const followUpSystem = `You replied as if tools would run in text only. Use tool_calls for read_file/search_replace/write_file/run_command/mcp_* when possible. Prefer search_replace for edits. If you must output a file as markdown only: mention the filename then a full \`\`\`lang\`\`\` block — never use fake shell lines like mcp_Foo_bar. Do that now for the user's last request.`;
943
+ const followUpSystem = `You replied as if tools would run in text only. Use tool_calls for read_file/search_replace/write_file/run_command/delete_directory/delete_file/mcp_* when possible. Prefer search_replace for edits. For Next.js app/pages conflict: call delete_directory("app"). If you must output a file as markdown only: mention the filename then a full \`\`\`lang\`\`\` block — never use fake shell lines like mcp_Foo_bar. Do that now for the user's last request.`;
863
944
  messages.push({ role: 'system', content: followUpSystem });
864
945
  spinner.start('Getting code...');
865
- const followUp = await createCompletionWithRetry(client, modelId, messages, toolsList);
946
+ const followUp = await createCompletionWithRetry(client, modelId, messages, toolsList, 'auto', usageAcc);
866
947
  spinner.stop();
867
948
  const followUpMsg = getFirstChoiceMessage(followUp);
868
949
  if (!followUpMsg) {
@@ -914,9 +995,11 @@ async function processLLMResponse(client, modelId, messages, toolsList, sessionF
914
995
  messages.push({ role: 'assistant', content: '' });
915
996
  }
916
997
  }
998
+ emitCliTelemetryForTurn(modelConfig, usageAcc);
917
999
  }
918
1000
  catch (error) {
919
1001
  spinner.stop();
1002
+ emitCliTelemetryForTurn(modelConfig, usageAcc);
920
1003
  const status = getStatusCode(error);
921
1004
  if (status === 429) {
922
1005
  console.log(ui.error('\nLimite de taxa (429). O provedor está te limitando por volume ou quota.'));
@@ -0,0 +1,18 @@
1
/** Sends usage (tokens, model, provider) to Pokt_CLI_Back — fire-and-forget: does not block the chat; failures are ignored. */
export declare function sendCliUsageTelemetryFireAndForget(params: {
    provider: string;
    model: string;
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
    cost: number | null;
}): void;
/** Running token/cost totals accumulated over one chat turn. */
export type ChatUsageAccumulator = {
    prompt: number;
    completion: number;
    cost: number | null;
};
/** Returns a zeroed accumulator; `cost` starts as null until a provider reports one. */
export declare function emptyUsageAccumulator(): ChatUsageAccumulator;
/** Folds one completion's `usage` payload into the accumulator (tolerates missing/unknown shapes). */
export declare function mergeCompletionUsage(acc: ChatUsageAccumulator, completion: {
    usage?: unknown;
}): void;
@@ -0,0 +1,62 @@
1
+ import { readFileSync } from 'fs';
2
+ import { dirname, join } from 'path';
3
+ import { fileURLToPath } from 'url';
4
+ import { getOrCreateCliInstallId, getCliHostLabel, getPoktApiBaseUrl, getPoktToken, isCliTelemetryDisabled, } from '../config.js';
5
// Module-level cache so package.json is read from disk at most once per process.
let cachedVersion = null;
/** Reads this CLI's own version from its package.json (two directories above this module); returns 'unknown' on any failure. */
function readCliVersion() {
    if (cachedVersion)
        return cachedVersion;
    try {
        const moduleDir = dirname(fileURLToPath(import.meta.url));
        const raw = readFileSync(join(moduleDir, '..', '..', 'package.json'), 'utf8');
        const pkg = JSON.parse(raw);
        // Cap the reported version defensively at 32 characters.
        cachedVersion = typeof pkg.version === 'string' ? pkg.version.slice(0, 32) : 'unknown';
    }
    catch {
        cachedVersion = 'unknown';
    }
    return cachedVersion;
}
21
/** Sends usage (tokens, model, provider) to Pokt_CLI_Back — fire-and-forget: never blocks the chat; failures are ignored. */
export function sendCliUsageTelemetryFireAndForget(params) {
    if (isCliTelemetryDisabled())
        return;
    // Controller traffic is metered server-side; no client-side report needed.
    if (params.provider === 'controller')
        return;
    const summedTokens = params.promptTokens + params.completionTokens;
    if (summedTokens <= 0 && params.totalTokens <= 0)
        return;
    const endpoint = `${getPoktApiBaseUrl()}/api/v1/telemetry/usage`;
    const token = getPoktToken();
    const headers = token
        ? { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` }
        : { 'Content-Type': 'application/json' };
    const payload = {
        installId: getOrCreateCliInstallId(),
        hostLabel: getCliHostLabel(),
        provider: params.provider,
        model: params.model,
        prompt_tokens: params.promptTokens,
        completion_tokens: params.completionTokens,
        // Prefer the provider-reported total; fall back to our own sum.
        total_tokens: params.totalTokens > 0 ? params.totalTokens : summedTokens,
        cost: params.cost,
        cli_version: readCliVersion(),
    };
    // Deliberately not awaited: telemetry must never delay or break the chat loop.
    void fetch(endpoint, { method: 'POST', headers, body: JSON.stringify(payload) }).catch(() => { });
}
50
/** Returns a fresh per-turn usage accumulator; `cost` stays null until a provider reports one. */
export function emptyUsageAccumulator() {
    return {
        prompt: 0,
        completion: 0,
        cost: null,
    };
}
53
/**
 * Folds one completion's `usage` payload into the accumulator.
 * Missing or non-numeric token counts contribute 0; `cost` is summed only when
 * it is present and coerces to a number.
 */
export function mergeCompletionUsage(acc, completion) {
    const usage = completion.usage;
    if (!usage)
        return;
    acc.prompt += Number(usage.prompt_tokens) || 0;
    acc.completion += Number(usage.completion_tokens) || 0;
    const numericCost = Number(usage.cost);
    if (usage.cost != null && !Number.isNaN(numericCost)) {
        // First real cost flips the accumulator from null to a running sum.
        acc.cost = (acc.cost ?? 0) + numericCost;
    }
}
@@ -247,6 +247,34 @@ export const tools = [
247
247
  }
248
248
  }
249
249
  }
250
+ },
251
+ {
252
+ type: 'function',
253
+ function: {
254
+ name: 'delete_file',
255
+ description: 'Deletes a single file. Use for removing files that cause conflicts (e.g. app/page.tsx in Next.js).',
256
+ parameters: {
257
+ type: 'object',
258
+ properties: {
259
+ path: { type: 'string', description: 'Relative path to the file to delete' }
260
+ },
261
+ required: ['path']
262
+ }
263
+ }
264
+ },
265
+ {
266
+ type: 'function',
267
+ function: {
268
+ name: 'delete_directory',
269
+ description: 'Deletes a directory and all its contents recursively. Use to resolve Next.js app/pages conflict: delete_directory("app") removes the app folder so only pages/ remains. Cross-platform, no shell needed.',
270
+ parameters: {
271
+ type: 'object',
272
+ properties: {
273
+ path: { type: 'string', description: 'Relative path to the directory to delete (e.g. "app" for Next.js app folder)' }
274
+ },
275
+ required: ['path']
276
+ }
277
+ }
250
278
  }
251
279
  ];
252
280
  export async function executeTool(name, argsStr) {
@@ -331,6 +359,45 @@ export async function executeTool(name, argsStr) {
331
359
  walk(root);
332
360
  return files.join('\n') || 'No files found.';
333
361
  }
362
+ /** Garante que o caminho resolvido está dentro do cwd (evita path traversal) */
363
+ function ensureWithinCwd(resolved) {
364
+ const cwd = process.cwd();
365
+ const normCwd = path.resolve(cwd);
366
+ const normResolved = path.resolve(resolved);
367
+ return normResolved.startsWith(normCwd + path.sep) || normResolved === normCwd;
368
+ }
369
    // Tool: delete_file — removes a single file, confined to the project directory.
    if (name === 'delete_file') {
        // Resolve relative to cwd, then refuse anything escaping the project root.
        const filePath = path.resolve(process.cwd(), args.path);
        if (!ensureWithinCwd(filePath)) {
            return `Error: Path "${args.path}" is outside the project directory. Refused for safety.`;
        }
        if (!fs.existsSync(filePath)) {
            return `Error: File not found: ${args.path}`;
        }
        const stat = fs.statSync(filePath);
        // Directories must go through delete_directory so recursive removal is explicit.
        if (stat.isDirectory()) {
            return `Error: ${args.path} is a directory. Use delete_directory to remove it.`;
        }
        fs.unlinkSync(filePath);
        console.log(chalk.blue.bold(`\n🗑️ Removido: ${path.relative(process.cwd(), filePath)}\n`));
        return `Successfully deleted ${args.path}`;
    }
    // Tool: delete_directory — recursively removes a directory, confined to the project directory.
    if (name === 'delete_directory') {
        const dirPath = path.resolve(process.cwd(), args.path);
        if (!ensureWithinCwd(dirPath)) {
            return `Error: Path "${args.path}" is outside the project directory. Refused for safety.`;
        }
        if (!fs.existsSync(dirPath)) {
            return `Error: Directory not found: ${args.path}`;
        }
        const stat = fs.statSync(dirPath);
        if (!stat.isDirectory()) {
            return `Error: ${args.path} is not a directory. Use delete_file to remove it.`;
        }
        // NOTE(review): no `force`/`maxRetries` options — on Windows a locked file in the
        // tree makes rmSync throw; presumably the enclosing catch reports it. Confirm intended.
        fs.rmSync(dirPath, { recursive: true });
        console.log(chalk.blue.bold(`\n🗑️ Pasta removida: ${path.relative(process.cwd(), dirPath)}\n`));
        return `Successfully deleted directory ${args.path}`;
    }
334
401
  return `Unknown tool: ${name}`;
335
402
  }
336
403
  catch (error) {
@@ -1,4 +1,4 @@
1
- import { getEffectiveActiveModel, getOpenAIApiKey, getGrokApiKey, getOpenRouterToken, getGeminiApiKey, getPoktToken, getProPortalBaseUrl, } from '../config.js';
1
+ import { getEffectiveActiveModel, getOpenAIApiKey, getGrokApiKey, getOpenRouterToken, getGeminiApiKey, getOllamaCloudApiKey, getPoktToken, } from '../config.js';
2
2
  import { startChatLoop } from '../chat/loop.js';
3
3
  import { ui } from '../ui.js';
4
4
  export const chatCommand = {
@@ -27,9 +27,13 @@ export const chatCommand = {
27
27
  console.log(ui.error('Gemini API key not set. Use: pokt config set-gemini -v <key>'));
28
28
  return;
29
29
  }
30
+ if (activeModel.provider === 'ollama-cloud' && !getOllamaCloudApiKey()) {
31
+ console.log(ui.error('Ollama Cloud API key not set. Use: pokt config set-ollama-cloud -v <key>'));
32
+ return;
33
+ }
30
34
  if (activeModel.provider === 'controller') {
31
35
  if (!getPoktToken()) {
32
- console.log(ui.error(`Pokt token not set. Painel: ${getProPortalBaseUrl()} — pokt config set-pokt-token -v <token>`));
36
+ console.log(ui.error('Pokt token not set. Use: pokt config set-pokt-token -v <token>'));
33
37
  return;
34
38
  }
35
39
  }
@@ -1,4 +1,4 @@
1
- import { config, getEffectiveActiveModel, PROVIDER_LABELS, getOpenAIApiKey, getGrokApiKey } from '../config.js';
1
+ import { config, getEffectiveActiveModel, PROVIDER_LABELS, getOpenAIApiKey, getGrokApiKey, getOpenRouterToken, getOllamaCloudApiKey } from '../config.js';
2
2
  import chalk from 'chalk';
3
3
  import ora from 'ora';
4
4
  import { ui } from '../ui.js';
@@ -101,7 +101,10 @@ export const modelsCommand = {
101
101
  if (action === 'fetch-openrouter') {
102
102
  const spinner = ora('Fetching OpenRouter models...').start();
103
103
  try {
104
- const response = await fetch('https://openrouter.ai/api/v1/models');
104
+ const orToken = getOpenRouterToken();
105
+ const response = await fetch('https://openrouter.ai/api/v1/models', {
106
+ headers: orToken ? { Authorization: `Bearer ${orToken}` } : undefined,
107
+ });
105
108
  if (!response.ok) {
106
109
  spinner.fail(ui.error(`Failed to fetch OpenRouter models: HTTP ${response.status}`));
107
110
  const body = await response.text();
@@ -149,7 +152,7 @@ export const modelsCommand = {
149
152
  return;
150
153
  }
151
154
  if (action === 'fetch-ollama-cloud') {
152
- const apiKey = config.get('ollamaCloudApiKey');
155
+ const apiKey = getOllamaCloudApiKey();
153
156
  if (!apiKey) {
154
157
  console.log(ui.error('Ollama Cloud API key not set. Run: pokt config set-ollama-cloud -v <key>'));
155
158
  console.log(ui.dim('Create keys at: https://ollama.com/settings/keys'));
@@ -1,4 +1,4 @@
1
- import { config, ALL_PROVIDERS, getOpenAIApiKey, getGrokApiKey, getOpenRouterToken, getGeminiApiKey, getOllamaCloudApiKey, getPoktToken, getProPortalBaseUrl, } from '../config.js';
1
+ import { config, ALL_PROVIDERS, getOpenAIApiKey, getGrokApiKey, getOpenRouterToken, getGeminiApiKey, getOllamaCloudApiKey, getPoktToken, } from '../config.js';
2
2
  import { ui } from '../ui.js';
3
3
  export const providerCommand = {
4
4
  command: 'provider use <provider>',
@@ -54,7 +54,7 @@ export const providerCommand = {
54
54
  return;
55
55
  }
56
56
  if (provider === 'controller' && !getPoktToken()) {
57
- console.log(ui.error(`Pokt token not set. Painel: ${getProPortalBaseUrl()} — pokt config set-pokt-token -v <token>`));
57
+ console.log(ui.error('Pokt token not set. Use: pokt config set-pokt-token -v <token>'));
58
58
  return;
59
59
  }
60
60
  if (provider === 'ollama-cloud' && !getOllamaCloudApiKey()) {
package/dist/config.d.ts CHANGED
@@ -41,6 +41,8 @@ interface AppConfig {
41
41
  /** Só compra de token / checkout — Vercel */
42
42
  tokenPurchaseBaseUrl: string;
43
43
  poktToken: string;
44
+ /** ID estável por instalação (telemetria de uso no Back) */
45
+ cliInstallId: string;
44
46
  registeredModels: ModelConfig[];
45
47
  activeModel: ModelConfig | null;
46
48
  mcpServers: McpServerConfig[];
@@ -54,6 +56,7 @@ export declare const env: {
54
56
  readonly ollamaBaseUrl: readonly ["OLLAMA_BASE_URL"];
55
57
  readonly ollamaCloudApiKey: readonly ["OLLAMA_CLOUD_API_KEY"];
56
58
  readonly poktToken: readonly ["POKT_TOKEN"];
59
+ readonly disableTelemetry: readonly ["POKT_DISABLE_TELEMETRY"];
57
60
  readonly poktApiBaseUrl: readonly ["POKT_API_BASE_URL"];
58
61
  /** Painel e URLs gerais (Railway) */
59
62
  readonly proPortalUrl: readonly ["POKT_PRO_PORTAL_URL", "POKT_CONTROLLER_PORTAL_URL"];
@@ -67,6 +70,11 @@ export declare function getGeminiApiKey(): string;
67
70
  export declare function getOllamaBaseUrl(): string;
68
71
  export declare function getOllamaCloudApiKey(): string;
69
72
  export declare function getPoktToken(): string;
73
+ /** UUID persistente por máquina/instalação (identifica uso no painel quando não há token Pokt). */
74
+ export declare function getOrCreateCliInstallId(): string;
75
+ /** Nome do PC (sanitizado) para exibição no log de uso. */
76
+ export declare function getCliHostLabel(): string;
77
+ export declare function isCliTelemetryDisabled(): boolean;
70
78
  /** Base da API só para provider `controller` (Bearer Pokt). OpenAI direto usa outro ramo no getClient. */
71
79
  export declare function getPoktApiBaseUrl(): string;
72
80
  /** Painel e links gerais (Railway), exceto compra de token — ver getTokenPurchaseUrl(). */
@@ -77,6 +85,8 @@ export declare function getTokenPurchaseUrl(): string;
77
85
  export declare const getControllerBaseUrl: typeof getPoktApiBaseUrl;
78
86
  /** URL aberta por `pokt pro` (comprar token) — Vercel por padrão. */
79
87
  export declare const getProPurchaseUrl: () => string;
80
- /** Prioridade: modelo ativo explícito Pokt (controller) se token setado OpenRouter Gemini Ollama Cloud Ollama local */
88
+ /** True se o modelo pode ser usado com as credenciais atuais (evita ficar preso em controller sem token Pokt). */
89
+ export declare function isModelCredentialReady(model: ModelConfig): boolean;
90
+ /** Prioridade: modelo ativo explícito (se credenciais OK) → Pokt (controller) se token setado → OpenRouter → Gemini → Ollama Cloud → Ollama local */
81
91
  export declare function getEffectiveActiveModel(): ModelConfig | null;
82
92
  export {};
package/dist/config.js CHANGED
@@ -1,4 +1,6 @@
1
1
  import Conf from 'conf';
2
+ import { randomUUID } from 'crypto';
3
+ import os from 'os';
2
4
  export const PROVIDER_LABELS = {
3
5
  controller: 'Pokt API (Controller)',
4
6
  openai: 'OpenAI',
@@ -41,6 +43,7 @@ export const config = new Conf({
41
43
  poktApiBaseUrl: DEFAULT_POKT_SERVICE_BASE_URL,
42
44
  tokenPurchaseBaseUrl: DEFAULT_TOKEN_PURCHASE_BASE_URL,
43
45
  poktToken: '',
46
+ cliInstallId: '',
44
47
  registeredModels: [
45
48
  { provider: 'controller', id: 'default' },
46
49
  { provider: 'openai', id: 'gpt-4o-mini' },
@@ -70,6 +73,7 @@ export const env = {
70
73
  ollamaBaseUrl: ['OLLAMA_BASE_URL'],
71
74
  ollamaCloudApiKey: ['OLLAMA_CLOUD_API_KEY'],
72
75
  poktToken: ['POKT_TOKEN'],
76
+ disableTelemetry: ['POKT_DISABLE_TELEMETRY'],
73
77
  poktApiBaseUrl: ['POKT_API_BASE_URL'],
74
78
  /** Painel e URLs gerais (Railway) */
75
79
  proPortalUrl: ['POKT_PRO_PORTAL_URL', 'POKT_CONTROLLER_PORTAL_URL'],
@@ -113,6 +117,29 @@ export function getOllamaCloudApiKey() {
113
117
  export function getPoktToken() {
114
118
  return readEnvFirst(env.poktToken) || config.get('poktToken') || '';
115
119
  }
120
/**
 * Persistent per-machine/per-install UUID, used to identify usage in the
 * panel when no Pokt token is set.
 *
 * Lazily generates a UUID on first call and stores it in the config;
 * subsequent calls return the stored value. Regenerates if the stored
 * value is not a well-formed UUID.
 *
 * @returns {string} a canonical 36-character UUID.
 */
export function getOrCreateCliInstallId() {
    let id = config.get('cliInstallId');
    // Validate the full 8-4-4-4-12 UUID shape. The previous check
    // (/^[0-9a-f-]{36}$/i) accepted ANY 36-char run of hex digits and
    // hyphens — e.g. a string of 36 hyphens — not just real UUIDs.
    const UUID_RE = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i;
    if (typeof id !== 'string' || !UUID_RE.test(id)) {
        id = randomUUID();
        config.set('cliInstallId', id);
    }
    return id;
}
129
/** Machine name (sanitized) for display in the usage log. */
export function getCliHostLabel() {
    const FALLBACK = 'PC';
    try {
        // Keep only word characters, dots and dashes; cap the length
        // so the label stays display-friendly.
        const sanitized = os
            .hostname()
            .replace(/[^\w.-]+/g, '_')
            .slice(0, 120);
        return sanitized === '' ? FALLBACK : sanitized;
    } catch {
        // os.hostname() may throw on exotic platforms; degrade gracefully.
        return FALLBACK;
    }
}
139
/**
 * Whether CLI telemetry is disabled via the POKT_DISABLE_TELEMETRY
 * environment variable ("1" | "true" | "yes", case-insensitive).
 *
 * @returns {boolean}
 */
export function isCliTelemetryDisabled() {
    // Coalesce to '' so a non-string/undefined result from readEnvFirst
    // cannot crash .toLowerCase(). NOTE(review): the sibling getPoktToken
    // ORs readEnvFirst's result with '', which suggests it can return a
    // falsy non-string — confirm against readEnvFirst's implementation.
    const v = String(readEnvFirst(env.disableTelemetry) ?? '').toLowerCase();
    return v === '1' || v === 'true' || v === 'yes';
}
116
143
  /** Base da API só para provider `controller` (Bearer Pokt). OpenAI direto usa outro ramo no getClient. */
117
144
  export function getPoktApiBaseUrl() {
118
145
  const fromEnv = readEnvFirst(env.poktApiBaseUrl);
@@ -135,10 +162,31 @@ export function getTokenPurchaseUrl() {
135
162
  export const getControllerBaseUrl = getPoktApiBaseUrl;
136
163
  /** URL aberta por `pokt pro` (comprar token) — Vercel por padrão. */
137
164
  export const getProPurchaseUrl = () => getTokenPurchaseUrl();
138
- /** Prioridade: modelo ativo explícito Pokt (controller) se token setado OpenRouter Gemini Ollama Cloud Ollama local */
165
/** True if the model can be used with the current credentials (avoids getting stuck on controller without a Pokt token). */
export function isModelCredentialReady(model) {
    // Provider → credential predicate. Local Ollama needs no credential.
    const readiness = {
        controller: () => !!getPoktToken(),
        openai: () => !!getOpenAIApiKey(),
        grok: () => !!getGrokApiKey(),
        openrouter: () => !!getOpenRouterToken(),
        gemini: () => !!getGeminiApiKey(),
        'ollama-cloud': () => !!getOllamaCloudApiKey(),
        ollama: () => true,
    };
    // Own-property guard keeps inherited keys (e.g. 'toString') from
    // matching; unknown providers are never ready.
    return Object.hasOwn(readiness, model.provider)
        ? readiness[model.provider]()
        : false;
}
186
+ /** Prioridade: modelo ativo explícito (se credenciais OK) → Pokt (controller) se token setado → OpenRouter → Gemini → Ollama Cloud → Ollama local */
139
187
  export function getEffectiveActiveModel() {
140
188
  const explicit = config.get('activeModel');
141
- if (explicit)
189
+ if (explicit && isModelCredentialReady(explicit))
142
190
  return explicit;
143
191
  const models = config.get('registeredModels');
144
192
  if (getPoktToken()) {
@@ -174,5 +222,6 @@ export function getEffectiveActiveModel() {
174
222
  const ollama = models.find((m) => m.provider === 'ollama');
175
223
  if (ollama)
176
224
  return ollama;
177
- return models[0] ?? null;
225
+ const anyUsable = models.find((m) => isModelCredentialReady(m));
226
+ return anyUsable ?? null;
178
227
  }
@@ -77,7 +77,7 @@ async function connectMcpStdio(serverConfig) {
77
77
  args: serverConfig.args ?? [],
78
78
  env: mergeProcessEnv(serverConfig.env),
79
79
  });
80
- const client = new Client({ name: 'pokt-cli', version: '1.0.8' });
80
+ const client = new Client({ name: 'pokt-cli', version: '1.0.13' });
81
81
  await client.connect(transport);
82
82
  const tools = await buildToolsFromClient(serverConfig, client);
83
83
  return pushSession(serverConfig, client, tools, transport);
@@ -112,7 +112,7 @@ async function connectMcpHttp(serverConfig) {
112
112
  }
113
113
  return makeStreamableTransport(url, { authProvider, headers });
114
114
  };
115
- const client = new Client({ name: 'pokt-cli', version: '1.0.8' });
115
+ const client = new Client({ name: 'pokt-cli', version: '1.0.13' });
116
116
  let transport = await makeTransport();
117
117
  const cleanupCb = async () => {
118
118
  try {
@@ -151,7 +151,7 @@ async function connectMcpHttp(serverConfig) {
151
151
  const transport = useSse
152
152
  ? await makeSseTransport(url, { headers })
153
153
  : await makeStreamableTransport(url, { headers });
154
- const client = new Client({ name: 'pokt-cli', version: '1.0.8' });
154
+ const client = new Client({ name: 'pokt-cli', version: '1.0.13' });
155
155
  await client.connect(transport);
156
156
  const tools = await buildToolsFromClient(serverConfig, client);
157
157
  return pushSession(serverConfig, client, tools, transport);
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "pokt-cli",
3
- "version": "1.0.11",
3
+ "version": "1.0.13",
4
4
  "description": "Vibe Coding AI CLI for OpenRouter and Ollama",
5
5
  "main": "./dist/bin/pokt.js",
6
6
  "type": "module",