@hasna/terminal 3.7.0 → 3.7.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -487,13 +487,25 @@ export function createServer() {
487
487
  const start = Date.now();
488
488
  const result = cachedRead(path, { offset, limit });
489
489
  if (summarize && result.content.length > 500) {
490
- const processed = await processOutput(`cat ${path}`, result.content);
491
- logCall("read_file", { command: path, outputTokens: estimateTokens(result.content), tokensSaved: processed.tokensSaved, durationMs: Date.now() - start, aiProcessed: true });
490
+ // AI-native file summary — ask directly what the file does
491
+ const provider = getOutputProvider();
492
+ const outputModel = provider.name === "groq" ? "llama-3.1-8b-instant" : undefined;
493
+ const content = result.content.length > 8000 ? result.content.slice(0, 8000) : result.content;
494
+ const summary = await provider.complete(`File: ${path}\n\n${content}`, {
495
+ model: outputModel,
496
+ system: `Describe what this source file does in 2-4 lines. Include: main class/module name, key methods/functions, what it exports, and its purpose. Be specific — name the actual functions and what they do. Never just say "N lines of code."`,
497
+ maxTokens: 300,
498
+ temperature: 0.2,
499
+ });
500
+ const outputTokens = estimateTokens(result.content);
501
+ const summaryTokens = estimateTokens(summary);
502
+ const saved = Math.max(0, outputTokens - summaryTokens);
503
+ logCall("read_file", { command: path, outputTokens, tokensSaved: saved, durationMs: Date.now() - start, aiProcessed: true });
492
504
  return {
493
505
  content: [{ type: "text", text: JSON.stringify({
494
- summary: processed.summary,
506
+ summary,
495
507
  lines: result.content.split("\n").length,
496
- tokensSaved: processed.tokensSaved,
508
+ tokensSaved: saved,
497
509
  cached: result.cached,
498
510
  }) }],
499
511
  };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@hasna/terminal",
3
- "version": "3.7.0",
3
+ "version": "3.7.1",
4
4
  "description": "Smart terminal wrapper for AI agents and humans — structured output, token compression, MCP server, natural language",
5
5
  "type": "module",
6
6
  "files": [
package/src/mcp/server.ts CHANGED
@@ -690,13 +690,28 @@ export function createServer(): McpServer {
690
690
  const result = cachedRead(path, { offset, limit });
691
691
 
692
692
  if (summarize && result.content.length > 500) {
693
- const processed = await processOutput(`cat ${path}`, result.content);
694
- logCall("read_file", { command: path, outputTokens: estimateTokens(result.content), tokensSaved: processed.tokensSaved, durationMs: Date.now() - start, aiProcessed: true });
693
+ // AI-native file summary — ask directly what the file does
694
+ const provider = getOutputProvider();
695
+ const outputModel = provider.name === "groq" ? "llama-3.1-8b-instant" : undefined;
696
+ const content = result.content.length > 8000 ? result.content.slice(0, 8000) : result.content;
697
+ const summary = await provider.complete(
698
+ `File: ${path}\n\n${content}`,
699
+ {
700
+ model: outputModel,
701
+ system: `Describe what this source file does in 2-4 lines. Include: main class/module name, key methods/functions, what it exports, and its purpose. Be specific — name the actual functions and what they do. Never just say "N lines of code."`,
702
+ maxTokens: 300,
703
+ temperature: 0.2,
704
+ }
705
+ );
706
+ const outputTokens = estimateTokens(result.content);
707
+ const summaryTokens = estimateTokens(summary);
708
+ const saved = Math.max(0, outputTokens - summaryTokens);
709
+ logCall("read_file", { command: path, outputTokens, tokensSaved: saved, durationMs: Date.now() - start, aiProcessed: true });
695
710
  return {
696
711
  content: [{ type: "text" as const, text: JSON.stringify({
697
- summary: processed.summary,
712
+ summary,
698
713
  lines: result.content.split("\n").length,
699
- tokensSaved: processed.tokensSaved,
714
+ tokensSaved: saved,
700
715
  cached: result.cached,
701
716
  }) }],
702
717
  };