nothumanallowed 14.1.21 → 14.1.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "nothumanallowed",
3
- "version": "14.1.21",
3
+ "version": "14.1.22",
4
4
  "description": "NotHumanAllowed — 38 AI agents, 80 tools, Studio (visual agentic workflows). Email, calendar, browser automation, screen capture, canvas, cron/heartbeat, Alexandria E2E messaging, GitHub, Notion, Slack, voice chat, free AI (Liara), 28 languages. Zero-dependency CLI.",
5
5
  "type": "module",
6
6
  "bin": {
package/src/constants.mjs CHANGED
@@ -5,7 +5,7 @@ import { fileURLToPath } from 'url';
5
5
  const __filename = fileURLToPath(import.meta.url);
6
6
  const __dirname = path.dirname(__filename);
7
7
 
8
- export const VERSION = '14.1.21';
8
+ export const VERSION = '14.1.22';
9
9
  export const BASE_URL = 'https://nothumanallowed.com/cli';
10
10
  export const API_BASE = 'https://nothumanallowed.com/api/v1';
11
11
 
@@ -544,6 +544,52 @@ function countTokens(text) {
544
544
  return Math.ceil((text || '').length / 4);
545
545
  }
546
546
 
547
/**
 * Simulated streaming: for providers that return full text at once (NHA/Liara),
 * we split the text into ~20-char chunks and emit them with setImmediate gaps
 * so the browser receives a real byte-by-byte stream over SSE.
 * @param {string} text - Full text to emit.
 * @param {(chunk: string) => void} onChunk - Receives each ~20-char slice in order.
 * @returns {Promise<void>} Resolves after the last chunk has been emitted.
 */
async function emitTextAsStream(text, onChunk) {
  const CHUNK_SIZE = 20;
  let offset = 0;
  while (offset < text.length) {
    const next = offset + CHUNK_SIZE;
    onChunk(text.slice(offset, next));
    offset = next;
    // Yield to the event loop so the SSE write actually flushes between chunks.
    await new Promise((resolve) => setImmediate(resolve));
  }
}
559
+
560
/**
 * Detects if an LLM output appears to be truncated / incomplete.
 * Uses cheap per-extension heuristics (brace balance, dangling operators,
 * missing closing tags, JSON parseability). These are approximations —
 * false results are possible either way; `true` means the file likely
 * needs a continuation call.
 * @param {string} content - Raw file content produced by the model.
 * @param {string} filename - Used only to pick the heuristic via its extension.
 * @returns {boolean} true when the content looks cut off.
 */
function isFileTruncated(content, filename) {
  // Anything empty or under 10 chars is treated as incomplete.
  if (!content || content.length < 10) return true;
  const trimmed = content.trimEnd();
  const ext = filename.split('.').pop()?.toLowerCase();

  // Common truncation signs: ends mid-statement without closing bracket/brace
  if (ext === 'js' || ext === 'mjs' || ext === 'ts') {
    // Balanced braces check (fast approximation; slack of 2 tolerates
    // braces inside strings/comments)
    const open = (trimmed.match(/\{/g) || []).length;
    const close = (trimmed.match(/\}/g) || []).length;
    if (open > close + 2) return true;
    // Last meaningful line should not end with an operator or comma
    const lastLine = trimmed.split('\n').pop()?.trim() ?? '';
    if (/[,({=+\-*/<>|&]$/.test(lastLine)) return true;
  }
  if (ext === 'css') {
    const open = (trimmed.match(/\{/g) || []).length;
    const close = (trimmed.match(/\}/g) || []).length;
    if (open > close + 1) return true;
  }
  if (ext === 'html') {
    // Tag names are case-insensitive in HTML, so `</BODY>`/`</HTML>` must
    // count as proper endings too (a case-sensitive check would falsely
    // flag valid uppercase-tag documents and trigger a spurious
    // continuation LLM call).
    const lower = trimmed.toLowerCase();
    if (!lower.includes('</html>') && !lower.includes('</body>')) return true;
  }
  if (ext === 'json') {
    try { JSON.parse(trimmed); } catch { return true; }
  }
  return false;
}
592
+
547
593
  async function runGenerate(config, projectName, description, blocks, authFields, emit) {
548
594
  const blocksDesc = Object.entries(blocks)
549
595
  .filter(([, enabled]) => enabled)
@@ -666,20 +712,43 @@ ${prevContext ? `Recent files generated (for consistency):\n${prevContext}\n\n`
666
712
  const fileTokensIn = countTokens(fileSys) + countTokens(filePrompt);
667
713
 
668
714
  try {
715
+ // Collect full LLM output first, then stream it to the client in small chunks.
716
+ // This gives real word-by-word animation even with non-streaming providers (NHA/Liara).
717
+ let rawOutput = '';
669
718
  await callLLMStream(config, fileSys, filePrompt, (chunk) => {
670
- fileContent += chunk;
671
- emit({ type: 'file_chunk', name: fileSpec.name, chunk, fi: fi + 1, total: filePlan.length });
719
+ rawOutput += chunk;
672
720
  }, { max_tokens: maxTokens });
673
721
 
722
+ // Strip markdown fences if LLM wrapped the output
723
+ rawOutput = rawOutput
724
+ .replace(/^```[\w]*\n/, '').replace(/\n```$/, '')
725
+ .replace(/^```[\w]*\r\n/, '').replace(/\r\n```$/, '').trim();
726
+
727
+ // Continuation: if the file appears truncated, ask the model to continue
728
+ if (isFileTruncated(rawOutput, fileSpec.name)) {
729
+ const contPrompt = `Continue writing the file ${fileSpec.name} exactly from where it was cut off. Output ONLY the continuation (no repetition of what was already written, no explanation):
730
+
731
+ ${rawOutput.slice(-800)}`;
732
+ let continuation = '';
733
+ await callLLMStream(config, fileSys, contPrompt, (chunk) => {
734
+ continuation += chunk;
735
+ }, { max_tokens: Math.min(maxTokens, 4096) });
736
+ continuation = continuation
737
+ .replace(/^```[\w]*\n/, '').replace(/\n```$/, '').trim();
738
+ rawOutput = rawOutput + '\n' + continuation;
739
+ }
740
+
741
+ fileContent = rawOutput;
742
+
743
+ // Stream the final content to the browser in small chunks for animation
744
+ await emitTextAsStream(fileContent, (chunk) => {
745
+ emit({ type: 'file_chunk', name: fileSpec.name, chunk, fi: fi + 1, total: filePlan.length });
746
+ });
747
+
674
748
  const fileTokensOut = countTokens(fileContent);
675
749
  totalTokensIn += fileTokensIn;
676
750
  totalTokensOut += fileTokensOut;
677
751
 
678
- // Strip markdown fences if LLM wrapped the output anyway
679
- fileContent = fileContent
680
- .replace(/^```[\w]*\n/, '').replace(/\n```$/, '')
681
- .replace(/^```[\w]*\r\n/, '').replace(/\r\n```$/, '').trim();
682
-
683
752
  // Quick syntax check for JS/TS files
684
753
  if (fileSpec.name.endsWith('.js') || fileSpec.name.endsWith('.mjs')) {
685
754
  try { new Function(fileContent); } catch (e) { syntaxError = e.message.replace(/\n.*/s, ''); }