nothumanallowed 13.5.158 → 13.5.160

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "nothumanallowed",
-  "version": "13.5.158",
+  "version": "13.5.160",
   "description": "NotHumanAllowed — 38 AI agents, 80 tools, Studio (visual agentic workflows). Email, calendar, browser automation, screen capture, canvas, cron/heartbeat, Alexandria E2E messaging, GitHub, Notion, Slack, voice chat, free AI (Liara), 28 languages. Zero-dependency CLI.",
   "type": "module",
   "bin": {
package/src/constants.mjs CHANGED
@@ -5,7 +5,7 @@ import { fileURLToPath } from 'url';
 const __filename = fileURLToPath(import.meta.url);
 const __dirname = path.dirname(__filename);
 
-export const VERSION = '13.5.158';
+export const VERSION = '13.5.160';
 export const BASE_URL = 'https://nothumanallowed.com/cli';
 export const API_BASE = 'https://nothumanallowed.com/api/v1';
 
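The release bumps the same version string in two places: package.json and the VERSION constant in src/constants.mjs. A release that updates one but not the other would ship a CLI that misreports its own version, so a pre-publish check is a natural guard. A minimal sketch of such a check (hypothetical script, not part of the published package; the paths assume the layout shown in this diff):

// check-version-sync.mjs: hypothetical pre-publish guard, not shipped with the package
import { readFileSync } from 'fs';

const pkg = JSON.parse(readFileSync('package/package.json', 'utf8'));
const src = readFileSync('package/src/constants.mjs', 'utf8');
const m = src.match(/export const VERSION = '([^']+)'/);

// Fail loudly if constants.mjs disagrees with package.json (or lacks VERSION entirely).
if (!m || m[1] !== pkg.version) {
  console.error('VERSION mismatch: package.json=' + pkg.version + ', constants.mjs=' + (m ? m[1] : 'missing'));
  process.exit(1);
}
console.log('Version in sync: ' + pkg.version);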
@@ -8844,15 +8844,24 @@ async function wcGenerate() {
   var splitPrompts = WC_CSS_SPLIT[fp.name];
   if (splitPrompts) {
     // Two-pass generation: streaming on first pass only
-    var part1 = await wcCallLLM(sysPreamble, splitPrompts[0] + _nl2 + _nl2 + 'File: ' + fp.name, signal, fp.lang, 8192, onLiveUpdate);
+    var part1 = await wcCallLLM(sysPreamble, splitPrompts[0] + _nl2 + _nl2 + 'File: ' + fp.name, signal, fp.lang, 8192, onLiveUpdate, fp.name);
     part1 = wcStripFences(part1);
     if (signal && signal.aborted) return part1;
-    var part2 = await wcCallLLM(sysPreamble, splitPrompts[1] + _nl2 + _nl2 + 'File: ' + fp.name, signal, fp.lang, 8192, function(p2) { if (onLiveUpdate) onLiveUpdate(part1 + _nl2 + _nl2 + p2); });
+    var part2 = await wcCallLLM(sysPreamble, splitPrompts[1] + _nl2 + _nl2 + 'File: ' + fp.name, signal, fp.lang, 8192, function(p2) { if (onLiveUpdate) onLiveUpdate(part1 + _nl2 + _nl2 + p2); }, fp.name);
     part2 = wcStripFences(part2);
     return part1 + _nl2 + _nl2 + part2;
   }
-  var content = await wcCallLLM(sysPreamble, fp.prompt + _nl2 + _nl2 + 'File to generate: ' + fp.name, signal, fp.lang, undefined, onLiveUpdate);
+  var content = await wcCallLLM(sysPreamble, fp.prompt + _nl2 + _nl2 + 'File to generate: ' + fp.name, signal, fp.lang, undefined, onLiveUpdate, fp.name);
   content = wcStripFences(content);
+  // Detect model confusion: if output looks like a conversational reply instead of code, retry once
+  var firstLine = content.trim().split(_nl2)[0] || '';
+  var confusionPhrases = ['I notice', 'Could you please', 'I need to know', 'I don', 'To help you', 'Please clarify', 'I apologize', 'Unfortunately', 'As an AI'];
+  var isConfused = confusionPhrases.some(function(p) { return firstLine.indexOf(p) === 0; });
+  if (isConfused && !(signal && signal.aborted)) {
+    var retryPrompt = 'IMPORTANT: Output ONLY the raw file content for ' + fp.name + '. No explanations, no questions, no markdown. Just the code.' + _nl2 + _nl2 + fp.prompt;
+    content = await wcCallLLM(sysPreamble, retryPrompt + _nl2 + _nl2 + 'File to generate: ' + fp.name, signal, fp.lang, undefined, onLiveUpdate, fp.name);
+    content = wcStripFences(content);
+  }
   // Post-process: fix LLM streaming artifacts (spaces inserted inside keywords/identifiers)
   if (fp.lang === 'javascript' || fp.lang === 'typescript') {
     // Fix spaces inside JS/TS keywords that LLMs sometimes split during streaming
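The retry added in this hunk keys entirely off the first line of the model's reply: a conversational opener ("I notice", "Could you please", ...) means the model answered in prose rather than emitting file content. A standalone sketch of the same heuristic, assuming a newline separator and a generic callLLM(prompt) helper (both names are placeholders, not this package's API):

// Hypothetical standalone form of the confusion check in the hunk above.
var CONFUSION_PREFIXES = ['I notice', 'Could you please', 'I need to know', 'I don', 'To help you', 'Please clarify', 'I apologize', 'Unfortunately', 'As an AI'];

function looksConfused(output) {
  // Only the first line matters: if it opens conversationally, the model is
  // chatting instead of generating the requested file.
  var firstLine = output.trim().split('\n')[0] || '';
  return CONFUSION_PREFIXES.some(function (p) { return firstLine.indexOf(p) === 0; });
}

async function generateFile(callLLM, prompt, fileName) {
  var content = await callLLM(prompt);
  if (looksConfused(content)) {
    // Single retry with an explicit code-only instruction, mirroring the diff.
    content = await callLLM('IMPORTANT: Output ONLY the raw file content for ' +
      fileName + '. No explanations, no questions, no markdown. Just the code.\n\n' + prompt);
  }
  return content;
}

Prefix matching ('I don' also catches "I don't") keeps the check cheap; the trade-off is that a file legitimately beginning with one of these phrases would trigger one redundant retry.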
@@ -8951,50 +8960,43 @@ async function wcGenerate() {
     }, 80);
   }
 
-  // Generate in parallel batches of 4 each call is independent/fresh to Liara
-  // Counter increments 1 per file as it completes, not per batch
-  var BATCH = 4;
+  // Generate sequentially one file at a time so every file streams visibly
+  // and the progress bar increments file by file
   var doneCount = 0;
   wcUpdateGenOverlay(0, filePlan.length, '');
-  for (var bi = 0; bi < filePlan.length; bi += BATCH) {
+  for (var si = 0; si < filePlan.length; si++) {
     if (_wcGenAbortCtrl && _wcGenAbortCtrl.signal.aborted) break;
-    var batch = filePlan.slice(bi, bi + BATCH);
-    var results = await Promise.allSettled(batch.map(function(fp, bii) {
-      // Point activeFile at the first file of this batch so tokens appear immediately
-      if (bii === 0) {
-        var firstIdx = wcState.generatedFiles.findIndex(function(f){ return f.name === fp.name; });
-        if (firstIdx >= 0) wcState.activeFile = firstIdx;
-      }
-      // Show file name in overlay as soon as it starts
-      wcUpdateGenOverlay(doneCount, filePlan.length, fp.name);
-      var liveCallback = function(partial) { wcLiveUpdateFile(fp.name, fp.lang, partial); };
-      return wcGenOneFile(fp, _wcGenAbortCtrl ? _wcGenAbortCtrl.signal : null, liveCallback).then(function(c){ return { fp: fp, content: c }; });
-    }));
-    results.forEach(function(r, ri) {
-      var batchIdx = ri;
-      if (r.status === 'fulfilled') {
-        var fp = r.value.fp;
-        for (var gi = 0; gi < wcState.generatedFiles.length; gi++) {
-          if (wcState.generatedFiles[gi].name === fp.name) {
-            wcState.generatedFiles[gi] = { name: fp.name, content: r.value.content, lang: fp.lang };
-            break;
-          }
+    var fp = filePlan[si];
+
+    // Switch viewer to this file and show its name in the bar
+    var fileIdx = wcState.generatedFiles.findIndex(function(f){ return f.name === fp.name; });
+    if (fileIdx >= 0) wcState.activeFile = fileIdx;
+    wcUpdateGenOverlay(doneCount, filePlan.length, fp.name);
+
+    var liveCallback = (function(fpCap) {
+      return function(partial) { wcLiveUpdateFile(fpCap.name, fpCap.lang, partial); };
+    }(fp));
+
+    try {
+      var genContent = await wcGenOneFile(fp, _wcGenAbortCtrl ? _wcGenAbortCtrl.signal : null, liveCallback);
+      for (var gi = 0; gi < wcState.generatedFiles.length; gi++) {
+        if (wcState.generatedFiles[gi].name === fp.name) {
+          wcState.generatedFiles[gi] = { name: fp.name, content: genContent, lang: fp.lang };
+          break;
        }
-      } else if (r.reason && r.reason.name !== 'AbortError') {
-        var fpErr = batch[batchIdx];
-        if (fpErr) {
-          for (var gi2 = 0; gi2 < wcState.generatedFiles.length; gi2++) {
-            if (wcState.generatedFiles[gi2].name === fpErr.name) {
-              wcState.generatedFiles[gi2] = { name: fpErr.name, content: '// Error generating this file: ' + (r.reason.message || 'unknown error'), lang: fpErr.lang || '', _error: true };
-              break;
-            }
-          }
+      }
+    } catch(genErr) {
+      if (genErr && genErr.name === 'AbortError') break;
+      for (var gi2 = 0; gi2 < wcState.generatedFiles.length; gi2++) {
+        if (wcState.generatedFiles[gi2].name === fp.name) {
+          wcState.generatedFiles[gi2] = { name: fp.name, content: '// Error generating this file: ' + (genErr && genErr.message || 'unknown error'), lang: fp.lang || '', _error: true };
+          break;
        }
      }
-      doneCount++;
-      // Update counter immediately on each file completion — no full re-render
-      wcUpdateGenOverlay(doneCount, filePlan.length, doneCount < filePlan.length ? (batch[Math.min(batchIdx+1, batch.length-1)] || batch[batchIdx] || {}).name || '' : '');
-    });
+    }
+
+    doneCount++;
+    wcUpdateGenOverlay(doneCount, filePlan.length, si + 1 < filePlan.length ? filePlan[si + 1].name : '');
   }
 
   if (_wcTimerInterval) { clearInterval(_wcTimerInterval); _wcTimerInterval = null; }
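This hunk trades throughput for legibility: the old Promise.allSettled batches of four finished sooner, but only one file's tokens could be displayed at a time, so most files appeared to jump from empty to complete and the counter advanced in bursts. The replacement awaits one file per iteration, so every file streams visibly and the counter moves file by file. Stripped to its skeleton (generate, onProgress, and the fp.content field are placeholder names for this sketch, not this package's API):

// Hypothetical skeleton of the sequential loop introduced above.
async function generateAll(filePlan, generate, onProgress, signal) {
  var done = 0;
  for (var i = 0; i < filePlan.length; i++) {
    if (signal && signal.aborted) break;
    var fp = filePlan[i];
    try {
      // One awaited call at a time: this file's tokens are the only ones streaming.
      fp.content = await generate(fp, signal);
    } catch (err) {
      if (err && err.name === 'AbortError') break; // user cancelled: stop the whole run
      fp.content = '// Error generating this file: ' + ((err && err.message) || 'unknown error');
    }
    done++;
    // Advance the counter and preview the next file's name, as the diff does.
    onProgress(done, filePlan.length, filePlan[i + 1] ? filePlan[i + 1].name : '');
  }
}

Note that the diff still wraps liveCallback in an immediately-invoked function to capture fp per iteration; with a var-scoped loop variable that is a sensible guard against the classic closure-over-loop-variable bug, even though in a fully sequential awaited loop each callback has finished firing before fp is reassigned.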
@@ -9301,19 +9303,21 @@ async function wcCallLLMRaw(sys, user, signal, maxTok, onToken) {
   }
 }
 
-async function wcCallLLM(sys, user, signal, lang, maxTok, onToken) {
+async function wcCallLLM(sys, user, signal, lang, maxTok, onToken, fileName) {
   var content = await wcCallLLMRaw(sys, user, signal, maxTok, onToken);
   // Continuation loop: if response is truncated, ask model to continue (no streaming for continuations)
   var maxContinuations = 2;
   for (var ci = 0; ci < maxContinuations; ci++) {
     if (!wcIsTruncated(content, lang || 'text')) break;
     if (signal && signal.aborted) break;
-    var continuePrompt = 'Continue generating the file EXACTLY from where you stopped. Do not repeat anything already written. Output ONLY the remaining code, starting from the next character after where you stopped.' +
-      String.fromCharCode(10) + String.fromCharCode(10) + 'The file so far ends with:' +
-      String.fromCharCode(10) + content.slice(-300);
+    var _nlc = String.fromCharCode(10);
+    var continuePrompt = (fileName ? 'File: ' + fileName + _nlc + _nlc : '') +
+      'You were generating this file and ran out of tokens. The file is INCOMPLETE.' + _nlc +
+      'Continue EXACTLY from where you stopped. Output ONLY the remaining code — do NOT repeat anything already written, do NOT explain, do NOT use markdown fences.' + _nlc + _nlc +
+      'The file so far ends with (last 600 chars):' + _nlc + content.slice(-600);
     var continuation = await wcCallLLMRaw(sys, continuePrompt, signal, maxTok);
     if (!continuation || continuation.trim().length < 5) break;
-    content = content + String.fromCharCode(10) + continuation;
+    content = content + _nlc + continuation;
     if (onToken) onToken(content);
   }
   return content;
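The rewritten continuation prompt names the file, states explicitly that the output was truncated, and shows the last 600 characters instead of 300, giving the model a larger anchor to resume from. One failure mode it does not address is the model repeating part of that anchor anyway. A hedged sketch of an overlap-trimming merge that could be applied before the concatenation above (not present in this package; shown only to illustrate a common hardening step):

// Hypothetical overlap trimmer: if the continuation re-emits the tail of the
// existing content, drop the repeated prefix before joining.
function mergeContinuation(content, continuation) {
  var maxOverlap = Math.min(600, content.length, continuation.length);
  // Try the longest plausible overlap first, shrinking until the tail matches.
  for (var len = maxOverlap; len > 0; len--) {
    if (content.slice(-len) === continuation.slice(0, len)) {
      return content + continuation.slice(len);
    }
  }
  // No overlap found: fall back to a plain newline join, as the diff does.
  return content + '\n' + continuation;
}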