@bonginkan/maria 4.3.31 → 4.3.32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,4 +1,4 @@
1
- # MARIA - AI Development Platform v4.3.31
1
+ # MARIA - AI Development Platform v4.3.32
2
2
 
3
3
  [![npm version](https://img.shields.io/npm/v/@bonginkan/maria.svg)](https://www.npmjs.com/package/@bonginkan/maria)
4
4
  [![License](https://img.shields.io/badge/license-Multi--tier-blue.svg)](LICENSE)
@@ -10,7 +10,7 @@
10
10
 
11
11
  > **Enterprise-grade AI development platform with 100% command availability and comprehensive fallback support**
12
12
 
13
- ## 🚀 What's New in v4.3.31 (September 16, 2025)
13
+ ## 🚀 What's New in v4.3.32 (September 16, 2025)
14
14
 
15
15
  ### 🎯 Interactive Improvements & Choice Memory
16
16
  - **Choice Memory System**: Smart persistence of user selections across sessions
@@ -922,7 +922,7 @@ await secureWorkflow.executeWithAuth(workflowDefinition, securityContext);
922
922
  ### Quick Installation
923
923
  ```bash
924
924
  # Install globally (recommended)
925
- npm install -g @bonginkan/maria@4.3.31
925
+ npm install -g @bonginkan/maria@4.3.32
926
926
 
927
927
  # Verify installation
928
928
  maria --version # Should show v4.3.32
@@ -1126,7 +1126,7 @@ MARIA CODE is distributed under a comprehensive licensing system designed for in
1126
1126
 
1127
1127
  *MARIA v4.3.32 represents the pinnacle of multimodal AI development platform evolution - combining revolutionary voice-to-code capabilities, advanced memory systems, and comprehensive command ecosystems with enterprise-grade security and performance. This release establishes MARIA as the definitive choice for developers and Fortune 500 enterprises seeking intelligent, multimodal development experiences with GraphRAG intelligence, multilingual support, and zero-anxiety coding workflows.*
1128
1128
 
1129
- **Transform your development experience today**: `npm install -g @bonginkan/maria@4.3.31`
1129
+ **Transform your development experience today**: `npm install -g @bonginkan/maria@4.3.32`
1130
1130
 
1131
1131
  🌐 **Official Website**: [https://maria-code.ai](https://maria-code.ai)
1132
1132
  💬 **Community**: [https://discord.gg/SMSmSGcEQy](https://discord.gg/SMSmSGcEQy)
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "version": "lite-1.0.0",
3
- "generatedAt": "2025-09-28T05:40:03.382Z",
3
+ "generatedAt": "2025-09-28T09:23:52.572Z",
4
4
  "totalCommands": 16,
5
5
  "readyCount": 16,
6
6
  "partialCount": 0,
@@ -26066,8 +26066,8 @@ var require_package = __commonJS({
26066
26066
  "package.json"(exports, module) {
26067
26067
  module.exports = {
26068
26068
  name: "@bonginkan/maria",
26069
- version: "4.3.31",
26070
- description: "\u{1F680} MARIA v4.3.31 - Enterprise AI Development Platform with identity system and character voice implementation. Features 74 production-ready commands with comprehensive fallback implementation, local LLM support, and zero external dependencies. Includes natural language coding, AI safety evaluation, intelligent evolution system, episodic memory with PII masking, and real-time monitoring dashboard. Built with TypeScript AST-powered code generation, OAuth2.0 + PKCE authentication, quantum-resistant cryptography, and enterprise-grade performance.",
26069
+ version: "4.3.32",
26070
+ description: "\u{1F680} MARIA v4.3.32 - Enterprise AI Development Platform with identity system and character voice implementation. Features 74 production-ready commands with comprehensive fallback implementation, local LLM support, and zero external dependencies. Includes natural language coding, AI safety evaluation, intelligent evolution system, episodic memory with PII masking, and real-time monitoring dashboard. Built with TypeScript AST-powered code generation, OAuth2.0 + PKCE authentication, quantum-resistant cryptography, and enterprise-grade performance.",
26071
26071
  keywords: [
26072
26072
  "ai",
26073
26073
  "cli",
@@ -28099,7 +28099,7 @@ var init_AuthenticationManager = __esm({
28099
28099
  const response = await fetch(`${this.apiBase}/api/user/profile`, {
28100
28100
  headers: {
28101
28101
  "Authorization": `Bearer ${tokens2.accessToken}`,
28102
- "User-Agent": `maria-cli/${process.env.CLI_VERSION || "4.3.31"}`
28102
+ "User-Agent": `maria-cli/${process.env.CLI_VERSION || "4.3.32"}`
28103
28103
  }
28104
28104
  });
28105
28105
  if (response.status === 401) {
@@ -28731,7 +28731,7 @@ async function callApi(path64, init3 = {}) {
28731
28731
  "Authorization": `Bearer ${token}`,
28732
28732
  "X-Device-Id": getDeviceId(),
28733
28733
  "X-Session-Id": getSessionId() || "",
28734
- "User-Agent": `maria-cli/${process.env.CLI_VERSION || "4.3.31"}`,
28734
+ "User-Agent": `maria-cli/${process.env.CLI_VERSION || "4.3.32"}`,
28735
28735
  "Content-Type": init3.headers?.["Content-Type"] || "application/json"
28736
28736
  });
28737
28737
  const doFetch = async (token) => {
@@ -50689,7 +50689,7 @@ var init_about_command = __esm({
50689
50689
  async execute(args2, context2) {
50690
50690
  const output3 = [];
50691
50691
  output3.push("");
50692
- output3.push(chalk14__default.default.cyan.bold("\u{1F916} About MARIA v4.3.31"));
50692
+ output3.push(chalk14__default.default.cyan.bold("\u{1F916} About MARIA v4.3.32"));
50693
50693
  output3.push(chalk14__default.default.gray("\u2550".repeat(40)));
50694
50694
  output3.push("");
50695
50695
  output3.push(chalk14__default.default.white.bold("MARIA - Minimal API, Maximum Power"));
@@ -70777,16 +70777,7 @@ var init_EvaluationOrchestrator = __esm({
70777
70777
  code: options.code,
70778
70778
  files
70779
70779
  };
70780
- let result;
70781
- if (options.llmScoring !== false) {
70782
- try {
70783
- result = await this.scoreWithLLM(input3, criteria);
70784
- } catch {
70785
- result = await this.engine.evaluate(input3, criteria);
70786
- }
70787
- } else {
70788
- result = await this.engine.evaluate(input3, criteria);
70789
- }
70780
+ const result = await this.scoreWithLLM(input3, criteria);
70790
70781
  result.files = input3.files || [];
70791
70782
  return { criteriaPath, result };
70792
70783
  }
@@ -70805,7 +70796,7 @@ var init_EvaluationOrchestrator = __esm({
70805
70796
  await fs21.promises.mkdir(dir, { recursive: true });
70806
70797
  const system = [
70807
70798
  "You are an expert evaluation designer.",
70808
- "Create a JSON criteria file for evaluating the given idea/code/files.",
70799
+ "Create a JSON criteria file for evaluating the given idea/code/files. If a local file path is provided, make sure you read the uploaded file before taking any actions.",
70809
70800
  "Respond ONLY with a single multi-file section in this exact format:",
70810
70801
  `[BEGIN file: ${targetPath}]`,
70811
70802
  "{JSON}",
@@ -70863,12 +70854,22 @@ ${user}`,
70863
70854
  return map;
70864
70855
  }
70865
70856
  extractFirstJsonObject(text) {
70866
- const fence = /```json\r?\n([\s\S]*?)```/i.exec(text);
70867
- if (fence) return fence[1];
70868
- const start = text.indexOf("{");
70869
- const end = text.lastIndexOf("}");
70857
+ const fenceJson = /```\s*json\s*\r?\n([\s\S]*?)```/i.exec(text);
70858
+ if (fenceJson) return fenceJson[1];
70859
+ const fencePlain = /```\s*\r?\n([\s\S]*?)```/i.exec(text);
70860
+ if (fencePlain) {
70861
+ const body = fencePlain[1];
70862
+ try {
70863
+ JSON.parse(body);
70864
+ return body;
70865
+ } catch {
70866
+ }
70867
+ }
70868
+ const defenced = text.replace(/```[a-zA-Z]*\s*\r?\n([\s\S]*?)```/g, "$1");
70869
+ const start = defenced.indexOf("{");
70870
+ const end = defenced.lastIndexOf("}");
70870
70871
  if (start >= 0 && end > start) {
70871
- const cand = text.slice(start, end + 1);
70872
+ const cand = defenced.slice(start, end + 1);
70872
70873
  try {
70873
70874
  JSON.parse(cand);
70874
70875
  return cand;
@@ -70879,18 +70880,20 @@ ${user}`,
70879
70880
  }
70880
70881
  async scoreWithLLM(input3, criteria) {
70881
70882
  const criteriaPreview = criteria.items.map((it) => ({ id: it.id, name: it.name, weight: it.weight, rubric: it.rubric })).slice(0, 20);
70882
- const bundleParts = [];
70883
- if (input3.idea) bundleParts.push(`Idea:
70883
+ const parts = [];
70884
+ if (input3.idea) parts.push(`Idea:
70884
70885
  ${input3.idea}`);
70885
- if (input3.code) bundleParts.push(`Code:
70886
+ if (input3.code) parts.push(`Code:
70886
70887
  ${input3.code}`);
70887
- if (input3.files && input3.files.length) {
70888
- bundleParts.push(`Files:
70889
- ${input3.files.slice(0, 5).map((f3) => `- ${f3.path}`).join("\n")}`);
70888
+ const files = input3.files || [];
70889
+ if (files.length) {
70890
+ parts.push(`Files attached:
70891
+ ${files.slice(0, 20).map((f3) => `- ${f3.path}`).join("\n")}`);
70890
70892
  }
70891
- const bundle = bundleParts.join("\n\n");
70893
+ const bundle = parts.join("\n\n");
70892
70894
  const system = [
70893
70895
  "You are an impartial evaluator. Score each criterion between 0 and 1.",
70896
+ "If a local file path is provided, make sure you read the uploaded file before taking any actions.",
70894
70897
  "Return JSON only in the following schema:",
70895
70898
  '{ "totalScore": number (0..1), "details": [{ "id": string, "score": number (0..1), "reason": string }] }',
70896
70899
  "Keep reasons short (<= 120 chars)."
@@ -70900,15 +70903,23 @@ ${input3.files.slice(0, 5).map((f3) => `- ${f3.path}`).join("\n")}`);
70900
70903
  "Input to evaluate:",
70901
70904
  bundle
70902
70905
  ].join("\n\n");
70906
+ (input3.files || []).some((f3) => f3.mime && f3.mime !== "text/plain");
70907
+ const provider = "google";
70908
+ const model = "gemini-2.5-flash";
70903
70909
  const response = await callAPI("/v1/ai-proxy", {
70904
70910
  method: "POST",
70905
70911
  body: {
70912
+ provider,
70913
+ model,
70906
70914
  prompt: `${system}
70907
70915
 
70908
70916
  ---
70909
70917
 
70910
70918
  ${user}`,
70911
- taskType: "evaluation"
70919
+ taskType: "evaluation",
70920
+ metadata: {
70921
+ attachments: (input3.files || []).map((f3) => this.toAttachmentPayload(f3))
70922
+ }
70912
70923
  }
70913
70924
  });
70914
70925
  const raw = (response?.data?.content || response?.output || "").trim();
@@ -70918,7 +70929,12 @@ ${user}`,
70918
70929
  if (sections.size > 0) {
70919
70930
  jsonText = Array.from(sections.values())[0];
70920
70931
  }
70921
- const parsed = JSON.parse(jsonText);
70932
+ let parsed;
70933
+ try {
70934
+ parsed = JSON.parse(jsonText);
70935
+ } catch {
70936
+ return { totalScore: 0, details: criteria.items.map((it) => ({ id: it.id, name: it.name, weight: it.weight, score: 0, reason: "" })), passThroughText: raw };
70937
+ }
70922
70938
  const details = criteria.items.map((it) => {
70923
70939
  const found = parsed.details.find((d) => d.id === it.id);
70924
70940
  const score = Math.max(0, Math.min(1, found?.score ?? 0));
@@ -70929,22 +70945,61 @@ ${user}`,
70929
70945
  const totalScore = Number.isFinite(parsed.totalScore) ? Math.max(0, Math.min(1, parsed.totalScore)) : weighted;
70930
70946
  return { totalScore, details };
70931
70947
  }
70948
+ toAttachmentPayload(file) {
70949
+ const ext2 = path11__namespace.default.extname(file.path).toLowerCase();
70950
+ const mime = file.mime || (ext2 === ".pdf" ? "application/pdf" : "text/plain");
70951
+ const b64 = file.binaryBase64 || Buffer.from(file.content || "", "utf8").toString("base64");
70952
+ return { name: path11__namespace.default.basename(file.path), path: file.path, mime, data_base64: b64 };
70953
+ }
70932
70954
  async readFiles(pathsInput) {
70933
70955
  const out = [];
70934
70956
  for (const p of pathsInput) {
70935
- const abs = path11__namespace.default.isAbsolute(p) ? p : path11__namespace.default.join(this.projectRoot, p);
70957
+ const normalized = p.replace(/^"|"$/g, "").replace(/^'|'$/g, "");
70958
+ const abs = path11__namespace.default.isAbsolute(normalized) ? normalized : path11__namespace.default.join(this.projectRoot, normalized);
70936
70959
  try {
70937
70960
  const stat13 = await fs21.promises.stat(abs);
70938
70961
  if (stat13.isDirectory()) {
70939
70962
  continue;
70940
70963
  }
70941
- const content = await fs21.promises.readFile(abs, "utf8");
70942
- out.push({ path: abs, content });
70964
+ const ext2 = path11__namespace.default.extname(abs).toLowerCase();
70965
+ const raw = await fs21.promises.readFile(abs);
70966
+ let content = "";
70967
+ if (/\.pdf$/i.test(abs)) {
70968
+ content = await this.tryExtractPdfText(abs);
70969
+ } else {
70970
+ try {
70971
+ content = raw.toString("utf8");
70972
+ } catch {
70973
+ content = "";
70974
+ }
70975
+ }
70976
+ const mime = ext2 === ".pdf" ? "application/pdf" : "text/plain";
70977
+ const binaryBase64 = raw.toString("base64");
70978
+ out.push({ path: abs, content, binaryBase64, mime });
70943
70979
  } catch {
70944
70980
  }
70945
70981
  }
70946
70982
  return out;
70947
70983
  }
70984
+ async tryExtractPdfText(absPath) {
70985
+ try {
70986
+ const buf = await fs21.promises.readFile(absPath);
70987
+ try {
70988
+ const mod = await import('pdf-parse');
70989
+ const pdfParse = mod?.default || mod;
70990
+ if (typeof pdfParse === "function") {
70991
+ const res = await pdfParse(buf);
70992
+ const text = (res?.text || "").trim();
70993
+ if (text) return text;
70994
+ }
70995
+ } catch {
70996
+ }
70997
+ const kb = Math.round(buf.byteLength / 1024);
70998
+ return `[[PDF ${absPath} (${kb} KB) \u2014 text extraction unavailable in this environment]]`;
70999
+ } catch {
71000
+ return "[[PDF read error]]";
71001
+ }
71002
+ }
70948
71003
  };
70949
71004
  }
70950
71005
  });
@@ -70976,6 +71031,7 @@ async function inferAssessParams(rawText, cwd2) {
70976
71031
  "You extract structured options for an evaluation command.",
70977
71032
  'Return JSON only with keys: { "criteriaPath"?: string, "regenerate"?: boolean, "criteriaOnly"?: boolean, "files"?: string[], "idea"?: string, "code"?: string }.',
70978
71033
  'Prefer concise relative paths for criteria (e.g., "AGI-hackathon-ideathon/evaluation.criteria.json").',
71034
+ 'If the text says "do not regenerate" or "DO NOT regenerate", set regenerate=false (override any other hint).',
70979
71035
  "If the text asks to create/save criteria/rubric (without asking for evaluation), set criteriaOnly=true.",
70980
71036
  "If the text mentions saving criteria to a folder, set criteriaPath accordingly and regenerate=true.",
70981
71037
  "If it references files or folders, include them in files[] (relative to cwd).",
@@ -71137,7 +71193,9 @@ var init_evaluate_command = __esm({
71137
71193
  }
71138
71194
  const filesOption = options["file"];
71139
71195
  const filesFromFlags = Array.isArray(filesOption) ? filesOption : filesOption ? [filesOption] : [];
71140
- const files = Array.from(/* @__PURE__ */ new Set([...inferred.files || [], ...filesFromFlags]));
71196
+ const detectedFromText = Array.from(rawText.match(/[A-Za-z]:\\[^\r\n]+/g) || []).map((s2) => s2.trim().replace(/[\s\.,;\)]*$/, ""));
71197
+ const noRegen = /\bdo\s*not\s*regenerate\b|\bdo\s*not\s*re\s*gen\b|\bdo\s*not\s*re\s*create\b|\bno\s*regen\b|\bDO\s*NOT\s*REGENERATE\b/i.test(rawText);
71198
+ const files = Array.from(/* @__PURE__ */ new Set([...inferred.files || [], ...filesFromFlags, ...detectedFromText]));
71141
71199
  const orchestrator = new EvaluationOrchestrator(root);
71142
71200
  const spinner = new ProcessAnimation();
71143
71201
  spinner.start();
@@ -71150,7 +71208,7 @@ var init_evaluate_command = __esm({
71150
71208
  files,
71151
71209
  idea: options["idea"] || inferred.idea || void 0,
71152
71210
  code: options["code"] || inferred.code || void 0,
71153
- regenerateCriteria: !!options["regenerate"] || !!options["regen"] || !options["criteria"] || !!inferred.regenerate,
71211
+ regenerateCriteria: noRegen ? false : !!options["regenerate"] || !!options["regen"] || !options["criteria"] || !!inferred.regenerate,
71154
71212
  llmScoring: options["no-llm-scoring"] ? false : true,
71155
71213
  criteriaOnly: !!options["criteria-only"] || !!inferred.criteriaOnly
71156
71214
  });
@@ -71168,29 +71226,34 @@ var init_evaluate_command = __esm({
71168
71226
  "- A one-line overall verdict",
71169
71227
  "- A weighted score (0-100)",
71170
71228
  "- A short bullet breakdown per criterion (name: score/100 - brief reason)",
71171
- "Be direct and professional."
71229
+ "Be direct and professional.",
71230
+ // Strong guard: attachments are uploaded; do not ask for local file content
71231
+ "The input files are already uploaded and referenced via file URIs.",
71232
+ "Use the provided attachments; do not ask the user to paste file contents.",
71233
+ "Do not claim you cannot access local file paths."
71172
71234
  ].join("\n");
71173
71235
  if (result === null) {
71174
71236
  const rel = path11__namespace.default.relative(root, criteriaPath);
71175
71237
  return this.success(`Criteria file created: ${rel}`);
71176
71238
  }
71239
+ if (result?.passThroughText) {
71240
+ return this.success(result.passThroughText, {
71241
+ type: "evaluation-assess-passthrough",
71242
+ total: 0
71243
+ });
71244
+ }
71177
71245
  const breakdown = result.details.map((d) => `- ${d.name} (${Math.round(d.weight * 100)}%): ${Math.round(d.score * 100)}/100 - ${d.reason}`).join("\n");
71178
71246
  const user = [
71179
71247
  `Project root: ${path11__namespace.default.basename(root)}`,
71180
71248
  `Criteria file: ${path11__namespace.default.relative(root, criteriaPath)}`,
71181
- `Heuristic total: ${Math.round(result.totalScore * 100)}/100`,
71249
+ `Weighted total: ${Math.round(result.totalScore * 100)}/100`,
71182
71250
  `Breakdown:
71183
71251
  ${breakdown}`,
71184
71252
  options["idea"] || inferred.idea ? `Idea:
71185
71253
  ${options["idea"] || inferred.idea}` : void 0,
71186
71254
  options["code"] || inferred.code ? `Code snippet:
71187
- ${options["code"] || inferred.code}` : void 0,
71188
- files.length ? `File contents:
71189
- ${files.map((f3) => {
71190
- const fObj = result?.files?.find?.((x2) => x2.path === f3);
71191
- return fObj ? `### ${fObj.path}
71192
- ${fObj.content}` : `### ${f3}`;
71193
- }).join("\n\n")}` : void 0
71255
+ ${options["code"] || inferred.code}` : void 0
71256
+ // No inline file contents; rely on attachments only
71194
71257
  ].filter(Boolean).join("\n\n");
71195
71258
  const llmSpinner = new ProcessAnimation();
71196
71259
  llmSpinner.start();
@@ -71200,42 +71263,53 @@ ${fObj.content}` : `### ${f3}`;
71200
71263
  "\n---\n",
71201
71264
  user
71202
71265
  ].join("\n");
71266
+ const attachments = Array.isArray(result?.files) ? result.files.map((f3) => ({
71267
+ name: path11__namespace.default.basename(String(f3.path || "attachment.txt")),
71268
+ path: String(f3.path || ""),
71269
+ mime: String(f3.mime || "text/plain"),
71270
+ data_base64: f3.binaryBase64 ? String(f3.binaryBase64) : f3.content ? Buffer.from(f3.content, "utf8").toString("base64") : void 0
71271
+ })).filter((a) => !!a.data_base64) : [];
71203
71272
  const response = await callAPI("/v1/ai-proxy", {
71204
71273
  method: "POST",
71205
71274
  body: {
71275
+ // Force Google route and always use the attachments-capable model
71276
+ provider: "google",
71277
+ model: "gemini-2.5-flash",
71206
71278
  prompt: enriched,
71207
- taskType: "evaluation"
71279
+ taskType: "evaluation",
71280
+ metadata: attachments.length ? { attachments } : void 0
71208
71281
  }
71209
71282
  });
71210
71283
  const routedModel = response?.data?.routedModel;
71211
71284
  const content = (response?.data?.content || response?.output || "").trim();
71285
+ const uploads = response?.data?.uploads || [];
71212
71286
  if (content) {
71213
- return this.success(content, {
71287
+ const uploadNote = uploads.length ? `Attached ${uploads.length} file(s) uploaded and referenced.` : attachments.length ? `Attached ${attachments.length} local file(s).` : void 0;
71288
+ const finalText = uploadNote ? `${uploadNote}
71289
+
71290
+ ${content}` : content;
71291
+ return this.success(finalText, {
71214
71292
  type: "evaluation-assess",
71215
71293
  total: result.totalScore,
71216
71294
  details: result.details,
71217
- routedModel
71295
+ routedModel,
71296
+ uploads
71218
71297
  });
71219
71298
  }
71220
71299
  } catch (e2) {
71300
+ try {
71301
+ llmSpinner.stop();
71302
+ } catch {
71303
+ }
71304
+ const msg = e2?.message || "LLM scoring failed";
71305
+ return this.error(`Evaluation failed: ${msg}`, "EVALUATION_ERROR", "LLM scoring is required and fallback is disabled");
71221
71306
  } finally {
71222
71307
  try {
71223
71308
  llmSpinner.stop();
71224
71309
  } catch {
71225
71310
  }
71226
71311
  }
71227
- const lines = [];
71228
- lines.push("Evaluation Summary (local fallback)");
71229
- lines.push(`Weighted Score: ${Math.round(result.totalScore * 100)}/100`);
71230
- lines.push("\nBreakdown:");
71231
- for (const d of result.details) {
71232
- lines.push(`- ${d.name} (${Math.round(d.weight * 100)}%): ${Math.round(d.score * 100)}/100 - ${d.reason}`);
71233
- }
71234
- return this.success(lines.join("\n"), {
71235
- type: "evaluation-assess",
71236
- total: result.totalScore,
71237
- details: result.details
71238
- });
71312
+ return this.error("Evaluation failed: unknown state", "EVALUATION_ERROR");
71239
71313
  }
71240
71314
  /**
71241
71315
  * Run A/B evaluation test