@bonginkan/maria 4.3.42 → 4.3.43

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,4 +1,4 @@
1
- # MARIA - AI Development Platform v4.3.42
1
+ # MARIA - AI Development Platform v4.3.43
2
2
 
3
3
  [![npm version](https://img.shields.io/npm/v/@bonginkan/maria.svg)](https://www.npmjs.com/package/@bonginkan/maria)
4
4
  [![License](https://img.shields.io/badge/license-Multi--tier-blue.svg)](LICENSE)
@@ -10,7 +10,7 @@
10
10
 
11
11
  > **Enterprise-grade AI development platform with 100% command availability and comprehensive fallback support**
12
12
 
13
- ## 🚀 What's New in v4.3.42 (October, 2025)
13
+ ## 🚀 What's New in v4.3.43 (October, 2025)
14
14
 
15
15
  ### Functional enhancements
16
16
  - **Enhanced Natural Language Support**: Main commands called automatically by natural language input
@@ -927,7 +927,7 @@ await secureWorkflow.executeWithAuth(workflowDefinition, securityContext);
927
927
  ### Quick Installation
928
928
  ```bash
929
929
  # Install globally (recommended)
930
- npm install -g @bonginkan/maria@4.3.42
930
+ npm install -g @bonginkan/maria@4.3.43
931
931
 
932
932
  # Verify installation
933
933
  maria --version # Should show v4.3.43
@@ -1131,7 +1131,7 @@ MARIA CODE is distributed under a comprehensive licensing system designed for in
1131
1131
 
1132
1132
  *MARIA v4.3.43 represents the pinnacle of multimodal AI development platform evolution - combining revolutionary voice-to-code capabilities, advanced memory systems, and comprehensive command ecosystems with enterprise-grade security and performance. This release establishes MARIA as the definitive choice for developers and Fortune 500 enterprises seeking intelligent, multimodal development experiences with GraphRAG intelligence, multilingual support, and zero-anxiety coding workflows.*
1133
1133
 
1134
- **Transform your development experience today**: `npm install -g @bonginkan/maria@4.3.42`
1134
+ **Transform your development experience today**: `npm install -g @bonginkan/maria@4.3.43`
1135
1135
 
1136
1136
  🌐 **Official Website**: [https://maria-code.ai](https://maria-code.ai)
1137
1137
  💬 **Community**: [https://discord.gg/SMSmSGcEQy](https://discord.gg/SMSmSGcEQy)
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "version": "lite-1.0.0",
3
- "generatedAt": "2025-10-05T11:54:11.483Z",
3
+ "generatedAt": "2025-10-05T12:39:54.354Z",
4
4
  "totalCommands": 19,
5
5
  "readyCount": 19,
6
6
  "partialCount": 0,
@@ -26066,8 +26066,8 @@ var require_package = __commonJS({
26066
26066
  "package.json"(exports, module) {
26067
26067
  module.exports = {
26068
26068
  name: "@bonginkan/maria",
26069
- version: "4.3.42",
26070
- description: "\u{1F680} MARIA v4.3.42 - Enterprise AI Development Platform with identity system and character voice implementation. Features 74 production-ready commands with comprehensive fallback implementation, local LLM support, and zero external dependencies. Includes natural language coding, AI safety evaluation, intelligent evolution system, episodic memory with PII masking, and real-time monitoring dashboard. Built with TypeScript AST-powered code generation, OAuth2.0 + PKCE authentication, quantum-resistant cryptography, and enterprise-grade performance.",
26069
+ version: "4.3.43",
26070
+ description: "\u{1F680} MARIA v4.3.43 - Enterprise AI Development Platform with identity system and character voice implementation. Features 74 production-ready commands with comprehensive fallback implementation, local LLM support, and zero external dependencies. Includes natural language coding, AI safety evaluation, intelligent evolution system, episodic memory with PII masking, and real-time monitoring dashboard. Built with TypeScript AST-powered code generation, OAuth2.0 + PKCE authentication, quantum-resistant cryptography, and enterprise-grade performance.",
26071
26071
  keywords: [
26072
26072
  "ai",
26073
26073
  "cli",
@@ -28104,7 +28104,7 @@ var init_AuthenticationManager = __esm({
28104
28104
  const response = await fetch(`${this.apiBase}/api/user/profile`, {
28105
28105
  headers: {
28106
28106
  "Authorization": `Bearer ${tokens2.accessToken}`,
28107
- "User-Agent": `maria-cli/${process.env.CLI_VERSION || "4.3.42"}`
28107
+ "User-Agent": `maria-cli/${process.env.CLI_VERSION || "4.3.43"}`
28108
28108
  }
28109
28109
  });
28110
28110
  if (response.status === 401) {
@@ -28758,7 +28758,7 @@ async function callApi(path65, init3 = {}) {
28758
28758
  "Authorization": `Bearer ${token}`,
28759
28759
  "X-Device-Id": getDeviceId(),
28760
28760
  "X-Session-Id": getSessionId() || "",
28761
- "User-Agent": `maria-cli/${process.env.CLI_VERSION || "4.3.42"}`,
28761
+ "User-Agent": `maria-cli/${process.env.CLI_VERSION || "4.3.43"}`,
28762
28762
  "Content-Type": init3.headers?.["Content-Type"] || "application/json"
28763
28763
  });
28764
28764
  const doFetch = async (token) => {
@@ -29103,11 +29103,28 @@ async function callAPI(endpoint, options = {}) {
29103
29103
  }
29104
29104
  }
29105
29105
  async function executeChat(messages) {
29106
- const response = await callAPI("/v1/ai-proxy", {
29107
- method: "POST",
29108
- body: { messages }
29109
- });
29110
- return response;
29106
+ const maxAttempts = 4;
29107
+ let lastErr;
29108
+ for (let attempt = 1; attempt <= maxAttempts; attempt++) {
29109
+ try {
29110
+ const response = await callAPI("/v1/ai-proxy", {
29111
+ method: "POST",
29112
+ body: { messages, taskType: "chat" }
29113
+ });
29114
+ return response;
29115
+ } catch (e2) {
29116
+ lastErr = e2;
29117
+ const isRateLimit = e2?.name === "RateLimitError" || /rate\s*limit|429/i.test(String(e2?.message || ""));
29118
+ if (!isRateLimit || attempt === maxAttempts) {
29119
+ throw e2;
29120
+ }
29121
+ const waitSec = Math.max(1, Math.min(60, Number(e2?.retryAfter || 0)));
29122
+ const baseMs = (waitSec > 0 ? waitSec * 1e3 : 1500) * attempt;
29123
+ const jitter = Math.floor(Math.random() * 400);
29124
+ await new Promise((r2) => setTimeout(r2, Math.min(3e4, baseMs + jitter)));
29125
+ }
29126
+ }
29127
+ throw lastErr;
29111
29128
  }
29112
29129
  async function executeCode(input3) {
29113
29130
  const isOptions = typeof input3 === "object";
@@ -29121,17 +29138,34 @@ async function executeCode(input3) {
29121
29138
  if (attachments && attachments.length > 0) {
29122
29139
  body.metadata = { attachments };
29123
29140
  }
29124
- const response = await callAPI("/v1/ai-proxy", {
29125
- method: "POST",
29126
- body
29127
- });
29128
- if (response.data?.routedModel) {
29129
- response.routedModel = response.data.routedModel;
29130
- }
29131
- if (response.data?.content) {
29132
- response.output = response.data.content;
29141
+ const maxAttempts = 5;
29142
+ let lastErr;
29143
+ for (let attempt = 1; attempt <= maxAttempts; attempt++) {
29144
+ try {
29145
+ const response = await callAPI("/v1/ai-proxy", {
29146
+ method: "POST",
29147
+ body
29148
+ });
29149
+ if (response.data?.routedModel) {
29150
+ response.routedModel = response.data.routedModel;
29151
+ }
29152
+ if (response.data?.content) {
29153
+ response.output = response.data.content;
29154
+ }
29155
+ return response;
29156
+ } catch (e2) {
29157
+ lastErr = e2;
29158
+ const isRateLimit = e2?.name === "RateLimitError" || /rate\s*limit|429/i.test(String(e2?.message || ""));
29159
+ if (!isRateLimit || attempt === maxAttempts) {
29160
+ throw e2;
29161
+ }
29162
+ const waitSec = Math.max(1, Math.min(60, Number(e2?.retryAfter || 0)));
29163
+ const baseMs = (waitSec > 0 ? waitSec * 1e3 : 2e3) * attempt;
29164
+ const jitter = Math.floor(Math.random() * 500);
29165
+ await new Promise((r2) => setTimeout(r2, Math.min(45e3, baseMs + jitter)));
29166
+ }
29133
29167
  }
29134
- return response;
29168
+ throw lastErr;
29135
29169
  }
29136
29170
  async function executeAIProxy(provider, model, messages, options) {
29137
29171
  return callAPI("/v1/ai-proxy", {
@@ -51261,7 +51295,7 @@ var init_about_command = __esm({
51261
51295
  async execute(args2, context2) {
51262
51296
  const output3 = [];
51263
51297
  output3.push("");
51264
- output3.push(chalk14__default.default.cyan.bold("\u{1F916} About MARIA v4.3.42"));
51298
+ output3.push(chalk14__default.default.cyan.bold("\u{1F916} About MARIA v4.3.43"));
51265
51299
  output3.push(chalk14__default.default.gray("\u2550".repeat(40)));
51266
51300
  output3.push("");
51267
51301
  output3.push(chalk14__default.default.white.bold("MARIA - Minimal API, Maximum Power"));
@@ -56056,14 +56090,7 @@ ${h2.head}`);
56056
56090
  { role: "user", content: user }
56057
56091
  ]);
56058
56092
  const raw = (resp?.output || "").trim();
56059
- const jsonText = (() => {
56060
- try {
56061
- const m2 = raw.match(/\[[\s\S]*\]/);
56062
- return m2 ? m2[0] : raw;
56063
- } catch {
56064
- return raw;
56065
- }
56066
- })();
56093
+ const jsonText = extractJsonSafe(raw, "array") || raw;
56067
56094
  const arr = JSON.parse(jsonText);
56068
56095
  const set = new Set(candidates.map((c) => c.toLowerCase()));
56069
56096
  const out = [];
@@ -56109,14 +56136,7 @@ ${h2.head}`);
56109
56136
  { role: "user", content: user }
56110
56137
  ]);
56111
56138
  const raw = (resp?.output || "").trim();
56112
- const jsonText = (() => {
56113
- try {
56114
- const m2 = raw.match(/\{[\s\S]*\}/);
56115
- return m2 ? m2[0] : raw;
56116
- } catch {
56117
- return raw;
56118
- }
56119
- })();
56139
+ const jsonText = extractJsonSafe(raw, "object") || raw;
56120
56140
  const parsed = JSON.parse(jsonText);
56121
56141
  if (parsed && (parsed.action === "modify" || parsed.action === "create") && typeof parsed.path === "string") {
56122
56142
  return { action: parsed.action, path: parsed.path.replace(/^\/+/, "") };