@bonginkan/maria 4.4.3 → 4.4.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,4 +1,4 @@
- # MARIA - AI Development Platform v4.4.3
+ # MARIA - AI Development Platform v4.4.4
  
  [![npm version](https://img.shields.io/npm/v/@bonginkan/maria.svg)](https://www.npmjs.com/package/@bonginkan/maria)
  [![License](https://img.shields.io/badge/license-Multi--tier-blue.svg)](LICENSE)
@@ -10,7 +10,7 @@
  
  > **Enterprise-grade AI development platform with 100% command availability and comprehensive fallback support**
  
- ## 🚀 What's New in v4.4.3 (October, 2025)
+ ## 🚀 What's New in v4.4.4 (October, 2025)
  ### Functional enhancements
  - **Improved coding**
  - **Enhanced video/image support**
@@ -933,7 +933,7 @@ await secureWorkflow.executeWithAuth(workflowDefinition, securityContext);
  ### Quick Installation
  ```bash
  # Install globally (recommended)
- npm install -g @bonginkan/maria@4.4.3
+ npm install -g @bonginkan/maria@4.4.4
  
  # Verify installation
  maria --version # Should show v4.3.9
@@ -1137,7 +1137,7 @@ MARIA CODE is distributed under a comprehensive licensing system designed for in
  
  *MARIA v4.1.4 represents the pinnacle of multimodal AI development platform evolution - combining revolutionary voice-to-code capabilities, advanced memory systems, and comprehensive command ecosystems with enterprise-grade security and performance. This release establishes MARIA as the definitive choice for developers and Fortune 500 enterprises seeking intelligent, multimodal development experiences with GraphRAG intelligence, multilingual support, and zero-anxiety coding workflows.*
  
- **Transform your development experience today**: `npm install -g @bonginkan/maria@4.4.3`
+ **Transform your development experience today**: `npm install -g @bonginkan/maria@4.4.4`
  
  🌐 **Official Website**: [https://maria-code.ai](https://maria-code.ai)
  💬 **Community**: [https://discord.gg/SMSmSGcEQy](https://discord.gg/SMSmSGcEQy)
@@ -1,6 +1,6 @@
  {
    "version": "lite-1.0.0",
-   "generatedAt": "2025-10-21T04:00:45.377Z",
+   "generatedAt": "2025-10-21T06:25:21.206Z",
    "totalCommands": 20,
    "readyCount": 20,
    "partialCount": 0,
@@ -8798,14 +8798,37 @@ var init_llm_health_checker = __esm({
  const _lmsPath = "/Users/bongin_max/.lmstudio/bin/lms";
  return new Promise((resolve19) => {
    const _child = spawn5(_lmsPath, ["server", "start"], {
-     stdio: "ignore",
-     detached: true
+     stdio: ["ignore", "pipe", "pipe"],
+     detached: true,
+     env: { ...process.env, NO_COLOR: "1", FORCE_COLOR: "0" }
    });
-   _child.on("_error", () => {
+   _child.on("error", () => {
      resolve19(false);
    });
+   try {
+     _child.stdout?.on("data", (d) => {
+       if (process.env.MARIA_DEBUG === "1") {
+         try {
+           console.log("[DEBUG/lms][server]", String(d));
+         } catch {
+         }
+       }
+     });
+     _child.stderr?.on("data", (d) => {
+       if (process.env.MARIA_DEBUG === "1") {
+         try {
+           console.log("[DEBUG/lms][server][err]", String(d));
+         } catch {
+         }
+       }
+     });
+   } catch {
+   }
    _child.on("spawn", () => {
-     _child.unref();
+     try {
+       _child.unref();
+     } catch {
+     }
      setTimeout(async () => {
        const _status = await this.checkService("LM Studio");
        resolve19(_status.isRunning);
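This hunk changes how the bundled health checker launches LM Studio's server: stdout/stderr are now piped instead of discarded so they can be echoed when `MARIA_DEBUG=1`, `NO_COLOR`/`FORCE_COLOR` keep that output free of ANSI escape codes, the misspelled `"_error"` event name is corrected to `"error"`, and `unref()` is wrapped in try/catch. A minimal sketch of the same pattern (the helper name is hypothetical, for illustration only):

```typescript
import { spawn } from "node:child_process";

// Hypothetical helper illustrating the pattern above: start a long-lived
// server detached from the parent, keep its output piped for optional
// debug logging, and force plain (non-ANSI) output for readable logs.
function startDetachedServer(cmd: string, args: string[]): void {
  const child = spawn(cmd, args, {
    stdio: ["ignore", "pipe", "pipe"], // pipe output instead of discarding it
    detached: true,
    env: { ...process.env, NO_COLOR: "1", FORCE_COLOR: "0" },
  });
  child.on("error", () => {
    // e.g. ENOENT when the binary is missing; without a listener this
    // would surface as an uncaught "error" event
  });
  const forward = (tag: string) => (chunk: Buffer) => {
    if (process.env.MARIA_DEBUG === "1") console.log(tag, String(chunk));
  };
  child.stdout?.on("data", forward("[server]"));
  child.stderr?.on("data", forward("[server][err]"));
  child.unref(); // let the parent exit without waiting for the child
}
```

One trade-off worth noting: per Node's documentation, a detached child whose stdio is piped remains connected to the parent, unlike a fully `"ignore"`d child, so this pattern trades a little detachment cleanliness for observability.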
@@ -26067,8 +26090,8 @@ var require_package = __commonJS({
  "package.json"(exports, module) {
    module.exports = {
      name: "@bonginkan/maria",
-     version: "4.4.3",
-     description: "\u{1F680} MARIA v4.4.3 - Enterprise AI Development Platform with identity system and character voice implementation. Features 74 production-ready commands with comprehensive fallback implementation, local LLM support, and zero external dependencies. Includes natural language coding, AI safety evaluation, intelligent evolution system, episodic memory with PII masking, and real-time monitoring dashboard. Built with TypeScript AST-powered code generation, OAuth2.0 + PKCE authentication, quantum-resistant cryptography, and enterprise-grade performance.",
+     version: "4.4.4",
+     description: "\u{1F680} MARIA v4.4.4 - Enterprise AI Development Platform with identity system and character voice implementation. Features 74 production-ready commands with comprehensive fallback implementation, local LLM support, and zero external dependencies. Includes natural language coding, AI safety evaluation, intelligent evolution system, episodic memory with PII masking, and real-time monitoring dashboard. Built with TypeScript AST-powered code generation, OAuth2.0 + PKCE authentication, quantum-resistant cryptography, and enterprise-grade performance.",
      keywords: [
        "ai",
        "cli",
@@ -28224,7 +28247,7 @@ var init_AuthenticationManager = __esm({
  const response = await fetch(`${this.apiBase}/api/user/profile`, {
    headers: {
      "Authorization": `Bearer ${tokens2.accessToken}`,
-     "User-Agent": `maria-cli/${process.env.CLI_VERSION || "4.4.3"}`
+     "User-Agent": `maria-cli/${process.env.CLI_VERSION || "4.4.4"}`
    }
  });
  if (response.status === 401) {
@@ -28878,7 +28901,7 @@ async function callApi(path66, init3 = {}) {
  "Authorization": `Bearer ${token}`,
  "X-Device-Id": getDeviceId(),
  "X-Session-Id": getSessionId() || "",
- "User-Agent": `maria-cli/${process.env.CLI_VERSION || "4.4.3"}`,
+ "User-Agent": `maria-cli/${process.env.CLI_VERSION || "4.4.4"}`,
  "Content-Type": init3.headers?.["Content-Type"] || "application/json"
});
const doFetch = async (token) => {
@@ -29159,11 +29182,43 @@ function debugLog(...args2) {
    }
  }
  async function isUp(provider) {
+   if (provider === "lmstudio") {
+     try {
+       const r2 = await runCommand("lms", ["ls"], void 0, 4e3);
+       if (r2.code === 0 && (r2.stdout.trim() || r2.stderr.trim())) {
+         return "http://localhost:1234/v1";
+       }
+     } catch {
+     }
+   }
    const res = await discoverLocalProvider(provider, { timeoutMs: 600, nearbyScan: true });
    return res?.base || null;
  }
  async function tryStartLMStudio() {
-   child_process.spawn("lms", ["server", "start"], { stdio: "ignore", detached: true }).unref();
+   const child = child_process.spawn("lms", ["server", "start"], { stdio: ["ignore", "pipe", "pipe"], detached: true, env: { ...process.env, NO_COLOR: "1", FORCE_COLOR: "0" } });
+   try {
+     child.stdout?.on("data", (d) => {
+       if (process.env.MARIA_DEBUG === "1") {
+         try {
+           console.log("[DEBUG/lms][server]", String(d));
+         } catch {
+         }
+       }
+     });
+     child.stderr?.on("data", (d) => {
+       if (process.env.MARIA_DEBUG === "1") {
+         try {
+           console.log("[DEBUG/lms][server][err]", String(d));
+         } catch {
+         }
+       }
+     });
+   } catch {
+   }
+   try {
+     child.unref();
+   } catch {
+   }
  }
  async function tryStartOllama() {
    child_process.spawn("ollama", ["serve"], { stdio: "ignore", detached: true }).unref();
@@ -29188,7 +29243,7 @@ async function ensureLocalProviderUp(preferredOrder = ["lmstudio", "ollama"]) {
  debugLog("attempting autostart", p);
  if (p === "lmstudio") await tryStartLMStudio();
  else await tryStartOllama();
- const providerTimeout = p === "lmstudio" ? parseInt(process.env.LMSTUDIO_STARTUP_TIMEOUT_MS || "") || 9e4 : parseInt(process.env.OLLAMA_STARTUP_TIMEOUT_MS || "") || 2e4;
+ const providerTimeout = p === "lmstudio" ? parseInt(process.env.LMSTUDIO_STARTUP_TIMEOUT_MS || "") || 3e4 : parseInt(process.env.OLLAMA_STARTUP_TIMEOUT_MS || "") || 2e4;
  const base = await waitUntilUp(p, providerTimeout);
  if (base) {
    debugLog("provider started", p, base);
@@ -29250,6 +29305,29 @@ function pickBestModel(models, prefs) {
    });
    return sorted[0] || null;
  }
+ function extractTextFromResponsesApi(body) {
+   try {
+     const out = Array.isArray(body?.output) ? body.output : [];
+     for (const item of out) {
+       const contents = Array.isArray(item?.content) ? item.content : [];
+       for (const c of contents) {
+         if (typeof c?.text === "string" && c.text.trim()) return c.text.trim();
+       }
+     }
+     if (typeof body?.text === "string" && body.text.trim()) return body.text.trim();
+     if (typeof body?.content === "string" && body.content.trim()) return body.content.trim();
+   } catch {
+   }
+   return null;
+ }
+ function supportsReasoningForLmStudioModel(modelId) {
+   try {
+     const fam = parseModelId(modelId).family.toLowerCase();
+     return fam === "gpt-oss";
+   } catch {
+     return false;
+   }
+ }
  async function listModels(provider, baseUrl) {
    const viaCli = await listModelsViaCli(provider).catch(() => []);
    if (viaCli.length) return viaCli;
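The new `extractTextFromResponsesApi` helper walks an OpenAI Responses-style body, returning the first non-empty `output[].content[].text`, then falling back to top-level `text` or `content` strings. An illustrative payload (not captured from a real server) showing what the happy path expects:

```typescript
// Illustrative body in the shape the helper above walks; given this input,
// extractTextFromResponsesApi would return "Hello from the local model".
const exampleBody = {
  output: [
    {
      type: "message",
      content: [{ type: "output_text", text: "Hello from the local model" }],
    },
  ],
};
```

The companion `supportsReasoningForLmStudioModel` gates the `reasoning` request field (used in the `generateViaCli` hunk below) to the `gpt-oss` model family only.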
@@ -29270,7 +29348,7 @@ async function listModels(provider, baseUrl) {
  }
  async function ensureBaselineModel(provider) {
    if (provider === "lmstudio") {
-     child_process.spawn("lms", ["model", "download", "openai/gpt-oss-20b"], { stdio: "ignore", detached: true }).unref();
+     child_process.spawn("lms", ["get", "openai/gpt-oss-20b"], { stdio: "ignore", detached: true }).unref();
      return;
    }
    child_process.spawn("ollama", ["pull", "gpt-oss:latest"], { stdio: "ignore", detached: true }).unref();
@@ -29314,7 +29392,10 @@ async function selectLocalProviderAndModel(prefs = {}) {
  }
  function runCommand(cmd, args2, input3, timeoutMs = 3e5) {
    return new Promise((resolve19) => {
-     const child = child_process.spawn(cmd, args2, { stdio: ["pipe", "pipe", "pipe"] });
+     const child = child_process.spawn(cmd, args2, {
+       stdio: ["pipe", "pipe", "pipe"],
+       env: { ...process.env, NO_COLOR: "1", FORCE_COLOR: "0", CI: "1" }
+     });
      let stdout2 = "";
      let stderr = "";
      const timer = setTimeout(() => {
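`runCommand` now pins the child's environment: `NO_COLOR`/`FORCE_COLOR` disable ANSI styling and `CI=1` pushes most CLIs into non-interactive output, which keeps the captured `stdout` parseable. A small illustration of what this avoids:

```typescript
// Colorized CLI output wraps text in ANSI escape sequences, which would
// otherwise leak into parsed model names:
const colored = "\u001b[32mopenai/gpt-oss-20b\u001b[0m";
const stripped = colored.replace(/\u001b\[[0-9;]*m/g, "");
console.log(stripped); // "openai/gpt-oss-20b"
```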
@@ -29329,10 +29410,14 @@ function runCommand(cmd, args2, input3, timeoutMs = 3e5) {
      child.stderr?.on("data", (d) => {
        stderr += d.toString();
      });
-     child.on("exit", (code) => {
+     child.on("close", (code) => {
        clearTimeout(timer);
        resolve19({ code, stdout: stdout2, stderr });
      });
+     child.on("error", () => {
+       clearTimeout(timer);
+       resolve19({ code: -1, stdout: stdout2, stderr });
+     });
      if (input3 && child.stdin) {
        child.stdin.write(input3);
        child.stdin.end();
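Two reliability fixes in `runCommand`: resolving on `"close"` rather than `"exit"` waits for the piped stdio streams to finish, so `stdout2`/`stderr` are complete when the promise settles; and the new `"error"` listener resolves with `code: -1` when the process cannot be spawned at all (e.g. the binary is not on `PATH`), instead of leaving an unhandled `error` event. A small standalone demonstration:

```typescript
import { spawn } from "node:child_process";

// "exit" can fire while piped output is still draining; "close" fires only
// after all stdio streams have ended, so captured output is complete.
// "error" covers spawn failures such as ENOENT for a missing binary.
const child = spawn("definitely-not-installed", []);
child.on("error", (err: NodeJS.ErrnoException) => {
  console.log("spawn failed:", err.code); // typically "ENOENT"
});
child.on("close", (code) => {
  console.log("closed with code:", code);
});
```

If both handlers end up firing, the second `resolve19` call is a harmless no-op, since a promise can only settle once.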
@@ -29368,36 +29453,66 @@ async function listModelsViaCli(provider) {
      return [];
    }
  }
- async function generateViaCli(provider, model, prompt) {
+ async function generateViaCli(provider, model, prompt, baseUrl) {
    try {
      if (provider === "lmstudio") {
-       const args2 = ["chat", model, "-p", prompt, "--yes", "--offline", "--ttl", "2"];
-       if (process.env.MARIA_DEBUG === "1") {
-         try {
-           console.log("[DEBUG/local][lmstudio-cli] args", args2);
-         } catch {
+       let lmsBase = baseUrl;
+       if (!lmsBase) {
+         const found = await discoverLocalProvider("lmstudio", { timeoutMs: 800, nearbyScan: true });
+         lmsBase = found?.base || "http://localhost:1234/v1";
+       }
+       const endpoint = `${lmsBase.replace(/\/$/, "")}/responses`;
+       const controller = new AbortController();
+       const timer = setTimeout(() => controller.abort(), 6e4);
+       try {
+         const withReasoning = supportsReasoningForLmStudioModel(model);
+         const r2 = await fetch(endpoint, {
+           method: "POST",
+           headers: { "Content-Type": "application/json" },
+           body: JSON.stringify({
+             model,
+             input: prompt,
+             ...withReasoning ? { reasoning: { effort: "high" } } : {}
+           }),
+           signal: controller.signal
+         });
+         if (r2.ok) {
+           const body = await r2.json().catch(() => ({}));
+           const text = extractTextFromResponsesApi(body);
+           if (text) return text;
          }
-       }
-       const exec2 = await runCommand("lms", args2);
-       if (process.env.MARIA_DEBUG === "1") {
-         try {
-           console.log("[DEBUG/local][lmstudio-cli] exit", exec2.code, "stdout.len:", exec2.stdout.length, "stderr.len:", exec2.stderr.length);
-         } catch {
+         const r22 = await fetch(endpoint, {
+           method: "POST",
+           headers: { "Content-Type": "application/json" },
+           body: JSON.stringify({
+             model,
+             messages: [{ role: "user", content: prompt }],
+             ...withReasoning ? { reasoning: { effort: "high" } } : {}
+           }),
+           signal: controller.signal
+         });
+         if (r22.ok) {
+           const body2 = await r22.json().catch(() => ({}));
+           const text2 = extractTextFromResponsesApi(body2);
+           if (text2) return text2;
          }
-       }
-       if (exec2.code === 0) {
-         const out = exec2.stdout.trim();
-         if (out) return out;
-         const err = exec2.stderr.trim();
-         if (err) return err;
+       } finally {
+         clearTimeout(timer);
        }
        return null;
      }
-     const res = await runCommand("ollama", ["run", model], prompt);
-     if (res.code === 0) return res.stdout.trim();
-     const res2 = await runCommand("ollama", ["run", model, prompt]);
-     if (res2.code === 0) return res2.stdout.trim();
-     return null;
+     try {
+       const res = await runCommand("ollama", ["run", "--keepalive", "0", model], prompt);
+       if (res.code === 0) return res.stdout.trim();
+       const res2 = await runCommand("ollama", ["run", "--keepalive", "0", model, prompt]);
+       if (res2.code === 0) return res2.stdout.trim();
+       return null;
+     } finally {
+       try {
+         await runCommand("ollama", ["stop", model], void 0, 8e3);
+       } catch {
+       }
+     }
    } catch {
      return null;
    }
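This is the largest behavioral change in the release: LM Studio generation no longer shells out to `lms chat`; instead it POSTs to the server's `/responses` endpoint with a Responses-style `{ model, input }` body, retries once with a chat-style `messages` array if no text comes back (note the fallback in the hunk targets the same `/responses` endpoint, not `/chat/completions`), and aborts either attempt after `6e4` ms (60 s) through a shared `AbortController`. On the Ollama side, `--keepalive 0` plus a best-effort `ollama stop <model>` afterwards unloads the model once the one-shot generation finishes. A self-contained sketch of the HTTP path, assuming LM Studio's default local base URL:

```typescript
// Sketch of the first attempt in generateViaCli, assuming an OpenAI-compatible
// server on LM Studio's default port; endpoint and timeout mirror the hunk above.
async function generateOnce(model: string, prompt: string): Promise<string | null> {
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), 60_000); // 6e4 ms = 60 s
  try {
    const r = await fetch("http://localhost:1234/v1/responses", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ model, input: prompt }),
      signal: controller.signal,
    });
    if (!r.ok) return null;
    const body: any = await r.json().catch(() => ({}));
    // Same shape extractTextFromResponsesApi walks: output[].content[].text
    const text = body?.output?.[0]?.content?.[0]?.text;
    return typeof text === "string" && text.trim() ? text.trim() : null;
  } finally {
    clearTimeout(timer);
  }
}
```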
@@ -29492,12 +29607,12 @@ async function callAPI(endpoint, options = {}) {
  let inlineProvider;
  let inlineModel;
  {
-   const provMatch = /\s--provider=([a-zA-Z0-9_-]+)/i.exec(effectivePrompt);
+   const provMatch = /(?:^|\s)--provider(?:=|\s+)([^\s]+)/i.exec(effectivePrompt);
    if (provMatch) inlineProvider = provMatch[1].toLowerCase();
-   const modelMatch = /\s--model=([^\s]+)/i.exec(effectivePrompt);
+   const modelMatch = /(?:^|\s)--model(?:=|\s+)([^\s]+)/i.exec(effectivePrompt);
    if (modelMatch) inlineModel = modelMatch[1];
    if (inlineProvider || inlineModel) {
-     effectivePrompt = effectivePrompt.replace(/\s--provider=[^\s]+/ig, "").replace(/\s--model=[^\s]+/ig, "").trim();
+     effectivePrompt = effectivePrompt.replace(/(?:^|\s)--provider(?:=|\s+)[^\s]+/ig, " ").replace(/(?:^|\s)--model(?:=|\s+)[^\s]+/ig, " ").replace(/\s{2,}/g, " ").trim();
    }
  }
  const { selectLocalProviderAndModel: selectLocalProviderAndModel2, generateViaCli: generateViaCli2 } = await Promise.resolve().then(() => (init_local_llm_manager(), local_llm_manager_exports));
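The widened patterns accept both `--provider=value` and `--provider value`, including at the very start of the prompt, and the cleanup pass now substitutes a space (then collapses runs of spaces) so words on either side of a removed flag don't get glued together. A quick check of the new provider pattern with illustrative prompts:

```typescript
const provider = /(?:^|\s)--provider(?:=|\s+)([^\s]+)/i;
console.log(provider.exec("--provider lmstudio refactor this")?.[1]); // "lmstudio"
console.log(provider.exec("refactor this --provider=ollama")?.[1]);   // "ollama"
// The old /\s--provider=([a-zA-Z0-9_-]+)/i required a leading space and the
// "=" form, so the first prompt above would not have matched at all.
```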
@@ -52007,7 +52122,7 @@ var init_about_command = __esm({
  async execute(args2, context2) {
    const output3 = [];
    output3.push("");
-   output3.push(chalk14__default.default.cyan.bold("About MARIA v4.4.3"));
+   output3.push(chalk14__default.default.cyan.bold("About MARIA v4.4.4"));
    output3.push(chalk14__default.default.gray("\u2550".repeat(40)));
    output3.push("");
    output3.push(chalk14__default.default.white.bold("MARIA - Minimal API, Maximum Power"));