@bonginkan/maria 4.4.3 → 4.4.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +4 -4
- package/dist/READY.manifest.json +1 -1
- package/dist/bin/maria.cjs +155 -40
- package/dist/bin/maria.cjs.map +1 -1
- package/dist/cli.cjs +155 -40
- package/dist/cli.cjs.map +1 -1
- package/dist/index.js +2 -2
- package/dist/index.js.map +1 -1
- package/dist/server/express-server.cjs +1 -1
- package/dist/server/express-server.js +1 -1
- package/dist/server-express.cjs +1 -1
- package/dist/server-express.cjs.map +1 -1
- package/package.json +2 -2
- package/src/slash-commands/READY.manifest.json +1 -1
package/dist/cli.cjs
CHANGED
@@ -1829,7 +1829,7 @@ var init_AuthenticationManager = __esm({
       const response = await fetch(`${this.apiBase}/api/user/profile`, {
         headers: {
           "Authorization": `Bearer ${tokens2.accessToken}`,
-          "User-Agent": `maria-cli/${process.env.CLI_VERSION || "4.4.3"}`
+          "User-Agent": `maria-cli/${process.env.CLI_VERSION || "4.4.4"}`
         }
       });
       if (response.status === 401) {
@@ -2554,7 +2554,7 @@ async function callApi(path66, init3 = {}) {
     "Authorization": `Bearer ${token}`,
     "X-Device-Id": getDeviceId(),
     "X-Session-Id": getSessionId() || "",
-    "User-Agent": `maria-cli/${process.env.CLI_VERSION || "4.4.3"}`,
+    "User-Agent": `maria-cli/${process.env.CLI_VERSION || "4.4.4"}`,
     "Content-Type": init3.headers?.["Content-Type"] || "application/json"
   });
   const doFetch = async (token) => {
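The only change in these two hunks is the hard-coded User-Agent fallback, bumped from "4.4.3" to "4.4.4"; the header reports a different version only when CLI_VERSION is set in the environment. A minimal sketch of the pattern (buildUserAgent is illustrative, not a function in the bundle):

    // Falls back to the packaged version when CLI_VERSION is unset.
    function buildUserAgent(packagedVersion = "4.4.4") {
      return `maria-cli/${process.env.CLI_VERSION || packagedVersion}`;
    }
    // CLI_VERSION=4.4.4-rc.1 node cli.cjs => "maria-cli/4.4.4-rc.1"; unset => "maria-cli/4.4.4"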
@@ -3363,11 +3363,43 @@ function debugLog(...args2) {
   }
 }
 async function isUp(provider) {
+  if (provider === "lmstudio") {
+    try {
+      const r2 = await runCommand("lms", ["ls"], void 0, 4e3);
+      if (r2.code === 0 && (r2.stdout.trim() || r2.stderr.trim())) {
+        return "http://localhost:1234/v1";
+      }
+    } catch {
+    }
+  }
   const res = await discoverLocalProvider(provider, { timeoutMs: 600, nearbyScan: true });
   return res?.base || null;
 }
 async function tryStartLMStudio() {
-  child_process.spawn("lms", ["server", "start"], { stdio: "ignore", detached: true })
+  const child = child_process.spawn("lms", ["server", "start"], { stdio: ["ignore", "pipe", "pipe"], detached: true, env: { ...process.env, NO_COLOR: "1", FORCE_COLOR: "0" } });
+  try {
+    child.stdout?.on("data", (d) => {
+      if (process.env.MARIA_DEBUG === "1") {
+        try {
+          console.log("[DEBUG/lms][server]", String(d));
+        } catch {
+        }
+      }
+    });
+    child.stderr?.on("data", (d) => {
+      if (process.env.MARIA_DEBUG === "1") {
+        try {
+          console.log("[DEBUG/lms][server][err]", String(d));
+        } catch {
+        }
+      }
+    });
+  } catch {
+  }
+  try {
+    child.unref();
+  } catch {
+  }
 }
 async function tryStartOllama() {
   child_process.spawn("ollama", ["serve"], { stdio: "ignore", detached: true }).unref();
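This hunk adds a CLI fast path to isUp: before any HTTP discovery, LM Studio is probed via `lms ls`, and an exit code of 0 with any output on stdout or stderr is treated as a live server at the default base URL. A sketch of the same probe, assuming `lms` is on PATH (4 s budget, as in the diff):

    // Probe LM Studio via its CLI instead of HTTP discovery.
    const { execFile } = require("node:child_process");
    execFile("lms", ["ls"], { timeout: 4000, env: { ...process.env, NO_COLOR: "1", FORCE_COLOR: "0" } },
      (err, stdout, stderr) => {
        // Exit code 0 plus any output (stdout or stderr) counts as "server reachable".
        const up = !err && Boolean(stdout.trim() || stderr.trim());
        console.log(up ? "LM Studio assumed at http://localhost:1234/v1" : "LM Studio not detected");
      });

tryStartLMStudio also moves from stdio: "ignore" to piped stdout/stderr, surfaced only under MARIA_DEBUG=1, and sets NO_COLOR/FORCE_COLOR so any logged output is free of ANSI escapes.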
@@ -3392,7 +3424,7 @@ async function ensureLocalProviderUp(preferredOrder = ["lmstudio", "ollama"]) {
   debugLog("attempting autostart", p);
   if (p === "lmstudio") await tryStartLMStudio();
   else await tryStartOllama();
-  const providerTimeout = p === "lmstudio" ? parseInt(process.env.LMSTUDIO_STARTUP_TIMEOUT_MS || "") ||
+  const providerTimeout = p === "lmstudio" ? parseInt(process.env.LMSTUDIO_STARTUP_TIMEOUT_MS || "") || 3e4 : parseInt(process.env.OLLAMA_STARTUP_TIMEOUT_MS || "") || 2e4;
   const base = await waitUntilUp(p, providerTimeout);
   if (base) {
     debugLog("provider started", p, base);
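The startup timeouts settle at 3e4 (30 000 ms) for LM Studio and 2e4 (20 000 ms) for Ollama, each overridable via an environment variable:

    // The fallback chain: parseInt("") is NaN, which is falsy, so the literal default wins.
    const lmTimeout = parseInt(process.env.LMSTUDIO_STARTUP_TIMEOUT_MS || "") || 3e4; // 30 s
    const olTimeout = parseInt(process.env.OLLAMA_STARTUP_TIMEOUT_MS || "") || 2e4;   // 20 s
    // Note: an explicit "0" is also falsy and falls through to the default.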
@@ -3454,6 +3486,29 @@ function pickBestModel(models, prefs) {
   });
   return sorted[0] || null;
 }
+function extractTextFromResponsesApi(body) {
+  try {
+    const out = Array.isArray(body?.output) ? body.output : [];
+    for (const item of out) {
+      const contents = Array.isArray(item?.content) ? item.content : [];
+      for (const c of contents) {
+        if (typeof c?.text === "string" && c.text.trim()) return c.text.trim();
+      }
+    }
+    if (typeof body?.text === "string" && body.text.trim()) return body.text.trim();
+    if (typeof body?.content === "string" && body.content.trim()) return body.content.trim();
+  } catch {
+  }
+  return null;
+}
+function supportsReasoningForLmStudioModel(modelId) {
+  try {
+    const fam = parseModelId(modelId).family.toLowerCase();
+    return fam === "gpt-oss";
+  } catch {
+    return false;
+  }
+}
 async function listModels(provider, baseUrl) {
   const viaCli = await listModelsViaCli(provider).catch(() => []);
   if (viaCli.length) return viaCli;
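The new extractTextFromResponsesApi walks the nested output[].content[].text shape of an OpenAI-style Responses API payload, then falls back to flat text or content string fields. Illustrative payloads (field values are examples, assuming the function is in scope):

    extractTextFromResponsesApi({ output: [{ content: [{ type: "output_text", text: "hi" }] }] }); // "hi"
    extractTextFromResponsesApi({ text: "plain fallback" });  // "plain fallback"
    extractTextFromResponsesApi({ unrelated: true });         // null

supportsReasoningForLmStudioModel gates the reasoning: { effort: "high" } request field to the gpt-oss model family, using the bundle's parseModelId helper.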
@@ -3474,7 +3529,7 @@ async function listModels(provider, baseUrl) {
 }
 async function ensureBaselineModel(provider) {
   if (provider === "lmstudio") {
-    child_process.spawn("lms", ["
+    child_process.spawn("lms", ["get", "openai/gpt-oss-20b"], { stdio: "ignore", detached: true }).unref();
     return;
   }
   child_process.spawn("ollama", ["pull", "gpt-oss:latest"], { stdio: "ignore", detached: true }).unref();
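ensureBaselineModel now downloads openai/gpt-oss-20b via the lms CLI for LM Studio, mirroring the existing ollama pull path. Both downloads are detached and fire-and-forget, as in the diff; errors are deliberately ignored:

    const { spawn } = require("node:child_process");
    spawn("lms", ["get", "openai/gpt-oss-20b"], { stdio: "ignore", detached: true }).unref();
    spawn("ollama", ["pull", "gpt-oss:latest"], { stdio: "ignore", detached: true }).unref();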
@@ -3518,7 +3573,10 @@ async function selectLocalProviderAndModel(prefs = {}) {
 }
 function runCommand(cmd, args2, input3, timeoutMs = 3e5) {
   return new Promise((resolve19) => {
-    const child = child_process.spawn(cmd, args2, {
+    const child = child_process.spawn(cmd, args2, {
+      stdio: ["pipe", "pipe", "pipe"],
+      env: { ...process.env, NO_COLOR: "1", FORCE_COLOR: "0", CI: "1" }
+    });
     let stdout2 = "";
     let stderr = "";
     const timer = setTimeout(() => {
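runCommand now pins all three stdio streams to pipes and forces NO_COLOR=1, FORCE_COLOR=0, CI=1 so child CLIs emit plain, machine-parseable text. Usage sketch, inside an async context (signature and result shape are from the diff):

    // Capture uncolored CLI output with a 4 s budget.
    const r = await runCommand("lms", ["ls"], void 0, 4e3);
    if (r.code === 0) console.log(r.stdout); // plain text, safe to parse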
@@ -3533,10 +3591,14 @@ function runCommand(cmd, args2, input3, timeoutMs = 3e5) {
     child.stderr?.on("data", (d) => {
       stderr += d.toString();
     });
-    child.on("
+    child.on("close", (code) => {
       clearTimeout(timer);
       resolve19({ code, stdout: stdout2, stderr });
     });
+    child.on("error", () => {
+      clearTimeout(timer);
+      resolve19({ code: -1, stdout: stdout2, stderr });
+    });
     if (input3 && child.stdin) {
       child.stdin.write(input3);
       child.stdin.end();
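The added "error" handler matters because spawning a missing binary emits "error" and never "close"; without it, the promise would only settle when the outer timeout fired. A small demonstration:

    const { spawn } = require("node:child_process");
    const p = spawn("definitely-not-installed");
    p.on("error", (e) => console.log("spawn failed:", e.code)); // typically ENOENT; promise can settle
    p.on("close", () => console.log("unreached when the binary does not exist"));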
@@ -3572,36 +3634,66 @@ async function listModelsViaCli(provider) {
     return [];
   }
 }
-async function generateViaCli(provider, model, prompt) {
+async function generateViaCli(provider, model, prompt, baseUrl) {
   try {
     if (provider === "lmstudio") {
-
-      if (
-
-
-
+      let lmsBase = baseUrl;
+      if (!lmsBase) {
+        const found = await discoverLocalProvider("lmstudio", { timeoutMs: 800, nearbyScan: true });
+        lmsBase = found?.base || "http://localhost:1234/v1";
+      }
+      const endpoint = `${lmsBase.replace(/\/$/, "")}/responses`;
+      const controller = new AbortController();
+      const timer = setTimeout(() => controller.abort(), 6e4);
+      try {
+        const withReasoning = supportsReasoningForLmStudioModel(model);
+        const r2 = await fetch(endpoint, {
+          method: "POST",
+          headers: { "Content-Type": "application/json" },
+          body: JSON.stringify({
+            model,
+            input: prompt,
+            ...withReasoning ? { reasoning: { effort: "high" } } : {}
+          }),
+          signal: controller.signal
+        });
+        if (r2.ok) {
+          const body = await r2.json().catch(() => ({}));
+          const text = extractTextFromResponsesApi(body);
+          if (text) return text;
         }
-
-
-
-
-
-
+        const r22 = await fetch(endpoint, {
+          method: "POST",
+          headers: { "Content-Type": "application/json" },
+          body: JSON.stringify({
+            model,
+            messages: [{ role: "user", content: prompt }],
+            ...withReasoning ? { reasoning: { effort: "high" } } : {}
+          }),
+          signal: controller.signal
+        });
+        if (r22.ok) {
+          const body2 = await r22.json().catch(() => ({}));
+          const text2 = extractTextFromResponsesApi(body2);
+          if (text2) return text2;
         }
-      }
-
-      const out = exec2.stdout.trim();
-      if (out) return out;
-      const err = exec2.stderr.trim();
-      if (err) return err;
+      } finally {
+        clearTimeout(timer);
       }
       return null;
     }
-
-
-
-
-
+    try {
+      const res = await runCommand("ollama", ["run", "--keepalive", "0", model], prompt);
+      if (res.code === 0) return res.stdout.trim();
+      const res2 = await runCommand("ollama", ["run", "--keepalive", "0", model, prompt]);
+      if (res2.code === 0) return res2.stdout.trim();
+      return null;
+    } finally {
+      try {
+        await runCommand("ollama", ["stop", model], void 0, 8e3);
+      } catch {
+      }
+    }
   } catch {
     return null;
   }
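generateViaCli is reworked end to end: the LM Studio branch posts to {base}/responses with an input payload, retries once with a chat-style messages payload, and bounds both attempts with a single 60 s AbortController; the Ollama branch pipes the prompt over stdin to `ollama run --keepalive 0`, retries with the prompt as an argument, and always attempts `ollama stop <model>` in a finally block. The shared-timeout pattern in isolation, inside an async function (endpoint and model are the diff's defaults):

    const controller = new AbortController();
    const timer = setTimeout(() => controller.abort(), 6e4); // 60 s, as in the diff
    try {
      await fetch("http://localhost:1234/v1/responses", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ model: "openai/gpt-oss-20b", input: "hello" }),
        signal: controller.signal
      });
    } finally {
      clearTimeout(timer); // disarm the timer on every exit path, including abort
    }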
@@ -3696,12 +3788,12 @@ async function callAPI(endpoint, options = {}) {
   let inlineProvider;
   let inlineModel;
   {
-    const provMatch =
+    const provMatch = /(?:^|\s)--provider(?:=|\s+)([^\s]+)/i.exec(effectivePrompt);
     if (provMatch) inlineProvider = provMatch[1].toLowerCase();
-    const modelMatch =
+    const modelMatch = /(?:^|\s)--model(?:=|\s+)([^\s]+)/i.exec(effectivePrompt);
     if (modelMatch) inlineModel = modelMatch[1];
     if (inlineProvider || inlineModel) {
-      effectivePrompt = effectivePrompt.replace(
+      effectivePrompt = effectivePrompt.replace(/(?:^|\s)--provider(?:=|\s+)[^\s]+/ig, " ").replace(/(?:^|\s)--model(?:=|\s+)[^\s]+/ig, " ").replace(/\s{2,}/g, " ").trim();
     }
   }
   const { selectLocalProviderAndModel: selectLocalProviderAndModel2, generateViaCli: generateViaCli2 } = await Promise.resolve().then(() => (init_local_llm_manager(), local_llm_manager_exports));
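The new regexes accept both `--flag value` and `--flag=value` forms, then strip the matched flags from the prompt and collapse leftover whitespace. What they extract on a sample prompt (values illustrative):

    const provRe = /(?:^|\s)--provider(?:=|\s+)([^\s]+)/i;
    const prompt = "summarize this file --provider lmstudio --model openai/gpt-oss-20b";
    console.log(provRe.exec(prompt)?.[1]); // "lmstudio"
    console.log(prompt.replace(/(?:^|\s)--provider(?:=|\s+)[^\s]+/ig, " ")
      .replace(/(?:^|\s)--model(?:=|\s+)[^\s]+/ig, " ")
      .replace(/\s{2,}/g, " ").trim());    // "summarize this file"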
@@ -17286,8 +17378,8 @@ var require_package = __commonJS({
   "package.json"(exports, module) {
     module.exports = {
       name: "@bonginkan/maria",
-      version: "4.4.3",
-      description: "\u{1F680} MARIA v4.4.
+      version: "4.4.4",
+      description: "\u{1F680} MARIA v4.4.4 - Enterprise AI Development Platform with identity system and character voice implementation. Features 74 production-ready commands with comprehensive fallback implementation, local LLM support, and zero external dependencies. Includes natural language coding, AI safety evaluation, intelligent evolution system, episodic memory with PII masking, and real-time monitoring dashboard. Built with TypeScript AST-powered code generation, OAuth2.0 + PKCE authentication, quantum-resistant cryptography, and enterprise-grade performance.",
       keywords: [
         "ai",
         "cli",
@@ -27306,7 +27398,7 @@ var init_about_command = __esm({
   async execute(args2, context2) {
     const output3 = [];
     output3.push("");
-    output3.push(chalk42__default.default.cyan.bold("About MARIA v4.4.3"));
+    output3.push(chalk42__default.default.cyan.bold("About MARIA v4.4.4"));
     output3.push(chalk42__default.default.gray("\u2550".repeat(40)));
     output3.push("");
     output3.push(chalk42__default.default.white.bold("MARIA - Minimal API, Maximum Power"));
@@ -66483,14 +66575,37 @@ var init_llm_health_checker = __esm({
   const _lmsPath = "/Users/bongin_max/.lmstudio/bin/lms";
   return new Promise((resolve19) => {
     const _child = spawn5(_lmsPath, ["server", "start"], {
-      stdio: "ignore",
-      detached: true
+      stdio: ["ignore", "pipe", "pipe"],
+      detached: true,
+      env: { ...process.env, NO_COLOR: "1", FORCE_COLOR: "0" }
     });
-    _child.on("
+    _child.on("error", () => {
       resolve19(false);
     });
+    try {
+      _child.stdout?.on("data", (d) => {
+        if (process.env.MARIA_DEBUG === "1") {
+          try {
+            console.log("[DEBUG/lms][server]", String(d));
+          } catch {
+          }
+        }
+      });
+      _child.stderr?.on("data", (d) => {
+        if (process.env.MARIA_DEBUG === "1") {
+          try {
+            console.log("[DEBUG/lms][server][err]", String(d));
+          } catch {
+          }
+        }
+      });
+    } catch {
+    }
     _child.on("spawn", () => {
-
+      try {
+        _child.unref();
+      } catch {
+      }
       setTimeout(async () => {
         const _status = await this.checkService("LM Studio");
         resolve19(_status.isRunning);