@bonginkan/maria 4.4.1 → 4.4.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli.cjs CHANGED
@@ -1736,7 +1736,7 @@ var init_AuthenticationManager = __esm({
  const response = await fetch(`${this.apiBase}/api/user/profile`, {
  headers: {
  "Authorization": `Bearer ${tokens2.accessToken}`,
- "User-Agent": `maria-cli/${process.env.CLI_VERSION || "4.4.1"}`
+ "User-Agent": `maria-cli/${process.env.CLI_VERSION || "4.4.2"}`
  }
  });
  if (response.status === 401) {
@@ -2461,7 +2461,7 @@ async function callApi(path66, init3 = {}) {
  "Authorization": `Bearer ${token}`,
  "X-Device-Id": getDeviceId(),
  "X-Session-Id": getSessionId() || "",
- "User-Agent": `maria-cli/${process.env.CLI_VERSION || "4.4.1"}`,
+ "User-Agent": `maria-cli/${process.env.CLI_VERSION || "4.4.2"}`,
  "Content-Type": init3.headers?.["Content-Type"] || "application/json"
  });
  const doFetch = async (token) => {
@@ -13421,16 +13421,7 @@ var init_HelpCommand = __esm({
  async showCommandHelp(commandName) {
  const command = await this.readyService.getCommand(commandName);
  if (!command) {
- const searchResults = await this.readyService.searchCommands(commandName, 3);
- const suggestions = searchResults.map((r2) => `/${r2.command.name}`);
- return this.error(
- `READY command not found: /${commandName}`,
- "COMMAND_NOT_FOUND",
- {
- suggestions,
- tip: "Only contract-validated READY commands are shown"
- }
- );
+ return await this.showGeneralHelp();
  }
  const lines = this.formatMinimalUsage(command);
  return this.success(lines);
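The hunk above changes what happens when /help is asked about a name that is not a registered READY command: instead of returning a COMMAND_NOT_FOUND error with fuzzy-matched suggestions, the handler now falls back to the general help listing. A minimal sketch of the resulting path, using the method names as they appear above (illustrative only):

const command = await this.readyService.getCommand(commandName);
if (!command) {
  // unknown names no longer produce an error with suggestions
  return await this.showGeneralHelp();
}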
@@ -16622,8 +16613,8 @@ var require_package = __commonJS({
  "package.json"(exports, module) {
  module.exports = {
  name: "@bonginkan/maria",
- version: "4.4.1",
- description: "\u{1F680} MARIA v4.4.1 - Enterprise AI Development Platform with identity system and character voice implementation. Features 74 production-ready commands with comprehensive fallback implementation, local LLM support, and zero external dependencies. Includes natural language coding, AI safety evaluation, intelligent evolution system, episodic memory with PII masking, and real-time monitoring dashboard. Built with TypeScript AST-powered code generation, OAuth2.0 + PKCE authentication, quantum-resistant cryptography, and enterprise-grade performance.",
+ version: "4.4.2",
+ description: "\u{1F680} MARIA v4.4.2 - Enterprise AI Development Platform with identity system and character voice implementation. Features 74 production-ready commands with comprehensive fallback implementation, local LLM support, and zero external dependencies. Includes natural language coding, AI safety evaluation, intelligent evolution system, episodic memory with PII masking, and real-time monitoring dashboard. Built with TypeScript AST-powered code generation, OAuth2.0 + PKCE authentication, quantum-resistant cryptography, and enterprise-grade performance.",
  keywords: [
  "ai",
  "cli",
@@ -22937,6 +22928,94 @@ var init_ImageArgumentInference = __esm({
  init_api_caller();
  }
  });
+
+ // src/services/media-orchestrator/VideoArgumentInference.ts
+ function extractFirstJson4(text) {
+ const matches = text.match(/[\[{][\s\S]*[\]}]/g) || [];
+ for (const cand of matches) {
+ try {
+ JSON.parse(cand);
+ return cand;
+ } catch {
+ }
+ }
+ return null;
+ }
+ function parseSizeAny2(x2) {
+ if (!x2) return void 0;
+ const s2 = String(x2).trim().toLowerCase().replace(/p$/, "");
+ const m2 = /^(\d{2,4})x(\d{2,4})$/.exec(s2);
+ if (m2) {
+ const w = Number(m2[1]);
+ const h2 = Number(m2[2]);
+ if (Number.isFinite(w) && Number.isFinite(h2) && w >= 256 && h2 >= 256 && w <= 4096 && h2 <= 4096) return [w, h2];
+ return void 0;
+ }
+ if (s2 === "720") return [1280, 720];
+ if (s2 === "1080") return [1920, 1080];
+ return void 0;
+ }
+ async function inferVideoArgsLLM(promptText) {
+ const system = [
+ "You extract video generation options from user natural language.",
+ 'Return JSON only with keys: { "model"?: "sora-2"|"veo-3.1-generate-preview", "duration"?: number, "aspect"?: "16:9"|"9:16", "size"?: "WIDTHxHEIGHT"|"720"|"1080" }.',
+ "Rules:",
+ '- If user mentions OpenAI Sora, choose model "sora-2". If Google Veo, choose "veo-3.1-generate-preview".',
+ "- Duration options differ: for Sora use one of 4, 8, 12; for Veo prefer 4, 6, 8 (1080p fixed 8).",
+ "- If user says 10 seconds, map to closest allowed (e.g., 8 for Sora/Veo).",
+ '- Aspect: map "portrait"/"vertical" to 9:16, "landscape"/"wide" to 16:9. If square mentioned, prefer 1:1 but output closest supported by providers (choose 16:9).',
+ '- Size: if 720p or 1080p mentioned, return "720" or "1080". If explicit WxH, return as is if within 256..4096 per side.',
+ "Do not add explanations; JSON only."
+ ].join("\n");
+ let explicitModel;
+ let explicitProvider;
+ try {
+ const { extractExplicitModel: extractExplicitModel2, extractExplicitProvider: extractExplicitProvider2, providerFromModel: providerFromModel2 } = await Promise.resolve().then(() => (init_llm_flags(), llm_flags_exports));
+ explicitModel = extractExplicitModel2(promptText);
+ explicitProvider = extractExplicitProvider2(promptText) || providerFromModel2(explicitModel);
+ } catch {
+ }
+ let attachments = [];
+ try {
+ const { collectFileAttachmentsFromText: collectFileAttachmentsFromText2 } = await Promise.resolve().then(() => (init_attachment_utils(), attachment_utils_exports));
+ attachments = await collectFileAttachmentsFromText2(promptText, process.cwd(), { maxBytes: 12 * 1024 * 1024 });
+ } catch {
+ }
+ const resp = await callAPI("/v1/ai-proxy", {
+ method: "POST",
+ body: {
+ ...explicitProvider ? { provider: explicitProvider } : {},
+ ...explicitModel ? { model: explicitModel } : {},
+ prompt: `${system}
+
+ ---
+
+ ${promptText}`,
+ taskType: "media",
+ ...attachments.length ? { metadata: { attachments } } : {}
+ }
+ });
+ const raw = (resp?.data?.content || resp?.output || "").trim();
+ const jsonText = extractFirstJson4(raw) || raw;
+ let parsed;
+ try {
+ parsed = JSON.parse(jsonText);
+ } catch {
+ return {};
+ }
+ const out = {};
+ if (typeof parsed?.model === "string" && parsed.model.trim()) out.model = String(parsed.model).trim();
+ if (Number.isFinite(Number(parsed?.duration))) out.duration = Math.max(1, Math.floor(Number(parsed.duration)));
+ if (parsed?.aspect === "16:9" || parsed?.aspect === "9:16") out.aspect = parsed.aspect;
+ const size = parseSizeAny2(parsed?.size);
+ if (size) out.size = size;
+ return out;
+ }
+ var init_VideoArgumentInference = __esm({
+ "src/services/media-orchestrator/VideoArgumentInference.ts"() {
+ init_api_caller();
+ }
+ });
  function parseSize(value) {
  const m2 = /^\s*(\d{2,4})x(\d{2,4})\s*$/i.exec(value || "");
  if (!m2) throw new Error(`invalid size: ${value}`);
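For orientation, the size parser added in this hunk normalizes shorthand resolutions and explicit WIDTHxHEIGHT strings into a [width, height] pair. A minimal sketch of the expected mappings, assuming the calls run in the module scope above (illustrative only, not part of the bundle):

// parseSizeAny2 as defined in the added VideoArgumentInference module:
parseSizeAny2("720");       // [1280, 720]
parseSizeAny2("1080p");     // [1920, 1080]  (a trailing "p" is stripped)
parseSizeAny2("2560x1440"); // [2560, 1440]  (explicit WxH, each side within 256..4096)
parseSizeAny2("100x100");   // undefined     (below the 256px minimum per side)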
@@ -23156,6 +23235,15 @@ function normalizeVideoArgs(raw, root) {
  break;
  }
  }
+ try {
+ if (String(process.env.MARIA_USE_LLM_INFER || "1") === "1") {
+ global.__MARIA_VIDEO_LLM_INFER__ = async () => {
+ const llm = await inferVideoArgsLLM(prompt);
+ return llm;
+ };
+ }
+ } catch {
+ }
  if (!out.apply && !out.planOnly && !out.dryRun) {
  out.apply = true;
  }
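Read together with the /video handler hunk further below, the code added here forms a simple producer/consumer pair: the normalizer registers a lazy global hook (unless MARIA_USE_LLM_INFER is set to something other than "1", the default), and the command later awaits it. A condensed sketch of that flow, using the names as they appear in this bundle (illustrative only):

// Producer (normalizeVideoArgs): register a lazy LLM-inference hook unless opted out.
if (String(process.env.MARIA_USE_LLM_INFER || "1") === "1") {
  global.__MARIA_VIDEO_LLM_INFER__ = async () => inferVideoArgsLLM(prompt);
}

// Consumer (/video command): await the hook if present and merge the inferred
// { model?, duration?, aspect?, size? } into the flag-parsed arguments.
const hook = global.__MARIA_VIDEO_LLM_INFER__;
const llm = hook ? await hook() : undefined;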
@@ -23195,6 +23283,7 @@ var init_Normalizer = __esm({
  init_types4();
  init_NLInference();
  init_ImageArgumentInference();
+ init_VideoArgumentInference();
  }
  });
  function ensureDirSync(p) {
@@ -24410,7 +24499,39 @@ var init_video_command = __esm({
  try {
  const root = context2.environment.cwd;
  const cli = normalizeVideoArgs(args2.raw, root);
- cli.duration = 8;
+ try {
+ const hook = global.__MARIA_VIDEO_LLM_INFER__;
+ if (hook) {
+ const llm = await hook();
+ if (llm) {
+ if (llm.model && typeof llm.model === "string") cli.model = llm.model;
+ if (Number.isFinite(Number(llm.duration))) {
+ const d = Math.floor(Number(llm.duration));
+ cli.duration = Math.max(1, Math.min(60, d));
+ }
+ if (llm.aspect === "16:9" || llm.aspect === "9:16") {
+ cli.aspect = llm.aspect;
+ const [w, h2] = cli.size;
+ if (llm.size && Array.isArray(llm.size) && llm.size.length === 2) {
+ cli.size = llm.size;
+ } else if (w === 1280 && h2 === 720 || w === 720 && h2 === 1280 || w === 1920 && h2 === 1080 || w === 1080 && h2 === 1920) {
+ if (llm.aspect === "9:16") {
+ if (h2 === 720) cli.size = [720, 1280];
+ else if (h2 === 1080) cli.size = [1080, 1920];
+ } else {
+ if (w === 720) cli.size = [1280, 720];
+ else if (w === 1080) cli.size = [1920, 1080];
+ if (w === 1080 && h2 === 1920) cli.size = [1920, 1080];
+ }
+ }
+ } else if (llm.size && Array.isArray(llm.size) && llm.size.length === 2) {
+ cli.size = llm.size;
+ cli.aspect = cli.size[0] >= cli.size[1] ? "16:9" : "9:16";
+ }
+ }
+ }
+ } catch {
+ }
  if (cli.planOnly || cli.dryRun || !cli.apply) {
  const line = `Plan: video ${cli.duration}s @${cli.fps}fps ${cli.size[0]}x${cli.size[1]} ${cli.format} model=${cli.model || "auto"} concurrency=${cli.concurrency} retry=${cli.retry}`;
  const next = `Next: /video "${cli.prompt}" --duration ${cli.duration} --fps ${cli.fps} --res ${cli.size[0]}x${cli.size[1]} --apply`;
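A worked example of the merge above, using values taken from the branches shown (illustrative only): the previously hard-coded duration of 8 seconds is replaced by the inferred value, clamped into 1..60, and a recognized landscape preset is flipped to its portrait counterpart when the inferred aspect is "9:16".

// cli.size = [1280, 720] (default preset), llm = { aspect: "9:16", duration: 10 }
// -> cli.duration = 10, cli.aspect = "9:16", cli.size = [720, 1280]
//
// An explicit llm.size of the form [w, h] wins over the preset swap; when only a size
// is inferred, the aspect is derived as cli.size[0] >= cli.size[1] ? "16:9" : "9:16".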
@@ -26512,7 +26633,7 @@ var init_about_command = __esm({
  async execute(args2, context2) {
  const output3 = [];
  output3.push("");
- output3.push(chalk40__default.default.cyan.bold("About MARIA v4.4.1"));
+ output3.push(chalk40__default.default.cyan.bold("About MARIA v4.4.2"));
  output3.push(chalk40__default.default.gray("\u2550".repeat(40)));
  output3.push("");
  output3.push(chalk40__default.default.white.bold("MARIA - Minimal API, Maximum Power"));
@@ -38584,7 +38705,7 @@ var init_code_utils = __esm({
  });

  // src/services/creative/NovelArgumentInference.ts
- function extractFirstJson4(text) {
+ function extractFirstJson5(text) {
  if (!text) return null;
  const start = text.indexOf("{");
  const end = text.lastIndexOf("}");
@@ -38635,7 +38756,7 @@ ${rawText}`,
  }
  });
  const raw = (resp?.data?.content || resp?.output || "").trim();
- const jsonText = extractFirstJson4(raw) || raw;
+ const jsonText = extractFirstJson5(raw) || raw;
  let parsed = {};
  try {
  parsed = JSON.parse(jsonText);
@@ -39037,7 +39158,7 @@ var init_rate_limit_handler = __esm({
  });

  // src/services/code-orchestrator/ArgumentInference.ts
- function extractFirstJson5(text) {
+ function extractFirstJson6(text) {
  const fence = /```json\r?\n([\s\S]*?)```/i.exec(text);
  if (fence) return fence[1];
  const start = text.indexOf("{");
@@ -39089,7 +39210,7 @@ ${rawText}`,
  }
  });
  const raw = (resp?.data?.content || resp?.output || "").trim();
- const jsonText = extractFirstJson5(raw) || raw;
+ const jsonText = extractFirstJson6(raw) || raw;
  let parsed = {};
  try {
  parsed = JSON.parse(jsonText);
@@ -42077,7 +42198,7 @@ ${user}`
  } catch {
  }
  const content = (resp?.data?.content || resp?.content || "").trim();
- const extractFirstJson7 = (text) => {
+ const extractFirstJson8 = (text) => {
  const fence = /```\s*json\s*\r?\n([\s\S]*?)```/i.exec(text);
  if (fence) return fence[1];
  const generic = /```\s*\r?\n([\s\S]*?)```/i.exec(text);
@@ -42100,7 +42221,7 @@ ${user}`
  }
  return null;
  };
- const jsonText = extractFirstJson7(content) || content;
+ const jsonText = extractFirstJson8(content) || content;
  let parsed = {};
  try {
  parsed = JSON.parse(jsonText);
@@ -60014,7 +60135,7 @@ ${user}`,
  };
  }
  });
- function extractFirstJson6(text) {
+ function extractFirstJson7(text) {
  const fence = /```json\r?\n([\s\S]*?)```/i.exec(text);
  if (fence) return fence[1];
  const start = text.indexOf("{");
@@ -60076,7 +60197,7 @@ ${user}`,
  }
  });
  const raw = (response?.data?.content || response?.output || "").trim();
- const jsonText = extractFirstJson6(raw) || raw;
+ const jsonText = extractFirstJson7(raw) || raw;
  let parsed = {};
  try {
  parsed = JSON.parse(jsonText);