@bonginkan/maria 4.4.7 → 4.4.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -710,7 +710,7 @@ var GeminiMediaProvider = class {
  }
  const { GoogleGenAI } = __require("@google/genai");
  this.ai = new GoogleGenAI({ apiKey });
- this.primaryModel = opts?.model || process.env.MARIA_IMAGE_MODEL || "gemini-2.5-flash-image-preview";
+ this.primaryModel = opts?.model || process.env.MARIA_IMAGE_MODEL || "gemini-3-pro-image-preview";
  }
  async generateImage(req) {
  const modelName = this.primaryModel;
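
This hunk only changes the built-in default: gemini-2.5-flash-image-preview becomes gemini-3-pro-image-preview. Because opts?.model and the MARIA_IMAGE_MODEL environment variable still take precedence, a deployment that wants to stay on the previous model can pin it without a code change; a minimal sketch:

```js
// Pinning sketch: MARIA_IMAGE_MODEL still overrides the new built-in default.
// Set it before the provider is constructed (e.g. in the process environment).
process.env.MARIA_IMAGE_MODEL = "gemini-2.5-flash-image-preview";
```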
@@ -727,11 +727,8 @@ var GeminiMediaProvider = class {
  try {
  resp = await this.ai.models.generateContent({
  model: modelName,
- contents: [{ role: "user", parts: [{ text: String(req.prompt) }] }],
- generationConfig: {
- responseModalities: ["IMAGE"],
- responseMimeType: targetMime
- }
+ // For @google/genai, a simple string prompt is sufficient here.
+ contents: String(req.prompt ?? "")
  });
  } catch (err) {
  const errMsg = err?.message || String(err);
@@ -739,15 +736,19 @@ var GeminiMediaProvider = class {
  `GeminiMediaProvider.generateImage request failed: model=${modelName}; prompt="${promptPreview}"; error=${errMsg}`
  );
  }
- const feedback = resp?.response?.promptFeedback;
+ const root = resp?.response ?? resp;
+ const feedback = root?.promptFeedback || root?.prompt_feedback;
  const blockReason = feedback?.blockReason || feedback?.block_reason;
  if (blockReason) {
  const modelName2 = this.primaryModel;
  const reason = String(blockReason);
  throw new Error(`GeminiMediaProvider.policy_violation: model=${modelName2}; reason=${reason}`);
  }
- const parts = resp?.response?.candidates?.[0]?.content?.parts || [];
- for (const p of parts) {
+ const candidates = root?.candidates ?? [];
+ const parts = candidates?.[0]?.content?.parts || [];
+ for (let i = parts.length - 1; i >= 0; i--) {
+ const p = parts[i];
+ if (p?.thought === true) continue;
  const data = p?.inlineData?.data || p?.inline_data?.data;
  const mime = p?.inlineData?.mimeType || p?.inline_data?.mime_type || p?.inline_data?.mimeType || p?.inlineData?.mime_type;
  if (data) {
@@ -755,22 +756,17 @@ var GeminiMediaProvider = class {
  if (buf.length > 0) return { bytes: buf, mime: typeof mime === "string" ? mime : targetMime };
  }
  }
- try {
- const imagenModel = process.env.MARIA_IMAGE_FALLBACK_MODEL || "imagen-4.0-generate-001";
- const r2 = await this.ai.models.generateImages({
- model: imagenModel,
- prompt: String(req.prompt)
- });
- const img0 = r2?.generatedImages?.[0]?.image;
- const bytesB64 = img0?.imageBytes || img0?.bytesBase64Encoded;
- if (bytesB64) {
- const buf = Buffer.from(String(bytesB64), "base64");
- if (buf.length > 0) return { bytes: buf, mime: targetMime };
+ for (let i = parts.length - 1; i >= 0; i--) {
+ const p = parts[i];
+ const data = p?.inlineData?.data || p?.inline_data?.data;
+ const mime = p?.inlineData?.mimeType || p?.inline_data?.mime_type || p?.inline_data?.mimeType || p?.inlineData?.mime_type;
+ if (data) {
+ const buf = Buffer.from(String(data), "base64");
+ if (buf.length > 0) return { bytes: buf, mime: typeof mime === "string" ? mime : targetMime };
  }
- } catch {
  }
- const candidatesLen = resp?.response?.candidates?.length ?? 0;
- const finish = resp?.response?.candidates?.[0]?.finishReason || resp?.response?.promptFeedback?.blockReason || "unknown";
+ const candidatesLen = candidates.length ?? 0;
+ const finish = candidates?.[0]?.finishReason || root?.promptFeedback?.blockReason || "unknown";
  const firstPartKeys = parts[0] ? Object.keys(parts[0]).join(",") : "n/a";
  throw new Error(
  `GeminiMediaProvider.generateImage: no inline image returned; model=${modelName}; prompt="${promptPreview}"; candidates=${candidatesLen}; parts=${parts.length}; firstPartKeys=${firstPartKeys}; finish=${finish}`
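
Taken together, the GeminiMediaProvider hunks above change four things: the default model becomes gemini-3-pro-image-preview, the prompt is sent as a plain string instead of a structured contents array with generationConfig, the response root is normalized via resp?.response ?? resp so both SDK shapes are handled, and the Imagen fallback is replaced by a second reverse scan over the returned parts. A minimal standalone sketch of the resulting call-and-extract pattern is below; it assumes @google/genai is installed and that the API key arrives via a GEMINI_API_KEY environment variable (the variable name is an assumption, the bundle resolves its key elsewhere).

```js
// Minimal sketch of the new call-and-extract pattern, not the package's exact
// code path. Assumes Node 18+, @google/genai installed, and GEMINI_API_KEY set.
const { GoogleGenAI } = require("@google/genai");

async function generateImageBytes(prompt) {
  const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });
  const resp = await ai.models.generateContent({
    model: process.env.MARIA_IMAGE_MODEL || "gemini-3-pro-image-preview",
    // A plain string prompt is accepted; no generationConfig is required here.
    contents: String(prompt ?? "")
  });
  // Current @google/genai responses expose candidates at the top level; the
  // provider above also tolerates an older `resp.response` wrapper.
  const root = resp?.response ?? resp;
  const parts = root?.candidates?.[0]?.content?.parts || [];
  // Walk the parts from the end and skip "thought" parts, mirroring the diff.
  for (let i = parts.length - 1; i >= 0; i--) {
    const p = parts[i];
    if (p?.thought === true) continue;
    const data = p?.inlineData?.data;
    if (data) return Buffer.from(String(data), "base64");
  }
  throw new Error("no inline image returned");
}
```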
@@ -1736,7 +1732,7 @@ app.get("/api/status", (req, res) => {
  app.get("/", (req, res) => {
  res.json({
  name: "MARIA CODE API",
- version: "4.4.7",
+ version: "4.4.9",
  status: "running",
  environment: process.env.NODE_ENV || "development",
  endpoints: {
@@ -2156,7 +2152,7 @@ app.post("/api/v1/image", rateLimitMiddleware, async (req, res) => {
  const promptHash = hashPrompt(prompt);
  const manifest = {
  kind: "image",
- request: { promptHash, seed, params: { size: [w, h], format, count }, model: model || "gemini-2.5-flash-image-preview", provider: "google" },
+ request: { promptHash, seed, params: { size: [w, h], format, count }, model: model || "gemini-3-pro-image-preview", provider: "google" },
  artifacts: [],
  metrics: { durationMs: Date.now() - started, retries: 0, fallbacks: 0 },
  trace: Math.random().toString(36).slice(2, 8).toUpperCase()
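
The remaining hunks bump the version string reported by the root route to 4.4.9 and make the /api/v1/image manifest record gemini-3-pro-image-preview whenever no model is supplied in the request. A quick sanity check against a running instance is sketched below; the base URL and port are assumptions, only the route and the shape of its JSON response come from the diff.

```js
// Sanity-check sketch (run as an ES module on Node 18+ for top-level await):
// the root route in the diff returns { name, version, status, ... }.
const res = await fetch("http://localhost:3000/");
const info = await res.json();
console.log(info.name, info.version); // expected: "MARIA CODE API", "4.4.9"
```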