@bonginkan/maria 4.3.37 → 4.3.39

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -9437,7 +9437,7 @@ app.get("/api/status", (req, res) => {
  app.get("/", (req, res) => {
  res.json({
  name: "MARIA CODE API",
- version: "4.3.37",
+ version: "4.3.39",
  status: "running",
  environment: process.env.NODE_ENV || "development",
  endpoints: {
@@ -9716,6 +9716,113 @@ If you want, I can try again in a moment or help you rephrase.` : `Sorry, I coul
  return res.status(200).json({ success: true, data: { content: polite } });
  }
  });
+ app.post("/api/v1/research", rateLimitMiddleware, async (req, res) => {
+ try {
+ await loadProviderKeys();
+ const auth = req.headers.authorization;
+ if (!auth || !auth.startsWith("Bearer ")) {
+ return res.status(401).json({ error: "unauthorized", message: "Login required" });
+ }
+ const body = req.body || {};
+ const query = typeof body.query === "string" ? body.query.trim() : "";
+ const url = typeof body.url === "string" ? body.url.trim() : "";
+ const topK = Number.isFinite(body.topK) ? Math.max(1, Math.min(5, Math.floor(body.topK))) : 5;
+ if (!query && !url) {
+ return res.status(400).json({ error: "bad_request", message: "query or url required" });
+ }
+ const extractText = (html) => {
+ try {
+ const withoutScripts = html.replace(/<script[\s\S]*?<\/script>/gi, " ").replace(/<style[\s\S]*?<\/style>/gi, " ");
+ const noTags = withoutScripts.replace(/<[^>]+>/g, " ");
+ const unescaped = noTags.replace(/&nbsp;/g, " ").replace(/&amp;/g, "&").replace(/&lt;/g, "<").replace(/&gt;/g, ">").replace(/&quot;/g, '"').replace(/&#39;/g, "'");
+ return unescaped.replace(/\s+/g, " ").trim();
+ } catch {
+ return html;
+ }
+ };
+ const fetchText = async (targetUrl) => {
+ try {
+ const ctrl = new AbortController();
+ const to = setTimeout(() => ctrl.abort(), 2e4);
+ const r2 = await fetch(targetUrl, { method: "GET", signal: ctrl.signal });
+ clearTimeout(to);
+ if (!r2.ok) return null;
+ const html = await r2.text();
+ const titleMatch = /<title>([\s\S]*?)<\/title>/i.exec(html);
+ return { url: targetUrl, title: titleMatch ? titleMatch[1].trim() : void 0, text: extractText(html).slice(0, 12e4) };
+ } catch {
+ return null;
+ }
+ };
+ let sources = [];
+ let seedSummary = "";
+ if (url) {
+ sources = [{ url }];
+ } else {
+ const { GoogleGenAI } = __require("@google/genai");
+ const apiKey = process.env.GOOGLE_API_KEY || process.env.GEMINI_API_KEY;
+ if (!apiKey) {
+ return res.status(503).json({ error: "provider_unavailable", message: "Provider API key is not configured" });
+ }
+ const ai = new GoogleGenAI({ apiKey });
+ const groundingTool = { googleSearch: {} };
+ const config = { tools: [groundingTool] };
+ const modelName = process.env.MARIA_RESEARCH_MODEL || "gemini-2.5-flash";
+ const response2 = await ai.models.generateContent({ model: modelName, contents: String(query), config });
+ const textOut = String(response2?.text || (response2?.response?.candidates?.[0]?.content?.parts?.[0]?.text || ""));
+ seedSummary = textOut;
+ const chunks = response2?.response?.candidates?.[0]?.groundingMetadata?.groundingChunks || [];
+ const urls = chunks.map((c) => ({ url: String(c?.web?.uri || "").trim(), title: String(c?.web?.title || "").trim() })).filter((c) => c.url && /^https?:\/\//i.test(c.url));
+ const seen = /* @__PURE__ */ new Set();
+ const deduped = [];
+ for (const c of urls) {
+ if (!seen.has(c.url)) {
+ seen.add(c.url);
+ deduped.push(c);
+ }
+ }
+ sources = deduped.slice(0, topK);
+ }
+ const scraped = [];
+ for (const s2 of sources) {
+ const got = await fetchText(s2.url);
+ if (got) scraped.push({ url: got.url, title: got.title || s2.title, text: got.text });
+ }
+ const summarize = async (inputText) => {
+ const { GoogleGenAI } = __require("@google/genai");
+ const apiKey = process.env.GOOGLE_API_KEY || process.env.GEMINI_API_KEY;
+ if (!apiKey) return "";
+ const ai = new GoogleGenAI({ apiKey });
+ const modelName = process.env.MARIA_RESEARCH_MODEL || "gemini-2.5-flash";
+ const prompt = `Summarize the following web research into concise English plaintext.
+ - Focus on verifiable facts and key takeaways.
+ - Keep it under 12 sentences.
+ - Do not include markdown, just plain text.
+
+ CONTENT BEGIN
+ ${inputText}
+ CONTENT END`;
+ const resp = await ai.models.generateContent({ model: modelName, contents: prompt });
+ return String(resp?.text || resp?.response?.candidates?.[0]?.content?.parts?.[0]?.text || "").trim();
+ };
+ const combined = [seedSummary, ...scraped.map((s2) => `${s2.title ? `${s2.title}
+ ` : ""}${s2.text}`)].filter(Boolean).join("\n\n").slice(0, 18e4);
+ const summary = await summarize(combined) || (scraped[0]?.text?.slice(0, 600) || seedSummary || "");
+ try {
+ const idToken = auth.substring("Bearer ".length).trim();
+ const decoded = await decodeFirebaseToken(idToken).catch(() => null);
+ const uid = decoded?.uid || decoded?.sub || "current";
+ const idemKey = req.headers["idempotency-key"] || void 0;
+ await applyConsumption(uid, { requests: 1 }, idemKey);
+ } catch {
+ }
+ return res.json({ success: true, data: { summary, sources } });
+ } catch (error) {
+ console.error("[Research API] Error:", error);
+ const mapped = classifyMediaError(error);
+ return res.status(mapped.status).json({ error: mapped.code, message: mapped.message, hint: mapped.hint });
+ }
+ });
  app.post("/api/auth/revoke", async (req, res) => {
  try {
  const authHeader = req.headers.authorization || "";
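For reference, a minimal client call against the new research endpoint could look like the sketch below. The base URL and token source are placeholders and not part of the package; only the path, the Bearer auth requirement, the optional Idempotency-Key header, and the query/url/topK body fields come from the handler added in this hunk.

// Hedged sketch of a request to POST /api/v1/research (Node 18+, global fetch).
// base and idToken are placeholders; idToken is assumed to hold a Firebase ID token.
const base = "https://api.example.invalid";
const idToken = process.env.MARIA_ID_TOKEN || "";
const res = await fetch(`${base}/api/v1/research`, {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: `Bearer ${idToken}`,
    "Idempotency-Key": "research-demo-1" // optional; used for usage accounting
  },
  body: JSON.stringify({ query: "example research topic", topK: 3 })
});
const payload = await res.json();
if (payload.success) console.log(payload.data.summary, payload.data.sources);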
@@ -10179,19 +10286,26 @@ app.post("/api/v1/chat", rateLimitMiddleware, async (req, res) => {
  const decoded = await decodeFirebaseToken(idToken).catch(() => null);
  if (!decoded) return res.status(401).json({ error: "unauthorized" });
  const uid = decoded?.uid || decoded?.sub;
- const { message, model = "gpt-4" } = req.body;
- if (!message) {
- return res.status(400).json({
- error: "bad_request",
- message: "Message is required"
- });
+ const { message, model = "gemini-2.5-flash", messages } = req.body || {};
+ let effectiveMessage = void 0;
+ if (typeof message === "string" && message.trim()) {
+ effectiveMessage = String(message).trim();
+ } else if (Array.isArray(messages)) {
+ try {
+ const lastUser = [...messages].reverse().find((m2) => (m2?.role || "").toLowerCase() === "user");
+ effectiveMessage = lastUser?.content && String(lastUser.content) || messages.map((m2) => String(m2?.content || "")).filter(Boolean).join("\n");
+ } catch {
+ }
+ }
+ if (!effectiveMessage) {
+ return res.status(400).json({ error: "bad_request", message: "message or messages is required" });
  }
  const idemKey = req.headers["idempotency-key"] || void 0;
  await applyConsumption(uid, { requests: 1 }, idemKey);
  return res.json({
  success: true,
  data: {
- response: `Echo: ${message}`,
+ response: `Echo: ${effectiveMessage}`,
  model,
  createdAt: (/* @__PURE__ */ new Date()).toISOString()
  }
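With this change, /api/v1/chat accepts either the original message string or an OpenAI-style messages array, falling back to the last role "user" entry. A hedged sketch of both body shapes follows; base and idToken are placeholders.

// Both request shapes pass the updated validation (base/idToken are placeholders).
const base = "https://api.example.invalid";
const idToken = process.env.MARIA_ID_TOKEN || "";
const headers = { "Content-Type": "application/json", Authorization: `Bearer ${idToken}` };
// 1) Plain message field, as before
await fetch(`${base}/api/v1/chat`, { method: "POST", headers, body: JSON.stringify({ message: "hello" }) });
// 2) messages array; the handler uses the last entry with role "user"
await fetch(`${base}/api/v1/chat`, {
  method: "POST",
  headers,
  body: JSON.stringify({ messages: [{ role: "system", content: "be brief" }, { role: "user", content: "hello" }] })
});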
@@ -10208,8 +10322,18 @@ app.post("/v1/ai-proxy", rateLimitMiddleware, async (req, res) => {
  try {
  const auth = req.headers.authorization;
  if (!auth || !auth.startsWith("Bearer ")) return res.status(401).json({ error: "unauthorized" });
- const { prompt, taskType } = req.body || {};
- if (!prompt) return res.status(400).json({ error: "bad_request", message: "prompt required" });
+ const { prompt, taskType, research, messages } = req.body || {};
+ let effectivePrompt = void 0;
+ if (typeof prompt === "string" && prompt.trim()) {
+ effectivePrompt = String(prompt).trim();
+ } else if (Array.isArray(messages)) {
+ try {
+ const lastUser = [...messages].reverse().find((m2) => (m2?.role || "").toLowerCase() === "user");
+ effectivePrompt = lastUser?.content && String(lastUser.content) || messages.map((m2) => String(m2?.content || "")).filter(Boolean).join("\n");
+ } catch {
+ }
+ }
+ if (!effectivePrompt && !(taskType === "research" && research?.query)) return res.status(400).json({ error: "bad_request", message: "prompt or messages or research.query required" });
  const idToken = auth.substring("Bearer ".length).trim();
  const decoded = await decodeFirebaseToken(idToken).catch(() => null);
  if (!decoded) return res.status(401).json({ error: "unauthorized" });
@@ -10217,7 +10341,7 @@ app.post("/v1/ai-proxy", rateLimitMiddleware, async (req, res) => {
  const idemKey = req.headers["idempotency-key"] || void 0;
  if (process.env.MARIA_TELEMETRY === "1") {
  try {
- console.log(JSON.stringify({ ev: "ai_proxy_request", taskType: taskType || "unknown", promptLen: String(prompt).length }));
+ console.log(JSON.stringify({ ev: "ai_proxy_request", taskType: taskType || "unknown", promptLen: String(effectivePrompt || research?.query || "").length }));
  } catch {
  }
  }
@@ -10253,19 +10377,26 @@ app.post("/v1/ai-proxy", rateLimitMiddleware, async (req, res) => {
  const ai = new GoogleGenerativeAI2(gemKey);
  const modelName = process.env.MARIA_CODE_MODEL || "gemini-2.5-flash";
  const model2 = ai.getGenerativeModel({ model: modelName });
- const attachments = Array.isArray((req.body?.metadata || {}).attachments) ? (req.body.metadata.attachments || []).filter(Boolean) : [];
- const parts = [{ text: String(prompt) }];
- for (const a of attachments) {
- try {
- const b64 = String(a.data_base64 || "");
- const mime = String(a.mime || "application/octet-stream");
- if (!b64) continue;
- parts.push({ inlineData: { data: b64, mimeType: mime } });
- } catch {
+ let content2 = "";
+ if (taskType === "research" && research?.query) {
+ const tool = { googleSearch: {} };
+ const r2 = await ai.models.generateContent({ model: modelName, contents: String(research.query), config: { tools: [tool] } });
+ content2 = String(r2?.text?.() || r2?.text || r2?.response?.candidates?.[0]?.content?.parts?.[0]?.text || "");
+ } else {
+ const attachments = Array.isArray((req.body?.metadata || {}).attachments) ? (req.body.metadata.attachments || []).filter(Boolean) : [];
+ const parts = [{ text: String(effectivePrompt || "") }];
+ for (const a of attachments) {
+ try {
+ const b64 = String(a.data_base64 || "");
+ const mime = String(a.mime || "application/octet-stream");
+ if (!b64) continue;
+ parts.push({ inlineData: { data: b64, mimeType: mime } });
+ } catch {
+ }
  }
+ const resp = await model2.generateContent({ contents: [{ role: "user", parts }] });
+ content2 = resp?.response?.text?.() || resp?.response?.candidates?.[0]?.content?.parts?.[0]?.text || "";
  }
- const resp = await model2.generateContent({ contents: [{ role: "user", parts }] });
- const content2 = resp?.response?.text?.() || resp?.response?.candidates?.[0]?.content?.parts?.[0]?.text || "";
  if (process.env.MARIA_TELEMETRY === "1") {
  try {
  console.log(JSON.stringify({ ev: "ai_proxy_route", vendor: "google", model: modelName, empty: !content2 }));
@@ -10309,7 +10440,7 @@ app.post("/v1/ai-proxy", rateLimitMiddleware, async (req, res) => {
  model,
  input: [
  { role: "system", content: "You output only code blocks when asked for code." },
- { role: "user", content: prompt }
+ { role: "user", content: effectivePrompt || "" }
  ]
  });
  content = r2?.output_text || r2?.content?.[0]?.text || "";
@@ -10325,7 +10456,7 @@ app.post("/v1/ai-proxy", rateLimitMiddleware, async (req, res) => {
  model,
  messages: [
  { role: "system", content: "You output only code blocks when asked for code." },
- { role: "user", content: prompt }
+ { role: "user", content: effectivePrompt || "" }
  ]
  });
  content = r2.choices?.[0]?.message?.content || "";
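Taken together, the /v1/ai-proxy hunks mean a request now validates when it carries a prompt, a messages array, or taskType "research" with research.query. A rough sketch of a research-mode call is below; base and idToken are placeholders, and the response shape beyond the validation and routing shown in these hunks is not assumed.

// Research-mode request to the proxy endpoint (base/idToken are placeholders).
const base = "https://api.example.invalid";
const idToken = process.env.MARIA_ID_TOKEN || "";
const res = await fetch(`${base}/v1/ai-proxy`, {
  method: "POST",
  headers: { "Content-Type": "application/json", Authorization: `Bearer ${idToken}` },
  body: JSON.stringify({ taskType: "research", research: { query: "example topic" } })
});
console.log(await res.json()); // response fields are not detailed in this diff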