@bonginkan/maria 4.3.44 → 4.3.46
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +4 -4
- package/dist/READY.manifest.json +1 -1
- package/dist/bin/maria.cjs +460 -149
- package/dist/bin/maria.cjs.map +1 -1
- package/dist/cli.cjs +460 -149
- package/dist/cli.cjs.map +1 -1
- package/dist/index.js +2 -2
- package/dist/index.js.map +1 -1
- package/dist/server/express-server.cjs +8 -5
- package/dist/server/express-server.js +8 -5
- package/dist/server-express.cjs +8 -5
- package/dist/server-express.cjs.map +1 -1
- package/package.json +2 -2
- package/src/slash-commands/READY.manifest.json +1 -1

package/dist/server/express-server.cjs
CHANGED

```diff
@@ -9447,7 +9447,7 @@ app.get("/api/status", (req, res) => {
 app.get("/", (req, res) => {
   res.json({
     name: "MARIA CODE API",
-    version: "4.3.44",
+    version: "4.3.46",
     status: "running",
     environment: process.env.NODE_ENV || "development",
     endpoints: {
```

```diff
@@ -10332,7 +10332,7 @@ app.post("/v1/ai-proxy", rateLimitMiddleware, async (req, res) => {
   try {
     const auth = req.headers.authorization;
     if (!auth || !auth.startsWith("Bearer ")) return res.status(401).json({ error: "unauthorized" });
-    const { prompt, taskType, research, messages } = req.body || {};
+    const { prompt, taskType, research, messages, provider: reqProvider, model: reqModel } = req.body || {};
     let effectivePrompt = void 0;
     if (typeof prompt === "string" && prompt.trim()) {
       effectivePrompt = String(prompt).trim();
```

```diff
@@ -10381,11 +10381,14 @@ app.post("/v1/ai-proxy", rateLimitMiddleware, async (req, res) => {
       }
     })();
     const gemKey = sanitizeKey2(keys?.googleApiKey || process.env.GEMINI_API_KEY || process.env.GOOGLE_API_KEY);
-
+    const requestedProvider = typeof reqProvider === "string" ? reqProvider.toLowerCase() : void 0;
+    const requestedModel = typeof reqModel === "string" ? String(reqModel).trim() : void 0;
+    const wantsGoogle = requestedProvider === "google" || !!requestedModel && requestedModel.toLowerCase().startsWith("gemini") || !requestedProvider && !requestedModel;
+    if (gemKey && wantsGoogle) {
       try {
         const { GoogleGenerativeAI: GoogleGenerativeAI2 } = await import('@google/generative-ai');
         const ai = new GoogleGenerativeAI2(gemKey);
-        const modelName = process.env.MARIA_CODE_MODEL || "gemini-2.5-flash";
+        const modelName = requestedModel && requestedModel.length > 0 ? requestedModel : process.env.MARIA_CODE_MODEL || "gemini-2.5-flash";
         const model2 = ai.getGenerativeModel({ model: modelName });
         let content2 = "";
         if (taskType === "research" && research?.query) {
```

```diff
@@ -10442,7 +10445,7 @@ app.post("/v1/ai-proxy", rateLimitMiddleware, async (req, res) => {
     }
     const OpenAI2 = (await import('openai')).default;
     const client = new OpenAI2({ apiKey: openaiKey });
-    let model = process.env.MARIA_CODE_MODEL || "gpt-5-mini";
+    let model = requestedProvider === "openai" && requestedModel ? requestedModel : process.env.MARIA_CODE_MODEL || "gpt-5-mini";
     let content = "";
     let totalTokens = 0;
     try {
```
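
The hunks above extend the `/v1/ai-proxy` endpoint to accept optional `provider` and `model` fields in the request body. A minimal client sketch of the new fields follows; the `MARIA_API_URL` and `MARIA_API_TOKEN` environment variable names are hypothetical, the base URL and token are placeholders, and the response shape is not shown in this diff, so everything beyond the path, the `Authorization` header, and the body fields is an assumption.

```js
// Illustrative only: BASE_URL and TOKEN are placeholders, not part of the package.
const BASE_URL = process.env.MARIA_API_URL || "http://localhost:3000";
const TOKEN = process.env.MARIA_API_TOKEN || "replace-me";

async function askProxy(prompt, { provider, model } = {}) {
  // The handler reads prompt, taskType, research, messages, provider and model
  // from req.body; only prompt, provider and model are exercised here.
  const res = await fetch(`${BASE_URL}/v1/ai-proxy`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${TOKEN}`, // a missing or invalid token returns 401 per the diff
    },
    body: JSON.stringify({ prompt, provider, model }),
  });
  if (!res.ok) throw new Error(`ai-proxy request failed: ${res.status}`);
  return res.json();
}

// Route explicitly to Gemini with a specific model (possible as of 4.3.46):
askProxy("Summarize the latest changes", { provider: "google", model: "gemini-2.5-flash" })
  .then(console.log)
  .catch(console.error);
```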

package/dist/server/express-server.js
CHANGED

```diff
@@ -9447,7 +9447,7 @@ app.get("/api/status", (req, res) => {
 app.get("/", (req, res) => {
   res.json({
     name: "MARIA CODE API",
-    version: "4.3.44",
+    version: "4.3.46",
     status: "running",
     environment: process.env.NODE_ENV || "development",
     endpoints: {
```

```diff
@@ -10332,7 +10332,7 @@ app.post("/v1/ai-proxy", rateLimitMiddleware, async (req, res) => {
   try {
     const auth = req.headers.authorization;
     if (!auth || !auth.startsWith("Bearer ")) return res.status(401).json({ error: "unauthorized" });
-    const { prompt, taskType, research, messages } = req.body || {};
+    const { prompt, taskType, research, messages, provider: reqProvider, model: reqModel } = req.body || {};
     let effectivePrompt = void 0;
     if (typeof prompt === "string" && prompt.trim()) {
       effectivePrompt = String(prompt).trim();
```

```diff
@@ -10381,11 +10381,14 @@ app.post("/v1/ai-proxy", rateLimitMiddleware, async (req, res) => {
       }
     })();
     const gemKey = sanitizeKey2(keys?.googleApiKey || process.env.GEMINI_API_KEY || process.env.GOOGLE_API_KEY);
-
+    const requestedProvider = typeof reqProvider === "string" ? reqProvider.toLowerCase() : void 0;
+    const requestedModel = typeof reqModel === "string" ? String(reqModel).trim() : void 0;
+    const wantsGoogle = requestedProvider === "google" || !!requestedModel && requestedModel.toLowerCase().startsWith("gemini") || !requestedProvider && !requestedModel;
+    if (gemKey && wantsGoogle) {
       try {
         const { GoogleGenerativeAI: GoogleGenerativeAI2 } = await import('@google/generative-ai');
         const ai = new GoogleGenerativeAI2(gemKey);
-        const modelName = process.env.MARIA_CODE_MODEL || "gemini-2.5-flash";
+        const modelName = requestedModel && requestedModel.length > 0 ? requestedModel : process.env.MARIA_CODE_MODEL || "gemini-2.5-flash";
         const model2 = ai.getGenerativeModel({ model: modelName });
         let content2 = "";
         if (taskType === "research" && research?.query) {
```

```diff
@@ -10442,7 +10445,7 @@ app.post("/v1/ai-proxy", rateLimitMiddleware, async (req, res) => {
     }
     const OpenAI2 = (await import('openai')).default;
     const client = new OpenAI2({ apiKey: openaiKey });
-    let model = process.env.MARIA_CODE_MODEL || "gpt-5-mini";
+    let model = requestedProvider === "openai" && requestedModel ? requestedModel : process.env.MARIA_CODE_MODEL || "gpt-5-mini";
     let content = "";
     let totalTokens = 0;
     try {
```
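
In the `@@ -10381` hunk, the Google branch is now gated on a `wantsGoogle` predicate. Because `&&` binds tighter than `||`, the expression reads as three alternatives: the client asked for `provider: "google"`, the client named a `gemini*` model without naming a provider, or the client expressed no preference at all (in which case Gemini stays the default, provided a Google key is available). A small sketch that mirrors the predicate for illustration; the bundled code computes it inline rather than through a helper like this.

```js
// Hypothetical helper mirroring the routing predicate from the diff.
function wantsGoogle(reqProvider, reqModel) {
  const requestedProvider = typeof reqProvider === "string" ? reqProvider.toLowerCase() : undefined;
  const requestedModel = typeof reqModel === "string" ? String(reqModel).trim() : undefined;
  // && binds tighter than ||, so this reads as three alternatives:
  // explicit google provider, a gemini-* model, or no preference at all.
  return requestedProvider === "google" ||
    !!requestedModel && requestedModel.toLowerCase().startsWith("gemini") ||
    !requestedProvider && !requestedModel;
}

console.log(wantsGoogle("google", undefined));          // true  (explicit provider)
console.log(wantsGoogle(undefined, "gemini-2.5-pro"));  // true  (gemini-* model implies Google)
console.log(wantsGoogle(undefined, undefined));         // true  (no preference: Google stays the default)
console.log(wantsGoogle("openai", "gpt-5-mini"));       // false (falls through to the OpenAI branch)
```
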
package/dist/server-express.cjs
CHANGED

```diff
@@ -9447,7 +9447,7 @@ app.get("/api/status", (req, res) => {
 app.get("/", (req, res) => {
   res.json({
     name: "MARIA CODE API",
-    version: "4.3.44",
+    version: "4.3.46",
     status: "running",
     environment: process.env.NODE_ENV || "development",
     endpoints: {
```

```diff
@@ -10332,7 +10332,7 @@ app.post("/v1/ai-proxy", rateLimitMiddleware, async (req, res) => {
   try {
     const auth = req.headers.authorization;
     if (!auth || !auth.startsWith("Bearer ")) return res.status(401).json({ error: "unauthorized" });
-    const { prompt, taskType, research, messages } = req.body || {};
+    const { prompt, taskType, research, messages, provider: reqProvider, model: reqModel } = req.body || {};
     let effectivePrompt = void 0;
     if (typeof prompt === "string" && prompt.trim()) {
       effectivePrompt = String(prompt).trim();
```

```diff
@@ -10381,11 +10381,14 @@ app.post("/v1/ai-proxy", rateLimitMiddleware, async (req, res) => {
       }
     })();
     const gemKey = sanitizeKey2(keys?.googleApiKey || process.env.GEMINI_API_KEY || process.env.GOOGLE_API_KEY);
-
+    const requestedProvider = typeof reqProvider === "string" ? reqProvider.toLowerCase() : void 0;
+    const requestedModel = typeof reqModel === "string" ? String(reqModel).trim() : void 0;
+    const wantsGoogle = requestedProvider === "google" || !!requestedModel && requestedModel.toLowerCase().startsWith("gemini") || !requestedProvider && !requestedModel;
+    if (gemKey && wantsGoogle) {
       try {
         const { GoogleGenerativeAI: GoogleGenerativeAI2 } = await import('@google/generative-ai');
         const ai = new GoogleGenerativeAI2(gemKey);
-        const modelName = process.env.MARIA_CODE_MODEL || "gemini-2.5-flash";
+        const modelName = requestedModel && requestedModel.length > 0 ? requestedModel : process.env.MARIA_CODE_MODEL || "gemini-2.5-flash";
         const model2 = ai.getGenerativeModel({ model: modelName });
         let content2 = "";
         if (taskType === "research" && research?.query) {
```

```diff
@@ -10442,7 +10445,7 @@ app.post("/v1/ai-proxy", rateLimitMiddleware, async (req, res) => {
     }
     const OpenAI2 = (await import('openai')).default;
     const client = new OpenAI2({ apiKey: openaiKey });
-    let model = process.env.MARIA_CODE_MODEL || "gpt-5-mini";
+    let model = requestedProvider === "openai" && requestedModel ? requestedModel : process.env.MARIA_CODE_MODEL || "gpt-5-mini";
     let content = "";
     let totalTokens = 0;
     try {
```
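
The final hunk applies the same idea to the OpenAI branch, with one asymmetry worth noting: the Gemini branch honors any explicitly requested model, while the OpenAI branch only honors `model` when `provider` is explicitly `"openai"`; in both cases the fallback is the `MARIA_CODE_MODEL` environment variable and then a hard-coded default. A hedged sketch of the two resolution chains, using hypothetical helper names (the bundled handler inlines both expressions):

```js
// Hypothetical helpers; the bundled handler inlines both expressions.
function geminiModel(requestedModel) {
  // Gemini branch: an explicitly requested model wins, then the env var, then the default.
  return requestedModel && requestedModel.length > 0
    ? requestedModel
    : process.env.MARIA_CODE_MODEL || "gemini-2.5-flash";
}

function openaiModel(requestedProvider, requestedModel) {
  // OpenAI branch: the requested model is honored only with an explicit "openai" provider.
  return requestedProvider === "openai" && requestedModel
    ? requestedModel
    : process.env.MARIA_CODE_MODEL || "gpt-5-mini";
}

console.log(geminiModel("gemini-2.5-pro"));          // "gemini-2.5-pro"
console.log(openaiModel("openai", "gpt-4.1-mini"));  // "gpt-4.1-mini"
console.log(openaiModel(undefined, "gpt-4.1-mini")); // MARIA_CODE_MODEL if set, else "gpt-5-mini"
```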