@bonginkan/maria 4.3.24 → 4.3.26
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +4 -4
- package/dist/READY.manifest.json +1 -1
- package/dist/bin/maria.cjs +44 -8
- package/dist/bin/maria.cjs.map +1 -1
- package/dist/cli.cjs +44 -8
- package/dist/cli.cjs.map +1 -1
- package/dist/index.js +2 -2
- package/dist/index.js.map +1 -1
- package/dist/server/express-server.cjs +66 -1
- package/dist/server/express-server.js +66 -1
- package/dist/server-express.cjs +66 -1
- package/dist/server-express.cjs.map +1 -1
- package/package.json +2 -2
- package/src/slash-commands/READY.manifest.json +1 -1
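Each of the three server bundles listed above (express-server.cjs, express-server.js, server-express.cjs) carries the same +66/−1 change, shown once below for server-express.cjs: the self-reported API version moves to 4.3.26, and opt-in telemetry logging is added to the /api/ai and /v1/ai-proxy routes. Every new log call is gated on MARIA_TELEMETRY === "1" and wrapped in a try/catch, so telemetry can never fail a request. The inlined pattern reduces to the following sketch; the helper name emitTelemetry is illustrative, not an export of the package:

// Minimal sketch of the telemetry gate that 4.3.26 inlines at every call
// site. emitTelemetry is a hypothetical name; the shipped bundles repeat
// the if/try/console.log pattern directly instead of defining a helper.
function emitTelemetry(event) {
  // Opt-in: nothing is printed unless the env var is exactly "1".
  if (process.env.MARIA_TELEMETRY !== "1") return;
  try {
    // One JSON object per line on stdout, matching the event shapes in the diff.
    console.log(JSON.stringify(event));
  } catch {
    // Serialization failures are swallowed so logging cannot break a request.
  }
}

// Example event, shaped like the /api/ai request hook in the diff below:
emitTelemetry({ ev: "api_ai_request", promptLen: 5, hasSystem: false });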
package/dist/server-express.cjs
CHANGED
@@ -8621,7 +8621,7 @@ app.get("/api/status", (req, res) => {
 app.get("/", (req, res) => {
   res.json({
     name: "MARIA CODE API",
-    version: "4.3.24",
+    version: "4.3.26",
     status: "running",
     environment: process.env.NODE_ENV || "development",
     endpoints: {
@@ -8732,11 +8732,40 @@ app.post("/api/ai", rateLimitMiddleware, async (req, res) => {
     if (!prompt || typeof prompt !== "string") {
       return res.status(400).json({ error: "bad_request", message: "prompt is required" });
     }
+    if (process.env.MARIA_TELEMETRY === "1") {
+      try {
+        console.log(JSON.stringify({ ev: "api_ai_request", promptLen: String(prompt).length, hasSystem: !!systemPrompt }));
+      } catch {
+      }
+    }
+    try {
+      await loadProviderKeys();
+    } catch {
+    }
     const ims = new IMSFacade_default();
     const resp = await ims.routeChat({ prompt, systemPrompt });
+    if (process.env.MARIA_TELEMETRY === "1") {
+      try {
+        console.log(JSON.stringify({
+          ev: "api_ai_route",
+          provider: resp?.meta?.provider || "unknown",
+          model: resp?.meta?.model || "unknown",
+          traceId: resp?.meta?.traceId,
+          reasons: resp?.meta?.reasons || [],
+          success: !!resp?.success
+        }));
+      } catch {
+      }
+    }
     return res.json(resp);
   } catch (error) {
     console.error("[AI API] Error:", error?.message || error);
+    if (process.env.MARIA_TELEMETRY === "1") {
+      try {
+        console.log(JSON.stringify({ ev: "api_ai_error", message: error?.message || String(error) }));
+      } catch {
+      }
+    }
     const polite = typeof req.body?.prompt === "string" ? `Sorry, I couldn't reach the AI service right now. Here\u2019s a quick human-style reply to keep you moving:
 
 ${String(req.body.prompt)}
@@ -9186,6 +9215,12 @@ app.post("/v1/ai-proxy", rateLimitMiddleware, async (req, res) => {
     if (!auth || !auth.startsWith("Bearer ")) return res.status(401).json({ error: "unauthorized" });
     const { prompt, taskType } = req.body || {};
     if (!prompt) return res.status(400).json({ error: "bad_request", message: "prompt required" });
+    if (process.env.MARIA_TELEMETRY === "1") {
+      try {
+        console.log(JSON.stringify({ ev: "ai_proxy_request", taskType: taskType || "unknown", promptLen: String(prompt).length }));
+      } catch {
+      }
+    }
     const sanitizeKey = (v) => {
       if (!v) return void 0;
       let k = String(v).trim();
@@ -9220,13 +9255,31 @@ app.post("/v1/ai-proxy", rateLimitMiddleware, async (req, res) => {
         const model2 = ai.getGenerativeModel({ model: modelName });
         const resp = await model2.generateContent({ contents: [{ role: "user", parts: [{ text: prompt }] }] });
         const content2 = resp?.response?.text?.() || resp?.response?.candidates?.[0]?.content?.parts?.[0]?.text || "";
+        if (process.env.MARIA_TELEMETRY === "1") {
+          try {
+            console.log(JSON.stringify({ ev: "ai_proxy_route", vendor: "google", model: modelName, empty: !content2 }));
+          } catch {
+          }
+        }
         return res.json({ data: { content: content2, routedModel: { vendor: "google", family: "gemini", name: modelName, reason: taskType || "code" } } });
       } catch (e2) {
         console.warn("[AI Proxy] Gemini path failed, falling back to OpenAI:", e2?.message || e2);
+        if (process.env.MARIA_TELEMETRY === "1") {
+          try {
+            console.log(JSON.stringify({ ev: "ai_proxy_google_error", message: e2?.message || String(e2) }));
+          } catch {
+          }
+        }
       }
     }
     const openaiKey = sanitizeKey(keys?.openaiApiKey || process.env.OPENAI_API_KEY);
     if (!openaiKey) {
+      if (process.env.MARIA_TELEMETRY === "1") {
+        try {
+          console.log(JSON.stringify({ ev: "ai_proxy_no_keys" }));
+        } catch {
+        }
+      }
       return res.status(503).json({ error: "provider_unavailable", message: "No valid provider key (set GEMINI_API_KEY/GOOGLE_API_KEY or OPENAI_API_KEY)" });
     }
     const OpenAI2 = (await import('openai')).default;
@@ -9253,9 +9306,21 @@ app.post("/v1/ai-proxy", rateLimitMiddleware, async (req, res) => {
       });
       content = r2.choices?.[0]?.message?.content || "";
     }
+    if (process.env.MARIA_TELEMETRY === "1") {
+      try {
+        console.log(JSON.stringify({ ev: "ai_proxy_route", vendor: "openai", model, empty: !content }));
+      } catch {
+      }
+    }
     return res.json({ data: { content, routedModel: { vendor: "openai", family: "gpt", name: model, reason: taskType || "code" } } });
   } catch (error) {
     console.error("[AI Proxy] Error:", error);
+    if (process.env.MARIA_TELEMETRY === "1") {
+      try {
+        console.log(JSON.stringify({ ev: "ai_proxy_error", message: error?.message || String(error) }));
+      } catch {
+      }
+    }
     return res.status(500).json({ error: "internal_error", message: "AI proxy failed" });
   }
 });
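As the proxy hunks show, /v1/ai-proxy tries Gemini first when a Google key resolves, falls back to OpenAI on failure, and answers 503 provider_unavailable when neither key is present; each branch now emits a matching telemetry event. A minimal probe to observe the events, assuming a local instance on port 3000 started with MARIA_TELEMETRY=1 in its environment (the port and token value are assumptions; the diff only requires the Authorization header to start with "Bearer "):

// Hypothetical probe; requires Node 18+ for the global fetch and should be
// run as an ES module (e.g. node probe.mjs) so top-level await is available.
const res = await fetch("http://localhost:3000/v1/ai-proxy", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    // Any value passing auth.startsWith("Bearer ") clears the 401 check.
    Authorization: "Bearer test-token",
  },
  body: JSON.stringify({ prompt: "hello", taskType: "code" }),
});
// The server process should print an {"ev":"ai_proxy_request",...} line,
// then "ai_proxy_route" on success, or "ai_proxy_no_keys"/"ai_proxy_error".
console.log(res.status, await res.json());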