@bonginkan/maria 4.2.3 → 4.2.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/bin/maria.cjs +194 -27
- package/dist/bin/maria.cjs.map +1 -1
- package/dist/cli.cjs +208 -41
- package/dist/cli.cjs.map +1 -1
- package/package.json +1 -1
package/dist/bin/maria.cjs
CHANGED
@@ -7305,6 +7305,7 @@ var init_groq_provider = __esm({
   "src/providers/groq-provider.ts"() {
     init_base_provider();
     GroqProvider = class extends UnifiedBaseProvider {
+      id = "groq";
       name = "groq";
       modelsCache;
       constructor(apiKey) {
@@ -7330,6 +7331,15 @@ var init_groq_provider = __esm({
         }
       }
       async getModels() {
+        const models = [
+          "llama-3.3-70b-versatile",
+          "llama-3.2-90b-vision-preview",
+          "mixtral-8x7b-32768",
+          "gemma2-9b-it"
+        ];
+        return models;
+      }
+      async getModelInfo() {
         if (this.modelsCache) {
           return this.modelsCache;
         }
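
The new getModels() short-circuits to a hardcoded list of Groq model IDs, while the previous catalogue lookup lives on as getModelInfo() and still uses the cache. Below is a minimal standalone sketch of a caller that validates a requested model against that list; the helper name is illustrative, not part of the package.

// Sketch only: mirrors the hardcoded list introduced in 4.2.5.
const GROQ_MODELS = [
  "llama-3.3-70b-versatile",
  "llama-3.2-90b-vision-preview",
  "mixtral-8x7b-32768",
  "gemma2-9b-it"
];

// Hypothetical helper: fall back to the provider's default model when the
// requested one is not in the list.
function pickGroqModel(requested) {
  return GROQ_MODELS.includes(requested) ? requested : "mixtral-8x7b-32768";
}

console.log(pickGroqModel("llama-3.3-70b-versatile")); // llama-3.3-70b-versatile
console.log(pickGroqModel("not-a-model"));             // mixtral-8x7b-32768
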
@@ -7382,33 +7392,102 @@ var init_groq_provider = __esm({
         this.modelsCache = models;
         return models;
       }
+      async complete(prompt, req) {
+        if (!await this.isAvailable()) {
+          throw new Error("Groq API not available");
+        }
+        const model = req.model || "mixtral-8x7b-32768";
+        const payload = {
+          model,
+          messages: [{ role: "user", content: prompt }],
+          temperature: req.temperature || 0.7,
+          max_tokens: req.maxTokens || 4e3,
+          stream: false
+        };
+        const response2 = await this.makeRequest(
+          `${this.apiBase}/chat/completions`,
+          {
+            method: "POST",
+            headers: {
+              Authorization: `Bearer ${this.apiKey}`,
+              ...req.headers
+            },
+            body: payload,
+            timeout: req.timeoutMs
+          }
+        );
+        return {
+          content: response2.choices[0]?.message?.content || "",
+          model,
+          usage: {
+            promptTokens: response2.usage?.prompt_tokens || 0,
+            completionTokens: response2.usage?.completion_tokens || 0,
+            totalTokens: response2.usage?.total_tokens || 0
+          },
+          finishReason: response2.choices[0]?.finish_reason || "stop"
+        };
+      }
+      async stream(prompt, req) {
+        if (!await this.isAvailable()) {
+          throw new Error("Groq API not available");
+        }
+        const model = req.model || "mixtral-8x7b-32768";
+        const payload = {
+          model,
+          messages: [{ role: "user", content: prompt }],
+          temperature: req.temperature || 0.7,
+          max_tokens: req.maxTokens || 4e3,
+          stream: true
+        };
+        const stream = await this.makeStreamRequest(
+          `${this.apiBase}/chat/completions`,
+          {
+            method: "POST",
+            headers: {
+              Authorization: `Bearer ${this.apiKey}`,
+              ...req.headers
+            },
+            body: payload,
+            timeout: req.timeoutMs
+          }
+        );
+        async function* chunkGenerator() {
+          for await (const chunk of stream) {
+            yield {
+              content: chunk,
+              delta: chunk
+            };
+          }
+        }
+        return chunkGenerator();
+      }
       async chat(request) {
         if (!await this.isAvailable()) {
           throw new Error("Groq API not available");
         }
-        const
+        const model = request.model || "mixtral-8x7b-32768";
         const _startTime = Date.now();
         const _payload = {
-
+          model,
           messages: request.messages,
           temperature: request.temperature || 0.7,
-
-
+          max_tokens: request.maxTokens || 4e3,
+          stream: request.stream || false
         };
-        if (request.
-        const
+        if (request.stream) {
+          const stream = await this.makeStreamRequest(
             `${this.apiBase}/chat/completions`,
             {
               method: "POST",
               headers: {
                 Authorization: `Bearer ${this.apiKey}`
               },
-              body:
+              body: _payload
             }
           );
           return {
-
-
+            stream,
+            model,
             provider: this.name,
             responseTime: Date.now() - _startTime
           };
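
complete() sends a single non-streaming chat completion and maps the response to { content, model, usage, finishReason }, while stream() posts the same payload with stream: true and returns an async generator of { content, delta } chunks. A hedged sketch of how calling code might consume both, assuming provider is an already-constructed GroqProvider with a valid API key:

// Assumes `provider` exposes the complete()/stream() methods added above.
async function demo(provider) {
  // One-shot completion: inspect content, token usage, and finish reason.
  const res = await provider.complete("Say hello", { model: "llama-3.3-70b-versatile" });
  console.log(res.content, res.usage.totalTokens, res.finishReason);

  // Streaming: iterate the { content, delta } chunks as they arrive.
  const chunks = await provider.stream("Say hello", {});
  for await (const chunk of chunks) {
    process.stdout.write(chunk.delta);
  }
}
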
@@ -7420,12 +7499,12 @@ var init_groq_provider = __esm({
             headers: {
               Authorization: `Bearer ${this.apiKey}`
             },
-            body:
+            body: _payload
           }
         );
         return {
           content: _response.choices[0]?.message?.content || "",
-
+          model,
           provider: this.name,
           usage: {
             promptTokens: _response.usage?.prompt_tokens || 0,
@@ -7442,7 +7521,7 @@ var init_groq_provider = __esm({
         const _base64Image = _image.toString("base64");
         const _startTime = Date.now();
         const _payload = {
-
+          model: "llama-3.2-90b-vision-preview",
           messages: [
             {
               role: "user",
@@ -7457,7 +7536,7 @@ var init_groq_provider = __esm({
               ]
             }
           ],
-
+          max_tokens: 4e3
         };
         const _response = await this.makeRequest(
           `${this.apiBase}/chat/completions`,
@@ -7466,12 +7545,12 @@ var init_groq_provider = __esm({
             headers: {
               Authorization: `Bearer ${this.apiKey}`
             },
-            body:
+            body: _payload
           }
         );
         return {
           content: _response.choices[0]?.message?.content || "",
-
+          model: "llama-3.2-90b-vision-preview",
           provider: this.name,
           usage: {
             promptTokens: _response.usage?.prompt_tokens || 0,
@@ -21767,7 +21846,7 @@ var init_package = __esm({
   "package.json"() {
     package_default = {
       name: "@bonginkan/maria",
-      version: "4.2.
+      version: "4.2.5",
       description: "\u{1F680} MARIA v4.2.0 - Enterprise AI Development Platform with 100% Command Availability. Features 74 production-ready commands with comprehensive fallback implementation, local LLM support, and zero external dependencies. Includes natural language coding, AI safety evaluation, intelligent evolution system, episodic memory with PII masking, and real-time monitoring dashboard. Built with TypeScript AST-powered code generation, OAuth2.0 + PKCE authentication, quantum-resistant cryptography, and enterprise-grade performance.",
       keywords: [
         "ai",
@@ -22796,14 +22875,33 @@ var init_AuthenticationManager = __esm({
       // 2 minutes clock skew tolerance
       constructor() {
         this.tokenStorage = new TokenStorage();
-        this.authBase = process.env.MARIA_AUTH_BASE ||
-
-        this.authBase = "https://auth-server-1098737975582.us-central1.run.app";
-        console.debug("Using Cloud Run URL for auth (DNS pending for auth.maria-code.ai)");
-        }
-        this.apiBase = process.env.MARIA_API_BASE || "https://api.maria-code.ai";
+        this.authBase = process.env.MARIA_AUTH_BASE || this.getAuthBaseUrl();
+        this.apiBase = process.env.MARIA_API_BASE || this.getApiBaseUrl();
         this.clientId = process.env.MARIA_CLIENT_ID || "maria-cli";
       }
+      getAuthBaseUrl() {
+        if (process.env.MARIA_AUTH_MODE === "local") {
+          console.debug("Using local auth server (development mode)");
+          return "http://localhost:3001";
+        }
+        const cloudRunUrl = "https://auth-server-i227ftjidq-uc.a.run.app";
+        if (process.env.MARIA_USE_CUSTOM_DOMAIN === "true") {
+          console.debug("Attempting to use custom domain auth.maria-code.ai");
+          return "https://auth.maria-code.ai";
+        }
+        console.debug("Using Cloud Run URL for auth:", cloudRunUrl);
+        return cloudRunUrl;
+      }
+      getApiBaseUrl() {
+        if (process.env.MARIA_AUTH_MODE === "local") {
+          return "http://localhost:3000/api";
+        }
+        const cloudRunApiUrl = "https://maria-code-i227ftjidq-uc.a.run.app";
+        if (process.env.MARIA_USE_CUSTOM_DOMAIN === "true") {
+          return "https://api.maria-code.ai";
+        }
+        return cloudRunApiUrl;
+      }
       /**
        * Check if user is authenticated
        */
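
The constructor now delegates endpoint selection to getAuthBaseUrl()/getApiBaseUrl(): an explicit MARIA_AUTH_BASE or MARIA_API_BASE wins, MARIA_AUTH_MODE=local points at localhost, MARIA_USE_CUSTOM_DOMAIN=true prefers the maria-code.ai domains, and the Cloud Run URLs are the default. A standalone sketch of that precedence for the auth endpoint (the function name below is hypothetical, not part of the package):

// Hypothetical helper reproducing the resolution order used above.
function resolveAuthBase(env = process.env) {
  if (env.MARIA_AUTH_BASE) return env.MARIA_AUTH_BASE;                 // explicit override
  if (env.MARIA_AUTH_MODE === "local") return "http://localhost:3001"; // local dev server
  if (env.MARIA_USE_CUSTOM_DOMAIN === "true") return "https://auth.maria-code.ai";
  return "https://auth-server-i227ftjidq-uc.a.run.app";                // Cloud Run default
}

console.log(resolveAuthBase({ MARIA_AUTH_MODE: "local" }));        // http://localhost:3001
console.log(resolveAuthBase({ MARIA_USE_CUSTOM_DOMAIN: "true" })); // https://auth.maria-code.ai
console.log(resolveAuthBase({}));                                  // Cloud Run URL
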
@@ -22832,6 +22930,27 @@ var init_AuthenticationManager = __esm({
       * Get current authenticated user
       */
      async getCurrentUser() {
+        if (process.env.MARIA_AUTH_MODE === "local") {
+          const tokens2 = await this.tokenStorage.load();
+          if (!tokens2) {
+            throw new AuthenticationRequiredError(ERROR_MESSAGES.AUTH_REQUIRED);
+          }
+          return {
+            id: "local-dev-user",
+            email: "developer@localhost",
+            name: "Local Developer",
+            plan: "ultra",
+            usage: {
+              requests: Math.floor(Math.random() * 100),
+              // Random usage for testing
+              requestLimit: 999999,
+              requestsRemaining: 999999,
+              resetAt: Date.now() + 30 * 24 * 60 * 60 * 1e3
+            },
+            createdAt: (/* @__PURE__ */ new Date()).toISOString(),
+            updatedAt: (/* @__PURE__ */ new Date()).toISOString()
+          };
+        }
         const tokens = await this.getValidTokens();
         if (!tokens) {
           throw new AuthenticationRequiredError(ERROR_MESSAGES.AUTH_REQUIRED);
@@ -22869,6 +22988,9 @@ var init_AuthenticationManager = __esm({
           const user2 = await this.getCurrentUser();
           return { success: true, user: user2 };
         }
+        if (process.env.MARIA_AUTH_MODE === "local") {
+          return await this.loginWithLocalMock();
+        }
         let tokens;
         if (options.device) {
           tokens = await this.loginWithDeviceFlow();
@@ -22876,6 +22998,19 @@ var init_AuthenticationManager = __esm({
         try {
           tokens = await this.loginWithPKCEFlow();
         } catch (error2) {
+          if (error2.message?.includes("ECONNREFUSED") || error2.message?.includes("fetch failed")) {
+            console.error("\n\u274C Authentication service is currently unavailable");
+            console.error("Please try one of the following:");
+            console.error("1. Set MARIA_AUTH_MODE=local for local development");
+            console.error("2. Check your internet connection");
+            console.error("3. Visit https://status.maria-code.ai for service status\n");
+            if (!process.env.MARIA_AUTH_MODE) {
+              console.log("\u{1F4A1} Tip: Run with local auth mode:");
+              console.log(" export MARIA_AUTH_MODE=local");
+              console.log(" maria /login\n");
+            }
+            throw new Error("Authentication service unavailable. See above for alternatives.");
+          }
           console.warn("PKCE flow failed, falling back to device flow");
           tokens = await this.loginWithDeviceFlow();
         }
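
Only connectivity failures (ECONNREFUSED or a generic fetch failure) trigger the hard error with recovery hints; any other PKCE error still falls through to the device-flow fallback shown above. A small sketch of that classification, with an illustrative function name:

// Illustrative: the check that gates the "service unavailable" guidance.
function isConnectivityError(err) {
  return err?.message?.includes("ECONNREFUSED") || err?.message?.includes("fetch failed");
}

console.log(isConnectivityError(new Error("fetch failed")));  // true  -> abort with hints
console.log(isConnectivityError(new Error("invalid_grant"))); // false -> fall back to device flow
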
@@ -22890,6 +23025,43 @@ var init_AuthenticationManager = __esm({
           };
         }
       }
+      /**
+       * Local mock authentication for development
+       */
+      async loginWithLocalMock() {
+        console.log("\u{1F510} Local Development Mode - Mock Authentication");
+        const mockTokens = {
+          idToken: "mock-id-token-" + import_crypto4.default.randomBytes(16).toString("hex"),
+          accessToken: "mock-access-token-" + import_crypto4.default.randomBytes(16).toString("hex"),
+          refreshToken: "mock-refresh-token-" + import_crypto4.default.randomBytes(16).toString("hex"),
+          expiresAt: Date.now() + 24 * 60 * 60 * 1e3
+          // 24 hours
+        };
+        await this.tokenStorage.save(mockTokens);
+        const mockUser = {
+          id: "local-dev-user",
+          email: "developer@localhost",
+          name: "Local Developer",
+          plan: "ultra",
+          // Give full access in dev mode
+          usage: {
+            requests: 0,
+            requestLimit: 999999,
+            requestsRemaining: 999999,
+            resetAt: Date.now() + 30 * 24 * 60 * 60 * 1e3
+          },
+          createdAt: (/* @__PURE__ */ new Date()).toISOString(),
+          updatedAt: (/* @__PURE__ */ new Date()).toISOString()
+        };
+        console.log("\u2705 Logged in as developer@localhost (Local Mode)");
+        console.log(" Plan: Ultra (Development)");
+        console.log(" All features enabled for testing\n");
+        return {
+          success: true,
+          user: mockUser,
+          tokens: mockTokens
+        };
+      }
       /**
        * Logout and clean up
        */
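
loginWithLocalMock() persists randomly generated mock tokens through TokenStorage and reports a fixed "Local Developer" account on the ultra plan. A standalone sketch of the token shape it saves, using Node's built-in crypto module (the helper name is illustrative):

const crypto = require("crypto");

// Illustrative reconstruction of the mock token object saved above.
function makeMockTokens() {
  const rand = () => crypto.randomBytes(16).toString("hex");
  return {
    idToken: "mock-id-token-" + rand(),
    accessToken: "mock-access-token-" + rand(),
    refreshToken: "mock-refresh-token-" + rand(),
    expiresAt: Date.now() + 24 * 60 * 60 * 1000 // 24 hours from now
  };
}

console.log(makeMockTokens());
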
@@ -27969,7 +28141,6 @@ var init_bigquery_telemetry = __esm({
        this.httpEndpoint = process.env.TELEMETRY_ENDPOINT || null;
        if (this.isEnabled) {
          this.initialize().catch((error2) => {
-           console.error("[Telemetry] Initialization error:", error2);
          });
        }
      }
@@ -27994,7 +28165,6 @@ var init_bigquery_telemetry = __esm({
            keyFilename: process.env.GOOGLE_APPLICATION_CREDENTIALS
          });
        } catch (error2) {
-         console.error("[Telemetry] Failed to initialize BigQuery:", error2);
        }
      }
      this.startFlushTimer();
@@ -28036,7 +28206,6 @@ var init_bigquery_telemetry = __esm({
        } else {
        }
      } catch (error2) {
-       console.error("[Telemetry] Flush failed:", error2);
        if (this.telemetryQueue.length < this.config.batchSize * 2) {
          this.telemetryQueue = [...dataToFlush, ...this.telemetryQueue];
        }
@@ -28100,7 +28269,6 @@ var init_bigquery_telemetry = __esm({
          }
        })
      }).catch((error2) => {
-       console.error("[Telemetry] HTTP flush failed:", error2);
        throw error2;
      });
      if (!response2.ok) {
@@ -28114,7 +28282,6 @@ var init_bigquery_telemetry = __esm({
      if (this.flushTimer) return;
      this.flushTimer = setInterval(() => {
        this.flush().catch((error2) => {
-         console.error("[Telemetry] Timer flush failed:", error2);
        });
      }, this.config.flushIntervalMs);
    }