@mixio-pro/kalaasetu-mcp 2.1.0 → 2.1.1-beta

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,123 @@
+ /**
+ * OpenMeter integration for tracking MCP tool call usage.
+ * Sends events to OpenMeter for billing and analytics.
+ */
+
+ import { OpenMeter } from "@openmeter/sdk";
+ import { getToolCredits } from "./tool-credits";
+ import { logger } from "./logger";
+
+ /**
+ * Internal OpenMeter configuration.
+ * API token is hardcoded - users only need to provide CLIENT_ID.
+ */
+ const OPENMETER_CONFIG = {
+ apiToken:
+ "om_ftRYHNOtv5gRqPaXkItHlWmiiloAI9QD.73hmGa8o1a179gi1189QLn3beQq8wmKhB5eYnBOZwmw",
+ baseUrl: "https://openmeter.cloud",
+ source: "kalaasetu-mcp",
+ } as const;
+
+ let openmeterClient: OpenMeter | null = null;
+
+ /**
+ * Get the client ID for usage tracking.
+ * CLIENT_ID must be set by the user - throws error if missing.
+ */
+ export function getClientId(): string {
+ const clientId = process.env.CLIENT_ID;
+ if (!clientId) {
+ throw new Error(
+ "CLIENT_ID environment variable is required. Set CLIENT_ID to your customer identifier for usage tracking.",
+ );
+ }
+ return clientId;
+ }
+
+ /**
+ * Alias for getClientId - throws if CLIENT_ID is not set.
+ * Use this to enforce billing at tool execution time.
+ */
+ export const requireClientId = getClientId;
+
+ /**
+ * Initialize the OpenMeter client.
+ * Uses hardcoded API token - only requires CLIENT_ID from user.
+ */
+ export function initOpenMeter(): OpenMeter | null {
+ // Validate CLIENT_ID is set (will throw if not)
+ const clientId = getClientId();
+
+ if (!openmeterClient) {
+ openmeterClient = new OpenMeter({
+ baseUrl: OPENMETER_CONFIG.baseUrl,
+ apiKey: OPENMETER_CONFIG.apiToken,
+ });
+ logger.info(`[OpenMeter] Client initialized`);
+ }
+
+ return openmeterClient;
+ }
+
+ /**
+ * Get the OpenMeter client instance (lazy initialization).
+ */
+ export function getOpenMeter(): OpenMeter | null {
+ if (!openmeterClient) {
+ return initOpenMeter();
+ }
+ return openmeterClient;
+ }
+
+ /**
+ * Context for tracking a tool call.
+ */
+ export interface ToolCallContext {
+ toolName: string;
+ projectId?: string;
+ sessionId?: string;
+ }
+
+ /**
+ * Track a successful tool call in OpenMeter.
+ * Uses CLIENT_ID as the subject for billing.
+ * Tracks all tools for analytics (credit=0 for non-chargeable tools).
+ */
+ export async function trackToolCall(context: ToolCallContext): Promise<void> {
+ const clientId = getClientId(); // Will throw if not set
+ const client = getOpenMeter();
+
+ if (!client) {
+ return; // Silent fail if OpenMeter client failed
+ }
+
+ const creditConfig = getToolCredits(context.toolName);
+
+ try {
+ await client.events.ingest({
+ id: crypto.randomUUID(),
+ source: OPENMETER_CONFIG.source,
+ type: "tool_calls",
+ time: new Date(),
+ subject: clientId, // CLIENT_ID is the billing subject
+ data: {
+ value: 1,
+ provider: creditConfig.provider,
+ credit: creditConfig.credits,
+ mcp_name: "kalaasetu-mcp",
+ model_name: creditConfig.modelName || context.toolName,
+ project_id: context.projectId || "unknown",
+ session_id: context.sessionId || "unknown",
+ tool_name: context.toolName,
+ customer_id: clientId,
+ },
+ });
+
+ logger.info(
+ `[OpenMeter] Tracked tool call: ${context.toolName} (${creditConfig.credits} credits)`,
+ );
+ } catch (error) {
+ // Log but don't throw - tracking failure shouldn't break tool execution
+ logger.error("[OpenMeter] Failed to track tool call:", error);
+ }
+ }
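
A minimal sketch of how the new module above might be consumed, assuming it is importable as "./openmeter" and that CLIENT_ID is present in the environment; the tool, project, and session identifiers below are illustrative:

    // Sketch only: exercises getClientId/trackToolCall from the file added above.
    // getClientId() throws if CLIENT_ID is missing, so no event is ever sent without it.
    import { getClientId, trackToolCall } from "./openmeter";

    async function reportUsage(): Promise<void> {
      const subject = getClientId(); // billing subject, taken from process.env.CLIENT_ID

      // Fire one usage event; failures are logged inside trackToolCall rather than rethrown.
      await trackToolCall({
        toolName: "generateImage", // matches a key in TOOL_CREDITS (5 credits)
        projectId: "demo-project", // optional, defaults to "unknown"
        sessionId: "demo-session", // optional, defaults to "unknown"
      });

      console.log(`Reported one tool call for subject ${subject}`);
    }

    reportUsage().catch(console.error);
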
@@ -308,7 +308,9 @@ export let PROMPT_ENHANCER_PRESETS: Record<string, PromptEnhancerConfig> = {
  * Syncs the prompt enhancer configurations with the remote server.
  */
  export async function syncPromptEnhancerConfigs(): Promise<void> {
- const remoteConfig = await syncRemoteConfig<Record<string, PromptEnhancerConfig>>({
+ const remoteConfig = await syncRemoteConfig<
+ Record<string, PromptEnhancerConfig>
+ >({
  name: "prompt-enhancers",
  remoteUrl: "https://config.mixio.pro/mcp/prompt-enhancers.json",
  envVar: "PROMPT_ENHANCER_CONFIG_PATH",
@@ -325,7 +327,7 @@ export async function syncPromptEnhancerConfigs(): Promise<void> {
  ...remoteConfig,
  };

- console.log(`[Sync] Prompt Enhancer Presets updated. Total: ${Object.keys(PROMPT_ENHANCER_PRESETS).length}`);
+ // console.log(`[Sync] Prompt Enhancer Presets updated. Total: ${Object.keys(PROMPT_ENHANCER_PRESETS).length}`);
  }

  // =============================================================================
@@ -337,7 +339,7 @@ export async function syncPromptEnhancerConfigs(): Promise<void> {
  * Returns undefined if the preset doesn't exist.
  */
  export function getPromptEnhancer(
- presetName: string
+ presetName: string,
  ): PromptEnhancer | undefined {
  const config = PROMPT_ENHANCER_PRESETS[presetName];
  if (!config) {
@@ -372,7 +374,7 @@ export function listVideoEnhancerPresets(): string[] {
  * Returns PASSTHROUGH enhancer if resolution fails.
  */
  export function resolveEnhancer(
- input: string | PromptEnhancerConfig | undefined
+ input: string | PromptEnhancerConfig | undefined,
  ): PromptEnhancer {
  if (!input) {
  return PromptEnhancer.PASSTHROUGH;
@@ -382,7 +384,7 @@ export function resolveEnhancer(
  const enhancer = getPromptEnhancer(input);
  if (!enhancer) {
  console.warn(
- `Prompt enhancer preset '${input}' not found, using passthrough.`
+ `Prompt enhancer preset '${input}' not found, using passthrough.`,
  );
  return PromptEnhancer.PASSTHROUGH;
  }
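
The hunks above are formatting-only (trailing commas and a wrapped generic), but they show the resolution contract: resolveEnhancer accepts a preset name or an inline config and falls back to passthrough. A hedged sketch of that call shape, assuming the module is importable as "./prompt-enhancers" and exports PromptEnhancer; the preset names are illustrative:

    // Sketch only: demonstrates the fallback behaviour described above.
    import { resolveEnhancer, PromptEnhancer } from "./prompt-enhancers"; // assumed module path

    const byName = resolveEnhancer("cinematic");       // illustrative preset name
    const missing = resolveEnhancer("does-not-exist"); // warns, then returns PromptEnhancer.PASSTHROUGH
    const unset = resolveEnhancer(undefined);          // also returns PromptEnhancer.PASSTHROUGH

    console.log(missing === PromptEnhancer.PASSTHROUGH, unset === PromptEnhancer.PASSTHROUGH); // true true
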
@@ -1,6 +1,7 @@
  import * as fs from "node:fs";
  import * as path from "node:path";
  import { mkdir, readFile, writeFile } from "node:fs/promises";
+ import { logger } from "./logger";

  const CACHE_DIR = path.resolve(__dirname, "..", "..", ".cache");

@@ -36,18 +37,20 @@ export async function syncRemoteConfig<T>(options: SyncOptions<T>): Promise<T> {
  const content = await readFile(localPath, "utf-8");
  const data = JSON.parse(content);
  if (!validate || validate(data)) {
- console.log(`[Sync] Using local override from ${envVar}: ${localPath}`);
+ logger.info(
+ `[Sync] Using local override from ${envVar}: ${localPath}`,
+ );
  return data as T;
  }
  }
  } catch (e) {
- console.warn(`[Sync] Failed to read local override from ${envVar}: ${e}`);
+ logger.info(`[Sync] Failed to read local override from ${envVar}: ${e}`);
  }
  }

  // 2. Attempt to Fetch from Remote
  try {
- console.log(`[Sync] Attempting to fetch ${name} from ${remoteUrl}...`);
+ logger.info(`[Sync] Attempting to fetch ${name} from ${remoteUrl}...`);
  const response = await fetch(remoteUrl, {
  signal: AbortSignal.timeout(5000), // 5s timeout
  });
@@ -58,15 +61,19 @@ export async function syncRemoteConfig<T>(options: SyncOptions<T>): Promise<T> {
  // Cache the successful fetch
  await mkdir(CACHE_DIR, { recursive: true });
  await writeFile(cachePath, JSON.stringify(data, null, 2));
- console.log(`[Sync] Successfully updated ${name} and cached at ${cachePath}`);
+ logger.info(
+ `[Sync] Successfully updated ${name} and cached at ${cachePath}`,
+ );
  return data as T;
  }
- console.warn(`[Sync] Remote data for ${name} failed validation.`);
+ logger.info(`[Sync] Remote data for ${name} failed validation.`);
  } else {
- console.warn(`[Sync] Remote fetch for ${name} failed with status: ${response.status}`);
+ logger.info(
+ `[Sync] Remote fetch for ${name} failed with status: ${response.status}`,
+ );
  }
  } catch (e) {
- console.warn(`[Sync] Error fetching ${name} from remote: ${e}`);
+ logger.info(`[Sync] Error fetching ${name} from remote: ${e}`);
  }

  // 3. Fallback to Cache
@@ -75,15 +82,17 @@ export async function syncRemoteConfig<T>(options: SyncOptions<T>): Promise<T> {
  const cacheContent = await readFile(cachePath, "utf-8");
  const data = JSON.parse(cacheContent);
  if (data && (!validate || validate(data))) {
- console.log(`[Sync] Using cached version of ${name} from ${cachePath}`);
+ logger.info(`[Sync] Using cached version of ${name} from ${cachePath}`);
  return data as T;
  }
  }
  } catch (e) {
- console.warn(`[Sync] Failed to read cache for ${name}: ${e}`);
+ logger.info(`[Sync] Failed to read cache for ${name}: ${e}`);
  }

  // 4. Final Fallback to Internal Defaults
- console.warn(`[Sync] All sync methods failed for ${name}. Using internal defaults.`);
+ logger.info(
+ `[Sync] All sync methods failed for ${name}. Using internal defaults.`,
+ );
  return fallback;
  }
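
The logging swap above does not change the resolution order in syncRemoteConfig: a local override file named by an env var, then a remote fetch (cached on success), then the on-disk cache, then the built-in fallback. A hedged sketch of a caller, reusing the option names visible in this diff (name, remoteUrl, envVar from the prompt-enhancer call; validate and fallback from the function body); the module path, URL, env var, and config shape are illustrative:

    // Sketch only: option names mirror the syncRemoteConfig usage shown earlier in this diff.
    import { syncRemoteConfig } from "./sync-remote-config"; // assumed module path

    interface VoicePreset {
      voice: string;
      speed: number;
    }

    async function loadVoicePresets(): Promise<Record<string, VoicePreset>> {
      return syncRemoteConfig<Record<string, VoicePreset>>({
        name: "voice-presets",                               // illustrative config name
        remoteUrl: "https://example.com/voice-presets.json", // illustrative URL
        envVar: "VOICE_PRESET_CONFIG_PATH",                  // illustrative override variable
        fallback: { default: { voice: "neutral", speed: 1 } }, // used when every other source fails
        validate: (data) => typeof data === "object" && data !== null,
      });
    }
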
@@ -0,0 +1,104 @@
+ /**
+ * Tool credits configuration for OpenMeter billing.
+ * Defines which tools are chargeable and their credit costs.
+ */
+
+ export interface ToolCreditConfig {
+ credits: number;
+ provider: string;
+ chargeable: boolean;
+ modelName?: string;
+ }
+
+ /**
+ * Credit costs for each registered MCP tool.
+ * Non-chargeable tools (discovery, status, uploads) have 0 credits.
+ */
+ export const TOOL_CREDITS: Record<string, ToolCreditConfig> = {
+ // Gemini Image Generation - 5 credits
+ generateImage: {
+ credits: 5,
+ provider: "google-gemini",
+ chargeable: true,
+ modelName: "imagen-3.0-generate-002",
+ },
+ editImage: {
+ credits: 5,
+ provider: "google-gemini",
+ chargeable: true,
+ modelName: "imagen-3.0-capability-001",
+ },
+
+ // Vertex AI Video Generation - 20 credits (premium)
+ generateVideoi2v: {
+ credits: 20,
+ provider: "google-vertex",
+ chargeable: true,
+ modelName: "veo-2.0-generate-001",
+ },
+
+ // FAL AI Video Generation - 15 credits
+ fal_ltx_image_to_video: {
+ credits: 15,
+ provider: "fal-ai",
+ chargeable: true,
+ modelName: "fal-ai/ltx-2/image-to-video",
+ },
+ fal_ltx_retake_video: {
+ credits: 15,
+ provider: "fal-ai",
+ chargeable: true,
+ modelName: "fal-ai/ltx-2/retake-video",
+ },
+
+ // Non-chargeable utility tools
+ fal_upload_file: {
+ credits: 0,
+ provider: "fal-ai",
+ chargeable: false,
+ },
+ fal_list_presets: {
+ credits: 0,
+ provider: "fal-ai",
+ chargeable: false,
+ },
+ fal_get_preset_details: {
+ credits: 0,
+ provider: "fal-ai",
+ chargeable: false,
+ },
+ get_generation_status: {
+ credits: 0,
+ provider: "internal",
+ chargeable: false,
+ },
+ };
+
+ /**
+ * Get credit config for a tool. Returns default (0 credits, not chargeable) for unknown tools.
+ */
+ export function getToolCredits(toolName: string): ToolCreditConfig {
+ // Check for exact match first
+ const config = TOOL_CREDITS[toolName];
+ if (config) {
+ return config;
+ }
+
+ // Check for dynamic FAL tools (prefixed with fal_)
+ if (toolName.startsWith("fal_")) {
+ // Default FAL tool credits for dynamic presets
+ return {
+ credits: 15,
+ provider: "fal-ai",
+ chargeable: true,
+ modelName: toolName,
+ };
+ }
+
+ // Unknown tool - not chargeable
+ return {
+ credits: 0,
+ provider: "unknown",
+ chargeable: false,
+ };
+ }
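
A short sketch of the lookup behaviour the comments above describe: exact keys win, unknown fal_-prefixed tools fall back to the 15-credit fal-ai default, and anything else resolves to a zero-credit entry. Tool names other than generateImage are illustrative:

    // Sketch only: exercises getToolCredits from the file added above.
    import { getToolCredits } from "./tool-credits";

    const known = getToolCredits("generateImage");        // 5 credits, provider "google-gemini"
    const dynamicFal = getToolCredits("fal_some_preset"); // 15 credits via the fal_ prefix default
    const unknown = getToolCredits("list_projects");      // 0 credits, provider "unknown", not chargeable

    console.log(known.credits, dynamicFal.credits, unknown.credits); // 5 15 0
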
@@ -1,4 +1,10 @@
  import { z } from "zod";
+ import { logger } from "./logger";
+ import {
+ trackToolCall,
+ requireClientId,
+ type ToolCallContext,
+ } from "./openmeter";

  /**
  * Standardized error result for MCP tools
@@ -11,6 +17,16 @@ export interface ToolErrorResult {
  }>;
  }

+ /**
+ * Optional tracking context for tool execution
+ */
+ export interface TrackingContext {
+ toolName: string;
+ projectId?: string;
+ sessionId?: string;
+ userId?: string;
+ }
+
  /**
  * Helper to check if a result is a ToolErrorResult
  */
@@ -32,7 +48,7 @@ export function formatToolError(error: unknown, context?: string): string {
  // Enhanced Zod Error Handling
  if (error instanceof z.ZodError) {
  const issues = error.issues.map(
- (issue) => `[${issue.path.join(".")}] ${issue.message}`
+ (issue) => `[${issue.path.join(".")}] ${issue.message}`,
  );
  errorMessage = `Validation Error: ${issues.join("; ")}`;
  } else {
@@ -49,9 +65,9 @@ export function formatToolError(error: unknown, context?: string): string {
  }

  // Secure logging (never expose stack traces to the LLM, but log them internally)
- console.error(
+ logger.error(
  `[Tool Error] ${context ? `${context}: ` : ""}${errorMessage}`,
- error
+ error,
  );

  // Return sanitized message for the LLM
@@ -61,16 +77,31 @@ export function formatToolError(error: unknown, context?: string): string {
  }

  /**
- * Safely execute a tool function with standardized error handling
+ * Safely execute a tool function with standardized error handling and optional usage tracking.
  * @param fn The async tool execution function
  * @param context Optional context name (e.g. tool name) for logging
+ * @param tracking Optional tracking context for OpenMeter billing
  */
  export async function safeToolExecute<T>(
  fn: () => Promise<T>,
- context?: string
+ context?: string,
+ tracking?: TrackingContext,
  ): Promise<T | ToolErrorResult> {
  try {
- return await fn();
+ // Validate CLIENT_ID is set before executing any tool
+ requireClientId();
+
+ const result = await fn();
+
+ // Track successful execution if tracking context provided
+ if (tracking) {
+ // Fire and forget - don't block on tracking
+ trackToolCall(tracking).catch((err) => {
+ logger.error("[OpenMeter] Tracking background error:", err);
+ });
+ }
+
+ return result;
  } catch (error) {
  const errorText = formatToolError(error, context);
  return {
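
With the extended signature, a tool handler opts into billing by passing a tracking context as the third argument, and requireClientId() now runs before the wrapped function, so every call fails fast when CLIENT_ID is unset. A hedged sketch follows; the handler body, tool name, and IDs are illustrative:

    // Sketch only: shows the extended safeToolExecute call shape introduced above.
    import { safeToolExecute } from "./utils/tool-wrapper"; // path as referenced elsewhere in this diff

    async function handleGenerateImage(prompt: string) {
      return safeToolExecute(
        async () => {
          // Illustrative placeholder for the real tool work.
          return { content: [{ type: "text", text: `generated image for: ${prompt}` }] };
        },
        "generateImage", // context label used in error logs
        {
          toolName: "generateImage", // drives the credit lookup and the OpenMeter event
          projectId: "demo-project", // optional
          sessionId: "demo-session", // optional
        },
      );
    }
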
@@ -3,6 +3,7 @@ import * as path from "path";
  import * as os from "os";
  import { getStorage } from "../storage";
  import { generateTimestampedFilename } from "./filename";
+ import { logger } from "./logger";

  export interface LocalFileResult {
  path: string;
@@ -32,7 +33,7 @@ export async function ensureLocalFile(input: string): Promise<LocalFileResult> {
  const response = await fetch(input);
  if (!response.ok) {
  throw new Error(
- `Failed to download URL: ${input} (${response.status} ${response.statusText})`
+ `Failed to download URL: ${input} (${response.status} ${response.statusText})`,
  );
  }

@@ -49,7 +50,7 @@ export async function ensureLocalFile(input: string): Promise<LocalFileResult> {
  fs.unlinkSync(tempFilePath);
  }
  } catch (e) {
- console.error(`Failed to cleanup temp file ${tempFilePath}:`, e);
+ logger.error(`Failed to cleanup temp file ${tempFilePath}:`, e);
  }
  },
  };
@@ -78,7 +79,7 @@ export async function ensureLocalFile(input: string): Promise<LocalFileResult> {
  `File not found: ${input}\n` +
  `Resolved path: ${resolvedPath}\n` +
  `Is absolute: ${isAbsolute}\n` +
- `CWD: ${process.cwd()}`
+ `CWD: ${process.cwd()}`,
  );
  }

@@ -1,52 +0,0 @@
- import { z } from "zod";
- import { safeToolExecute } from "./utils/tool-wrapper";
-
- async function runTest() {
- console.log("Running Error Context Verification Test...");
-
- // 1. Mock Schema Validation Error
- const schema = z.object({
- prompt: z.string(),
- count: z.number().min(1).max(5),
- });
-
- const mockToolWithValidation = async (args: any) => {
- // Simulate what happens in a real tool when Zod parsing fails inside the execute block
- // OR if we manually parse and it fails
- // Most tools currently don't re-parse inside execute because MCP handles it?
- // Wait, the tools define `parameters` schema, but the `execute` function receives `args`.
- // The MCP server framework usually does the validation BEFORE calling execute.
- // BUT if we look at the code, some tools do manual checks or parsing.
-
- // Let's simulate a manual validation failure inside execute, or a deep validation failure
- try {
- schema.parse(args);
- } catch (error) {
- throw error;
- }
- return "success";
- };
-
- const zodResult = await safeToolExecute(
- async () => mockToolWithValidation({ prompt: 123, count: 10 }),
- "ZodTool"
- );
- console.log("\n--- Zod Error Result ---");
- console.log(JSON.stringify(zodResult, null, 2));
-
- // 2. Mock API Error with Status
- const mockToolWithApiError = async () => {
- // Simulate a fetch error structure
- const err: any = new Error("API request failed");
- err.status = 429;
- err.statusText = "Too Many Requests";
- err.responseBody = "Rate limit exceeded. Retry in 60s.";
- throw err;
- };
-
- const apiResult = await safeToolExecute(mockToolWithApiError, "ApiTool");
- console.log("\n--- API Error Result ---");
- console.log(JSON.stringify(apiResult, null, 2));
- }
-
- runTest();
@@ -1,31 +0,0 @@
- import { safeToolExecute } from "./utils/tool-wrapper";
-
- async function runTest() {
- console.log("Running Error Handling Verification Test...");
-
- // Mock tool that throws an error
- const mockTool = async () => {
- throw new Error("Simulated tool failure");
- };
-
- const result = await safeToolExecute(mockTool, "MockTool");
-
- console.log("Result:", JSON.stringify(result, null, 2));
-
- if (
- result.isError === true &&
- result.content &&
- result.content[0]?.text.includes(
- "Tool execution failed in MockTool: Simulated tool failure"
- )
- ) {
- console.log(
- "✅ Verification PASSED: Error was correctly caught and formatted."
- );
- } else {
- console.error("❌ Verification FAILED: Unexpected result format.");
- process.exit(1);
- }
- }
-
- runTest();