@mutagent/cli 0.1.50 → 0.1.52

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/bin/cli.js CHANGED
@@ -455,7 +455,7 @@ class SDKClientWrapper {
455
455
  workspaceId;
456
456
  organizationId;
457
457
  constructor(opts) {
458
- this.apiKey = opts.bearerAuth;
458
+ this.apiKey = opts.apiKey;
459
459
  this.endpoint = opts.serverURL ?? "http://localhost:3003";
460
460
  this.workspaceId = opts.workspaceId;
461
461
  this.organizationId = opts.organizationId;
@@ -605,9 +605,13 @@ class SDKClientWrapper {
605
605
  this.handleError(error);
606
606
  }
607
607
  }
608
- async deletePrompt(id) {
608
+ async deletePrompt(id, options) {
609
609
  try {
610
- await this.sdk.prompt.deletePrompt({ id: parseInt(id, 10) });
610
+ if (options?.force) {
611
+ await this.request(`/api/prompts/${id}?force=true`, { method: "DELETE" });
612
+ } else {
613
+ await this.sdk.prompt.deletePrompt({ id: parseInt(id, 10) });
614
+ }
611
615
  } catch (error) {
612
616
  this.handleError(error);
613
617
  }
@@ -677,9 +681,10 @@ class SDKClientWrapper {
677
681
  this.handleError(error);
678
682
  }
679
683
  }
680
- async deleteDataset(_promptId, datasetId) {
684
+ async deleteDataset(_promptId, datasetId, options) {
681
685
  try {
682
- await this.request(`/api/prompts/datasets/${datasetId}`, { method: "DELETE" });
686
+ const query = options?.force ? "?force=true" : "";
687
+ await this.request(`/api/prompts/datasets/${datasetId}${query}`, { method: "DELETE" });
683
688
  } catch (error) {
684
689
  this.handleError(error);
685
690
  }
@@ -1007,7 +1012,7 @@ function getSDKClient() {
1007
1012
  }
1008
1013
  const config = loadConfig();
1009
1014
  sdkClient = new SDKClientWrapper({
1010
- bearerAuth: apiKey,
1015
+ apiKey,
1011
1016
  serverURL: config.endpoint,
1012
1017
  workspaceId: config.defaultWorkspace,
1013
1018
  organizationId: config.defaultOrganization
@@ -1020,7 +1025,7 @@ function resetSDKClient() {
1020
1025
  }
1021
1026
  async function validateApiKey(apiKey, endpoint) {
1022
1027
  try {
1023
- const response = await fetch(`${endpoint}/api/organizations`, {
1028
+ const response = await fetch(`${endpoint}/api/providers`, {
1024
1029
  headers: { "x-api-key": apiKey }
1025
1030
  });
1026
1031
  return response.ok;
@@ -2339,6 +2344,9 @@ Examples:
2339
2344
  output.success(`Integration: ${contextSummary.integrated.join(", ")} (active)`);
2340
2345
  }
2341
2346
  }
2347
+ if (!isValid) {
2348
+ process.exit(1);
2349
+ }
2342
2350
  });
2343
2351
  auth.command("logout").description("Clear stored credentials").addHelpText("after", `
2344
2352
  Examples:
@@ -3161,7 +3169,7 @@ ${chalk6.dim("Tip: Combine --with-datasets and --with-evals to fetch all nested
3161
3169
  handleError(error, isJson);
3162
3170
  }
3163
3171
  });
3164
- prompts.command("create").description("Create a new prompt").option("-n, --name <name>", "Prompt name").option("--description <text>", "Prompt description (shown in dashboard)").option("-c, --content <content>", "Prompt content (rawPrompt) [DEPRECATED: use --raw]").option("-r, --raw <text>", "Raw prompt text (single prompt)").option("--system <text>", "System prompt (use with --human)").option("--human <text>", "Human prompt (use with --system)").option("--messages <json>", `Messages array as JSON (e.g., '[{"role":"system","content":"..."}]')`).option("--output-schema <json>", "Output schema as JSON string (required for optimization)").addHelpText("after", `
3172
+ prompts.command("create").description("Create a new prompt").option("-n, --name <name>", "Prompt name").option("--description <text>", "Prompt description (shown in dashboard)").option("-c, --content <content>", "Prompt content (rawPrompt) [DEPRECATED: use --raw]").option("-r, --raw <text>", "Raw prompt text (single prompt)").option("--system <text>", "System prompt (use with --human)").option("--human <text>", "Human prompt (use with --system)").option("--messages <json>", `Messages array as JSON (e.g., '[{"role":"system","content":"..."}]')`).option("--output-schema <json>", "Output schema as JSON string (required for optimization)").option("--input-schema <json>", "Input variable schema as JSON string").addHelpText("after", `
3165
3173
  Examples:
3166
3174
  ${chalk6.dim("$")} mutagent prompts create --name "my-prompt" --description "Greeting prompt for customers" --system "You are helpful" --human "{input}" --output-schema '{"type":"object","properties":{"result":{"type":"string","description":"The result"}}}'
3167
3175
  ${chalk6.dim("$")} mutagent prompts create --name "raw-prompt" --raw "Summarize: {text}" --output-schema '{"type":"object","properties":{"summary":{"type":"string","description":"Summary"}}}'
@@ -3237,14 +3245,12 @@ Use --raw, --system/--human, or --messages to specify prompt content`);
3237
3245
  throw new MutagentError("MISSING_ARGUMENTS", "--name is required. Use --name with --system/--human, --raw, or --messages", `Run: mutagent prompts create --help
3238
3246
  Provide --name with --system/--human, --raw, or --messages to specify prompt content`);
3239
3247
  }
3240
- if (options.outputSchema) {
3241
- try {
3242
- data.outputSchema = JSON.parse(options.outputSchema);
3243
- } catch {
3244
- throw new MutagentError("INVALID_JSON", "Invalid JSON in --output-schema flag", `Run: mutagent prompts create --help
3245
- Provide a valid JSON Schema, e.g., '{"type":"object","properties":{"result":{"type":"string"}}}'`);
3246
- }
3247
- }
3248
+ const parsedOutputSchema = parseSchemaOption(options.outputSchema, undefined, "output-schema", "mutagent prompts create --help");
3249
+ if (parsedOutputSchema)
3250
+ data.outputSchema = parsedOutputSchema;
3251
+ const parsedInputSchema = parseSchemaOption(options.inputSchema, undefined, "input-schema", "mutagent prompts create --help");
3252
+ if (parsedInputSchema)
3253
+ data.inputSchema = parsedInputSchema;
3248
3254
  if (isSchemaEmpty(data.outputSchema)) {
3249
3255
  throw new MutagentError("MISSING_ARGUMENTS", "outputSchema is required for prompt creation", `Run: mutagent prompts create --help
3250
3256
  Use --output-schema '{"type":"object","properties":{...}}'`);
@@ -3384,7 +3390,7 @@ ${chalk6.dim("Note: --force is required. The CLI is non-interactive — confirm
3384
3390
  }
3385
3391
  const client = getSDKClient();
3386
3392
  try {
3387
- await client.deletePrompt(id);
3393
+ await client.deletePrompt(id, { force: true });
3388
3394
  } catch (deleteError) {
3389
3395
  if (deleteError instanceof ApiError && deleteError.statusCode === 404) {
3390
3396
  if (isJson) {
@@ -3564,7 +3570,7 @@ Examples:
3564
3570
  }
3565
3571
  const client = getSDKClient();
3566
3572
  try {
3567
- await client.deleteDataset(promptId, datasetId);
3573
+ await client.deleteDataset(promptId, datasetId, { force: true });
3568
3574
  } catch (deleteError) {
3569
3575
  if (deleteError instanceof ApiError && deleteError.statusCode === 404) {
3570
3576
  if (isJson) {
@@ -5967,69 +5973,30 @@ version: 2.0.0
5967
5973
  - MUTAGENT_ENDPOINT: ${config.endpoint}
5968
5974
  - API Connection: Verified
5969
5975
 
5970
- > **Note**: LangGraph is built on LangChain. The same \`MutagentCallbackHandler\` from
5971
- > \`@mutagent/langchain\` works for both LangChain and LangGraph — no separate package needed.
5972
-
5973
5976
  ## Installation
5974
5977
 
5975
5978
  \`\`\`bash
5976
- bun add @mutagent/langchain @mutagent/sdk
5979
+ bun add @mutagent/langgraph @mutagent/sdk
5977
5980
  # or
5978
- npm install @mutagent/langchain @mutagent/sdk
5981
+ npm install @mutagent/langgraph @mutagent/sdk
5979
5982
  \`\`\`
5980
5983
 
5981
- > **Deprecation Notice**: The \`@mutagent/langgraph\` package is deprecated.
5982
- > Use \`@mutagent/langchain\` instead — it supports both LangChain and LangGraph.
5983
-
5984
5984
  ## Integration
5985
5985
 
5986
5986
  \`\`\`typescript
5987
- import { MutagentCallbackHandler } from '@mutagent/langchain';
5987
+ import { MutagentCallbackHandler } from '@mutagent/langgraph';
5988
5988
  import { initTracing } from '@mutagent/sdk/tracing';
5989
5989
 
5990
- // Initialize tracing (once at app startup)
5991
- initTracing({ apiKey: process.env.MUTAGENT_API_KEY! });
5992
-
5993
- // Create the handler
5994
- const handler = new MutagentCallbackHandler({
5995
- sessionId: 'my-session', // optional
5996
- userId: 'user-123', // optional
5997
- });
5998
-
5999
- // Pass to any LangGraph invoke/stream call
6000
- const result = await graph.invoke(input, { callbacks: [handler] });
6001
- \`\`\`
6002
-
6003
- ## Full Graph Example
6004
-
6005
- \`\`\`typescript
6006
- import { StateGraph, Annotation } from '@langchain/langgraph';
6007
- import { ChatOpenAI } from '@langchain/openai';
6008
- import { MutagentCallbackHandler } from '@mutagent/langchain';
6009
- import { initTracing } from '@mutagent/sdk/tracing';
6010
-
6011
- // Initialize tracing (once at app startup)
5990
+ // Initialize MutagenT tracing
6012
5991
  initTracing({ apiKey: process.env.MUTAGENT_API_KEY! });
6013
5992
 
5993
+ // Create callback handler (no constructor args)
6014
5994
  const handler = new MutagentCallbackHandler();
6015
5995
 
6016
- // Define your graph as usual
6017
- const StateAnnotation = Annotation.Root({
6018
- input: Annotation<string>,
6019
- output: Annotation<string>,
5996
+ // Pass as callback to your LangGraph execution
5997
+ const result = await graph.invoke(input, {
5998
+ callbacks: [handler],
6020
5999
  });
6021
-
6022
- const graph = new StateGraph(StateAnnotation)
6023
- .addNode('agent', agentNode)
6024
- .addNode('tools', toolNode)
6025
- .addEdge('__start__', 'agent')
6026
- .compile();
6027
-
6028
- // All nodes, edges, and LLM calls are automatically traced
6029
- const result = await graph.invoke(
6030
- { input: 'Hello' },
6031
- { callbacks: [handler] },
6032
- );
6033
6000
  \`\`\`
6034
6001
 
6035
6002
  ## Streaming
@@ -6099,83 +6066,63 @@ version: 2.0.0
6099
6066
  ## Installation
6100
6067
 
6101
6068
  \`\`\`bash
6102
- bun add @mutagent/vercel-ai @mutagent/sdk
6069
+ bun add @mutagent/vercel-ai @mutagent/sdk @opentelemetry/sdk-trace-node @opentelemetry/sdk-trace-base
6103
6070
  # or
6104
- npm install @mutagent/vercel-ai @mutagent/sdk
6105
-
6106
- # For Option A (OTel SpanExporter), also install:
6107
- bun add @opentelemetry/sdk-trace-node @opentelemetry/sdk-trace-base
6071
+ npm install @mutagent/vercel-ai @mutagent/sdk @opentelemetry/sdk-trace-node @opentelemetry/sdk-trace-base
6108
6072
  \`\`\`
6109
6073
 
6110
- ---
6111
-
6112
- ## Option A (Recommended): OTel SpanExporter
6074
+ ## Option A: OpenTelemetry (Recommended)
6113
6075
 
6114
- Uses Vercel AI SDK's built-in \`experimental_telemetry\` with an OpenTelemetry exporter
6115
- that sends spans directly to MutagenT.
6076
+ Follows the same pattern as Langfuse, Braintrust, SigNoz, and other major observability providers.
6077
+ Uses \`MutagentSpanExporter\` as a standard OTel SpanProcessor — no model wrapping required.
6116
6078
 
6117
6079
  \`\`\`typescript
6080
+ import { generateText } from 'ai';
6081
+ import { openai } from '@ai-sdk/openai';
6118
6082
  import { MutagentSpanExporter } from '@mutagent/vercel-ai';
6119
6083
  import { NodeTracerProvider } from '@opentelemetry/sdk-trace-node';
6120
6084
  import { SimpleSpanProcessor } from '@opentelemetry/sdk-trace-base';
6121
- import { initTracing } from '@mutagent/sdk/tracing';
6122
- import { generateText } from 'ai';
6123
- import { openai } from '@ai-sdk/openai';
6124
6085
 
6125
- // Initialize MutagenT tracing
6126
- initTracing({ apiKey: process.env.MUTAGENT_API_KEY! });
6127
-
6128
- // Set up OTel with MutagenT exporter
6086
+ // Set up OTel provider with MutagenT exporter (once at app startup)
6129
6087
  const provider = new NodeTracerProvider();
6130
6088
  provider.addSpanProcessor(
6131
6089
  new SimpleSpanProcessor(new MutagentSpanExporter())
6132
6090
  );
6133
6091
  provider.register();
6134
6092
 
6135
- // Use Vercel AI SDK normally with telemetry enabled
6093
+ // Enable telemetry on AI SDK calls — all calls are automatically traced
6136
6094
  const result = await generateText({
6137
- model: openai('gpt-4'),
6095
+ model: openai('gpt-4o'),
6138
6096
  prompt: 'Hello!',
6139
6097
  experimental_telemetry: { isEnabled: true },
6140
6098
  });
6141
6099
  \`\`\`
6142
6100
 
6143
- ---
6144
-
6145
- ## Option B (Alternative): Middleware
6146
-
6147
- Uses the Vercel AI SDK \`wrapLanguageModel\` middleware pattern.
6101
+ ### Streaming with OTel
6148
6102
 
6149
6103
  \`\`\`typescript
6150
6104
  // app/api/chat/route.ts
6151
- import { streamText, wrapLanguageModel } from 'ai';
6105
+ import { streamText } from 'ai';
6152
6106
  import { openai } from '@ai-sdk/openai';
6153
- import { createMutagentMiddleware } from '@mutagent/vercel-ai';
6154
- import { initTracing } from '@mutagent/sdk/tracing';
6155
-
6156
- // Initialize tracing (once at app startup)
6157
- initTracing({ apiKey: process.env.MUTAGENT_API_KEY! });
6158
-
6159
- // Create middleware
6160
- const middleware = createMutagentMiddleware();
6161
-
6162
- // Wrap your model with MutagenT middleware
6163
- const model = wrapLanguageModel({
6164
- model: openai('gpt-4o'),
6165
- middleware,
6166
- });
6167
6107
 
6168
6108
  export async function POST(req: Request) {
6169
6109
  const { messages } = await req.json();
6170
6110
 
6171
- // All calls are automatically traced
6172
- const result = streamText({ model, messages });
6111
+ // OTel provider registered at startup — just enable telemetry
6112
+ const result = streamText({
6113
+ model: openai('gpt-4o'),
6114
+ messages,
6115
+ experimental_telemetry: { isEnabled: true },
6116
+ });
6173
6117
 
6174
6118
  return result.toDataStreamResponse();
6175
6119
  }
6176
6120
  \`\`\`
6177
6121
 
6178
- ## Non-Streaming Usage
6122
+ ## Option B: Middleware (Alternative — Simple / Legacy)
6123
+
6124
+ Works without OTel setup. Good for quick prototyping or environments where
6125
+ installing OTel dependencies is not practical.
6179
6126
 
6180
6127
  \`\`\`typescript
6181
6128
  import { generateText, wrapLanguageModel } from 'ai';
@@ -6191,6 +6138,26 @@ const { text } = await generateText({
6191
6138
  });
6192
6139
  \`\`\`
6193
6140
 
6141
+ ### Streaming with Middleware
6142
+
6143
+ \`\`\`typescript
6144
+ // app/api/chat/route.ts
6145
+ import { streamText, wrapLanguageModel } from 'ai';
6146
+ import { openai } from '@ai-sdk/openai';
6147
+ import { createMutagentMiddleware } from '@mutagent/vercel-ai';
6148
+
6149
+ const middleware = createMutagentMiddleware();
6150
+ const model = wrapLanguageModel({ model: openai('gpt-4o'), middleware });
6151
+
6152
+ export async function POST(req: Request) {
6153
+ const { messages } = await req.json();
6154
+
6155
+ const result = streamText({ model, messages });
6156
+
6157
+ return result.toDataStreamResponse();
6158
+ }
6159
+ \`\`\`
6160
+
6194
6161
  ## Verification
6195
6162
 
6196
6163
  \`\`\`bash
@@ -6253,31 +6220,26 @@ npm install @mutagent/openai @mutagent/sdk
6253
6220
  ## Integration
6254
6221
 
6255
6222
  \`\`\`typescript
6256
- import OpenAI from 'openai';
6257
6223
  import { observeOpenAI } from '@mutagent/openai';
6258
6224
  import { initTracing } from '@mutagent/sdk/tracing';
6259
6225
 
6260
- // Initialize tracing (once at app startup)
6226
+ // Initialize MutagenT tracing
6261
6227
  initTracing({ apiKey: process.env.MUTAGENT_API_KEY! });
6262
6228
 
6263
- // Wrap the OpenAI client — ALL methods are automatically traced
6264
- const openai = observeOpenAI(new OpenAI(), {
6265
- sessionId: 'my-session', // optional
6266
- userId: 'user-123', // optional
6267
- });
6229
+ // Wrap your OpenAI client with observeOpenAI for automatic tracing
6230
+ const openai = observeOpenAI(new OpenAI({
6231
+ apiKey: process.env.OPENAI_API_KEY,
6232
+ }));
6268
6233
 
6269
- // Use exactly as normal — chat, embeddings, images, audio all work
6270
- const completion = await openai.chat.completions.create({
6234
+ // All calls are automatically traced
6235
+ const response = await openai.chat.completions.create({
6271
6236
  model: 'gpt-4o',
6272
6237
  messages: [{ role: 'user', content: 'Hello!' }],
6273
6238
  });
6274
6239
 
6275
- console.log(completion.choices[0].message.content);
6240
+ console.log(response.choices[0].message.content);
6276
6241
  \`\`\`
6277
6242
 
6278
- ALL OpenAI SDK methods are preserved and traced automatically.
6279
- No API changes — just wrap your client and everything is observed.
6280
-
6281
6243
  ## Streaming
6282
6244
 
6283
6245
  Streaming works out of the box with no extra configuration:
@@ -8601,5 +8563,5 @@ program.addCommand(createUsageCommand());
8601
8563
  program.addCommand(createHooksCommand());
8602
8564
  program.parse();
8603
8565
 
8604
- //# debugId=9DE14F18C9B5C35264756E2164756E21
8566
+ //# debugId=DC3AE5F1F511B4AF64756E2164756E21
8605
8567
  //# sourceMappingURL=cli.js.map