frogo 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/README.md +79 -0
  2. package/dist/agent/launch.js +384 -0
  3. package/dist/cli/commands/configure.js +243 -0
  4. package/dist/cli/commands/debug.js +9 -0
  5. package/dist/cli/commands/investigate.js +50 -0
  6. package/dist/cli/commands/mcp.js +53 -0
  7. package/dist/cli/commands/scan.js +5 -0
  8. package/dist/cli/index.js +22 -0
  9. package/dist/config/load.js +36 -0
  10. package/dist/config/save.js +19 -0
  11. package/dist/connectors/datadog.js +113 -0
  12. package/dist/connectors/demo-events.js +82 -0
  13. package/dist/connectors/local.js +25 -0
  14. package/dist/connectors/trigger.js +13 -0
  15. package/dist/connectors/vercel.js +13 -0
  16. package/dist/core/correlator.js +17 -0
  17. package/dist/core/investigator.js +49 -0
  18. package/dist/core/pattern-engine.js +108 -0
  19. package/dist/core/timeline.js +24 -0
  20. package/dist/core/types.js +1 -0
  21. package/dist/llm/explain.js +14 -0
  22. package/package.json +37 -0
  23. package/src/agent/launch.ts +449 -0
  24. package/src/cli/commands/configure.ts +265 -0
  25. package/src/cli/commands/debug.ts +10 -0
  26. package/src/cli/commands/mcp.ts +66 -0
  27. package/src/cli/commands/scan.ts +6 -0
  28. package/src/cli/index.ts +27 -0
  29. package/src/config/load.ts +42 -0
  30. package/src/config/save.ts +27 -0
  31. package/src/connectors/datadog.ts +152 -0
  32. package/src/connectors/local.ts +27 -0
  33. package/src/connectors/trigger.ts +16 -0
  34. package/src/connectors/vercel.ts +16 -0
  35. package/src/core/correlator.ts +27 -0
  36. package/src/core/investigator.ts +64 -0
  37. package/src/core/pattern-engine.ts +139 -0
  38. package/src/core/timeline.ts +32 -0
  39. package/src/core/types.ts +92 -0
  40. package/src/llm/explain.ts +20 -0
  41. package/tsconfig.json +15 -0
package/README.md ADDED
@@ -0,0 +1,79 @@
1
+ # Frogo
2
+ Production incident investigator CLI that reconstructs cross-service failure timelines and produces a deterministic root-cause hypothesis, with optional MCP-powered log exploration.
3
+
4
+ ## Install
5
+ ```bash
6
+ npx frogo
7
+ ```
8
+
9
+ Or install globally:
10
+ ```bash
11
+ npm install -g frogo
12
+ ```
13
+
14
+ ## Quickstart
15
+ 1. Configure integrations and the model:
16
+ ```bash
17
+ npx frogo configure
18
+ ```
19
+ 2. Run the agent chat:
20
+ ```bash
21
+ npx frogo
22
+ ```
23
+ 3. Run deterministic scans:
24
+ ```bash
25
+ npx frogo scan
26
+ npx frogo debug "why did my worker restart?"
27
+ ```
28
+
29
+ ## Commands
30
+ - `frogo`
31
+ Starts the agent chat (ai-sdk). Uses MCP tools when configured.
32
+ - `frogo configure`
33
+ Interactive configuration for Vercel, Trigger.dev, Datadog, LangSmith, and the LLM provider.
34
+ - `frogo scan`
35
+ Deterministic investigation over the default time window.
36
+ - `frogo debug "<query>"`
37
+ Narrow-window deterministic investigation with a user query bias.
38
+ - `frogo mcp login langsmith`
39
+ Store LangSmith MCP credentials.
40
+
41
+ ## Configuration
42
+ Frogo reads from:
43
+ - Project config: `.frogo.json`
44
+ - Global config: `~/.frogo/config.json`
45
+
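For reference, a minimal sketch of the shape the `configure` flow saves (field names match the configure command shipped below; values are placeholders, and the authoritative types live in `src/core/types.ts`, which this diff does not expand):

```json
{
  "vercelToken": "<vercel token>",
  "triggerToken": "<trigger.dev api key>",
  "langsmith": {
    "apiKey": "<langsmith api key>",
    "mcpUrl": "https://langsmith-mcp-server.onrender.com/mcp"
  },
  "llmProvider": {
    "provider": "openai",
    "model": "gpt-4o-mini",
    "endpoint": "https://api.openai.com/v1"
  }
}
```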
46
+ ### LLM provider
47
+ Frogo does not store provider API keys in config. Set:
48
+ ```bash
49
+ export FROGO_AI_API_KEY="..."
50
+ ```
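The agent launcher (see `dist/agent/launch.js` below) also reads a few optional environment variables when the corresponding config fields are unset; a sketch:

```bash
export FROGO_AI_PROVIDER="openai"                      # fallback when no provider is configured (default: openai)
export FROGO_AI_MODEL="gpt-4o-mini"                    # fallback when no model is configured (default: gpt-4o-mini)
export FROGO_AI_ENDPOINT="https://api.openai.com/v1"   # optional custom base URL for the provider
```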
51
+
52
+ ### LangSmith MCP
53
+ Use the hosted MCP server:
54
+ ```
55
+ https://langsmith-mcp-server.onrender.com/mcp
56
+ ```
57
+
58
+ Frogo sends your key as the `LANGSMITH-API-KEY` header.
59
+
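Based on the agent launcher below, the headers sent to the LangSmith MCP server look roughly like this (the workspace header is added only when a workspace ID is configured):

```json
{
  "LANGSMITH-API-KEY": "<your LangSmith API key>",
  "LANGSMITH-WORKSPACE-ID": "<optional workspace ID>"
}
```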
60
+ ### Datadog MCP
61
+ Frogo can connect to a Datadog MCP server via stdio. Provide:
62
+ ```json
63
+ {
64
+ "datadog": {
65
+ "apiKey": "...",
66
+ "appKey": "...",
67
+ "command": "datadog-mcp-server"
68
+ }
69
+ }
70
+ ```
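Internally (see the launcher below) Frogo spawns the configured command over stdio with the keys exported as Datadog environment variables; roughly equivalent to the sketch below, where `datadog-mcp-server` stands in for whatever command you configured:

```bash
# DD_LOGS_SITE / DD_METRICS_SITE are added only when logsSite / metricsSite are configured.
DD_API_KEY="<api key>" DD_APP_KEY="<app key>" DD_SITE="datadoghq.com" datadog-mcp-server
```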
71
+
72
+ ## Philosophy
73
+ - Deterministic pattern engine first
74
+ - LLM explains, never decides root cause
75
+ - Normalized events only (no raw logs sent to the model)
76
+ - Built to extend with more connectors and patterns
77
+
78
+ ## License
79
+ MIT
package/dist/agent/launch.js ADDED
@@ -0,0 +1,384 @@
1
+ import "dotenv/config";
2
+ import readline from "node:readline/promises";
3
+ import crypto from "node:crypto";
4
+ import chalk from "chalk";
5
+ import { stdin as input, stdout as output } from "node:process";
6
+ import { ToolLoopAgent, stepCountIs } from "ai";
7
+ import { createOpenAI } from "@ai-sdk/openai";
8
+ import { createAnthropic } from "@ai-sdk/anthropic";
9
+ import { createMCPClient } from "@ai-sdk/mcp";
10
+ import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";
11
+ import { loadConfig } from "../config/load.js";
12
+ const DEFAULT_SYSTEM_PROMPT = "You are Frogo, a deterministic incident investigator. Respond concisely, cite evidence, and avoid hallucinations. " +
13
+ "If the user says 'do a pass' or 'anything you can get', fetch a minimal overview: projects, recent runs, datasets, and prompts (if available) " +
14
+ "using MCP tools, then summarize what you found.";
15
+ const DATADOG_MCP_COMMAND = "datadog-mcp-server";
16
+ function buildProviderContext(config) {
17
+ const apiKey = process.env.FROGO_AI_API_KEY?.trim();
18
+ if (!apiKey) {
19
+ console.error("Set FROGO_AI_API_KEY (or add it to your .env) before running Frogo.");
20
+ return null;
21
+ }
22
+ const providerName = (config.llmProvider?.provider ?? process.env.FROGO_AI_PROVIDER ?? "openai").toLowerCase();
23
+ const modelId = config.llmProvider?.model ?? process.env.FROGO_AI_MODEL ?? "gpt-4o-mini";
24
+ const endpoint = config.llmProvider?.endpoint ?? process.env.FROGO_AI_ENDPOINT;
25
+ const systemPrompt = config.llmProvider?.systemPrompt ?? DEFAULT_SYSTEM_PROMPT;
26
+ let languageModel;
27
+ if (providerName === "anthropic") {
28
+ const provider = createAnthropic({ apiKey, baseURL: endpoint });
29
+ languageModel = provider(modelId);
30
+ }
31
+ else {
32
+ const provider = createOpenAI({ apiKey, baseURL: endpoint });
33
+ languageModel = provider(modelId);
34
+ }
35
+ return { languageModel, systemPrompt };
36
+ }
37
+ function prefersSse(error) {
38
+ if (!(error instanceof Error)) {
39
+ return false;
40
+ }
41
+ return error.message.toLowerCase().includes("does not support http transport");
42
+ }
43
+ async function createLangSmithClient(langsmith) {
44
+ const headers = {
45
+ "LANGSMITH-API-KEY": langsmith.apiKey
46
+ };
47
+ if (langsmith.workspaceKey) {
48
+ headers["LANGSMITH-WORKSPACE-ID"] = langsmith.workspaceKey;
49
+ }
50
+ const baseConfig = {
51
+ headers
52
+ };
53
+ try {
54
+ return await createMCPClient({
55
+ transport: {
56
+ type: "http",
57
+ url: langsmith.mcpUrl,
58
+ ...baseConfig
59
+ }
60
+ });
61
+ }
62
+ catch (error) {
63
+ if (prefersSse(error)) {
64
+ return await createMCPClient({
65
+ transport: {
66
+ type: "sse",
67
+ url: langsmith.mcpUrl,
68
+ ...baseConfig
69
+ }
70
+ });
71
+ }
72
+ throw error;
73
+ }
74
+ }
75
+ async function buildLangSmithToolContext(config) {
76
+ const langsmith = config.langsmith;
77
+ if (!langsmith?.apiKey || !langsmith?.mcpUrl) {
78
+ return null;
79
+ }
80
+ try {
81
+ const client = await createLangSmithClient(langsmith);
82
+ const tools = await client.tools();
83
+ return { name: "LangSmith", tools, client };
84
+ }
85
+ catch (error) {
86
+ console.error("LangSmith MCP initialization failed:", error);
87
+ return null;
88
+ }
89
+ }
90
+ async function buildDatadogToolContext(config) {
91
+ const datadog = config.datadog;
92
+ if (!datadog?.apiKey || !datadog?.appKey) {
93
+ return null;
94
+ }
95
+ const env = {
96
+ DD_API_KEY: datadog.apiKey,
97
+ DD_APP_KEY: datadog.appKey,
98
+ DD_SITE: datadog.site ?? process.env.DD_SITE ?? "datadoghq.com"
99
+ };
100
+ if (datadog.logsSite) {
101
+ env.DD_LOGS_SITE = datadog.logsSite;
102
+ }
103
+ if (datadog.metricsSite) {
104
+ env.DD_METRICS_SITE = datadog.metricsSite;
105
+ }
106
+ const transport = new StdioClientTransport({
107
+ command: datadog.command ?? DATADOG_MCP_COMMAND,
108
+ args: datadog.args ?? [],
109
+ env,
110
+ stderr: "inherit"
111
+ });
112
+ try {
113
+ const client = await createMCPClient({
114
+ transport,
115
+ name: "frogo-datadog",
116
+ version: "0.1.0"
117
+ });
118
+ const tools = await client.tools();
119
+ return { name: "Datadog", tools, client };
120
+ }
121
+ catch (error) {
122
+ console.error("Datadog MCP initialization failed:", error);
123
+ await transport.close().catch(() => {
124
+ /* ignore */
125
+ });
126
+ return null;
127
+ }
128
+ }
129
+ async function buildMcpToolContexts(config) {
130
+ const contexts = [];
131
+ const langsmithContext = await buildLangSmithToolContext(config);
132
+ if (langsmithContext) {
133
+ contexts.push(langsmithContext);
134
+ }
135
+ const datadogContext = await buildDatadogToolContext(config);
136
+ if (datadogContext) {
137
+ contexts.push(datadogContext);
138
+ }
139
+ return contexts;
140
+ }
141
+ function combineToolSets(contexts) {
142
+ return contexts.reduce((acc, context) => ({ ...acc, ...context.tools }), {});
143
+ }
144
+ async function cleanupMcpContexts(contexts) {
145
+ await Promise.all(contexts.map(async (context) => {
146
+ try {
147
+ await context.client.close();
148
+ }
149
+ catch (error) {
150
+ console.error(`Failed to close ${context.name} MCP client:`, error);
151
+ }
152
+ }));
153
+ }
154
+ function createSpinner(label = "thinking") {
155
+ const frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧"];
156
+ let index = 0;
157
+ let handle;
158
+ const isTty = Boolean(process.stdout.isTTY);
159
+ const render = () => {
160
+ const frame = frames[index];
161
+ index = (index + 1) % frames.length;
162
+ process.stdout.write(`\r${chalk.gray(frame)} ${chalk.dim(label)}`);
163
+ };
164
+ return {
165
+ start() {
166
+ if (!isTty) {
167
+ process.stdout.write(`${chalk.dim(label)}...\n`);
168
+ return;
169
+ }
170
+ render();
171
+ handle = setInterval(render, 80);
172
+ },
173
+ stop() {
174
+ if (!isTty) {
175
+ return;
176
+ }
177
+ if (handle) {
178
+ clearInterval(handle);
179
+ handle = undefined;
180
+ }
181
+ process.stdout.write("\r");
182
+ process.stdout.write(" ".repeat(32));
183
+ process.stdout.write("\r");
184
+ }
185
+ };
186
+ }
187
+ function cleanAssistantOutput(content) {
188
+ return content.replace(/^Agent:\s*/i, "").trim();
189
+ }
190
+ function renderBanner(context, mcpContexts) {
191
+ const sessionId = crypto.randomBytes(8).toString("hex");
192
+ const workdir = process.cwd();
193
+ const providerName = context.languageModel.modelId ?? context.languageModel?.model ?? "unknown";
194
+ const providerLabel = context.languageModel?.provider ?? "openai";
195
+ const mcpLabel = mcpContexts.length ? mcpContexts.map((item) => item.name).join(", ") : "none";
196
+ const header = "🐸 Frogo CLI";
197
+ const headerLine = `│ ● ${header} │`;
198
+ const sessionLines = [
199
+ `│ session: ${sessionId} │`,
200
+ `│ ↳ workdir: ${workdir} │`,
201
+ `│ ↳ model: ${providerName} │`,
202
+ `│ ↳ provider: ${providerLabel} │`,
203
+ `│ ↳ mcp: ${mcpLabel} │`
204
+ ];
205
+ const headerWidth = Math.max(headerLine.length, ...sessionLines.map((line) => line.length));
206
+ const drawLine = (text) => {
207
+ const padded = text.padEnd(headerWidth - 1, " ");
208
+ return `${padded}│`;
209
+ };
210
+ const top = `╭${"─".repeat(headerWidth - 1)}╮`;
211
+ const bottom = `╰${"─".repeat(headerWidth - 1)}╯`;
212
+ console.log(chalk.gray(top));
213
+ console.log(chalk.gray(drawLine(headerLine)));
214
+ console.log(chalk.gray(bottom));
215
+ console.log(chalk.gray(top));
216
+ sessionLines.forEach((line) => console.log(chalk.gray(drawLine(line))));
217
+ console.log(chalk.gray(bottom));
218
+ }
219
+ function renderAssistantPrefix() {
220
+ process.stdout.write(`${chalk.green("🐸")} `);
221
+ }
222
+ function formatToolPayload(payload, max = 400) {
223
+ try {
224
+ const raw = JSON.stringify(payload);
225
+ if (raw.length <= max) {
226
+ return raw;
227
+ }
228
+ return `${raw.slice(0, max)}…`;
229
+ }
230
+ catch {
231
+ return "[unserializable]";
232
+ }
233
+ }
234
+ export async function runAgentChat() {
235
+ const config = await loadConfig();
236
+ const context = buildProviderContext(config);
237
+ if (!context) {
238
+ process.exit(1);
239
+ }
240
+ const mcpContexts = await buildMcpToolContexts(config);
241
+ renderBanner(context, mcpContexts);
242
+ const toolSet = combineToolSets(mcpContexts);
243
+ const agent = new ToolLoopAgent({
244
+ id: "frogo-agent",
245
+ model: context.languageModel,
246
+ instructions: context.systemPrompt,
247
+ tools: Object.keys(toolSet).length ? toolSet : undefined,
248
+ stopWhen: stepCountIs(1000)
249
+ });
250
+ const rl = readline.createInterface({ input, output });
251
+ const spinner = createSpinner();
252
+ const conversation = [];
253
+ let activeAbort = null;
254
+ let isGenerating = false;
255
+ console.log(chalk.dim("Type your question and press enter (Ctrl+C to exit)."));
256
+ try {
257
+ rl.on("SIGINT", () => {
258
+ if (isGenerating && activeAbort) {
259
+ activeAbort.abort();
260
+ process.stdout.write(`\n${chalk.dim("↳ you canceled the current response. Ask another question when ready.")}\n`);
261
+ return;
262
+ }
263
+ rl.close();
264
+ });
265
+ while (true) {
266
+ let question;
267
+ try {
268
+ question = await rl.question(`${chalk.cyan("›")} `);
269
+ }
270
+ catch (error) {
271
+ if (error instanceof Error && error.message === "SIGINT") {
272
+ break;
273
+ }
274
+ throw error;
275
+ }
276
+ const trimmed = question.trim();
277
+ if (!trimmed) {
278
+ continue;
279
+ }
280
+ conversation.push({ role: "user", content: trimmed });
281
+ spinner.start();
282
+ isGenerating = true;
283
+ activeAbort = new AbortController();
284
+ let spinnerActive = true;
285
+ let assistantReply = "";
286
+ let prefixPrinted = false;
287
+ let lineOpen = false;
288
+ const ensurePrefix = () => {
289
+ if (!prefixPrinted) {
290
+ renderAssistantPrefix();
291
+ prefixPrinted = true;
292
+ lineOpen = true;
293
+ }
294
+ };
295
+ const ensureNewline = () => {
296
+ if (lineOpen) {
297
+ process.stdout.write("\n");
298
+ lineOpen = false;
299
+ prefixPrinted = false;
300
+ }
301
+ };
302
+ try {
303
+ const streamResult = await agent.stream({
304
+ messages: conversation,
305
+ abortSignal: activeAbort.signal
306
+ });
307
+ for await (const part of streamResult.fullStream) {
308
+ switch (part.type) {
309
+ case "text-delta": {
310
+ if (spinnerActive) {
311
+ spinner.stop();
312
+ spinnerActive = false;
313
+ ensurePrefix();
314
+ }
315
+ ensurePrefix();
316
+ process.stdout.write(part.text);
317
+ assistantReply += part.text;
318
+ break;
319
+ }
320
+ case "tool-call": {
321
+ if (spinnerActive) {
322
+ spinner.stop();
323
+ spinnerActive = false;
324
+ }
325
+ ensureNewline();
326
+ const payload = formatToolPayload(part.input);
327
+ process.stdout.write(`${chalk.dim("↳ tool call")} ${chalk.cyan(part.toolName)} ${chalk.dim(payload)}\n`);
328
+ break;
329
+ }
330
+ case "tool-result": {
331
+ ensureNewline();
332
+ const payload = formatToolPayload(part.output);
333
+ process.stdout.write(`${chalk.dim("↳ tool result")} ${chalk.cyan(part.toolName)} ${chalk.dim(payload)}\n`);
334
+ break;
335
+ }
336
+ case "tool-error": {
337
+ ensureNewline();
338
+ process.stdout.write(`${chalk.red("↳ tool error")} ${chalk.cyan(part.toolName)} ${chalk.dim(String(part.error))}\n`);
339
+ break;
340
+ }
341
+ case "tool-approval-request": {
342
+ ensureNewline();
343
+ process.stdout.write(`${chalk.yellow("↳ tool approval")} ${chalk.cyan(part.toolCall.toolName)}\n`);
344
+ break;
345
+ }
346
+ case "tool-output-denied": {
347
+ ensureNewline();
348
+ process.stdout.write(`${chalk.yellow("↳ tool output denied")} ${chalk.cyan(part.toolName)}\n`);
349
+ break;
350
+ }
351
+ default:
352
+ break;
353
+ }
354
+ }
355
+ if (spinnerActive) {
356
+ spinner.stop();
357
+ }
358
+ process.stdout.write("\n");
359
+ const cleaned = cleanAssistantOutput(assistantReply);
360
+ if (cleaned) {
361
+ conversation.push({ role: "assistant", content: cleaned });
362
+ }
363
+ }
364
+ catch (error) {
365
+ spinner.stop();
366
+ if (activeAbort?.signal.aborted) {
367
+ console.log(`\n${chalk.dim("↳ you canceled the current response. Ask another question when ready.")}`);
368
+ }
369
+ else {
370
+ console.error("Agent call failed:", error);
371
+ break;
372
+ }
373
+ }
374
+ finally {
375
+ isGenerating = false;
376
+ activeAbort = null;
377
+ }
378
+ }
379
+ }
380
+ finally {
381
+ rl.close();
382
+ await cleanupMcpContexts(mcpContexts);
383
+ }
384
+ }
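The compiled launcher above is untyped JavaScript; the config shape it expects from `loadConfig()` can be inferred from its property accesses. A rough TypeScript sketch follows (the interface name and optionality are assumptions; the authoritative definitions live in `src/core/types.ts`, which this diff does not expand):

```ts
// Inferred from property accesses in dist/agent/launch.js and dist/cli/commands/configure.js.
interface FrogoConfig {
  vercelToken?: string;
  triggerToken?: string;
  llmProvider?: {
    provider?: string;      // "openai" | "anthropic" | "custom"
    model?: string;
    endpoint?: string;
    systemPrompt?: string;
  };
  langsmith?: {
    apiKey?: string;
    workspaceKey?: string;  // sent as the LANGSMITH-WORKSPACE-ID header
    mcpUrl?: string;
  };
  datadog?: {
    apiKey?: string;
    appKey?: string;
    site?: string;
    logsSite?: string;
    metricsSite?: string;
    command?: string;       // defaults to "datadog-mcp-server"
    args?: string[];
  };
}
```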
package/dist/cli/commands/configure.js ADDED
@@ -0,0 +1,243 @@
1
+ import { Command } from "commander";
2
+ import prompts from "prompts";
3
+ import { loadConfig } from "../../config/load.js";
4
+ import { saveConfig } from "../../config/save.js";
5
+ const menuChoices = [
6
+ { title: "Vercel", value: "vercel" },
7
+ { title: "Trigger.dev", value: "trigger" },
8
+ { title: "Datadog", value: "datadog" },
9
+ { title: "LangSmith", value: "langsmith" },
10
+ { title: "LLM provider", value: "llmProvider" },
11
+ { title: "Show configuration", value: "show" },
12
+ { title: "Done", value: "done" }
13
+ ];
14
+ const promptCredentials = async (questions) => {
15
+ const answers = (await prompts(questions));
16
+ return answers;
17
+ };
18
+ const vqlQuestions = [
19
+ {
20
+ type: "text",
21
+ name: "vercelToken",
22
+ message: "Vercel token",
23
+ initial: ""
24
+ }
25
+ ];
26
+ const triggerQuestions = [
27
+ {
28
+ type: "text",
29
+ name: "triggerToken",
30
+ message: "Trigger.dev API key",
31
+ initial: ""
32
+ }
33
+ ];
34
+ const datadogQuestions = [
35
+ {
36
+ type: "text",
37
+ name: "apiKey",
38
+ message: "Datadog API key for MCP",
39
+ initial: ""
40
+ },
41
+ {
42
+ type: "text",
43
+ name: "appKey",
44
+ message: "Datadog Application key",
45
+ initial: ""
46
+ },
47
+ {
48
+ type: "text",
49
+ name: "site",
50
+ message: "Datadog site",
51
+ initial: "datadoghq.com"
52
+ },
53
+ {
54
+ type: "text",
55
+ name: "logsSite",
56
+ message: "Datadog logs site (optional)",
57
+ initial: ""
58
+ },
59
+ {
60
+ type: "text",
61
+ name: "metricsSite",
62
+ message: "Datadog metrics site (optional)",
63
+ initial: ""
64
+ },
65
+ {
66
+ type: "text",
67
+ name: "command",
68
+ message: "Datadog MCP command",
69
+ initial: "datadog-mcp-server"
70
+ },
71
+ {
72
+ type: "text",
73
+ name: "args",
74
+ message: "Additional args for Datadog command",
75
+ initial: ""
76
+ }
77
+ ];
78
+ const langsmithQuestions = [
79
+ {
80
+ type: "text",
81
+ name: "apiKey",
82
+ message: "LangSmith API key",
83
+ initial: ""
84
+ },
85
+ {
86
+ type: "text",
87
+ name: "workspaceKey",
88
+ message: "LangSmith workspace ID (optional)",
89
+ initial: ""
90
+ },
91
+ {
92
+ type: "text",
93
+ name: "mcpUrl",
94
+ message: "LangSmith MCP URL",
95
+ initial: "https://langsmith-mcp-server.onrender.com/mcp"
96
+ }
97
+ ];
98
+ const llmProviderChoices = [
99
+ { title: "openai", value: "openai" },
100
+ { title: "anthropic", value: "anthropic" },
101
+ { title: "custom", value: "custom" }
102
+ ];
103
+ const llmProviderQuestions = [
104
+ {
105
+ type: "select",
106
+ name: "provider",
107
+ message: "LLM provider",
108
+ choices: llmProviderChoices,
109
+ initial: 0
110
+ },
111
+ {
112
+ type: "text",
113
+ name: "endpoint",
114
+ message: "Provider endpoint or MCP URL (optional)",
115
+ initial: "https://api.openai.com/v1"
116
+ },
117
+ {
118
+ type: "text",
119
+ name: "model",
120
+ message: "Default model",
121
+ initial: "gpt-4o-mini"
122
+ },
123
+ {
124
+ type: "text",
125
+ name: "systemPrompt",
126
+ message: "System prompt (optional)",
127
+ initial: "You are Frogo, an incident investigator. Answer concisely."
128
+ }
129
+ ];
130
+ function trimOrUndefined(value) {
131
+ if (!value)
132
+ return undefined;
133
+ const trimmed = value.trim();
134
+ return trimmed === "" ? undefined : trimmed;
135
+ }
136
+ async function configureDatadog(config) {
137
+ const answers = await promptCredentials(datadogQuestions);
138
+ return {
139
+ ...config,
140
+ datadog: {
141
+ apiKey: trimOrUndefined(answers.apiKey),
142
+ appKey: trimOrUndefined(answers.appKey),
143
+ site: trimOrUndefined(answers.site),
144
+ logsSite: trimOrUndefined(answers.logsSite),
145
+ metricsSite: trimOrUndefined(answers.metricsSite),
146
+ command: trimOrUndefined(answers.command),
147
+ args: answers.args ? answers.args.split(" ").filter(Boolean) : undefined
148
+ }
149
+ };
150
+ }
151
+ async function configureLangSmith(config) {
152
+ const answers = await promptCredentials(langsmithQuestions);
153
+ return {
154
+ ...config,
155
+ langsmith: {
156
+ apiKey: trimOrUndefined(answers.apiKey),
157
+ workspaceKey: trimOrUndefined(answers.workspaceKey),
158
+ mcpUrl: trimOrUndefined(answers.mcpUrl)
159
+ }
160
+ };
161
+ }
162
+ async function configureLLMProvider(config) {
163
+ const answers = await promptCredentials(llmProviderQuestions);
164
+ return {
165
+ ...config,
166
+ llmProvider: {
167
+ provider: trimOrUndefined(answers.provider),
168
+ endpoint: trimOrUndefined(answers.endpoint),
169
+ model: trimOrUndefined(answers.model),
170
+ systemPrompt: trimOrUndefined(answers.systemPrompt)
171
+ }
172
+ };
173
+ }
174
+ async function showConfig(config) {
175
+ console.log("Current configuration:");
176
+ console.log(JSON.stringify(config, null, 2));
177
+ }
178
+ async function configure(cmd) {
179
+ let config = await loadConfig();
180
+ console.log("🐸 Frogo configure — pick an integration to update");
181
+ while (true) {
182
+ const { choice } = (await promptCredentials([
183
+ {
184
+ type: "select",
185
+ name: "choice",
186
+ message: "What would you like to configure?",
187
+ choices: menuChoices
188
+ }
189
+ ]));
190
+ if (!choice) {
191
+ console.log("Configuration aborted.");
192
+ break;
193
+ }
194
+ if (choice === "done") {
195
+ await saveConfig(config);
196
+ console.log("🐸 Configuration saved.");
197
+ break;
198
+ }
199
+ if (choice === "show") {
200
+ await showConfig(config);
201
+ continue;
202
+ }
203
+ switch (choice) {
204
+ case "vercel":
205
+ config = {
206
+ ...config,
207
+ vercelToken: trimOrUndefined((await promptCredentials(vqlQuestions)).vercelToken)
208
+ };
209
+ await saveConfig(config);
210
+ console.log("✔ Vercel token updated.");
211
+ break;
212
+ case "trigger":
213
+ config = {
214
+ ...config,
215
+ triggerToken: trimOrUndefined((await promptCredentials(triggerQuestions)).triggerToken)
216
+ };
217
+ await saveConfig(config);
218
+ console.log("✔ Trigger.dev API key updated.");
219
+ break;
220
+ case "datadog":
221
+ config = await configureDatadog(config);
222
+ await saveConfig(config);
223
+ console.log("✔ Datadog MCP config updated.");
224
+ break;
225
+ case "langsmith":
226
+ config = await configureLangSmith(config);
227
+ await saveConfig(config);
228
+ console.log("✔ LangSmith MCP config updated.");
229
+ break;
230
+ case "llmProvider":
231
+ config = await configureLLMProvider(config);
232
+ await saveConfig(config);
233
+ console.log("✔ LLM provider updated.");
234
+ console.log("Set `FROGO_AI_API_KEY` (or place it in your `.env`) instead of saving the key to disk.");
235
+ break;
236
+ default:
237
+ console.log(`Unknown option: ${choice}`);
238
+ }
239
+ }
240
+ }
241
+ export const configureCommand = new Command("configure")
242
+ .description("connect integrations and save config")
243
+ .action(async () => configure(new Command()));
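`configureCommand` is exported for the CLI entry point (`dist/cli/index.js`, not expanded in this diff) to register. A hypothetical Commander wiring is sketched below; the program name and description are assumptions, and only `configureCommand` is confirmed by this diff:

```ts
// Hypothetical wiring for the CLI entry point; the shipped index.js may differ.
import { Command } from "commander";
import { configureCommand } from "./commands/configure.js";

const program = new Command("frogo").description("production incident investigator");
program.addCommand(configureCommand);
await program.parseAsync(process.argv);
```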