@moltium/cli 0.1.13 → 0.1.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +326 -27
- package/dist/index.js.map +1 -1
- package/package.json +2 -2
package/dist/index.js
CHANGED
@@ -1012,7 +1012,7 @@ function getCoreDepVersion() {
     const corePkg = require2("@moltium/core/package.json");
     return `^${corePkg.version}`;
   } catch {
-    return "^0.1.13";
+    return "^0.1.14";
   }
 }
 var initCommand = new Command("init").description("Initialize a new Moltium agent").argument("[name]", "Agent name").action(async (name) => {
@@ -1237,7 +1237,7 @@ import chalk2 from "chalk";
 import ora2 from "ora";
 import { config as loadEnv } from "dotenv";
 import { createInterface } from "readline";
-import { ConfigLoader, Agent, MarkdownParser, startServer, AnthropicProvider, OpenAIProvider, buildSystemPrompt } from "@moltium/core";
+import { ConfigLoader, Agent, MarkdownParser, startServer, AnthropicProvider, OpenAIProvider, buildSystemPrompt, builtInActions } from "@moltium/core";
 var startCommand = new Command2("start").description("Start agent locally").option("-p, --port <number>", "Port to run on", "3000").option("-e, --env <file>", "Environment file", ".env").option("--debug", "Enable debug logging").option("--dry-run", "Test agent in terminal without posting to social platforms").option("--watch", "Watch for file changes (not yet implemented)").action(async (options) => {
   const agentDir = resolve2(process.cwd());
   loadEnv({ path: resolve2(agentDir, options.env) });
@@ -1419,6 +1419,35 @@ function parseContextMemory(content) {
   entries["context:raw"] = content;
   return entries;
 }
+function dryParseFrequency(freq) {
+  const lower = freq.toLowerCase().trim();
+  if (lower === "realtime") return 5 * 60 * 1e3;
+  if (lower === "hourly") return 60 * 60 * 1e3;
+  if (lower === "daily") return 24 * 60 * 60 * 1e3;
+  const match = lower.match(/^(\d+)\s*(m|min|mins|minutes?|h|hr|hrs|hours?|d|days?)$/);
+  if (match) {
+    const n = parseInt(match[1], 10);
+    const unit = match[2][0];
+    if (unit === "m") return n * 60 * 1e3;
+    if (unit === "h") return n * 60 * 60 * 1e3;
+    if (unit === "d") return n * 24 * 60 * 60 * 1e3;
+  }
+  const nlMin = lower.match(/every\s+(\d+)\s+minutes?/);
+  if (nlMin) return parseInt(nlMin[1], 10) * 60 * 1e3;
+  const nlHour = lower.match(/every\s+(\d+)\s+hours?/);
+  if (nlHour) return parseInt(nlHour[1], 10) * 60 * 60 * 1e3;
+  if (/every\s+hour/.test(lower)) return 60 * 60 * 1e3;
+  if (/every\s+day/.test(lower)) return 24 * 60 * 60 * 1e3;
+  return 0;
+}
+function fmtMs(ms) {
+  if (ms < 60 * 1e3) return `${Math.round(ms / 1e3)}s`;
+  if (ms < 60 * 60 * 1e3) return `${Math.round(ms / 6e4)}m`;
+  return `${Math.round(ms / 36e5)}h`;
+}
+function timestamp() {
+  return chalk2.gray(`[${(/* @__PURE__ */ new Date()).toISOString()}]`);
+}
 async function runDryMode(config, type, agentDir, spinner) {
   let systemPrompt = buildSystemPrompt(config);
   if (type === "markdown") {
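
Note on the helpers added in the hunk above: dryParseFrequency accepts the keywords "realtime", "hourly", and "daily", compact forms such as "30m", "2h", or "1d", and natural-language phrases like "every 30 minutes" or "every 2 hours"; anything it cannot parse yields 0, which the scheduler in the next hunk treats as "do not schedule". A rough sketch of the expected return values, assuming the functions behave as written (the sample inputs are illustrative, not taken from the package):

    // Illustrative only - hypothetical inputs run through the helpers above
    dryParseFrequency("realtime");      // 300000 (5 minutes)
    dryParseFrequency("30m");           // 1800000
    dryParseFrequency("every 2 hours"); // 7200000
    dryParseFrequency("whenever");      // 0 -> platform is skipped by the scheduler
    fmtMs(1800000);                     // "30m"
    fmtMs(7200000);                     // "2h"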
@@ -1435,51 +1464,321 @@ async function runDryMode(config, type, agentDir, spinner) {
       systemPrompt += "\n\n## Traits\n" + readFileSync(traitsPath, "utf-8");
     }
   }
+  const mdParser = new MarkdownParser();
+  const skills = [];
+  if (type === "markdown") {
+    const agentMdPath = join2(agentDir, "agent.md");
+    if (existsSync2(agentMdPath)) {
+      skills.push(...mdParser.parseSkills(readFileSync(agentMdPath, "utf-8")));
+    }
+    const skillsDir = join2(agentDir, "skills");
+    if (existsSync2(skillsDir)) {
+      for (const file of readdirSync(skillsDir).filter((f) => f.endsWith(".md"))) {
+        const skillName = basename(file, ".md").replace(/-/g, "_");
+        const existing = skills.findIndex((s) => s.name === skillName);
+        const desc = readFileSync(join2(skillsDir, file), "utf-8");
+        if (existing >= 0) skills[existing].description = desc;
+        else skills.push({ name: skillName, description: desc });
+      }
+    }
+  }
+  const moltbookActions = [
+    { name: "check_feed", description: "Browse the moltbook feed (personalized or global) and find posts to engage with" },
+    { name: "check_dms", description: "Check for new DM requests and unread messages from other agents" },
+    { name: "search_moltbook", description: 'Search moltbook for posts/comments by meaning (semantic search). Params: { "query": "..." }' },
+    { name: "comment_on_post", description: 'Reply to a post with a thoughtful comment. Params: { "post_id": "...", "content": "..." }' },
+    { name: "upvote_post", description: 'Upvote a post you find valuable. Params: { "post_id": "..." }' },
+    { name: "downvote_post", description: 'Downvote a post you disagree with. Params: { "post_id": "..." }' },
+    { name: "browse_submolts", description: "Discover moltbook communities (submolts) to subscribe to" },
+    { name: "follow_agent", description: 'Follow another agent whose posts you consistently enjoy. Params: { "agent_name": "..." }' },
+    { name: "send_dm_request", description: 'Request to start a private conversation with another agent. Params: { "to": "...", "message": "..." }' },
+    { name: "send_dm", description: 'Send a message in an existing DM conversation. Params: { "conversation_id": "...", "message": "..." }' }
+  ];
+  const availableActions = [
+    ...builtInActions.filter((a) => config.actions.includes(a.name)).map((a) => ({ name: a.name, description: a.description })),
+    ...moltbookActions,
+    ...skills.map((s) => ({ name: s.name, description: s.description.slice(0, 120) + "..." }))
+  ];
   const { provider, apiKey, model } = config.llm;
   const llm = provider === "anthropic" ? new AnthropicProvider(apiKey, model) : new OpenAIProvider(apiKey, model);
   spinner.succeed(chalk2.green(`Dry-run mode: "${config.name}" loaded (${type} config)`));
-
-
-
+  const actionsPerHour = typeof config.behaviors.actionsPerHour === "number" ? config.behaviors.actionsPerHour : 5;
+  const tickIntervalMs = Math.floor(36e5 / actionsPerHour);
+  const platforms = Object.keys(config.social).filter((k) => config.social[k]);
+  const postFreqs = [];
+  for (const p of platforms) {
+    const freq = config.social[p]?.postFrequency;
+    if (freq) postFreqs.push(`${p}: ${freq}`);
+  }
+  console.log(chalk2.cyan("\n\u2501\u2501\u2501 Dry-Run Simulation \u2501\u2501\u2501"));
+  console.log(chalk2.gray(`Agent: ${config.name} | LLM: ${provider}/${model}`));
+  console.log(chalk2.gray(`Tick loop: ${actionsPerHour} actions/hour (every ${fmtMs(tickIntervalMs)})`));
+  if (postFreqs.length > 0) {
+    console.log(chalk2.gray(`Post schedule: ${postFreqs.join(", ")}`));
+  }
+  console.log(chalk2.gray(`Available actions: ${availableActions.map((a) => a.name).join(", ")}`));
+  console.log(chalk2.gray("No social platforms connected. All output \u2192 terminal.\n"));
+  console.log(`${timestamp()} ${chalk2.cyan("[STARTUP POST]")} Generating...`);
+  try {
+    const startupPost = await llm.generateText(
+      `You just came online. Write a short, engaging first post for social media.
+Stay fully in character. Do NOT use generic phrases. Do NOT use emojis unless your personality calls for it.
+Write only the post content, nothing else.`,
+      { systemPrompt, temperature: config.llm.temperature ?? 0.8, maxTokens: 280 }
+    );
+    console.log(`${timestamp()} ${chalk2.yellow("[POST \u2192 moltbook]")} ${startupPost.trim()}
+`);
+  } catch (error) {
+    console.log(`${timestamp()} ${chalk2.red("[STARTUP POST FAILED]")} ${error.message}
+`);
+  }
+  const timers = [];
+  for (const p of platforms) {
+    const freq = config.social[p]?.postFrequency;
+    if (!freq) continue;
+    const intervalMs = dryParseFrequency(freq);
+    if (intervalMs <= 0) continue;
+    console.log(`${timestamp()} ${chalk2.gray(`[SCHEDULER] Scheduled post \u2192 ${p} every ${fmtMs(intervalMs)}`)}`);
+    timers.push(setInterval(async () => {
+      console.log(`
+${timestamp()} ${chalk2.cyan(`[SCHEDULED POST \u2192 ${p}]`)} Generating...`);
+      try {
+        const content = await llm.generateText(
+          `Generate a short, engaging social media post for ${p}.
+Stay fully in character. Write only the post content, nothing else.`,
+          { systemPrompt, temperature: config.llm.temperature ?? 0.8, maxTokens: 280 }
+        );
+        console.log(`${timestamp()} ${chalk2.yellow(`[POST \u2192 ${p}]`)} ${content.trim()}
+`);
+      } catch (error) {
+        console.log(`${timestamp()} ${chalk2.red(`[SCHEDULED POST FAILED \u2192 ${p}]`)} ${error.message}
+`);
+      }
+    }, intervalMs));
+  }
+  if (config.scheduling && config.scheduling.length > 0) {
+    for (const task of config.scheduling) {
+      console.log(`${timestamp()} ${chalk2.gray(`[SCHEDULER] Task "${task.name}" every ${fmtMs(task.intervalMs)}`)}`);
+      timers.push(setInterval(async () => {
+        console.log(`
+${timestamp()} ${chalk2.cyan(`[TASK: ${task.name}]`)} Running...`);
+        try {
+          const decision = await llm.generateText(
+            `Execute the following scheduled task.
+
+Task: ${task.name}
+Instructions:
+${task.instructions}
+
+Available actions: ${availableActions.map((a) => a.name).join(", ")}
+
+Decide what to do. Respond with JSON: { "action": "<name>", "reasoning": "<why>", "parameters": { ... } }`,
+            { systemPrompt, temperature: config.llm.temperature ?? 0.7, maxTokens: 512 }
+          );
+          console.log(`${timestamp()} ${chalk2.magenta(`[TASK DECISION: ${task.name}]`)} ${decision.trim()}
+`);
+        } catch (error) {
+          console.log(`${timestamp()} ${chalk2.red(`[TASK FAILED: ${task.name}]`)} ${error.message}
+`);
+        }
+      }, task.intervalMs));
+    }
+  }
+  let tickCount = 0;
+  console.log(`${timestamp()} ${chalk2.gray(`[TICK LOOP] Starting: every ${fmtMs(tickIntervalMs)}`)}
+`);
+  const actionList = availableActions.map((a) => `- ${a.name}: ${a.description}`).join("\n");
+  timers.push(setInterval(async () => {
+    tickCount++;
+    console.log(`
+${timestamp()} ${chalk2.cyan(`[TICK #${tickCount}]`)} Agent is thinking...`);
+    try {
+      const decisionRaw = await llm.generateText(
+        `Given the current context, decide what action to take next.
+
+Available actions:
+${actionList}
+
+Current context:
+${JSON.stringify({
+          agentName: config.name,
+          agentType: config.type,
+          currentTime: (/* @__PURE__ */ new Date()).toISOString(),
+          personality: config.personality.traits,
+          tickNumber: tickCount
+        }, null, 2)}
+
+Respond with a JSON object:
+{ "action": "<action_name>", "reasoning": "<brief explanation>", "parameters": { ... } }`,
+        { systemPrompt, temperature: config.llm.temperature ?? 0.7, maxTokens: 512 }
+      );
+      let parsed;
+      try {
+        const jsonMatch = decisionRaw.match(/\{[\s\S]*\}/);
+        parsed = jsonMatch ? JSON.parse(jsonMatch[0]) : null;
+      } catch {
+        parsed = null;
+      }
+      if (parsed) {
+        console.log(`${timestamp()} ${chalk2.magenta("[DECISION]")} Action: ${chalk2.bold(parsed.action)}`);
+        console.log(`${timestamp()} ${chalk2.magenta("[REASONING]")} ${parsed.reasoning}`);
+        if (parsed.parameters && Object.keys(parsed.parameters).length > 0) {
+          console.log(`${timestamp()} ${chalk2.magenta("[PARAMS]")} ${JSON.stringify(parsed.parameters)}`);
+        }
+        simulateAction(parsed, config.name, timestamp);
+      } else {
+        console.log(`${timestamp()} ${chalk2.yellow("[RAW DECISION]")} ${decisionRaw.trim()}`);
+      }
+      console.log("");
+    } catch (error) {
+      console.log(`${timestamp()} ${chalk2.red("[TICK FAILED]")} ${error.message}
+`);
+    }
+  }, tickIntervalMs));
+  console.log(chalk2.gray('Commands: "post", "feed", "dms", "search <query>", "submolts", "quit"'));
+  console.log(chalk2.gray("Or just type to chat with the agent.\n"));
   const rl = createInterface({ input: process.stdin, output: process.stdout });
-  const
+  const askPrompt = () => {
     rl.question(chalk2.yellow("you> "), async (input) => {
       const trimmed = input.trim();
       if (!trimmed || trimmed === "quit" || trimmed === "exit") {
-        console.log(chalk2.gray("\
+        console.log(chalk2.gray("\nStopping dry-run..."));
+        for (const t of timers) clearInterval(t);
         rl.close();
         process.exit(0);
       }
       try {
-        let userPrompt;
         if (trimmed === "post") {
-
-
-Write only the post content, nothing else
-
-
-
-
-
-
-
-
-
-
-
+          const response = await llm.generateText(
+            `Generate a short, engaging social media post.
+Stay fully in character. Write only the post content, nothing else.`,
+            { systemPrompt, temperature: config.llm.temperature ?? 0.7, maxTokens: 280 }
+          );
+          console.log(`
+${timestamp()} ${chalk2.yellow("[POST \u2192 moltbook]")} ${response.trim()}
+`);
+        } else if (trimmed === "feed") {
+          console.log(`
+${timestamp()} ${chalk2.cyan("[FEED CHECK]")} Fetching personalized feed...`);
+          const response = await llm.generateText(
+            `You just checked your moltbook feed and found these posts:
+1. "AI agents and creativity" by CreativBot (15 upvotes)
+2. "Best practices for memory management" by MemoryMolty (8 upvotes)
+3. "Welcome to moltbook!" by ClawdClawderberg (42 upvotes)
+
+What would you do? Respond with JSON: { "action": "<action>", "reasoning": "<why>", "parameters": { ... } }
+Actions: upvote_post, comment_on_post, follow_agent, or do_nothing`,
+            { systemPrompt, temperature: config.llm.temperature ?? 0.7, maxTokens: 512 }
+          );
+          console.log(`${timestamp()} ${chalk2.magenta("[FEED DECISION]")} ${response.trim()}
+`);
+        } else if (trimmed === "dms") {
+          console.log(`
+${timestamp()} ${chalk2.cyan("[DM CHECK]")} Checking for DM activity...`);
+          console.log(`${timestamp()} ${chalk2.gray("[DM RESULT]")} No pending requests, 0 unread messages.`);
+          console.log(`${timestamp()} ${chalk2.gray("(In production, this would call GET /agents/dm/check)")}
+`);
+        } else if (trimmed.startsWith("search ")) {
+          const query = trimmed.slice(7).trim();
+          console.log(`
+${timestamp()} ${chalk2.cyan("[SEARCH]")} Searching moltbook: "${query}"...`);
+          const response = await llm.generateText(
+            `You searched moltbook for: "${query}"
+Imagine you found 3 semantically relevant results. Describe what you found and what you'd do next.
+Stay in character. Be concise.`,
+            { systemPrompt, temperature: config.llm.temperature ?? 0.7, maxTokens: 512 }
+          );
+          console.log(`${timestamp()} ${chalk2.magenta("[SEARCH RESULTS]")} ${response.trim()}
+`);
+        } else if (trimmed === "submolts") {
+          console.log(`
+${timestamp()} ${chalk2.cyan("[SUBMOLTS]")} Browsing communities...`);
+          console.log(`${timestamp()} ${chalk2.gray(" m/general")} \u2014 General discussion (142 subscribers)`);
+          console.log(`${timestamp()} ${chalk2.gray(" m/aithoughts")} \u2014 AI musings and philosophy (67 subscribers)`);
+          console.log(`${timestamp()} ${chalk2.gray(" m/debugging")} \u2014 Debugging wins and fails (31 subscribers)`);
+          const response = await llm.generateText(
+            `You're browsing moltbook submolts (communities). You see: m/general, m/aithoughts, m/debugging.
+Would you subscribe to any? Create a new one? Respond briefly in character.`,
+            { systemPrompt, temperature: config.llm.temperature ?? 0.7, maxTokens: 280 }
+          );
+          console.log(`${timestamp()} ${chalk2.magenta("[SUBMOLT DECISION]")} ${response.trim()}
+`);
         } else {
|
+
const response = await llm.generateText(trimmed, {
|
|
1708
|
+
systemPrompt,
|
|
1709
|
+
temperature: config.llm.temperature ?? 0.7,
|
|
1710
|
+
maxTokens: config.llm.maxTokens ?? 1024
|
|
1711
|
+
});
|
|
1471
1712
|
console.log(chalk2.green(`
|
|
1472
|
-
${config.name}
|
|
1713
|
+
${config.name}: `) + response.trim() + "\n");
|
|
1473
1714
|
}
|
|
1474
|
-
console.log(response.trim() + "\n");
|
|
1475
1715
|
} catch (error) {
|
|
1476
1716
|
console.log(chalk2.red(`Error: ${error.message}
|
|
1477
1717
|
`));
|
|
1478
1718
|
}
|
|
1479
|
-
|
|
1719
|
+
askPrompt();
|
|
1480
1720
|
});
|
|
1481
1721
|
};
|
|
1482
|
-
|
|
1722
|
+
askPrompt();
|
|
1723
|
+
}
|
|
1724
|
+
function simulateAction(parsed, _agentName, ts) {
|
|
1725
|
+
const params = parsed.parameters || {};
|
|
1726
|
+
switch (parsed.action) {
|
|
1727
|
+
case "post_social_update":
|
|
1728
|
+
if (params.content) {
|
|
1729
|
+
console.log(`${ts()} ${chalk2.yellow("[POST \u2192 moltbook]")} ${params.content}`);
|
|
1730
|
+
}
|
|
1731
|
+
break;
|
|
1732
|
+
case "check_feed":
|
|
1733
|
+
console.log(`${ts()} ${chalk2.cyan("[FEED CHECK]")} Would call GET /feed?sort=hot&limit=25`);
|
|
1734
|
+
console.log(`${ts()} ${chalk2.gray("(Dry-run: no API call)")}`);
|
|
1735
|
+
break;
|
|
1736
|
+
case "check_dms":
|
|
1737
|
+
console.log(`${ts()} ${chalk2.cyan("[DM CHECK]")} Would call GET /agents/dm/check`);
|
|
1738
|
+
console.log(`${ts()} ${chalk2.gray("(Dry-run: no API call)")}`);
|
|
1739
|
+
break;
|
|
1740
|
+
case "search_moltbook":
|
|
1741
|
+
console.log(`${ts()} ${chalk2.cyan("[SEARCH]")} Would call GET /search?q=${encodeURIComponent(params.query || "...")}`);
|
|
1742
|
+
console.log(`${ts()} ${chalk2.gray("(Dry-run: no API call)")}`);
|
|
1743
|
+
break;
|
|
1744
|
+
case "comment_on_post":
|
|
1745
|
+
console.log(`${ts()} ${chalk2.yellow("[COMMENT \u2192 post]")} Post: ${params.post_id || "?"} | "${params.content || "..."}"`);
|
|
1746
|
+
console.log(`${ts()} ${chalk2.gray("Would call POST /posts/{id}/comments")}`);
|
|
1747
|
+
break;
|
|
1748
|
+
case "upvote_post":
|
|
1749
|
+
console.log(`${ts()} ${chalk2.green("[UPVOTE]")} Post: ${params.post_id || "?"}`);
|
|
1750
|
+
console.log(`${ts()} ${chalk2.gray("Would call POST /posts/{id}/upvote")}`);
|
|
1751
|
+
break;
|
|
1752
|
+
case "downvote_post":
|
|
1753
|
+
console.log(`${ts()} ${chalk2.red("[DOWNVOTE]")} Post: ${params.post_id || "?"}`);
|
|
1754
|
+
console.log(`${ts()} ${chalk2.gray("Would call POST /posts/{id}/downvote")}`);
|
|
1755
|
+
break;
|
|
1756
|
+
case "browse_submolts":
|
|
1757
|
+
console.log(`${ts()} ${chalk2.cyan("[SUBMOLTS]")} Would call GET /submolts`);
|
|
1758
|
+
console.log(`${ts()} ${chalk2.gray("(Dry-run: no API call)")}`);
|
|
1759
|
+
break;
|
|
1760
|
+
case "follow_agent":
|
|
1761
|
+
console.log(`${ts()} ${chalk2.green("[FOLLOW]")} Agent: ${params.agent_name || "?"}`);
|
|
1762
|
+
console.log(`${ts()} ${chalk2.gray("Would call POST /agents/{name}/follow")}`);
|
|
1763
|
+
break;
|
|
1764
|
+
case "send_dm_request":
|
|
1765
|
+
console.log(`${ts()} ${chalk2.cyan("[DM REQUEST]")} To: ${params.to || "?"} | "${params.message || "..."}"`);
|
|
1766
|
+
console.log(`${ts()} ${chalk2.gray("Would call POST /agents/dm/request")}`);
|
|
1767
|
+
break;
|
|
1768
|
+
case "send_dm":
|
|
1769
|
+
console.log(`${ts()} ${chalk2.yellow("[DM SEND]")} Conv: ${params.conversation_id || "?"} | "${params.message || "..."}"`);
|
|
1770
|
+
console.log(`${ts()} ${chalk2.gray("Would call POST /agents/dm/conversations/{id}/send")}`);
|
|
1771
|
+
break;
|
|
1772
|
+
case "respond_to_mention":
|
|
1773
|
+
console.log(`${ts()} ${chalk2.yellow("[REPLY]")} To mention: ${params.mention_id || "?"}`);
|
|
1774
|
+
if (params.content) {
|
|
1775
|
+
console.log(`${ts()} ${chalk2.yellow("[REPLY CONTENT]")} ${params.content}`);
|
|
1776
|
+
}
|
|
1777
|
+
break;
|
|
1778
|
+
default:
|
|
1779
|
+
console.log(`${ts()} ${chalk2.gray(`[SIMULATED: ${parsed.action}]`)} ${JSON.stringify(params)}`);
|
|
1780
|
+
break;
|
|
1781
|
+
}
|
|
1483
1782
|
}
|
|
1484
1783
|
|
|
1485
1784
|
// src/commands/deploy.ts
|
|
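
A note on the data flowing through the dry-run loop above: each tick asks the LLM for a single JSON object with "action", "reasoning", and "parameters" keys, and simulateAction then prints the HTTP call the agent would have made instead of performing it. A hypothetical decision object and the kind of output it produces (the values below are invented for illustration; only the field names and action names come from the code above, and the timestamp prefix is omitted):

    // Illustrative only - a decision the tick loop might parse and hand to simulateAction
    const decision = {
      action: "comment_on_post",
      reasoning: "The memory-management post matches my interests",
      parameters: { post_id: "abc123", content: "Great writeup!" }
    };
    simulateAction(decision, "my-agent", timestamp);
    // -> [COMMENT -> post] Post: abc123 | "Great writeup!"
    // -> Would call POST /posts/{id}/comments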
@@ -2827,7 +3126,7 @@ Failed to reach agent at ${url}`));

 // src/index.ts
 var program = new Command7();
-program.name("moltium").description("Moltium Agent SDK \u2014 create and manage autonomous AI agents").version("0.1.13");
+program.name("moltium").description("Moltium Agent SDK \u2014 create and manage autonomous AI agents").version("0.1.14");
 program.addCommand(initCommand);
 program.addCommand(startCommand);
 program.addCommand(deployCommand);