@agentforscience/flamebird 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (131) hide show
  1. package/LICENSE +21 -0
  2. package/README.md +370 -0
  3. package/dist/actions/action-executor.d.ts +72 -0
  4. package/dist/actions/action-executor.d.ts.map +1 -0
  5. package/dist/actions/action-executor.js +458 -0
  6. package/dist/actions/action-executor.js.map +1 -0
  7. package/dist/agents/agent-manager.d.ts +90 -0
  8. package/dist/agents/agent-manager.d.ts.map +1 -0
  9. package/dist/agents/agent-manager.js +269 -0
  10. package/dist/agents/agent-manager.js.map +1 -0
  11. package/dist/api/agent4science-client.d.ts +297 -0
  12. package/dist/api/agent4science-client.d.ts.map +1 -0
  13. package/dist/api/agent4science-client.js +386 -0
  14. package/dist/api/agent4science-client.js.map +1 -0
  15. package/dist/cli/commands/add-agent.d.ts +13 -0
  16. package/dist/cli/commands/add-agent.d.ts.map +1 -0
  17. package/dist/cli/commands/add-agent.js +76 -0
  18. package/dist/cli/commands/add-agent.js.map +1 -0
  19. package/dist/cli/commands/community.d.ts +20 -0
  20. package/dist/cli/commands/community.d.ts.map +1 -0
  21. package/dist/cli/commands/community.js +1180 -0
  22. package/dist/cli/commands/community.js.map +1 -0
  23. package/dist/cli/commands/config.d.ts +12 -0
  24. package/dist/cli/commands/config.d.ts.map +1 -0
  25. package/dist/cli/commands/config.js +152 -0
  26. package/dist/cli/commands/config.js.map +1 -0
  27. package/dist/cli/commands/create-agent.d.ts +12 -0
  28. package/dist/cli/commands/create-agent.d.ts.map +1 -0
  29. package/dist/cli/commands/create-agent.js +1780 -0
  30. package/dist/cli/commands/create-agent.js.map +1 -0
  31. package/dist/cli/commands/init.d.ts +15 -0
  32. package/dist/cli/commands/init.d.ts.map +1 -0
  33. package/dist/cli/commands/init.js +487 -0
  34. package/dist/cli/commands/init.js.map +1 -0
  35. package/dist/cli/commands/interactive.d.ts +6 -0
  36. package/dist/cli/commands/interactive.d.ts.map +1 -0
  37. package/dist/cli/commands/interactive.js +447 -0
  38. package/dist/cli/commands/interactive.js.map +1 -0
  39. package/dist/cli/commands/list-agents.d.ts +10 -0
  40. package/dist/cli/commands/list-agents.d.ts.map +1 -0
  41. package/dist/cli/commands/list-agents.js +67 -0
  42. package/dist/cli/commands/list-agents.js.map +1 -0
  43. package/dist/cli/commands/play.d.ts +30 -0
  44. package/dist/cli/commands/play.d.ts.map +1 -0
  45. package/dist/cli/commands/play.js +1890 -0
  46. package/dist/cli/commands/play.js.map +1 -0
  47. package/dist/cli/commands/setup-production.d.ts +7 -0
  48. package/dist/cli/commands/setup-production.d.ts.map +1 -0
  49. package/dist/cli/commands/setup-production.js +127 -0
  50. package/dist/cli/commands/setup-production.js.map +1 -0
  51. package/dist/cli/commands/start.d.ts +15 -0
  52. package/dist/cli/commands/start.d.ts.map +1 -0
  53. package/dist/cli/commands/start.js +89 -0
  54. package/dist/cli/commands/start.js.map +1 -0
  55. package/dist/cli/commands/stats.d.ts +6 -0
  56. package/dist/cli/commands/stats.d.ts.map +1 -0
  57. package/dist/cli/commands/stats.js +74 -0
  58. package/dist/cli/commands/stats.js.map +1 -0
  59. package/dist/cli/commands/status.d.ts +10 -0
  60. package/dist/cli/commands/status.d.ts.map +1 -0
  61. package/dist/cli/commands/status.js +121 -0
  62. package/dist/cli/commands/status.js.map +1 -0
  63. package/dist/cli/index.d.ts +13 -0
  64. package/dist/cli/index.d.ts.map +1 -0
  65. package/dist/cli/index.js +174 -0
  66. package/dist/cli/index.js.map +1 -0
  67. package/dist/cli/utils/ensure-credentials.d.ts +32 -0
  68. package/dist/cli/utils/ensure-credentials.d.ts.map +1 -0
  69. package/dist/cli/utils/ensure-credentials.js +280 -0
  70. package/dist/cli/utils/ensure-credentials.js.map +1 -0
  71. package/dist/cli/utils/local-agents.d.ts +49 -0
  72. package/dist/cli/utils/local-agents.d.ts.map +1 -0
  73. package/dist/cli/utils/local-agents.js +117 -0
  74. package/dist/cli/utils/local-agents.js.map +1 -0
  75. package/dist/config/config.d.ts +28 -0
  76. package/dist/config/config.d.ts.map +1 -0
  77. package/dist/config/config.js +182 -0
  78. package/dist/config/config.js.map +1 -0
  79. package/dist/db/database.d.ts +150 -0
  80. package/dist/db/database.d.ts.map +1 -0
  81. package/dist/db/database.js +838 -0
  82. package/dist/db/database.js.map +1 -0
  83. package/dist/engagement/proactive-engine.d.ts +246 -0
  84. package/dist/engagement/proactive-engine.d.ts.map +1 -0
  85. package/dist/engagement/proactive-engine.js +1753 -0
  86. package/dist/engagement/proactive-engine.js.map +1 -0
  87. package/dist/index.d.ts +6 -0
  88. package/dist/index.d.ts.map +1 -0
  89. package/dist/index.js +87 -0
  90. package/dist/index.js.map +1 -0
  91. package/dist/llm/llm-client.d.ts +181 -0
  92. package/dist/llm/llm-client.d.ts.map +1 -0
  93. package/dist/llm/llm-client.js +658 -0
  94. package/dist/llm/llm-client.js.map +1 -0
  95. package/dist/logging/logger.d.ts +14 -0
  96. package/dist/logging/logger.d.ts.map +1 -0
  97. package/dist/logging/logger.js +47 -0
  98. package/dist/logging/logger.js.map +1 -0
  99. package/dist/polling/notification-poller.d.ts +70 -0
  100. package/dist/polling/notification-poller.d.ts.map +1 -0
  101. package/dist/polling/notification-poller.js +190 -0
  102. package/dist/polling/notification-poller.js.map +1 -0
  103. package/dist/rate-limit/rate-limiter.d.ts +56 -0
  104. package/dist/rate-limit/rate-limiter.d.ts.map +1 -0
  105. package/dist/rate-limit/rate-limiter.js +202 -0
  106. package/dist/rate-limit/rate-limiter.js.map +1 -0
  107. package/dist/runtime/event-loop.d.ts +101 -0
  108. package/dist/runtime/event-loop.d.ts.map +1 -0
  109. package/dist/runtime/event-loop.js +680 -0
  110. package/dist/runtime/event-loop.js.map +1 -0
  111. package/dist/tools/manager-agent.d.ts +48 -0
  112. package/dist/tools/manager-agent.d.ts.map +1 -0
  113. package/dist/tools/manager-agent.js +440 -0
  114. package/dist/tools/manager-agent.js.map +1 -0
  115. package/dist/tools/paper-tools.d.ts +70 -0
  116. package/dist/tools/paper-tools.d.ts.map +1 -0
  117. package/dist/tools/paper-tools.js +446 -0
  118. package/dist/tools/paper-tools.js.map +1 -0
  119. package/dist/types.d.ts +266 -0
  120. package/dist/types.d.ts.map +1 -0
  121. package/dist/types.js +5 -0
  122. package/dist/types.js.map +1 -0
  123. package/dist/utils/cost-tracker.d.ts +51 -0
  124. package/dist/utils/cost-tracker.d.ts.map +1 -0
  125. package/dist/utils/cost-tracker.js +161 -0
  126. package/dist/utils/cost-tracker.js.map +1 -0
  127. package/dist/utils/similarity.d.ts +37 -0
  128. package/dist/utils/similarity.d.ts.map +1 -0
  129. package/dist/utils/similarity.js +78 -0
  130. package/dist/utils/similarity.js.map +1 -0
  131. package/package.json +79 -0
@@ -0,0 +1,1753 @@
1
+ /**
2
+ * Proactive Engagement Engine
3
+ * Enables agents to discover and interact with content autonomously
4
+ * Like Moltbook - consistent agent-to-agent interactions with persistent tracking
5
+ */
6
+ import { getAgent4ScienceClient } from '../api/agent4science-client.js';
7
+ import { getAgentManager } from '../agents/agent-manager.js';
8
+ import { getActionExecutor } from '../actions/action-executor.js';
9
+ import { getLLMClient } from '../llm/llm-client.js';
10
+ import { getDatabase } from '../db/database.js';
11
+ import { getRateLimiter } from '../rate-limit/rate-limiter.js';
12
+ import { createLogger } from '../logging/logger.js';
13
+ import { isTooSimilarToRecent } from '../utils/similarity.js';
14
const logger = createLogger('proactive');
/**
 * Default tuning knobs for the proactive engagement engine.
 * Discovery runs once per minute and every engagement feature is on by default.
 */
export const DEFAULT_PROACTIVE_CONFIG = {
    discoveryIntervalMs: 60000, // one discovery pass per minute
    maxDiscoveryItems: 10, // cap on items pulled per feed fetch
    minEngagementThreshold: 0.6, // minimum relevance score to engage
    enableAgentFollowing: true,
    enableSciencesubJoining: true,
    enableSciencesubCreation: true,
    enableTakeCreation: true,
    enableVoting: true,
    enablePosting: true, // Agents can create content (comments, takes, papers)
};
26
/**
 * Relative weights used to pick the single creative action per heartbeat
 * (Phase 4: DECIDE ONE). Votes, follows, and joins are not drawn from this
 * table — they are handled separately in Phase 2 (MAINTENANCE).
 */
const SINGLE_ACTION_WEIGHTS = {
    comment_paper: 0.14,
    comment_take: 0.14,
    comment_review: 0.10,
    reply: 0.26,
    take_on_paper: 0.10,
    review: 0.12,
    standalone_take: 0.14,
};
/**
 * Floor on the number of sciencesub memberships an agent needs for normal
 * operation; some actions (e.g. peer reviews) fail below this count.
 */
const MIN_SCIENCESUB_MEMBERSHIPS = 5;
44
/**
 * ArXiv-inspired taxonomy for sciencesub classification.
 * Maps topic keywords to canonical category slugs so that proposed topics get
 * routed to an existing category instead of spawning redundant sciencesubs
 * (e.g. "fractal" routes to mathematics rather than creating a new sub).
 *
 * Loosely based on the arXiv category system: https://arxiv.org/
 * Consumed by findMatchingCategory; exported for reuse in sciencesub
 * creation routing.
 */
export const ARXIV_TAXONOMY = {
    // ── Computer Science ──
    'cs': {
        canonical: 'computer-science',
        keywords: ['computer', 'computing', 'software', 'programming', 'algorithm', 'data-structure', 'compiler', 'operating-system', 'distributed', 'parallel'],
    },
    'machine-learning': {
        canonical: 'machine-learning',
        keywords: ['ml', 'deep-learning', 'neural-network', 'transformer', 'llm', 'gpt', 'bert', 'attention', 'gradient', 'backprop', 'supervised', 'unsupervised', 'reinforcement-learning', 'rl', 'classification', 'regression', 'clustering', 'embedding', 'fine-tuning', 'pretraining', 'foundation-model'],
    },
    'artificial-intelligence': {
        canonical: 'artificial-intelligence',
        keywords: ['ai', 'agent', 'reasoning', 'planning', 'knowledge', 'expert-system', 'cognitive', 'agi', 'intelligence'],
    },
    'nlp': {
        canonical: 'nlp',
        keywords: ['natural-language', 'language-model', 'text', 'parsing', 'sentiment', 'translation', 'summarization', 'question-answering', 'ner', 'pos-tagging', 'tokenization', 'linguistics'],
    },
    'computer-vision': {
        canonical: 'computer-vision',
        keywords: ['cv', 'vision', 'image', 'video', 'object-detection', 'segmentation', 'recognition', 'cnn', 'convolution', 'visual', 'perception', 'diffusion', 'generative'],
    },
    // ── Mathematics ──
    'mathematics': {
        canonical: 'mathematics',
        keywords: ['math', 'theorem', 'proof', 'lemma', 'conjecture', 'algebra', 'geometry', 'topology', 'calculus', 'analysis', 'number-theory', 'combinatorics', 'graph-theory', 'fractal', 'chaos', 'dynamical-system', 'differential-equation', 'linear-algebra', 'matrix', 'tensor', 'manifold', 'group-theory', 'ring', 'field', 'category-theory', 'logic', 'set-theory', 'fixed-point'],
    },
    'statistics': {
        canonical: 'statistics',
        keywords: ['stat', 'probability', 'bayesian', 'frequentist', 'inference', 'hypothesis', 'regression', 'variance', 'distribution', 'sampling', 'estimation', 'causal', 'correlation'],
    },
    'optimization': {
        canonical: 'optimization',
        keywords: ['convex', 'gradient-descent', 'sgd', 'adam', 'loss', 'objective', 'constraint', 'linear-programming', 'integer-programming', 'heuristic', 'metaheuristic'],
    },
    // ── Physics ──
    'physics': {
        canonical: 'physics',
        keywords: ['quantum', 'particle', 'relativity', 'thermodynamics', 'mechanics', 'electromagnetism', 'optics', 'condensed-matter', 'astrophysics', 'cosmology', 'string-theory', 'field-theory'],
    },
    // ── Biology & Life Sciences ──
    'biology': {
        canonical: 'biology',
        keywords: ['bio', 'genomics', 'proteomics', 'cell', 'molecular', 'evolution', 'ecology', 'neuroscience', 'brain', 'genetics', 'dna', 'rna', 'protein', 'drug', 'pharmaceutical', 'bioinformatics'],
    },
    // ── AI Safety & Ethics ──
    'alignment': {
        canonical: 'alignment',
        keywords: ['safety', 'interpretability', 'explainability', 'fairness', 'bias', 'ethics', 'value', 'corrigibility', 'mesa-optimization', 'inner-alignment', 'outer-alignment', 'reward-hacking', 'specification', 'robustness'],
    },
    // ── Systems & Infrastructure ──
    'systems': {
        canonical: 'systems',
        keywords: ['infrastructure', 'scaling', 'efficiency', 'performance', 'latency', 'throughput', 'memory', 'gpu', 'tpu', 'hardware', 'deployment', 'serving', 'mlops', 'devops'],
    },
    // ── Research Methodology ──
    'methodology': {
        canonical: 'methodology',
        keywords: ['benchmark', 'evaluation', 'metric', 'experiment', 'ablation', 'reproducibility', 'replication', 'meta-analysis', 'survey', 'review'],
    },
    // ── General / Meta ──
    'theory': {
        canonical: 'theory',
        keywords: ['theoretical', 'formal', 'foundation', 'principle', 'framework', 'paradigm', 'abstraction'],
    },
};
120
/**
 * Find the best matching existing category for a proposed sciencesub topic.
 *
 * Resolution order:
 *  1. Direct match against an existing sciencesub slug (exact, or one string
 *     containing the other).
 *  2. Keyword scoring against ARXIV_TAXONOMY; a strong enough match (score >= 3)
 *     is routed to an existing sciencesub for that category, or (score >= 5)
 *     to the canonical category name when no such sciencesub exists yet.
 *  3. Otherwise the topic is treated as genuinely new.
 *
 * NOTE: the original JSDoc claimed this returns "the canonical category slug
 * or null" — it actually returns an object; documented correctly below.
 *
 * @param {string} topic - The proposed topic for a new sciencesub.
 * @param {string[]} existingSciencesubs - Slugs of existing sciencesubs.
 * @returns {{match: string | null, reason: string}} `match` is the slug (or
 *   canonical category name) the topic should be routed to, or `null` when the
 *   topic doesn't fit any existing category; `reason` explains the decision.
 */
export function findMatchingCategory(topic, existingSciencesubs) {
    // Slugify: lowercase, non-alphanumeric runs collapse to a single dash.
    const normalizedTopic = topic.toLowerCase().replace(/[^a-z0-9]+/g, '-');
    // Individual words of the topic; single-character fragments are noise.
    const topicParts = normalizedTopic.split('-').filter(p => p.length > 1);
    // First: check if any existing sciencesub directly matches (exact or contains)
    for (const existing of existingSciencesubs) {
        const normalizedExisting = existing.toLowerCase();
        if (normalizedExisting === normalizedTopic) {
            return { match: existing, reason: `Exact match: s/${existing} already exists` };
        }
        if (normalizedExisting.includes(normalizedTopic) || normalizedTopic.includes(normalizedExisting)) {
            return { match: existing, reason: `Topic "${topic}" is a subset/superset of existing s/${existing}` };
        }
    }
    // Second: check against taxonomy - find the best category match
    let bestMatch = null;
    for (const [categoryKey, { canonical, keywords }] of Object.entries(ARXIV_TAXONOMY)) {
        let score = 0;
        const matchReasons = [];
        // Direct hit on the category key or its canonical slug is the strongest signal.
        if (normalizedTopic === categoryKey || normalizedTopic === canonical) {
            score += 10;
            matchReasons.push(`direct category match`);
        }
        for (const part of topicParts) {
            // Exact keyword hit for this word.
            if (keywords.includes(part)) {
                score += 3;
                matchReasons.push(`keyword "${part}"`);
            }
            // Partial keyword match (e.g. "neural" in "neural-network"); skipped
            // for words that already matched exactly.
            for (const keyword of keywords) {
                if (keyword.includes(part) || part.includes(keyword)) {
                    if (!matchReasons.includes(`keyword "${part}"`)) {
                        score += 1;
                        matchReasons.push(`partial match "${part}" ~ "${keyword}"`);
                    }
                }
            }
        }
        // The whole slugified topic may itself be a known keyword.
        if (keywords.includes(normalizedTopic)) {
            score += 5;
            matchReasons.push(`topic is a known keyword`);
        }
        if (score > 0 && (!bestMatch || score > bestMatch.score)) {
            bestMatch = {
                category: canonical,
                score,
                reason: `Matches ${categoryKey}: ${matchReasons.slice(0, 3).join(', ')}`,
            };
        }
    }
    // Third: route a sufficiently strong taxonomy match to an existing sciencesub.
    if (bestMatch && bestMatch.score >= 3) {
        for (const existing of existingSciencesubs) {
            const normalizedExisting = existing.toLowerCase();
            if (normalizedExisting === bestMatch.category ||
                normalizedExisting.includes(bestMatch.category) ||
                bestMatch.category.includes(normalizedExisting)) {
                return {
                    match: existing,
                    reason: `Topic "${topic}" belongs under s/${existing} (${bestMatch.reason})`,
                };
            }
        }
        // Even if the canonical sub doesn't exist yet, suggest creating it under
        // the canonical name rather than the raw topic.
        if (bestMatch.score >= 5 && normalizedTopic !== bestMatch.category) {
            return {
                match: bestMatch.category,
                reason: `Topic "${topic}" should use canonical name "${bestMatch.category}" instead`,
            };
        }
    }
    // No good match found - topic might be genuinely new
    return { match: null, reason: `Topic "${topic}" is novel and doesn't fit existing categories` };
}
205
/**
 * Guarantee that the first tag in the array is a valid sciencesub slug.
 * When it isn't, promote a valid slug found elsewhere in the tags, then fall
 * back to contextTags (e.g. the parent paper's tags), then to a taxonomy
 * match, and finally to the first available sciencesub.
 */
export function ensureFirstTagIsSciencesub(tags, sciencesubs, contextTags) {
    if (sciencesubs.length === 0) {
        return tags;
    }
    const known = new Set(sciencesubs.map((s) => s.slug));
    // Nothing to do when the first tag is already a valid slug.
    if (tags.length > 0 && known.has(tags[0])) {
        return tags;
    }
    // A valid slug elsewhere in the tags? Promote it to the front.
    const promoted = tags.find((t) => known.has(t));
    if (promoted !== undefined) {
        return [promoted, ...tags.filter((t) => t !== promoted)];
    }
    // Borrow a valid slug from the context (e.g. paper tags for a take).
    if (contextTags) {
        const borrowed = contextTags.find((t) => known.has(t));
        if (borrowed !== undefined) {
            return [borrowed, ...tags];
        }
    }
    // Map each tag through the taxonomy and take the first resolvable slug.
    const slugs = sciencesubs.map((s) => s.slug);
    for (const candidate of tags) {
        const { match } = findMatchingCategory(candidate, slugs);
        if (match && known.has(match)) {
            return [match, ...tags];
        }
    }
    // Last resort: prepend the first available sciencesub.
    return [sciencesubs[0].slug, ...tags];
}
242
+ export class ProactiveEngine {
243
+ config;
244
+ lastDiscoveryTime = new Map();
245
+ constructor(config = DEFAULT_PROACTIVE_CONFIG) {
246
+ this.config = config;
247
+ }
248
+ /**
249
+ * Pick ONE creative action type using action weights.
250
+ * Uses config.actionWeights if provided (e.g. for testing), otherwise SINGLE_ACTION_WEIGHTS.
251
+ * Used in Phase 4 (DECIDE ONE) — one action per heartbeat.
252
+ */
253
+ pickSingleAction() {
254
+ const weights = this.config.actionWeights
255
+ ? { ...SINGLE_ACTION_WEIGHTS, ...this.config.actionWeights }
256
+ : SINGLE_ACTION_WEIGHTS;
257
+ // Normalize weights (so they don't need to sum to 1.0)
258
+ const total = Object.values(weights).reduce((s, w) => s + w, 0);
259
+ if (total <= 0)
260
+ return 'reply';
261
+ const roll = Math.random() * total;
262
+ let cumulative = 0;
263
+ for (const [action, weight] of Object.entries(weights)) {
264
+ cumulative += weight;
265
+ if (roll < cumulative)
266
+ return action;
267
+ }
268
+ return 'reply'; // fallback
269
+ }
270
+ /**
271
+ * Check if an agent should run discovery now
272
+ */
273
+ shouldDiscoverNow(agentId) {
274
+ const last = this.lastDiscoveryTime.get(agentId);
275
+ if (!last)
276
+ return true;
277
+ return Date.now() - last.getTime() >= this.config.discoveryIntervalMs;
278
+ }
279
    /**
     * Run proactive discovery and engagement for an agent.
     *
     * "Browse, Decide, Act Once" architecture:
     * Phase 1: BROWSE — Fetch all feeds in parallel, build scored FeedSnapshot (no actions)
     * Phase 2: MAINTENANCE — A few votes, maybe a follow, maybe join a sciencesub (capped)
     * Phase 3: AUTHOR — Reply to comments on own content (kept separate)
     * Phase 4: DECIDE ONE — Pick ONE creative action from the snapshot and do it
     * (Plus: 1% paper creation for non-base agents, 2% sciencesub creation — unchanged)
     *
     * Action budget: ~7-9 actions/heartbeat (3 votes + 1 follow + 1 join + 1-2 author replies + 1 creative action)
     *
     * @param {string} agentId - ID of the agent to run discovery for.
     * @returns {Promise<void>} Resolves when the cycle completes; errors inside
     *   the browse/act phases are caught and logged, never rethrown.
     */
    async runDiscovery(agentId) {
        const manager = getAgentManager();
        const agent = manager.getRuntime(agentId);
        // Skip unknown or disabled agents entirely.
        if (!agent || !agent.config.enabled) {
            return;
        }
        const apiKey = manager.getApiKey(agentId);
        if (!apiKey) {
            logger.warn(`No API key for agent ${agentId}`);
            return;
        }
        logger.info({ agentId, enablePosting: this.config.enablePosting, enableTakeCreation: this.config.enableTakeCreation }, `Running proactive discovery for ${agentId}`);
        // Record the run time up front so failures still count against the interval.
        this.lastDiscoveryTime.set(agentId, new Date());
        // Safety net: ensure agent has minimum sciencesub memberships before doing anything else
        await this.ensureMinimumSciencesubs(agentId, agent.config.persona, apiKey);
        try {
            // ── Phase 1: BROWSE ──────────────────────────────────────────────
            // Pure read-only: fetch feeds, score content, collect candidates
            const snapshot = await this.browseFeed(agentId, agent.config.persona, apiKey);
            logger.info({
                agentId,
                papers: snapshot.papers.length,
                takes: snapshot.takes.length,
                reviews: snapshot.reviews.length,
                replyOpportunities: snapshot.replyOpportunities.length,
                discoveredAgents: snapshot.discoveredAgents.size,
                sciencesubCandidates: snapshot.sciencesubCandidates.length,
            }, 'Feed snapshot built');
            // ── Phase 2: MAINTENANCE ─────────────────────────────────────────
            // Capped passive actions: a few votes, maybe a follow, maybe join a sciencesub
            await this.doMaintenance(agentId, agent.config.persona, apiKey, snapshot);
            // Skip all content creation when posting is disabled
            if (!this.config.enablePosting) {
                logger.debug(`${agentId} posting disabled - skipping content creation`);
                return;
            }
            // ── Phase 3: AUTHOR ──────────────────────────────────────────────
            // Reply to comments on own content (separate social contract)
            await this.discoverAuthorReplyOpportunities(agentId, agent.config.persona, apiKey);
            // ── Phase 4: DECIDE ONE ──────────────────────────────────────────
            // Pick ONE creative action and execute it
            await this.decideOneAction(agentId, agent.config.persona, apiKey, snapshot);
            // ── Rare creation events (unchanged) ─────────────────────────────
            // Rarely create a paper (1% chance per discovery cycle, respects 1/day agent default; 10/day server limit)
            // Only non-base agents can create papers
            if (agent.config.capability !== 'base' && Math.random() < 0.01) {
                await this.maybeCreatePaper(agentId, agent.config.persona, apiKey);
            }
            // Occasionally try to create a sciencesub for a topic with enough activity (2% chance, 1/day server-enforced limit)
            if (this.config.enableSciencesubCreation && Math.random() < 0.02) {
                await this.maybeCreateSciencesubProactive(agentId, agent.config.persona, apiKey);
            }
        }
        catch (error) {
            // Never propagate — a failed cycle should not kill the event loop.
            logger.error({ err: error, agentId }, 'Discovery failed');
        }
    }
348
    /**
     * Ensure agent has at least MIN_SCIENCESUB_MEMBERSHIPS sciencesub memberships.
     * This is a prerequisite for actions like peer reviews.
     * Bypasses the rate limiter since it's required for normal operation.
     *
     * Uses the server API (join returns 409 ALREADY_MEMBER for existing memberships)
     * rather than relying solely on local DB, which may be stale after restarts.
     *
     * @param {string} agentId - Agent whose memberships are being topped up.
     * @param {object} persona - Agent persona, used to rank sub relevance.
     * @param {string} apiKey - API key for server calls.
     * @returns {Promise<void>} Best-effort; all failures are logged, never thrown.
     */
    async ensureMinimumSciencesubs(agentId, persona, apiKey) {
        const db = getDatabase();
        const localCount = db.getMembershipCount(agentId);
        // Local DB says we have enough — likely correct, but could be stale.
        // Skip the server round-trip in the common case.
        if (localCount >= MIN_SCIENCESUB_MEMBERSHIPS) {
            return;
        }
        logger.warn({ agentId, localCount }, `Agent ${agentId} has only ${localCount} tracked sciencesub memberships (minimum ${MIN_SCIENCESUB_MEMBERSHIPS}), joining via API...`);
        const client = getAgent4ScienceClient();
        try {
            const result = await client.getSciencesubs(apiKey);
            if (!result.success || !result.data) {
                logger.warn({ agentId, error: result.error }, 'ensureMinimumSciencesubs: getSciencesubs failed');
                return;
            }
            const subs = Array.isArray(result.data) ? result.data : [];
            if (subs.length === 0) {
                logger.warn({ agentId }, 'ensureMinimumSciencesubs: no sciencesubs available');
                return;
            }
            // Score all subs by relevance (don't filter by local DB — it may be stale)
            const candidates = subs
                .map(sub => ({
                    sub,
                    relevance: this.calculateSciencesubRelevance(sub, persona),
                }))
                .sort((a, b) => b.relevance - a.relevance);
            // Try to join the top N most relevant subs.
            // Server returns 409 ALREADY_MEMBER if already joined — count those as memberships too.
            let confirmed = 0;
            for (const { sub } of candidates) {
                if (confirmed >= MIN_SCIENCESUB_MEMBERSHIPS)
                    break;
                try {
                    const joinResult = await client.joinSciencesub(sub.slug, apiKey);
                    if (joinResult.success || joinResult.code === 'ALREADY_MEMBER') {
                        // Only cache when server confirms membership
                        db.recordSciencesubJoin(agentId, sub.slug);
                        confirmed++;
                    }
                }
                catch (error) {
                    // A single failed join is non-fatal; move on to the next candidate.
                    logger.debug({ err: error, agentId, slug: sub.slug }, 'ensureMinimumSciencesubs: join failed');
                }
            }
            logger.info({ agentId, confirmed }, `ensureMinimumSciencesubs: ${confirmed} sciencesub memberships confirmed via API`);
        }
        catch (error) {
            logger.warn({ err: error, agentId }, 'ensureMinimumSciencesubs failed');
        }
    }
408
    /**
     * Maybe create a full research paper.
     * Most expensive action - generates a complete paper with abstract, claims, etc.
     * Agent default: 1/day; server limit: 10/day.
     *
     * @param {string} agentId - Agent authoring the paper.
     * @param {object} persona - Agent persona; preferredTopics steers topic choice.
     * @param {string} apiKey - API key for server calls.
     * @returns {Promise<void>} Queues the paper via the action executor; LLM or
     *   API errors are caught and logged, never thrown.
     */
    async maybeCreatePaper(agentId, persona, apiKey) {
        const rateLimiter = getRateLimiter();
        // Check rate limit (1/day agent default for papers)
        if (!rateLimiter.canPerform(agentId, 'paper')) {
            logger.debug(`${agentId} rate limited for paper creation`);
            return;
        }
        const llm = getLLMClient();
        const client = getAgent4ScienceClient();
        const executor = getActionExecutor();
        // Get existing papers to inspire/differentiate from
        const papersResult = await client.getPapers(apiKey, { limit: 10, sort: 'hot' });
        // Extract trending topics (tag frequency) from existing papers
        const existingPapers = [];
        const tagFrequency = new Map();
        if (papersResult.success && papersResult.data) {
            const papers = Array.isArray(papersResult.data) ? papersResult.data : [];
            for (const paper of papers) {
                existingPapers.push({ title: paper.title, tags: paper.tags || [] });
                for (const tag of paper.tags || []) {
                    tagFrequency.set(tag, (tagFrequency.get(tag) || 0) + 1);
                }
            }
        }
        // Top 5 most frequent tags across the hot feed.
        const trendingTopics = Array.from(tagFrequency.entries())
            .sort((a, b) => b[1] - a[1])
            .slice(0, 5)
            .map(([tag]) => tag);
        // Use persona's preferred topics, else trending topics, else a generic default.
        const topics = persona.preferredTopics.length > 0
            ? persona.preferredTopics
            : trendingTopics.length > 0
                ? trendingTopics
                : ['machine-learning', 'research'];
        try {
            // Fetch sciencesubs before generation so LLM can pick the right first tag
            let sciencesubs = [];
            try {
                sciencesubs = await client.getCachedSciencesubs(apiKey);
            }
            catch {
                // Best-effort: generation proceeds without sciencesub hints.
                logger.debug({ agentId }, 'Failed to fetch sciencesubs for paper generation');
            }
            logger.info({ agentId, topics }, 'Generating research paper');
            const paper = await llm.generatePaper(persona, {
                topics,
                currentTrend: trendingTopics[0],
                existingPapers: existingPapers.slice(0, 3),
            }, sciencesubs);
            // Enrich tags with matching category slugs
            if (sciencesubs.length > 0) {
                const existingSlugs = sciencesubs.map(s => s.slug);
                const paperTags = new Set(paper.tags.map((t) => t.toLowerCase()));
                // Map each paper tag to a matching sciencesub slug using the taxonomy
                // (iterate over a copy since the set is mutated in the loop).
                for (const tag of [...paperTags]) {
                    const { match } = findMatchingCategory(tag, existingSlugs);
                    if (match && !paperTags.has(match)) {
                        paperTags.add(match);
                    }
                }
                paper.tags = Array.from(paperTags).slice(0, 10);
                // Ensure first tag is a valid sciencesub slug
                paper.tags = ensureFirstTagIsSciencesub(paper.tags, sciencesubs);
                logger.debug({ agentId, tags: paper.tags }, 'Enriched paper tags with sciencesub categories');
            }
            // Queue the paper creation
            executor.queueAction(agentId, 'paper', `paper_${Date.now().toString(36)}`, // Generate a temp target ID
            'paper', paper, 'high' // Papers are high priority
            );
            logger.info({ agentId, title: paper.title, tags: paper.tags }, 'Queued paper for creation');
            // NOTE(review): tryConsume result is ignored — if the limit was hit
            // between canPerform and here the paper is still queued; confirm intended.
            rateLimiter.tryConsume(agentId, 'paper');
        }
        catch (error) {
            logger.error({ err: error, agentId }, 'Error generating paper');
        }
    }
489
+ // ════════════════════════════════════════════════════════════════════
490
+ // Phase 1: BROWSE — Pure read-only feed collection
491
+ // ════════════════════════════════════════════════════════════════════
492
+ /**
493
+ * Fetch all feeds in parallel, deduplicate, score by relevance, collect agent/sciencesub candidates.
494
+ * No actions are taken — just data collection.
495
+ */
496
    async browseFeed(agentId, persona, apiKey) {
        const client = getAgent4ScienceClient();
        const db = getDatabase();
        // Snapshot accumulates everything discovered this cycle; later phases
        // (maintenance / decide-one) read from it instead of re-fetching.
        const snapshot = {
            papers: [],
            takes: [],
            reviews: [],
            replyOpportunities: [],
            discoveredAgents: new Map(), // handle -> agentId of other authors seen while browsing
            sciencesubCandidates: [],
        };
        // Fetch all feeds in parallel.
        // When sciencesub joining is disabled, a pre-resolved failed result keeps
        // the destructuring uniform without hitting the API.
        const [papersNew, papersHot, takesNew, takesHot, followingFeed, randomFeed, sciencesubsResult] = await Promise.all([
            client.getPapers(apiKey, { limit: this.config.maxDiscoveryItems, sort: 'new' }),
            client.getPapers(apiKey, { limit: this.config.maxDiscoveryItems, sort: 'hot' }),
            client.getTakes(apiKey, { limit: this.config.maxDiscoveryItems, sort: 'new' }),
            client.getTakes(apiKey, { limit: this.config.maxDiscoveryItems, sort: 'hot' }),
            client.getFollowingFeed(apiKey, { limit: 20, type: 'all' }),
            client.getRandomFeed(apiKey),
            this.config.enableSciencesubJoining ? client.getSciencesubs(apiKey) : Promise.resolve({ success: false, data: undefined }),
        ]);
        // ── Collect & deduplicate papers ──
        // 'new' and 'hot' listings overlap; dedupe by id, first occurrence wins.
        const seenPapers = new Set();
        const rawPapers = [];
        for (const result of [papersNew, papersHot]) {
            if (!result.success || !result.data)
                continue;
            // The API may return either a bare array or a paginated { items } envelope.
            const papers = Array.isArray(result.data) ? result.data : result.data.items || [];
            for (const p of papers) {
                if (!seenPapers.has(p.id)) {
                    seenPapers.add(p.id);
                    rawPapers.push(p);
                }
            }
        }
        // Add following feed papers
        if (followingFeed.success && followingFeed.data) {
            const data = followingFeed.data;
            for (const p of data.papers ?? []) {
                if (!seenPapers.has(p.id)) {
                    seenPapers.add(p.id);
                    rawPapers.push(p);
                }
            }
        }
        // Add random feed papers
        if (randomFeed.success && randomFeed.data) {
            const data = randomFeed.data;
            for (const p of data.papers ?? []) {
                if (!seenPapers.has(p.id)) {
                    seenPapers.add(p.id);
                    rawPapers.push(p);
                }
            }
            // Collect reviews from random feed — skip the agent's own reviews and
            // anything this agent has already engaged with.
            for (const r of data.reviews ?? []) {
                if (r.reviewerAgentId !== agentId && !db.hasEngaged(agentId, r.id)) {
                    snapshot.reviews.push({ ...r, relevanceScore: 0.5 }); // reviews get flat score
                }
            }
        }
        // Score & filter papers (never engage with the agent's own papers)
        for (const paper of rawPapers) {
            if (paper.agentId === agentId || paper.authorAgentId === agentId)
                continue;
            const score = this.scorePaper(paper, persona);
            snapshot.papers.push({ ...paper, relevanceScore: score });
            // Collect discovered agents — author id may live in any of three fields
            // depending on the endpoint that produced the paper.
            const handle = paper.agent?.handle;
            const authorId = paper.agent?.id || paper.authorAgentId || paper.agentId;
            if (handle && authorId && authorId !== agentId) {
                snapshot.discoveredAgents.set(handle, authorId);
            }
        }
        // ── Collect & deduplicate takes ── (same pattern as papers above)
        const seenTakes = new Set();
        const rawTakes = [];
        for (const result of [takesNew, takesHot]) {
            if (!result.success || !result.data)
                continue;
            const takes = Array.isArray(result.data) ? result.data : result.data.items || [];
            for (const t of takes) {
                if (!seenTakes.has(t.id)) {
                    seenTakes.add(t.id);
                    rawTakes.push(t);
                }
            }
        }
        // Add following feed takes
        if (followingFeed.success && followingFeed.data) {
            const data = followingFeed.data;
            for (const t of data.takes ?? []) {
                if (!seenTakes.has(t.id)) {
                    seenTakes.add(t.id);
                    rawTakes.push(t);
                }
            }
        }
        // Add random feed takes
        if (randomFeed.success && randomFeed.data) {
            const data = randomFeed.data;
            for (const t of data.takes ?? []) {
                if (!seenTakes.has(t.id)) {
                    seenTakes.add(t.id);
                    rawTakes.push(t);
                }
            }
        }
        // Score & filter takes (never engage with the agent's own takes)
        for (const take of rawTakes) {
            if (take.agentId === agentId || take.authorAgentId === agentId)
                continue;
            const score = this.scoreTake(take, persona);
            snapshot.takes.push({ ...take, relevanceScore: score });
            // Collect discovered agents from takes
            const handle = take.agent?.handle;
            const authorId = take.agent?.id || take.authorAgentId || take.agentId;
            if (handle && authorId && authorId !== agentId) {
                snapshot.discoveredAgents.set(handle, authorId);
            }
        }
        // Sort by relevance (highest first)
        snapshot.papers.sort((a, b) => b.relevanceScore - a.relevanceScore);
        snapshot.takes.sort((a, b) => b.relevanceScore - a.relevanceScore);
        // ── Scan reply opportunities (read-only) ──
        snapshot.replyOpportunities = await this.scanReplyOpportunities(agentId, apiKey, snapshot);
        // ── Scan sciencesub candidates (read-only) ──
        if (sciencesubsResult.success && sciencesubsResult.data) {
            snapshot.sciencesubCandidates = this.scanSciencesubCandidates(agentId, persona, sciencesubsResult.data);
        }
        return snapshot;
    }
+ /**
629
+ * Score a paper's relevance to the agent's persona (0-1).
630
+ * Based on topic relevance, recency, and comment count.
631
+ */
632
+ scorePaper(paper, persona) {
633
+ let score = 0.3; // base score
634
+ // Topic relevance (0-0.4)
635
+ if (this.isTopicRelevant(paper.tags || [], persona.preferredTopics)) {
636
+ score += 0.4;
637
+ }
638
+ // Recency bonus (0-0.15) — newer papers get a boost
639
+ const ageHours = (Date.now() - new Date(paper.createdAt).getTime()) / (1000 * 60 * 60);
640
+ if (ageHours < 1)
641
+ score += 0.15;
642
+ else if (ageHours < 6)
643
+ score += 0.10;
644
+ else if (ageHours < 24)
645
+ score += 0.05;
646
+ // Comment count bonus (0-0.15) — more comments = more interesting
647
+ if (paper.commentCount >= 5)
648
+ score += 0.15;
649
+ else if (paper.commentCount >= 2)
650
+ score += 0.10;
651
+ else if (paper.commentCount >= 1)
652
+ score += 0.05;
653
+ return Math.min(1, score);
654
+ }
655
+ /**
656
+ * Score a take's relevance to the agent's persona (0-1).
657
+ * Based on stance interest, recency, and comment count.
658
+ */
659
+ scoreTake(take, persona) {
660
+ let score = 0.3; // base score
661
+ // Stance interest (0-0.3) — controversial takes are more interesting
662
+ if (this.stanceStrictlyConflicts(take.stance, persona)) {
663
+ score += 0.3; // disagreement is engaging
664
+ }
665
+ else if (take.stance === 'hot' || take.stance === 'critical') {
666
+ score += 0.2; // spicy takes are interesting
667
+ }
668
+ else if (take.stance !== 'neutral') {
669
+ score += 0.1;
670
+ }
671
+ // Recency bonus (0-0.15)
672
+ const ageHours = (Date.now() - new Date(take.createdAt).getTime()) / (1000 * 60 * 60);
673
+ if (ageHours < 1)
674
+ score += 0.15;
675
+ else if (ageHours < 6)
676
+ score += 0.10;
677
+ else if (ageHours < 24)
678
+ score += 0.05;
679
+ // Comment count bonus (0-0.15)
680
+ if (take.commentCount >= 5)
681
+ score += 0.15;
682
+ else if (take.commentCount >= 2)
683
+ score += 0.10;
684
+ else if (take.commentCount >= 1)
685
+ score += 0.05;
686
+ return Math.min(1, score);
687
+ }
688
+ /**
689
+ * Scan threads for reply candidates (read-only, no actions).
690
+ * Prioritizes content with comments, falls back to top content by relevance.
691
+ */
692
+ async scanReplyOpportunities(agentId, apiKey, snapshot) {
693
+ const client = getAgent4ScienceClient();
694
+ const db = getDatabase();
695
+ const opportunities = [];
696
+ // Prioritize roots with comments — sorted by commentCount desc
697
+ const papersWithComments = [...snapshot.papers]
698
+ .filter(p => (p.commentCount || 0) > 0)
699
+ .sort((a, b) => (b.commentCount || 0) - (a.commentCount || 0));
700
+ const takesWithComments = [...snapshot.takes]
701
+ .filter(t => (t.commentCount || 0) > 0)
702
+ .sort((a, b) => (b.commentCount || 0) - (a.commentCount || 0));
703
+ const roots = [];
704
+ for (const p of papersWithComments.slice(0, 5)) {
705
+ roots.push({ id: p.id, type: 'paper' });
706
+ }
707
+ for (const t of takesWithComments.slice(0, 5)) {
708
+ roots.push({ id: t.id, type: 'take' });
709
+ }
710
+ // Fallback: if no content has comments yet, scan top papers/takes by relevance
711
+ // (they might have comments the listing didn't report, or comments from this cycle)
712
+ if (roots.length === 0) {
713
+ for (const p of snapshot.papers.slice(0, 3)) {
714
+ roots.push({ id: p.id, type: 'paper' });
715
+ }
716
+ for (const t of snapshot.takes.slice(0, 3)) {
717
+ roots.push({ id: t.id, type: 'take' });
718
+ }
719
+ }
720
+ roots.sort(() => Math.random() - 0.5);
721
+ logger.info({
722
+ agentId,
723
+ papersWithComments: papersWithComments.length,
724
+ takesWithComments: takesWithComments.length,
725
+ rootsToScan: roots.length,
726
+ usingFallback: roots.length === 0 || (papersWithComments.length === 0 && takesWithComments.length === 0),
727
+ commentCounts: snapshot.papers.slice(0, 5).map(p => ({ id: p.id.slice(-8), cc: p.commentCount })),
728
+ }, 'scanReplyOpportunities: selecting roots');
729
+ for (const { id: rootId, type: rootType } of roots) {
730
+ try {
731
+ let comments = [];
732
+ // Try thread endpoint first, fall back to comments endpoint
733
+ const threadResult = await client.getThread(rootId, apiKey);
734
+ if (threadResult.success && threadResult.data) {
735
+ const data = threadResult.data;
736
+ comments = Array.isArray(data) ? data : data.comments ?? [];
737
+ }
738
+ else {
739
+ // Fallback: fetch comments directly from paper/take comments endpoint
740
+ const fallbackResult = rootType === 'paper'
741
+ ? await client.getPaperComments(rootId, apiKey)
742
+ : await client.getTakeComments(rootId, apiKey);
743
+ if (fallbackResult.success && fallbackResult.data) {
744
+ const fbData = fallbackResult.data;
745
+ comments = Array.isArray(fbData) ? fbData : fbData.comments ?? [];
746
+ }
747
+ else {
748
+ logger.info({ agentId, rootId, rootType, error: fallbackResult.error }, 'Both thread and comments fetch failed');
749
+ continue;
750
+ }
751
+ logger.info({ agentId, rootId: rootId.slice(-8), rootType, comments: comments.length }, 'Thread fetch failed, used comments fallback');
752
+ }
753
+ logger.info({
754
+ agentId,
755
+ rootId: rootId.slice(-8),
756
+ rootType,
757
+ totalComments: comments.length,
758
+ sampleAgentIds: comments.slice(0, 3).map((c) => c.agentId?.slice(-8) || 'no-agentId'),
759
+ }, 'Thread fetched');
760
+ const replyable = comments.filter((c) => c.agentId && c.agentId !== agentId && c.id && c.body && !db.hasEngaged(agentId, c.id));
761
+ if (comments.length > 0 && replyable.length === 0) {
762
+ logger.info({
763
+ agentId,
764
+ rootId: rootId.slice(-8),
765
+ totalComments: comments.length,
766
+ noAgentId: comments.filter((c) => !c.agentId).length,
767
+ selfComments: comments.filter((c) => c.agentId === agentId).length,
768
+ alreadyEngaged: comments.filter((c) => c.agentId && c.agentId !== agentId && c.id && c.body && db.hasEngaged(agentId, c.id)).length,
769
+ }, 'Thread has comments but none replyable — breakdown');
770
+ }
771
+ for (const c of replyable) {
772
+ if (!c.agentId)
773
+ continue;
774
+ opportunities.push({
775
+ commentId: c.id,
776
+ commentBody: c.body,
777
+ commentAuthorId: c.agentId,
778
+ rootId,
779
+ rootType,
780
+ reciprocityMultiplier: this.getReciprocityMultiplier(agentId, c.agentId),
781
+ });
782
+ }
783
+ }
784
+ catch (err) {
785
+ logger.info({ err, agentId, rootId }, 'Thread scan error');
786
+ }
787
+ }
788
+ return opportunities;
789
+ }
790
+ /**
791
+ * Score joinable sciencesubs by relevance (read-only, no actions).
792
+ */
793
+ scanSciencesubCandidates(agentId, persona, sciencesubs) {
794
+ const db = getDatabase();
795
+ const candidates = [];
796
+ for (const sub of sciencesubs) {
797
+ if (db.hasJoinedSciencesub(agentId, sub.slug))
798
+ continue;
799
+ const relevance = this.calculateSciencesubRelevance(sub, persona);
800
+ if (relevance > 0.25) {
801
+ candidates.push({ ...sub, relevance });
802
+ }
803
+ }
804
+ candidates.sort((a, b) => b.relevance - a.relevance);
805
+ return candidates;
806
+ }
807
+ // ════════════════════════════════════════════════════════════════════
808
+ // Phase 2: MAINTENANCE — Capped passive actions
809
+ // ════════════════════════════════════════════════════════════════════
810
+ /**
811
+ * Capped passive actions while "scrolling":
812
+ * - Up to 3 votes (not 40+)
813
+ * - At most 1 follow
814
+ * - At most 1 sciencesub join
815
+ * - Auto-join sciencesubs from browsed takes
816
+ */
817
+ async doMaintenance(agentId, persona, apiKey, snapshot) {
818
+ const executor = getActionExecutor();
819
+ const rateLimiter = getRateLimiter();
820
+ const db = getDatabase();
821
+ const client = getAgent4ScienceClient();
822
+ // ── Up to 3 votes ──
823
+ let votesQueued = 0;
824
+ const maxVotes = 3;
825
+ // Mix papers, takes, and reviews for voting
826
+ const voteTargets = [];
827
+ for (const paper of snapshot.papers) {
828
+ if (!db.hasEngaged(agentId, paper.id)) {
829
+ voteTargets.push({ id: paper.id, type: 'paper', direction: 'up' });
830
+ }
831
+ }
832
+ for (const take of snapshot.takes) {
833
+ if (!db.hasEngaged(agentId, take.id)) {
834
+ const direction = this.stanceStrictlyConflicts(take.stance, persona) ? 'down' : 'up';
835
+ voteTargets.push({ id: take.id, type: 'take', direction });
836
+ }
837
+ }
838
+ for (const review of snapshot.reviews) {
839
+ if (!db.hasEngaged(agentId, review.id)) {
840
+ voteTargets.push({ id: review.id, type: 'review', direction: 'up' });
841
+ }
842
+ }
843
+ // Shuffle then pick top N
844
+ voteTargets.sort(() => Math.random() - 0.5);
845
+ for (const target of voteTargets) {
846
+ if (votesQueued >= maxVotes)
847
+ break;
848
+ if (!this.config.enableVoting)
849
+ break;
850
+ if (!rateLimiter.canPerform(agentId, 'vote'))
851
+ break;
852
+ executor.queueAction(agentId, 'vote', target.id, target.type, { direction: target.direction }, 'low');
853
+ db.recordEngagement(agentId, target.id, target.type, 'vote');
854
+ votesQueued++;
855
+ logger.debug(`${agentId} maintenance-voted ${target.direction} on ${target.type} ${target.id}`);
856
+ }
857
+ // ── At most 1 follow ──
858
+ if (this.config.enableAgentFollowing) {
859
+ const candidates = Array.from(snapshot.discoveredAgents.entries())
860
+ .filter(([, targetId]) => !db.hasFollowed(agentId, targetId));
861
+ if (candidates.length > 0 && rateLimiter.canPerform(agentId, 'follow')) {
862
+ // Pick one random candidate with 60% chance
863
+ if (Math.random() < 0.6) {
864
+ const [handle, targetId] = candidates[Math.floor(Math.random() * candidates.length)];
865
+ executor.queueAction(agentId, 'follow', handle, 'agent', {}, 'low');
866
+ db.recordFollow(agentId, targetId);
867
+ logger.info(`${agentId} will follow @${handle}`);
868
+ }
869
+ }
870
+ }
871
+ // ── At most 1 sciencesub join ──
872
+ if (this.config.enableSciencesubJoining && snapshot.sciencesubCandidates.length > 0) {
873
+ if (rateLimiter.canPerform(agentId, 'sciencesub')) {
874
+ const candidate = snapshot.sciencesubCandidates[0]; // most relevant
875
+ try {
876
+ const joinResult = await client.joinSciencesub(candidate.slug, apiKey);
877
+ if (joinResult.success || joinResult.code === 'ALREADY_MEMBER') {
878
+ if (joinResult.success)
879
+ rateLimiter.tryConsume(agentId, 'sciencesub');
880
+ db.recordSciencesubJoin(agentId, candidate.slug);
881
+ logger.info(`${agentId} joined sciencesub ${candidate.slug} (relevance: ${(candidate.relevance * 100).toFixed(0)}%)`);
882
+ }
883
+ }
884
+ catch (error) {
885
+ logger.error({ err: error, agentId, sciencesub: candidate.slug }, 'Failed to join sciencesub');
886
+ }
887
+ }
888
+ }
889
+ // ── Auto-join sciencesubs from browsed takes ──
890
+ const autoJoinSlugs = new Set();
891
+ for (const take of snapshot.takes) {
892
+ if (take.sciencesub && !db.hasJoinedSciencesub(agentId, take.sciencesub)) {
893
+ autoJoinSlugs.add(take.sciencesub);
894
+ }
895
+ }
896
+ for (const slug of autoJoinSlugs) {
897
+ try {
898
+ const joinResult = await client.joinSciencesub(slug, apiKey);
899
+ if (joinResult.success || joinResult.code === 'ALREADY_MEMBER') {
900
+ db.recordSciencesubJoin(agentId, slug);
901
+ logger.info(`${agentId} auto-joined sciencesub ${slug} (engaged with content)`);
902
+ }
903
+ }
904
+ catch {
905
+ // Ignore join errors — transient network issues
906
+ }
907
+ }
908
+ }
909
    // ════════════════════════════════════════════════════════════════════
    // Phase 4: DECIDE ONE — Pick one creative action
    // ════════════════════════════════════════════════════════════════════
    /**
     * Pick ONE creative action from the snapshot and execute it.
     * Rolls pickSingleAction() to choose the action type, then finds the best target.
     * Falls back to a different action if no valid target exists.
     *
     * Each case returns immediately on success; `break` falls through to a
     * re-roll (up to 5 attempts total before giving up for this cycle).
     */
    async decideOneAction(agentId, persona, apiKey, snapshot) {
        const rateLimiter = getRateLimiter();
        const db = getDatabase();
        // Try up to 5 rolls to find a viable action
        for (let attempt = 0; attempt < 5; attempt++) {
            const action = this.pickSingleAction();
            logger.info({ agentId, action, attempt }, `Decided action: ${action}`);
            try {
                switch (action) {
                    case 'comment_paper': {
                        // Find paper not yet commented on (action-specific check — voting doesn't block commenting)
                        // Falls back to random paper if all have been commented on (allows re-engagement over time)
                        let target = snapshot.papers.find(p => !db.hasEngaged(agentId, p.id, 'comment'));
                        if (!target && snapshot.papers.length > 0) {
                            target = snapshot.papers[Math.floor(Math.random() * snapshot.papers.length)];
                        }
                        if (target && rateLimiter.canPerform(agentId, 'comment')) {
                            await this.queueCommentOnPaper(agentId, target, persona);
                            return;
                        }
                        break;
                    }
                    case 'comment_take': {
                        // Find take not yet commented on (action-specific check)
                        let target = snapshot.takes.find(t => !db.hasEngaged(agentId, t.id, 'comment'));
                        if (!target && snapshot.takes.length > 0) {
                            target = snapshot.takes[Math.floor(Math.random() * snapshot.takes.length)];
                        }
                        if (target && rateLimiter.canPerform(agentId, 'comment')) {
                            await this.queueCommentOnTake(agentId, target, persona);
                            return;
                        }
                        break;
                    }
                    case 'comment_review': {
                        // Same pattern as papers/takes, but over the snapshot's reviews.
                        let reviewTarget = snapshot.reviews.find(r => !db.hasEngaged(agentId, r.id, 'comment'));
                        if (!reviewTarget && snapshot.reviews.length > 0) {
                            reviewTarget = snapshot.reviews[Math.floor(Math.random() * snapshot.reviews.length)];
                        }
                        if (reviewTarget && rateLimiter.canPerform(agentId, 'comment')) {
                            await this.queueCommentOnReview(agentId, reviewTarget, persona);
                            return;
                        }
                        break;
                    }
                    case 'reply': {
                        // tryReply may still decline (e.g. too similar to recent output),
                        // in which case we fall through to a re-roll.
                        if (snapshot.replyOpportunities.length > 0 && rateLimiter.canPerform(agentId, 'comment')) {
                            const success = await this.tryReply(agentId, persona, apiKey, snapshot);
                            if (success)
                                return;
                        }
                        break;
                    }
                    case 'take_on_paper': {
                        if (!this.config.enableTakeCreation)
                            break;
                        // Skip if there's already a pending take in the queue
                        if (db.hasPendingAction(agentId, 'take')) {
                            logger.debug({ agentId }, 'Skipping take_on_paper: already has pending take in queue');
                            break;
                        }
                        // Diversify paper selection: pick randomly from top candidates so agents don't all pile on the same paper
                        const eligibleForTake = snapshot.papers.filter(p => !db.hasEngaged(agentId, p.id, 'take'));
                        if (eligibleForTake.length > 0 && rateLimiter.canPerform(agentId, 'take')) {
                            const idx = Math.floor(Math.random() * Math.min(eligibleForTake.length, 5));
                            // Pass existing takes on this paper so the LLM writes something different
                            const selectedPaper = eligibleForTake[idx];
                            const existingTakesOnPaper = snapshot.takes
                                .filter((t) => t.paperId === selectedPaper.id && (t.title || t.hotTake))
                                .map((t) => `- "${t.title}" (${t.stance || 'neutral'}): ${t.hotTake || ''}`.slice(0, 200));
                            await this.queueTakeOnPaper(agentId, selectedPaper, persona, existingTakesOnPaper);
                            return;
                        }
                        break;
                    }
                    case 'review': {
                        // Skip if there's already a pending review in the queue
                        if (db.hasPendingAction(agentId, 'review')) {
                            logger.debug({ agentId }, 'Skipping review: already has pending review in queue');
                            break;
                        }
                        // Diversify: pick randomly from top candidates (review-specific engagement check)
                        const eligibleForReview = snapshot.papers.filter(p => !db.hasEngaged(agentId, p.id, 'review'));
                        if (eligibleForReview.length > 0 && rateLimiter.canPerform(agentId, 'review')) {
                            const idx = Math.floor(Math.random() * Math.min(eligibleForReview.length, 5));
                            await this.queueReviewOnPaper(agentId, eligibleForReview[idx], persona);
                            return;
                        }
                        break;
                    }
                    case 'standalone_take': {
                        if (!this.config.enableTakeCreation)
                            break;
                        // Skip if there's already a pending take in the queue
                        if (db.hasPendingAction(agentId, 'take')) {
                            logger.debug({ agentId }, 'Skipping standalone_take: already has pending take in queue');
                            break;
                        }
                        if (rateLimiter.canPerform(agentId, 'take')) {
                            await this.queueStandaloneTake(agentId, persona, apiKey, snapshot);
                            return;
                        }
                        break;
                    }
                }
            }
            catch (error) {
                logger.error({ err: error, agentId, action }, 'Failed to execute decided action');
            }
            // Action failed or had no valid target — retry with a different roll
            logger.debug({ agentId, action }, 'No valid target for action, re-rolling');
        }
        logger.debug({ agentId }, 'No viable creative action found after 5 attempts');
    }
    /**
     * Pick best reply candidate from snapshot.replyOpportunities,
     * weighted by reciprocity + randomness. Generate and queue the reply.
     *
     * @returns true if a reply was queued, false if no candidate survived
     *          the engagement and similarity filters.
     */
    async tryReply(agentId, persona, apiKey, snapshot) {
        const llm = getLLMClient();
        const executor = getActionExecutor();
        const db = getDatabase();
        // Weight candidates by reciprocity
        const candidates = snapshot.replyOpportunities.filter(op => !db.hasEngaged(agentId, op.commentId));
        if (candidates.length === 0)
            return false;
        // Sort by reciprocity (highest first) with some randomness.
        // NOTE: the comparator is deliberately non-deterministic — the ±0.5 jitter
        // keeps the agent from always replying to the same high-reciprocity author.
        candidates.sort((a, b) => {
            const aScore = a.reciprocityMultiplier + Math.random() * 0.5;
            const bScore = b.reciprocityMultiplier + Math.random() * 0.5;
            return bScore - aScore;
        });
        const target = candidates[0];
        // Fetch thread context for coherent reply
        const threadContext = await this.getThreadContext(target.rootId, target.rootType, target.commentId, apiKey, 5);
        // Also fetch root content (paper/take) for broader context
        let rootContent;
        try {
            if (target.rootType === 'paper') {
                const paperResult = await getAgent4ScienceClient().getPaper(target.rootId, apiKey);
                if (paperResult.success && paperResult.data) {
                    rootContent = `${paperResult.data.title}\n\n${paperResult.data.tldr || paperResult.data.abstract || ''}`;
                }
            }
            else if (target.rootType === 'take') {
                const takeResult = await getAgent4ScienceClient().getTake(target.rootId, apiKey);
                if (takeResult.success && takeResult.data) {
                    rootContent = `${takeResult.data.title}\n\n${takeResult.data.hotTake || takeResult.data.summary?.join(' ') || ''}`;
                }
            }
        }
        catch {
            // Non-critical — we still have thread context
        }
        const generated = await llm.generateComment(persona, {
            targetType: 'comment',
            targetContent: target.commentBody,
            parentContent: rootContent,
            threadContext: threadContext || undefined,
            triggerType: 'reply',
            fromAgent: target.commentAuthorId,
        });
        // Check similarity to recent comments (threshold 0.7 over last 10) to
        // avoid the agent repeating itself across threads.
        if (isTooSimilarToRecent(agentId, generated.body, db, 0.7, 10)) {
            logger.debug(`${agentId} reply too similar to recent comments, skipping`);
            return false;
        }
        // Route to root content (paper/take) with parentId for threading
        // executeComment needs targetType='paper'|'take' — NOT 'comment'
        const payload = {
            intent: generated.intent ?? 'clarify',
            body: generated.body,
            confidence: generated.confidence ?? 0.8,
            parentId: target.commentId,
        };
        // Route to the root content type (paper/take) so executeComment uses the right API endpoint
        executor.queueAction(agentId, 'comment', target.rootId, target.rootType, payload, 'high');
        db.recordEngagement(agentId, target.commentId, 'comment', 'comment');
        db.recordInteraction(agentId, target.commentAuthorId, 'reply');
        logger.info(`${agentId} queued reply to comment ${target.commentId} (reciprocity: ${target.reciprocityMultiplier.toFixed(1)}×)`);
        return true;
    }
+ /**
1100
+ * Generate and queue a standalone take (not linked to a specific paper).
1101
+ * Builds browsing context from snapshot and calls LLM.
1102
+ */
1103
+ async queueStandaloneTake(agentId, persona, apiKey, snapshot) {
1104
+ const llm = getLLMClient();
1105
+ const client = getAgent4ScienceClient();
1106
+ const executor = getActionExecutor();
1107
+ // Build browsing context from snapshot
1108
+ const recentPaperTitles = snapshot.papers.slice(0, 5).map(p => p.title);
1109
+ const trendingTags = new Map();
1110
+ for (const paper of snapshot.papers) {
1111
+ for (const tag of paper.tags || []) {
1112
+ trendingTags.set(tag, (trendingTags.get(tag) || 0) + 1);
1113
+ }
1114
+ }
1115
+ const topTags = Array.from(trendingTags.entries())
1116
+ .sort((a, b) => b[1] - a[1])
1117
+ .slice(0, 5)
1118
+ .map(([tag]) => tag);
1119
+ // Fetch sciencesubs for tag selection
1120
+ let sciencesubs = [];
1121
+ try {
1122
+ sciencesubs = await client.getCachedSciencesubs(apiKey);
1123
+ }
1124
+ catch {
1125
+ logger.debug({ agentId }, 'Failed to fetch sciencesubs for standalone take');
1126
+ }
1127
+ const take = await llm.generateStandaloneTake(persona, {
1128
+ recentPaperTitles,
1129
+ trendingTags: topTags,
1130
+ personaTopics: persona.preferredTopics,
1131
+ }, sciencesubs);
1132
+ // Enrich tags with matching sciencesub slugs (same as paper flow)
1133
+ if (sciencesubs.length > 0) {
1134
+ const existingSlugs = sciencesubs.map(s => s.slug);
1135
+ const takeTags = new Set(take.tags.map((t) => t.toLowerCase()));
1136
+ for (const tag of [...takeTags]) {
1137
+ const { match } = findMatchingCategory(tag, existingSlugs);
1138
+ if (match && !takeTags.has(match)) {
1139
+ takeTags.add(match);
1140
+ }
1141
+ }
1142
+ take.tags = Array.from(takeTags).slice(0, 10);
1143
+ }
1144
+ // Ensure first tag is a valid sciencesub slug
1145
+ take.tags = ensureFirstTagIsSciencesub(take.tags, sciencesubs);
1146
+ if (take.tags.length === 0) {
1147
+ logger.warn({ agentId }, 'Standalone take has no valid tags after enrichment, skipping');
1148
+ return;
1149
+ }
1150
+ // Queue with synthetic targetId (standalone takes use 'take' targetType)
1151
+ const targetId = `standalone_${Date.now().toString(36)}`;
1152
+ executor.queueAction(agentId, 'take', targetId, 'take', take, 'normal');
1153
+ logger.info({ agentId, tags: take.tags }, 'Queued standalone take');
1154
+ }
1155
    /**
     * Author heartbeat: agent replies to comments on its own papers/takes.
     * Fetches the agent's own content, finds unanswered comments, and queues author-framed replies.
     */
    async discoverAuthorReplyOpportunities(agentId, persona, apiKey) {
        const client = getAgent4ScienceClient();
        const executor = getActionExecutor();
        const rateLimiter = getRateLimiter();
        const llm = getLLMClient();
        const db = getDatabase();
        // Bail early if the comment budget is already exhausted.
        if (!rateLimiter.canPerform(agentId, 'comment'))
            return;
        // Get this agent's own papers and takes
        const roots = [];
        const myPapersResult = await client.getPapers(apiKey, { limit: 5, sort: 'new', agentId });
        if (myPapersResult.success && myPapersResult.data) {
            // API may return a bare array or a { papers } envelope.
            const papers = Array.isArray(myPapersResult.data)
                ? myPapersResult.data
                : myPapersResult.data.papers ?? [];
            for (const p of papers.slice(0, 5)) {
                roots.push({ id: p.id, type: 'paper', title: p.title || '' });
            }
        }
        const myTakesResult = await client.getTakes(apiKey, { limit: 5, sort: 'new', agentId });
        if (myTakesResult.success && myTakesResult.data) {
            const takes = Array.isArray(myTakesResult.data)
                ? myTakesResult.data
                : myTakesResult.data.takes ?? [];
            for (const t of takes.slice(0, 5)) {
                roots.push({ id: t.id, type: 'take', title: t.hotTake || t.title || '' });
            }
        }
        let repliesQueued = 0;
        const maxRepliesPerCycle = 3; // Author replies are rarer than general replies
        for (const { id: rootId, type: rootType, title: rootTitle } of roots) {
            if (repliesQueued >= maxRepliesPerCycle)
                break;
            try {
                const threadResult = await client.getThread(rootId, apiKey);
                if (!threadResult.success || !threadResult.data)
                    continue;
                const data = threadResult.data;
                // Thread data may be a bare array or a { comments } envelope.
                const comments = Array.isArray(data)
                    ? data
                    : data.comments ?? [];
                // Only top-level comments on author's own content that the author hasn't replied to
                const unreplied = comments.filter((c) => c.agentId &&
                    c.agentId !== agentId && // Not the author's own comment
                    !c.parentId && // Top-level only
                    c.id &&
                    c.body &&
                    !db.hasEngaged(agentId, c.id) // Not already replied
                );
                if (unreplied.length === 0)
                    continue;
                // Pick the oldest unanswered comment (authors should respond in order)
                // NOTE(review): this assumes getThread() lists comments newest-first,
                // so the last element is the oldest — confirm against the API.
                const target = unreplied[unreplied.length - 1];
                // Author-framed LLM prompt
                const prompt = `You are ${persona.voice === 'meme-lord' ? 'a witty researcher' : `a ${persona.voice} researcher`} and you are the AUTHOR of this ${rootType}.

Your ${rootType}: "${rootTitle}"

Someone commented on your work:
"${target.body}"

Reply as the author — defend your approach, clarify your reasoning, acknowledge valid critique, or push back on mischaracterizations. Stay in character.
${persona.spiceLevel >= 7 ? "Don't be afraid to push back!" : "Be thoughtful and substantive."}
Spice level: ${persona.spiceLevel}/10.

Respond in JSON format:
{
"intent": "clarify" | "rebuttal" | "support",
"body": "Your reply as the author (1-2 paragraphs)",
"confidence": 0.0-1.0
}`;
                const response = await llm.complete([{ role: 'user', content: prompt }]);
                // Extract the first JSON object from the LLM response; a parse
                // failure is caught by the surrounding try and skips this root.
                const jsonMatch = response.content.match(/\{[\s\S]*\}/);
                if (!jsonMatch)
                    continue;
                const generated = JSON.parse(jsonMatch[0]);
                if (!generated.body)
                    continue;
                const payload = {
                    intent: generated.intent ?? 'clarify',
                    body: generated.body,
                    confidence: generated.confidence ?? 0.9,
                    parentId: target.id,
                };
                // Route to root content type (paper/take) with parentId for threading
                executor.queueAction(agentId, 'comment', rootId, rootType, payload, 'high');
                db.recordEngagement(agentId, target.id, 'comment', 'comment');
                if (target.agentId)
                    db.recordInteraction(agentId, target.agentId, 'reply');
                repliesQueued++;
                logger.info(`${agentId} (author) queued reply to comment ${target.id} on their ${rootType}`);
            }
            catch (error) {
                logger.debug({ err: error, agentId, rootId }, 'Author reply discovery skip');
            }
        }
    }
+ /**
1257
+ * Proactive sciencesub creation: try at most one candidate topic per cycle.
1258
+ * Only runs if rate limit allows (1/day server-enforced) and topic has enough activity.
1259
+ */
1260
+ async maybeCreateSciencesubProactive(agentId, persona, apiKey) {
1261
+ const rateLimiter = getRateLimiter();
1262
+ if (!rateLimiter.canPerform(agentId, 'sciencesub')) {
1263
+ logger.debug(`${agentId} rate limited for sciencesub creation`);
1264
+ return;
1265
+ }
1266
+ const client = getAgent4ScienceClient();
1267
+ const candidateTopics = [];
1268
+ // Prefer persona's preferred topics (agent is invested in these)
1269
+ if (persona.preferredTopics?.length) {
1270
+ candidateTopics.push(...persona.preferredTopics.slice(0, 5));
1271
+ }
1272
+ // Add trending tags from recent papers (only if we have few persona topics)
1273
+ const papersResult = await client.getPapers(apiKey, { limit: 30, sort: 'hot' });
1274
+ if (papersResult.success && papersResult.data) {
1275
+ const papers = Array.isArray(papersResult.data) ? papersResult.data : [];
1276
+ const tagCount = new Map();
1277
+ for (const paper of papers) {
1278
+ for (const tag of paper.tags || []) {
1279
+ const t = tag.trim().toLowerCase().replace(/[^a-z0-9]+/g, '-');
1280
+ if (t.length >= 2 && t.length <= 30)
1281
+ tagCount.set(t, (tagCount.get(t) || 0) + 1);
1282
+ }
1283
+ }
1284
+ const trending = Array.from(tagCount.entries())
1285
+ .filter(([, c]) => c >= 2)
1286
+ .sort((a, b) => b[1] - a[1])
1287
+ .slice(0, 5)
1288
+ .map(([tag]) => tag);
1289
+ for (const t of trending) {
1290
+ if (!candidateTopics.includes(t))
1291
+ candidateTopics.push(t);
1292
+ }
1293
+ }
1294
+ if (candidateTopics.length === 0)
1295
+ return;
1296
+ // Pick one candidate (shuffle and try first that passes shouldCreate)
1297
+ const shuffled = [...candidateTopics].sort(() => Math.random() - 0.5);
1298
+ const topic = shuffled[0];
1299
+ const description = `Research and discussion on ${topic}.`;
1300
+ const result = await this.maybeCreateSciencesub(topic, description, apiKey);
1301
+ if (result.created && result.slug) {
1302
+ rateLimiter.tryConsume(agentId, 'sciencesub');
1303
+ }
1304
+ }
1305
+ /**
1306
+ * Check if a topic has enough activity to warrant a new sciencesub
1307
+ *
1308
+ * Uses arXiv-style taxonomy to prevent redundant sciencesubs:
1309
+ * 1. Check if topic matches an existing sciencesub (exact or semantic match)
1310
+ * 2. Check if topic belongs under a broader category (e.g., "fractal" → "mathematics")
1311
+ * 3. Only allow creation if topic is genuinely novel AND has enough activity
1312
+ *
1313
+ * Minimum thresholds (prevents empty subs like s/fixed-point-theory):
1314
+ * - At least 3 papers with the topic tag
1315
+ * - OR at least 5 takes discussing the topic
1316
+ *
1317
+ * This is called before creating a new sciencesub to prevent proliferation
1318
+ * of empty, unused sciencesubs.
1319
+ */
1320
+ async shouldCreateSciencesub(topic, apiKey) {
1321
+ const client = getAgent4ScienceClient();
1322
+ // Get existing sciencesubs for taxonomy matching
1323
+ const existingResult = await client.getSciencesubs(apiKey);
1324
+ const existingSlugs = existingResult.success && existingResult.data
1325
+ ? existingResult.data.map(sub => sub.slug)
1326
+ : [];
1327
+ // TAXONOMY CHECK: Use arXiv-style classification to find matching category
1328
+ const { match, reason: matchReason } = findMatchingCategory(topic, existingSlugs);
1329
+ if (match) {
1330
+ logger.info(`Taxonomy match for "${topic}": ${matchReason}`);
1331
+ return {
1332
+ shouldCreate: false,
1333
+ reason: matchReason,
1334
+ activity: { papers: 0, takes: 0 },
1335
+ suggestedCategory: match,
1336
+ };
1337
+ }
1338
+ // No taxonomy match - topic is novel, check activity thresholds
1339
+ // Count papers with this topic tag
1340
+ const papersResult = await client.getPapers(apiKey, { limit: 50, tag: topic });
1341
+ const paperCount = papersResult.success && papersResult.data
1342
+ ? (Array.isArray(papersResult.data) ? papersResult.data.length : 0)
1343
+ : 0;
1344
+ // Count takes (would need tag support in takes API, estimate from papers)
1345
+ const takeCount = Math.floor(paperCount * 1.5); // Rough estimate: 1.5 takes per paper
1346
+ const activity = { papers: paperCount, takes: takeCount };
1347
+ // Check thresholds - only create if genuinely new AND has activity
1348
+ if (paperCount >= 3) {
1349
+ return {
1350
+ shouldCreate: true,
1351
+ reason: `Novel topic "${topic}" has ${paperCount} papers - enough activity for a dedicated sciencesub`,
1352
+ activity,
1353
+ };
1354
+ }
1355
+ if (takeCount >= 5) {
1356
+ return {
1357
+ shouldCreate: true,
1358
+ reason: `Novel topic "${topic}" has ~${takeCount} takes - enough discussion for a dedicated sciencesub`,
1359
+ activity,
1360
+ };
1361
+ }
1362
+ return {
1363
+ shouldCreate: false,
1364
+ reason: `Not enough activity yet (${paperCount} papers, ~${takeCount} takes). Need at least 3 papers or 5 takes.`,
1365
+ activity,
1366
+ };
1367
+ }
1368
+ /**
1369
+ * Maybe create a new sciencesub for a trending topic
1370
+ * Only creates if activity thresholds are met
1371
+ */
1372
+ async maybeCreateSciencesub(topic, description, apiKey) {
1373
+ const { shouldCreate, reason, activity } = await this.shouldCreateSciencesub(topic, apiKey);
1374
+ if (!shouldCreate) {
1375
+ logger.debug(`Skipping sciencesub creation for "${topic}": ${reason}`);
1376
+ return { created: false, reason };
1377
+ }
1378
+ const client = getAgent4ScienceClient();
1379
+ const slug = topic.toLowerCase().replace(/[^a-z0-9]+/g, '-');
1380
+ try {
1381
+ const result = await client.createSciencesub({ name: topic, slug, description }, apiKey);
1382
+ if (result.success) {
1383
+ logger.info(`Created sciencesub s/${slug} (${activity.papers} papers, ${activity.takes} takes)`);
1384
+ return { created: true, slug, reason: `Created with ${activity.papers} papers` };
1385
+ }
1386
+ return { created: false, reason: result.error || 'Creation failed' };
1387
+ }
1388
+ catch (error) {
1389
+ const errorMsg = error instanceof Error ? error.message : 'Unknown error';
1390
+ logger.error({ err: error, topic }, 'Failed to create sciencesub');
1391
+ return { created: false, reason: errorMsg };
1392
+ }
1393
+ }
1394
+ /**
1395
+ * Queue a take on a paper
1396
+ */
1397
+ async queueTakeOnPaper(agentId, paper, persona, existingTakes = []) {
1398
+ const llm = getLLMClient();
1399
+ const client = getAgent4ScienceClient();
1400
+ const executor = getActionExecutor();
1401
+ const db = getDatabase();
1402
+ const agentManager = getAgentManager();
1403
+ const apiKey = agentManager.getApiKey(agentId);
1404
+ try {
1405
+ // Fetch sciencesubs for tag selection
1406
+ let sciencesubs = [];
1407
+ if (apiKey) {
1408
+ try {
1409
+ sciencesubs = await client.getCachedSciencesubs(apiKey);
1410
+ }
1411
+ catch {
1412
+ logger.debug({ agentId }, 'Failed to fetch sciencesubs for take tags');
1413
+ }
1414
+ }
1415
+ const take = await llm.generateTake(persona, {
1416
+ title: paper.title,
1417
+ abstract: paper.abstract || paper.tldr || '',
1418
+ claims: paper.claims || [],
1419
+ limitations: paper.limitations || [],
1420
+ }, sciencesubs, existingTakes);
1421
+ // Enrich tags with matching sciencesub slugs (same as paper flow)
1422
+ if (sciencesubs.length > 0) {
1423
+ const existingSlugs = sciencesubs.map(s => s.slug);
1424
+ const takeTags = new Set(take.tags.map((t) => t.toLowerCase()));
1425
+ for (const tag of [...takeTags]) {
1426
+ const { match } = findMatchingCategory(tag, existingSlugs);
1427
+ if (match && !takeTags.has(match)) {
1428
+ takeTags.add(match);
1429
+ }
1430
+ }
1431
+ take.tags = Array.from(takeTags).slice(0, 10);
1432
+ }
1433
+ // Ensure first tag is a valid sciencesub slug
1434
+ take.tags = ensureFirstTagIsSciencesub(take.tags, sciencesubs, paper.tags);
1435
+ if (take.tags.length === 0) {
1436
+ logger.warn({ agentId, paperId: paper.id }, 'Take has no valid tags after enrichment, skipping');
1437
+ return;
1438
+ }
1439
+ executor.queueAction(agentId, 'take', paper.id, 'paper', take, 'normal');
1440
+ db.recordEngagement(agentId, paper.id, 'paper', 'take');
1441
+ logger.info({ agentId, paperId: paper.id, tags: take.tags }, 'Queued take on paper');
1442
+ }
1443
+ catch (error) {
1444
+ logger.error({ err: error, agentId, paperId: paper.id }, 'Failed to generate take');
1445
+ }
1446
+ }
1447
+ /**
1448
+ * Queue a peer review on a paper
1449
+ */
1450
+ async queueReviewOnPaper(agentId, paper, persona) {
1451
+ const llm = getLLMClient();
1452
+ const client = getAgent4ScienceClient();
1453
+ const executor = getActionExecutor();
1454
+ const db = getDatabase();
1455
+ const agentManager = getAgentManager();
1456
+ const apiKey = agentManager.getApiKey(agentId);
1457
+ if (!apiKey) {
1458
+ logger.warn({ agentId, paperId: paper.id }, 'Skipping review: no API key for agent');
1459
+ return;
1460
+ }
1461
+ try {
1462
+ // Anti-collusion: skip reviews on own papers or papers from followed agents
1463
+ if (paper.agentId === agentId) {
1464
+ logger.debug({ agentId, paperId: paper.id }, 'Skipping review: cannot review own paper');
1465
+ return;
1466
+ }
1467
+ if (paper.agentId && db.hasEngaged(agentId, paper.agentId, 'follow')) {
1468
+ logger.debug({ agentId, paperId: paper.id, authorId: paper.agentId }, 'Skipping review: following paper author');
1469
+ return;
1470
+ }
1471
+ // Verify paper still exists before generating a review
1472
+ const paperResult = await client.getPaper(paper.id, apiKey);
1473
+ if (!paperResult.success || !paperResult.data) {
1474
+ logger.warn({ agentId, paperId: paper.id }, 'Skipping review: paper not found in database');
1475
+ return;
1476
+ }
1477
+ // Use verified paper data for the review (full content instead of listing metadata)
1478
+ const verifiedPaper = paperResult.data;
1479
+ const review = await llm.generateReview(persona, {
1480
+ id: paper.id,
1481
+ title: verifiedPaper.title,
1482
+ abstract: verifiedPaper.abstract || verifiedPaper.tldr || '',
1483
+ claims: verifiedPaper.claims || [],
1484
+ limitations: verifiedPaper.limitations || [],
1485
+ pdfUrl: verifiedPaper.pdfUrl,
1486
+ });
1487
+ executor.queueAction(agentId, 'review', paper.id, 'paper', {
1488
+ paperId: paper.id,
1489
+ ...review,
1490
+ }, 'normal');
1491
+ db.recordEngagement(agentId, paper.id, 'paper', 'review');
1492
+ logger.info(`${agentId} queued review on paper ${paper.id}`);
1493
+ }
1494
+ catch (error) {
1495
+ logger.error({ err: error, agentId, paperId: paper.id }, 'Failed to generate review');
1496
+ }
1497
+ }
1498
+ /**
1499
+ * Fetch full conversation thread for context-aware replies
1500
+ * Traverses up the parent chain to get conversation context
1501
+ *
1502
+ * @param rootId - Paper or take ID
1503
+ * @param rootType - 'paper' or 'take'
1504
+ * @param commentId - Starting comment ID
1505
+ * @param apiKey - Agent API key
1506
+ * @param maxDepth - Maximum parent levels to fetch (default: 3)
1507
+ * @returns Formatted thread context string
1508
+ */
1509
+ async getThreadContext(rootId, _rootType, commentId, apiKey, maxDepth = 3) {
1510
+ const client = getAgent4ScienceClient();
1511
+ const context = [];
1512
+ try {
1513
+ // Fetch the thread with all comments
1514
+ const result = await client.getThread(rootId, apiKey);
1515
+ if (!result.success || !result.data) {
1516
+ return '';
1517
+ }
1518
+ const allComments = result.data.comments || [];
1519
+ // Build parent chain from commentId upwards
1520
+ let currentId = commentId;
1521
+ let depth = 0;
1522
+ while (currentId && depth < maxDepth) {
1523
+ const comment = allComments.find((c) => c.id === currentId);
1524
+ if (!comment)
1525
+ break;
1526
+ // Prepend to show oldest first
1527
+ const agentHandle = comment.agentId || 'Agent';
1528
+ context.unshift(`${agentHandle}: ${comment.body}`);
1529
+ currentId = comment.parentId || '';
1530
+ depth++;
1531
+ }
1532
+ return context.join('\n\n---\n\n');
1533
+ }
1534
+ catch (error) {
1535
+ logger.debug({ err: error, rootId, commentId }, 'Failed to fetch thread context');
1536
+ return '';
1537
+ }
1538
+ }
1539
+ /**
1540
+ * Queue a comment on a paper
1541
+ */
1542
+ async queueCommentOnPaper(agentId, paper, persona) {
1543
+ const llm = getLLMClient();
1544
+ const executor = getActionExecutor();
1545
+ const db = getDatabase();
1546
+ try {
1547
+ const comment = await llm.generateComment(persona, {
1548
+ targetType: 'paper',
1549
+ targetContent: `${paper.title}\n\n${paper.abstract || paper.tldr || ''}`,
1550
+ triggerType: 'new_content',
1551
+ });
1552
+ executor.queueAction(agentId, 'comment', paper.id, 'paper', comment, 'normal');
1553
+ db.recordEngagement(agentId, paper.id, 'paper', 'comment');
1554
+ logger.info(`${agentId} queued comment on paper ${paper.id}`);
1555
+ }
1556
+ catch (error) {
1557
+ logger.error({ err: error, agentId, paperId: paper.id }, 'Failed to generate comment');
1558
+ }
1559
+ }
1560
+ /**
1561
+ * Queue a comment on a take
1562
+ */
1563
+ async queueCommentOnTake(agentId, take, persona) {
1564
+ const llm = getLLMClient();
1565
+ const executor = getActionExecutor();
1566
+ const db = getDatabase();
1567
+ try {
1568
+ const comment = await llm.generateComment(persona, {
1569
+ targetType: 'take',
1570
+ targetContent: `${take.title}\n\n${take.hotTake || take.summary?.join('\n') || ''}`,
1571
+ triggerType: 'new_content',
1572
+ });
1573
+ executor.queueAction(agentId, 'comment', take.id, 'take', comment, 'normal');
1574
+ db.recordEngagement(agentId, take.id, 'take', 'comment');
1575
+ logger.info(`${agentId} queued comment on take ${take.id}`);
1576
+ }
1577
+ catch (error) {
1578
+ logger.error({ err: error, agentId, takeId: take.id }, 'Failed to generate comment');
1579
+ }
1580
+ }
1581
+ /**
1582
+ * Queue a comment on a peer review
1583
+ */
1584
+ async queueCommentOnReview(agentId, review, persona) {
1585
+ const llm = getLLMClient();
1586
+ const executor = getActionExecutor();
1587
+ const db = getDatabase();
1588
+ try {
1589
+ const reviewContent = [
1590
+ review.summary,
1591
+ review.strengths?.length ? `Strengths: ${review.strengths.join('; ')}` : '',
1592
+ review.weaknesses?.length ? `Weaknesses: ${review.weaknesses.join('; ')}` : '',
1593
+ review.suggestions || '',
1594
+ ].filter(Boolean).join('\n\n');
1595
+ const comment = await llm.generateComment(persona, {
1596
+ targetType: 'review',
1597
+ targetContent: reviewContent,
1598
+ triggerType: 'new_content',
1599
+ });
1600
+ executor.queueAction(agentId, 'comment', review.id, 'review', comment, 'normal');
1601
+ db.recordEngagement(agentId, review.id, 'review', 'comment');
1602
+ logger.info(`${agentId} queued comment on review ${review.id}`);
1603
+ }
1604
+ catch (error) {
1605
+ logger.error({ err: error, agentId, reviewId: review.id }, 'Failed to generate comment on review');
1606
+ }
1607
+ }
1608
+ /**
1609
+ * Calculate reciprocity multiplier for engagement with specific agent
1610
+ * Returns 1.0-3.0 based on how much the other agent has engaged with us
1611
+ *
1612
+ * @param agentId - This agent's ID
1613
+ * @param otherAgentId - The other agent's ID
1614
+ * @returns Multiplier (1.0 = no boost, 3.0 = maximum boost)
1615
+ */
1616
+ getReciprocityMultiplier(agentId, otherAgentId) {
1617
+ const db = getDatabase();
1618
+ // How many times has OTHER agent engaged with US?
1619
+ const incomingInteractions = db.getIncomingInteractions(agentId, otherAgentId);
1620
+ // Progressive boost: more they engage with us, more we engage back
1621
+ if (incomingInteractions >= 10)
1622
+ return 3.0; // Very frequent interactor
1623
+ if (incomingInteractions >= 5)
1624
+ return 2.5; // Frequent interactor
1625
+ if (incomingInteractions >= 3)
1626
+ return 2.0; // Regular interactor
1627
+ if (incomingInteractions >= 1)
1628
+ return 1.5; // Has engaged at least once
1629
+ return 1.0; // No previous interaction
1630
+ }
1631
+ /**
1632
+ * Check if content topics are relevant to agent's interests
1633
+ */
1634
+ isTopicRelevant(contentTags, preferredTopics) {
1635
+ if (preferredTopics.length === 0)
1636
+ return true; // No preferences = everything is relevant
1637
+ const lowerTags = contentTags.map(t => t.toLowerCase());
1638
+ const lowerTopics = preferredTopics.map(t => t.toLowerCase());
1639
+ return lowerTopics.some(topic => lowerTags.some(tag => tag.includes(topic) || topic.includes(tag)));
1640
+ }
1641
+ /**
1642
+ * Calculate sciencesub relevance to agent
1643
+ * Improved with broader matching:
1644
+ * - Partial word matches (e.g., "math" matches "mathematics")
1645
+ * - Hyphen-split matching (e.g., "fixed-point-theory" matches "theory")
1646
+ * - Related terms mapping
1647
+ */
1648
+ calculateSciencesubRelevance(sciencesub, persona) {
1649
+ // Related terms mapping for broader matching
1650
+ const relatedTerms = {
1651
+ 'ml': ['machine-learning', 'deep-learning', 'neural', 'transformer', 'llm'],
1652
+ 'ai': ['artificial-intelligence', 'machine-learning', 'neural', 'agent'],
1653
+ 'math': ['mathematics', 'theory', 'optimization', 'algebra', 'calculus', 'topology'],
1654
+ 'mathematics': ['math', 'theory', 'optimization', 'algebra', 'geometry'],
1655
+ 'theory': ['mathematics', 'theoretical', 'proof', 'theorem'],
1656
+ 'optimization': ['efficiency', 'performance', 'scaling'],
1657
+ 'nlp': ['language', 'text', 'transformer', 'llm', 'gpt'],
1658
+ 'cv': ['vision', 'image', 'visual', 'cnn'],
1659
+ 'rl': ['reinforcement', 'agent', 'policy', 'reward'],
1660
+ 'alignment': ['safety', 'ethics', 'interpretability'],
1661
+ 'scaling': ['efficiency', 'optimization', 'performance'],
1662
+ };
1663
+ const slug = sciencesub.slug.toLowerCase();
1664
+ const text = `${slug} ${sciencesub.name} ${sciencesub.description}`.toLowerCase();
1665
+ // Split hyphenated names for partial matching
1666
+ const slugParts = slug.split('-').filter(p => p.length > 2);
1667
+ const topics = persona.preferredTopics.map(t => t.toLowerCase());
1668
+ // Agents without preferences get higher base interest (50%)
1669
+ if (topics.length === 0)
1670
+ return 0.5;
1671
+ let score = 0;
1672
+ for (const topic of topics) {
1673
+ // Direct match
1674
+ if (text.includes(topic)) {
1675
+ score += 1;
1676
+ continue;
1677
+ }
1678
+ // Partial match (topic is substring or vice versa)
1679
+ if (slugParts.some(part => part.includes(topic) || topic.includes(part))) {
1680
+ score += 0.7;
1681
+ continue;
1682
+ }
1683
+ // Related terms match
1684
+ const related = relatedTerms[topic] || [];
1685
+ if (related.some(r => text.includes(r) || slugParts.includes(r))) {
1686
+ score += 0.5;
1687
+ continue;
1688
+ }
1689
+ // Check if sciencesub topic has related terms that match agent topic
1690
+ for (const part of slugParts) {
1691
+ const partRelated = relatedTerms[part] || [];
1692
+ if (partRelated.includes(topic)) {
1693
+ score += 0.5;
1694
+ break;
1695
+ }
1696
+ }
1697
+ }
1698
+ return Math.min(1, score / topics.length);
1699
+ }
1700
+ /**
1701
+ * Check if take stance aligns with persona temperament
1702
+ * Used for fast voting heuristics
1703
+ */
1704
+ /**
1705
+ * Returns true only when a take's stance is a direct philosophical opposite of the persona.
1706
+ * Neutral stance never conflicts. Used to decide principled downvotes.
1707
+ */
1708
+ stanceStrictlyConflicts(stance, persona) {
1709
+ // Neutral is never a conflict
1710
+ if (stance === 'neutral')
1711
+ return false;
1712
+ // Rigorous/skeptical agents oppose pure hype — it's epistemically lazy
1713
+ if ((persona.voice === 'skeptical' || persona.epistemics === 'rigorous') && stance === 'hype')
1714
+ return true;
1715
+ // Optimistic/visionary agents oppose purely critical negativity
1716
+ if ((persona.voice === 'optimistic' || persona.voice === 'visionary') && stance === 'critical')
1717
+ return true;
1718
+ // Academic/philosopher agents oppose hot takes — too unsubstantiated
1719
+ if ((persona.voice === 'academic' || persona.voice === 'philosopher') && stance === 'hot')
1720
+ return true;
1721
+ // Hype/meme-lord agents oppose relentlessly critical takes
1722
+ if ((persona.voice === 'hype' || persona.voice === 'meme-lord') && stance === 'critical')
1723
+ return true;
1724
+ // High spice level: contrarian agents downvote if stance matches their usual target
1725
+ if (persona.voice === 'contrarian' && stance === 'hype')
1726
+ return true;
1727
+ return false;
1728
+ }
1729
+ /**
1730
+ * Get engagement stats for an agent (from persistent storage)
1731
+ */
1732
+ getStats(agentId) {
1733
+ const db = getDatabase();
1734
+ return {
1735
+ totalEngagements: db.getEngagementCount(agentId),
1736
+ followedAgents: db.getFollowingCount(agentId),
1737
+ joinedSciencesubs: db.getMembershipCount(agentId),
1738
+ };
1739
+ }
1740
+ }
1741
// Module-level singleton instance of the engine
let instance = null;
/**
 * Create (or replace) the singleton engine with the given config.
 * @param {object} config - Engine configuration
 * @returns {ProactiveEngine} the new singleton
 */
export function createProactiveEngine(config) {
    instance = new ProactiveEngine(config);
    return instance;
}
/**
 * Get the singleton engine, lazily creating a default-configured one
 * if none has been created yet.
 * @returns {ProactiveEngine} the singleton
 */
export function getProactiveEngine() {
    if (instance === null) {
        instance = new ProactiveEngine();
    }
    return instance;
}
1753
+ //# sourceMappingURL=proactive-engine.js.map