twinclaw 1.2.9 → 1.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,207 @@
1
+ import { logThought } from '../utils/logger.js';
2
+ import { checkFirstInteraction, loadUserMemory, saveUserMemory, getDefaultUserMemory, } from '../services/user-memory.js';
3
// System prompt for the conversational onboarding flow. It instructs the LLM
// to interview the user casually and to embed any learned profile fields as a
// flat JSON object inside an `<!-- INFO: {...} -->` HTML comment, which
// SelfSetupAgent#extractInfo later parses and merges into the user profile.
// NOTE: this is a runtime string sent to the model — edits change behavior.
const ONBOARDING_SYSTEM_PROMPT = `You are having a natural conversation to get to know the user better. Your goal is to learn about them in a friendly, casual way - like two people chatting over coffee.

Topics to naturally explore (one at a time, naturally):
- What they do for work/study
- What they're currently working on or interested in
- What tools/tech they use
- What they like to do outside of work
- How they prefer to communicate (brief vs detailed, technical vs simple)
- What their goals are

Guidelines:
- Be conversational and warm, not robotic
- Ask one question at a time, building on what they share
- Don't use numbered lists or formal questionnaires
- If they share multiple things, acknowledge them naturally before asking follow-ups
- When you feel you've learned enough (after 3-5 exchanges), ask if they'd like you to remember this info

Important: Extract key information from their responses and include it in your response as a JSON comment like:
<!-- INFO: {"role": "developer", "interests": "AI, music", "goals": "building automation"} -->

This JSON will be automatically parsed to save their profile. Use these keys:
- role (what they do)
- company (where they work)
- currentProject (what they're working on)
- tools (comma-separated list of tools they use)
- interests (what they like doing)
- communicationPreference (brief, detailed, or contextual)
- frustrations (what frustrates them about AI)
- goals (what they're trying to achieve)
- technicalLevel (beginner, intermediate, or expert)`;
33
/**
 * Conversational onboarding agent: drives a short "get to know you" dialogue,
 * accumulates profile fields from `<!-- INFO: {...} -->` payloads embedded in
 * LLM responses, and persists them through the user-memory service.
 *
 * Phase lifecycle: 'idle' -> 'learning' -> 'confirming' -> 'completed'.
 */
export class SelfSetupAgent {
    // Owner of this onboarding session.
    #userId;
    // Current lifecycle phase: 'idle' | 'learning' | 'confirming' | 'completed'.
    #phase = 'idle';
    // Number of LLM responses processed; drives the learning -> confirming switch.
    #conversationTurn = 0;
    // Profile fields merged from every INFO payload seen so far.
    #extractedInfo = {};
    // True once the user explicitly confirmed saving their profile.
    #confirmed = false;
    constructor(userId) {
        this.#userId = userId;
    }
    get userId() {
        return this.#userId;
    }
    get phase() {
        return this.#phase;
    }
    /** @returns {boolean} true once onboarding finished (saved or skipped). */
    isComplete() {
        return this.#phase === 'completed';
    }
    getSystemPrompt() {
        return ONBOARDING_SYSTEM_PROMPT;
    }
    /**
     * Seed history for the onboarding LLM call. Note this already begins with
     * the onboarding system prompt, so callers should not also prepend
     * getSystemPrompt() — that would send the same system message twice.
     */
    getConversationHistory() {
        return [
            {
                role: 'system',
                content: ONBOARDING_SYSTEM_PROMPT,
            },
        ];
    }
    /**
     * Ingest one LLM response: merge any embedded INFO payload, advance the
     * turn counter, and decide the next phase.
     * @param {string} llmText - raw LLM response text
     * @returns {{text: string, phase: string, isComplete: boolean, needsMoreInfo: boolean, extractedInfo: object}}
     */
    processLLMResponse(llmText) {
        this.#conversationTurn++;
        const extracted = this.#extractInfo(llmText);
        if (extracted) {
            // Later payloads win key-by-key over earlier ones.
            this.#extractedInfo = { ...this.#extractedInfo, ...extracted };
        }
        // After a few exchanges, switch to asking the user for confirmation.
        if (this.#conversationTurn >= 4 && !this.#confirmed) {
            this.#phase = 'confirming';
            return {
                text: llmText,
                phase: this.#phase,
                isComplete: false,
                needsMoreInfo: true,
                extractedInfo: this.#extractedInfo,
            };
        }
        if (this.#confirmed) {
            return this.#completeSetup();
        }
        this.#phase = 'learning';
        return {
            text: llmText,
            phase: this.#phase,
            isComplete: false,
            needsMoreInfo: true,
            extractedInfo: this.#extractedInfo,
        };
    }
    /** User agreed: persist the collected profile and finish onboarding. */
    confirmAndSave() {
        this.#confirmed = true;
        return this.#completeSetup();
    }
    /** User declined or skipped onboarding: finish without saving anything. */
    skipOrDecline() {
        this.#phase = 'completed';
        return {
            text: "No problem! Whenever you're ready to share more about yourself, just let me know. How can I help you today?",
            phase: this.#phase,
            isComplete: true,
            needsMoreInfo: false,
        };
    }
    // Map the loosely-typed extracted fields onto the structured user-memory
    // profile and save it. NOTE(review): this starts from getDefaultUserMemory()
    // rather than merging into any previously saved profile — confirm that
    // overwrite-on-rerun is intended.
    #completeSetup() {
        if (Object.keys(this.#extractedInfo).length > 0) {
            const memory = getDefaultUserMemory();
            if (this.#extractedInfo.role) {
                memory.profile.identity.role = String(this.#extractedInfo.role);
            }
            if (this.#extractedInfo.company) {
                memory.profile.identity.company = String(this.#extractedInfo.company);
            }
            if (this.#extractedInfo.currentProject) {
                memory.profile.professionalBackground.currentFocus = String(this.#extractedInfo.currentProject);
            }
            if (this.#extractedInfo.tools) {
                // Comma-separated list -> trimmed, non-empty array.
                const tools = String(this.#extractedInfo.tools).split(',').map(t => t.trim()).filter(Boolean);
                memory.profile.professionalBackground.toolsAndPlatforms = tools;
            }
            if (this.#extractedInfo.interests) {
                memory.profile.personalContext.interests = String(this.#extractedInfo.interests).split(',').map(t => t.trim()).filter(Boolean);
            }
            if (this.#extractedInfo.goals) {
                memory.profile.goals.shortTerm = [String(this.#extractedInfo.goals)];
            }
            if (this.#extractedInfo.technicalLevel) {
                // Fuzzy-match free-form text to the three canonical levels;
                // anything unrecognized defaults to 'intermediate'.
                const level = String(this.#extractedInfo.technicalLevel).toLowerCase();
                if (level.includes('beginner')) {
                    memory.profile.identity.technicalLevel = 'beginner';
                }
                else if (level.includes('expert') || level.includes('advanced')) {
                    memory.profile.identity.technicalLevel = 'expert';
                }
                else {
                    memory.profile.identity.technicalLevel = 'intermediate';
                }
            }
            if (this.#extractedInfo.communicationPreference) {
                const pref = String(this.#extractedInfo.communicationPreference).toLowerCase();
                if (pref.includes('brief')) {
                    memory.profile.communicationStyle.detailLevel = 'brief';
                }
                else if (pref.includes('detail')) {
                    memory.profile.communicationStyle.detailLevel = 'comprehensive';
                }
                else {
                    memory.profile.communicationStyle.detailLevel = 'contextual';
                }
            }
            if (this.#extractedInfo.frustrations) {
                memory.profile.communicationStyle.frustrations = [String(this.#extractedInfo.frustrations)];
            }
            saveUserMemory(this.#userId, memory);
            logThought(`[SelfSetupAgent] Saved user profile for: ${this.#userId}`);
        }
        this.#phase = 'completed';
        return {
            text: `Got it! I've saved what you shared about yourself. I'll use this to tailor my responses to you.

From now on, I'll:
✓ Remember what matters to you
✓ Adapt my communication style to your preferences
✓ Keep track of your projects and goals

If anything changes or you want to update your info, just let me know. Ready to dive in!`,
            phase: this.#phase,
            isComplete: true,
            needsMoreInfo: false,
            extractedInfo: this.#extractedInfo,
        };
    }
    /**
     * Pull the JSON payload out of an `<!-- INFO: {...} -->` comment.
     * Fix: the previous pattern `\{[^}]+\}` stopped at the first '}' anywhere,
     * so a value containing '}' (e.g. "dev {backend}") produced truncated,
     * unparsable JSON. The lazy match anchored on the closing `-->` captures
     * the whole object (still tolerating newlines inside the payload).
     * @param {string} text - LLM response text
     * @returns {object|null} parsed payload, or null when absent/invalid
     */
    #extractInfo(text) {
        const infoMatch = text.match(/<!--\s*INFO:\s*(\{[\s\S]*?\})\s*-->/);
        if (!infoMatch) {
            return null;
        }
        try {
            return JSON.parse(infoMatch[1]);
        }
        catch {
            logThought(`[SelfSetupAgent] Failed to parse extracted info: ${infoMatch[1]}`);
            return null;
        }
    }
    /** Lightweight snapshot for diagnostics/logging. */
    getState() {
        return {
            phase: this.#phase,
            turn: this.#conversationTurn,
            extractedCount: Object.keys(this.#extractedInfo).length,
        };
    }
}
192
/**
 * Create an onboarding agent for a user, but only on their very first
 * interaction (per checkFirstInteraction); returns null otherwise.
 * @param {string} userId
 * @returns {SelfSetupAgent|null}
 */
export function createSelfSetupAgent(userId) {
    return checkFirstInteraction(userId) ? new SelfSetupAgent(userId) : null;
}
198
/**
 * Load a previously saved user-memory profile for the given user.
 * Thin passthrough to the user-memory service; return shape is whatever
 * loadUserMemory produces (presumably null/undefined when no profile has been
 * saved — confirm against user-memory.js).
 * @param {string} userId
 */
export function getExistingUserMemory(userId) {
    return loadUserMemory(userId);
}
201
/**
 * Reuse an in-flight onboarding agent when one exists and has not finished;
 * otherwise attempt to create a fresh one. Falls back to `existing` (even a
 * completed one) when a new agent cannot be created.
 * @param {string} userId
 * @param {SelfSetupAgent|undefined} existing - agent from a previous turn, if any
 * @returns {SelfSetupAgent|undefined|null}
 */
export function getOrCreateAgent(userId, existing) {
    const stillOnboarding = Boolean(existing) && existing.phase !== 'completed';
    if (stillOnboarding) {
        return existing;
    }
    return createSelfSetupAgent(userId) ?? existing;
}
@@ -631,6 +631,9 @@ export function getConfigValue(key, sensitive = false) {
631
631
  case 'GITHUB_TOKEN':
632
632
  jsonValue = config.models.githubToken;
633
633
  break;
634
+ case 'MODEL_DEFINITIONS':
635
+ jsonValue = JSON.stringify(config.models.definitions ?? []);
636
+ break;
634
637
  case 'TELEGRAM_BOT_TOKEN':
635
638
  jsonValue = config.messaging.telegram.botToken;
636
639
  break;
@@ -265,6 +265,89 @@ export const STATIC_MODEL_CATALOG = [
265
265
  pricing: 'Included with Copilot',
266
266
  description: 'GitHub Copilot model',
267
267
  })),
268
+ // Modal Custom Models
269
+ {
270
+ id: 'modal-zai-glm-5-fp8',
271
+ name: 'GLM-5-FP8',
272
+ provider: 'modal',
273
+ model: 'zai-org/GLM-5-FP8',
274
+ contextLength: 128000,
275
+ supportsStreaming: true,
276
+ pricing: 'Custom endpoint',
277
+ description: 'Custom Modal model'
278
+ },
279
+ // Groq Models
280
+ {
281
+ id: 'groq-qwen3-32b',
282
+ name: 'Qwen 3 32B',
283
+ provider: 'groq',
284
+ model: 'qwen/qwen3-32b',
285
+ contextLength: 32768,
286
+ supportsStreaming: true,
287
+ pricing: 'Free tier available',
288
+ description: 'Fast inference with Groq'
289
+ },
290
+ {
291
+ id: 'groq-kimi-k2',
292
+ name: 'Kimi K2 Instruct',
293
+ provider: 'groq',
294
+ model: 'moonshotai/kimi-k2-instruct-0905',
295
+ contextLength: 32768,
296
+ supportsStreaming: true,
297
+ pricing: 'Free tier available',
298
+ description: 'Fast inference with Groq'
299
+ },
300
+ // OpenRouter Free Models
301
+ {
302
+ id: 'openrouter-stepfun-flash',
303
+ name: 'StepFun Flash (Free)',
304
+ provider: 'openrouter',
305
+ model: 'stepfun/step-3.5-flash:free',
306
+ contextLength: 32000,
307
+ supportsStreaming: true,
308
+ pricing: 'Free',
309
+ description: 'Free model via OpenRouter'
310
+ },
311
+ {
312
+ id: 'openrouter-trinity-large',
313
+ name: 'Trinity Large (Free)',
314
+ provider: 'openrouter',
315
+ model: 'arcee-ai/trinity-large-preview:free',
316
+ contextLength: 32000,
317
+ supportsStreaming: true,
318
+ pricing: 'Free',
319
+ description: 'Free model via OpenRouter'
320
+ },
321
+ {
322
+ id: 'openrouter-trinity-mini',
323
+ name: 'Trinity Mini (Free)',
324
+ provider: 'openrouter',
325
+ model: 'arcee-ai/trinity-mini:free',
326
+ contextLength: 32000,
327
+ supportsStreaming: true,
328
+ pricing: 'Free',
329
+ description: 'Free model via OpenRouter'
330
+ },
331
+ {
332
+ id: 'openrouter-qwen-vl',
333
+ name: 'Qwen VL (Free)',
334
+ provider: 'openrouter',
335
+ model: 'qwen/qwen3-vl-30b-a3b-thinking',
336
+ contextLength: 32000,
337
+ supportsStreaming: true,
338
+ pricing: 'Free',
339
+ description: 'Free vision model via OpenRouter'
340
+ },
341
+ {
342
+ id: 'openrouter-gpt-oss',
343
+ name: 'GPT OSS (Free)',
344
+ provider: 'openrouter',
345
+ model: 'openai/gpt-oss-120b:free',
346
+ contextLength: 32000,
347
+ supportsStreaming: true,
348
+ pricing: 'Free',
349
+ description: 'Free model via OpenRouter'
350
+ },
268
351
  // GitHub Models catalog entries can also be fetched dynamically
269
352
  // Modal Custom Models (user-specific - will be added dynamically)
270
353
  ];
@@ -1,6 +1,9 @@
1
1
  import { randomUUID } from 'node:crypto';
2
+ import fs from 'node:fs/promises';
3
+ import path from 'path';
2
4
  import { createSession, getSessionMessages, saveMessage } from '../services/db.js';
3
5
  import { ModelRouter } from '../services/model-router.js';
6
+ import { getIdentityDir } from '../config/workspace.js';
4
7
  import { indexConversationTurn, retrieveEvidenceAwareMemoryContext, } from '../services/semantic-memory.js';
5
8
  import { OrchestrationService } from '../services/orchestration-service.js';
6
9
  import { assembleContext } from './context-assembly.js';
@@ -8,6 +11,7 @@ import { LaneExecutor } from './lane-executor.js';
8
11
  import { PolicyEngine } from '../services/policy-engine.js';
9
12
  import { logThought } from '../utils/logger.js';
10
13
  import { ContextLifecycleOrchestrator } from '../services/context-lifecycle.js';
14
+ import { createSelfSetupAgent } from '../agents/self-setup-agent.js';
11
15
  const DEFAULT_MAX_TOOL_ROUNDS = 6;
12
16
  const DEFAULT_IDENTICAL_TOOL_CALL_LIMIT = 3;
13
17
  const DEFAULT_DELEGATION_MIN_SCORE = 2;
@@ -89,6 +93,7 @@ export class Gateway {
89
93
  #contextLifecycle;
90
94
  #toolPolicy;
91
95
  #degradationCounts = new Map();
96
+ #selfSetupAgents = new Map();
92
97
  constructor(registry, options = {}) {
93
98
  this.#router = options.router ?? new ModelRouter();
94
99
  this.#orchestration = options.orchestration ?? new OrchestrationService();
@@ -163,8 +168,46 @@ export class Gateway {
163
168
  return 'I could not find any text content to process.';
164
169
  }
165
170
  const sessionId = `${message.platform}:${message.senderId}`;
171
+ const userId = message.senderId;
172
+ const existingAgent = this.#selfSetupAgents.get(userId);
173
+ if (existingAgent && !existingAgent.isComplete()) {
174
+ return this.#runOnboardingConversation(existingAgent, normalizedText, sessionId);
175
+ }
176
+ const newAgent = createSelfSetupAgent(userId);
177
+ if (newAgent) {
178
+ this.#selfSetupAgents.set(userId, newAgent);
179
+ return this.#runOnboardingConversation(newAgent, normalizedText, sessionId);
180
+ }
166
181
  return this.processText(sessionId, normalizedText);
167
182
  }
183
+ async #runOnboardingConversation(agent, userMessage, sessionId) {
184
+ const messages = [
185
+ { role: 'system', content: agent.getSystemPrompt() },
186
+ ...agent.getConversationHistory(),
187
+ { role: 'user', content: userMessage },
188
+ ];
189
+ try {
190
+ const response = await this.#router.createChatCompletion(messages, undefined, { sessionId });
191
+ if (!response.content) {
192
+ return "I'm having trouble thinking right now. Let's try again.";
193
+ }
194
+ const result = agent.processLLMResponse(response.content);
195
+ if (result.isComplete && !result.needsMoreInfo) {
196
+ this.#selfSetupAgents.delete(agent.userId);
197
+ return result.text;
198
+ }
199
+ if (result.phase === 'confirming') {
200
+ const confirmMessage = result.text + "\n\nWould you like me to remember this? (yes/no or just tell me more)";
201
+ return confirmMessage;
202
+ }
203
+ return result.text;
204
+ }
205
+ catch (err) {
206
+ const errorMsg = err instanceof Error ? err.message : String(err);
207
+ logThought(`[SelfSetupAgent] LLM error: ${errorMsg}`);
208
+ return "I'm having trouble continuing our conversation. Let's try again - what would you like to talk about?";
209
+ }
210
+ }
168
211
  async processText(sessionId, text) {
169
212
  const normalizedText = text.trim();
170
213
  if (!normalizedText) {
@@ -174,6 +217,17 @@ export class Gateway {
174
217
  const historyRows = getSessionMessages(sessionId);
175
218
  const conversationHistory = toConversationHistory(historyRows);
176
219
  const historyPlan = this.#contextLifecycle.planHistoryWindow(conversationHistory);
220
+ const isFirstInteraction = historyRows.length === 0;
221
+ let setupPrompt = '';
222
+ if (isFirstInteraction) {
223
+ const userMdPath = path.join(getIdentityDir(), 'user.md');
224
+ try {
225
+ await fs.access(userMdPath);
226
+ }
227
+ catch {
228
+ setupPrompt = this.#getPersonaSetupPrompt();
229
+ }
230
+ }
177
231
  await this.#persistTurn(sessionId, 'user', normalizedText);
178
232
  const memoryRetrieval = await retrieveEvidenceAwareMemoryContext(sessionId, normalizedText, historyPlan.memoryTopK);
179
233
  const memoryContext = memoryRetrieval.context;
@@ -190,6 +244,7 @@ export class Gateway {
190
244
  const messages = [
191
245
  { role: 'system', content: compactSystemPrompt.content },
192
246
  ...historyPlan.hotHistory,
247
+ ...(setupPrompt ? [{ role: 'system', content: setupPrompt }] : []),
193
248
  { role: 'user', content: normalizedText },
194
249
  ];
195
250
  return this.#runConversationLoop(sessionId, messages);
@@ -458,4 +513,59 @@ export class Gateway {
458
513
  }
459
514
  return content;
460
515
  }
516
// Builds the one-shot system prompt injected on a user's very first text
// interaction (when identity/user.md does not yet exist — see the check in
// processText). It instructs the model to interview the user and then persist
// a user.md profile in the identity directory using the embedded markdown
// template. NOTE: the template literal is a runtime string sent to the model;
// any edit to its text changes behavior.
#getPersonaSetupPrompt() {
    return `
## First Interaction - Persona Setup

This appears to be your first conversation with me. Before we dive into anything, I'd love to take a few minutes to get to know you better so I can be actually helpful (not just generic AI helpful).

Think of this as a quick chat where I learn what makes you tick.

Start by introducing yourself warmly and asking me 2-3 questions about:
- What do you do? (Role, industry, company)
- What are you working on right now?
- How do you like information presented? (Brief vs detailed, casual vs formal)
- What frustrates you about AI assistants?
- What platforms or tools do you use that I might integrate with?

After I respond, synthesize what you learn and create/update the user.md file in my identity directory with:
- User name and how they'd like to be addressed
- Role/industry/company
- Communication preferences
- Current projects or goals
- Tools and platforms they use
- Anything they want me to know or avoid

The user.md file should follow this format:

\`\`\`markdown
# User Profile

## Basic Info
- **Name:** [what they want to be called]
- **Role:** [job title/position]
- **Company/Context:** [where they work/study]
- **Technical Level:** [beginner/intermediate/expert]

## Communication Preferences
- **Formality:** [casual/professional/mix]
- **Detail Level:** [brief/comprehensive/contextual]
- **Tone:** [direct/exploratory/friendly]

## Current Context
- **Active Projects:** [what they're working on]
- **Goals:** [what they're trying to achieve]
- **Tools:** [daily tech stack, platforms]

## Important to Remember
- [things they explicitly mention caring about]
- [frustrations they mention]

## Learned Facts
- [interesting facts from our conversation]
\`\`\`

Tell me once you've saved this. Then we'll be ready to dive in!
`;
}
461
571
  }
@@ -8,7 +8,7 @@ import { isValidApiPort, isValidE164LikePhone, isValidProviderModelFormat, } fro
8
8
  import { createSession } from '../services/db.js';
9
9
  import { logThought } from '../utils/logger.js';
10
10
  import { getGitHubCopilotService } from '../services/github-copilot-service.js';
11
- import { getModelCatalogService } from '../services/model-catalog-service.js';
11
+ import { getModelCatalogService, initializeModelCatalog } from '../services/model-catalog-service.js';
12
12
  import { PROVIDER_INFO } from '../config/model-catalog.js';
13
13
  const rl = readline.createInterface({
14
14
  input: process.stdin,
@@ -1214,6 +1214,8 @@ async function restoreConfigSnapshot(configPath, snapshot) {
1214
1214
  await writeFile(configPath, snapshot, { encoding: 'utf8', mode: 0o600 });
1215
1215
  }
1216
1216
  export async function runSetupWizard(options = {}) {
1217
+ // Initialize model catalog (fetch GitHub models)
1218
+ await initializeModelCatalog();
1217
1219
  const logger = options.logger ?? console;
1218
1220
  const configPath = getConfigPath(options.configPathOverride);
1219
1221
  const baseConfig = await readConfig(options.configPathOverride);
package/dist/index.js CHANGED
@@ -19,6 +19,7 @@ import { SttService } from './services/stt-service.js';
19
19
  import { TtsService } from './services/tts-service.js';
20
20
  import { QueueService } from './services/queue-service.js';
21
21
  import { ModelRouter } from './services/model-router.js';
22
+ import { initializeModelCatalog } from './services/model-catalog-service.js';
22
23
  import { IncidentManager } from './services/incident-manager.js';
23
24
  import { RuntimeBudgetGovernor } from './services/runtime-budget-governor.js';
24
25
  import { LocalStateBackupService } from './services/local-state-backup.js';
@@ -240,6 +241,8 @@ async function main() {
240
241
  return 'allowlist';
241
242
  }
242
243
  const runtimeBudgetGovernor = new RuntimeBudgetGovernor();
244
+ // Initialize model catalog (fetch GitHub models in background)
245
+ initializeModelCatalog().catch(err => console.warn('[ModelCatalog] Init failed:', err));
243
246
  const modelRouter = new ModelRouter({ budgetGovernor: runtimeBudgetGovernor });
244
247
  const gateway = new Gateway(skillRegistry, {
245
248
  policyEngine,
@@ -1,6 +1,8 @@
1
1
  import TelegramBot from 'node-telegram-bot-api';
2
2
  import os from 'node:os';
3
3
  import path from 'node:path';
4
+ import { readConfig, writeConfig } from '../config/json-config.js';
5
+ import { getModelCatalogService } from '../services/model-catalog-service.js';
4
6
  /** Minimum ms delay between processing successive messages (human-like pacing). */
5
7
  const RATE_LIMIT_MS = 1500;
6
8
  const TELEGRAM_COMMANDS = [
@@ -128,20 +130,63 @@ _All systems operational_
128
130
  break;
129
131
  case '/models':
130
132
  case '/model':
131
- if (args.startsWith('set ') && args.length > 4) {
132
- const modelId = args.substring(4).trim();
133
- await this.#bot.sendMessage(chatId, `To change model to *${modelId}*, run:\n\n\`/key set modal ${modelId}\`\n\n_Or use: twinclaw config edit_`, { parse_mode: 'Markdown' });
133
+ if (args.startsWith('list')) {
134
+ const catalog = getModelCatalogService().getAllModels();
135
+ const config = await readConfig();
136
+ const currentPrimary = config.models.primaryModel || 'not set';
137
+ let modelList = `📦 *Available Models*\n\nCurrent: *${currentPrimary}*\n\n`;
138
+ const providerGroups = new Map();
139
+ for (const model of catalog) {
140
+ const models = providerGroups.get(model.provider) || [];
141
+ models.push(model);
142
+ providerGroups.set(model.provider, models);
143
+ }
144
+ let num = 1;
145
+ for (const [provider, models] of providerGroups) {
146
+ modelList += `*${provider.toUpperCase()}:*\n`;
147
+ for (const m of models.slice(0, 5)) {
148
+ const isCurrent = m.model === currentPrimary || m.id === currentPrimary;
149
+ modelList += `${num}. ${m.name}${isCurrent ? ' ✅' : ''}\n`;
150
+ num++;
151
+ }
152
+ if (models.length > 5)
153
+ modelList += ` ...and ${models.length - 5} more\n`;
154
+ modelList += '\n';
155
+ }
156
+ modelList += '_To switch: /model set <number>_';
157
+ await this.#bot.sendMessage(chatId, modelList, { parse_mode: 'Markdown' });
158
+ }
159
+ else if (args.startsWith('set ') && args.length > 4) {
160
+ const modelIdOrNum = args.substring(4).trim();
161
+ const catalog = getModelCatalogService().getAllModels();
162
+ let newModelId = modelIdOrNum;
163
+ if (/^\d+$/.test(modelIdOrNum)) {
164
+ const idx = parseInt(modelIdOrNum, 10) - 1;
165
+ if (idx >= 0 && idx < catalog.length) {
166
+ newModelId = `${catalog[idx].provider}/${catalog[idx].model}`;
167
+ }
168
+ else {
169
+ await this.#bot.sendMessage(chatId, `❌ Invalid model number. Use /model list to see available models.`);
170
+ break;
171
+ }
172
+ }
173
+ const config = await readConfig();
174
+ config.models.primaryModel = newModelId;
175
+ await writeConfig(config);
176
+ await this.#bot.sendMessage(chatId, `✅ *Model updated!*\n\nNew primary model: *${newModelId}*\n\nRestart TwinClaw for changes to take effect.`, { parse_mode: 'Markdown' });
134
177
  }
135
178
  else {
179
+ const config = await readConfig();
180
+ const currentPrimary = config.models.primaryModel || 'not set';
136
181
  await this.#bot.sendMessage(chatId, `📦 *Models*
137
182
 
138
- Use *twinclaw config* to manage models.
183
+ Current: *${currentPrimary}*
139
184
 
140
185
  Commands:
141
186
  • /model list - List available models
142
- • /model set <id> - Set primary model
187
+ • /model set <number> - Set primary model
143
188
 
144
- _Or run: twinclaw config model_
189
+ Example: /model set 1
145
190
  `, { parse_mode: 'Markdown' });
146
191
  }
147
192
  break;
@@ -6,7 +6,8 @@ import path from 'node:path';
6
6
  import fs from 'node:fs/promises';
7
7
  import { randomUUID } from 'node:crypto';
8
8
  import { logThought } from '../utils/logger.js';
9
- import { getConfigValue } from '../config/json-config.js';
9
+ import { getConfigValue, readConfig, writeConfig } from '../config/json-config.js';
10
+ import { getModelCatalogService } from '../services/model-catalog-service.js';
10
11
  const { Client, LocalAuth, MessageMedia } = WAWebJS;
11
12
  const RATE_LIMIT_MS = 1500;
12
13
  const WHATSAPP_COMMANDS = [
@@ -70,8 +71,10 @@ export class WhatsAppHandler {
70
71
  this.#registerListeners();
71
72
  }
72
73
  // ── Command Handlers ─────────────────────────────────────────────────────────
73
- async handleCommand(chatId, command) {
74
- const cmd = command.toLowerCase().trim();
74
+ async handleCommand(chatId, command, fullText) {
75
+ const parts = command.toLowerCase().trim().split(' ');
76
+ const cmd = parts[0];
77
+ const args = fullText ? fullText.substring(cmd.length).trim() : '';
75
78
  switch (cmd) {
76
79
  case '/start':
77
80
  case '/menu':
@@ -103,12 +106,65 @@ _All systems operational_
103
106
  break;
104
107
  case '/models':
105
108
  case '/model':
106
- await this.sendText(chatId, `📦 *Models*
109
+ if (args.startsWith('list')) {
110
+ const catalog = getModelCatalogService().getAllModels();
111
+ const config = await readConfig();
112
+ const currentPrimary = config.models.primaryModel || 'not set';
113
+ let modelList = `📦 *Available Models*\n\nCurrent: ${currentPrimary}\n\n`;
114
+ const providerGroups = new Map();
115
+ for (const model of catalog) {
116
+ const models = providerGroups.get(model.provider) || [];
117
+ models.push(model);
118
+ providerGroups.set(model.provider, models);
119
+ }
120
+ let num = 1;
121
+ for (const [provider, models] of providerGroups) {
122
+ modelList += `*${provider.toUpperCase()}:*\n`;
123
+ for (const m of models.slice(0, 5)) {
124
+ const isCurrent = m.model === currentPrimary || m.id === currentPrimary;
125
+ modelList += `${num}. ${m.name}${isCurrent ? ' ✅' : ''}\n`;
126
+ num++;
127
+ }
128
+ if (models.length > 5)
129
+ modelList += ` ...and ${models.length - 5} more\n`;
130
+ modelList += '\n';
131
+ }
132
+ modelList += '_To switch: /model set <number>_';
133
+ await this.sendText(chatId, modelList);
134
+ }
135
+ else if (args.startsWith('set ') && args.length > 4) {
136
+ const modelIdOrNum = args.substring(4).trim();
137
+ const catalog = getModelCatalogService().getAllModels();
138
+ let newModelId = modelIdOrNum;
139
+ if (/^\d+$/.test(modelIdOrNum)) {
140
+ const idx = parseInt(modelIdOrNum, 10) - 1;
141
+ if (idx >= 0 && idx < catalog.length) {
142
+ newModelId = `${catalog[idx].provider}/${catalog[idx].model}`;
143
+ }
144
+ else {
145
+ await this.sendText(chatId, '❌ Invalid model number. Use /model list to see available models.');
146
+ break;
147
+ }
148
+ }
149
+ const config = await readConfig();
150
+ config.models.primaryModel = newModelId;
151
+ await writeConfig(config);
152
+ await this.sendText(chatId, `✅ *Model updated!*\n\nNew primary model: ${newModelId}\n\nRestart TwinClaw for changes to take effect.`);
153
+ }
154
+ else {
155
+ const config = await readConfig();
156
+ const currentPrimary = config.models.primaryModel || 'not set';
157
+ await this.sendText(chatId, `📦 *Models*
158
+
159
+ Current: ${currentPrimary}
107
160
 
108
- Use *twinclaw config* to manage models.
161
+ Commands:
162
+ • /model list - List available models
163
+ • /model set <number> - Set primary model
109
164
 
110
- Or run: *twinclaw config model* in terminal
165
+ Example: /model set 1
111
166
  `);
167
+ }
112
168
  break;
113
169
  case '/keys':
114
170
  case '/key':
@@ -246,8 +302,8 @@ Run: *twinclaw channels* in terminal
246
302
  const text = msg.body;
247
303
  // Handle commands
248
304
  if (text.trim().startsWith('/')) {
249
- const command = text.trim().split(' ')[0];
250
- await this.handleCommand(msg.from, command);
305
+ const command = text.trim();
306
+ await this.handleCommand(msg.from, command, text.trim());
251
307
  return;
252
308
  }
253
309
  await this.onMessage?.(base);
@@ -1,6 +1,9 @@
1
1
  import { STATIC_MODEL_CATALOG } from '../config/model-catalog.js';
2
2
  import { getSecretVaultService } from './secret-vault.js';
3
+ import { getGitHubCopilotService } from './github-copilot-service.js';
3
4
  const GITHUB_CATALOG_URL = 'https://models.github.ai/catalog/models';
5
+ const OPENROUTER_API_URL = 'https://openrouter.ai/api/v1/models';
6
+ const GROQ_API_URL = 'https://api.groq.com/openai/v1/models';
4
7
  const CACHE_TTL_MS = 60 * 60 * 1000; // 1 hour
5
8
  function isRecord(value) {
6
9
  return typeof value === 'object' && value !== null && !Array.isArray(value);
@@ -55,8 +58,134 @@ function toGitHubModelEntry(value) {
55
58
  }
56
59
  return entry;
57
60
  }
61
+ const GITHUB_COPILOT_TOKEN_URL = 'https://api.github.com/copilot_internal/v2/token';
62
/**
 * Trade a GitHub OAuth token for a short-lived Copilot API token.
 * Resolves to the Copilot token string, or null on any failure
 * (non-OK response, network error, or missing token in the payload).
 * @param {string} githubToken - GitHub OAuth token to exchange
 * @returns {Promise<string|null>}
 */
async function exchangeForCopilotToken(githubToken) {
    const requestInit = {
        method: 'GET',
        headers: {
            'Authorization': `token ${githubToken}`,
            'Accept': 'application/json'
        }
    };
    try {
        const response = await fetch(GITHUB_COPILOT_TOKEN_URL, requestInit);
        if (!response.ok) {
            console.warn('[ModelCatalog] Failed to exchange for Copilot token:', response.status);
            return null;
        }
        const payload = await response.json();
        return payload.token || null;
    }
    catch (error) {
        console.warn('[ModelCatalog] Error exchanging for Copilot token:', error);
        return null;
    }
}
83
/**
 * List OpenRouter models whose prompt and completion pricing are both zero,
 * mapped into this app's catalog-entry shape.
 * Returns [] when no API key is supplied or on any fetch/HTTP failure.
 * @param {string|undefined} apiKey - OpenRouter API key
 * @returns {Promise<Array<object>>}
 */
async function fetchOpenRouterFreeModels(apiKey) {
    if (!apiKey) {
        return [];
    }
    const headers = {
        'Authorization': `Bearer ${apiKey}`,
        'HTTP-Referer': 'https://twinclaw.ai',
        'X-Title': 'TwinClaw'
    };
    // Free == both prompt and completion prices are the string '0'.
    const isFree = (entry) => entry.pricing?.prompt === '0' && entry.pricing?.completion === '0';
    const toCatalogEntry = (entry) => ({
        id: `openrouter-${entry.id.replace(/[^a-z0-9]/gi, '-')}`,
        name: entry.name || entry.id,
        provider: 'openrouter',
        model: entry.id,
        contextLength: entry.context_length || 32000,
        supportsStreaming: true,
        pricing: 'Free',
        description: entry.description || `Free model via OpenRouter (${entry.architecture?.modality || 'text'})`
    });
    try {
        const response = await fetch(OPENROUTER_API_URL, { headers });
        if (!response.ok) {
            console.warn('[ModelCatalog] Failed to fetch OpenRouter models:', response.status);
            return [];
        }
        const payload = await response.json();
        const entries = payload.data || [];
        return entries.filter(isFree).map(toCatalogEntry);
    }
    catch (error) {
        console.warn('[ModelCatalog] Error fetching OpenRouter models:', error);
        return [];
    }
}
118
/**
 * List active Groq models mapped into this app's catalog-entry shape.
 * Returns [] when no API key is supplied or on any fetch/HTTP failure.
 * Fix: the description previously divided the raw `m.context_window` without
 * the `|| 32768` fallback applied to contextLength, yielding "NaNK context"
 * whenever the API omitted that field.
 * @param {string|undefined} apiKey - Groq API key
 * @returns {Promise<Array<object>>}
 */
async function fetchGroqModels(apiKey) {
    if (!apiKey)
        return [];
    try {
        const response = await fetch(GROQ_API_URL, {
            headers: {
                'Authorization': `Bearer ${apiKey}`,
                'Content-Type': 'application/json'
            }
        });
        if (!response.ok) {
            console.warn('[ModelCatalog] Failed to fetch Groq models:', response.status);
            return [];
        }
        const data = await response.json();
        const models = data.data || [];
        return models
            .filter(m => m.active)
            .map(m => {
            // Resolve the context window once so contextLength and the
            // human-readable description always agree (and never show NaN).
            const contextWindow = m.context_window || 32768;
            return {
                id: `groq-${m.id.replace(/[^a-z0-9-]/gi, '-')}`,
                name: m.id,
                provider: 'groq',
                model: m.id,
                contextLength: contextWindow,
                supportsStreaming: true,
                pricing: 'Free (with rate limits)',
                description: `Free model on Groq (${(contextWindow / 1000).toFixed(0)}K context)`
            };
        });
    }
    catch (error) {
        console.warn('[ModelCatalog] Error fetching Groq models:', error);
        return [];
    }
}
58
152
  class ModelCatalogService {
59
153
  githubCache = null;
154
+ openRouterCache = null;
155
+ groqCache = null;
156
+ async fetchProviderModels(forceRefresh = false) {
157
+ const vault = getSecretVaultService();
158
+ const openRouterKey = vault.readSecret('OPENROUTER_API_KEY') || getSecretVaultService().readSecret('OPENROUTER_API_KEY');
159
+ const groqKey = vault.readSecret('GROQ_API_KEY') || getSecretVaultService().readSecret('GROQ_API_KEY');
160
+ // Fetch OpenRouter free models
161
+ if (!forceRefresh && this.openRouterCache) {
162
+ const elapsed = Date.now() - this.openRouterCache.timestamp;
163
+ if (elapsed < CACHE_TTL_MS) {
164
+ // Cache still valid
165
+ }
166
+ else {
167
+ this.openRouterCache = null;
168
+ }
169
+ }
170
+ if (!this.openRouterCache && openRouterKey) {
171
+ const models = await fetchOpenRouterFreeModels(openRouterKey);
172
+ this.openRouterCache = { data: models, timestamp: Date.now() };
173
+ }
174
+ // Fetch Groq models
175
+ if (!forceRefresh && this.groqCache) {
176
+ const elapsed = Date.now() - this.groqCache.timestamp;
177
+ if (elapsed < CACHE_TTL_MS) {
178
+ // Cache still valid
179
+ }
180
+ else {
181
+ this.groqCache = null;
182
+ }
183
+ }
184
+ if (!this.groqCache && groqKey) {
185
+ const models = await fetchGroqModels(groqKey);
186
+ this.groqCache = { data: models, timestamp: Date.now() };
187
+ }
188
+ }
60
189
  async fetchGitHubModels(forceRefresh = false) {
61
190
  if (!forceRefresh && this.githubCache) {
62
191
  const elapsed = Date.now() - this.githubCache.timestamp;
@@ -64,18 +193,39 @@ class ModelCatalogService {
64
193
  return this.githubCache.data;
65
194
  }
66
195
  }
67
- const vault = getSecretVaultService();
68
- const githubToken = vault.readSecret('GITHUB_TOKEN');
69
- if (!githubToken) {
70
- console.warn('[ModelCatalog] GITHUB_TOKEN not found in vault. Cannot fetch GitHub models.');
71
- return [];
196
+ // Try to get Copilot token from GitHubCopilotService
197
+ let copilotToken = null;
198
+ try {
199
+ const copilotService = getGitHubCopilotService();
200
+ const auth = copilotService.getAuth();
201
+ if (auth?.copilotToken) {
202
+ copilotToken = auth.copilotToken;
203
+ }
204
+ }
205
+ catch {
206
+ // GitHubCopilotService not initialized yet
207
+ }
208
+ // Fallback to GitHub OAuth token from vault
209
+ if (!copilotToken) {
210
+ const vault = getSecretVaultService();
211
+ const githubToken = vault.readSecret('GITHUB_TOKEN');
212
+ if (!githubToken) {
213
+ console.warn('[ModelCatalog] No GitHub token available. Cannot fetch GitHub models.');
214
+ return [];
215
+ }
216
+ // For GitHub OAuth token, we need to exchange it for Copilot token
217
+ copilotToken = await exchangeForCopilotToken(githubToken);
218
+ if (!copilotToken) {
219
+ console.warn('[ModelCatalog] Failed to get Copilot token. Cannot fetch GitHub models.');
220
+ return [];
221
+ }
72
222
  }
73
223
  try {
74
224
  const response = await fetch(GITHUB_CATALOG_URL, {
75
225
  method: 'GET',
76
226
  headers: {
77
- 'Authorization': `Bearer ${githubToken}`,
78
- 'Accept': 'application/vnd.github.v3+json',
227
+ 'Authorization': `Bearer ${copilotToken}`,
228
+ 'Accept': 'application/vnd.github+json',
79
229
  'X-GitHub-Api-Version': '2022-11-28'
80
230
  }
81
231
  });
@@ -128,10 +278,14 @@ class ModelCatalogService {
128
278
  }
129
279
  getAllModels() {
130
280
  const githubModels = this.getGitHubModelsCatalog();
131
- return [...STATIC_MODEL_CATALOG, ...githubModels];
281
+ const openRouterModels = this.openRouterCache?.data || [];
282
+ const groqModels = this.groqCache?.data || [];
283
+ return [...STATIC_MODEL_CATALOG, ...githubModels, ...openRouterModels, ...groqModels];
132
284
  }
133
285
  clearCache() {
134
286
  this.githubCache = null;
287
+ this.openRouterCache = null;
288
+ this.groqCache = null;
135
289
  }
136
290
  slugify(text, fallback = 'model') {
137
291
  const slug = text
@@ -151,4 +305,5 @@ export function getModelCatalogService() {
151
305
/**
 * Warms the model catalog service at startup by fetching the GitHub Models
 * catalog and the OpenRouter/Groq provider catalogs.
 *
 * The two fetches are independent of each other, so they run concurrently
 * instead of serially.
 *
 * @returns {Promise<void>}
 */
export async function initializeModelCatalog() {
    const service = getModelCatalogService();
    await Promise.all([
        service.fetchGitHubModels(),
        service.fetchProviderModels(),
    ]);
}
@@ -1,6 +1,6 @@
1
1
  import { randomUUID } from 'node:crypto';
2
2
  import { MODEL_SLOT_IDS, } from '../types/model-routing.js';
3
- import { scrubSensitiveText } from '../utils/logger.js';
3
+ import { scrubSensitiveText, logThought } from '../utils/logger.js';
4
4
  import { getModelRoutingSetting, listModelRoutingEvents, saveModelRoutingEvent, saveModelRoutingSetting, } from './db-model-routing.js';
5
5
  import { getSecretVaultService } from './secret-vault.js';
6
6
  import { getGitHubCopilotService } from './github-copilot-service.js';
@@ -434,11 +434,18 @@ export class ModelRouter {
434
434
  }
435
435
  async getApiKey(config) {
436
436
  const envName = config.apiKeyEnvName;
437
+ const providerId = this.resolveProviderId(config);
437
438
  const key = getSecretVaultService().readSecret(envName);
438
439
  if (!key) {
439
- console.warn(`Warning: API key ${envName} is not set in environment.`);
440
+ const configKey = getConfigValue(envName);
441
+ if (configKey) {
442
+ logThought(`[Router] Using API key from config for ${providerId}: ${envName.slice(0, 8)}...`);
443
+ return configKey;
444
+ }
445
+ console.warn(`[Router] Warning: API key ${envName} is not set in environment or config (provider: ${providerId}).`);
440
446
  return '';
441
447
  }
448
+ logThought(`[Router] Using API key from vault for ${providerId}: ${envName.slice(0, 8)}...`);
442
449
  if (envName !== 'GITHUB_TOKEN') {
443
450
  return key;
444
451
  }
@@ -1009,7 +1016,71 @@ export class ModelRouter {
1009
1016
  }
1010
1017
  loadModelsFromConfig() {
1011
1018
  const configModels = [];
1012
- // Check if PRIMARY_MODEL is set - use config.models.primaryModel if available
1019
+ const definitionsJson = getConfigValue('MODEL_DEFINITIONS');
1020
+ const primaryModel = getConfigValue('PRIMARY_MODEL');
1021
+ logThought(`[Router] loadModelsFromConfig: PRIMARY_MODEL=${primaryModel}, MODEL_DEFINITIONS present=${!!definitionsJson}`);
1022
+ // First, check if config.models.definitions has any models (from model picker)
1023
+ // If so, use those instead of hardcoded fallbacks
1024
+ if (definitionsJson) {
1025
+ try {
1026
+ const definitions = JSON.parse(definitionsJson);
1027
+ if (definitions && definitions.length > 0) {
1028
+ const primaryModel = getConfigValue('PRIMARY_MODEL') ?? '';
1029
+ // Normalize model ID for comparison (modal/zai-glm-5-fp8 -> modal-zai-glm-5-fp8)
1030
+ const normalizeId = (id) => id.replace('/', '-').toLowerCase();
1031
+ const normalizedPrimary = normalizeId(primaryModel);
1032
+ // Sort models so primary is first, then the rest
1033
+ const sortedDefinitions = [...definitions].sort((a, b) => {
1034
+ const aNorm = normalizeId(a.id);
1035
+ const bNorm = normalizeId(b.id);
1036
+ if (aNorm === normalizedPrimary)
1037
+ return -1;
1038
+ if (bNorm === normalizedPrimary)
1039
+ return 1;
1040
+ return 0;
1041
+ });
1042
+ for (const def of sortedDefinitions) {
1043
+ configModels.push({
1044
+ id: def.id,
1045
+ model: def.model,
1046
+ baseURL: def.baseURL,
1047
+ apiKeyEnvName: def.apiKeyEnvName,
1048
+ });
1049
+ }
1050
+ // Even with definitions, add fallback providers in case primary fails
1051
+ // This ensures we have working alternatives if the user's primary model fails
1052
+ const groqApiKey = getConfigValue('GROQ_API_KEY');
1053
+ const openRouterApiKey = getConfigValue('OPENROUTER_API_KEY');
1054
+ const existingIds = new Set(configModels.map(m => m.id));
1055
+ // Add Groq as fallback if not already in definitions
1056
+ if (groqApiKey && !existingIds.has('groq-qwen3-32b') && !existingIds.has(MODEL_SLOT_IDS.FALLBACK_1)) {
1057
+ configModels.push({
1058
+ id: MODEL_SLOT_IDS.FALLBACK_1,
1059
+ model: 'qwen/qwen3-32b',
1060
+ baseURL: 'https://api.groq.com/openai/v1/chat/completions',
1061
+ apiKeyEnvName: 'GROQ_API_KEY',
1062
+ });
1063
+ }
1064
+ // Add OpenRouter free model as secondary fallback
1065
+ if (openRouterApiKey && !existingIds.has('openrouter-arcee-ai-trinity-mini-free') && !existingIds.has(MODEL_SLOT_IDS.FALLBACK_2)) {
1066
+ configModels.push({
1067
+ id: MODEL_SLOT_IDS.FALLBACK_2,
1068
+ model: 'arcee-ai/trinity-mini:free',
1069
+ baseURL: 'https://openrouter.ai/api/v1/chat/completions',
1070
+ apiKeyEnvName: 'OPENROUTER_API_KEY',
1071
+ });
1072
+ }
1073
+ logThought(`[Router] Loaded ${configModels.length} models from definitions. PRIMARY: ${primaryModel}`);
1074
+ // Return all models including fallbacks
1075
+ return configModels;
1076
+ }
1077
+ }
1078
+ catch (e) {
1079
+ logThought(`[Router] Failed to load definitions: ${e}`);
1080
+ // JSON parse failed, fall through to legacy logic
1081
+ }
1082
+ }
1083
+ // Legacy logic: Check if PRIMARY_MODEL is set - use config.models.primaryModel if available
1013
1084
  const primaryModelId = getConfigValue('PRIMARY_MODEL');
1014
1085
  const modalApiKey = getConfigValue('MODAL_API_KEY');
1015
1086
  const openRouterApiKey = getConfigValue('OPENROUTER_API_KEY');
@@ -1124,6 +1195,7 @@ export class ModelRouter {
1124
1195
  apiKeyEnvName: 'MODAL_API_KEY',
1125
1196
  });
1126
1197
  }
1198
+ logThought(`[Router] Final loaded models (${configModels.length}): ${configModels.map(m => `${m.id}->${m.model.split('/').pop()}`).join(', ')}`);
1127
1199
  return configModels;
1128
1200
  }
1129
1201
  }
@@ -0,0 +1,192 @@
1
+ import * as fs from 'fs';
2
+ import * as path from 'path';
3
+ import { getWorkspaceDir } from '../config/workspace.js';
4
+ import { logThought } from '../utils/logger.js';
5
/**
 * Resolves the on-disk memory directory for a user. The user id is
 * sanitized (anything outside [a-zA-Z0-9_-] becomes '_') so it is safe to
 * use as a single path segment.
 */
function getUserMemoryDir(userId) {
    const safeId = userId.replace(/[^a-zA-Z0-9_-]/g, '_');
    return path.join(getWorkspaceDir(), 'memory', 'users', safeId);
}
9
/**
 * Ensures the user's memory directory exists and returns its path.
 *
 * mkdirSync with { recursive: true } is a no-op when the directory already
 * exists, so no existsSync pre-check is needed (the check/create pair was
 * also a TOCTOU race).
 *
 * @param {string} userId - Raw user id; sanitized by getUserMemoryDir.
 * @returns {string} Absolute path to the user's memory directory.
 */
function ensureUserMemoryDir(userId) {
    const dir = getUserMemoryDir(userId);
    fs.mkdirSync(dir, { recursive: true });
    return dir;
}
16
/**
 * Whether the user already has persisted memory. The profile file is the
 * marker: a user counts as "known" iff user_profile.md exists.
 */
export function hasUserMemory(userId) {
    const profilePath = path.join(getUserMemoryDir(userId), 'user_profile.md');
    return fs.existsSync(profilePath);
}
21
/**
 * Loads the full memory object for a user from the four front-matter files
 * in their memory directory.
 *
 * @param {string} userId - Raw user id; sanitized for path use.
 * @returns {object|null} The memory object, or null when the user has no
 *   profile file yet (i.e. no persisted memory). Each section falls back to
 *   its defaults when its file is missing or unparsable.
 */
export function loadUserMemory(userId) {
    const dir = getUserMemoryDir(userId);
    const profilePath = path.join(dir, 'user_profile.md');
    // The profile file doubles as the "user exists" marker.
    if (!fs.existsSync(profilePath)) {
        return null;
    }
    // Note: the original applied `|| getDefaultProfile()` twice to the
    // profile; defaulting once per section is sufficient.
    return {
        profile: loadYamlFrontMatter(profilePath) || getDefaultProfile(),
        interactionStyle: loadYamlFrontMatter(path.join(dir, 'interaction_style.md')) || getDefaultInteractionStyle(),
        currentContext: loadYamlFrontMatter(path.join(dir, 'current_context.md')) || getDefaultCurrentContext(),
        referenceHooks: loadYamlFrontMatter(path.join(dir, 'reference_hooks.md')) || getDefaultReferenceHooks(),
    };
}
41
/**
 * Persists all four memory sections for a user as YAML front-matter files,
 * creating the memory directory if needed.
 *
 * The profile's lastUpdated stamp is written onto a copy, so the caller's
 * `memory` argument is no longer mutated as a side effect.
 *
 * @param {string} userId - Raw user id; sanitized for path use.
 * @param {object} memory - { profile, interactionStyle, currentContext, referenceHooks }.
 */
export function saveUserMemory(userId, memory) {
    const dir = ensureUserMemoryDir(userId);
    const stampedProfile = { ...memory.profile, lastUpdated: new Date().toISOString() };
    writeYamlFrontMatter(path.join(dir, 'user_profile.md'), stampedProfile);
    writeYamlFrontMatter(path.join(dir, 'interaction_style.md'), memory.interactionStyle);
    writeYamlFrontMatter(path.join(dir, 'current_context.md'), memory.currentContext);
    writeYamlFrontMatter(path.join(dir, 'reference_hooks.md'), memory.referenceHooks);
    logThought(`[UserMemory] Saved memory for user: ${userId}`);
}
54
/**
 * Merges partial profile updates into the user's stored memory and saves
 * it. First-time users get a fresh default memory object as the base.
 */
export function updateUserProfile(userId, updates) {
    const memory = loadUserMemory(userId) ?? getDefaultUserMemory();
    memory.profile = { ...memory.profile, ...updates };
    saveUserMemory(userId, memory);
}
65
/**
 * True when this is the user's first interaction, i.e. no memory has been
 * persisted for them yet.
 */
export function checkFirstInteraction(userId) {
    return hasUserMemory(userId) === false;
}
68
/**
 * Reads a file and parses its leading YAML front-matter block
 * (--- ... ---) with the simple parser.
 *
 * @param {string} filePath - File to read.
 * @returns {object|null} Parsed key/value object, or null when the file is
 *   missing, unreadable, or has no front-matter block. Callers treat null
 *   as "use defaults".
 */
function loadYamlFrontMatter(filePath) {
    try {
        const content = fs.readFileSync(filePath, 'utf-8');
        // \r?\n tolerates CRLF line endings in externally edited files;
        // parseYamlSimple trims each line, so stray \r is harmless.
        const match = content.match(/^---\r?\n([\s\S]*?)\r?\n---/);
        return match ? parseYamlSimple(match[1]) : null;
    }
    catch {
        // Best-effort read: any I/O failure is treated as "no data".
        return null;
    }
}
82
/**
 * Serializes `data` with stringifyYamlSimple and writes it to `filePath`
 * as a YAML front-matter document (---\n<yaml>\n---\n).
 *
 * Bug fix: stringifyYamlSimple does not emit a trailing newline, so the
 * previous template produced "---\nkey: val---\n" — the closing delimiter
 * fused onto the last value and loadYamlFrontMatter's `\n---` regex never
 * matched, making every saved file fail to round-trip. A newline is now
 * guaranteed before the closing delimiter.
 *
 * @param {string} filePath - Destination file.
 * @param {object} data - Flat-ish data object to serialize.
 */
function writeYamlFrontMatter(filePath, data) {
    const yamlContent = stringifyYamlSimple(data);
    const body = yamlContent === '' || yamlContent.endsWith('\n')
        ? yamlContent
        : `${yamlContent}\n`;
    fs.writeFileSync(filePath, `---\n${body}---\n`, 'utf-8');
}
87
/**
 * Minimal parser for flat YAML-ish key/value text.
 *
 * Supported values: plain strings, the literals true/false, and inline
 * [a, b] lists (split on commas, empty items dropped). Blank lines and
 * `#` comments are skipped; lines without a colon are ignored; nesting is
 * not supported.
 *
 * @param {string} yaml - Raw front-matter body.
 * @returns {object} Parsed key/value map.
 */
function parseYamlSimple(yaml) {
    const result = {};
    for (const rawLine of yaml.split('\n')) {
        const line = rawLine.trim();
        if (line === '' || line.startsWith('#')) {
            continue;
        }
        const sep = line.indexOf(':');
        if (sep < 0) {
            continue;
        }
        const key = line.slice(0, sep).trim();
        const raw = line.slice(sep + 1).trim();
        if (raw.startsWith('[') && raw.endsWith(']')) {
            result[key] = raw
                .slice(1, -1)
                .split(',')
                .map((item) => item.trim())
                .filter(Boolean);
        }
        else if (raw === 'true' || raw === 'false') {
            result[key] = raw === 'true';
        }
        else {
            result[key] = raw;
        }
    }
    return result;
}
114
/**
 * Minimal YAML serializer mirroring parseYamlSimple.
 *
 * Strings pass through unquoted; booleans/numbers are stringified; arrays
 * become inline [a, b] lists; plain nested objects become indented blocks
 * (one space per level); null/undefined entries are omitted.
 *
 * @param {*} data - Value to serialize.
 * @param {number} [indent=0] - Current nesting depth (spaces of prefix).
 * @returns {string} Serialized text without a trailing newline.
 */
function stringifyYamlSimple(data, indent = 0) {
    if (data === null || data === undefined) {
        return '';
    }
    if (typeof data === 'string') {
        return data;
    }
    if (typeof data === 'boolean' || typeof data === 'number') {
        return String(data);
    }
    if (Array.isArray(data)) {
        return `[${data.join(', ')}]`;
    }
    if (typeof data !== 'object') {
        // Fallback for exotic values (functions, symbols, ...).
        return String(data);
    }
    const pad = ' '.repeat(indent);
    const out = [];
    for (const [key, value] of Object.entries(data)) {
        if (value === null || value === undefined) {
            continue;
        }
        if (typeof value === 'object' && !Array.isArray(value)) {
            out.push(`${pad}${key}:`, stringifyYamlSimple(value, indent + 1));
        }
        else {
            out.push(`${pad}${key}: ${stringifyYamlSimple(value)}`);
        }
    }
    return out.join('\n');
}
141
/**
 * Builds an empty user-profile skeleton. A fresh object is returned on
 * every call so callers can mutate it safely.
 */
function getDefaultProfile() {
    const profile = {
        identity: {},
        professionalBackground: {},
        personalContext: {},
        communicationStyle: {},
        goals: { shortTerm: [], longTerm: [] },
        painPoints: [],
    };
    return profile;
}
151
/**
 * Builds an empty interaction-style skeleton; returns a fresh object each
 * call.
 */
function getDefaultInteractionStyle() {
    const style = {
        communicationGuidelines: {},
        dos: [],
        donts: [],
        responsePatterns: {},
        contextAwareness: {},
    };
    return style;
}
160
/**
 * Builds an empty current-context skeleton; returns a fresh object each
 * call.
 */
function getDefaultCurrentContext() {
    const context = {
        activeProjects: [],
        recentConversations: [],
        ongoingThreads: [],
        changedSinceLastTime: [],
    };
    return context;
}
168
/**
 * Builds an empty reference-hooks skeleton; returns a fresh object each
 * call.
 */
function getDefaultReferenceHooks() {
    const hooks = {
        conversationalAnchors: [],
        keyFacts: [],
        namesAndEntities: {},
        insideReferences: [],
    };
    return hooks;
}
176
/**
 * Assembles a complete default memory object from the per-section default
 * factories. Used to seed memory for first-time users.
 */
export function getDefaultUserMemory() {
    const memory = {
        profile: getDefaultProfile(),
        interactionStyle: getDefaultInteractionStyle(),
        currentContext: getDefaultCurrentContext(),
        referenceHooks: getDefaultReferenceHooks(),
    };
    return memory;
}
184
/**
 * Lists the (sanitized) ids of all users that have a memory directory.
 * Returns an empty array when the users directory does not exist yet.
 */
export function listUsersWithMemory() {
    const usersDir = path.join(getWorkspaceDir(), 'memory', 'users');
    if (!fs.existsSync(usersDir)) {
        return [];
    }
    const names = [];
    for (const entry of fs.readdirSync(usersDir, { withFileTypes: true })) {
        if (entry.isDirectory()) {
            names.push(entry.name);
        }
    }
    return names;
}
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "twinclaw",
3
- "version": "1.2.9",
3
+ "version": "1.3.1",
4
4
  "description": "Eagle-eyed agentic AI gateway with multi-modal hooks and proactive memory.",
5
5
  "main": "dist/index.js",
6
6
  "bin": {