chainlesschain 0.40.2 → 0.40.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,465 @@
1
+ /**
2
+ * CLI SlotFiller — parameter slot filling for agentic workflows
3
+ *
4
+ * Ported from desktop-app-vue/src/main/ai-engine/slot-filler.js
5
+ * Adapted for CLI with InteractionAdapter support for terminal and WebSocket modes.
6
+ *
7
+ * Flow: detect missing slots → infer from context → ask user → LLM inference (optional) → validate
8
+ */
9
+
10
+ import { EventEmitter } from "events";
11
+
12
/**
 * Required slots per intent type — must be filled before execution.
 * Frozen (shallow) so shared module state cannot be mutated by callers;
 * the inner arrays are still live objects, so treat them as read-only.
 */
const REQUIRED_SLOTS = Object.freeze({
  create_file: ["fileType", "path"],
  edit_file: ["target"],
  deploy: ["platform"],
  refactor: ["scope"],
  test: ["target"],
  analyze: ["target"],
  search: ["query"],
  install: ["package"],
  generate: ["type"],
});
26
+
27
/**
 * Optional slots per intent type — filled opportunistically (context
 * inference first, then LLM inference); never prompted for interactively.
 * Frozen (shallow) to guard against accidental mutation of shared state.
 */
const OPTIONAL_SLOTS = Object.freeze({
  create_file: ["template", "content", "overwrite"],
  edit_file: ["action", "position"],
  deploy: ["env", "branch", "dryRun"],
  refactor: ["strategy", "dryRun"],
  test: ["framework", "coverage", "watch"],
  analyze: ["outputFormat", "depth"],
  search: ["directory", "fileType"],
  install: ["version", "dev"],
  generate: ["output", "template"],
});
41
+
42
/**
 * Slot prompts — question text, type, and options for user interaction.
 * `type` is either "select" (with an `options` list of { name, value })
 * or "input" (free text). Slots without an entry here fall back to a
 * generic input prompt in `askUser`. Frozen (shallow) against mutation.
 */
const SLOT_PROMPTS = Object.freeze({
  fileType: {
    question: "What type of file do you want to create?",
    type: "select",
    options: [
      { name: "JavaScript", value: "js" },
      { name: "TypeScript", value: "ts" },
      { name: "Python", value: "py" },
      { name: "JSON", value: "json" },
      { name: "Markdown", value: "md" },
      { name: "Other", value: "other" },
    ],
  },
  path: {
    question: "Where should the file be created? (path)",
    type: "input",
  },
  target: {
    question: "Which file or directory should be targeted?",
    type: "input",
  },
  platform: {
    question: "Which platform are you deploying to?",
    type: "select",
    options: [
      { name: "Docker", value: "docker" },
      { name: "Vercel", value: "vercel" },
      { name: "AWS", value: "aws" },
      { name: "Local", value: "local" },
    ],
  },
  scope: {
    question: "What is the scope of the refactoring?",
    type: "select",
    options: [
      { name: "Single file", value: "file" },
      { name: "Directory", value: "directory" },
      { name: "Module", value: "module" },
      { name: "Full project", value: "project" },
    ],
  },
  type: {
    question: "What do you want to generate?",
    type: "select",
    options: [
      { name: "Component", value: "component" },
      { name: "Test", value: "test" },
      { name: "API endpoint", value: "api" },
      { name: "Config file", value: "config" },
    ],
  },
  query: {
    question: "What are you searching for?",
    type: "input",
  },
  package: {
    question: "Which package do you want to install?",
    type: "input",
  },
  framework: {
    question: "Which test framework?",
    type: "select",
    options: [
      { name: "Vitest", value: "vitest" },
      { name: "Jest", value: "jest" },
      { name: "Mocha", value: "mocha" },
    ],
  },
});
114
+
115
/**
 * File extension → fileType mapping used by context inference.
 * Added ".jsx" for consistency (".tsx" was already mapped to "ts").
 * Frozen (shallow) to guard against accidental mutation.
 */
const EXT_TO_FILE_TYPE = Object.freeze({
  ".js": "js",
  ".mjs": "js",
  ".cjs": "js",
  ".jsx": "js",
  ".ts": "ts",
  ".tsx": "ts",
  ".py": "py",
  ".json": "json",
  ".md": "md",
  ".html": "html",
  ".css": "css",
  ".vue": "vue",
  ".yaml": "yaml",
  ".yml": "yaml",
});
133
+
134
export class CLISlotFiller extends EventEmitter {
  /**
   * @param {object} options
   * @param {function} [options.llmChat] - LLM chat function for inference
   * @param {object} [options.db] - Database for history (prepare/exec API)
   * @param {import("./interaction-adapter.js").InteractionAdapter} options.interaction
   */
  constructor({ llmChat, db, interaction }) {
    super();
    this.llmChat = llmChat || null;
    this.db = db || null;
    this.interaction = interaction;
    // Lazily flipped by _ensureHistoryTable(); initialized here so the
    // instance shape is stable from construction.
    this._tableCreated = false;
  }

  /**
   * True when a slot holds a usable value.
   *
   * Unlike a plain truthiness check, this accepts legitimate falsy
   * values such as `false` and `0` (e.g. the boolean `dryRun`/`dev`
   * optional slots) and only treats null, undefined, and the empty
   * string as "not filled". Using truthiness here caused already-filled
   * boolean slots to be re-asked, re-inferred, or dropped from history.
   *
   * @param {*} value
   * @returns {boolean}
   */
  _isFilled(value) {
    return value != null && value !== "";
  }

  /**
   * Main flow: detect missing required slots → infer → ask user → validate.
   *
   * Emits "slot-inferred" (source: "context" | "llm" | "preference") and
   * "slot-filled" (source: "user") as slots are resolved.
   *
   * @param {{ type: string, entities: object }} intent - parsed intent
   * @param {object} context - project context (cwd, files, etc.)
   * @returns {Promise<{ entities: object, validation: object, filledSlots: string[], missingRequired: string[] }>}
   */
  async fillSlots(intent, context = {}) {
    const intentType = intent.type || "unknown";
    // Copy so the caller's intent object is never mutated.
    const entities = { ...(intent.entities || {}) };
    const filledSlots = [];

    const requiredSlots = REQUIRED_SLOTS[intentType] || [];
    const optionalSlots = OPTIONAL_SLOTS[intentType] || [];

    // Step 1: Infer from context (cheap, rules-based).
    for (const slot of [...requiredSlots, ...optionalSlots]) {
      if (this._isFilled(entities[slot])) continue; // Already filled

      const inferred = this.inferFromContext(slot, context);
      if (inferred !== null) {
        entities[slot] = inferred;
        filledSlots.push(slot);
        this.emit("slot-inferred", {
          slot,
          value: inferred,
          source: "context",
        });
      }
    }

    // Step 2: Ask user for missing required slots.
    for (const slot of requiredSlots) {
      if (this._isFilled(entities[slot])) continue;

      try {
        const value = await this.askUser(slot);
        if (this._isFilled(value)) {
          entities[slot] = value;
          filledSlots.push(slot);
          this.emit("slot-filled", { slot, value, source: "user" });
        }
      } catch (_err) {
        // User cancelled or timeout — leave slot empty (best-effort).
      }
    }

    // Step 3: LLM inference for remaining optional slots.
    if (this.llmChat) {
      const missingOptional = optionalSlots.filter(
        (s) => !this._isFilled(entities[s]),
      );
      if (missingOptional.length > 0) {
        try {
          const inferred = await this.inferWithLLM(
            missingOptional,
            context,
            entities,
            intentType,
          );
          for (const [slot, value] of Object.entries(inferred)) {
            // Never overwrite a slot filled by an earlier, more
            // trustworthy step (context inference or the user).
            if (this._isFilled(value) && !this._isFilled(entities[slot])) {
              entities[slot] = value;
              filledSlots.push(slot);
              this.emit("slot-inferred", { slot, value, source: "llm" });
            }
          }
        } catch (_err) {
          // LLM inference failure is non-critical (best-effort).
        }
      }
    }

    // Step 4: Fall back to learned user preferences for still-missing
    // required slots (only possible when the user declined to answer).
    if (this.db) {
      for (const slot of requiredSlots) {
        if (!this._isFilled(entities[slot])) {
          try {
            const preference = await this.learnUserPreference(intentType, slot);
            if (preference) {
              entities[slot] = preference;
              filledSlots.push(slot);
              this.emit("slot-inferred", {
                slot,
                value: preference,
                source: "preference",
              });
            }
          } catch (_err) {
            // Non-critical — preference learning is opportunistic.
          }
        }
      }
    }

    const validation = this.validateSlots(intentType, entities);

    return {
      entities,
      validation,
      filledSlots,
      missingRequired: validation.missingRequired,
    };
  }

  /**
   * Infer a slot value from context (rules-based).
   *
   * @param {string} slotName
   * @param {object} context - may carry currentFile, selectedText, cwd,
   *   hasDockerfile, hasVercelConfig, hasVitest, hasJest
   * @returns {string|null} inferred value, or null when nothing applies
   */
  inferFromContext(slotName, context) {
    switch (slotName) {
      case "fileType": {
        if (context.currentFile) {
          // Last "."-suffix of the path, e.g. ".ts" from "src/a.ts".
          const ext = context.currentFile.match(/\.[^.]+$/)?.[0];
          if (ext && EXT_TO_FILE_TYPE[ext]) {
            return EXT_TO_FILE_TYPE[ext];
          }
        }
        return null;
      }

      case "target": {
        if (context.currentFile) return context.currentFile;
        if (context.selectedText) return context.selectedText;
        return null;
      }

      case "path": {
        if (context.cwd) return context.cwd;
        return null;
      }

      case "platform": {
        // Dockerfile wins over Vercel config when both are present.
        if (context.hasDockerfile) return "docker";
        if (context.hasVercelConfig) return "vercel";
        return null;
      }

      case "framework": {
        if (context.hasVitest) return "vitest";
        if (context.hasJest) return "jest";
        return null;
      }

      case "directory": {
        return context.cwd || null;
      }

      default:
        return null;
    }
  }

  /**
   * Ask the user to fill a slot via the interaction adapter.
   * Falls back to a generic free-text prompt for slots without an entry
   * in SLOT_PROMPTS.
   *
   * @param {string} slotName
   * @returns {Promise<string>} the user's answer
   */
  async askUser(slotName) {
    const prompt = SLOT_PROMPTS[slotName];
    if (!prompt) {
      return this.interaction.askInput(
        `Please provide a value for "${slotName}":`,
      );
    }

    if (prompt.type === "select" && prompt.options) {
      return this.interaction.askSelect(prompt.question, prompt.options);
    }

    return this.interaction.askInput(prompt.question);
  }

  /**
   * Use LLM to infer optional slot values.
   *
   * The model is asked for a JSON object; the first {...} span in the
   * reply is parsed and filtered down to the requested slots, dropping
   * null/undefined values. All failures degrade to an empty result.
   *
   * @param {string[]} slots - slot names still missing
   * @param {object} context - project context (only cwd is used)
   * @param {object} currentEntities - already-filled slots, shown to the model
   * @param {string} intentType
   * @returns {Promise<Object<string, string>>} slot → inferred value
   */
  async inferWithLLM(slots, context, currentEntities, intentType) {
    if (!this.llmChat) return {};

    const prompt = `Given the following context, infer reasonable values for these parameters.

Intent type: ${intentType}
Already known: ${JSON.stringify(currentEntities)}
Working directory: ${context.cwd || "unknown"}
Parameters to infer: ${slots.join(", ")}

Respond with a JSON object mapping parameter names to inferred values.
Only include parameters where you have high confidence. Use null for uncertain ones.
Keep values concise (single words or short strings).`;

    try {
      const response = await this.llmChat([
        {
          role: "system",
          content:
            "You are a parameter inference assistant. Respond only with valid JSON.",
        },
        { role: "user", content: prompt },
      ]);

      // Support both { message: { content } } and { content } reply shapes.
      const content = response?.message?.content || response?.content || "";
      const jsonMatch = content.match(/\{[\s\S]*\}/);
      if (jsonMatch) {
        const parsed = JSON.parse(jsonMatch[0]);
        // Keep only requested slots with non-null values, as strings.
        const result = {};
        for (const [key, value] of Object.entries(parsed)) {
          if (value !== null && value !== undefined && slots.includes(key)) {
            result[key] = String(value);
          }
        }
        return result;
      }
    } catch (_err) {
      // Non-critical — malformed JSON or a chat error yields no inference.
    }

    return {};
  }

  /**
   * Validate that all required slots are filled.
   *
   * @param {string} intentType
   * @param {object} entities
   * @returns {{ valid: boolean, missingRequired: string[], completeness: number }}
   *   completeness is a 0-100 integer percentage (100 when no slots required)
   */
  validateSlots(intentType, entities) {
    const required = REQUIRED_SLOTS[intentType] || [];
    const missingRequired = required.filter((s) => !this._isFilled(entities[s]));
    const total = required.length;
    const filled = total - missingRequired.length;

    return {
      valid: missingRequired.length === 0,
      missingRequired,
      completeness: total > 0 ? Math.round((filled / total) * 100) : 100,
    };
  }

  /**
   * Learn user preferences from past slot-filling history.
   *
   * Looks at the 10 most recent values recorded for (intentType, slot)
   * and returns the most common one, but only when it occurred at least
   * twice and at least 2 history rows exist. Returns null otherwise or
   * on any DB error (non-critical).
   *
   * @param {string} intentType
   * @param {string} slot
   * @returns {Promise<string|null>}
   */
  async learnUserPreference(intentType, slot) {
    if (!this.db) return null;

    try {
      this._ensureHistoryTable();
      const rows = this.db
        .prepare(
          `SELECT slot_value FROM slot_filling_history
           WHERE intent_type = ? AND slot_name = ?
           ORDER BY created_at DESC LIMIT 10`,
        )
        .all(intentType, slot);

      if (rows.length < 2) return null;

      // Find most common value.
      const counts = {};
      for (const row of rows) {
        counts[row.slot_value] = (counts[row.slot_value] || 0) + 1;
      }
      const sorted = Object.entries(counts).sort((a, b) => b[1] - a[1]);
      if (sorted[0] && sorted[0][1] >= 2) {
        return sorted[0][0];
      }
    } catch (_err) {
      // Non-critical — fall through to null.
    }

    return null;
  }

  /**
   * Record slot filling history for preference learning.
   * Values are stringified; unfilled slots are skipped. DB errors are
   * swallowed — history is best-effort.
   *
   * @param {string} intentType
   * @param {object} entities
   */
  recordHistory(intentType, entities) {
    if (!this.db) return;

    try {
      this._ensureHistoryTable();
      const stmt = this.db.prepare(
        `INSERT INTO slot_filling_history (intent_type, slot_name, slot_value, created_at)
         VALUES (?, ?, ?, datetime('now'))`,
      );
      for (const [slot, value] of Object.entries(entities)) {
        if (this._isFilled(value)) {
          stmt.run(intentType, slot, String(value));
        }
      }
    } catch (_err) {
      // Non-critical — history is best-effort.
    }
  }

  /**
   * Create the history table once per instance (idempotent; the SQL is
   * also IF NOT EXISTS so concurrent instances are safe).
   * Callers must ensure this.db is set before calling.
   */
  _ensureHistoryTable() {
    if (this._tableCreated) return;
    this.db.exec(`
      CREATE TABLE IF NOT EXISTS slot_filling_history (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        intent_type TEXT NOT NULL,
        slot_name TEXT NOT NULL,
        slot_value TEXT NOT NULL,
        created_at TEXT DEFAULT (datetime('now'))
      )
    `);
    this._tableCreated = true;
  }

  /**
   * Get slot definitions for an intent type.
   *
   * @param {string} intentType
   * @returns {{ required: string[], optional: string[] }}
   */
  static getSlotDefinitions(intentType) {
    return {
      required: REQUIRED_SLOTS[intentType] || [],
      optional: OPTIONAL_SLOTS[intentType] || [],
    };
  }

  /**
   * Get all supported intent types.
   *
   * @returns {string[]}
   */
  static getSupportedIntents() {
    return Object.keys(REQUIRED_SLOTS);
  }
}
@@ -24,24 +24,24 @@ export const TaskType = {
24
24
  */
25
25
  const TASK_MODEL_MAP = {
26
26
  [TaskType.CHAT]: {
27
- volcengine: "doubao-seed-1-6-flash-250828",
27
+ volcengine: "doubao-seed-1-6-251015",
28
28
  openai: "gpt-4o-mini",
29
29
  anthropic: "claude-sonnet-4-6",
30
30
  deepseek: "deepseek-chat",
31
31
  dashscope: "qwen-plus",
32
32
  gemini: "gemini-2.0-flash",
33
33
  mistral: "mistral-medium-latest",
34
- ollama: "qwen2:7b",
34
+ ollama: "qwen2.5:7b",
35
35
  },
36
36
  [TaskType.CODE]: {
37
- volcengine: "doubao-seed-code",
37
+ volcengine: "doubao-seed-1-6-251015",
38
38
  openai: "gpt-4o",
39
39
  anthropic: "claude-sonnet-4-6",
40
40
  deepseek: "deepseek-coder",
41
41
  dashscope: "qwen-max",
42
42
  gemini: "gemini-2.0-pro",
43
43
  mistral: "mistral-large-latest",
44
- ollama: "codellama:7b",
44
+ ollama: "qwen2.5-coder:14b",
45
45
  },
46
46
  [TaskType.REASONING]: {
47
47
  volcengine: "doubao-seed-1-6-251015",
@@ -51,7 +51,7 @@ const TASK_MODEL_MAP = {
51
51
  dashscope: "qwen-max",
52
52
  gemini: "gemini-2.0-pro",
53
53
  mistral: "mistral-large-latest",
54
- ollama: "qwen2:7b",
54
+ ollama: "qwen2.5:14b",
55
55
  },
56
56
  [TaskType.FAST]: {
57
57
  volcengine: "doubao-seed-1-6-lite-251015",