@contextstream/mcp-server 0.3.9 → 0.3.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/index.js +1053 -79
  2. package/package.json +1 -1
package/dist/index.js CHANGED
@@ -4578,6 +4578,14 @@ var ContextStreamClient = class {
4578
4578
  createWorkspace(input) {
4579
4579
  return request(this.config, "/workspaces", { body: input });
4580
4580
  }
4581
+ updateWorkspace(workspaceId, input) {
4582
+ uuidSchema.parse(workspaceId);
4583
+ return request(this.config, `/workspaces/${workspaceId}`, { method: "PUT", body: input });
4584
+ }
4585
+ deleteWorkspace(workspaceId) {
4586
+ uuidSchema.parse(workspaceId);
4587
+ return request(this.config, `/workspaces/${workspaceId}`, { method: "DELETE" });
4588
+ }
4581
4589
  listProjects(params) {
4582
4590
  const withDefaults = this.withDefaults(params || {});
4583
4591
  const query = new URLSearchParams();
@@ -4591,6 +4599,14 @@ var ContextStreamClient = class {
4591
4599
  const payload = this.withDefaults(input);
4592
4600
  return request(this.config, "/projects", { body: payload });
4593
4601
  }
4602
+ updateProject(projectId, input) {
4603
+ uuidSchema.parse(projectId);
4604
+ return request(this.config, `/projects/${projectId}`, { method: "PUT", body: input });
4605
+ }
4606
+ deleteProject(projectId) {
4607
+ uuidSchema.parse(projectId);
4608
+ return request(this.config, `/projects/${projectId}`, { method: "DELETE" });
4609
+ }
4594
4610
  indexProject(projectId) {
4595
4611
  uuidSchema.parse(projectId);
4596
4612
  return request(this.config, `/projects/${projectId}/index`, { body: {} });
@@ -5276,118 +5292,700 @@ var ContextStreamClient = class {
5276
5292
  }
5277
5293
  return results;
5278
5294
  }
5295
+ // ============================================
5296
+ // Token-Saving Context Tools
5297
+ // ============================================
5298
+ /**
5299
+ * Get a compact, token-efficient summary of workspace context.
5300
+ * Designed to be included in every AI prompt without consuming many tokens.
5301
+ *
5302
+ * Target: ~500 tokens max
5303
+ *
5304
+ * This replaces loading full chat history - AI can call session_recall
5305
+ * for specific details when needed.
5306
+ */
5307
+ async getContextSummary(params) {
5308
+ const withDefaults = this.withDefaults(params);
5309
+ const maxTokens = params.max_tokens || 500;
5310
+ if (!withDefaults.workspace_id) {
5311
+ return {
5312
+ summary: "No workspace context loaded. Call session_init first.",
5313
+ decision_count: 0,
5314
+ memory_count: 0
5315
+ };
5316
+ }
5317
+ const parts = [];
5318
+ let workspaceName;
5319
+ let projectName;
5320
+ let decisionCount = 0;
5321
+ let memoryCount = 0;
5322
+ try {
5323
+ const ws = await this.getWorkspace(withDefaults.workspace_id);
5324
+ workspaceName = ws?.name;
5325
+ if (workspaceName) {
5326
+ parts.push(`\u{1F4C1} Workspace: ${workspaceName}`);
5327
+ }
5328
+ } catch {
5329
+ }
5330
+ if (withDefaults.project_id) {
5331
+ try {
5332
+ const proj = await this.getProject(withDefaults.project_id);
5333
+ projectName = proj?.name;
5334
+ if (projectName) {
5335
+ parts.push(`\u{1F4C2} Project: ${projectName}`);
5336
+ }
5337
+ } catch {
5338
+ }
5339
+ }
5340
+ try {
5341
+ const decisions = await this.memoryDecisions({
5342
+ workspace_id: withDefaults.workspace_id,
5343
+ project_id: withDefaults.project_id,
5344
+ limit: 5
5345
+ });
5346
+ if (decisions.items && decisions.items.length > 0) {
5347
+ decisionCount = decisions.items.length;
5348
+ parts.push("");
5349
+ parts.push("\u{1F4CB} Recent Decisions:");
5350
+ decisions.items.slice(0, 3).forEach((d, i) => {
5351
+ parts.push(` ${i + 1}. ${d.title || "Untitled"}`);
5352
+ });
5353
+ if (decisions.items.length > 3) {
5354
+ parts.push(` (+${decisions.items.length - 3} more)`);
5355
+ }
5356
+ }
5357
+ } catch {
5358
+ }
5359
+ try {
5360
+ const prefs = await this.memorySearch({
5361
+ query: "user preferences coding style settings",
5362
+ workspace_id: withDefaults.workspace_id,
5363
+ limit: 5
5364
+ });
5365
+ if (prefs.results && prefs.results.length > 0) {
5366
+ parts.push("");
5367
+ parts.push("\u2699\uFE0F Preferences:");
5368
+ prefs.results.slice(0, 3).forEach((p) => {
5369
+ const title = p.title || "Preference";
5370
+ parts.push(` \u2022 ${title.slice(0, 60)}${title.length > 60 ? "..." : ""}`);
5371
+ });
5372
+ }
5373
+ } catch {
5374
+ }
5375
+ try {
5376
+ const summary2 = await this.memorySummary(withDefaults.workspace_id);
5377
+ memoryCount = summary2.events || 0;
5378
+ if (memoryCount > 0) {
5379
+ parts.push("");
5380
+ parts.push(`\u{1F9E0} Memory: ${memoryCount} events stored`);
5381
+ }
5382
+ } catch {
5383
+ }
5384
+ parts.push("");
5385
+ parts.push('\u{1F4A1} Use session_recall("topic") for specific context');
5386
+ const summary = parts.join("\n");
5387
+ return {
5388
+ summary,
5389
+ workspace_name: workspaceName,
5390
+ project_name: projectName,
5391
+ decision_count: decisionCount,
5392
+ memory_count: memoryCount
5393
+ };
5394
+ }
5395
+ /**
5396
+ * Compress chat history into structured memory events.
5397
+ * This extracts key information and stores it, allowing the chat
5398
+ * history to be cleared while preserving context.
5399
+ *
5400
+ * Use this at the end of a conversation or when context window is full.
5401
+ */
5402
+ async compressChat(params) {
5403
+ const withDefaults = this.withDefaults(params);
5404
+ if (!withDefaults.workspace_id) {
5405
+ throw new Error("workspace_id is required for compressChat");
5406
+ }
5407
+ const extractTypes = params.extract_types || ["decisions", "preferences", "insights", "tasks", "code_patterns"];
5408
+ const extracted = {
5409
+ decisions: [],
5410
+ preferences: [],
5411
+ insights: [],
5412
+ tasks: [],
5413
+ code_patterns: []
5414
+ };
5415
+ let eventsCreated = 0;
5416
+ const lines = params.chat_history.split("\n");
5417
+ for (const line of lines) {
5418
+ const lowerLine = line.toLowerCase();
5419
+ if (extractTypes.includes("decisions")) {
5420
+ if (lowerLine.includes("decided to") || lowerLine.includes("decision:") || lowerLine.includes("we'll use") || lowerLine.includes("going with") || lowerLine.includes("chose ")) {
5421
+ extracted.decisions.push(line.trim());
5422
+ }
5423
+ }
5424
+ if (extractTypes.includes("preferences")) {
5425
+ if (lowerLine.includes("prefer") || lowerLine.includes("i like") || lowerLine.includes("always use") || lowerLine.includes("don't use") || lowerLine.includes("never use")) {
5426
+ extracted.preferences.push(line.trim());
5427
+ }
5428
+ }
5429
+ if (extractTypes.includes("tasks")) {
5430
+ if (lowerLine.includes("todo:") || lowerLine.includes("task:") || lowerLine.includes("need to") || lowerLine.includes("should implement") || lowerLine.includes("will add")) {
5431
+ extracted.tasks.push(line.trim());
5432
+ }
5433
+ }
5434
+ if (extractTypes.includes("insights")) {
5435
+ if (lowerLine.includes("learned that") || lowerLine.includes("realized") || lowerLine.includes("found out") || lowerLine.includes("discovered") || lowerLine.includes("important:") || lowerLine.includes("note:")) {
5436
+ extracted.insights.push(line.trim());
5437
+ }
5438
+ }
5439
+ if (extractTypes.includes("code_patterns")) {
5440
+ if (lowerLine.includes("pattern:") || lowerLine.includes("convention:") || lowerLine.includes("style:") || lowerLine.includes("always format") || lowerLine.includes("naming convention")) {
5441
+ extracted.code_patterns.push(line.trim());
5442
+ }
5443
+ }
5444
+ }
5445
+ for (const decision of extracted.decisions.slice(0, 5)) {
5446
+ try {
5447
+ await this.captureContext({
5448
+ workspace_id: withDefaults.workspace_id,
5449
+ project_id: withDefaults.project_id,
5450
+ event_type: "decision",
5451
+ title: decision.slice(0, 100),
5452
+ content: decision,
5453
+ importance: "medium"
5454
+ });
5455
+ eventsCreated++;
5456
+ } catch {
5457
+ }
5458
+ }
5459
+ for (const pref of extracted.preferences.slice(0, 5)) {
5460
+ try {
5461
+ await this.captureContext({
5462
+ workspace_id: withDefaults.workspace_id,
5463
+ project_id: withDefaults.project_id,
5464
+ event_type: "preference",
5465
+ title: pref.slice(0, 100),
5466
+ content: pref,
5467
+ importance: "medium"
5468
+ });
5469
+ eventsCreated++;
5470
+ } catch {
5471
+ }
5472
+ }
5473
+ for (const task of extracted.tasks.slice(0, 5)) {
5474
+ try {
5475
+ await this.captureContext({
5476
+ workspace_id: withDefaults.workspace_id,
5477
+ project_id: withDefaults.project_id,
5478
+ event_type: "task",
5479
+ title: task.slice(0, 100),
5480
+ content: task,
5481
+ importance: "medium"
5482
+ });
5483
+ eventsCreated++;
5484
+ } catch {
5485
+ }
5486
+ }
5487
+ for (const insight of extracted.insights.slice(0, 5)) {
5488
+ try {
5489
+ await this.captureContext({
5490
+ workspace_id: withDefaults.workspace_id,
5491
+ project_id: withDefaults.project_id,
5492
+ event_type: "insight",
5493
+ title: insight.slice(0, 100),
5494
+ content: insight,
5495
+ importance: "medium"
5496
+ });
5497
+ eventsCreated++;
5498
+ } catch {
5499
+ }
5500
+ }
5501
+ return {
5502
+ events_created: eventsCreated,
5503
+ extracted
5504
+ };
5505
+ }
5506
+ /**
5507
+ * Get context optimized for a token budget.
5508
+ * Returns the most relevant context that fits within the specified token limit.
5509
+ *
5510
+ * This is the key tool for token-efficient AI interactions:
5511
+ * - AI calls this with a query and token budget
5512
+ * - Gets optimally selected context
5513
+ * - No need to include full chat history
5514
+ */
5515
+ async getContextWithBudget(params) {
5516
+ const withDefaults = this.withDefaults(params);
5517
+ const maxTokens = params.max_tokens || 2e3;
5518
+ const charsPerToken = 4;
5519
+ const maxChars = maxTokens * charsPerToken;
5520
+ const parts = [];
5521
+ const sources = [];
5522
+ let currentChars = 0;
5523
+ if (params.include_decisions !== false && withDefaults.workspace_id) {
5524
+ try {
5525
+ const decisions = await this.memoryDecisions({
5526
+ workspace_id: withDefaults.workspace_id,
5527
+ project_id: withDefaults.project_id,
5528
+ limit: 10
5529
+ });
5530
+ if (decisions.items) {
5531
+ parts.push("## Relevant Decisions\n");
5532
+ currentChars += 25;
5533
+ for (const d of decisions.items) {
5534
+ const entry = `\u2022 ${d.title || "Decision"}
5535
+ `;
5536
+ if (currentChars + entry.length > maxChars * 0.4) break;
5537
+ parts.push(entry);
5538
+ currentChars += entry.length;
5539
+ sources.push({ type: "decision", title: d.title || "Decision" });
5540
+ }
5541
+ parts.push("\n");
5542
+ }
5543
+ } catch {
5544
+ }
5545
+ }
5546
+ if (params.include_memory !== false && withDefaults.workspace_id) {
5547
+ try {
5548
+ const memory = await this.memorySearch({
5549
+ query: params.query,
5550
+ workspace_id: withDefaults.workspace_id,
5551
+ project_id: withDefaults.project_id,
5552
+ limit: 5
5553
+ });
5554
+ if (memory.results) {
5555
+ parts.push("## Related Context\n");
5556
+ currentChars += 20;
5557
+ for (const m of memory.results) {
5558
+ const title = m.title || "Context";
5559
+ const content = m.content?.slice(0, 200) || "";
5560
+ const entry = `\u2022 ${title}: ${content}...
5561
+ `;
5562
+ if (currentChars + entry.length > maxChars * 0.7) break;
5563
+ parts.push(entry);
5564
+ currentChars += entry.length;
5565
+ sources.push({ type: "memory", title });
5566
+ }
5567
+ parts.push("\n");
5568
+ }
5569
+ } catch {
5570
+ }
5571
+ }
5572
+ if (params.include_code && withDefaults.project_id && currentChars < maxChars * 0.8) {
5573
+ try {
5574
+ const code = await this.searchSemantic({
5575
+ query: params.query,
5576
+ workspace_id: withDefaults.workspace_id,
5577
+ project_id: withDefaults.project_id,
5578
+ limit: 3
5579
+ });
5580
+ if (code.results) {
5581
+ parts.push("## Relevant Code\n");
5582
+ currentChars += 18;
5583
+ for (const c of code.results) {
5584
+ const path3 = c.file_path || "file";
5585
+ const content = c.content?.slice(0, 150) || "";
5586
+ const entry = `\u2022 ${path3}: ${content}...
5587
+ `;
5588
+ if (currentChars + entry.length > maxChars) break;
5589
+ parts.push(entry);
5590
+ currentChars += entry.length;
5591
+ sources.push({ type: "code", title: path3 });
5592
+ }
5593
+ }
5594
+ } catch {
5595
+ }
5596
+ }
5597
+ const context = parts.join("");
5598
+ const tokenEstimate = Math.ceil(context.length / charsPerToken);
5599
+ return {
5600
+ context,
5601
+ token_estimate: tokenEstimate,
5602
+ sources
5603
+ };
5604
+ }
5605
+ /**
5606
+ * Get incremental context changes since a given timestamp.
5607
+ * Useful for syncing context without reloading everything.
5608
+ */
5609
+ async getContextDelta(params) {
5610
+ const withDefaults = this.withDefaults(params);
5611
+ if (!withDefaults.workspace_id) {
5612
+ return { new_decisions: 0, new_memory: 0, items: [] };
5613
+ }
5614
+ const items = [];
5615
+ let newDecisions = 0;
5616
+ let newMemory = 0;
5617
+ try {
5618
+ const memory = await this.listMemoryEvents({
5619
+ workspace_id: withDefaults.workspace_id,
5620
+ project_id: withDefaults.project_id,
5621
+ limit: params.limit || 20
5622
+ });
5623
+ if (memory.items) {
5624
+ for (const item of memory.items) {
5625
+ const createdAt = item.created_at || "";
5626
+ if (createdAt > params.since) {
5627
+ const type = item.metadata?.original_type || "memory";
5628
+ items.push({
5629
+ type,
5630
+ title: item.title || "Untitled",
5631
+ created_at: createdAt
5632
+ });
5633
+ if (type === "decision") newDecisions++;
5634
+ else newMemory++;
5635
+ }
5636
+ }
5637
+ }
5638
+ } catch {
5639
+ }
5640
+ return {
5641
+ new_decisions: newDecisions,
5642
+ new_memory: newMemory,
5643
+ items
5644
+ };
5645
+ }
5646
+ /**
5647
+ * Get smart context for a user query - CALL THIS BEFORE EVERY RESPONSE.
5648
+ *
5649
+ * This is the key tool for automatic context injection:
5650
+ * 1. Analyzes the user's message to understand what context is needed
5651
+ * 2. Retrieves relevant context in a minified, token-efficient format
5652
+ * 3. Returns context that the AI can use without including chat history
5653
+ *
5654
+ * The format is optimized for AI consumption:
5655
+ * - Compact notation (D: for Decision, P: for Preference, etc.)
5656
+ * - No redundant whitespace
5657
+ * - Structured for easy parsing
5658
+ *
5659
+ * Format options:
5660
+ * - 'minified': Ultra-compact TYPE:value|TYPE:value|...
5661
+ * - 'readable': Human-readable with line breaks
5662
+ * - 'structured': JSON-like grouped format
5663
+ */
5664
+ async getSmartContext(params) {
5665
+ const withDefaults = this.withDefaults(params);
5666
+ const maxTokens = params.max_tokens || 800;
5667
+ const format = params.format || "minified";
5668
+ if (!withDefaults.workspace_id) {
5669
+ return {
5670
+ context: "[NO_WORKSPACE]",
5671
+ token_estimate: 2,
5672
+ format,
5673
+ sources_used: 0
5674
+ };
5675
+ }
5676
+ const message = params.user_message.toLowerCase();
5677
+ const keywords = this.extractKeywords(message);
5678
+ const items = [];
5679
+ try {
5680
+ const ws = await this.getWorkspace(withDefaults.workspace_id);
5681
+ if (ws?.name) {
5682
+ items.push({ type: "W", key: "workspace", value: ws.name, relevance: 1 });
5683
+ }
5684
+ } catch {
5685
+ }
5686
+ if (withDefaults.project_id) {
5687
+ try {
5688
+ const proj = await this.getProject(withDefaults.project_id);
5689
+ if (proj?.name) {
5690
+ items.push({ type: "P", key: "project", value: proj.name, relevance: 1 });
5691
+ }
5692
+ } catch {
5693
+ }
5694
+ }
5695
+ try {
5696
+ const decisions = await this.memoryDecisions({
5697
+ workspace_id: withDefaults.workspace_id,
5698
+ project_id: withDefaults.project_id,
5699
+ limit: 10
5700
+ });
5701
+ if (decisions.items) {
5702
+ for (const d of decisions.items) {
5703
+ const title = d.title || "";
5704
+ const content = d.content || "";
5705
+ const relevance = this.calculateRelevance(keywords, title + " " + content);
5706
+ items.push({
5707
+ type: "D",
5708
+ key: "decision",
5709
+ value: title.slice(0, 80),
5710
+ relevance
5711
+ });
5712
+ }
5713
+ }
5714
+ } catch {
5715
+ }
5716
+ if (keywords.length > 0) {
5717
+ try {
5718
+ const memory = await this.memorySearch({
5719
+ query: params.user_message.slice(0, 200),
5720
+ workspace_id: withDefaults.workspace_id,
5721
+ project_id: withDefaults.project_id,
5722
+ limit: 5
5723
+ });
5724
+ if (memory.results) {
5725
+ for (const m of memory.results) {
5726
+ const title = m.title || "";
5727
+ const content = m.content || "";
5728
+ items.push({
5729
+ type: "M",
5730
+ key: "memory",
5731
+ value: title.slice(0, 80) + (content ? ": " + content.slice(0, 100) : ""),
5732
+ relevance: 0.8
5733
+ // Memory search already ranked by relevance
5734
+ });
5735
+ }
5736
+ }
5737
+ } catch {
5738
+ }
5739
+ }
5740
+ items.sort((a, b) => b.relevance - a.relevance);
5741
+ let context;
5742
+ let charsUsed = 0;
5743
+ const maxChars = maxTokens * 4;
5744
+ if (format === "minified") {
5745
+ const parts = [];
5746
+ for (const item of items) {
5747
+ const entry = `${item.type}:${item.value}`;
5748
+ if (charsUsed + entry.length + 1 > maxChars) break;
5749
+ parts.push(entry);
5750
+ charsUsed += entry.length + 1;
5751
+ }
5752
+ context = parts.join("|");
5753
+ } else if (format === "structured") {
5754
+ const grouped = {};
5755
+ for (const item of items) {
5756
+ if (charsUsed > maxChars) break;
5757
+ if (!grouped[item.type]) grouped[item.type] = [];
5758
+ grouped[item.type].push(item.value);
5759
+ charsUsed += item.value.length + 5;
5760
+ }
5761
+ context = JSON.stringify(grouped);
5762
+ } else {
5763
+ const lines = ["[CTX]"];
5764
+ for (const item of items) {
5765
+ const line = `${item.type}:${item.value}`;
5766
+ if (charsUsed + line.length + 1 > maxChars) break;
5767
+ lines.push(line);
5768
+ charsUsed += line.length + 1;
5769
+ }
5770
+ lines.push("[/CTX]");
5771
+ context = lines.join("\n");
5772
+ }
5773
+ return {
5774
+ context,
5775
+ token_estimate: Math.ceil(context.length / 4),
5776
+ format,
5777
+ sources_used: items.filter((i) => context.includes(i.value.slice(0, 20))).length
5778
+ };
5779
+ }
5780
+ /**
5781
+ * Extract keywords from a message for relevance matching
5782
+ */
5783
+ extractKeywords(message) {
5784
+ const stopWords = /* @__PURE__ */ new Set([
5785
+ "the",
5786
+ "a",
5787
+ "an",
5788
+ "is",
5789
+ "are",
5790
+ "was",
5791
+ "were",
5792
+ "be",
5793
+ "been",
5794
+ "being",
5795
+ "have",
5796
+ "has",
5797
+ "had",
5798
+ "do",
5799
+ "does",
5800
+ "did",
5801
+ "will",
5802
+ "would",
5803
+ "could",
5804
+ "should",
5805
+ "may",
5806
+ "might",
5807
+ "must",
5808
+ "can",
5809
+ "to",
5810
+ "of",
5811
+ "in",
5812
+ "for",
5813
+ "on",
5814
+ "with",
5815
+ "at",
5816
+ "by",
5817
+ "from",
5818
+ "as",
5819
+ "into",
5820
+ "through",
5821
+ "during",
5822
+ "before",
5823
+ "after",
5824
+ "above",
5825
+ "below",
5826
+ "between",
5827
+ "under",
5828
+ "again",
5829
+ "further",
5830
+ "then",
5831
+ "once",
5832
+ "here",
5833
+ "there",
5834
+ "when",
5835
+ "where",
5836
+ "why",
5837
+ "how",
5838
+ "all",
5839
+ "each",
5840
+ "few",
5841
+ "more",
5842
+ "most",
5843
+ "other",
5844
+ "some",
5845
+ "such",
5846
+ "no",
5847
+ "nor",
5848
+ "not",
5849
+ "only",
5850
+ "own",
5851
+ "same",
5852
+ "so",
5853
+ "than",
5854
+ "too",
5855
+ "very",
5856
+ "just",
5857
+ "and",
5858
+ "but",
5859
+ "if",
5860
+ "or",
5861
+ "because",
5862
+ "until",
5863
+ "while",
5864
+ "this",
5865
+ "that",
5866
+ "these",
5867
+ "those",
5868
+ "what",
5869
+ "which",
5870
+ "who",
5871
+ "whom",
5872
+ "i",
5873
+ "me",
5874
+ "my",
5875
+ "we",
5876
+ "our",
5877
+ "you",
5878
+ "your",
5879
+ "he",
5880
+ "she",
5881
+ "it",
5882
+ "they",
5883
+ "them"
5884
+ ]);
5885
+ return message.toLowerCase().replace(/[^\w\s]/g, " ").split(/\s+/).filter((word) => word.length > 2 && !stopWords.has(word));
5886
+ }
5887
+ /**
5888
+ * Calculate relevance score based on keyword matches
5889
+ */
5890
+ calculateRelevance(keywords, text) {
5891
+ if (keywords.length === 0) return 0.5;
5892
+ const textLower = text.toLowerCase();
5893
+ let matches = 0;
5894
+ for (const keyword of keywords) {
5895
+ if (textLower.includes(keyword)) {
5896
+ matches++;
5897
+ }
5898
+ }
5899
+ return matches / keywords.length;
5900
+ }
5279
5901
  };
5280
5902
 
5281
5903
  // src/rules-templates.ts
5282
5904
  var CONTEXTSTREAM_RULES = `
5283
5905
  ## ContextStream Integration
5284
5906
 
5285
- You have access to ContextStream MCP tools for persistent memory and context across sessions.
5907
+ You have access to ContextStream MCP tools for persistent memory and context.
5286
5908
 
5287
- ---
5288
-
5289
- ### \u{1F6A8} ESSENTIAL Tools (Always Use)
5909
+ ## TL;DR - REQUIRED EVERY MESSAGE
5290
5910
 
5291
- These tools should be called automatically in every conversation:
5911
+ | Message | What to Call |
5912
+ |---------|--------------|
5913
+ | **1st message** | \`session_init(folder_path="...", context_hint="<user's message>")\` |
5914
+ | **2nd+ messages** | \`context_smart(user_message="<user's message>")\` |
5915
+ | **After completing task** | \`session_capture(...)\` - MUST capture decisions/insights |
5292
5916
 
5293
- | When | Tool | Example Prompt |
5294
- |------|------|----------------|
5295
- | **Conversation start** | \`session_init\` | "Load my workspace context" |
5296
- | **User makes a decision** | \`session_capture\` | "Remember we decided to use PostgreSQL" |
5297
- | **User states a preference** | \`session_capture\` | "Remember I prefer TypeScript strict mode" |
5298
- | **Before starting work** | \`session_recall\` | "What did we decide about auth?" |
5917
+ **NO EXCEPTIONS.** Do not skip even if you think you have enough context.
5299
5918
 
5300
5919
  ---
5301
5920
 
5302
- ### \u{1F4DD} Memory & Context Tools (Use Frequently)
5921
+ ### \u26A0\uFE0F Why context_smart is Required (Even After session_init)
5303
5922
 
5304
- | Tool | Example Prompt |
5305
- |------|----------------|
5306
- | \`session_remember\` | "Remember that the API uses JWT tokens" |
5307
- | \`session_smart_search\` | "Search for everything about payment processing" |
5308
- | \`memory_search\` | "Search my notes for caching strategy" |
5309
- | \`memory_decisions\` | "Show me all past decisions" |
5310
- | \`memory_timeline\` | "Show me the timeline of this workspace" |
5311
- | \`memory_summary\` | "Summarize what we've worked on" |
5923
+ **Common mistake:** "session_init already gave me context, I don't need context_smart"
5312
5924
 
5313
- ---
5925
+ **This is WRONG. Here's why:**
5926
+ - \`session_init\` returns the last ~10 items **BY TIME** (chronological)
5927
+ - \`context_smart\` **SEARCHES** for items **RELEVANT to THIS message** (semantic)
5314
5928
 
5315
- ### \u{1F50D} Code Search Tools (Use When Exploring Code)
5929
+ **Example failure:**
5930
+ - User asks: "how should I implement authentication?"
5931
+ - Auth decisions were made 20 conversations ago
5932
+ - \u274C \`session_init\` won't have it (too old, not in recent 10)
5933
+ - \u2705 \`context_smart\` FINDS it via semantic search
5316
5934
 
5317
- | Tool | Example Prompt |
5318
- |------|----------------|
5319
- | \`search_semantic\` | "How does authentication work in this codebase?" |
5320
- | \`search_hybrid\` | "Find code related to user permissions" |
5321
- | \`search_keyword\` | "Search for handleSubmit function" |
5322
- | \`search_pattern\` | "Find all TODO comments" |
5323
- | \`ai_context\` | "Build context for implementing user roles" |
5324
- | \`ai_enhanced_context\` | "Give me deep context on the payment system" |
5935
+ **Without context_smart, you WILL miss relevant older context.**
5325
5936
 
5326
5937
  ---
5327
5938
 
5328
- ### \u{1F578}\uFE0F Code Analysis Tools (Use for Architecture)
5939
+ ### When to Capture (MANDATORY)
5329
5940
 
5330
- | Tool | Example Prompt |
5331
- |------|----------------|
5332
- | \`graph_dependencies\` | "What does UserService depend on?" |
5333
- | \`graph_impact\` | "What would be affected if I change the User model?" |
5334
- | \`graph_call_path\` | "How does login() eventually call the database?" |
5335
- | \`graph_circular_dependencies\` | "Are there any circular dependencies?" |
5336
- | \`graph_unused_code\` | "Find unused code in this project" |
5941
+ | When | Tool | Example |
5942
+ |------|------|---------|
5943
+ | User makes a decision | \`session_capture\` | "Let's use PostgreSQL" \u2192 capture as decision |
5944
+ | User states preference | \`session_capture\` | "I prefer TypeScript" \u2192 capture as preference |
5945
+ | You complete a task | \`session_capture\` | Capture what was done, decisions made |
5946
+ | Need past context | \`session_recall\` | "What did we decide about X?" |
5337
5947
 
5338
- ---
5339
-
5340
- ### \u{1F4C1} Project & Workspace Tools
5341
-
5342
- | Tool | Example Prompt |
5343
- |------|----------------|
5344
- | \`projects_overview\` | "Give me an overview of this project" |
5345
- | \`projects_index\` | "Re-index this project" |
5346
- | \`workspaces_overview\` | "Give me a workspace overview" |
5347
- | \`workspace_associate\` | "Associate this folder with my workspace" |
5348
- | \`generate_editor_rules\` | "Generate ContextStream rules for this project" |
5948
+ **You MUST capture after completing any significant task.** This ensures future sessions have context.
5349
5949
 
5350
5950
  ---
5351
5951
 
5352
- ### Automatic Behavior Guidelines
5952
+ ### Behavior Rules
5353
5953
 
5354
- **At conversation start:**
5355
- - Call \`session_init\` to load context (this happens automatically on first tool call)
5356
- - Review the loaded decisions and memory before responding
5954
+ **First message of conversation:**
5955
+ 1. Call \`session_init(folder_path="<cwd>", context_hint="<user's message>")\`
5956
+ 2. Then respond
5357
5957
 
5358
- **During the conversation:**
5359
- - When the user makes a decision \u2192 \`session_capture\` with \`event_type: "decision"\`
5360
- - When you learn a user preference \u2192 \`session_capture\` with \`event_type: "preference"\`
5361
- - When discovering important context \u2192 \`session_remember\`
5362
- - When encountering a bug \u2192 \`session_capture\` with \`event_type: "bug"\`
5363
- - When a feature is discussed \u2192 \`session_capture\` with \`event_type: "feature"\`
5958
+ **Every subsequent message:**
5959
+ 1. Call \`context_smart(user_message="<user's message>")\` FIRST
5960
+ 2. Then respond
5364
5961
 
5365
- **When exploring code:**
5366
- - Use \`search_semantic\` or \`search_hybrid\` for code search
5367
- - Use \`graph_dependencies\` to understand architecture
5368
- - Use \`graph_impact\` before making changes
5962
+ **After completing a task:**
5963
+ 1. Call \`session_capture\` to save decisions, preferences, or insights
5964
+ 2. This is NOT optional
5369
5965
 
5370
- **At conversation end (if significant work done):**
5371
- - Summarize and capture key decisions with \`session_capture\`
5966
+ **When user asks about past decisions:**
5967
+ - Use \`session_recall\` - do NOT ask user to repeat themselves
5372
5968
 
5373
5969
  ---
5374
5970
 
5375
5971
  ### Quick Examples
5376
5972
 
5377
5973
  \`\`\`
5378
- # Start of conversation - load context
5379
- session_init()
5974
+ # First message - user asks about auth
5975
+ session_init(folder_path="/path/to/project", context_hint="how should I implement auth?")
5976
+ # Returns workspace info + semantically relevant auth decisions from ANY time
5380
5977
 
5381
- # User says "Let's use PostgreSQL for the database"
5382
- session_capture(event_type="decision", title="Database Choice", content="Decided to use PostgreSQL...")
5978
+ # Second message - user asks about database
5979
+ context_smart(user_message="what database should I use?")
5980
+ # Returns: W:Maker|P:myproject|D:Use PostgreSQL|D:No ORMs|M:DB schema at...
5383
5981
 
5384
- # User says "I prefer async/await over callbacks"
5385
- session_capture(event_type="preference", title="Async Style", content="User prefers async/await...")
5982
+ # User says "Let's use Redis for caching"
5983
+ session_capture(event_type="decision", title="Caching Choice", content="Using Redis for caching layer")
5386
5984
 
5387
- # Need to find related code
5388
- search_semantic(query="authentication middleware")
5985
+ # After completing implementation
5986
+ session_capture(event_type="decision", title="Auth Implementation Complete", content="Implemented JWT auth with refresh tokens...")
5389
5987
 
5390
- # Check what we discussed before
5988
+ # Check past decisions
5391
5989
  session_recall(query="what did we decide about caching?")
5392
5990
  \`\`\`
5393
5991
  `.trim();
@@ -5411,6 +6009,20 @@ ${CONTEXTSTREAM_RULES}
5411
6009
  description: "Cline AI rules",
5412
6010
  content: `# Cline Rules
5413
6011
  ${CONTEXTSTREAM_RULES}
6012
+ `
6013
+ },
6014
+ kilo: {
6015
+ filename: ".kilocode/rules/contextstream.md",
6016
+ description: "Kilo Code AI rules",
6017
+ content: `# Kilo Code Rules
6018
+ ${CONTEXTSTREAM_RULES}
6019
+ `
6020
+ },
6021
+ roo: {
6022
+ filename: ".roo/rules/contextstream.md",
6023
+ description: "Roo Code AI rules",
6024
+ content: `# Roo Code Rules
6025
+ ${CONTEXTSTREAM_RULES}
5414
6026
  `
5415
6027
  },
5416
6028
  claude: {
@@ -5484,6 +6096,7 @@ function registerTools(server, client, sessionManager) {
5484
6096
  contextPrefix = autoInitResult.contextSummary + "\n\n";
5485
6097
  }
5486
6098
  }
6099
+ sessionManager.warnIfContextSmartNotCalled(toolName);
5487
6100
  const result = await handler(input);
5488
6101
  if (contextPrefix && result && typeof result === "object") {
5489
6102
  const r = result;
@@ -5557,6 +6170,38 @@ function registerTools(server, client, sessionManager) {
5557
6170
  return { content: [{ type: "text", text: formatContent(result) }], structuredContent: toStructured(result) };
5558
6171
  }
5559
6172
  );
6173
+ registerTool(
6174
+ "workspaces_update",
6175
+ {
6176
+ title: "Update workspace",
6177
+ description: "Update a workspace (rename, change description, or visibility)",
6178
+ inputSchema: external_exports.object({
6179
+ workspace_id: external_exports.string().uuid(),
6180
+ name: external_exports.string().optional(),
6181
+ description: external_exports.string().optional(),
6182
+ visibility: external_exports.enum(["private", "team", "org"]).optional()
6183
+ })
6184
+ },
6185
+ async (input) => {
6186
+ const { workspace_id, ...updates } = input;
6187
+ const result = await client.updateWorkspace(workspace_id, updates);
6188
+ return { content: [{ type: "text", text: formatContent(result) }], structuredContent: toStructured(result) };
6189
+ }
6190
+ );
6191
+ registerTool(
6192
+ "workspaces_delete",
6193
+ {
6194
+ title: "Delete workspace",
6195
+ description: "Delete a workspace and all its contents (projects, memory, etc.). This action is irreversible.",
6196
+ inputSchema: external_exports.object({
6197
+ workspace_id: external_exports.string().uuid()
6198
+ })
6199
+ },
6200
+ async (input) => {
6201
+ const result = await client.deleteWorkspace(input.workspace_id);
6202
+ return { content: [{ type: "text", text: formatContent(result || { success: true, message: "Workspace deleted successfully" }) }], structuredContent: toStructured(result) };
6203
+ }
6204
+ );
5560
6205
  registerTool(
5561
6206
  "projects_list",
5562
6207
  {
@@ -5585,6 +6230,37 @@ function registerTools(server, client, sessionManager) {
5585
6230
  return { content: [{ type: "text", text: formatContent(result) }], structuredContent: toStructured(result) };
5586
6231
  }
5587
6232
  );
6233
+ registerTool(
6234
+ "projects_update",
6235
+ {
6236
+ title: "Update project",
6237
+ description: "Update a project (rename or change description)",
6238
+ inputSchema: external_exports.object({
6239
+ project_id: external_exports.string().uuid(),
6240
+ name: external_exports.string().optional(),
6241
+ description: external_exports.string().optional()
6242
+ })
6243
+ },
6244
+ async (input) => {
6245
+ const { project_id, ...updates } = input;
6246
+ const result = await client.updateProject(project_id, updates);
6247
+ return { content: [{ type: "text", text: formatContent(result) }], structuredContent: toStructured(result) };
6248
+ }
6249
+ );
6250
+ registerTool(
6251
+ "projects_delete",
6252
+ {
6253
+ title: "Delete project",
6254
+ description: "Delete a project and all its contents (indexed files, memory events, etc.). This action is irreversible.",
6255
+ inputSchema: external_exports.object({
6256
+ project_id: external_exports.string().uuid()
6257
+ })
6258
+ },
6259
+ async (input) => {
6260
+ const result = await client.deleteProject(input.project_id);
6261
+ return { content: [{ type: "text", text: formatContent(result || { success: true, message: "Project deleted successfully" }) }], structuredContent: toStructured(result) };
6262
+ }
6263
+ );
5588
6264
  registerTool(
5589
6265
  "projects_index",
5590
6266
  {
@@ -6287,13 +6963,17 @@ Automatically detects code files and skips ignored directories like node_modules
6287
6963
  This is the FIRST tool AI assistants should call when starting a conversation.
6288
6964
  Returns: workspace info, project info, recent memory, recent decisions, and relevant context.
6289
6965
  Automatically detects the IDE workspace/project path and can auto-index code.
6290
- IMPORTANT: If you know the current workspace folder path, pass it as folder_path for accurate context resolution.`,
6966
+
6967
+ IMPORTANT: Pass the user's FIRST MESSAGE as context_hint to get semantically relevant context!
6968
+ Example: session_init(folder_path="/path/to/project", context_hint="how do I implement auth?")
6969
+
6970
+ This does semantic search on the first message. You only need context_smart on subsequent messages.`,
6291
6971
  inputSchema: external_exports.object({
6292
6972
  folder_path: external_exports.string().optional().describe("Current workspace/project folder path (absolute). Use this when IDE roots are not available."),
6293
6973
  workspace_id: external_exports.string().uuid().optional().describe("Workspace to initialize context for"),
6294
6974
  project_id: external_exports.string().uuid().optional().describe("Project to initialize context for"),
6295
6975
  session_id: external_exports.string().optional().describe("Custom session ID (auto-generated if not provided)"),
6296
- context_hint: external_exports.string().optional().describe("Hint about what the user wants to work on (used for context search)"),
6976
+ context_hint: external_exports.string().optional().describe("RECOMMENDED: Pass the user's first message here for semantic search. This finds relevant context from ANY time, not just recent items."),
6297
6977
  include_recent_memory: external_exports.boolean().optional().describe("Include recent memory events (default: true)"),
6298
6978
  include_decisions: external_exports.boolean().optional().describe("Include recent decisions (default: true)"),
6299
6979
  auto_index: external_exports.boolean().optional().describe("Automatically create and index project from IDE workspace (default: true)")
@@ -6347,7 +7027,7 @@ Optionally generates AI editor rules for automatic ContextStream usage.`,
6347
7027
  workspace_id: external_exports.string().uuid().describe("Workspace ID to associate with"),
6348
7028
  workspace_name: external_exports.string().optional().describe("Workspace name for reference"),
6349
7029
  create_parent_mapping: external_exports.boolean().optional().describe("Also create a parent folder mapping (e.g., /dev/maker/* -> workspace)"),
6350
- generate_editor_rules: external_exports.boolean().optional().describe("Generate AI editor rules (.windsurfrules, .cursorrules, etc.) for automatic ContextStream usage")
7030
+ generate_editor_rules: external_exports.boolean().optional().describe("Generate AI editor rules for Windsurf, Cursor, Cline, Kilo Code, Roo Code, Claude Code, and Aider")
6351
7031
  })
6352
7032
  },
6353
7033
  async (input) => {
@@ -6548,12 +7228,12 @@ Example: "What were the auth decisions?" or "What are my TypeScript preferences?
6548
7228
  "generate_editor_rules",
6549
7229
  {
6550
7230
  title: "Generate editor AI rules",
6551
- description: `Generate AI rule files for editors (Windsurf, Cursor, Cline, Claude Code, Aider).
7231
+ description: `Generate AI rule files for editors (Windsurf, Cursor, Cline, Kilo Code, Roo Code, Claude Code, Aider).
6552
7232
  These rules instruct the AI to automatically use ContextStream for memory and context.
6553
7233
  Supported editors: ${getAvailableEditors().join(", ")}`,
6554
7234
  inputSchema: external_exports.object({
6555
7235
  folder_path: external_exports.string().describe("Absolute path to the project folder"),
6556
- editors: external_exports.array(external_exports.enum(["windsurf", "cursor", "cline", "claude", "aider", "all"])).optional().describe("Which editors to generate rules for. Defaults to all."),
7236
+ editors: external_exports.array(external_exports.enum(["windsurf", "cursor", "cline", "kilo", "roo", "claude", "aider", "all"])).optional().describe("Which editors to generate rules for. Defaults to all."),
6557
7237
  workspace_name: external_exports.string().optional().describe("Workspace name to include in rules"),
6558
7238
  workspace_id: external_exports.string().uuid().optional().describe("Workspace ID to include in rules"),
6559
7239
  project_name: external_exports.string().optional().describe("Project name to include in rules"),
@@ -6617,6 +7297,274 @@ Supported editors: ${getAvailableEditors().join(", ")}`,
6617
7297
  return { content: [{ type: "text", text: formatContent(summary) }], structuredContent: toStructured(summary) };
6618
7298
  }
6619
7299
  );
7300
+ registerTool(
7301
+ "session_summary",
7302
+ {
7303
+ title: "Get compact context summary",
7304
+ description: `Get a compact, token-efficient summary of workspace context (~500 tokens).
7305
+ This is designed to replace loading full chat history in AI prompts.
7306
+ Returns: workspace/project info, top decisions (titles only), preferences, memory count.
7307
+ Use this at conversation start instead of loading everything.
7308
+ For specific details, use session_recall or session_smart_search.`,
7309
+ inputSchema: external_exports.object({
7310
+ workspace_id: external_exports.string().uuid().optional(),
7311
+ project_id: external_exports.string().uuid().optional(),
7312
+ max_tokens: external_exports.number().optional().describe("Maximum tokens for summary (default: 500)")
7313
+ })
7314
+ },
7315
+ async (input) => {
7316
+ let workspaceId = input.workspace_id;
7317
+ let projectId = input.project_id;
7318
+ if (!workspaceId && sessionManager) {
7319
+ const ctx = sessionManager.getContext();
7320
+ if (ctx) {
7321
+ workspaceId = ctx.workspace_id;
7322
+ projectId = projectId || ctx.project_id;
7323
+ }
7324
+ }
7325
+ const result = await client.getContextSummary({
7326
+ workspace_id: workspaceId,
7327
+ project_id: projectId,
7328
+ max_tokens: input.max_tokens
7329
+ });
7330
+ return {
7331
+ content: [{ type: "text", text: result.summary }],
7332
+ structuredContent: toStructured(result)
7333
+ };
7334
+ }
7335
+ );
7336
+ registerTool(
7337
+ "session_compress",
7338
+ {
7339
+ title: "Compress chat history to memory",
7340
+ description: `Extract and store key information from chat history as memory events.
7341
+ This allows clearing chat history while preserving important context.
7342
+ Use at conversation end or when context window is getting full.
7343
+
7344
+ Extracts:
7345
+ - Decisions made
7346
+ - User preferences learned
7347
+ - Insights discovered
7348
+ - Tasks/action items
7349
+ - Code patterns established
7350
+
7351
+ After compression, the AI can use session_recall to retrieve this context in future conversations.`,
7352
+ inputSchema: external_exports.object({
7353
+ chat_history: external_exports.string().describe("The chat history to compress and extract from"),
7354
+ workspace_id: external_exports.string().uuid().optional(),
7355
+ project_id: external_exports.string().uuid().optional(),
7356
+ extract_types: external_exports.array(external_exports.enum(["decisions", "preferences", "insights", "tasks", "code_patterns"])).optional().describe("Types of information to extract (default: all)")
7357
+ })
7358
+ },
7359
+ async (input) => {
7360
+ let workspaceId = input.workspace_id;
7361
+ let projectId = input.project_id;
7362
+ if (!workspaceId && sessionManager) {
7363
+ const ctx = sessionManager.getContext();
7364
+ if (ctx) {
7365
+ workspaceId = ctx.workspace_id;
7366
+ projectId = projectId || ctx.project_id;
7367
+ }
7368
+ }
7369
+ if (!workspaceId) {
7370
+ return {
7371
+ content: [{
7372
+ type: "text",
7373
+ text: "Error: workspace_id is required. Please call session_init first or provide workspace_id explicitly."
7374
+ }],
7375
+ isError: true
7376
+ };
7377
+ }
7378
+ const result = await client.compressChat({
7379
+ workspace_id: workspaceId,
7380
+ project_id: projectId,
7381
+ chat_history: input.chat_history,
7382
+ extract_types: input.extract_types
7383
+ });
7384
+ const summary = [
7385
+ `\u2705 Compressed chat history into ${result.events_created} memory events:`,
7386
+ "",
7387
+ `\u{1F4CB} Decisions: ${result.extracted.decisions.length}`,
7388
+ `\u2699\uFE0F Preferences: ${result.extracted.preferences.length}`,
7389
+ `\u{1F4A1} Insights: ${result.extracted.insights.length}`,
7390
+ `\u{1F4DD} Tasks: ${result.extracted.tasks.length}`,
7391
+ `\u{1F527} Code patterns: ${result.extracted.code_patterns.length}`,
7392
+ "",
7393
+ "These are now stored in ContextStream memory.",
7394
+ "Future conversations can access them via session_recall."
7395
+ ].join("\n");
7396
+ return {
7397
+ content: [{ type: "text", text: summary }],
7398
+ structuredContent: toStructured(result)
7399
+ };
7400
+ }
7401
+ );
7402
+ registerTool(
7403
+ "ai_context_budget",
7404
+ {
7405
+ title: "Get context within token budget",
7406
+ description: `Get the most relevant context that fits within a specified token budget.
7407
+ This is the key tool for token-efficient AI interactions:
7408
+
7409
+ 1. AI calls this with a query and token budget
7410
+ 2. Gets optimally selected context (decisions, memory, code)
7411
+ 3. No need to include full chat history in the prompt
7412
+
7413
+ The tool prioritizes:
7414
+ 1. Relevant decisions (highest value per token)
7415
+ 2. Query-matched memory events
7416
+ 3. Related code snippets (if requested and budget allows)
7417
+
7418
+ Example: ai_context_budget(query="authentication", max_tokens=1000)`,
7419
+ inputSchema: external_exports.object({
7420
+ query: external_exports.string().describe("What context to retrieve"),
7421
+ max_tokens: external_exports.number().describe("Maximum tokens for the context (e.g., 500, 1000, 2000)"),
7422
+ workspace_id: external_exports.string().uuid().optional(),
7423
+ project_id: external_exports.string().uuid().optional(),
7424
+ include_decisions: external_exports.boolean().optional().describe("Include relevant decisions (default: true)"),
7425
+ include_memory: external_exports.boolean().optional().describe("Include memory search results (default: true)"),
7426
+ include_code: external_exports.boolean().optional().describe("Include code search results (default: false)")
7427
+ })
7428
+ },
7429
+ async (input) => {
7430
+ let workspaceId = input.workspace_id;
7431
+ let projectId = input.project_id;
7432
+ if (!workspaceId && sessionManager) {
7433
+ const ctx = sessionManager.getContext();
7434
+ if (ctx) {
7435
+ workspaceId = ctx.workspace_id;
7436
+ projectId = projectId || ctx.project_id;
7437
+ }
7438
+ }
7439
+ const result = await client.getContextWithBudget({
7440
+ query: input.query,
7441
+ workspace_id: workspaceId,
7442
+ project_id: projectId,
7443
+ max_tokens: input.max_tokens,
7444
+ include_decisions: input.include_decisions,
7445
+ include_memory: input.include_memory,
7446
+ include_code: input.include_code
7447
+ });
7448
+ const footer = `
7449
+ ---
7450
+ \u{1F4CA} Token estimate: ${result.token_estimate}/${input.max_tokens} | Sources: ${result.sources.length}`;
7451
+ return {
7452
+ content: [{ type: "text", text: result.context + footer }],
7453
+ structuredContent: toStructured(result)
7454
+ };
7455
+ }
7456
+ );
7457
+ registerTool(
7458
+ "session_delta",
7459
+ {
7460
+ title: "Get context changes since timestamp",
7461
+ description: `Get new context added since a specific timestamp.
7462
+ Useful for efficient context synchronization without reloading everything.
7463
+
7464
+ Returns:
7465
+ - Count of new decisions and memory events
7466
+ - List of new items with titles and timestamps
7467
+
7468
+ Use case: AI can track what's new since last session_init.`,
7469
+ inputSchema: external_exports.object({
7470
+ since: external_exports.string().describe('ISO timestamp to get changes since (e.g., "2025-12-05T00:00:00Z")'),
7471
+ workspace_id: external_exports.string().uuid().optional(),
7472
+ project_id: external_exports.string().uuid().optional(),
7473
+ limit: external_exports.number().optional().describe("Maximum items to return (default: 20)")
7474
+ })
7475
+ },
7476
+ async (input) => {
7477
+ let workspaceId = input.workspace_id;
7478
+ let projectId = input.project_id;
7479
+ if (!workspaceId && sessionManager) {
7480
+ const ctx = sessionManager.getContext();
7481
+ if (ctx) {
7482
+ workspaceId = ctx.workspace_id;
7483
+ projectId = projectId || ctx.project_id;
7484
+ }
7485
+ }
7486
+ const result = await client.getContextDelta({
7487
+ workspace_id: workspaceId,
7488
+ project_id: projectId,
7489
+ since: input.since,
7490
+ limit: input.limit
7491
+ });
7492
+ const summary = [
7493
+ `\u{1F4C8} Context changes since ${input.since}:`,
7494
+ ` New decisions: ${result.new_decisions}`,
7495
+ ` New memory events: ${result.new_memory}`,
7496
+ "",
7497
+ ...result.items.slice(0, 10).map((i) => `\u2022 [${i.type}] ${i.title}`),
7498
+ result.items.length > 10 ? ` (+${result.items.length - 10} more)` : ""
7499
+ ].filter(Boolean).join("\n");
7500
+ return {
7501
+ content: [{ type: "text", text: summary }],
7502
+ structuredContent: toStructured(result)
7503
+ };
7504
+ }
7505
+ );
7506
+ registerTool(
7507
+ "context_smart",
7508
+ {
7509
+ title: "Get smart context for user query",
7510
+ description: `**CALL THIS BEFORE EVERY AI RESPONSE** to get relevant context.
7511
+
7512
+ This is the KEY tool for token-efficient AI interactions. It:
7513
+ 1. Analyzes the user's message to understand what context is needed
7514
+ 2. Retrieves only relevant context in a minified, token-efficient format
7515
+ 3. Replaces the need to include full chat history in prompts
7516
+
7517
+ Format options:
7518
+ - 'minified': Ultra-compact D:decision|P:preference|M:memory (default, ~200 tokens)
7519
+ - 'readable': Line-separated with labels
7520
+ - 'structured': JSON-like grouped format
7521
+
7522
+ Type codes: W=Workspace, P=Project, D=Decision, M=Memory, I=Insight, T=Task
7523
+
7524
+ Example usage:
7525
+ 1. User asks "how should I implement auth?"
7526
+ 2. AI calls context_smart(user_message="how should I implement auth?")
7527
+ 3. Gets: "W:Maker|P:contextstream|D:Use JWT for auth|D:No session cookies|M:Auth API at /auth/..."
7528
+ 4. AI responds with relevant context already loaded
7529
+
7530
+ This saves ~80% tokens compared to including full chat history.`,
7531
+ inputSchema: external_exports.object({
7532
+ user_message: external_exports.string().describe("The user message to analyze and get context for"),
7533
+ workspace_id: external_exports.string().uuid().optional(),
7534
+ project_id: external_exports.string().uuid().optional(),
7535
+ max_tokens: external_exports.number().optional().describe("Maximum tokens for context (default: 800)"),
7536
+ format: external_exports.enum(["minified", "readable", "structured"]).optional().describe("Context format (default: minified)")
7537
+ })
7538
+ },
7539
+ async (input) => {
7540
+ if (sessionManager) {
7541
+ sessionManager.markContextSmartCalled();
7542
+ }
7543
+ let workspaceId = input.workspace_id;
7544
+ let projectId = input.project_id;
7545
+ if (!workspaceId && sessionManager) {
7546
+ const ctx = sessionManager.getContext();
7547
+ if (ctx) {
7548
+ workspaceId = ctx.workspace_id;
7549
+ projectId = projectId || ctx.project_id;
7550
+ }
7551
+ }
7552
+ const result = await client.getSmartContext({
7553
+ user_message: input.user_message,
7554
+ workspace_id: workspaceId,
7555
+ project_id: projectId,
7556
+ max_tokens: input.max_tokens,
7557
+ format: input.format
7558
+ });
7559
+ const footer = `
7560
+ ---
7561
+ \u{1F3AF} ${result.sources_used} sources | ~${result.token_estimate} tokens | format: ${result.format}`;
7562
+ return {
7563
+ content: [{ type: "text", text: result.context + footer }],
7564
+ structuredContent: toStructured(result)
7565
+ };
7566
+ }
7567
+ );
6620
7568
  }
6621
7569
 
6622
7570
  // src/resources.ts
@@ -6963,6 +7911,8 @@ var SessionManager = class {
6963
7911
  this.context = null;
6964
7912
  this.ideRoots = [];
6965
7913
  this.folderPath = null;
7914
+ this.contextSmartCalled = false;
7915
+ this.warningShown = false;
6966
7916
  }
6967
7917
  /**
6968
7918
  * Check if session has been auto-initialized
@@ -6989,6 +7939,30 @@ var SessionManager = class {
6989
7939
  setFolderPath(path3) {
6990
7940
  this.folderPath = path3;
6991
7941
  }
7942
+ /**
7943
+ * Mark that context_smart has been called in this session
7944
+ */
7945
+ markContextSmartCalled() {
7946
+ this.contextSmartCalled = true;
7947
+ }
7948
+ /**
7949
+ * Check if context_smart has been called and warn if not.
7950
+ * Returns true if a warning was shown, false otherwise.
7951
+ */
7952
+ warnIfContextSmartNotCalled(toolName) {
7953
+ const skipWarningTools = ["session_init", "context_smart", "session_recall", "session_remember"];
7954
+ if (skipWarningTools.includes(toolName)) {
7955
+ return false;
7956
+ }
7957
+ if (!this.initialized || this.contextSmartCalled || this.warningShown) {
7958
+ return false;
7959
+ }
7960
+ this.warningShown = true;
7961
+ console.warn(`[ContextStream] Warning: ${toolName} called without context_smart.`);
7962
+ console.warn('[ContextStream] For best results, call context_smart(user_message="...") before other tools.');
7963
+ console.warn("[ContextStream] context_smart provides semantically relevant context for the user's query.");
7964
+ return true;
7965
+ }
6992
7966
  /**
6993
7967
  * Auto-initialize the session if not already done.
6994
7968
  * Returns context summary to prepend to tool response.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@contextstream/mcp-server",
3
- "version": "0.3.9",
3
+ "version": "0.3.11",
4
4
  "description": "MCP server exposing ContextStream public API - code context, memory, search, and AI tools for developers",
5
5
  "type": "module",
6
6
  "license": "MIT",