@contextstream/mcp-server 0.3.9 → 0.3.10

This diff shows the contents of publicly released package versions as they appear in their respective public registries; it is provided for informational purposes only.
Files changed (2)
  1. package/dist/index.js +974 -79
  2. package/package.json +1 -1
package/dist/index.js CHANGED
@@ -5276,118 +5276,700 @@ var ContextStreamClient = class {
  }
  return results;
  }
+ // ============================================
+ // Token-Saving Context Tools
+ // ============================================
+ /**
+ * Get a compact, token-efficient summary of workspace context.
+ * Designed to be included in every AI prompt without consuming many tokens.
+ *
+ * Target: ~500 tokens max
+ *
+ * This replaces loading full chat history - AI can call session_recall
+ * for specific details when needed.
+ */
+ async getContextSummary(params) {
+ const withDefaults = this.withDefaults(params);
+ const maxTokens = params.max_tokens || 500;
+ if (!withDefaults.workspace_id) {
+ return {
+ summary: "No workspace context loaded. Call session_init first.",
+ decision_count: 0,
+ memory_count: 0
+ };
+ }
+ const parts = [];
+ let workspaceName;
+ let projectName;
+ let decisionCount = 0;
+ let memoryCount = 0;
+ try {
+ const ws = await this.getWorkspace(withDefaults.workspace_id);
+ workspaceName = ws?.name;
+ if (workspaceName) {
+ parts.push(`\u{1F4C1} Workspace: ${workspaceName}`);
+ }
+ } catch {
+ }
+ if (withDefaults.project_id) {
+ try {
+ const proj = await this.getProject(withDefaults.project_id);
+ projectName = proj?.name;
+ if (projectName) {
+ parts.push(`\u{1F4C2} Project: ${projectName}`);
+ }
+ } catch {
+ }
+ }
+ try {
+ const decisions = await this.memoryDecisions({
+ workspace_id: withDefaults.workspace_id,
+ project_id: withDefaults.project_id,
+ limit: 5
+ });
+ if (decisions.items && decisions.items.length > 0) {
+ decisionCount = decisions.items.length;
+ parts.push("");
+ parts.push("\u{1F4CB} Recent Decisions:");
+ decisions.items.slice(0, 3).forEach((d, i) => {
+ parts.push(` ${i + 1}. ${d.title || "Untitled"}`);
+ });
+ if (decisions.items.length > 3) {
+ parts.push(` (+${decisions.items.length - 3} more)`);
+ }
+ }
+ } catch {
+ }
+ try {
+ const prefs = await this.memorySearch({
+ query: "user preferences coding style settings",
+ workspace_id: withDefaults.workspace_id,
+ limit: 5
+ });
+ if (prefs.results && prefs.results.length > 0) {
+ parts.push("");
+ parts.push("\u2699\uFE0F Preferences:");
+ prefs.results.slice(0, 3).forEach((p) => {
+ const title = p.title || "Preference";
+ parts.push(` \u2022 ${title.slice(0, 60)}${title.length > 60 ? "..." : ""}`);
+ });
+ }
+ } catch {
+ }
+ try {
+ const summary2 = await this.memorySummary(withDefaults.workspace_id);
+ memoryCount = summary2.events || 0;
+ if (memoryCount > 0) {
+ parts.push("");
+ parts.push(`\u{1F9E0} Memory: ${memoryCount} events stored`);
+ }
+ } catch {
+ }
+ parts.push("");
+ parts.push('\u{1F4A1} Use session_recall("topic") for specific context');
+ const summary = parts.join("\n");
+ return {
+ summary,
+ workspace_name: workspaceName,
+ project_name: projectName,
+ decision_count: decisionCount,
+ memory_count: memoryCount
+ };
+ }
+ /**
+ * Compress chat history into structured memory events.
+ * This extracts key information and stores it, allowing the chat
+ * history to be cleared while preserving context.
+ *
+ * Use this at the end of a conversation or when context window is full.
+ */
+ async compressChat(params) {
+ const withDefaults = this.withDefaults(params);
+ if (!withDefaults.workspace_id) {
+ throw new Error("workspace_id is required for compressChat");
+ }
+ const extractTypes = params.extract_types || ["decisions", "preferences", "insights", "tasks", "code_patterns"];
+ const extracted = {
+ decisions: [],
+ preferences: [],
+ insights: [],
+ tasks: [],
+ code_patterns: []
+ };
+ let eventsCreated = 0;
+ const lines = params.chat_history.split("\n");
+ for (const line of lines) {
+ const lowerLine = line.toLowerCase();
+ if (extractTypes.includes("decisions")) {
+ if (lowerLine.includes("decided to") || lowerLine.includes("decision:") || lowerLine.includes("we'll use") || lowerLine.includes("going with") || lowerLine.includes("chose ")) {
+ extracted.decisions.push(line.trim());
+ }
+ }
+ if (extractTypes.includes("preferences")) {
+ if (lowerLine.includes("prefer") || lowerLine.includes("i like") || lowerLine.includes("always use") || lowerLine.includes("don't use") || lowerLine.includes("never use")) {
+ extracted.preferences.push(line.trim());
+ }
+ }
+ if (extractTypes.includes("tasks")) {
+ if (lowerLine.includes("todo:") || lowerLine.includes("task:") || lowerLine.includes("need to") || lowerLine.includes("should implement") || lowerLine.includes("will add")) {
+ extracted.tasks.push(line.trim());
+ }
+ }
+ if (extractTypes.includes("insights")) {
+ if (lowerLine.includes("learned that") || lowerLine.includes("realized") || lowerLine.includes("found out") || lowerLine.includes("discovered") || lowerLine.includes("important:") || lowerLine.includes("note:")) {
+ extracted.insights.push(line.trim());
+ }
+ }
+ if (extractTypes.includes("code_patterns")) {
+ if (lowerLine.includes("pattern:") || lowerLine.includes("convention:") || lowerLine.includes("style:") || lowerLine.includes("always format") || lowerLine.includes("naming convention")) {
+ extracted.code_patterns.push(line.trim());
+ }
+ }
+ }
+ for (const decision of extracted.decisions.slice(0, 5)) {
+ try {
+ await this.captureContext({
+ workspace_id: withDefaults.workspace_id,
+ project_id: withDefaults.project_id,
+ event_type: "decision",
+ title: decision.slice(0, 100),
+ content: decision,
+ importance: "medium"
+ });
+ eventsCreated++;
+ } catch {
+ }
+ }
+ for (const pref of extracted.preferences.slice(0, 5)) {
+ try {
+ await this.captureContext({
+ workspace_id: withDefaults.workspace_id,
+ project_id: withDefaults.project_id,
+ event_type: "preference",
+ title: pref.slice(0, 100),
+ content: pref,
+ importance: "medium"
+ });
+ eventsCreated++;
+ } catch {
+ }
+ }
+ for (const task of extracted.tasks.slice(0, 5)) {
+ try {
+ await this.captureContext({
+ workspace_id: withDefaults.workspace_id,
+ project_id: withDefaults.project_id,
+ event_type: "task",
+ title: task.slice(0, 100),
+ content: task,
+ importance: "medium"
+ });
+ eventsCreated++;
+ } catch {
+ }
+ }
+ for (const insight of extracted.insights.slice(0, 5)) {
+ try {
+ await this.captureContext({
+ workspace_id: withDefaults.workspace_id,
+ project_id: withDefaults.project_id,
+ event_type: "insight",
+ title: insight.slice(0, 100),
+ content: insight,
+ importance: "medium"
+ });
+ eventsCreated++;
+ } catch {
+ }
+ }
+ return {
+ events_created: eventsCreated,
+ extracted
+ };
+ }
+ /**
+ * Get context optimized for a token budget.
+ * Returns the most relevant context that fits within the specified token limit.
+ *
+ * This is the key tool for token-efficient AI interactions:
+ * - AI calls this with a query and token budget
+ * - Gets optimally selected context
+ * - No need to include full chat history
+ */
+ async getContextWithBudget(params) {
+ const withDefaults = this.withDefaults(params);
+ const maxTokens = params.max_tokens || 2e3;
+ const charsPerToken = 4;
+ const maxChars = maxTokens * charsPerToken;
+ const parts = [];
+ const sources = [];
+ let currentChars = 0;
+ if (params.include_decisions !== false && withDefaults.workspace_id) {
+ try {
+ const decisions = await this.memoryDecisions({
+ workspace_id: withDefaults.workspace_id,
+ project_id: withDefaults.project_id,
+ limit: 10
+ });
+ if (decisions.items) {
+ parts.push("## Relevant Decisions\n");
+ currentChars += 25;
+ for (const d of decisions.items) {
+ const entry = `\u2022 ${d.title || "Decision"}
+ `;
+ if (currentChars + entry.length > maxChars * 0.4) break;
+ parts.push(entry);
+ currentChars += entry.length;
+ sources.push({ type: "decision", title: d.title || "Decision" });
+ }
+ parts.push("\n");
+ }
+ } catch {
+ }
+ }
+ if (params.include_memory !== false && withDefaults.workspace_id) {
+ try {
+ const memory = await this.memorySearch({
+ query: params.query,
+ workspace_id: withDefaults.workspace_id,
+ project_id: withDefaults.project_id,
+ limit: 5
+ });
+ if (memory.results) {
+ parts.push("## Related Context\n");
+ currentChars += 20;
+ for (const m of memory.results) {
+ const title = m.title || "Context";
+ const content = m.content?.slice(0, 200) || "";
+ const entry = `\u2022 ${title}: ${content}...
+ `;
+ if (currentChars + entry.length > maxChars * 0.7) break;
+ parts.push(entry);
+ currentChars += entry.length;
+ sources.push({ type: "memory", title });
+ }
+ parts.push("\n");
+ }
+ } catch {
+ }
+ }
+ if (params.include_code && withDefaults.project_id && currentChars < maxChars * 0.8) {
+ try {
+ const code = await this.searchSemantic({
+ query: params.query,
+ workspace_id: withDefaults.workspace_id,
+ project_id: withDefaults.project_id,
+ limit: 3
+ });
+ if (code.results) {
+ parts.push("## Relevant Code\n");
+ currentChars += 18;
+ for (const c of code.results) {
+ const path3 = c.file_path || "file";
+ const content = c.content?.slice(0, 150) || "";
+ const entry = `\u2022 ${path3}: ${content}...
+ `;
+ if (currentChars + entry.length > maxChars) break;
+ parts.push(entry);
+ currentChars += entry.length;
+ sources.push({ type: "code", title: path3 });
+ }
+ }
+ } catch {
+ }
+ }
+ const context = parts.join("");
+ const tokenEstimate = Math.ceil(context.length / charsPerToken);
+ return {
+ context,
+ token_estimate: tokenEstimate,
+ sources
+ };
+ }
+ /**
+ * Get incremental context changes since a given timestamp.
+ * Useful for syncing context without reloading everything.
+ */
+ async getContextDelta(params) {
+ const withDefaults = this.withDefaults(params);
+ if (!withDefaults.workspace_id) {
+ return { new_decisions: 0, new_memory: 0, items: [] };
+ }
+ const items = [];
+ let newDecisions = 0;
+ let newMemory = 0;
+ try {
+ const memory = await this.listMemoryEvents({
+ workspace_id: withDefaults.workspace_id,
+ project_id: withDefaults.project_id,
+ limit: params.limit || 20
+ });
+ if (memory.items) {
+ for (const item of memory.items) {
+ const createdAt = item.created_at || "";
+ if (createdAt > params.since) {
+ const type = item.metadata?.original_type || "memory";
+ items.push({
+ type,
+ title: item.title || "Untitled",
+ created_at: createdAt
+ });
+ if (type === "decision") newDecisions++;
+ else newMemory++;
+ }
+ }
+ }
+ } catch {
+ }
+ return {
+ new_decisions: newDecisions,
+ new_memory: newMemory,
+ items
+ };
+ }
+ /**
+ * Get smart context for a user query - CALL THIS BEFORE EVERY RESPONSE.
+ *
+ * This is the key tool for automatic context injection:
+ * 1. Analyzes the user's message to understand what context is needed
+ * 2. Retrieves relevant context in a minified, token-efficient format
+ * 3. Returns context that the AI can use without including chat history
+ *
+ * The format is optimized for AI consumption:
+ * - Compact notation (D: for Decision, P: for Preference, etc.)
+ * - No redundant whitespace
+ * - Structured for easy parsing
+ *
+ * Format options:
+ * - 'minified': Ultra-compact TYPE:value|TYPE:value|...
+ * - 'readable': Human-readable with line breaks
+ * - 'structured': JSON-like grouped format
+ */
+ async getSmartContext(params) {
+ const withDefaults = this.withDefaults(params);
+ const maxTokens = params.max_tokens || 800;
+ const format = params.format || "minified";
+ if (!withDefaults.workspace_id) {
+ return {
+ context: "[NO_WORKSPACE]",
+ token_estimate: 2,
+ format,
+ sources_used: 0
+ };
+ }
+ const message = params.user_message.toLowerCase();
+ const keywords = this.extractKeywords(message);
+ const items = [];
+ try {
+ const ws = await this.getWorkspace(withDefaults.workspace_id);
+ if (ws?.name) {
+ items.push({ type: "W", key: "workspace", value: ws.name, relevance: 1 });
+ }
+ } catch {
+ }
+ if (withDefaults.project_id) {
+ try {
+ const proj = await this.getProject(withDefaults.project_id);
+ if (proj?.name) {
+ items.push({ type: "P", key: "project", value: proj.name, relevance: 1 });
+ }
+ } catch {
+ }
+ }
+ try {
+ const decisions = await this.memoryDecisions({
+ workspace_id: withDefaults.workspace_id,
+ project_id: withDefaults.project_id,
+ limit: 10
+ });
+ if (decisions.items) {
+ for (const d of decisions.items) {
+ const title = d.title || "";
+ const content = d.content || "";
+ const relevance = this.calculateRelevance(keywords, title + " " + content);
+ items.push({
+ type: "D",
+ key: "decision",
+ value: title.slice(0, 80),
+ relevance
+ });
+ }
+ }
+ } catch {
+ }
+ if (keywords.length > 0) {
+ try {
+ const memory = await this.memorySearch({
+ query: params.user_message.slice(0, 200),
+ workspace_id: withDefaults.workspace_id,
+ project_id: withDefaults.project_id,
+ limit: 5
+ });
+ if (memory.results) {
+ for (const m of memory.results) {
+ const title = m.title || "";
+ const content = m.content || "";
+ items.push({
+ type: "M",
+ key: "memory",
+ value: title.slice(0, 80) + (content ? ": " + content.slice(0, 100) : ""),
+ relevance: 0.8
+ // Memory search already ranked by relevance
+ });
+ }
+ }
+ } catch {
+ }
+ }
+ items.sort((a, b) => b.relevance - a.relevance);
+ let context;
+ let charsUsed = 0;
+ const maxChars = maxTokens * 4;
+ if (format === "minified") {
+ const parts = [];
+ for (const item of items) {
+ const entry = `${item.type}:${item.value}`;
+ if (charsUsed + entry.length + 1 > maxChars) break;
+ parts.push(entry);
+ charsUsed += entry.length + 1;
+ }
+ context = parts.join("|");
+ } else if (format === "structured") {
+ const grouped = {};
+ for (const item of items) {
+ if (charsUsed > maxChars) break;
+ if (!grouped[item.type]) grouped[item.type] = [];
+ grouped[item.type].push(item.value);
+ charsUsed += item.value.length + 5;
+ }
+ context = JSON.stringify(grouped);
+ } else {
+ const lines = ["[CTX]"];
+ for (const item of items) {
+ const line = `${item.type}:${item.value}`;
+ if (charsUsed + line.length + 1 > maxChars) break;
+ lines.push(line);
+ charsUsed += line.length + 1;
+ }
+ lines.push("[/CTX]");
+ context = lines.join("\n");
+ }
+ return {
+ context,
+ token_estimate: Math.ceil(context.length / 4),
+ format,
+ sources_used: items.filter((i) => context.includes(i.value.slice(0, 20))).length
+ };
+ }
+ /**
+ * Extract keywords from a message for relevance matching
+ */
+ extractKeywords(message) {
+ const stopWords = /* @__PURE__ */ new Set([
+ "the", "a", "an", "is", "are", "was", "were", "be", "been", "being",
+ "have", "has", "had", "do", "does", "did", "will", "would", "could", "should",
+ "may", "might", "must", "can", "to", "of", "in", "for", "on", "with",
+ "at", "by", "from", "as", "into", "through", "during", "before", "after", "above",
+ "below", "between", "under", "again", "further", "then", "once", "here", "there", "when",
+ "where", "why", "how", "all", "each", "few", "more", "most", "other", "some",
+ "such", "no", "nor", "not", "only", "own", "same", "so", "than", "too",
+ "very", "just", "and", "but", "if", "or", "because", "until", "while", "this",
+ "that", "these", "those", "what", "which", "who", "whom", "i", "me", "my",
+ "we", "our", "you", "your", "he", "she", "it", "they", "them"
+ ]);
+ return message.toLowerCase().replace(/[^\w\s]/g, " ").split(/\s+/).filter((word) => word.length > 2 && !stopWords.has(word));
+ }
+ /**
+ * Calculate relevance score based on keyword matches
+ */
+ calculateRelevance(keywords, text) {
+ if (keywords.length === 0) return 0.5;
+ const textLower = text.toLowerCase();
+ let matches = 0;
+ for (const keyword of keywords) {
+ if (textLower.includes(keyword)) {
+ matches++;
+ }
+ }
+ return matches / keywords.length;
+ }
  };
 
  // src/rules-templates.ts
  var CONTEXTSTREAM_RULES = `
  ## ContextStream Integration
 
- You have access to ContextStream MCP tools for persistent memory and context across sessions.
+ You have access to ContextStream MCP tools for persistent memory and context.
 
- ---
-
- ### \u{1F6A8} ESSENTIAL Tools (Always Use)
+ ## TL;DR - REQUIRED EVERY MESSAGE
 
- These tools should be called automatically in every conversation:
+ | Message | What to Call |
+ |---------|--------------|
+ | **1st message** | \`session_init(folder_path="...", context_hint="<user's message>")\` |
+ | **2nd+ messages** | \`context_smart(user_message="<user's message>")\` |
+ | **After completing task** | \`session_capture(...)\` - MUST capture decisions/insights |
 
- | When | Tool | Example Prompt |
- |------|------|----------------|
- | **Conversation start** | \`session_init\` | "Load my workspace context" |
- | **User makes a decision** | \`session_capture\` | "Remember we decided to use PostgreSQL" |
- | **User states a preference** | \`session_capture\` | "Remember I prefer TypeScript strict mode" |
- | **Before starting work** | \`session_recall\` | "What did we decide about auth?" |
+ **NO EXCEPTIONS.** Do not skip even if you think you have enough context.
 
  ---
 
- ### \u{1F4DD} Memory & Context Tools (Use Frequently)
+ ### \u26A0\uFE0F Why context_smart is Required (Even After session_init)
 
- | Tool | Example Prompt |
- |------|----------------|
- | \`session_remember\` | "Remember that the API uses JWT tokens" |
- | \`session_smart_search\` | "Search for everything about payment processing" |
- | \`memory_search\` | "Search my notes for caching strategy" |
- | \`memory_decisions\` | "Show me all past decisions" |
- | \`memory_timeline\` | "Show me the timeline of this workspace" |
- | \`memory_summary\` | "Summarize what we've worked on" |
+ **Common mistake:** "session_init already gave me context, I don't need context_smart"
 
- ---
+ **This is WRONG. Here's why:**
+ - \`session_init\` returns the last ~10 items **BY TIME** (chronological)
+ - \`context_smart\` **SEARCHES** for items **RELEVANT to THIS message** (semantic)
 
- ### \u{1F50D} Code Search Tools (Use When Exploring Code)
+ **Example failure:**
+ - User asks: "how should I implement authentication?"
+ - Auth decisions were made 20 conversations ago
+ - \u274C \`session_init\` won't have it (too old, not in recent 10)
+ - \u2705 \`context_smart\` FINDS it via semantic search
 
- | Tool | Example Prompt |
- |------|----------------|
- | \`search_semantic\` | "How does authentication work in this codebase?" |
- | \`search_hybrid\` | "Find code related to user permissions" |
- | \`search_keyword\` | "Search for handleSubmit function" |
- | \`search_pattern\` | "Find all TODO comments" |
- | \`ai_context\` | "Build context for implementing user roles" |
- | \`ai_enhanced_context\` | "Give me deep context on the payment system" |
+ **Without context_smart, you WILL miss relevant older context.**
 
  ---
 
- ### \u{1F578}\uFE0F Code Analysis Tools (Use for Architecture)
-
- | Tool | Example Prompt |
- |------|----------------|
- | \`graph_dependencies\` | "What does UserService depend on?" |
- | \`graph_impact\` | "What would be affected if I change the User model?" |
- | \`graph_call_path\` | "How does login() eventually call the database?" |
- | \`graph_circular_dependencies\` | "Are there any circular dependencies?" |
- | \`graph_unused_code\` | "Find unused code in this project" |
-
- ---
+ ### When to Capture (MANDATORY)
 
- ### \u{1F4C1} Project & Workspace Tools
+ | When | Tool | Example |
+ |------|------|---------|
+ | User makes a decision | \`session_capture\` | "Let's use PostgreSQL" \u2192 capture as decision |
+ | User states preference | \`session_capture\` | "I prefer TypeScript" \u2192 capture as preference |
+ | You complete a task | \`session_capture\` | Capture what was done, decisions made |
+ | Need past context | \`session_recall\` | "What did we decide about X?" |
 
- | Tool | Example Prompt |
- |------|----------------|
- | \`projects_overview\` | "Give me an overview of this project" |
- | \`projects_index\` | "Re-index this project" |
- | \`workspaces_overview\` | "Give me a workspace overview" |
- | \`workspace_associate\` | "Associate this folder with my workspace" |
- | \`generate_editor_rules\` | "Generate ContextStream rules for this project" |
+ **You MUST capture after completing any significant task.** This ensures future sessions have context.
 
  ---
 
- ### Automatic Behavior Guidelines
+ ### Behavior Rules
 
- **At conversation start:**
- - Call \`session_init\` to load context (this happens automatically on first tool call)
- - Review the loaded decisions and memory before responding
+ **First message of conversation:**
+ 1. Call \`session_init(folder_path="<cwd>", context_hint="<user's message>")\`
+ 2. Then respond
 
- **During the conversation:**
- - When the user makes a decision \u2192 \`session_capture\` with \`event_type: "decision"\`
- - When you learn a user preference \u2192 \`session_capture\` with \`event_type: "preference"\`
- - When discovering important context \u2192 \`session_remember\`
- - When encountering a bug \u2192 \`session_capture\` with \`event_type: "bug"\`
- - When a feature is discussed \u2192 \`session_capture\` with \`event_type: "feature"\`
+ **Every subsequent message:**
+ 1. Call \`context_smart(user_message="<user's message>")\` FIRST
+ 2. Then respond
 
- **When exploring code:**
- - Use \`search_semantic\` or \`search_hybrid\` for code search
- - Use \`graph_dependencies\` to understand architecture
- - Use \`graph_impact\` before making changes
+ **After completing a task:**
+ 1. Call \`session_capture\` to save decisions, preferences, or insights
+ 2. This is NOT optional
 
- **At conversation end (if significant work done):**
- - Summarize and capture key decisions with \`session_capture\`
+ **When user asks about past decisions:**
+ - Use \`session_recall\` - do NOT ask user to repeat themselves
 
  ---
 
  ### Quick Examples
 
  \`\`\`
- # Start of conversation - load context
- session_init()
+ # First message - user asks about auth
+ session_init(folder_path="/path/to/project", context_hint="how should I implement auth?")
+ # Returns workspace info + semantically relevant auth decisions from ANY time
 
- # User says "Let's use PostgreSQL for the database"
- session_capture(event_type="decision", title="Database Choice", content="Decided to use PostgreSQL...")
+ # Second message - user asks about database
+ context_smart(user_message="what database should I use?")
+ # Returns: W:Maker|P:myproject|D:Use PostgreSQL|D:No ORMs|M:DB schema at...
 
- # User says "I prefer async/await over callbacks"
- session_capture(event_type="preference", title="Async Style", content="User prefers async/await...")
+ # User says "Let's use Redis for caching"
+ session_capture(event_type="decision", title="Caching Choice", content="Using Redis for caching layer")
 
- # Need to find related code
- search_semantic(query="authentication middleware")
+ # After completing implementation
+ session_capture(event_type="decision", title="Auth Implementation Complete", content="Implemented JWT auth with refresh tokens...")
 
- # Check what we discussed before
+ # Check past decisions
  session_recall(query="what did we decide about caching?")
  \`\`\`
  `.trim();
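
Note: the minified context string shown in the examples above (`W:Maker|P:myproject|D:Use PostgreSQL|...`) is plain text, not JSON. A minimal sketch of how a consumer might split it back into typed entries follows; the `parseMinifiedContext` helper is hypothetical and not part of this package:

```js
// Hypothetical helper (not shipped with @contextstream/mcp-server):
// splits "W:Maker|P:myproject|D:Use PostgreSQL" into typed entries.
// Type codes per the context_smart description: W=Workspace, P=Project,
// D=Decision, M=Memory, I=Insight, T=Task.
function parseMinifiedContext(context) {
  if (!context || context === "[NO_WORKSPACE]") return [];
  return context.split("|").map((entry) => {
    const sep = entry.indexOf(":"); // split on the FIRST colon only
    return {
      type: entry.slice(0, sep),    // e.g. "D"
      value: entry.slice(sep + 1)   // e.g. "Use PostgreSQL"
    };
  });
}

console.log(parseMinifiedContext("W:Maker|P:myproject|D:Use PostgreSQL"));
// [ { type: "W", value: "Maker" }, { type: "P", value: "myproject" },
//   { type: "D", value: "Use PostgreSQL" } ]
```

Splitting on the first colon matters because memory entries can embed a `title: content` pair inside the value.
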
@@ -5411,6 +5993,20 @@ ${CONTEXTSTREAM_RULES}
  description: "Cline AI rules",
  content: `# Cline Rules
  ${CONTEXTSTREAM_RULES}
+ `
+ },
+ kilo: {
+ filename: ".kilocode/rules/contextstream.md",
+ description: "Kilo Code AI rules",
+ content: `# Kilo Code Rules
+ ${CONTEXTSTREAM_RULES}
+ `
+ },
+ roo: {
+ filename: ".roo/rules/contextstream.md",
+ description: "Roo Code AI rules",
+ content: `# Roo Code Rules
+ ${CONTEXTSTREAM_RULES}
  `
  },
  claude: {
@@ -5484,6 +6080,7 @@ function registerTools(server, client, sessionManager) {
  contextPrefix = autoInitResult.contextSummary + "\n\n";
  }
  }
+ sessionManager.warnIfContextSmartNotCalled(toolName);
  const result = await handler(input);
  if (contextPrefix && result && typeof result === "object") {
  const r = result;
@@ -6287,13 +6884,17 @@ Automatically detects code files and skips ignored directories like node_modules
  This is the FIRST tool AI assistants should call when starting a conversation.
  Returns: workspace info, project info, recent memory, recent decisions, and relevant context.
  Automatically detects the IDE workspace/project path and can auto-index code.
- IMPORTANT: If you know the current workspace folder path, pass it as folder_path for accurate context resolution.`,
+
+ IMPORTANT: Pass the user's FIRST MESSAGE as context_hint to get semantically relevant context!
+ Example: session_init(folder_path="/path/to/project", context_hint="how do I implement auth?")
+
+ This does semantic search on the first message. You only need context_smart on subsequent messages.`,
  inputSchema: external_exports.object({
  folder_path: external_exports.string().optional().describe("Current workspace/project folder path (absolute). Use this when IDE roots are not available."),
  workspace_id: external_exports.string().uuid().optional().describe("Workspace to initialize context for"),
  project_id: external_exports.string().uuid().optional().describe("Project to initialize context for"),
  session_id: external_exports.string().optional().describe("Custom session ID (auto-generated if not provided)"),
- context_hint: external_exports.string().optional().describe("Hint about what the user wants to work on (used for context search)"),
+ context_hint: external_exports.string().optional().describe("RECOMMENDED: Pass the user's first message here for semantic search. This finds relevant context from ANY time, not just recent items."),
  include_recent_memory: external_exports.boolean().optional().describe("Include recent memory events (default: true)"),
  include_decisions: external_exports.boolean().optional().describe("Include recent decisions (default: true)"),
  auto_index: external_exports.boolean().optional().describe("Automatically create and index project from IDE workspace (default: true)")
@@ -6347,7 +6948,7 @@ Optionally generates AI editor rules for automatic ContextStream usage.`,
  workspace_id: external_exports.string().uuid().describe("Workspace ID to associate with"),
  workspace_name: external_exports.string().optional().describe("Workspace name for reference"),
  create_parent_mapping: external_exports.boolean().optional().describe("Also create a parent folder mapping (e.g., /dev/maker/* -> workspace)"),
- generate_editor_rules: external_exports.boolean().optional().describe("Generate AI editor rules (.windsurfrules, .cursorrules, etc.) for automatic ContextStream usage")
+ generate_editor_rules: external_exports.boolean().optional().describe("Generate AI editor rules for Windsurf, Cursor, Cline, Kilo Code, Roo Code, Claude Code, and Aider")
  })
  },
  async (input) => {
@@ -6548,12 +7149,12 @@ Example: "What were the auth decisions?" or "What are my TypeScript preferences?
  "generate_editor_rules",
  {
  title: "Generate editor AI rules",
- description: `Generate AI rule files for editors (Windsurf, Cursor, Cline, Claude Code, Aider).
+ description: `Generate AI rule files for editors (Windsurf, Cursor, Cline, Kilo Code, Roo Code, Claude Code, Aider).
  These rules instruct the AI to automatically use ContextStream for memory and context.
  Supported editors: ${getAvailableEditors().join(", ")}`,
  inputSchema: external_exports.object({
  folder_path: external_exports.string().describe("Absolute path to the project folder"),
- editors: external_exports.array(external_exports.enum(["windsurf", "cursor", "cline", "claude", "aider", "all"])).optional().describe("Which editors to generate rules for. Defaults to all."),
+ editors: external_exports.array(external_exports.enum(["windsurf", "cursor", "cline", "kilo", "roo", "claude", "aider", "all"])).optional().describe("Which editors to generate rules for. Defaults to all."),
  workspace_name: external_exports.string().optional().describe("Workspace name to include in rules"),
  workspace_id: external_exports.string().uuid().optional().describe("Workspace ID to include in rules"),
  project_name: external_exports.string().optional().describe("Project name to include in rules"),
@@ -6617,6 +7218,274 @@ Supported editors: ${getAvailableEditors().join(", ")}`,
  return { content: [{ type: "text", text: formatContent(summary) }], structuredContent: toStructured(summary) };
  }
  );
+ registerTool(
+ "session_summary",
+ {
+ title: "Get compact context summary",
+ description: `Get a compact, token-efficient summary of workspace context (~500 tokens).
+ This is designed to replace loading full chat history in AI prompts.
+ Returns: workspace/project info, top decisions (titles only), preferences, memory count.
+ Use this at conversation start instead of loading everything.
+ For specific details, use session_recall or session_smart_search.`,
+ inputSchema: external_exports.object({
+ workspace_id: external_exports.string().uuid().optional(),
+ project_id: external_exports.string().uuid().optional(),
+ max_tokens: external_exports.number().optional().describe("Maximum tokens for summary (default: 500)")
+ })
+ },
+ async (input) => {
+ let workspaceId = input.workspace_id;
+ let projectId = input.project_id;
+ if (!workspaceId && sessionManager) {
+ const ctx = sessionManager.getContext();
+ if (ctx) {
+ workspaceId = ctx.workspace_id;
+ projectId = projectId || ctx.project_id;
+ }
+ }
+ const result = await client.getContextSummary({
+ workspace_id: workspaceId,
+ project_id: projectId,
+ max_tokens: input.max_tokens
+ });
+ return {
+ content: [{ type: "text", text: result.summary }],
+ structuredContent: toStructured(result)
+ };
+ }
+ );
+ registerTool(
+ "session_compress",
+ {
+ title: "Compress chat history to memory",
+ description: `Extract and store key information from chat history as memory events.
+ This allows clearing chat history while preserving important context.
+ Use at conversation end or when context window is getting full.
+
+ Extracts:
+ - Decisions made
+ - User preferences learned
+ - Insights discovered
+ - Tasks/action items
+ - Code patterns established
+
+ After compression, the AI can use session_recall to retrieve this context in future conversations.`,
+ inputSchema: external_exports.object({
+ chat_history: external_exports.string().describe("The chat history to compress and extract from"),
+ workspace_id: external_exports.string().uuid().optional(),
+ project_id: external_exports.string().uuid().optional(),
+ extract_types: external_exports.array(external_exports.enum(["decisions", "preferences", "insights", "tasks", "code_patterns"])).optional().describe("Types of information to extract (default: all)")
+ })
+ },
+ async (input) => {
+ let workspaceId = input.workspace_id;
+ let projectId = input.project_id;
+ if (!workspaceId && sessionManager) {
+ const ctx = sessionManager.getContext();
+ if (ctx) {
+ workspaceId = ctx.workspace_id;
+ projectId = projectId || ctx.project_id;
+ }
+ }
+ if (!workspaceId) {
+ return {
+ content: [{
+ type: "text",
+ text: "Error: workspace_id is required. Please call session_init first or provide workspace_id explicitly."
+ }],
+ isError: true
+ };
+ }
+ const result = await client.compressChat({
+ workspace_id: workspaceId,
+ project_id: projectId,
+ chat_history: input.chat_history,
+ extract_types: input.extract_types
+ });
+ const summary = [
+ `\u2705 Compressed chat history into ${result.events_created} memory events:`,
+ "",
+ `\u{1F4CB} Decisions: ${result.extracted.decisions.length}`,
+ `\u2699\uFE0F Preferences: ${result.extracted.preferences.length}`,
+ `\u{1F4A1} Insights: ${result.extracted.insights.length}`,
+ `\u{1F4DD} Tasks: ${result.extracted.tasks.length}`,
+ `\u{1F527} Code patterns: ${result.extracted.code_patterns.length}`,
+ "",
+ "These are now stored in ContextStream memory.",
+ "Future conversations can access them via session_recall."
+ ].join("\n");
+ return {
+ content: [{ type: "text", text: summary }],
+ structuredContent: toStructured(result)
+ };
+ }
+ );
+ registerTool(
+ "ai_context_budget",
+ {
+ title: "Get context within token budget",
+ description: `Get the most relevant context that fits within a specified token budget.
+ This is the key tool for token-efficient AI interactions:
+
+ 1. AI calls this with a query and token budget
+ 2. Gets optimally selected context (decisions, memory, code)
+ 3. No need to include full chat history in the prompt
+
+ The tool prioritizes:
+ 1. Relevant decisions (highest value per token)
+ 2. Query-matched memory events
+ 3. Related code snippets (if requested and budget allows)
+
+ Example: ai_context_budget(query="authentication", max_tokens=1000)`,
+ inputSchema: external_exports.object({
+ query: external_exports.string().describe("What context to retrieve"),
+ max_tokens: external_exports.number().describe("Maximum tokens for the context (e.g., 500, 1000, 2000)"),
+ workspace_id: external_exports.string().uuid().optional(),
+ project_id: external_exports.string().uuid().optional(),
+ include_decisions: external_exports.boolean().optional().describe("Include relevant decisions (default: true)"),
+ include_memory: external_exports.boolean().optional().describe("Include memory search results (default: true)"),
+ include_code: external_exports.boolean().optional().describe("Include code search results (default: false)")
+ })
+ },
+ async (input) => {
+ let workspaceId = input.workspace_id;
+ let projectId = input.project_id;
+ if (!workspaceId && sessionManager) {
+ const ctx = sessionManager.getContext();
+ if (ctx) {
+ workspaceId = ctx.workspace_id;
+ projectId = projectId || ctx.project_id;
+ }
+ }
+ const result = await client.getContextWithBudget({
+ query: input.query,
+ workspace_id: workspaceId,
+ project_id: projectId,
+ max_tokens: input.max_tokens,
+ include_decisions: input.include_decisions,
+ include_memory: input.include_memory,
+ include_code: input.include_code
+ });
+ const footer = `
+ ---
+ \u{1F4CA} Token estimate: ${result.token_estimate}/${input.max_tokens} | Sources: ${result.sources.length}`;
+ return {
+ content: [{ type: "text", text: result.context + footer }],
+ structuredContent: toStructured(result)
+ };
+ }
+ );
+ registerTool(
+ "session_delta",
+ {
+ title: "Get context changes since timestamp",
+ description: `Get new context added since a specific timestamp.
+ Useful for efficient context synchronization without reloading everything.
+
+ Returns:
+ - Count of new decisions and memory events
+ - List of new items with titles and timestamps
+
+ Use case: AI can track what's new since last session_init.`,
+ inputSchema: external_exports.object({
+ since: external_exports.string().describe('ISO timestamp to get changes since (e.g., "2025-12-05T00:00:00Z")'),
+ workspace_id: external_exports.string().uuid().optional(),
+ project_id: external_exports.string().uuid().optional(),
+ limit: external_exports.number().optional().describe("Maximum items to return (default: 20)")
+ })
+ },
+ async (input) => {
+ let workspaceId = input.workspace_id;
+ let projectId = input.project_id;
+ if (!workspaceId && sessionManager) {
+ const ctx = sessionManager.getContext();
+ if (ctx) {
+ workspaceId = ctx.workspace_id;
+ projectId = projectId || ctx.project_id;
+ }
+ }
+ const result = await client.getContextDelta({
+ workspace_id: workspaceId,
+ project_id: projectId,
+ since: input.since,
+ limit: input.limit
+ });
+ const summary = [
+ `\u{1F4C8} Context changes since ${input.since}:`,
+ ` New decisions: ${result.new_decisions}`,
+ ` New memory events: ${result.new_memory}`,
+ "",
+ ...result.items.slice(0, 10).map((i) => `\u2022 [${i.type}] ${i.title}`),
+ result.items.length > 10 ? ` (+${result.items.length - 10} more)` : ""
+ ].filter(Boolean).join("\n");
+ return {
+ content: [{ type: "text", text: summary }],
+ structuredContent: toStructured(result)
+ };
+ }
+ );
+ registerTool(
+ "context_smart",
+ {
+ title: "Get smart context for user query",
+ description: `**CALL THIS BEFORE EVERY AI RESPONSE** to get relevant context.
+
+ This is the KEY tool for token-efficient AI interactions. It:
+ 1. Analyzes the user's message to understand what context is needed
+ 2. Retrieves only relevant context in a minified, token-efficient format
+ 3. Replaces the need to include full chat history in prompts
+
+ Format options:
+ - 'minified': Ultra-compact D:decision|P:preference|M:memory (default, ~200 tokens)
+ - 'readable': Line-separated with labels
+ - 'structured': JSON-like grouped format
+
+ Type codes: W=Workspace, P=Project, D=Decision, M=Memory, I=Insight, T=Task
+
+ Example usage:
+ 1. User asks "how should I implement auth?"
+ 2. AI calls context_smart(user_message="how should I implement auth?")
+ 3. Gets: "W:Maker|P:contextstream|D:Use JWT for auth|D:No session cookies|M:Auth API at /auth/..."
+ 4. AI responds with relevant context already loaded
+
+ This saves ~80% tokens compared to including full chat history.`,
+ inputSchema: external_exports.object({
+ user_message: external_exports.string().describe("The user message to analyze and get context for"),
+ workspace_id: external_exports.string().uuid().optional(),
+ project_id: external_exports.string().uuid().optional(),
+ max_tokens: external_exports.number().optional().describe("Maximum tokens for context (default: 800)"),
+ format: external_exports.enum(["minified", "readable", "structured"]).optional().describe("Context format (default: minified)")
+ })
+ },
+ async (input) => {
+ if (sessionManager) {
+ sessionManager.markContextSmartCalled();
+ }
+ let workspaceId = input.workspace_id;
+ let projectId = input.project_id;
+ if (!workspaceId && sessionManager) {
+ const ctx = sessionManager.getContext();
+ if (ctx) {
+ workspaceId = ctx.workspace_id;
+ projectId = projectId || ctx.project_id;
+ }
+ }
+ const result = await client.getSmartContext({
+ user_message: input.user_message,
+ workspace_id: workspaceId,
+ project_id: projectId,
+ max_tokens: input.max_tokens,
+ format: input.format
+ });
+ const footer = `
+ ---
+ \u{1F3AF} ${result.sources_used} sources | ~${result.token_estimate} tokens | format: ${result.format}`;
+ return {
+ content: [{ type: "text", text: result.context + footer }],
+ structuredContent: toStructured(result)
+ };
+ }
+ );
  }
 
  // src/resources.ts
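
Both `getContextWithBudget` and `getSmartContext` above budget by characters under a rough 4-characters-per-token heuristic. A condensed sketch of that accounting, using the same cutoff rule as the loops in the diff (the `fitWithinBudget` name and sample entries are illustrative, not part of the package):

```js
// The budgeting heuristic used above, condensed: ~4 characters per token.
const charsPerToken = 4;

function fitWithinBudget(entries, maxTokens) {
  const maxChars = maxTokens * charsPerToken;
  const kept = [];
  let used = 0;
  for (const entry of entries) {
    if (used + entry.length + 1 > maxChars) break; // +1 for the "|" separator
    kept.push(entry);
    used += entry.length + 1;
  }
  return { context: kept.join("|"), tokenEstimate: Math.ceil(used / charsPerToken) };
}

console.log(fitWithinBudget(["D:Use PostgreSQL", "D:No ORMs", "M:DB schema at docs/db.md"], 8));
// With an 8-token (32-char) budget, only the first two entries fit.
```

The estimate is deliberately coarse: it never calls a real tokenizer, trading accuracy for zero overhead on every request.
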
@@ -6963,6 +7832,8 @@ var SessionManager = class {
  this.context = null;
  this.ideRoots = [];
  this.folderPath = null;
+ this.contextSmartCalled = false;
+ this.warningShown = false;
  }
  /**
  * Check if session has been auto-initialized
@@ -6989,6 +7860,30 @@ var SessionManager = class {
  setFolderPath(path3) {
  this.folderPath = path3;
  }
+ /**
+ * Mark that context_smart has been called in this session
+ */
+ markContextSmartCalled() {
+ this.contextSmartCalled = true;
+ }
+ /**
+ * Check if context_smart has been called and warn if not.
+ * Returns true if a warning was shown, false otherwise.
+ */
+ warnIfContextSmartNotCalled(toolName) {
+ const skipWarningTools = ["session_init", "context_smart", "session_recall", "session_remember"];
+ if (skipWarningTools.includes(toolName)) {
+ return false;
+ }
+ if (!this.initialized || this.contextSmartCalled || this.warningShown) {
+ return false;
+ }
+ this.warningShown = true;
+ console.warn(`[ContextStream] Warning: ${toolName} called without context_smart.`);
+ console.warn('[ContextStream] For best results, call context_smart(user_message="...") before other tools.');
+ console.warn("[ContextStream] context_smart provides semantically relevant context for the user's query.");
+ return true;
+ }
  /**
  * Auto-initialize the session if not already done.
  * Returns context summary to prepend to tool response.
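
Taken together, the `SessionManager` additions implement a once-per-session nudge: after initialization, the first tool call that arrives without `context_smart` having been invoked logs a warning, and only once. A self-contained restatement of that gate (the `WarningGate` class is illustrative only, not exported by the package):

```js
// Standalone restatement of the warning gate added to SessionManager above.
// Assumes `initialized` is set by session_init and markContextSmartCalled()
// is invoked by the context_smart handler, as in the diff.
class WarningGate {
  constructor() {
    this.initialized = false;
    this.contextSmartCalled = false;
    this.warningShown = false;
  }
  warnIfContextSmartNotCalled(toolName) {
    const skip = ["session_init", "context_smart", "session_recall", "session_remember"];
    if (skip.includes(toolName)) return false;
    if (!this.initialized || this.contextSmartCalled || this.warningShown) return false;
    this.warningShown = true; // warn at most once per session
    console.warn(`[ContextStream] Warning: ${toolName} called without context_smart.`);
    return true;
  }
}

const gate = new WarningGate();
gate.initialized = true;
gate.warnIfContextSmartNotCalled("memory_search"); // true: warning logged
gate.warnIfContextSmartNotCalled("memory_search"); // false: already shown
```
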
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@contextstream/mcp-server",
- "version": "0.3.9",
+ "version": "0.3.10",
  "description": "MCP server exposing ContextStream public API - code context, memory, search, and AI tools for developers",
  "type": "module",
  "license": "MIT",