fenix-mcp 1.14.0__py3-none-any.whl → 2.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
fenix_mcp/__init__.py CHANGED
@@ -8,4 +8,4 @@ Fênix Cloud MCP Server (Python edition).
 __all__ = ["__version__"]
 
 
-__version__ = "1.14.0"
+__version__ = "2.1.0"
@@ -5,7 +5,6 @@ from __future__ import annotations
 
 import json
 from enum import Enum
-from typing import List, Optional
 
 from pydantic import Field
 
@@ -17,26 +16,13 @@ from fenix_mcp.infrastructure.context import AppContext
 
 class InitializeAction(str, Enum):
     INIT = "init"
-    SETUP = "setup"
 
 
 class InitializeRequest(ToolRequest):
     action: InitializeAction = Field(description="Initialization operation to execute.")
     include_user_docs: bool = Field(
         default=True,
-        description=(
-            "Include personal documents during initialization (only for init action)."
-        ),
-    )
-    limit: int = Field(
-        default=50,
-        ge=1,
-        le=200,
-        description=("Maximum number of core/personal documents to load."),
-    )
-    answers: Optional[List[str]] = Field(
-        default=None,
-        description=("List of 9 text answers to process the personalized setup."),
+        description=("Include personal documents during initialization."),
     )
 
 
@@ -54,15 +40,12 @@ class InitializeTool(Tool):
     async def run(self, payload: InitializeRequest, context: AppContext):
         if payload.action is InitializeAction.INIT:
             return await self._handle_init(payload)
-        if payload.action is InitializeAction.SETUP:
-            return await self._handle_setup(payload)
         return text("❌ Unknown initialization action.")
 
     async def _handle_init(self, payload: InitializeRequest):
         try:
             data = await self._service.gather_data(
                 include_user_docs=payload.include_user_docs,
-                limit=payload.limit,
             )
         except Exception as exc:  # pragma: no cover - defensive
             self._context.logger.error("Initialize failed: %s", exc)
@@ -77,16 +60,30 @@ class InitializeTool(Tool):
             and not data.profile
         ):
             return text(
-                "⚠️ Could not load documents or profile. Confirm the token and, if this is your first access, use `initialize action=setup` to answer the initial questionnaire."
+                "⚠️ Could not load documents or profile. Confirm the token has API access."
             )
 
         payload_dict = {
             "profile": data.profile,
             "core_documents": data.core_documents,
             "user_documents": data.user_documents if payload.include_user_docs else [],
+            "my_work_items": [
+                {
+                    "id": item.get("id"),
+                    "key": item.get("key"),
+                    "title": item.get("title"),
+                    "item_type": item.get("item_type"),
+                    "status": (
+                        item.get("status", {}).get("name")
+                        if isinstance(item.get("status"), dict)
+                        else item.get("status")
+                    ),
+                    "priority": item.get("priority"),
+                    "due_date": item.get("due_date"),
+                }
+                for item in data.my_work_items
+            ],
         }
-        if data.recent_memories:
-            payload_dict["recent_memories"] = data.recent_memories
 
         # Extract key IDs for easy reference
         profile = data.profile or {}
@@ -116,14 +113,6 @@ class InitializeTool(Tool):
             "```",
         ]
 
-        if payload.include_user_docs and not data.user_documents and data.profile:
-            message_lines.extend(
-                [
-                    "",
-                    self._service.build_new_user_prompt(data),
-                ]
-            )
-
         # Add memory usage instructions
         message_lines.extend(
             [
@@ -144,30 +133,3 @@ class InitializeTool(Tool):
         )
 
         return text("\n".join(message_lines))
-
-    async def _handle_setup(self, payload: InitializeRequest):
-        answers = payload.answers or []
-        validation_error = self._service.validate_setup_answers(answers)
-        if validation_error:
-            return text(f"❌ {validation_error}")
-
-        summary_lines = [
-            "📝 **Personalized setup received!**",
-            "",
-            "Your answers have been registered. I will suggest documents, rules and routines based on this information.",
-            "",
-            "Answer summary:",
-        ]
-        for idx, answer in enumerate(answers, start=1):
-            summary_lines.append(f"{idx}. {answer.strip()}")
-
-        summary_lines.extend(
-            [
-                "",
-                "You can now request specific content, for example:",
-                "- `productivity action=todo_create ...`",
-                "- `knowledge action=mode_list`",
-            ]
-        )
-
-        return text("\n".join(summary_lines))
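The whole questionnaire path is gone in 2.1.0: `InitializeAction` keeps only `init`, and `InitializeRequest` loses `limit` and `answers`. A minimal sketch of the resulting request surface, using a plain pydantic `BaseModel` as a stand-in for the packaged `ToolRequest` base (illustration only, not the shipped classes):

```python
# Sketch of the 2.1.0 initialize request surface, reconstructed from the
# diff above. BaseModel stands in for the package's ToolRequest base.
from enum import Enum

from pydantic import BaseModel, Field, ValidationError


class InitializeAction(str, Enum):
    INIT = "init"  # "setup" was removed in 2.1.0


class InitializeRequest(BaseModel):
    action: InitializeAction = Field(
        description="Initialization operation to execute."
    )
    include_user_docs: bool = Field(
        default=True,
        description="Include personal documents during initialization.",
    )


# The only remaining call shape:
req = InitializeRequest(action="init", include_user_docs=False)
print(req.action.value)  # -> "init"

# Clients still sending the 1.14.0 questionnaire flow now fail validation:
try:
    InitializeRequest(action="setup")
except ValidationError:
    print("`setup` is no longer a valid initialize action")
```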
@@ -1,18 +1,16 @@
 # SPDX-License-Identifier: MIT
-"""Intelligence tool implementation (memories and smart operations)."""
+"""Intelligence tool implementation (memories)."""
 
 from __future__ import annotations
 
 from enum import Enum
-from typing import Any, Dict, List, Optional
+from typing import Any, List, Optional
 
 from pydantic import Field, field_validator
 
 from fenix_mcp.application.presenters import text
 from fenix_mcp.application.tool_base import (
     MERMAID_HINT,
-    CategoryStr,
-    DateTimeStr,
     MarkdownStr,
     TagStr,
     TitleStr,
@@ -20,7 +18,7 @@ from fenix_mcp.application.tool_base import (
     ToolRequest,
     UUIDStr,
 )
-from fenix_mcp.domain.intelligence import IntelligenceService, build_metadata
+from fenix_mcp.domain.intelligence import IntelligenceService
 from fenix_mcp.infrastructure.context import AppContext
 
 
@@ -31,73 +29,73 @@ class IntelligenceAction(str, Enum):
         obj.description = description
         return obj
 
-    SMART_CREATE = (
-        "memory_smart_create",
-        "Creates intelligent memories with similarity analysis.",
+    SAVE = (
+        "memory_save",
+        "INTELLIGENT save with automatic deduplication using AI embeddings. "
+        "If similar memory exists (>80% semantic match): UPDATES existing memory (prevents duplicates). "
+        "If no match found: CREATES new memory. "
+        "REQUIRED: title, content (Markdown), tags (array, min 1). "
+        "OPTIONAL: documentation_item_id, work_item_id, sprint_id (link to entities). "
+        "EXAMPLES of what to save: architecture decisions, solutions found, user preferences, learnings.",
     )
-    QUERY = ("memory_query", "Lists memories applying filters and text search.")
-    GET = ("memory_get", "Gets memory details and content by ID.")
-    SIMILARITY = ("memory_similarity", "Finds memories similar to a base content.")
-    CONSOLIDATE = (
-        "memory_consolidate",
-        "Consolidates multiple memories into a primary one.",
+
+    SEARCH = (
+        "memory_search",
+        "Semantic search using AI embeddings - NOT keyword matching. "
+        "CALL THIS AT CONVERSATION START to recover context from previous sessions. "
+        "REQUIRED: query (natural language, e.g., 'authentication decisions', 'database choices'). "
+        "OPTIONAL: limit (1-20, default 5), tags (filter). "
+        "RETURNS: Memories ranked by semantic similarity with scores. "
+        "EXAMPLE: 'what did we decide about the API?' finds related memories even without exact words.",
     )
-    UPDATE = ("memory_update", "Updates fields of an existing memory.")
-    DELETE = ("memory_delete", "Removes a memory by ID.")
-    HELP = ("memory_help", "Shows supported actions and their uses.")
 
     @classmethod
     def choices(cls) -> List[str]:
         return [member.value for member in cls]
 
-    @classmethod
-    def formatted_help(cls) -> str:
-        lines = [
-            "| **Action** | **Description** |",
-            "| --- | --- |",
-        ]
-        for member in cls:
-            lines.append(f"| `{member.value}` | {member.description} |")
-        return "\n".join(lines)
-
-
-ACTION_FIELD_DESCRIPTION = (
-    "Intelligence action to execute. Use one of the values: "
-    + ", ".join(
-        f"`{member.value}` ({member.description.rstrip('.')})."
-        for member in IntelligenceAction
-    )
+
+ACTION_FIELD_DESCRIPTION = "Memory action to execute. " + " | ".join(
+    f"`{member.value}`: {member.description.split('.')[0]}."
+    for member in IntelligenceAction
 )
 
 
 class IntelligenceRequest(ToolRequest):
     action: IntelligenceAction = Field(description=ACTION_FIELD_DESCRIPTION)
-    title: Optional[TitleStr] = Field(default=None, description="Memory title.")
-    content: Optional[MarkdownStr] = Field(
-        default=None, description=f"Memory content/text (Markdown).{MERMAID_HINT}"
+
+    # For memory_save
+    title: Optional[TitleStr] = Field(
+        default=None, description="Memory title. REQUIRED for memory_save."
     )
-    metadata: Optional[MarkdownStr] = Field(
+    content: Optional[MarkdownStr] = Field(
         default=None,
-        description="Structured memory metadata (pipe format, compact toml, etc.).",
+        description=f"Memory content (Markdown supported).{MERMAID_HINT} REQUIRED for memory_save.",
     )
-    context: Optional[str] = Field(default=None, description="Additional context.")
-    source: Optional[str] = Field(default=None, description="Memory source.")
-    importance: Optional[str] = Field(
+    tags: Optional[List[TagStr]] = Field(
         default=None,
-        description="Memory importance level (low, medium, high, critical).",
+        description='Tags for categorization. REQUIRED for memory_save. Format: JSON array, e.g.: ["tag1", "tag2"].',
+        json_schema_extra={"example": ["tag1", "tag2", "tag3"]},
     )
-    include_content: bool = Field(
-        default=False,
-        description="Return full memory content? Set true to include the full text.",
+    documentation_item_id: Optional[UUIDStr] = Field(
+        default=None, description="Link memory to a documentation item (UUID)."
     )
-    include_metadata: bool = Field(
-        default=False,
-        description="Return full memory metadata? Set true to include the raw field.",
+    work_item_id: Optional[UUIDStr] = Field(
+        default=None, description="Link memory to a work item (UUID)."
     )
-    tags: Optional[List[TagStr]] = Field(
+    sprint_id: Optional[UUIDStr] = Field(
+        default=None, description="Link memory to a sprint (UUID)."
+    )
+
+    # For memory_search
+    query: Optional[str] = Field(
         default=None,
-        description='Memory tags. REQUIRED for create. Format: JSON array of strings, e.g.: ["tag1", "tag2"]. Do not use a single string.',
-        json_schema_extra={"example": ["tag1", "tag2", "tag3"]},
+        description="Search query (natural language). REQUIRED for memory_search.",
+    )
+    limit: int = Field(
+        default=5,
+        ge=1,
+        le=20,
+        description="Maximum number of results to return. Default: 5.",
     )
 
     @field_validator("tags", mode="before")
@@ -107,11 +105,9 @@ class IntelligenceRequest(ToolRequest):
         if v is None or v == "":
             return None
 
-        # If it's already a list, return as is
         if isinstance(v, (list, tuple, set)):
             return [str(item).strip() for item in v if str(item).strip()]
 
-        # If it's a string, try to parse as JSON
         if isinstance(v, str):
             try:
                 import json
@@ -121,64 +117,32 @@ class IntelligenceRequest(ToolRequest):
                     return [str(item).strip() for item in parsed if str(item).strip()]
             except (json.JSONDecodeError, TypeError):
                 pass
-
-            # If JSON parsing fails, treat as comma-separated string
             return [item.strip() for item in v.split(",") if item.strip()]
 
-        # For any other type, convert to string and wrap in list
         return [str(v).strip()] if str(v).strip() else None
 
-    limit: int = Field(default=20, ge=1, le=100, description="Result limit.")
-    offset: int = Field(default=0, ge=0, description="Pagination offset.")
-    query: Optional[str] = Field(default=None, description="Search term.")
-    category: Optional[CategoryStr] = Field(
-        default=None, description="Category for filtering."
-    )
-    date_from: Optional[DateTimeStr] = Field(
-        default=None, description="Start date filter (ISO 8601)."
-    )
-    date_to: Optional[DateTimeStr] = Field(
-        default=None, description="End date filter (ISO 8601)."
-    )
-    threshold: float = Field(
-        default=0.8, ge=0, le=1, description="Minimum similarity threshold."
-    )
-    max_results: int = Field(
-        default=5, ge=1, le=20, description="Maximum similar memories."
-    )
-    memory_ids: Optional[List[UUIDStr]] = Field(
-        default=None, description="Memory IDs for consolidation (UUIDs)."
-    )
-    strategy: str = Field(default="merge", description="Consolidation strategy.")
-    time_range: str = Field(default="month", description="Time window for analytics.")
-    group_by: str = Field(default="category", description="Grouping for analytics.")
-    id: Optional[UUIDStr] = Field(default=None, description="Memory ID (UUID).")
-    documentation_item_id: Optional[UUIDStr] = Field(
-        default=None, description="Related documentation ID (UUID)."
-    )
-    mode_id: Optional[UUIDStr] = Field(
-        default=None, description="Related mode ID (UUID)."
-    )
-    rule_id: Optional[UUIDStr] = Field(
-        default=None, description="Related rule ID (UUID)."
-    )
-    work_item_id: Optional[UUIDStr] = Field(
-        default=None, description="Related work item ID (UUID)."
-    )
-    sprint_id: Optional[UUIDStr] = Field(
-        default=None, description="Related sprint ID (UUID)."
-    )
-
 
 class IntelligenceTool(Tool):
     name = "intelligence"
-    description = (
-        "Fenix persistent memory system for cross-session continuity. "
-        "RECOMMENDED USAGE: "
-        "(1) Call memory_query at the START of conversations to retrieve relevant context from previous sessions. "
-        "(2) Call memory_smart_create to save important information, decisions, user preferences, and learnings. "
-        "This enables personalized responses and prevents users from repeating themselves."
-    )
+    description = """Fenix INTELLIGENT memory system with semantic understanding (uses AI embeddings, not keyword matching).
+
+    **memory_search** - CALL AT CONVERSATION START
+    - Uses AI embeddings for semantic similarity
+    - "what did we decide about auth?" finds related memories even without exact keywords
+    - Returns ranked results with similarity scores
+    - BEST PRACTICE: Always search before asking user to repeat context
+
+    **memory_save** - SMART SAVE with auto-deduplication
+    - Automatically detects similar existing memories via embeddings
+    - If >80% similar memory exists: UPDATES it (prevents duplicates)
+    - If no match: Creates new memory
+    - Requires: title, content, tags (at least one)
+
+    WORKFLOW:
+    1. START of conversation -> memory_search for relevant context
+    2. User shares decision/learning -> memory_save to persist
+    3. Similar topic later -> memory_search finds it automatically
+    """
    request_model = IntelligenceRequest
 
     def __init__(self, context: AppContext):
@@ -187,222 +151,84 @@ class IntelligenceTool(Tool):
 
     async def run(self, payload: IntelligenceRequest, context: AppContext):
         action = payload.action
-        if action is IntelligenceAction.HELP:
-            return await self._handle_help()
-        if action is IntelligenceAction.SMART_CREATE:
-            return await self._handle_smart_create(payload)
-        if action is IntelligenceAction.QUERY:
-            return await self._handle_query(payload)
-        if action is IntelligenceAction.GET:
-            return await self._handle_get(payload)
-        if action is IntelligenceAction.SIMILARITY:
-            return await self._handle_similarity(payload)
-        if action is IntelligenceAction.CONSOLIDATE:
-            return await self._handle_consolidate(payload)
-        if action is IntelligenceAction.UPDATE:
-            return await self._handle_update(payload)
-        if action is IntelligenceAction.DELETE:
-            return await self._handle_delete(payload)
-        return text(
-            "❌ Invalid action for intelligence.\n\nChoose one of the values:\n"
-            + "\n".join(f"- `{value}`" for value in IntelligenceAction.choices())
-        )
 
-    async def _handle_smart_create(self, payload: IntelligenceRequest):
-        if not payload.title or not payload.content:
-            return text("❌ Provide title and content to create a memory.")
+        if action is IntelligenceAction.SAVE:
+            return await self._handle_save(payload)
 
-        if not payload.metadata or not payload.metadata.strip():
-            return text("❌ Provide metadata to create a memory.")
+        if action is IntelligenceAction.SEARCH:
+            return await self._handle_search(payload)
 
-        if not payload.source or not payload.source.strip():
-            return text("❌ Provide source to create a memory.")
+        return text(
+            "Invalid action. Use: "
+            + ", ".join(f"`{v}`" for v in IntelligenceAction.choices())
        )
 
-        try:
-            normalized_tags = _ensure_tag_sequence(payload.tags)
-        except ValueError as exc:
-            return text(f"❌ {exc}")
+    async def _handle_save(self, payload: IntelligenceRequest):
+        """Save a memory (create or update based on similarity)."""
+        if not payload.title:
+            return text("Provide `title` to save a memory.")
+
+        if not payload.content:
+            return text("Provide `content` to save a memory.")
 
-        if not normalized_tags or len(normalized_tags) == 0:
-            return text("❌ Provide tags to create a memory.")
+        tags = payload.tags
+        if not tags or len(tags) == 0:
+            return text("Provide `tags` to save a memory (at least one required).")
 
-        memory = await self._service.smart_create_memory(
+        result = await self._service.save_memory(
             title=payload.title,
             content=payload.content,
-            metadata=payload.metadata,
-            context=payload.context,
-            source=payload.source,
-            importance=payload.importance,
-            tags=normalized_tags,
+            tags=tags,
+            documentation_item_id=payload.documentation_item_id,
+            work_item_id=payload.work_item_id,
+            sprint_id=payload.sprint_id,
         )
-        lines = [
-            "🧠 **Memory created successfully!**",
-            f"ID: {memory.get('memoryId') or memory.get('id', 'N/A')}",
-            f"Action: {memory.get('action') or 'created'}",
-            f"Similarity: {format_percentage(memory.get('similarity'))}",
-            f"Tags: {', '.join(memory.get('tags', [])) or 'Automatic'}",
-            f"Category: {memory.get('category') or 'Automatic'}",
-        ]
-        return text("\n".join(lines))
 
-    async def _handle_query(self, payload: IntelligenceRequest):
-        memories = await self._service.query_memories(
-            limit=payload.limit,
-            offset=payload.offset,
+        action = result.get("action", "saved")
+        memory_id = result.get("memoryId", "N/A")
+        title = result.get("title", payload.title)
+        version = result.get("version", 1)
+        similarity = result.get("similarity")
+
+        if action == "updated":
+            similarity_str = f"{similarity * 100:.0f}%" if similarity else "N/A"
+            return text(
+                f"Memory updated (similarity: {similarity_str})\n"
+                f"ID: {memory_id}\n"
+                f"Title: {title}\n"
+                f"Version: {version}"
+            )
+        else:
+            return text(f"Memory created\nID: {memory_id}\nTitle: {title}")
+
+    async def _handle_search(self, payload: IntelligenceRequest):
+        """Search memories using semantic similarity."""
+        if not payload.query:
+            return text("Provide `query` to search memories.")
+
+        memories = await self._service.search_memories(
             query=payload.query,
+            limit=payload.limit,
             tags=payload.tags,
-            include_content=payload.include_content,
-            include_metadata=payload.include_metadata,
-            modeId=payload.mode_id,
-            ruleId=payload.rule_id,
-            workItemId=payload.work_item_id,
-            sprintId=payload.sprint_id,
-            documentationItemId=payload.documentation_item_id,
-            category=payload.category,
-            dateFrom=payload.date_from,
-            dateTo=payload.date_to,
-            importance=payload.importance,
-        )
-        if not memories:
-            return text("🧠 No memories found.")
-        body = "\n\n".join(_format_memory(mem) for mem in memories)
-        return text(f"🧠 **Memories ({len(memories)}):**\n\n{body}")
-
-    async def _handle_get(self, payload: IntelligenceRequest):
-        if not payload.id:
-            return text("❌ Provide the memory ID to get details.")
-        memory = await self._service.get_memory(
-            payload.id, include_content=True, include_metadata=True
         )
-        return text(_format_memory(memory, show_content=True))
 
-    async def _handle_similarity(self, payload: IntelligenceRequest):
-        if not payload.content:
-            return text("❌ Provide the base content to compare similarity.")
-        memories = await self._service.similar_memories(
-            content=payload.content,
-            threshold=payload.threshold,
-            max_results=payload.max_results,
-        )
         if not memories:
-            return text("🔍 No similar memories found.")
-        body = "\n\n".join(
-            f"🔍 **{mem.get('title', 'Untitled')}**\n Similarity: {format_percentage(mem.get('finalScore'))}\n ID: {mem.get('memoryId', 'N/A')}"
-            for mem in memories
-        )
-        return text(f"🔍 **Similar memories ({len(memories)}):**\n\n{body}")
-
-    async def _handle_consolidate(self, payload: IntelligenceRequest):
-        if not payload.memory_ids or len(payload.memory_ids) < 2:
-            return text("❌ Provide at least 2 memory IDs to consolidate.")
-        result = await self._service.consolidate_memories(
-            memory_ids=payload.memory_ids,
-            strategy=payload.strategy,
-        )
-        lines = [
-            "🔄 **Consolidation complete!**",
-            f"Primary memory: {result.get('primary_memory_id', 'N/A')}",
-            f"Consolidated: {result.get('consolidated_count', 'N/A')}",
-            f"Action executed: {result.get('action', 'N/A')}",
-        ]
-        return text("\n".join(lines))
+            return text("No memories found matching your query.")
 
-    async def _handle_update(self, payload: IntelligenceRequest):
-        if not payload.id:
-            return text("❌ Provide the memory ID for update.")
-        existing = await self._service.get_memory(
-            payload.id, include_content=False, include_metadata=True
-        )
-        try:
-            normalized_tags = _ensure_tag_sequence(payload.tags)
-        except ValueError as exc:
-            return text(f"❌ {exc}")
-        metadata = build_metadata(
-            payload.metadata,
-            importance=payload.importance,
-            tags=normalized_tags,
-            source=payload.source,
-            existing=existing.get("metadata") if isinstance(existing, dict) else None,
-        )
-        update_fields: Dict[str, Any] = {
-            "title": payload.title,
-            "content": payload.content,
-            "metadata": metadata,
-            "tags": normalized_tags,
-            "documentation_item_id": payload.documentation_item_id,
-            "mode_id": payload.mode_id,
-            "rule_id": payload.rule_id,
-            "work_item_id": payload.work_item_id,
-            "sprint_id": payload.sprint_id,
-            "importance": payload.importance,
-        }
-        memory = await self._service.update_memory(payload.id, **update_fields)
-        return text(
-            "\n".join(
-                [
-                    "✅ **Memory updated!**",
-                    f"ID: {memory.get('id', payload.id)}",
-                    f"Title: {memory.get('title', 'N/A')}",
-                    f"Priority: {memory.get('priority_score', 'N/A')}",
-                ]
-            )
-        )
+        lines = [f"Found {len(memories)} relevant memories:\n"]
 
-    async def _handle_delete(self, payload: IntelligenceRequest):
-        if not payload.id:
-            return text("❌ Provide the memory ID to remove.")
-        await self._service.delete_memory(payload.id)
-        return text(f"🗑️ Memory {payload.id} removed successfully.")
+        for mem in memories:
+            similarity = mem.get("similarity")
+            similarity_str = f" ({similarity * 100:.0f}% match)" if similarity else ""
 
-    async def _handle_help(self):
-        return text(
-            "📚 **Available actions for intelligence**\n\n"
-            + IntelligenceAction.formatted_help()
-        )
+            lines.append(f"### {mem.get('title', 'Untitled')}{similarity_str}")
+            lines.append(f"ID: {mem.get('id', 'N/A')}")
+            lines.append(f"Tags: {', '.join(mem.get('tags', []))}")
 
+            content = mem.get("content", "")
+            if content:
+                lines.append(f"\n{content}")
 
-def _format_memory(memory: Dict[str, Any], *, show_content: bool = False) -> str:
-    lines = [
-        f"🧠 **{memory.get('title', 'Untitled')}**",
-        f"ID: {memory.get('id', memory.get('memoryId', 'N/A'))}",
-        f"Category: {memory.get('category', 'N/A')}",
-        f"Tags: {', '.join(memory.get('tags', [])) or 'None'}",
-        f"Importance: {memory.get('importance', 'N/A')}",
-        f"Accesses: {memory.get('access_count', 'N/A')}",
-    ]
-    if show_content and memory.get("content"):
-        lines.append("")
-        lines.append("**Content:**")
-        lines.append(memory.get("content"))
-    return "\n".join(lines)
-
-
-def format_percentage(value: Optional[float]) -> str:
-    if value is None:
-        return "N/A"
-    return f"{value * 100:.1f}%"
-
-
-def _ensure_tag_sequence(raw: Optional[Any]) -> Optional[List[str]]:
-    if raw is None or raw == "":
-        return None
-    if isinstance(raw, (list, tuple, set)):
-        result = [str(item).strip() for item in raw if str(item).strip()]
-        return result or None
-    if isinstance(raw, str):
-        # Try to parse as JSON array first
-        try:
-            import json
-
-            parsed = json.loads(raw)
-            if isinstance(parsed, list):
-                result = [str(item).strip() for item in parsed if str(item).strip()]
-                return result or None
-        except (json.JSONDecodeError, TypeError):
-            pass
-
-        raise ValueError(
-            "The `tags` field must be sent as a JSON array, for example: "
-            '["tag1", "tag2"].'
-        )
-    return [str(raw).strip()]
+        lines.append("\n---\n")
+
+        return text("\n".join(lines))
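The `memory_save` description above promises create-or-update at a >80% semantic match, and the new `_handle_save` only branches on the `action` field the service returns. `IntelligenceService.save_memory` itself runs server-side and is not part of this diff, so the following is only a sketch of the documented rule, with a hypothetical in-memory store and cosine similarity standing in for the real embedding backend:

```python
# Sketch only: the documented ">80% match updates, otherwise creates" rule.
# The real IntelligenceService.save_memory runs server-side; the store and
# embeddings here are hypothetical stand-ins.
import math
from typing import Dict, List


def cosine(a: List[float], b: List[float]) -> float:
    """Cosine similarity of two embedding vectors."""
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = math.sqrt(sum(x * x for x in a))
    norm_b = math.sqrt(sum(y * y for y in b))
    return dot / (norm_a * norm_b) if norm_a and norm_b else 0.0


def save_memory(
    store: Dict[str, List[float]],  # memory_id -> stored embedding
    new_id: str,
    new_embedding: List[float],
    threshold: float = 0.8,  # the ">80% semantic match" from the description
) -> dict:
    # Find the closest existing memory by embedding similarity.
    best_id, best_score = None, 0.0
    for memory_id, embedding in store.items():
        score = cosine(new_embedding, embedding)
        if score > best_score:
            best_id, best_score = memory_id, score

    if best_id is not None and best_score > threshold:
        # Close enough: overwrite the existing memory instead of duplicating.
        store[best_id] = new_embedding
        return {"action": "updated", "memoryId": best_id, "similarity": best_score}

    # No sufficiently similar memory: create a new one.
    store[new_id] = new_embedding
    return {"action": "created", "memoryId": new_id, "similarity": None}
```

Any backend that returns `action`, `memoryId`, and `similarity` in this shape plugs straight into the new handler.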