omni-cortex 1.13.0__tar.gz → 1.14.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/PKG-INFO +1 -1
  2. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/dashboard/backend/chat_service.py +61 -2
  3. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/dashboard/backend/main.py +4 -0
  4. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/dashboard/backend/models.py +4 -0
  5. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/__init__.py +1 -1
  6. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/pyproject.toml +1 -1
  7. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/.gitignore +0 -0
  8. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/LICENSE +0 -0
  9. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/README.md +0 -0
  10. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/dashboard/backend/.env.example +0 -0
  11. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/dashboard/backend/backfill_summaries.py +0 -0
  12. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/dashboard/backend/database.py +0 -0
  13. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/dashboard/backend/image_service.py +0 -0
  14. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/dashboard/backend/logging_config.py +0 -0
  15. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/dashboard/backend/project_config.py +0 -0
  16. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/dashboard/backend/project_scanner.py +0 -0
  17. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/dashboard/backend/prompt_security.py +0 -0
  18. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/dashboard/backend/pyproject.toml +0 -0
  19. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/dashboard/backend/security.py +0 -0
  20. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/dashboard/backend/uv.lock +0 -0
  21. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/dashboard/backend/websocket_manager.py +0 -0
  22. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/hooks/post_tool_use.py +0 -0
  23. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/hooks/pre_tool_use.py +0 -0
  24. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/hooks/session_utils.py +0 -0
  25. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/hooks/stop.py +0 -0
  26. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/hooks/subagent_stop.py +0 -0
  27. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/hooks/user_prompt.py +0 -0
  28. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/categorization/__init__.py +0 -0
  29. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/categorization/auto_tags.py +0 -0
  30. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/categorization/auto_type.py +0 -0
  31. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/config.py +0 -0
  32. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/dashboard.py +0 -0
  33. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/database/__init__.py +0 -0
  34. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/database/connection.py +0 -0
  35. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/database/migrations.py +0 -0
  36. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/database/schema.py +0 -0
  37. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/database/sync.py +0 -0
  38. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/decay/__init__.py +0 -0
  39. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/decay/importance.py +0 -0
  40. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/embeddings/__init__.py +0 -0
  41. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/embeddings/local.py +0 -0
  42. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/models/__init__.py +0 -0
  43. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/models/activity.py +0 -0
  44. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/models/agent.py +0 -0
  45. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/models/memory.py +0 -0
  46. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/models/relationship.py +0 -0
  47. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/models/session.py +0 -0
  48. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/resources/__init__.py +0 -0
  49. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/search/__init__.py +0 -0
  50. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/search/hybrid.py +0 -0
  51. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/search/keyword.py +0 -0
  52. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/search/ranking.py +0 -0
  53. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/search/semantic.py +0 -0
  54. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/server.py +0 -0
  55. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/setup.py +0 -0
  56. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/tools/__init__.py +0 -0
  57. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/tools/activities.py +0 -0
  58. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/tools/memories.py +0 -0
  59. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/tools/sessions.py +0 -0
  60. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/tools/utilities.py +0 -0
  61. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/utils/__init__.py +0 -0
  62. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/utils/formatting.py +0 -0
  63. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/utils/ids.py +0 -0
  64. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/utils/timestamps.py +0 -0
  65. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/omni_cortex/utils/truncation.py +0 -0
  66. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/scripts/check-venv.py +0 -0
  67. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/scripts/import_ken_memories.py +0 -0
  68. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/scripts/populate_session_data.py +0 -0
  69. {omni_cortex-1.13.0 → omni_cortex-1.14.0}/scripts/setup.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: omni-cortex
3
- Version: 1.13.0
3
+ Version: 1.14.0
4
4
  Summary: Give Claude Code a perfect memory - auto-logs everything, searches smartly, and gets smarter over time
5
5
  Project-URL: Homepage, https://github.com/AllCytes/Omni-Cortex
6
6
  Project-URL: Repository, https://github.com/AllCytes/Omni-Cortex
@@ -42,9 +42,13 @@ def is_available() -> bool:
42
42
  return False
43
43
 
44
44
 
45
- def build_style_context_prompt(style_profile: dict) -> str:
45
+ def build_style_context_prompt(style_profile: dict | None) -> str:
46
46
  """Build a prompt section describing user's communication style."""
47
47
 
48
+ # Return empty string if no style profile provided
49
+ if not style_profile:
50
+ return ""
51
+
48
52
  # Handle both camelCase (new format) and snake_case (old format)
49
53
  tone_dist = style_profile.get("toneDistribution") or style_profile.get("tone_distribution", {})
50
54
  tone_list = ", ".join(tone_dist.keys()) if tone_dist else "neutral"
@@ -416,6 +420,8 @@ def build_compose_prompt(
416
420
  template: Optional[str],
417
421
  tone_level: int,
418
422
  memory_context: str,
423
+ custom_instructions: Optional[str] = None,
424
+ include_explanation: bool = False,
419
425
  ) -> str:
420
426
  """Build the prompt for composing a response in user's style.
421
427
 
@@ -426,6 +432,8 @@ def build_compose_prompt(
426
432
  template: Optional response template (answer, guide, redirect, acknowledge)
427
433
  tone_level: Tone formality level (0-100)
428
434
  memory_context: Relevant memories formatted as context
435
+ custom_instructions: Optional specific instructions from the user
436
+ include_explanation: Whether to explain the incoming message first
429
437
 
430
438
  Returns:
431
439
  Complete prompt for response generation
@@ -480,7 +488,37 @@ Use this information naturally in your response if relevant. Don't explicitly ci
480
488
 
481
489
  """
482
490
 
483
- prompt += """
491
+ # Add custom instructions if provided
492
+ if custom_instructions:
493
+ prompt += f"""
494
+ ## CUSTOM INSTRUCTIONS FROM USER
495
+
496
+ The user has provided these specific instructions for the response:
497
+
498
+ <custom_instructions>
499
+ {xml_escape(custom_instructions)}
500
+ </custom_instructions>
501
+
502
+ Please incorporate these requirements while maintaining the user's voice.
503
+
504
+ """
505
+
506
+ # Build task instructions based on explanation mode
507
+ if include_explanation:
508
+ prompt += """
509
+ **Your Task:**
510
+ 1. FIRST, provide a clear explanation of what the incoming message means or is asking
511
+ Format: "**Understanding:** [your explanation in user's voice]"
512
+ 2. THEN, write a response to the incoming message in YOUR voice
513
+ Format: "**Response:** [your response]"
514
+ 3. Use the knowledge from your memories naturally if relevant
515
+ 4. Match the tone level specified above
516
+ 5. Follow the platform context guidelines
517
+ 6. Sound exactly like something you would write yourself
518
+
519
+ Write the explanation and response now:"""
520
+ else:
521
+ prompt += """
484
522
  **Your Task:**
485
523
  1. Write a response to the incoming message in YOUR voice (the user's voice)
486
524
  2. Use the knowledge from your memories naturally if relevant
@@ -501,6 +539,8 @@ async def compose_response(
501
539
  tone_level: int = 50,
502
540
  include_memories: bool = True,
503
541
  style_profile: Optional[dict] = None,
542
+ custom_instructions: Optional[str] = None,
543
+ include_explanation: bool = False,
504
544
  ) -> dict:
505
545
  """Compose a response to an incoming message in the user's style.
506
546
 
@@ -512,6 +552,8 @@ async def compose_response(
512
552
  tone_level: Tone formality level (0-100)
513
553
  include_memories: Whether to include relevant memories
514
554
  style_profile: User's style profile dictionary
555
+ custom_instructions: Optional specific instructions from the user
556
+ include_explanation: Whether to explain the incoming message first
515
557
 
516
558
  Returns:
517
559
  Dict with response, sources, and metadata
@@ -550,6 +592,8 @@ async def compose_response(
550
592
  template=template,
551
593
  tone_level=tone_level,
552
594
  memory_context=memory_context,
595
+ custom_instructions=custom_instructions,
596
+ include_explanation=include_explanation,
553
597
  )
554
598
 
555
599
  try:
@@ -563,10 +607,25 @@ async def compose_response(
563
607
  "response": f"Failed to generate response: {str(e)}",
564
608
  "sources": sources,
565
609
  "error": "generation_failed",
610
+ "explanation": None,
566
611
  }
567
612
 
613
+ # Parse explanation if requested
614
+ explanation = None
615
+ if include_explanation:
616
+ # Try to extract explanation and response parts
617
+ import re
618
+ understanding_match = re.search(r'\*\*Understanding:\*\*\s*(.+?)(?=\*\*Response:\*\*)', composed_response, re.DOTALL)
619
+ response_match = re.search(r'\*\*Response:\*\*\s*(.+)', composed_response, re.DOTALL)
620
+
621
+ if understanding_match and response_match:
622
+ explanation = understanding_match.group(1).strip()
623
+ composed_response = response_match.group(1).strip()
624
+ # If parsing fails, leave explanation as None and return full response
625
+
568
626
  return {
569
627
  "response": composed_response,
570
628
  "sources": sources,
571
629
  "error": None,
630
+ "explanation": explanation,
572
631
  }
@@ -1146,6 +1146,8 @@ async def compose_response_endpoint(
1146
1146
  tone_level=request.tone_level,
1147
1147
  include_memories=request.include_memories,
1148
1148
  style_profile=style_profile,
1149
+ custom_instructions=request.custom_instructions,
1150
+ include_explanation=request.include_explanation,
1149
1151
  )
1150
1152
 
1151
1153
  if result.get("error"):
@@ -1165,6 +1167,8 @@ async def compose_response_endpoint(
1165
1167
  incoming_message=request.incoming_message,
1166
1168
  context_type=request.context_type,
1167
1169
  created_at=datetime.now().isoformat(),
1170
+ custom_instructions=request.custom_instructions,
1171
+ explanation=result.get("explanation"),
1168
1172
  )
1169
1173
 
1170
1174
  log_success("/api/compose-response", context=request.context_type, tone=request.tone_level)
@@ -384,6 +384,8 @@ class ComposeRequest(BaseModel):
384
384
  template: Optional[str] = None # answer, guide, redirect, acknowledge
385
385
  tone_level: int = Field(default=50, ge=0, le=100) # 0=casual, 100=professional
386
386
  include_memories: bool = Field(default=True)
387
+ custom_instructions: Optional[str] = Field(default=None, max_length=2000)
388
+ include_explanation: bool = Field(default=False)
387
389
 
388
390
 
389
391
  class ComposeResponse(BaseModel):
@@ -398,3 +400,5 @@ class ComposeResponse(BaseModel):
398
400
  incoming_message: str
399
401
  context_type: str
400
402
  created_at: str
403
+ custom_instructions: Optional[str] = None
404
+ explanation: Optional[str] = None
@@ -1,3 +1,3 @@
1
1
  """Omni Cortex MCP - Universal Memory System for Claude Code."""
2
2
 
3
- __version__ = "1.13.0"
3
+ __version__ = "1.14.0"
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
4
4
 
5
5
  [project]
6
6
  name = "omni-cortex"
7
- version = "1.13.0"
7
+ version = "1.14.0"
8
8
  description = "Give Claude Code a perfect memory - auto-logs everything, searches smartly, and gets smarter over time"
9
9
  readme = "README.md"
10
10
  license = "MIT"
File without changes
File without changes
File without changes
File without changes