omni-cortex 1.12.0-py3-none-any.whl → 1.12.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. {omni_cortex-1.12.0.data → omni_cortex-1.12.1.data}/data/share/omni-cortex/dashboard/backend/chat_service.py +57 -3
  2. {omni_cortex-1.12.0.data → omni_cortex-1.12.1.data}/data/share/omni-cortex/dashboard/backend/database.py +1430 -1094
  3. {omni_cortex-1.12.0.data → omni_cortex-1.12.1.data}/data/share/omni-cortex/dashboard/backend/main.py +1592 -1381
  4. {omni_cortex-1.12.0.data → omni_cortex-1.12.1.data}/data/share/omni-cortex/dashboard/backend/models.py +370 -285
  5. {omni_cortex-1.12.0.dist-info → omni_cortex-1.12.1.dist-info}/METADATA +1 -1
  6. omni_cortex-1.12.1.dist-info/RECORD +26 -0
  7. omni_cortex-1.12.0.dist-info/RECORD +0 -26
  8. {omni_cortex-1.12.0.data → omni_cortex-1.12.1.data}/data/share/omni-cortex/dashboard/backend/.env.example +0 -0
  9. {omni_cortex-1.12.0.data → omni_cortex-1.12.1.data}/data/share/omni-cortex/dashboard/backend/backfill_summaries.py +0 -0
  10. {omni_cortex-1.12.0.data → omni_cortex-1.12.1.data}/data/share/omni-cortex/dashboard/backend/image_service.py +0 -0
  11. {omni_cortex-1.12.0.data → omni_cortex-1.12.1.data}/data/share/omni-cortex/dashboard/backend/logging_config.py +0 -0
  12. {omni_cortex-1.12.0.data → omni_cortex-1.12.1.data}/data/share/omni-cortex/dashboard/backend/project_config.py +0 -0
  13. {omni_cortex-1.12.0.data → omni_cortex-1.12.1.data}/data/share/omni-cortex/dashboard/backend/project_scanner.py +0 -0
  14. {omni_cortex-1.12.0.data → omni_cortex-1.12.1.data}/data/share/omni-cortex/dashboard/backend/prompt_security.py +0 -0
  15. {omni_cortex-1.12.0.data → omni_cortex-1.12.1.data}/data/share/omni-cortex/dashboard/backend/pyproject.toml +0 -0
  16. {omni_cortex-1.12.0.data → omni_cortex-1.12.1.data}/data/share/omni-cortex/dashboard/backend/security.py +0 -0
  17. {omni_cortex-1.12.0.data → omni_cortex-1.12.1.data}/data/share/omni-cortex/dashboard/backend/uv.lock +0 -0
  18. {omni_cortex-1.12.0.data → omni_cortex-1.12.1.data}/data/share/omni-cortex/dashboard/backend/websocket_manager.py +0 -0
  19. {omni_cortex-1.12.0.data → omni_cortex-1.12.1.data}/data/share/omni-cortex/hooks/post_tool_use.py +0 -0
  20. {omni_cortex-1.12.0.data → omni_cortex-1.12.1.data}/data/share/omni-cortex/hooks/pre_tool_use.py +0 -0
  21. {omni_cortex-1.12.0.data → omni_cortex-1.12.1.data}/data/share/omni-cortex/hooks/session_utils.py +0 -0
  22. {omni_cortex-1.12.0.data → omni_cortex-1.12.1.data}/data/share/omni-cortex/hooks/stop.py +0 -0
  23. {omni_cortex-1.12.0.data → omni_cortex-1.12.1.data}/data/share/omni-cortex/hooks/subagent_stop.py +0 -0
  24. {omni_cortex-1.12.0.data → omni_cortex-1.12.1.data}/data/share/omni-cortex/hooks/user_prompt.py +0 -0
  25. {omni_cortex-1.12.0.dist-info → omni_cortex-1.12.1.dist-info}/WHEEL +0 -0
  26. {omni_cortex-1.12.0.dist-info → omni_cortex-1.12.1.dist-info}/entry_points.txt +0 -0
  27. {omni_cortex-1.12.0.dist-info → omni_cortex-1.12.1.dist-info}/licenses/LICENSE +0 -0
@@ -42,7 +42,38 @@ def is_available() -> bool:
     return False
 
 
-def _build_prompt(question: str, context_str: str) -> str:
+def build_style_context_prompt(style_profile: dict) -> str:
+    """Build a prompt section describing user's communication style."""
+
+    tone_dist = style_profile.get("tone_distribution", {})
+    tone_list = ", ".join(tone_dist.keys()) if tone_dist else "neutral"
+    avg_words = style_profile.get("avg_word_count", 20)
+    question_freq = style_profile.get("question_frequency", 0)
+
+    markers = style_profile.get("key_markers", [])
+    markers_text = "\n".join(f"- {m}" for m in markers) if markers else "- Direct and clear"
+
+    return f"""
+## User Communication Style Profile
+
+When the user requests content "in their style" or "like they write", follow these patterns:
+
+**Typical Message Length:** ~{int(avg_words)} words
+**Common Tones:** {tone_list}
+**Question Frequency:** {int(question_freq * 100)}% of messages include questions
+
+**Key Style Markers:**
+{markers_text}
+
+**Guidelines:**
+- Match the user's typical message length and structure
+- Use their common vocabulary patterns
+- Mirror their tone and formality level
+- If they're typically direct, be concise; if detailed, be comprehensive
+"""
+
+
+def _build_prompt(question: str, context_str: str, style_context: Optional[str] = None) -> str:
     """Build the prompt for the AI model with injection protection."""
     system_instruction = """You are a helpful assistant that answers questions about stored memories and knowledge.
 
@@ -59,6 +90,10 @@ Instructions:
 
 Answer:"""
 
+    # Add style context if provided
+    if style_context:
+        system_instruction = f"{system_instruction}\n\n{style_context}"
+
     return build_safe_prompt(
         system_instruction=system_instruction,
         user_data={"memories": context_str},
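For reference, a minimal sketch of what the new helper produces; the profile values below are invented for illustration, but the keys match those read by build_style_context_prompt:

profile = {
    "tone_distribution": {"casual": 0.7, "technical": 0.3},
    "avg_word_count": 15,
    "question_frequency": 0.2,
    "key_markers": ["short sentences", "lowercase openers"],
}

section = build_style_context_prompt(profile)
# section is a markdown block along the lines of:
#   ## User Communication Style Profile
#   **Typical Message Length:** ~15 words
#   **Common Tones:** casual, technical
#   **Question Frequency:** 20% of messages include questions
#   ...
# When passed as style_context, _build_prompt appends it to the system instruction.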
@@ -112,9 +147,16 @@ async def stream_ask_about_memories(
     db_path: str,
     question: str,
     max_memories: int = 10,
+    style_context: Optional[dict] = None,
 ) -> AsyncGenerator[dict[str, Any], None]:
     """Stream a response to a question about memories.
 
+    Args:
+        db_path: Path to the database file
+        question: The user's question
+        max_memories: Maximum memories to include in context
+        style_context: Optional user style profile dictionary
+
     Yields events with type 'sources', 'chunk', 'done', or 'error'.
     """
     if not is_available():
@@ -155,8 +197,13 @@ async def stream_ask_about_memories(
             "data": sources,
         }
 
+    # Build style context prompt if provided
+    style_prompt = None
+    if style_context:
+        style_prompt = build_style_context_prompt(style_context)
+
     # Build and stream the response
-    prompt = _build_prompt(question, context_str)
+    prompt = _build_prompt(question, context_str, style_prompt)
 
     try:
         # Use streaming with the new google.genai client
@@ -260,6 +307,7 @@ async def ask_about_memories(
     db_path: str,
     question: str,
     max_memories: int = 10,
+    style_context: Optional[dict] = None,
 ) -> dict:
     """Ask a natural language question about memories (non-streaming).
 
@@ -267,6 +315,7 @@ async def ask_about_memories(
         db_path: Path to the database file
         question: The user's question
         max_memories: Maximum memories to include in context
+        style_context: Optional user style profile dictionary
 
     Returns:
         Dict with answer and sources
@@ -295,7 +344,12 @@ async def ask_about_memories(
             "error": None,
         }
 
-    prompt = _build_prompt(question, context_str)
+    # Build style context prompt if provided
+    style_prompt = None
+    if style_context:
+        style_prompt = build_style_context_prompt(style_context)
+
+    prompt = _build_prompt(question, context_str, style_prompt)
 
     try:
         response = client.models.generate_content(
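Taken together, the hunks add one optional keyword, style_context, to both entry points. A hypothetical call site (not shown in this diff) might look like the following; the database path, question, and profile contents are assumptions for illustration:

import asyncio

from chat_service import ask_about_memories  # backend module changed in this diff

# Invented example profile; the keys mirror those read by build_style_context_prompt.
style_profile = {
    "tone_distribution": {"casual": 0.6, "direct": 0.4},
    "avg_word_count": 18,
    "question_frequency": 0.25,
    "key_markers": ["short sentences", "minimal hedging"],
}

async def main() -> None:
    result = await ask_about_memories(
        db_path="memories.db",  # assumed path for the example
        question="Summarize my notes on deployment, in my style.",
        max_memories=10,
        style_context=style_profile,  # new optional parameter in 1.12.1
    )
    print(result["answer"])

asyncio.run(main())

Passing style_context=None (the default) leaves the prompt unchanged, so existing 1.12.0 callers are unaffected.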