omni-cortex 1.12.1-py3-none-any.whl → 1.14.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- omni_cortex-1.14.0.data/data/share/omni-cortex/dashboard/backend/chat_service.py +631 -0
- {omni_cortex-1.12.1.data → omni_cortex-1.14.0.data}/data/share/omni-cortex/dashboard/backend/database.py +224 -1
- {omni_cortex-1.12.1.data → omni_cortex-1.14.0.data}/data/share/omni-cortex/dashboard/backend/main.py +130 -37
- {omni_cortex-1.12.1.data → omni_cortex-1.14.0.data}/data/share/omni-cortex/dashboard/backend/models.py +35 -1
- {omni_cortex-1.12.1.dist-info → omni_cortex-1.14.0.dist-info}/METADATA +1 -1
- omni_cortex-1.14.0.dist-info/RECORD +26 -0
- omni_cortex-1.12.1.data/data/share/omni-cortex/dashboard/backend/chat_service.py +0 -371
- omni_cortex-1.12.1.dist-info/RECORD +0 -26
- {omni_cortex-1.12.1.data → omni_cortex-1.14.0.data}/data/share/omni-cortex/dashboard/backend/.env.example +0 -0
- {omni_cortex-1.12.1.data → omni_cortex-1.14.0.data}/data/share/omni-cortex/dashboard/backend/backfill_summaries.py +0 -0
- {omni_cortex-1.12.1.data → omni_cortex-1.14.0.data}/data/share/omni-cortex/dashboard/backend/image_service.py +0 -0
- {omni_cortex-1.12.1.data → omni_cortex-1.14.0.data}/data/share/omni-cortex/dashboard/backend/logging_config.py +0 -0
- {omni_cortex-1.12.1.data → omni_cortex-1.14.0.data}/data/share/omni-cortex/dashboard/backend/project_config.py +0 -0
- {omni_cortex-1.12.1.data → omni_cortex-1.14.0.data}/data/share/omni-cortex/dashboard/backend/project_scanner.py +0 -0
- {omni_cortex-1.12.1.data → omni_cortex-1.14.0.data}/data/share/omni-cortex/dashboard/backend/prompt_security.py +0 -0
- {omni_cortex-1.12.1.data → omni_cortex-1.14.0.data}/data/share/omni-cortex/dashboard/backend/pyproject.toml +0 -0
- {omni_cortex-1.12.1.data → omni_cortex-1.14.0.data}/data/share/omni-cortex/dashboard/backend/security.py +0 -0
- {omni_cortex-1.12.1.data → omni_cortex-1.14.0.data}/data/share/omni-cortex/dashboard/backend/uv.lock +0 -0
- {omni_cortex-1.12.1.data → omni_cortex-1.14.0.data}/data/share/omni-cortex/dashboard/backend/websocket_manager.py +0 -0
- {omni_cortex-1.12.1.data → omni_cortex-1.14.0.data}/data/share/omni-cortex/hooks/post_tool_use.py +0 -0
- {omni_cortex-1.12.1.data → omni_cortex-1.14.0.data}/data/share/omni-cortex/hooks/pre_tool_use.py +0 -0
- {omni_cortex-1.12.1.data → omni_cortex-1.14.0.data}/data/share/omni-cortex/hooks/session_utils.py +0 -0
- {omni_cortex-1.12.1.data → omni_cortex-1.14.0.data}/data/share/omni-cortex/hooks/stop.py +0 -0
- {omni_cortex-1.12.1.data → omni_cortex-1.14.0.data}/data/share/omni-cortex/hooks/subagent_stop.py +0 -0
- {omni_cortex-1.12.1.data → omni_cortex-1.14.0.data}/data/share/omni-cortex/hooks/user_prompt.py +0 -0
- {omni_cortex-1.12.1.dist-info → omni_cortex-1.14.0.dist-info}/WHEEL +0 -0
- {omni_cortex-1.12.1.dist-info → omni_cortex-1.14.0.dist-info}/entry_points.txt +0 -0
- {omni_cortex-1.12.1.dist-info → omni_cortex-1.14.0.dist-info}/licenses/LICENSE +0 -0

{omni_cortex-1.12.1.data → omni_cortex-1.14.0.data}/data/share/omni-cortex/dashboard/backend/database.py
RENAMED

@@ -1189,10 +1189,14 @@ def get_user_messages(
             except (json.JSONDecodeError, TypeError):
                 pass

+        # Get primary tone (first in the list) for frontend compatibility
+        primary_tone = tone_indicators[0] if tone_indicators else None
+
         messages.append({
             "id": row["id"],
             "session_id": row["session_id"],
-            "timestamp": row["timestamp"],
+            "created_at": row["timestamp"],  # Frontend expects created_at
+            "timestamp": row["timestamp"],  # Keep for backward compatibility
             "content": row["content"],
             "word_count": row["word_count"],
             "char_count": row["char_count"],
@@ -1200,6 +1204,7 @@ def get_user_messages(
             "has_code_blocks": bool(row["has_code_blocks"]),
             "has_questions": bool(row["has_questions"]),
             "has_commands": bool(row["has_commands"]),
+            "tone": primary_tone,  # Frontend expects single tone string
             "tone_indicators": tone_indicators,
             "project_path": row["project_path"],
         })
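For frontend consumers, a user-message record returned by get_user_messages after this change would look roughly like the following. The values are illustrative; only the key set comes from the diff above.

{
    "id": "msg-123",                       # illustrative value
    "session_id": "abc",
    "created_at": "2025-01-01T12:00:00",   # mirrors the stored timestamp
    "timestamp": "2025-01-01T12:00:00",    # kept for backward compatibility
    "content": "How do I install the hooks?",
    "word_count": 6,
    "char_count": 30,
    "has_code_blocks": False,
    "has_questions": True,
    "has_commands": False,
    "tone": "inquisitive",                 # first entry of tone_indicators, or None
    "tone_indicators": ["inquisitive"],
    "project_path": "/path/to/project",
}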
@@ -1428,3 +1433,221 @@ def _row_to_sample(row) -> dict:
         "has_questions": bool(row["has_questions"]),
         "tone_indicators": tone_indicators,
     }
+
+
+def get_style_samples_by_category(db_path: str, samples_per_tone: int = 3) -> dict:
+    """Get sample user messages grouped by style category.
+
+    Maps tone_indicators to frontend categories:
+    - professional: direct, polite, formal tones
+    - casual: casual tones
+    - technical: technical tones
+    - creative: unique patterns, inquisitive tones
+
+    Args:
+        db_path: Path to database
+        samples_per_tone: Max samples per category
+
+    Returns:
+        Dict with professional, casual, technical, creative lists
+    """
+    conn = get_connection(db_path)
+
+    # Check if user_messages table exists
+    table_check = conn.execute(
+        "SELECT name FROM sqlite_master WHERE type='table' AND name='user_messages'"
+    ).fetchone()
+
+    if not table_check:
+        conn.close()
+        return {
+            "professional": [],
+            "casual": [],
+            "technical": [],
+            "creative": []
+        }
+
+    result = {
+        "professional": [],
+        "casual": [],
+        "technical": [],
+        "creative": []
+    }
+
+    # Mapping from tone_indicators to categories
+    tone_to_category = {
+        "direct": "professional",
+        "polite": "professional",
+        "formal": "professional",
+        "casual": "casual",
+        "technical": "technical",
+        "inquisitive": "creative",
+        "urgent": "professional",
+    }
+
+    # Get all messages with tone indicators
+    cursor = conn.execute(
+        """SELECT content, tone_indicators FROM user_messages
+           WHERE tone_indicators IS NOT NULL AND tone_indicators != '[]'
+           ORDER BY timestamp DESC LIMIT 200"""
+    )
+
+    for row in cursor.fetchall():
+        content = row["content"]
+        try:
+            tones = json.loads(row["tone_indicators"]) if row["tone_indicators"] else []
+        except (json.JSONDecodeError, TypeError):
+            tones = []
+
+        # Map to categories
+        for tone in tones:
+            category = tone_to_category.get(tone.lower(), "creative")
+            if len(result[category]) < samples_per_tone:
+                # Truncate content for preview
+                preview = content[:200] + "..." if len(content) > 200 else content
+                if preview not in result[category]:
+                    result[category].append(preview)
+                break  # Only add to first matching category
+
+    # Fill any empty categories with recent messages
+    if any(len(v) == 0 for v in result.values()):
+        cursor = conn.execute(
+            "SELECT content FROM user_messages ORDER BY timestamp DESC LIMIT ?",
+            (samples_per_tone * 4,)
+        )
+        fallback_messages = [
+            row["content"][:200] + "..." if len(row["content"]) > 200 else row["content"]
+            for row in cursor.fetchall()
+        ]
+
+        for category in result:
+            if len(result[category]) == 0 and fallback_messages:
+                # Take messages for empty categories
+                for msg in fallback_messages[:samples_per_tone]:
+                    if msg not in [m for v in result.values() for m in v]:
+                        result[category].append(msg)
+
+    conn.close()
+    return result
+
+
+def compute_style_profile_from_messages(db_path: str) -> Optional[dict]:
+    """Compute a style profile from user_messages table.
+
+    This is used when no pre-computed profile exists.
+
+    Returns format expected by frontend StyleProfileCard:
+    - total_messages: int
+    - avg_word_count: float
+    - primary_tone: str
+    - question_percentage: float
+    - tone_distribution: dict[str, int]
+    - style_markers: list[str]
+    """
+    conn = get_connection(db_path)
+
+    # Check if user_messages table exists
+    table_check = conn.execute(
+        "SELECT name FROM sqlite_master WHERE type='table' AND name='user_messages'"
+    ).fetchone()
+
+    if not table_check:
+        conn.close()
+        return None
+
+    # Get total count and averages
+    stats = conn.execute(
+        """SELECT
+            COUNT(*) as total,
+            AVG(word_count) as avg_words,
+            AVG(char_count) as avg_chars,
+            SUM(CASE WHEN has_questions = 1 THEN 1 ELSE 0 END) as question_count
+        FROM user_messages"""
+    ).fetchone()
+
+    if not stats or stats["total"] == 0:
+        conn.close()
+        return None
+
+    total_messages = stats["total"]
+    avg_word_count = stats["avg_words"] or 0
+    question_percentage = (stats["question_count"] / total_messages * 100) if total_messages > 0 else 0
+
+    # Compute tone distribution
+    tone_distribution = {}
+    cursor = conn.execute(
+        "SELECT tone_indicators FROM user_messages WHERE tone_indicators IS NOT NULL AND tone_indicators != '[]'"
+    )
+    for row in cursor.fetchall():
+        try:
+            tones = json.loads(row["tone_indicators"]) if row["tone_indicators"] else []
+            for tone in tones:
+                tone_lower = tone.lower()
+                tone_distribution[tone_lower] = tone_distribution.get(tone_lower, 0) + 1
+        except (json.JSONDecodeError, TypeError):
+            pass
+
+    # Determine primary tone (most common)
+    primary_tone = "direct"
+    if tone_distribution:
+        primary_tone = max(tone_distribution, key=tone_distribution.get)
+
+    # Generate style markers based on the data
+    style_markers = []
+
+    if avg_word_count < 15:
+        style_markers.append("Concise")
+    elif avg_word_count > 40:
+        style_markers.append("Detailed")
+    else:
+        style_markers.append("Balanced length")
+
+    if question_percentage > 40:
+        style_markers.append("Question-driven")
+    elif question_percentage < 10:
+        style_markers.append("Statement-focused")
+
+    # Check for code usage
+    code_stats = conn.execute(
+        "SELECT SUM(CASE WHEN has_code_blocks = 1 THEN 1 ELSE 0 END) * 100.0 / COUNT(*) as code_pct FROM user_messages"
+    ).fetchone()
+    if code_stats and code_stats["code_pct"] and code_stats["code_pct"] > 20:
+        style_markers.append("Code-heavy")
+
+    # Add primary tone to markers
+    tone_labels = {
+        "direct": "Direct",
+        "polite": "Polite",
+        "technical": "Technical",
+        "casual": "Casual",
+        "inquisitive": "Inquisitive",
+        "urgent": "Urgent",
+    }
+    if primary_tone in tone_labels:
+        style_markers.append(tone_labels[primary_tone])
+
+    if not style_markers:
+        style_markers.append("Building profile...")
+
+    # Get sample messages to show the AI how the user actually writes
+    sample_messages = []
+    cursor = conn.execute(
+        """SELECT content FROM user_messages
+           WHERE length(content) > 20 AND length(content) < 500
+           AND has_commands = 0
+           ORDER BY timestamp DESC LIMIT 5"""
+    )
+    for row in cursor.fetchall():
+        sample_messages.append(row["content"])
+
+    conn.close()
+
+    return {
+        "totalMessages": total_messages,
+        "avgWordCount": round(avg_word_count, 1),
+        "primaryTone": primary_tone,
+        "questionPercentage": round(question_percentage, 1),
+        "toneDistribution": tone_distribution,
+        "styleMarkers": style_markers,
+        "sampleMessages": sample_messages,
+    }
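A minimal usage sketch for the two new helpers, assuming the dashboard backend's database module is importable and a hooks-created database exists at the (hypothetical) path below:

# Sketch only: DB_PATH is a placeholder, not a path shipped with the package.
from database import compute_style_profile_from_messages, get_style_samples_by_category

DB_PATH = "/path/to/cortex.db"

samples = get_style_samples_by_category(DB_PATH, samples_per_tone=3)
for category, previews in samples.items():
    print(category, len(previews))  # professional / casual / technical / creative

profile = compute_style_profile_from_messages(DB_PATH)
if profile is None:
    print("No user_messages table or no messages yet")
else:
    print(profile["primaryTone"], profile["styleMarkers"])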
{omni_cortex-1.12.1.data → omni_cortex-1.14.0.data}/data/share/omni-cortex/dashboard/backend/main.py
RENAMED

@@ -54,6 +54,8 @@ from database import (
     get_skill_usage,
     get_style_profile,
     get_style_samples,
+    get_style_samples_by_category,
+    compute_style_profile_from_messages,
     get_timeline,
     get_tool_usage,
     get_type_distribution,
@@ -73,6 +75,8 @@ from models import (
     BulkDeleteRequest,
     ChatRequest,
     ChatResponse,
+    ComposeRequest,
+    ComposeResponse,
     ConversationSaveRequest,
     ConversationSaveResponse,
     FilterParams,
@@ -1023,7 +1027,11 @@ async def chat_with_memories(
     style_context = None
     if request.use_style:
         try:
-
+            # First try computed profile from user_messages (richer data)
+            style_context = compute_style_profile_from_messages(project)
+            # Fall back to stored profile if no user_messages
+            if not style_context:
+                style_context = get_style_profile(project)
         except Exception:
             pass  # Graceful fallback if no style data

@@ -1061,7 +1069,11 @@ async def stream_chat(
     style_context = None
     if use_style:
         try:
-
+            # First try computed profile from user_messages (richer data)
+            style_context = compute_style_profile_from_messages(project)
+            # Fall back to stored profile if no user_messages
+            if not style_context:
+                style_context = get_style_profile(project)
         except Exception:
             pass  # Graceful fallback if no style data

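Both chat handlers now resolve the style context the same way. A hypothetical standalone helper capturing that pattern (not part of the package, shown only to make the fallback order explicit) might look like:

from typing import Optional

from database import compute_style_profile_from_messages, get_style_profile


def resolve_style_context(project: str) -> Optional[dict]:
    """Prefer the profile computed live from user_messages, else the stored one."""
    try:
        style_context = compute_style_profile_from_messages(project)
        if not style_context:
            style_context = get_style_profile(project)
        return style_context
    except Exception:
        return None  # graceful fallback if no style data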
@@ -1110,6 +1122,64 @@ async def save_chat_conversation(
         raise


+@app.post("/api/compose-response", response_model=ComposeResponse)
+@rate_limit("10/minute")
+async def compose_response_endpoint(
+    request: ComposeRequest,
+    project: str = Query(..., description="Path to the database file"),
+):
+    """Compose a response to an incoming message in the user's style."""
+    try:
+        if not Path(project).exists():
+            log_error("/api/compose-response", FileNotFoundError("Database not found"))
+            raise HTTPException(status_code=404, detail="Database not found")
+
+        # Get style profile
+        style_profile = compute_style_profile_from_messages(project)
+
+        # Compose the response
+        result = await chat_service.compose_response(
+            db_path=project,
+            incoming_message=request.incoming_message,
+            context_type=request.context_type,
+            template=request.template,
+            tone_level=request.tone_level,
+            include_memories=request.include_memories,
+            style_profile=style_profile,
+            custom_instructions=request.custom_instructions,
+            include_explanation=request.include_explanation,
+        )
+
+        if result.get("error"):
+            log_error("/api/compose-response", Exception(result["error"]))
+            raise HTTPException(status_code=500, detail=result["error"])
+
+        # Build response model
+        import uuid
+        from datetime import datetime
+        response = ComposeResponse(
+            id=str(uuid.uuid4()),
+            response=result["response"],
+            sources=result["sources"],
+            style_applied=bool(style_profile and style_profile.get("total_messages", 0) > 0),
+            tone_level=request.tone_level,
+            template_used=request.template,
+            incoming_message=request.incoming_message,
+            context_type=request.context_type,
+            created_at=datetime.now().isoformat(),
+            custom_instructions=request.custom_instructions,
+            explanation=result.get("explanation"),
+        )
+
+        log_success("/api/compose-response", context=request.context_type, tone=request.tone_level)
+        return response
+    except HTTPException:
+        raise
+    except Exception as e:
+        log_error("/api/compose-response", e)
+        raise HTTPException(status_code=500, detail=str(e))
+
+
 # --- Image Generation Endpoints ---


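An illustrative call to the new endpoint with requests; the host, port, and database path are placeholders, and the body fields follow the ComposeRequest model added in models.py further below. Note the endpoint is rate limited to 10 requests per minute.

import requests

resp = requests.post(
    "http://127.0.0.1:8000/api/compose-response",  # hypothetical dashboard address
    params={"project": "/path/to/cortex.db"},      # query parameter per the endpoint signature
    json={
        "incoming_message": "Hey, how do I install the hooks?",
        "context_type": "dm",
        "template": "guide",
        "tone_level": 60,
        "include_explanation": True,
    },
    timeout=60,
)
resp.raise_for_status()
data = resp.json()
print(data["style_applied"], data["response"])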
@@ -1262,6 +1332,7 @@ async def list_user_messages(
        )

        total_count = get_user_message_count(project, session_id=session_id)
+       has_more = (offset + len(messages)) < total_count

        log_success("/api/user-messages", count=len(messages), total=total_count)
        return UserMessagesResponse(
@@ -1269,6 +1340,7 @@ async def list_user_messages(
            total_count=total_count,
            limit=limit,
            offset=offset,
+           has_more=has_more,
        )
    except HTTPException:
        raise
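A client-side pagination sketch using the new has_more flag. The host/port and the limit/offset query parameters are assumptions (the diff only shows them echoed back in UserMessagesResponse), and the "messages" key is likewise assumed from the response model name.

import requests

BASE = "http://127.0.0.1:8000"  # hypothetical dashboard address
params = {"project": "/path/to/cortex.db", "limit": 50, "offset": 0}

while True:
    page = requests.get(f"{BASE}/api/user-messages", params=params, timeout=30).json()
    for message in page.get("messages", []):
        print(message.get("created_at"), message.get("tone"))
    if not page.get("has_more"):
        break
    params["offset"] += params["limit"]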
@@ -1321,79 +1393,100 @@ async def delete_user_messages_bulk_endpoint(
         raise HTTPException(status_code=500, detail=str(e))


-@app.get("/api/style
+@app.get("/api/style/profile")
 async def get_style_profile_endpoint(
     project: str = Query(..., description="Path to the database file"),
     project_path: Optional[str] = Query(None, description="Project-specific profile path, or None for global"),
 ):
     """Get user style profile for style analysis.

-    Returns
-    -
-    -
-    -
-    -
-    -
+    Returns style metrics computed from user messages:
+    - total_messages: Total message count
+    - avg_word_count: Average words per message
+    - primary_tone: Most common tone (direct, polite, technical, etc.)
+    - question_percentage: Percentage of messages containing questions
+    - tone_distribution: Count of messages by tone
+    - style_markers: Descriptive labels for writing style
     """
     try:
         if not Path(project).exists():
             raise HTTPException(status_code=404, detail="Database not found")

+        # First try to get pre-computed profile from user_style_profiles table
         profile = get_style_profile(project, project_path=project_path)

+        # If no stored profile, compute from user_messages
+        if not profile:
+            profile = compute_style_profile_from_messages(project)
+
+        # If still no profile (no user_messages), return empty structure
         if not profile:
-            # Return empty profile structure if none exists
             return {
-                "
-                "
-                "
-                "
-                "
-                "
-
-
-
-
-
-
-
-
-
-
-
+                "totalMessages": 0,
+                "avgWordCount": 0,
+                "primaryTone": "direct",
+                "questionPercentage": 0,
+                "toneDistribution": {},
+                "styleMarkers": ["No data available yet"],
+            }
+
+        # Convert stored profile format to frontend expected format if needed
+        if "totalMessages" in profile:
+            # Already in camelCase format from compute_style_profile_from_messages
+            pass
+        elif "id" in profile:
+            # Convert stored profile (from user_style_profiles table) to frontend format
+            tone_dist = {}
+            # Stored profile doesn't have tone_distribution, so compute it
+            computed = compute_style_profile_from_messages(project)
+            if computed:
+                tone_dist = computed.get("toneDistribution", {})
+                primary_tone = computed.get("primaryTone", "direct")
+                style_markers = computed.get("styleMarkers", [])
+            else:
+                primary_tone = "direct"
+                style_markers = []
+
+            profile = {
+                "totalMessages": profile.get("total_messages", 0),
+                "avgWordCount": profile.get("avg_word_count", 0) or 0,
+                "primaryTone": primary_tone,
+                "questionPercentage": (profile.get("question_frequency", 0) or 0) * 100,
+                "toneDistribution": tone_dist,
+                "styleMarkers": style_markers or profile.get("greeting_patterns", []) or [],
             }

-        log_success("/api/style
+        log_success("/api/style/profile", has_profile=True, total_messages=profile.get("totalMessages", 0))
         return profile
     except HTTPException:
         raise
     except Exception as e:
-        log_error("/api/style
+        log_error("/api/style/profile", e)
         raise HTTPException(status_code=500, detail=str(e))


-@app.get("/api/style
+@app.get("/api/style/samples")
 async def get_style_samples_endpoint(
     project: str = Query(..., description="Path to the database file"),
-
+    samples_per_tone: int = Query(3, ge=1, le=10, description="Max samples per tone category"),
 ):
     """Get sample user messages for style analysis preview.

-    Returns
-    including recent messages, messages with code blocks, and longer messages.
+    Returns messages grouped by style category (professional, casual, technical, creative).
     """
     try:
         if not Path(project).exists():
             raise HTTPException(status_code=404, detail="Database not found")

-        samples =
+        samples = get_style_samples_by_category(project, samples_per_tone=samples_per_tone)

-
-
+        total_count = sum(len(v) for v in samples.values())
+        log_success("/api/style/samples", count=total_count)
+        return samples
     except HTTPException:
         raise
     except Exception as e:
-        log_error("/api/style
+        log_error("/api/style/samples", e)
         raise HTTPException(status_code=500, detail=str(e))


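Illustrative calls to the reworked style endpoints; the host, port, and database path are placeholders, while the paths, query parameters, and response keys come from the diff above.

import requests

BASE = "http://127.0.0.1:8000"  # hypothetical dashboard address
params = {"project": "/path/to/cortex.db"}

profile = requests.get(f"{BASE}/api/style/profile", params=params, timeout=30).json()
print(profile["primaryTone"], profile["styleMarkers"])

samples = requests.get(
    f"{BASE}/api/style/samples",
    params={**params, "samples_per_tone": 5},
    timeout=30,
).json()
for category in ("professional", "casual", "technical", "creative"):
    print(category, len(samples.get(category, [])))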
{omni_cortex-1.12.1.data → omni_cortex-1.14.0.data}/data/share/omni-cortex/dashboard/backend/models.py
RENAMED

@@ -294,7 +294,8 @@ class UserMessage(BaseModel):

     id: str
     session_id: Optional[str] = None
-    timestamp: str
+    timestamp: Optional[str] = None  # Backward compatibility
+    created_at: Optional[str] = None  # Frontend expects created_at
     content: str
     word_count: Optional[int] = None
     char_count: Optional[int] = None
@@ -302,6 +303,7 @@ class UserMessage(BaseModel):
     has_code_blocks: bool = False
     has_questions: bool = False
     has_commands: bool = False
+    tone: Optional[str] = None  # Primary tone for frontend
     tone_indicators: list[str] = []
     project_path: Optional[str] = None

@@ -328,6 +330,7 @@ class UserMessagesResponse(BaseModel):
     total_count: int
     limit: int
     offset: int
+    has_more: bool = False  # Whether more results are available


 class StyleSample(BaseModel):
@@ -368,3 +371,34 @@ class BulkDeleteRequest(BaseModel):
     """Request body for bulk delete operations."""

     message_ids: list[str] = Field(..., min_length=1, max_length=100)
+
+
+# --- Response Composer Models ---
+
+
+class ComposeRequest(BaseModel):
+    """Request for composing a response in user's style."""
+
+    incoming_message: str = Field(..., min_length=1, max_length=5000)
+    context_type: str = Field(default="general")  # skool_post, dm, email, comment, general
+    template: Optional[str] = None  # answer, guide, redirect, acknowledge
+    tone_level: int = Field(default=50, ge=0, le=100)  # 0=casual, 100=professional
+    include_memories: bool = Field(default=True)
+    custom_instructions: Optional[str] = Field(default=None, max_length=2000)
+    include_explanation: bool = Field(default=False)
+
+
+class ComposeResponse(BaseModel):
+    """Response from compose endpoint."""
+
+    id: str
+    response: str
+    sources: list[ChatSource]
+    style_applied: bool
+    tone_level: int
+    template_used: Optional[str]
+    incoming_message: str
+    context_type: str
+    created_at: str
+    custom_instructions: Optional[str] = None
+    explanation: Optional[str] = None
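A quick sketch of the new request model's defaults and constraints, assuming the backend's models module is importable:

from models import ComposeRequest

req = ComposeRequest(incoming_message="Thanks for the detailed bug report!")
print(req.context_type, req.tone_level, req.include_memories)  # general 50 True

# tone_level is constrained to 0-100, so a value like 150 raises a pydantic ValidationError.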
{omni_cortex-1.12.1.dist-info → omni_cortex-1.14.0.dist-info}/METADATA
RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: omni-cortex
-Version: 1.12.1
+Version: 1.14.0
 Summary: Give Claude Code a perfect memory - auto-logs everything, searches smartly, and gets smarter over time
 Project-URL: Homepage, https://github.com/AllCytes/Omni-Cortex
 Project-URL: Repository, https://github.com/AllCytes/Omni-Cortex

omni_cortex-1.14.0.dist-info/RECORD
ADDED

@@ -0,0 +1,26 @@
+omni_cortex-1.14.0.data/data/share/omni-cortex/hooks/post_tool_use.py,sha256=zdaKChi8zOghRlHswisCBSQE3kW1MtmM6AFfI_ivvpI,16581
+omni_cortex-1.14.0.data/data/share/omni-cortex/hooks/pre_tool_use.py,sha256=3_V6Qw5m40eGrMmm5i94vINzeVxmcJvivdPa69H3AOI,8585
+omni_cortex-1.14.0.data/data/share/omni-cortex/hooks/session_utils.py,sha256=3SKPCytqWuRPOupWdzmwBoKBDJqtLcT1Nle_pueDQUY,5746
+omni_cortex-1.14.0.data/data/share/omni-cortex/hooks/stop.py,sha256=UroliJsyIS9_lj29-1d_r-80V4AfTMUFCaOjJZv3lwM,6976
+omni_cortex-1.14.0.data/data/share/omni-cortex/hooks/subagent_stop.py,sha256=V9HQSFGNOfkg8ZCstPEy4h5V8BP4AbrVr8teFzN1kNk,3314
+omni_cortex-1.14.0.data/data/share/omni-cortex/hooks/user_prompt.py,sha256=WNHJvhnkb9rXQ_HDpr6eLpM5vwy1Y1xl1EUoqyNC-x8,6859
+omni_cortex-1.14.0.data/data/share/omni-cortex/dashboard/backend/.env.example,sha256=9xS7-UiWlMddRwzlyyyKNHAMlNTsgH-2sPV266guJpQ,372
+omni_cortex-1.14.0.data/data/share/omni-cortex/dashboard/backend/backfill_summaries.py,sha256=ElchfcBv4pmVr2PsePCgFlCyuvf4_jDJj_C3AmMhu7U,8973
+omni_cortex-1.14.0.data/data/share/omni-cortex/dashboard/backend/chat_service.py,sha256=iEDpAw8OVaHM2312VPcAM-w6dYabUpjaTvWl1jGhqi0,20948
+omni_cortex-1.14.0.data/data/share/omni-cortex/dashboard/backend/database.py,sha256=_sWqLjx_mWOxqNpfbv-bChtPfQkHzUNzly1pGu_zPKI,54199
+omni_cortex-1.14.0.data/data/share/omni-cortex/dashboard/backend/image_service.py,sha256=NP6ojFpHb6iNTYRkXqYu1CL6WvooZpZ54mjLiWSWG_g,19205
+omni_cortex-1.14.0.data/data/share/omni-cortex/dashboard/backend/logging_config.py,sha256=WnunFGET9zlsn9WBpVsio2zI7BiUQanE0xzAQQxIhII,3944
+omni_cortex-1.14.0.data/data/share/omni-cortex/dashboard/backend/main.py,sha256=d6DW-UMCCHgEIX9kPQPgsxKKstKY8MXJmSMhQ7vPeHc,59602
+omni_cortex-1.14.0.data/data/share/omni-cortex/dashboard/backend/models.py,sha256=q2dV8OxziqXbwtq-xo9Hv4RwSz_0xGgnDjX5-_lBnv4,11383
+omni_cortex-1.14.0.data/data/share/omni-cortex/dashboard/backend/project_config.py,sha256=ZxGoeRpHvN5qQyf2hRxrAZiHrPSwdQp59f0di6O1LKM,4352
+omni_cortex-1.14.0.data/data/share/omni-cortex/dashboard/backend/project_scanner.py,sha256=lwFXS8iJbOoxf7FAyo2TjH25neaMHiJ8B3jS57XxtDI,5713
+omni_cortex-1.14.0.data/data/share/omni-cortex/dashboard/backend/prompt_security.py,sha256=LcdZhYy1CfpSq_4BPO6lMJ15phc2ZXLUSBAnAvODVCI,3423
+omni_cortex-1.14.0.data/data/share/omni-cortex/dashboard/backend/pyproject.toml,sha256=9pbbGQXLe1Xd06nZAtDySCHIlfMWvPaB-C6tGZR6umc,502
+omni_cortex-1.14.0.data/data/share/omni-cortex/dashboard/backend/security.py,sha256=nQsoPE0n5dtY9ive00d33W1gL48GgK7C5Ae0BK2oW2k,3479
+omni_cortex-1.14.0.data/data/share/omni-cortex/dashboard/backend/uv.lock,sha256=miB9zGGSirBkjDE-OZTPCnv43Yc98xuAz_Ne8vTNFHg,186004
+omni_cortex-1.14.0.data/data/share/omni-cortex/dashboard/backend/websocket_manager.py,sha256=gNQLd94AcC-InumGQmUolREhiogCzilYWpLN8SRZjHI,3645
+omni_cortex-1.14.0.dist-info/METADATA,sha256=eCAIYv9Dc5Zol89ZVQOFMr7zbIMgzpXV0TQRyVwuoNU,15712
+omni_cortex-1.14.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+omni_cortex-1.14.0.dist-info/entry_points.txt,sha256=rohx4mFH2ffZmMb9QXPZmFf-ZGjA3jpKVDVeET-ttiM,150
+omni_cortex-1.14.0.dist-info/licenses/LICENSE,sha256=oG_397owMmi-Umxp5sYocJ6RPohp9_bDNnnEu9OUphg,1072
+omni_cortex-1.14.0.dist-info/RECORD,,