wingman-ai 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- share/wingman/node_listener/package-lock.json +1785 -0
- share/wingman/node_listener/package.json +50 -0
- share/wingman/node_listener/src/index.ts +108 -0
- share/wingman/node_listener/src/ipc.ts +70 -0
- share/wingman/node_listener/src/messageHandler.ts +135 -0
- share/wingman/node_listener/src/socket.ts +244 -0
- share/wingman/node_listener/src/types.d.ts +13 -0
- share/wingman/node_listener/tsconfig.json +19 -0
- wingman/__init__.py +4 -0
- wingman/__main__.py +6 -0
- wingman/cli/__init__.py +5 -0
- wingman/cli/commands/__init__.py +1 -0
- wingman/cli/commands/auth.py +90 -0
- wingman/cli/commands/config.py +109 -0
- wingman/cli/commands/init.py +71 -0
- wingman/cli/commands/logs.py +84 -0
- wingman/cli/commands/start.py +111 -0
- wingman/cli/commands/status.py +84 -0
- wingman/cli/commands/stop.py +33 -0
- wingman/cli/commands/uninstall.py +113 -0
- wingman/cli/main.py +50 -0
- wingman/cli/wizard.py +356 -0
- wingman/config/__init__.py +31 -0
- wingman/config/paths.py +153 -0
- wingman/config/personality.py +155 -0
- wingman/config/registry.py +343 -0
- wingman/config/settings.py +294 -0
- wingman/core/__init__.py +16 -0
- wingman/core/agent.py +257 -0
- wingman/core/ipc_handler.py +124 -0
- wingman/core/llm/__init__.py +5 -0
- wingman/core/llm/client.py +77 -0
- wingman/core/memory/__init__.py +6 -0
- wingman/core/memory/context.py +109 -0
- wingman/core/memory/models.py +213 -0
- wingman/core/message_processor.py +277 -0
- wingman/core/policy/__init__.py +5 -0
- wingman/core/policy/evaluator.py +265 -0
- wingman/core/process_manager.py +135 -0
- wingman/core/safety/__init__.py +8 -0
- wingman/core/safety/cooldown.py +63 -0
- wingman/core/safety/quiet_hours.py +75 -0
- wingman/core/safety/rate_limiter.py +58 -0
- wingman/core/safety/triggers.py +117 -0
- wingman/core/transports/__init__.py +14 -0
- wingman/core/transports/base.py +106 -0
- wingman/core/transports/imessage/__init__.py +5 -0
- wingman/core/transports/imessage/db_listener.py +280 -0
- wingman/core/transports/imessage/sender.py +162 -0
- wingman/core/transports/imessage/transport.py +140 -0
- wingman/core/transports/whatsapp.py +180 -0
- wingman/daemon/__init__.py +5 -0
- wingman/daemon/manager.py +303 -0
- wingman/installer/__init__.py +5 -0
- wingman/installer/node_installer.py +253 -0
- wingman_ai-1.0.0.dist-info/METADATA +553 -0
- wingman_ai-1.0.0.dist-info/RECORD +60 -0
- wingman_ai-1.0.0.dist-info/WHEEL +4 -0
- wingman_ai-1.0.0.dist-info/entry_points.txt +2 -0
- wingman_ai-1.0.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
"""OpenAI API client wrapper."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
|
|
5
|
+
from openai import AsyncOpenAI
|
|
6
|
+
|
|
7
|
+
logger = logging.getLogger(__name__)
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class LLMClient:
    """Thin async wrapper around the OpenAI chat-completions API."""

    def __init__(
        self, api_key: str, model: str = "gpt-4o", max_tokens: int = 150, temperature: float = 0.8
    ):
        # One AsyncOpenAI client instance is created up front and reused
        # for every request issued by this wrapper.
        self.client = AsyncOpenAI(api_key=api_key)
        self.model = model
        self.max_tokens = max_tokens
        self.temperature = temperature

    async def generate_response(
        self,
        system_prompt: str,
        messages: list[dict[str, str]],
        language_instruction: str | None = None,
    ) -> str | None:
        """Request a chat completion and return its text.

        Args:
            system_prompt: Personality/system prompt placed first in the request.
            messages: Conversation history in OpenAI chat format.
            language_instruction: Optional instruction appended to the system
                prompt (e.g. which language to answer in).

        Returns:
            The stripped completion text, or None when the API errors out or
            produces an empty completion.
        """
        try:
            # Fold the optional language instruction into the system prompt.
            system_text = system_prompt
            if language_instruction:
                system_text += f"\n\n{language_instruction}"

            # System message always leads; history follows in order.
            payload = [{"role": "system", "content": system_text}, *messages]

            logger.debug(f"Sending request to {self.model} with {len(messages)} context messages")

            completion = await self.client.chat.completions.create(
                model=self.model,
                messages=payload,
                max_tokens=self.max_tokens,
                temperature=self.temperature,
            )

            choices = completion.choices
            content = choices[0].message.content if choices else None
            if content:
                reply = content.strip()
                logger.debug(f"Generated response: {reply[:50]}...")
                return reply

            logger.warning("Empty response from API")
            return None

        except Exception as e:
            # Deliberate catch-all: callers treat None as "no reply available".
            logger.error(f"API error: {e}")
            return None

    async def health_check(self) -> bool:
        """Fire a tiny completion request to verify the API is reachable."""
        try:
            probe = await self.client.chat.completions.create(
                model=self.model, messages=[{"role": "user", "content": "Hi"}], max_tokens=5
            )
        except Exception as e:
            logger.error(f"Health check failed: {e}")
            return False
        return bool(probe.choices)
|
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
"""Context building for LLM conversations."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
from typing import Any
|
|
5
|
+
|
|
6
|
+
from .models import MessageStore
|
|
7
|
+
|
|
8
|
+
logger = logging.getLogger(__name__)
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class ContextBuilder:
    """Builds conversation context for LLM from stored messages."""

    def __init__(
        self, message_store: "MessageStore", window_size: int = 30, bot_name: str = "Maximus"
    ):
        """
        Args:
            message_store: Backing store used to fetch recent chat history.
            window_size: Maximum number of past messages to include.
            bot_name: Display name of the bot.
        """
        self.store = message_store
        self.window_size = window_size
        self.bot_name = bot_name

    def build_context(self, chat_id: str, current_message: dict[str, Any]) -> list[dict[str, str]]:
        """
        Build conversation context for the LLM.

        Args:
            chat_id: Chat to build context for
            current_message: The current incoming message

        Returns:
            List of message dicts in OpenAI API format, history first and the
            current message last
        """
        # Get recent messages from storage (oldest first).
        messages = self.store.get_recent_messages(chat_id, limit=self.window_size)

        # Convert to OpenAI message format: bot messages become "assistant"
        # turns, everything else becomes "user" turns tagged with the sender.
        context = []
        for msg in messages:
            if msg.is_self:
                context.append({"role": "assistant", "content": msg.text})
            else:
                # Prefix user messages with the sender name so the model can
                # tell speakers apart in group chats.
                sender = msg.sender_name or "User"
                content = f"[{sender}]: {msg.text}"
                context.append({"role": "user", "content": content})

        # Append the triggering message itself as the final user turn.
        sender_name = current_message.get("senderName") or "User"
        context.append(
            {"role": "user", "content": f"[{sender_name}]: {current_message.get('text', '')}"}
        )

        logger.debug(f"Built context with {len(context)} messages for {chat_id}")
        return context

    def detect_language(self, text: str) -> str:
        """
        Detect the primary language of the text.
        Simple heuristic for Hindi/Hinglish/English detection.

        Returns:
            'hindi', 'hinglish', or 'english'
        """
        # Devanagari Unicode range: mostly-Devanagari text is Hindi.
        hindi_chars = sum(1 for c in text if "\u0900" <= c <= "\u097f")

        if hindi_chars > len(text) * 0.3:
            return "hindi"

        # Common romanized Hindi (Hinglish) function words.
        hinglish_markers = {
            "hai",
            "hain",
            "kya",
            "nahi",
            "aur",
            "bhi",
            "kaise",
            "kaisa",
            "accha",
            "theek",
            "yaar",
            "bhai",
            "arre",
            "haan",
            "matlab",
            "wala",
            "kar",
            "karo",
            "karna",
            "raha",
            "rahi",
        }

        # Match markers against whole words only. The previous substring test
        # produced false positives ("kar" inside "karate", "hai" inside
        # "chair"), misclassifying plain English as Hinglish.
        tokens = {word.strip(".,!?;:'\"()-") for word in text.lower().split()}
        hinglish_count = sum(1 for marker in hinglish_markers if marker in tokens)

        if hinglish_count >= 2:
            return "hinglish"

        return "english"

    def get_language_instruction(self, language: str) -> str:
        """Get language-specific instruction for the LLM.

        Unknown language codes fall back to the English instruction.
        """
        instructions = {
            "hindi": "Respond in Hindi (Devanagari script). Match the casual tone.",
            "hinglish": "Respond in Hinglish (Hindi words in Roman script mixed with English). Keep it natural and casual.",
            "english": "Respond in English. Keep it casual and friendly.",
        }
        return instructions.get(language, instructions["english"])
|
|
@@ -0,0 +1,213 @@
|
|
|
1
|
+
"""SQLite database models and operations."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
import sqlite3
|
|
5
|
+
from contextlib import contextmanager
|
|
6
|
+
from dataclasses import dataclass
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
|
|
9
|
+
logger = logging.getLogger(__name__)
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
@dataclass
class Message:
    """Represents a stored message.

    Mirrors one row of the ``messages`` table managed by MessageStore.
    """

    # Database row id; None until the message has been inserted.
    id: int | None
    # Identifier of the conversation this message belongs to.
    chat_id: str
    # Platform-specific id of the sender.
    sender_id: str
    # Human-readable sender name, when the transport provides one.
    sender_name: str | None
    # Message body text.
    text: str
    # Unix timestamp in seconds (compared against time.time() elsewhere).
    timestamp: float
    # True when the message was sent by the bot itself.
    is_self: bool = False
    # Source transport, e.g. "whatsapp".
    platform: str = "whatsapp"
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class MessageStore:
|
|
27
|
+
"""SQLite-based message storage."""
|
|
28
|
+
|
|
29
|
+
def __init__(self, db_path: Path):
|
|
30
|
+
self.db_path = db_path
|
|
31
|
+
self._ensure_db_exists()
|
|
32
|
+
|
|
33
|
+
def _ensure_db_exists(self) -> None:
|
|
34
|
+
"""Create database and tables if they don't exist."""
|
|
35
|
+
self.db_path.parent.mkdir(parents=True, exist_ok=True)
|
|
36
|
+
|
|
37
|
+
with self._get_connection() as conn:
|
|
38
|
+
conn.execute("""
|
|
39
|
+
CREATE TABLE IF NOT EXISTS messages (
|
|
40
|
+
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
41
|
+
chat_id TEXT NOT NULL,
|
|
42
|
+
sender_id TEXT NOT NULL,
|
|
43
|
+
sender_name TEXT,
|
|
44
|
+
text TEXT NOT NULL,
|
|
45
|
+
timestamp REAL NOT NULL,
|
|
46
|
+
is_self BOOLEAN DEFAULT 0,
|
|
47
|
+
platform TEXT DEFAULT 'whatsapp'
|
|
48
|
+
)
|
|
49
|
+
""")
|
|
50
|
+
conn.execute("""
|
|
51
|
+
CREATE INDEX IF NOT EXISTS idx_messages_chat
|
|
52
|
+
ON messages(chat_id, timestamp DESC)
|
|
53
|
+
""")
|
|
54
|
+
# Add platform column if it doesn't exist (migration for existing DBs)
|
|
55
|
+
try:
|
|
56
|
+
conn.execute("SELECT platform FROM messages LIMIT 1")
|
|
57
|
+
except Exception:
|
|
58
|
+
conn.execute("ALTER TABLE messages ADD COLUMN platform TEXT DEFAULT 'whatsapp'")
|
|
59
|
+
logger.info("Added platform column to messages table")
|
|
60
|
+
conn.commit()
|
|
61
|
+
logger.info(f"Database initialized: {self.db_path}")
|
|
62
|
+
|
|
63
|
+
@contextmanager
|
|
64
|
+
def _get_connection(self):
|
|
65
|
+
"""Get a database connection context manager."""
|
|
66
|
+
conn = sqlite3.connect(str(self.db_path))
|
|
67
|
+
conn.row_factory = sqlite3.Row
|
|
68
|
+
try:
|
|
69
|
+
yield conn
|
|
70
|
+
finally:
|
|
71
|
+
conn.close()
|
|
72
|
+
|
|
73
|
+
def store_message(self, message: Message) -> int:
|
|
74
|
+
"""
|
|
75
|
+
Store a message in the database.
|
|
76
|
+
|
|
77
|
+
Returns:
|
|
78
|
+
The ID of the inserted message
|
|
79
|
+
"""
|
|
80
|
+
with self._get_connection() as conn:
|
|
81
|
+
cursor = conn.execute(
|
|
82
|
+
"""
|
|
83
|
+
INSERT INTO messages (chat_id, sender_id, sender_name, text, timestamp, is_self, platform)
|
|
84
|
+
VALUES (?, ?, ?, ?, ?, ?, ?)
|
|
85
|
+
""",
|
|
86
|
+
(
|
|
87
|
+
message.chat_id,
|
|
88
|
+
message.sender_id,
|
|
89
|
+
message.sender_name,
|
|
90
|
+
message.text,
|
|
91
|
+
message.timestamp,
|
|
92
|
+
1 if message.is_self else 0,
|
|
93
|
+
message.platform,
|
|
94
|
+
),
|
|
95
|
+
)
|
|
96
|
+
conn.commit()
|
|
97
|
+
msg_id = cursor.lastrowid
|
|
98
|
+
logger.debug(
|
|
99
|
+
f"Stored message {msg_id} in chat {message.chat_id} (platform={message.platform})"
|
|
100
|
+
)
|
|
101
|
+
return msg_id
|
|
102
|
+
|
|
103
|
+
def get_recent_messages(self, chat_id: str, limit: int = 30) -> list[Message]:
|
|
104
|
+
"""
|
|
105
|
+
Get recent messages from a chat.
|
|
106
|
+
|
|
107
|
+
Args:
|
|
108
|
+
chat_id: The chat to get messages from
|
|
109
|
+
limit: Maximum number of messages to return
|
|
110
|
+
|
|
111
|
+
Returns:
|
|
112
|
+
List of messages, oldest first
|
|
113
|
+
"""
|
|
114
|
+
with self._get_connection() as conn:
|
|
115
|
+
cursor = conn.execute(
|
|
116
|
+
"""
|
|
117
|
+
SELECT id, chat_id, sender_id, sender_name, text, timestamp, is_self, platform
|
|
118
|
+
FROM messages
|
|
119
|
+
WHERE chat_id = ?
|
|
120
|
+
ORDER BY timestamp DESC
|
|
121
|
+
LIMIT ?
|
|
122
|
+
""",
|
|
123
|
+
(chat_id, limit),
|
|
124
|
+
)
|
|
125
|
+
rows = cursor.fetchall()
|
|
126
|
+
|
|
127
|
+
# Convert to Message objects and reverse for chronological order
|
|
128
|
+
messages = [
|
|
129
|
+
Message(
|
|
130
|
+
id=row["id"],
|
|
131
|
+
chat_id=row["chat_id"],
|
|
132
|
+
sender_id=row["sender_id"],
|
|
133
|
+
sender_name=row["sender_name"],
|
|
134
|
+
text=row["text"],
|
|
135
|
+
timestamp=row["timestamp"],
|
|
136
|
+
is_self=bool(row["is_self"]),
|
|
137
|
+
platform=row["platform"] or "whatsapp",
|
|
138
|
+
)
|
|
139
|
+
for row in reversed(rows)
|
|
140
|
+
]
|
|
141
|
+
|
|
142
|
+
logger.debug(f"Retrieved {len(messages)} messages from {chat_id}")
|
|
143
|
+
return messages
|
|
144
|
+
|
|
145
|
+
def get_last_sender(self, chat_id: str) -> str | None:
|
|
146
|
+
"""
|
|
147
|
+
Get the sender ID of the last message in a chat.
|
|
148
|
+
|
|
149
|
+
Returns:
|
|
150
|
+
Sender ID or None if chat is empty
|
|
151
|
+
"""
|
|
152
|
+
with self._get_connection() as conn:
|
|
153
|
+
cursor = conn.execute(
|
|
154
|
+
"""
|
|
155
|
+
SELECT sender_id, is_self
|
|
156
|
+
FROM messages
|
|
157
|
+
WHERE chat_id = ?
|
|
158
|
+
ORDER BY timestamp DESC
|
|
159
|
+
LIMIT 1
|
|
160
|
+
""",
|
|
161
|
+
(chat_id,),
|
|
162
|
+
)
|
|
163
|
+
row = cursor.fetchone()
|
|
164
|
+
|
|
165
|
+
if row:
|
|
166
|
+
return "self" if row["is_self"] else row["sender_id"]
|
|
167
|
+
return None
|
|
168
|
+
|
|
169
|
+
def was_last_message_from_self(self, chat_id: str) -> bool:
|
|
170
|
+
"""Check if the last message in a chat was from the bot."""
|
|
171
|
+
with self._get_connection() as conn:
|
|
172
|
+
cursor = conn.execute(
|
|
173
|
+
"""
|
|
174
|
+
SELECT is_self
|
|
175
|
+
FROM messages
|
|
176
|
+
WHERE chat_id = ?
|
|
177
|
+
ORDER BY timestamp DESC
|
|
178
|
+
LIMIT 1
|
|
179
|
+
""",
|
|
180
|
+
(chat_id,),
|
|
181
|
+
)
|
|
182
|
+
row = cursor.fetchone()
|
|
183
|
+
|
|
184
|
+
return bool(row and row["is_self"])
|
|
185
|
+
|
|
186
|
+
def get_message_count(self, chat_id: str | None = None) -> int:
|
|
187
|
+
"""Get total message count, optionally filtered by chat."""
|
|
188
|
+
with self._get_connection() as conn:
|
|
189
|
+
if chat_id:
|
|
190
|
+
cursor = conn.execute("SELECT COUNT(*) FROM messages WHERE chat_id = ?", (chat_id,))
|
|
191
|
+
else:
|
|
192
|
+
cursor = conn.execute("SELECT COUNT(*) FROM messages")
|
|
193
|
+
return cursor.fetchone()[0]
|
|
194
|
+
|
|
195
|
+
def cleanup_old_messages(self, days: int = 30) -> int:
|
|
196
|
+
"""
|
|
197
|
+
Delete messages older than specified days.
|
|
198
|
+
|
|
199
|
+
Returns:
|
|
200
|
+
Number of deleted messages
|
|
201
|
+
"""
|
|
202
|
+
import time
|
|
203
|
+
|
|
204
|
+
cutoff = time.time() - (days * 24 * 60 * 60)
|
|
205
|
+
|
|
206
|
+
with self._get_connection() as conn:
|
|
207
|
+
cursor = conn.execute("DELETE FROM messages WHERE timestamp < ?", (cutoff,))
|
|
208
|
+
conn.commit()
|
|
209
|
+
deleted = cursor.rowcount
|
|
210
|
+
|
|
211
|
+
if deleted > 0:
|
|
212
|
+
logger.info(f"Cleaned up {deleted} old messages")
|
|
213
|
+
return deleted
|
|
@@ -0,0 +1,277 @@
|
|
|
1
|
+
"""Core message processing logic."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
import time
|
|
5
|
+
from collections.abc import Callable, Coroutine
|
|
6
|
+
from typing import Any
|
|
7
|
+
|
|
8
|
+
from wingman.config.personality import RoleBasedPromptBuilder, get_personality_prompt
|
|
9
|
+
from wingman.config.registry import ContactProfile, ContactRegistry, GroupRegistry
|
|
10
|
+
|
|
11
|
+
from .llm.client import LLMClient
|
|
12
|
+
from .memory.context import ContextBuilder
|
|
13
|
+
from .memory.models import Message, MessageStore
|
|
14
|
+
from .policy import PolicyEvaluator
|
|
15
|
+
from .safety import CooldownManager, QuietHoursChecker, RateLimiter, TriggerDetector
|
|
16
|
+
|
|
17
|
+
logger = logging.getLogger(__name__)
|
|
18
|
+
|
|
19
|
+
# Type alias for message sender callback
|
|
20
|
+
MessageSender = Callable[[str, str, str], Coroutine[Any, Any, bool]]
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class MessageProcessor:
    """
    Core orchestrator for processing incoming messages.
    Implements the full message handling flow with safety checks.

    Transport-agnostic: uses a sender callback instead of direct IPC.

    The ordering of steps inside ``process_message`` is load-bearing: the
    incoming message is stored before any checks so that history is complete
    even when no reply is sent.
    """

    def __init__(
        self,
        store: MessageStore,
        llm: LLMClient,
        contact_registry: ContactRegistry,
        group_registry: GroupRegistry,
        policy_evaluator: PolicyEvaluator,
        bot_name: str = "Maximus",
        max_replies_per_hour: int = 30,
        default_cooldown: int = 60,
        quiet_start: int = 0,
        quiet_end: int = 6,
        context_window: int = 30,
    ):
        """Wire together storage, LLM, registries, and safety components.

        Args:
            store: Message persistence layer.
            llm: Client used to generate replies.
            contact_registry: Resolves sender IDs to contact profiles.
            group_registry: Resolves group chat IDs to group profiles.
            policy_evaluator: Decides whether a message deserves a reply.
            bot_name: Bot display name, used for prompts and self-detection.
            max_replies_per_hour: Global reply budget (passed to RateLimiter).
            default_cooldown: Per-chat cooldown (passed to CooldownManager;
                presumably seconds — confirm against CooldownManager).
            quiet_start: Start of the quiet window (passed to QuietHoursChecker).
            quiet_end: End of the quiet window (passed to QuietHoursChecker).
            context_window: How many past messages feed the LLM context.
        """
        self.store = store
        self.llm = llm
        self.bot_name = bot_name

        # Config-driven registries
        self.contact_registry = contact_registry
        self.group_registry = group_registry
        self.policy_evaluator = policy_evaluator

        # Initialize safety components
        self.rate_limiter = RateLimiter(max_replies_per_hour)
        self.cooldown = CooldownManager(default_cooldown)
        self.quiet_hours = QuietHoursChecker(quiet_start, quiet_end)
        self.triggers = TriggerDetector(bot_name)  # Keep for fallback

        # Context builder
        self.context_builder = ContextBuilder(store, context_window, bot_name)

        # Role-based prompt builder
        self.prompt_builder = RoleBasedPromptBuilder(bot_name)

        # Track our own user ID per platform (populated via set_self_id).
        self.self_ids: dict[str, str] = {}

        # Message sender callback: (platform, chat_id, text) -> success
        self._send_message: MessageSender | None = None

    def set_self_id(self, user_id: str, platform: str = "whatsapp") -> None:
        """Set our own user ID for self-message detection."""
        self.self_ids[platform] = user_id
        logger.info(f"Self ID set for {platform}: {user_id}")

    def set_sender(self, sender: MessageSender) -> None:
        """Set the message sender callback.

        Must be called before messages arrive; ``process_message`` logs an
        error and drops the reply if no sender is configured.
        """
        self._send_message = sender

    async def process_message(self, data: dict[str, Any]) -> None:
        """
        Process an incoming message through the full pipeline.

        Flow:
        1. Store message in SQLite
        2. Check safety rules
        3. Check for triggers
        4. Generate and send response

        Args:
            data: Transport-supplied message dict; keys used here are chatId,
                senderId, senderName, text, timestamp, isGroup, isSelf,
                platform, and (optionally) quotedMessage.
        """
        chat_id = data.get("chatId", "")
        sender_id = data.get("senderId", "")
        sender_name = data.get("senderName")
        text = data.get("text", "")
        timestamp = data.get("timestamp", time.time())
        is_group = data.get("isGroup", False)
        is_self = data.get("isSelf", False)
        platform = data.get("platform", "whatsapp")

        logger.info(
            f"Processing message: platform={platform}, chat={chat_id[:20]}..., "
            f"sender={sender_name or sender_id[:15]}..., "
            f"text={text[:50]}..."
        )

        # 1. Always store the message, even when we will not reply —
        # history must stay complete for future context building.
        message = Message(
            id=None,
            chat_id=chat_id,
            sender_id=sender_id,
            sender_name=sender_name,
            text=text,
            timestamp=timestamp,
            is_self=is_self,
            platform=platform,
        )
        self.store.store_message(message)

        # Don't process our own messages
        if is_self:
            logger.debug("Skipping self message")
            return

        # 2. Resolve contact and group profiles
        contact = self.contact_registry.resolve(sender_id)
        group = self.group_registry.resolve(chat_id) if is_group else None

        logger.info(
            f"Resolved: contact={contact.name} (role={contact.role.value}, tone={contact.tone.value})"
            + (f", group={group.name} (category={group.category.value})" if group else "")
        )

        # 3. Apply per-contact cooldown override if configured
        if contact.cooldown_override is not None:
            self.cooldown.set_cooldown(chat_id, contact.cooldown_override)

        # 4. Check safety rules
        skip_reason = self._check_safety_rules(chat_id)
        if skip_reason:
            logger.info(f"Skipping message: {skip_reason}")
            return

        # 5. Evaluate policy rules
        is_reply_to_bot = self._is_reply_to_bot(data, platform)

        context = self.policy_evaluator.create_context(
            chat_id=chat_id,
            sender_id=sender_id,
            text=text,
            is_group=is_group,
            contact=contact,
            group=group,
            is_reply_to_bot=is_reply_to_bot,
            platform=platform,
        )

        decision = self.policy_evaluator.evaluate(context)

        if not decision.should_respond:
            logger.debug(f"Policy decision: no response (reason: {decision.reason})")
            return

        logger.info(
            f"Responding to message (policy: {decision.reason}, action: {decision.action.value})"
        )

        # 6. Generate response with role-based prompt
        response = await self._generate_response(chat_id, data, contact)

        if not response:
            logger.warning("Failed to generate response")
            return

        # 7. Send response via transport
        if self._send_message:
            success = await self._send_message(platform, chat_id, response)
            if not success:
                logger.error(f"Failed to send message via {platform}")
                return
        else:
            logger.error("No message sender configured")
            return

        # 8. Update safety trackers (only after a confirmed successful send)
        self.rate_limiter.record_reply()
        self.cooldown.record_reply(chat_id)

        # 9. Store our response so it appears in future context windows
        self_id = self.self_ids.get(platform, "self")
        bot_message = Message(
            id=None,
            chat_id=chat_id,
            sender_id=self_id,
            sender_name=self.bot_name,
            text=response,
            timestamp=time.time(),
            is_self=True,
            platform=platform,
        )
        self.store.store_message(bot_message)

        logger.info(f"Response sent via {platform}: {response[:50]}...")

    def _check_safety_rules(self, chat_id: str) -> str | None:
        """
        Check all safety rules.

        Returns:
            Reason string if should skip, None if OK to proceed
        """
        # Check quiet hours
        if self.quiet_hours.is_quiet_time():
            return "quiet_hours"

        # Check rate limit
        if not self.rate_limiter.can_reply():
            return "rate_limit"

        # Check per-chat cooldown
        if self.cooldown.is_on_cooldown(chat_id):
            return "cooldown"

        # Check double-reply (don't reply if last message was ours)
        # NOTE(review): by this point the incoming message has already been
        # stored (step 1), so the "last" message is normally the incoming one
        # and this check only fires when a reply to a *concurrent* message
        # landed in the store after step 1. That appears intentional — do not
        # move this check before storage, or the bot would refuse any normal
        # back-and-forth turn. Confirm with the original author.
        if self.store.was_last_message_from_self(chat_id):
            return "double_reply"

        return None

    def _is_reply_to_bot(self, data: dict[str, Any], platform: str = "whatsapp") -> bool:
        """Check if the message is a reply to one of our messages.

        Uses the transport's ``quotedMessage`` payload: matches either the
        platform-specific self ID or (heuristically) our bot name appearing
        inside the quoted sender string.
        """
        quoted = data.get("quotedMessage")
        if not quoted:
            return False

        quoted_sender = quoted.get("senderId", "")

        # Check if quoted message is from us (platform-specific self ID)
        self_id = self.self_ids.get(platform)
        if self_id and quoted_sender == self_id:
            return True

        # Also check for our name in the quoted sender (substring match —
        # may over-match if a contact's ID contains the bot name).
        if self.bot_name.lower() in quoted_sender.lower():
            return True

        return False

    async def _generate_response(
        self, chat_id: str, message_data: dict[str, Any], contact: ContactProfile | None = None
    ) -> str | None:
        """Generate an LLM response for the message.

        Args:
            chat_id: Chat the response is for.
            message_data: The raw incoming message dict (``text`` is read).
            contact: Resolved contact profile; when present, its tone drives
                the role-based system prompt.

        Returns:
            Generated reply text, or None when the LLM call fails.
        """
        # Build context
        context = self.context_builder.build_context(chat_id, message_data)

        # Detect language so the reply matches the incoming message's language
        text = message_data.get("text", "")
        language = self.context_builder.detect_language(text)
        language_instruction = self.context_builder.get_language_instruction(language)

        logger.debug(f"Detected language: {language}")

        # Build role-based system prompt; fall back to the generic
        # personality prompt when no contact profile is available.
        if contact:
            system_prompt = self.prompt_builder.build_prompt(
                tone=contact.tone,
                contact_name=contact.name if contact.name != "Unknown" else None,
            )
            logger.debug(f"Using tone: {contact.tone.value} for {contact.name}")
        else:
            system_prompt = get_personality_prompt(self.bot_name)

        # Generate response
        response = await self.llm.generate_response(
            system_prompt=system_prompt, messages=context, language_instruction=language_instruction
        )

        return response
|