wingman-ai 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- share/wingman/node_listener/package-lock.json +1785 -0
- share/wingman/node_listener/package.json +50 -0
- share/wingman/node_listener/src/index.ts +108 -0
- share/wingman/node_listener/src/ipc.ts +70 -0
- share/wingman/node_listener/src/messageHandler.ts +135 -0
- share/wingman/node_listener/src/socket.ts +244 -0
- share/wingman/node_listener/src/types.d.ts +13 -0
- share/wingman/node_listener/tsconfig.json +19 -0
- wingman/__init__.py +4 -0
- wingman/__main__.py +6 -0
- wingman/cli/__init__.py +5 -0
- wingman/cli/commands/__init__.py +1 -0
- wingman/cli/commands/auth.py +90 -0
- wingman/cli/commands/config.py +109 -0
- wingman/cli/commands/init.py +71 -0
- wingman/cli/commands/logs.py +84 -0
- wingman/cli/commands/start.py +111 -0
- wingman/cli/commands/status.py +84 -0
- wingman/cli/commands/stop.py +33 -0
- wingman/cli/commands/uninstall.py +113 -0
- wingman/cli/main.py +50 -0
- wingman/cli/wizard.py +356 -0
- wingman/config/__init__.py +31 -0
- wingman/config/paths.py +153 -0
- wingman/config/personality.py +155 -0
- wingman/config/registry.py +343 -0
- wingman/config/settings.py +294 -0
- wingman/core/__init__.py +16 -0
- wingman/core/agent.py +257 -0
- wingman/core/ipc_handler.py +124 -0
- wingman/core/llm/__init__.py +5 -0
- wingman/core/llm/client.py +77 -0
- wingman/core/memory/__init__.py +6 -0
- wingman/core/memory/context.py +109 -0
- wingman/core/memory/models.py +213 -0
- wingman/core/message_processor.py +277 -0
- wingman/core/policy/__init__.py +5 -0
- wingman/core/policy/evaluator.py +265 -0
- wingman/core/process_manager.py +135 -0
- wingman/core/safety/__init__.py +8 -0
- wingman/core/safety/cooldown.py +63 -0
- wingman/core/safety/quiet_hours.py +75 -0
- wingman/core/safety/rate_limiter.py +58 -0
- wingman/core/safety/triggers.py +117 -0
- wingman/core/transports/__init__.py +14 -0
- wingman/core/transports/base.py +106 -0
- wingman/core/transports/imessage/__init__.py +5 -0
- wingman/core/transports/imessage/db_listener.py +280 -0
- wingman/core/transports/imessage/sender.py +162 -0
- wingman/core/transports/imessage/transport.py +140 -0
- wingman/core/transports/whatsapp.py +180 -0
- wingman/daemon/__init__.py +5 -0
- wingman/daemon/manager.py +303 -0
- wingman/installer/__init__.py +5 -0
- wingman/installer/node_installer.py +253 -0
- wingman_ai-1.0.0.dist-info/METADATA +553 -0
- wingman_ai-1.0.0.dist-info/RECORD +60 -0
- wingman_ai-1.0.0.dist-info/WHEEL +4 -0
- wingman_ai-1.0.0.dist-info/entry_points.txt +2 -0
- wingman_ai-1.0.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,294 @@
|
|
|
1
|
+
"""Configuration settings loaded from environment or YAML."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
import os
|
|
5
|
+
from dataclasses import dataclass, field
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
import yaml
|
|
10
|
+
from dotenv import load_dotenv
|
|
11
|
+
|
|
12
|
+
from .paths import WingmanPaths
|
|
13
|
+
|
|
14
|
+
logger = logging.getLogger(__name__)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@dataclass
class Settings:
    """Application settings loaded from YAML config or environment variables.

    Instances are normally created via :meth:`load`, which resolves the
    configuration source in priority order (YAML config file, legacy ``.env``
    project layout, plain environment variables) and fills in the filesystem
    paths for the chosen mode.
    """

    # OpenAI
    openai_api_key: str = ""
    openai_model: str = "gpt-4o"

    # Bot identity
    bot_name: str = "Maximus"

    # Safety limits
    max_replies_per_hour: int = 30
    default_cooldown_seconds: int = 60
    quiet_hours_start: int = 0  # Midnight
    quiet_hours_end: int = 6  # 6 AM
    quiet_hours_enabled: bool = True

    # LLM settings
    context_window_size: int = 30
    max_response_tokens: int = 150
    temperature: float = 0.8

    # iMessage settings
    imessage_enabled: bool = False
    imessage_poll_interval: float = 2.0
    imessage_max_replies_per_hour: int = 15
    imessage_cooldown: int = 120

    # Paths (set after loading)
    node_dir: Path = field(default_factory=Path)
    data_dir: Path = field(default_factory=Path)
    log_dir: Path = field(default_factory=Path)
    db_path: Path = field(default_factory=Path)
    auth_state_dir: Path = field(default_factory=Path)

    # Config file paths
    contacts_config: Path = field(default_factory=Path)
    groups_config: Path = field(default_factory=Path)
    policies_config: Path = field(default_factory=Path)

    # Source mode: True when settings came from the CLI/XDG layout
    _is_cli_mode: bool = False

    @classmethod
    def load(cls, env_path: Path | None = None, paths: WingmanPaths | None = None) -> "Settings":
        """
        Load settings with fallback chain:
        1. YAML config file (if paths provided and config exists)
        2. Environment variables
        3. Defaults

        Args:
            env_path: Optional explicit ``.env`` file to load.
            paths: Optional pre-resolved path set; when omitted, CLI (XDG)
                paths are tried first, then the legacy project layout.
        """
        # Determine paths
        if paths is None:
            # Try CLI mode first (XDG paths)
            paths = WingmanPaths()
            if paths.config_exists():
                return cls._load_from_yaml(paths)

            # Fall back to legacy mode (project root)
            project_root = Path(__file__).parent.parent.parent.parent
            if (project_root / ".env").exists() or (project_root / "node_listener").exists():
                return cls._load_from_env(project_root, env_path)

            # No config found, use XDG paths with env vars
            return cls._load_from_env_with_paths(paths, env_path)
        else:
            if paths.config_exists():
                return cls._load_from_yaml(paths)
            else:
                return cls._load_from_env_with_paths(paths, env_path)

    @classmethod
    def _env_overrides(cls) -> dict[str, Any]:
        """Read the non-path settings shared by both env-based loaders.

        Factored out so :meth:`_load_from_env` and
        :meth:`_load_from_env_with_paths` cannot drift apart.
        """
        return {
            # OpenAI
            "openai_api_key": os.getenv("OPENAI_API_KEY", ""),
            "openai_model": os.getenv("OPENAI_MODEL", "gpt-4o"),
            # Bot identity
            "bot_name": os.getenv("BOT_NAME", "Maximus"),
            # Safety limits
            "max_replies_per_hour": int(os.getenv("MAX_REPLIES_PER_HOUR", "30")),
            "default_cooldown_seconds": int(os.getenv("DEFAULT_COOLDOWN_SECONDS", "60")),
            "quiet_hours_start": int(os.getenv("QUIET_HOURS_START", "0")),
            "quiet_hours_end": int(os.getenv("QUIET_HOURS_END", "6")),
            # LLM settings
            "context_window_size": int(os.getenv("CONTEXT_WINDOW_SIZE", "30")),
            "max_response_tokens": int(os.getenv("MAX_RESPONSE_TOKENS", "150")),
            "temperature": float(os.getenv("TEMPERATURE", "0.8")),
            # iMessage settings
            "imessage_enabled": os.getenv("IMESSAGE_ENABLED", "false").lower() == "true",
            "imessage_poll_interval": float(os.getenv("IMESSAGE_POLL_INTERVAL", "2.0")),
            "imessage_max_replies_per_hour": int(os.getenv("IMESSAGE_MAX_REPLIES_PER_HOUR", "15")),
            "imessage_cooldown": int(os.getenv("IMESSAGE_COOLDOWN", "120")),
        }

    @staticmethod
    def _path_overrides(paths: WingmanPaths) -> dict[str, Any]:
        """Map a WingmanPaths object onto the path-valued settings fields."""
        return {
            "node_dir": paths.node_dir,
            "data_dir": paths.data_dir,
            "log_dir": paths.log_dir,
            "db_path": paths.db_path,
            "auth_state_dir": paths.auth_state_dir,
            "contacts_config": paths.contacts_config,
            "groups_config": paths.groups_config,
            "policies_config": paths.policies_config,
        }

    @classmethod
    def _load_from_yaml(cls, paths: WingmanPaths) -> "Settings":
        """Load settings from YAML config file."""
        config_file = paths.config_file
        logger.info(f"Loading settings from {config_file}")

        with open(config_file) as f:
            config = yaml.safe_load(f) or {}

        # Parse config sections (each may be absent -> empty dict)
        bot_config = config.get("bot", {})
        openai_config = config.get("openai", {})
        safety_config = config.get("safety", {})
        quiet_hours_config = safety_config.get("quiet_hours", {})
        imessage_config = config.get("imessage", {})

        # Get API key from config or environment
        api_key = openai_config.get("api_key") or os.getenv("OPENAI_API_KEY", "")

        settings = cls(
            # OpenAI
            openai_api_key=api_key,
            openai_model=openai_config.get("model", "gpt-4o"),
            # Bot identity
            bot_name=bot_config.get("name", "Maximus"),
            # Safety limits
            max_replies_per_hour=safety_config.get("max_replies_per_hour", 30),
            default_cooldown_seconds=safety_config.get("cooldown_seconds", 60),
            quiet_hours_start=quiet_hours_config.get("start", 0),
            quiet_hours_end=quiet_hours_config.get("end", 6),
            quiet_hours_enabled=quiet_hours_config.get("enabled", True),
            # LLM settings
            context_window_size=openai_config.get("context_window_size", 30),
            max_response_tokens=openai_config.get("max_response_tokens", 150),
            temperature=openai_config.get("temperature", 0.8),
            # iMessage settings
            imessage_enabled=imessage_config.get("enabled", False),
            imessage_poll_interval=imessage_config.get("poll_interval", 2.0),
            imessage_max_replies_per_hour=imessage_config.get("max_replies_per_hour", 15),
            imessage_cooldown=imessage_config.get("cooldown", 120),
            # Paths from WingmanPaths
            **cls._path_overrides(paths),
            _is_cli_mode=True,
        )

        # Ensure directories exist
        paths.ensure_directories()

        logger.info(f"Settings loaded: bot_name={settings.bot_name}, model={settings.openai_model}")
        return settings

    @classmethod
    def _load_from_env(cls, project_root: Path, env_path: Path | None = None) -> "Settings":
        """Load settings from environment variables (legacy mode)."""
        # Load .env file
        if env_path:
            load_dotenv(env_path)
        else:
            load_dotenv(project_root / ".env")

        settings = cls(
            **cls._env_overrides(),
            # Paths (legacy project structure)
            node_dir=project_root / "node_listener",
            data_dir=project_root / "data",
            log_dir=project_root / "logs",
            db_path=project_root / "data" / "conversations.db",
            auth_state_dir=project_root / "auth_state",
            contacts_config=project_root / "config" / "contacts.yaml",
            groups_config=project_root / "config" / "groups.yaml",
            policies_config=project_root / "config" / "policies.yaml",
            _is_cli_mode=False,
        )

        # Validate required settings
        if not settings.openai_api_key:
            logger.warning("OPENAI_API_KEY not set - LLM features will fail")

        # Ensure directories exist
        settings.data_dir.mkdir(parents=True, exist_ok=True)
        settings.log_dir.mkdir(parents=True, exist_ok=True)

        logger.info(f"Settings loaded: bot_name={settings.bot_name}, model={settings.openai_model}")
        return settings

    @classmethod
    def _load_from_env_with_paths(
        cls, paths: WingmanPaths, env_path: Path | None = None
    ) -> "Settings":
        """Load settings from environment variables with XDG paths."""
        if env_path:
            load_dotenv(env_path)

        settings = cls(
            **cls._env_overrides(),
            # Paths from WingmanPaths
            **cls._path_overrides(paths),
            _is_cli_mode=True,
        )

        paths.ensure_directories()
        return settings

    def validate(self) -> list[str]:
        """Validate settings and return list of errors (empty when valid)."""
        errors = []

        if not self.openai_api_key:
            errors.append("OPENAI_API_KEY is required")

        if not self.node_dir.exists():
            errors.append(f"Node listener directory not found: {self.node_dir}")

        if not (self.node_dir / "dist" / "index.js").exists():
            errors.append(
                f"Node listener not built. Run 'wingman init' or manually build: "
                f"cd {self.node_dir} && npm install && npm run build"
            )

        if not 0 <= self.quiet_hours_start <= 23:
            errors.append("quiet_hours_start must be 0-23")

        if not 0 <= self.quiet_hours_end <= 23:
            errors.append("quiet_hours_end must be 0-23")

        return errors

    def to_yaml_dict(self) -> dict[str, Any]:
        """Convert settings to a dictionary suitable for YAML serialization."""
        return {
            "bot": {
                "name": self.bot_name,
            },
            "openai": {
                "api_key": self.openai_api_key,
                "model": self.openai_model,
                "context_window_size": self.context_window_size,
                "max_response_tokens": self.max_response_tokens,
                "temperature": self.temperature,
            },
            "safety": {
                "max_replies_per_hour": self.max_replies_per_hour,
                "cooldown_seconds": self.default_cooldown_seconds,
                "quiet_hours": {
                    "enabled": self.quiet_hours_enabled,
                    "start": self.quiet_hours_start,
                    "end": self.quiet_hours_end,
                },
            },
            "imessage": {
                "enabled": self.imessage_enabled,
                "poll_interval": self.imessage_poll_interval,
                "max_replies_per_hour": self.imessage_max_replies_per_hour,
                "cooldown": self.imessage_cooldown,
            },
        }
|
wingman/core/__init__.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
"""Core orchestration module for Wingman."""
|
|
2
|
+
|
|
3
|
+
from .agent import MultiTransportAgent, WhatsAppAgent
|
|
4
|
+
from .ipc_handler import IPCCommand, IPCHandler, IPCMessage
|
|
5
|
+
from .message_processor import MessageProcessor
|
|
6
|
+
from .process_manager import NodeProcessManager
|
|
7
|
+
|
|
8
|
+
# Public API of wingman.core: controls `from wingman.core import *` and
# documents which names external callers should rely on.
__all__ = [
    "MultiTransportAgent",
    "WhatsAppAgent",
    "NodeProcessManager",
    "IPCHandler",
    "IPCMessage",
    "IPCCommand",
    "MessageProcessor",
]
|
wingman/core/agent.py
ADDED
|
@@ -0,0 +1,257 @@
|
|
|
1
|
+
"""Main agent entry point for the Python orchestrator."""
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import logging
|
|
5
|
+
import signal
|
|
6
|
+
import sys
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
|
|
9
|
+
from wingman.config.registry import ContactRegistry, GroupRegistry
|
|
10
|
+
from wingman.config.settings import Settings
|
|
11
|
+
|
|
12
|
+
from .llm.client import LLMClient
|
|
13
|
+
from .memory.models import MessageStore
|
|
14
|
+
from .message_processor import MessageProcessor
|
|
15
|
+
from .policy import PolicyEvaluator
|
|
16
|
+
from .transports import (
|
|
17
|
+
BaseTransport,
|
|
18
|
+
IMessageTransport,
|
|
19
|
+
MessageEvent,
|
|
20
|
+
Platform,
|
|
21
|
+
WhatsAppTransport,
|
|
22
|
+
)
|
|
23
|
+
|
|
24
|
+
logger = logging.getLogger(__name__)
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def setup_logging(log_dir: Path) -> None:
    """Configure root logging to both a file and stderr.

    Creates *log_dir* if needed and appends to ``agent.log`` inside it.
    """
    log_dir.mkdir(parents=True, exist_ok=True)

    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        handlers=[
            logging.FileHandler(log_dir / "agent.log"),
            logging.StreamHandler(sys.stderr),
        ],
    )

    # Quiet down chatty HTTP/LLM client libraries.
    for noisy in ("httpx", "openai"):
        logging.getLogger(noisy).setLevel(logging.WARNING)
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
class MultiTransportAgent:
    """
    Multi-transport agent that manages WhatsApp and iMessage.

    Shares MessageProcessor, registries, and LLM across all transports.
    Incoming messages from every transport funnel through `_on_message`;
    outgoing replies are routed back via `_send_message`.
    """

    def __init__(self, settings: Settings):
        # Wire up the shared backend: message store, LLM client, and the
        # config-driven registries/policies that all transports consult.
        self.settings = settings
        self.store = MessageStore(settings.db_path)
        self.llm = LLMClient(
            api_key=settings.openai_api_key,
            model=settings.openai_model,
            max_tokens=settings.max_response_tokens,
            temperature=settings.temperature,
        )

        # Initialize config-driven registries
        self.contact_registry = ContactRegistry(settings.contacts_config)
        self.group_registry = GroupRegistry(settings.groups_config)
        self.policy_evaluator = PolicyEvaluator(
            settings.policies_config, bot_name=settings.bot_name
        )

        # Initialize message processor (transport-agnostic)
        self.processor = MessageProcessor(
            store=self.store,
            llm=self.llm,
            contact_registry=self.contact_registry,
            group_registry=self.group_registry,
            policy_evaluator=self.policy_evaluator,
            bot_name=settings.bot_name,
            max_replies_per_hour=settings.max_replies_per_hour,
            default_cooldown=settings.default_cooldown_seconds,
            quiet_start=settings.quiet_hours_start,
            quiet_end=settings.quiet_hours_end,
            context_window=settings.context_window_size,
        )

        # Set up message sender callback so the processor can reply without
        # knowing which transport a chat belongs to.
        self.processor.set_sender(self._send_message)

        # Transports, keyed by platform; populated in start().
        self.transports: dict[Platform, BaseTransport] = {}
        self._transport_tasks: list[asyncio.Task] = []
        self._shutdown_event = asyncio.Event()

    async def _send_message(self, platform: str, chat_id: str, text: str) -> bool:
        """Route message to appropriate transport.

        Returns True when the transport reports a successful send, False for
        unknown platforms or transports that are not running.
        """
        try:
            # Platform(...) raises ValueError for strings outside the enum.
            plat = Platform(platform)
            transport = self.transports.get(plat)
            if transport:
                return await transport.send_message(chat_id, text)
            else:
                logger.error(f"No transport for platform: {platform}")
                return False
        except ValueError:
            logger.error(f"Unknown platform: {platform}")
            return False

    async def _on_message(self, event: MessageEvent) -> None:
        """Handle incoming message from any transport."""
        # Convert MessageEvent to dict format expected by processor
        data = {
            "chatId": event.chat_id,
            "senderId": event.sender_id,
            "senderName": event.sender_name,
            "text": event.text,
            "timestamp": event.timestamp,
            "isGroup": event.is_group,
            "isSelf": event.is_self,
            "platform": event.platform.value,
            "quotedMessage": event.quoted_message,
        }
        await self.processor.process_message(data)

    async def start(self) -> None:
        """Start all configured transports and block until they finish.

        WhatsApp is always started; iMessage only when enabled in settings
        AND available on this machine.
        """
        logger.info("Starting Multi-Transport Agent...")
        logger.info(f"Bot name: {self.settings.bot_name}")
        logger.info(f"Model: {self.settings.openai_model}")

        # Initialize WhatsApp transport
        whatsapp = WhatsAppTransport(
            self.settings.node_dir, auth_state_dir=self.settings.auth_state_dir
        )
        whatsapp.set_message_handler(self._on_message)
        whatsapp.set_connected_handler(self._on_whatsapp_connected)
        self.transports[Platform.WHATSAPP] = whatsapp

        # Initialize iMessage transport if enabled
        if self.settings.imessage_enabled:
            imessage = IMessageTransport(poll_interval=self.settings.imessage_poll_interval)

            # Check if iMessage is available on this system
            if await imessage.check_availability():
                imessage.set_message_handler(self._on_message)
                self.transports[Platform.IMESSAGE] = imessage
                logger.info("iMessage transport initialized")
            else:
                logger.warning(
                    "iMessage not available. Ensure Full Disk Access is granted "
                    "and Messages.app is configured."
                )

        # Start all transports, each in its own task
        for platform, transport in self.transports.items():
            logger.info(f"Starting {platform.value} transport...")
            task = asyncio.create_task(
                self._run_transport(platform, transport), name=f"transport_{platform.value}"
            )
            self._transport_tasks.append(task)

        logger.info(f"Agent started with {len(self.transports)} transport(s)")

        # Wait for shutdown or all tasks to complete.
        # NOTE(review): gather() without return_exceptions propagates the
        # first transport failure; remaining transports keep running until
        # stop() cancels them — confirm this is the intended policy.
        try:
            await asyncio.gather(*self._transport_tasks)
        except asyncio.CancelledError:
            logger.info("Transport tasks cancelled")

    async def _run_transport(self, platform: Platform, transport: BaseTransport) -> None:
        """Run a single transport, handling errors."""
        try:
            await transport.start()
        except Exception as e:
            # Log with platform context, then re-raise so start()'s gather
            # observes the failure.
            logger.error(f"{platform.value} transport error: {e}")
            raise

    async def _on_whatsapp_connected(self, user_id: str) -> None:
        """Handle WhatsApp connection."""
        # Tell the processor our own id so self-sent messages can be ignored.
        self.processor.set_self_id(user_id, "whatsapp")

    async def stop(self) -> None:
        """Stop all transports gracefully."""
        logger.info("Stopping agent...")

        # Stop all transports; one failing stop must not block the others.
        for platform, transport in self.transports.items():
            logger.info(f"Stopping {platform.value} transport...")
            try:
                await transport.stop()
            except Exception as e:
                logger.error(f"Error stopping {platform.value}: {e}")

        # Cancel transport tasks and await each so cancellation completes
        # before we report the agent stopped.
        for task in self._transport_tasks:
            task.cancel()
            try:
                await task
            except asyncio.CancelledError:
                pass

        logger.info("Agent stopped.")

    def request_shutdown(self) -> None:
        """Request graceful shutdown."""
        # NOTE(review): nothing in this class visibly awaits
        # _shutdown_event; shutdown is actually driven by task cancellation
        # (see run_agent's signal handler) — confirm the event is still needed.
        self._shutdown_event.set()
|
|
202
|
+
|
|
203
|
+
|
|
204
|
+
# Retained so existing callers importing WhatsAppAgent keep working.
class WhatsAppAgent(MultiTransportAgent):
    """
    Legacy WhatsApp-only agent.

    Deprecated: Use MultiTransportAgent instead.
    """

    def __init__(self, settings: Settings):
        # Force the iMessage transport off so behaviour matches the old
        # single-transport agent, then defer to the shared implementation.
        settings.imessage_enabled = False
        super().__init__(settings)
|
|
216
|
+
|
|
217
|
+
|
|
218
|
+
async def run_agent(settings: Settings | None = None) -> None:
    """Run the agent with provided settings.

    Loads settings when none are given, validates them (exiting with status
    1 on configuration errors), installs SIGINT/SIGTERM handlers where the
    platform supports them, and runs the agent until shutdown.

    Args:
        settings: Pre-built Settings; when None, Settings.load() is used.
    """
    # Load settings if not provided
    if settings is None:
        settings = Settings.load()

    # Set up logging
    setup_logging(settings.log_dir)

    # Validate settings
    errors = settings.validate()
    if errors:
        for error in errors:
            logger.error(f"Configuration error: {error}")
        sys.exit(1)

    # Create agent
    agent = MultiTransportAgent(settings)

    # Set up signal handlers on the loop we are actually running on.
    # (asyncio.get_event_loop() is deprecated inside coroutines since 3.10
    # and may return a different loop; get_running_loop() is always correct
    # here because run_agent is a coroutine.)
    loop = asyncio.get_running_loop()

    def signal_handler() -> None:
        logger.info("Shutdown signal received")
        agent.request_shutdown()
        # Cancel all tasks so the gather() inside agent.start() unwinds
        for task in asyncio.all_tasks(loop):
            if task is not asyncio.current_task():
                task.cancel()

    for sig in (signal.SIGINT, signal.SIGTERM):
        try:
            loop.add_signal_handler(sig, signal_handler)
        except NotImplementedError:
            # add_signal_handler is unavailable on Windows event loops; fall
            # back to default signal delivery (KeyboardInterrupt).
            logger.warning(f"Signal handler for {sig!r} not supported on this platform")

    # Run agent until cancelled, always stopping transports on the way out.
    try:
        await agent.start()
    except asyncio.CancelledError:
        pass
    finally:
        await agent.stop()
|
|
@@ -0,0 +1,124 @@
|
|
|
1
|
+
"""IPC Handler for communication with Node.js subprocess."""
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import json
|
|
5
|
+
import logging
|
|
6
|
+
from collections.abc import Callable, Coroutine
|
|
7
|
+
from dataclasses import dataclass
|
|
8
|
+
|
|
9
|
+
logger = logging.getLogger(__name__)
|
|
10
|
+
|
|
11
|
+
NULL_CHAR = "\0"
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
@dataclass
class IPCMessage:
    """Message received from Node.js."""

    # Message type string; used to look up the registered handler.
    type: str
    # Optional JSON payload that accompanied the message.
    data: dict | None = None
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
@dataclass
class IPCCommand:
    """Command to send to Node.js."""

    # Action name the Node.js side dispatches on (e.g. "send_message").
    action: str
    # Optional JSON payload for the action; omitted from the wire when falsy.
    payload: dict | None = None
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class IPCHandler:
    """Handles JSON IPC communication with Node.js subprocess.

    Frames are JSON documents separated by NUL characters on both the
    inbound (stdout) and outbound (stdin) streams.
    """

    def __init__(self, stdin: asyncio.StreamWriter, stdout: asyncio.StreamReader):
        self.stdin = stdin
        self.stdout = stdout
        self._buffer = ""
        self._handlers: dict[str, Callable[[dict], Coroutine]] = {}
        self._running = False

    def register_handler(self, message_type: str, handler: Callable[[dict], Coroutine]) -> None:
        """Register a handler for a specific message type."""
        self._handlers[message_type] = handler
        logger.debug(f"Registered handler for: {message_type}")

    async def send_command(self, command: IPCCommand) -> None:
        """Serialize *command* as a NUL-terminated JSON frame and write it."""
        try:
            wire: dict = {"action": command.action}
            if command.payload:
                wire["payload"] = command.payload
            frame = json.dumps(wire) + NULL_CHAR
            self.stdin.write(frame.encode("utf-8"))
            await self.stdin.drain()
            logger.debug(f"Sent command: {command.action}")
        except Exception as e:
            logger.error(f"Failed to send command: {e}")
            raise

    async def send_message(self, jid: str, text: str, message_id: str | None = None) -> None:
        """Convenience method to send a WhatsApp message."""
        body: dict = {"jid": jid, "text": text}
        if message_id:
            body["messageId"] = message_id
        await self.send_command(IPCCommand(action="send_message", payload=body))

    async def _read_messages(self) -> None:
        """Pump Node.js stdout, splitting the stream on NUL delimiters."""
        while self._running:
            try:
                chunk = await self.stdout.read(4096)
                if not chunk:
                    logger.warning("Node.js stdout closed")
                    break

                self._buffer += chunk.decode("utf-8")

                # Drain every complete (NUL-terminated) frame from the buffer.
                while True:
                    frame, sep, rest = self._buffer.partition(NULL_CHAR)
                    if not sep:
                        break
                    self._buffer = rest
                    if frame.strip():
                        await self._process_message(frame)

            except asyncio.CancelledError:
                logger.info("Message reader cancelled")
                break
            except Exception as e:
                # Transient read error: back off briefly and keep pumping.
                logger.error(f"Error reading from stdout: {e}")
                await asyncio.sleep(0.1)

    async def _process_message(self, json_str: str) -> None:
        """Decode one JSON frame and dispatch it to its registered handler."""
        try:
            parsed = json.loads(json_str)
            message = IPCMessage(type=parsed.get("type", "unknown"), data=parsed.get("data"))

            handler = self._handlers.get(message.type)
            if handler is None:
                logger.debug(f"No handler for message type: {message.type}")
            else:
                await handler(message.data or {})

        except json.JSONDecodeError as e:
            logger.error(f"Failed to parse JSON: {e}")
        except Exception as e:
            logger.error(f"Error processing message: {e}")

    async def start(self) -> None:
        """Start the IPC message loop."""
        self._running = True
        await self._read_messages()

    def stop(self) -> None:
        """Stop the IPC message loop."""
        self._running = False
|