deeptrade-quant 0.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deeptrade/__init__.py +8 -0
- deeptrade/channels_builtin/__init__.py +0 -0
- deeptrade/channels_builtin/stdout/__init__.py +0 -0
- deeptrade/channels_builtin/stdout/deeptrade_plugin.yaml +25 -0
- deeptrade/channels_builtin/stdout/migrations/20260429_001_init.sql +13 -0
- deeptrade/channels_builtin/stdout/stdout_channel/__init__.py +0 -0
- deeptrade/channels_builtin/stdout/stdout_channel/channel.py +180 -0
- deeptrade/cli.py +214 -0
- deeptrade/cli_config.py +396 -0
- deeptrade/cli_data.py +33 -0
- deeptrade/cli_plugin.py +176 -0
- deeptrade/core/__init__.py +8 -0
- deeptrade/core/config.py +344 -0
- deeptrade/core/config_migrations.py +138 -0
- deeptrade/core/db.py +176 -0
- deeptrade/core/llm_client.py +591 -0
- deeptrade/core/llm_manager.py +174 -0
- deeptrade/core/logging_config.py +61 -0
- deeptrade/core/migrations/__init__.py +0 -0
- deeptrade/core/migrations/core/20260427_001_init.sql +121 -0
- deeptrade/core/migrations/core/20260501_002_drop_llm_calls_stage.sql +10 -0
- deeptrade/core/migrations/core/__init__.py +0 -0
- deeptrade/core/notifier.py +302 -0
- deeptrade/core/paths.py +49 -0
- deeptrade/core/plugin_manager.py +616 -0
- deeptrade/core/run_status.py +29 -0
- deeptrade/core/secrets.py +152 -0
- deeptrade/core/tushare_client.py +824 -0
- deeptrade/plugins_api/__init__.py +44 -0
- deeptrade/plugins_api/base.py +66 -0
- deeptrade/plugins_api/channel.py +42 -0
- deeptrade/plugins_api/events.py +61 -0
- deeptrade/plugins_api/llm.py +46 -0
- deeptrade/plugins_api/metadata.py +84 -0
- deeptrade/plugins_api/notify.py +67 -0
- deeptrade/strategies_builtin/__init__.py +0 -0
- deeptrade/strategies_builtin/limit_up_board/__init__.py +0 -0
- deeptrade/strategies_builtin/limit_up_board/deeptrade_plugin.yaml +101 -0
- deeptrade/strategies_builtin/limit_up_board/limit_up_board/__init__.py +0 -0
- deeptrade/strategies_builtin/limit_up_board/limit_up_board/calendar.py +65 -0
- deeptrade/strategies_builtin/limit_up_board/limit_up_board/cli.py +269 -0
- deeptrade/strategies_builtin/limit_up_board/limit_up_board/config.py +76 -0
- deeptrade/strategies_builtin/limit_up_board/limit_up_board/data.py +1191 -0
- deeptrade/strategies_builtin/limit_up_board/limit_up_board/pipeline.py +869 -0
- deeptrade/strategies_builtin/limit_up_board/limit_up_board/plugin.py +30 -0
- deeptrade/strategies_builtin/limit_up_board/limit_up_board/profiles.py +85 -0
- deeptrade/strategies_builtin/limit_up_board/limit_up_board/prompts.py +485 -0
- deeptrade/strategies_builtin/limit_up_board/limit_up_board/render.py +890 -0
- deeptrade/strategies_builtin/limit_up_board/limit_up_board/runner.py +1087 -0
- deeptrade/strategies_builtin/limit_up_board/limit_up_board/runtime.py +172 -0
- deeptrade/strategies_builtin/limit_up_board/limit_up_board/schemas.py +178 -0
- deeptrade/strategies_builtin/limit_up_board/migrations/20260430_001_init.sql +150 -0
- deeptrade/strategies_builtin/limit_up_board/migrations/20260501_002_lub_stage_results_llm_provider.sql +8 -0
- deeptrade/strategies_builtin/limit_up_board/migrations/20260508_001_lub_lhb_tables.sql +36 -0
- deeptrade/strategies_builtin/limit_up_board/migrations/20260508_002_lub_cyq_perf.sql +18 -0
- deeptrade/strategies_builtin/limit_up_board/migrations/20260508_003_lub_lhb_pk_fix.sql +46 -0
- deeptrade/strategies_builtin/limit_up_board/migrations/20260508_004_lub_lhb_drop_pk.sql +53 -0
- deeptrade/strategies_builtin/limit_up_board/migrations/20260508_005_lub_config.sql +17 -0
- deeptrade/strategies_builtin/volume_anomaly/__init__.py +0 -0
- deeptrade/strategies_builtin/volume_anomaly/deeptrade_plugin.yaml +59 -0
- deeptrade/strategies_builtin/volume_anomaly/migrations/20260430_001_init.sql +94 -0
- deeptrade/strategies_builtin/volume_anomaly/migrations/20260601_001_realized_returns.sql +44 -0
- deeptrade/strategies_builtin/volume_anomaly/migrations/20260601_002_dimension_scores.sql +13 -0
- deeptrade/strategies_builtin/volume_anomaly/volume_anomaly/__init__.py +0 -0
- deeptrade/strategies_builtin/volume_anomaly/volume_anomaly/calendar.py +52 -0
- deeptrade/strategies_builtin/volume_anomaly/volume_anomaly/cli.py +247 -0
- deeptrade/strategies_builtin/volume_anomaly/volume_anomaly/data.py +2154 -0
- deeptrade/strategies_builtin/volume_anomaly/volume_anomaly/pipeline.py +327 -0
- deeptrade/strategies_builtin/volume_anomaly/volume_anomaly/plugin.py +22 -0
- deeptrade/strategies_builtin/volume_anomaly/volume_anomaly/profiles.py +49 -0
- deeptrade/strategies_builtin/volume_anomaly/volume_anomaly/prompts.py +187 -0
- deeptrade/strategies_builtin/volume_anomaly/volume_anomaly/prompts_examples.py +84 -0
- deeptrade/strategies_builtin/volume_anomaly/volume_anomaly/render.py +906 -0
- deeptrade/strategies_builtin/volume_anomaly/volume_anomaly/runner.py +772 -0
- deeptrade/strategies_builtin/volume_anomaly/volume_anomaly/runtime.py +90 -0
- deeptrade/strategies_builtin/volume_anomaly/volume_anomaly/schemas.py +97 -0
- deeptrade/strategies_builtin/volume_anomaly/volume_anomaly/stats.py +174 -0
- deeptrade/theme.py +48 -0
- deeptrade_quant-0.0.2.dist-info/METADATA +166 -0
- deeptrade_quant-0.0.2.dist-info/RECORD +83 -0
- deeptrade_quant-0.0.2.dist-info/WHEEL +4 -0
- deeptrade_quant-0.0.2.dist-info/entry_points.txt +2 -0
- deeptrade_quant-0.0.2.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,174 @@
|
|
|
1
|
+
"""LLM Manager — framework-level service for multi-provider LLM access.
|
|
2
|
+
|
|
3
|
+
DESIGN §0.7 + §10. The manager is the **only** path plugins should use to
|
|
4
|
+
obtain an ``LLMClient``. It provides:
|
|
5
|
+
|
|
6
|
+
* ``list_providers()`` — names of currently usable providers
|
|
7
|
+
* ``get_provider_info()`` — display metadata (no api_key)
|
|
8
|
+
* ``get_client()`` — a fully-wired ``LLMClient`` for one provider
|
|
9
|
+
|
|
10
|
+
Multiple providers coexist; there is no "default" provider concept. A single
|
|
11
|
+
plugin may call ``get_client("deepseek", ...)`` and ``get_client("kimi", ...)``
|
|
12
|
+
in the same run and treat them as independent clients.
|
|
13
|
+
|
|
14
|
+
Thread safety
|
|
15
|
+
-------------
|
|
16
|
+
**Not thread-safe.** Cached ``LLMClient`` instances are shared by callers
|
|
17
|
+
holding the same ``LLMManager`` and asking for the same
|
|
18
|
+
``(name, plugin_id, run_id)`` triple. The underlying ``OpenAI`` SDK + httpx
|
|
19
|
+
pool is itself thread-safe, but ``LLMClient.complete_json()`` writes to the
|
|
20
|
+
shared DB connection (``llm_calls`` audit) and ``llm_calls.jsonl`` file —
|
|
21
|
+
those are **not** serialized inside the client. Callers wanting parallel
|
|
22
|
+
LLM calls must either:
|
|
23
|
+
|
|
24
|
+
* use a separate ``LLMManager`` per worker thread, or
|
|
25
|
+
* serialize their ``complete_json`` calls externally with a lock.
|
|
26
|
+
|
|
27
|
+
Inside a single-threaded plugin run (the default), caching strictly improves
|
|
28
|
+
performance (one transport per provider) with no correctness risk.
|
|
29
|
+
"""
|
|
30
|
+
|
|
31
|
+
from __future__ import annotations
|
|
32
|
+
|
|
33
|
+
from dataclasses import dataclass
|
|
34
|
+
from pathlib import Path
|
|
35
|
+
from typing import TYPE_CHECKING
|
|
36
|
+
|
|
37
|
+
from deeptrade.core.llm_client import LLMClient, _select_transport_class
|
|
38
|
+
|
|
39
|
+
if TYPE_CHECKING: # pragma: no cover
|
|
40
|
+
from deeptrade.core.config import ConfigService
|
|
41
|
+
from deeptrade.core.db import Database
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
class LLMNotConfiguredError(RuntimeError):
    """Provider missing from config, or configured without an ``api_key``.

    Kept distinct from a generic ``KeyError`` so that callers can catch
    this specifically and respond with a friendly hint (e.g. the CLI
    points users at ``deeptrade config set-llm``).
    """
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
@dataclass(frozen=True)
class LLMProviderInfo:
    """Display metadata for one provider.

    Deliberately excludes ``api_key`` so instances are safe to log or
    render in a TUI.
    """

    # Provider name as configured under ``llm.providers``.
    name: str
    # Model identifier used for requests to this provider.
    model: str
    # API endpoint base URL.
    base_url: str
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
# Cache key for memoized LLMClient instances: (provider name, plugin_id, run_id).
_CacheKey = tuple[str, str, str | None]
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
class LLMManager:
    """Framework-level LLM access for plugins.

    Cheap to construct — no network IO and no client building happen here.
    The actual ``OpenAI`` transport is created lazily on the first
    ``get_client()`` for a given ``(name, plugin_id, run_id)`` and then
    cached on this manager.
    """

    def __init__(self, db: Database, config: ConfigService) -> None:
        self._db = db
        self._config = config
        self._cache: dict[_CacheKey, LLMClient] = {}

    # ------------------------------------------------------------------
    # Listing / introspection
    # ------------------------------------------------------------------

    def list_providers(self) -> list[str]:
        """Names of providers that are configured AND have an api_key set.

        Filtering by api_key prevents callers from receiving a name that
        would 401 at the first ``complete_json`` call. The result is
        sorted for determinism.
        """
        app_cfg = self._config.get_app_config()
        return [
            provider_name
            for provider_name in sorted(app_cfg.llm_providers.keys())
            if self._config.get(f"llm.{provider_name}.api_key")
        ]

    def _resolve(self, name: str):
        """Return ``(app_config, provider_entry)`` for ``name``.

        Raises ``LLMNotConfiguredError`` when ``name`` is absent from
        ``llm.providers``.
        """
        app_cfg = self._config.get_app_config()
        entry = app_cfg.llm_providers.get(name)
        if entry is None:
            raise LLMNotConfiguredError(
                f"LLM provider {name!r} is not configured; "
                "run `deeptrade config set-llm` to add it"
            )
        return app_cfg, entry

    def get_provider_info(self, name: str) -> LLMProviderInfo:
        """Return display metadata for ``name``.

        Raises ``LLMNotConfiguredError`` if the provider is not in
        ``llm.providers``. Does NOT require an api_key — this method is
        intentionally usable for inspecting partially-configured entries
        (e.g. listing for an "edit existing" CLI flow).
        """
        _, entry = self._resolve(name)
        return LLMProviderInfo(name=name, model=entry.model, base_url=entry.base_url)

    # ------------------------------------------------------------------
    # Client construction
    # ------------------------------------------------------------------

    def get_client(
        self,
        name: str,
        *,
        plugin_id: str,
        run_id: str | None = None,
        reports_dir: Path | None = None,
    ) -> LLMClient:
        """Return an ``LLMClient`` bound to provider ``name``.

        Cached by ``(name, plugin_id, run_id)`` for the lifetime of this
        manager — repeated calls during a single run reuse the same
        transport / httpx pool.

        Raises:
            LLMNotConfiguredError — provider not in ``llm.providers``, or
                its ``llm.<name>.api_key`` is unset.
        """
        key: _CacheKey = (name, plugin_id, run_id)
        if key in self._cache:
            return self._cache[key]

        app_cfg, entry = self._resolve(name)
        api_key = self._config.get(f"llm.{name}.api_key")
        if not api_key:
            raise LLMNotConfiguredError(
                f"LLM provider {name!r} has no api_key set; "
                f"run `deeptrade config set-llm` and choose {name!r}"
            )

        transport = _select_transport_class(entry.base_url)(
            api_key=str(api_key),
            base_url=entry.base_url,
            timeout=entry.timeout,
        )
        # v0.7 — LLMClient no longer holds a profile set; the per-call
        # ``StageProfile`` is supplied by the plugin at ``complete_json`` time.
        built = LLMClient(
            self._db,
            transport,
            model=entry.model,
            plugin_id=plugin_id,
            run_id=run_id,
            audit_full_payload=app_cfg.llm_audit_full_payload,
            reports_dir=reports_dir,
        )
        self._cache[key] = built
        return built
|
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
"""Logging configuration with file rotation.
|
|
2
|
+
|
|
3
|
+
ADR-006: log records go to STDERR (avoiding stdout where questionary lives) and
|
|
4
|
+
to a rotating file under ~/.deeptrade/logs/.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import logging
|
|
10
|
+
import sys
|
|
11
|
+
from logging.handlers import RotatingFileHandler
|
|
12
|
+
|
|
13
|
+
from deeptrade.core import paths
|
|
14
|
+
|
|
15
|
+
DEFAULT_LOG_FILENAME = "deeptrade.log"  # file name inside the logs directory
DEFAULT_MAX_BYTES = 5 * 1024 * 1024  # 5 MB per file before rotation
DEFAULT_BACKUP_COUNT = 5  # rotated backup files kept alongside the active log
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def setup_logging(
    *,
    level: str = "INFO",
    log_filename: str = DEFAULT_LOG_FILENAME,
    max_bytes: int = DEFAULT_MAX_BYTES,
    backup_count: int = DEFAULT_BACKUP_COUNT,
) -> None:
    """Configure the root logger with a stderr handler plus a rotating file.

    Idempotent: handlers previously installed by this function (tagged with
    a ``_deeptrade`` attribute) are removed first, so calling twice simply
    replaces the deeptrade handlers.
    """
    root = logging.getLogger()

    # Drop handlers installed by a previous call so reconfiguration is safe.
    stale = [h for h in root.handlers if getattr(h, "_deeptrade", False)]
    for handler in stale:
        root.removeHandler(handler)

    formatter = logging.Formatter(
        "%(asctime)s [%(levelname).1s] %(name)s: %(message)s",
        datefmt="%H:%M:%S",
    )

    # stderr keeps stdout clean for questionary / dashboards (ADR-006).
    console = logging.StreamHandler(stream=sys.stderr)
    console.setFormatter(formatter)
    console._deeptrade = True  # type: ignore[attr-defined]
    root.addHandler(console)

    # Rotating file under ~/.deeptrade/logs/.
    log_dir = paths.logs_dir()
    log_dir.mkdir(parents=True, exist_ok=True)
    rotating = RotatingFileHandler(
        log_dir / log_filename,
        maxBytes=max_bytes,
        backupCount=backup_count,
        encoding="utf-8",
    )
    rotating.setFormatter(formatter)
    rotating._deeptrade = True  # type: ignore[attr-defined]
    root.addHandler(rotating)

    root.setLevel(level)
|
|
File without changes
|
|
@@ -0,0 +1,121 @@
|
|
|
1
|
+
-- DeepTrade core schema initial migration.
--
-- Scope: ONLY framework-owned tables. Plugin-owned tables (including any
-- tushare-derived business tables like stock_basic / daily / moneyflow) are
-- declared by each plugin in its own deeptrade_plugin.yaml + migrations/*.sql
-- and applied via plugin_schema_migrations (per-plugin tracking). The
-- framework never owns business data tables.

-- ============================================================
-- Framework configuration & secrets
-- ============================================================

-- Non-secret app config (values stored JSON-encoded in value_json)
CREATE TABLE IF NOT EXISTS app_config (
    key VARCHAR PRIMARY KEY,
    value_json VARCHAR NOT NULL,
    is_secret BOOLEAN DEFAULT FALSE,
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- Encrypted secrets (keyring-backed; plaintext fallback when keyring unavailable)
CREATE TABLE IF NOT EXISTS secret_store (
    key VARCHAR PRIMARY KEY,
    encrypted_value BLOB NOT NULL,
    encryption_method VARCHAR NOT NULL, -- 'keyring' | 'plaintext'
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- ============================================================
-- Framework schema-migration tracking
-- ============================================================

CREATE TABLE IF NOT EXISTS schema_migrations (
    version VARCHAR PRIMARY KEY,
    applied_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- ============================================================
-- Plugin registry
-- ============================================================

CREATE TABLE IF NOT EXISTS plugins (
    plugin_id VARCHAR PRIMARY KEY,
    name VARCHAR NOT NULL,
    version VARCHAR NOT NULL,
    type VARCHAR NOT NULL, -- 'strategy' | 'channel' | future
    api_version VARCHAR NOT NULL,
    entrypoint VARCHAR NOT NULL,
    install_path VARCHAR NOT NULL,
    enabled BOOLEAN NOT NULL DEFAULT TRUE,
    metadata_yaml VARCHAR NOT NULL,
    installed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- Tables registered per plugin; purge_on_uninstall is recorded per table.
CREATE TABLE IF NOT EXISTS plugin_tables (
    plugin_id VARCHAR NOT NULL,
    table_name VARCHAR NOT NULL,
    description VARCHAR,
    purge_on_uninstall BOOLEAN NOT NULL DEFAULT TRUE,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    PRIMARY KEY (plugin_id, table_name)
);

-- Per-plugin migration tracking; a checksum is recorded per applied version.
CREATE TABLE IF NOT EXISTS plugin_schema_migrations (
    plugin_id VARCHAR NOT NULL,
    version VARCHAR NOT NULL,
    checksum VARCHAR NOT NULL,
    applied_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    PRIMARY KEY (plugin_id, version)
);

-- ============================================================
-- Framework service audit / cache state
-- ============================================================

-- LLM call audit (LLMClient writes; per-plugin scoped via plugin_id column).
-- v0.7 dropped the `stage` column — see migration 20260501_002.
CREATE TABLE IF NOT EXISTS llm_calls (
    call_id UUID PRIMARY KEY,
    run_id UUID,
    plugin_id VARCHAR,
    model VARCHAR,
    prompt_hash VARCHAR,
    input_tokens BIGINT,
    output_tokens BIGINT,
    latency_ms INTEGER,
    request_json VARCHAR,
    response_json VARCHAR,
    validation_status VARCHAR,
    error VARCHAR,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- Tushare sync idempotency state, per (plugin_id, api_name, trade_date).
-- Each plugin tracks its own sync state — plugins do not share cached
-- payloads with each other (per pure-isolation data model).
CREATE TABLE IF NOT EXISTS tushare_sync_state (
    plugin_id VARCHAR NOT NULL,
    api_name VARCHAR NOT NULL,
    trade_date VARCHAR NOT NULL, -- '*' for non-dated APIs (e.g. stock_basic)
    status VARCHAR NOT NULL, -- ok | partial | failed | unauthorized
    row_count BIGINT,
    cache_class VARCHAR NOT NULL DEFAULT 'trade_day_immutable',
    -- static | trade_day_immutable | trade_day_mutable | hot_or_anns
    ttl_seconds INTEGER,
    data_completeness VARCHAR NOT NULL DEFAULT 'final',
    -- final | intraday
    synced_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    PRIMARY KEY (plugin_id, api_name, trade_date)
);

-- Tushare per-call audit (per plugin)
CREATE TABLE IF NOT EXISTS tushare_calls (
    plugin_id VARCHAR,
    api_name VARCHAR,
    params_hash VARCHAR,
    rows INTEGER,
    latency_ms INTEGER,
    called_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
-- v0.7 — drop llm_calls.stage column.
--
-- The "stage" concept is now owned entirely by plugins: ``LLMClient.complete_json``
-- no longer accepts a stage argument, so the framework no longer writes this
-- column. Stage info for historical runs can still be consulted on demand in
-- ``~/.deeptrade/reports/<run_id>/llm_calls.jsonl`` (old files are left
-- untouched; jsonl rows written from v0.7 onward no longer contain a
-- ``stage`` key either).
--
-- DuckDB 1.0+ supports ALTER TABLE ... DROP COLUMN, and IF EXISTS makes it safe.

ALTER TABLE llm_calls DROP COLUMN IF EXISTS stage;
|
|
File without changes
|
|
@@ -0,0 +1,302 @@
|
|
|
1
|
+
"""Notification orchestration — pure framework code, NO IM-protocol logic.
|
|
2
|
+
|
|
3
|
+
Three layers compose top-down:
|
|
4
|
+
|
|
5
|
+
AsyncDispatchNotifier ← async wrapper: queue + daemon worker + join
|
|
6
|
+
└─ MultiplexNotifier ← fan-out + per-channel exception isolation
|
|
7
|
+
└─ ChannelPlugin instances (loaded from type=channel plugins)
|
|
8
|
+
|
|
9
|
+
The framework knows nothing about feishu / dingtalk / wechat-work. Channels
|
|
10
|
+
are delivered as plugins (``channels_builtin/`` mirrors ``strategies_builtin/``);
|
|
11
|
+
new channels = new plugin packages, zero framework change.
|
|
12
|
+
|
|
13
|
+
Top-level API (used by any plugin that needs to notify):
|
|
14
|
+
|
|
15
|
+
from deeptrade import notify, notification_session
|
|
16
|
+
|
|
17
|
+
notify(db, payload) # one-shot
|
|
18
|
+
with notification_session(db) as ns: # batch
|
|
19
|
+
ns.push(p1)
|
|
20
|
+
ns.push(p2)
|
|
21
|
+
|
|
22
|
+
If no channel plugins are enabled, both forms degrade to no-op silently.
|
|
23
|
+
"""
|
|
24
|
+
|
|
25
|
+
from __future__ import annotations
|
|
26
|
+
|
|
27
|
+
import logging
|
|
28
|
+
import queue
|
|
29
|
+
import threading
|
|
30
|
+
from collections.abc import Iterator, Sequence
|
|
31
|
+
from contextlib import contextmanager
|
|
32
|
+
from pathlib import Path
|
|
33
|
+
from typing import TYPE_CHECKING, Protocol, runtime_checkable
|
|
34
|
+
|
|
35
|
+
if TYPE_CHECKING: # pragma: no cover
|
|
36
|
+
from deeptrade.core.db import Database
|
|
37
|
+
from deeptrade.core.plugin_manager import InstalledPlugin, PluginManager
|
|
38
|
+
from deeptrade.plugins_api.base import PluginContext
|
|
39
|
+
from deeptrade.plugins_api.channel import ChannelPlugin
|
|
40
|
+
from deeptrade.plugins_api.notify import NotificationPayload
|
|
41
|
+
|
|
42
|
+
logger = logging.getLogger(__name__)
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
# ---------------------------------------------------------------------------
|
|
46
|
+
# Protocol
|
|
47
|
+
# ---------------------------------------------------------------------------
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
@runtime_checkable
class Notifier(Protocol):
    """Handle exposed to callers (plugins or framework code).

    Implementations MUST guarantee ``push`` returns quickly (no synchronous
    HTTP) — the asynchrony is provided by ``AsyncDispatchNotifier``.
    """

    # True when at least one delivery target exists.
    def is_enabled(self) -> bool: ...
    # Hand one payload over for delivery; must return quickly.
    def push(self, payload: NotificationPayload) -> None: ...
    # Wait (bounded by ``timeout`` seconds) for in-flight deliveries.
    def join(self, timeout: float = 0.0) -> None: ...
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
# ---------------------------------------------------------------------------
|
|
64
|
+
# NoopNotifier — used when no channel plugin is installed/enabled
|
|
65
|
+
# ---------------------------------------------------------------------------
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
class NoopNotifier:
    """Do-nothing notifier used when no channel plugin is enabled.

    ``build_notifier`` returns this so callers may push unconditionally
    without first checking whether any channels exist.
    """

    def is_enabled(self) -> bool:
        """Always ``False`` — there is nothing to deliver to."""
        return False

    def push(self, payload: NotificationPayload) -> None:  # noqa: ARG002
        """Silently discard *payload*."""

    def join(self, timeout: float = 0.0) -> None:  # noqa: ARG002
        """Nothing in flight; return immediately."""
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
# ---------------------------------------------------------------------------
|
|
82
|
+
# MultiplexNotifier — fan-out to all enabled channel plugins
|
|
83
|
+
# ---------------------------------------------------------------------------
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
class MultiplexNotifier:
    """Synchronous fan-out across one ChannelPlugin per channel.

    Each channel's ``push`` runs inside its own ``try``/``except``, so one
    broken or slow channel can never block or break its siblings. This
    notifier is consumed by ``AsyncDispatchNotifier`` on a background
    thread, which is why blocking on HTTP here is acceptable.
    """

    def __init__(self, channels: Sequence[tuple[ChannelPlugin, PluginContext]]) -> None:
        self._channels: list[tuple[ChannelPlugin, PluginContext]] = list(channels)

    def is_enabled(self) -> bool:
        """True when at least one channel is registered."""
        return len(self._channels) > 0

    def push(self, payload: NotificationPayload) -> None:
        """Deliver *payload* to every channel, isolating per-channel failures."""
        for channel, context in self._channels:
            try:
                channel.push(context, payload)
            except Exception as exc:  # noqa: BLE001 — single-channel isolation
                plugin_id = getattr(getattr(channel, "metadata", None), "plugin_id", "?")
                logger.warning("channel %s push failed: %s", plugin_id, exc)

    def join(self, timeout: float = 0.0) -> None:  # noqa: ARG002
        """Synchronous notifier — nothing queued, nothing to wait for."""
        return
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
# ---------------------------------------------------------------------------
|
|
114
|
+
# AsyncDispatchNotifier — wraps a synchronous Notifier in a worker thread
|
|
115
|
+
# ---------------------------------------------------------------------------
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
class _Shutdown:
|
|
119
|
+
"""Sentinel value placed on the queue to signal worker shutdown."""
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
_SHUTDOWN = _Shutdown()
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
class AsyncDispatchNotifier:
    """Non-blocking adapter: ``push()`` enqueues and returns immediately;
    a daemon worker thread drains the queue and calls the inner notifier.

    Invariants:
    * ``push`` MUST NOT block on HTTP. ``put_nowait`` raises Queue.Full
      if the queue is saturated → drop + warn; never block the caller.
    * ``join(timeout)`` MUST be called before process exit to flush
      in-flight payloads (worker is daemon, would be killed otherwise).
    * Worker exceptions are caught — a broken inner notifier never kills
      the worker thread.

    NOTE(review): the lazy ``_started``/``_thread.start()`` handshake in
    ``push`` is not itself synchronized — presumably ``push`` is only
    called from one thread; confirm against callers.
    """

    DEFAULT_QUEUE_SIZE = 16  # bounded: excess payloads are dropped, not buffered forever
    DEFAULT_JOIN_TIMEOUT = 10.0  # seconds

    def __init__(
        self,
        inner: Notifier,
        *,
        queue_size: int = DEFAULT_QUEUE_SIZE,
    ) -> None:
        self._inner = inner
        # Bounded queue carrying payloads plus the shutdown sentinel.
        self._queue: queue.Queue[NotificationPayload | _Shutdown] = queue.Queue(queue_size)
        # Counters are touched by both the caller and the worker thread,
        # hence the lock.
        self._dispatched_count = 0
        self._dropped_count = 0
        self._lock = threading.Lock()
        # Daemon thread so a forgotten join() cannot hang interpreter exit.
        self._thread = threading.Thread(
            target=self._worker, name="deeptrade-notify", daemon=True
        )
        self._started = False

    def is_enabled(self) -> bool:
        # Delegates: enabled iff the wrapped notifier has delivery targets.
        return self._inner.is_enabled()

    def push(self, payload: NotificationPayload) -> None:
        """Enqueue *payload* without blocking; drop (and warn) on a full queue."""
        if not self.is_enabled():
            return
        # Lazy start: the worker thread is spawned only on first real use.
        if not self._started:
            self._started = True
            self._thread.start()
        try:
            self._queue.put_nowait(payload)
        except queue.Full:
            with self._lock:
                self._dropped_count += 1
            logger.warning(
                "notify queue full (size=%d); dropping payload run_id=%s",
                self._queue.maxsize,
                payload.run_id,
            )

    def join(self, timeout: float = DEFAULT_JOIN_TIMEOUT) -> None:
        """Block until the queue drains or ``timeout`` seconds elapse.
        Always call this once before process exit. Idempotent."""
        if not self._started:
            return
        try:
            # Sentinel marks end-of-stream; the worker exits after draining
            # everything enqueued before it.
            self._queue.put(_SHUTDOWN, timeout=max(0.1, timeout))
        except queue.Full:  # pragma: no cover
            logger.warning("notify queue full while signaling shutdown")
            return
        self._thread.join(timeout=timeout)
        if self._thread.is_alive():
            logger.warning(
                "notify worker did not finish within %.1fs; in-flight payload may be lost",
                timeout,
            )

    @property
    def dispatched_count(self) -> int:
        # Payloads successfully handed to the inner notifier.
        with self._lock:
            return self._dispatched_count

    @property
    def dropped_count(self) -> int:
        # Payloads discarded because the queue was full.
        with self._lock:
            return self._dropped_count

    def _worker(self) -> None:
        # Drain loop on the daemon thread; runs until the sentinel arrives.
        while True:
            item = self._queue.get()
            if isinstance(item, _Shutdown):
                return
            try:
                self._inner.push(item)
            except Exception as e:  # noqa: BLE001 — keep worker alive
                logger.warning("notify worker caught inner.push exception: %s", e)
            else:
                with self._lock:
                    self._dispatched_count += 1
|
|
216
|
+
|
|
217
|
+
|
|
218
|
+
# ---------------------------------------------------------------------------
|
|
219
|
+
# Discovery + assembly
|
|
220
|
+
# ---------------------------------------------------------------------------
|
|
221
|
+
|
|
222
|
+
|
|
223
|
+
def build_notifier(db: Database, plugin_manager: PluginManager) -> Notifier:
    """Discover all enabled ``type=channel`` plugins and assemble a Notifier.

    Returns a ``NoopNotifier`` when no channels are enabled (zero-cost path);
    otherwise an ``AsyncDispatchNotifier`` wrapping a ``MultiplexNotifier``.
    A channel whose entrypoint fails to load is logged and skipped — one bad
    channel never breaks the others.
    """
    from deeptrade.core.config import ConfigService  # avoid circular import
    from deeptrade.core.plugin_manager import _load_entrypoint
    from deeptrade.plugins_api.base import PluginContext

    records: list[InstalledPlugin] = [
        rec for rec in plugin_manager.list_all() if rec.type == "channel" and rec.enabled
    ]
    if not records:
        return NoopNotifier()

    loaded: list[tuple[ChannelPlugin, PluginContext]] = []
    for record in records:
        try:
            plugin = _load_entrypoint(
                Path(record.install_path), record.entrypoint, record.metadata
            )
        except Exception as exc:  # noqa: BLE001
            logger.warning("failed to load channel plugin %s: %s", record.plugin_id, exc)
            continue
        context = PluginContext(db=db, config=ConfigService(db), plugin_id=record.plugin_id)
        loaded.append((plugin, context))

    if not loaded:
        return NoopNotifier()
    return AsyncDispatchNotifier(MultiplexNotifier(loaded))
|
|
254
|
+
|
|
255
|
+
|
|
256
|
+
# ---------------------------------------------------------------------------
|
|
257
|
+
# Top-level user-facing API: notify(...) / notification_session(...)
|
|
258
|
+
# ---------------------------------------------------------------------------
|
|
259
|
+
|
|
260
|
+
|
|
261
|
+
def notify(db: Database, payload: NotificationPayload, *, timeout: float = 10.0) -> None:
    """Push one ``NotificationPayload`` through all enabled channel plugins.

    One-shot convenience: assembles a notifier from the current plugin
    registry, pushes *payload*, then joins (waiting up to ``timeout``
    seconds for in-flight delivery).

    No-ops silently when no channel plugins are enabled; per-channel
    failures are isolated and logged, never raised. For several payloads
    in one process, prefer :func:`notification_session` so the notifier
    (and its worker thread) is built only once.
    """
    from deeptrade.core.plugin_manager import PluginManager

    dispatcher = build_notifier(db, PluginManager(db))
    try:
        dispatcher.push(payload)
    finally:
        dispatcher.join(timeout=timeout)
|
|
281
|
+
|
|
282
|
+
|
|
283
|
+
@contextmanager
def notification_session(db: Database, *, timeout: float = 10.0) -> Iterator[Notifier]:
    """Yield a ``Notifier`` for batch pushes; joins automatically on exit.

    Use this when a plugin pushes multiple payloads in one run — the
    notifier and its worker thread are constructed once and reused.

    Example:
        with notification_session(db) as ns:
            ns.push(payload_a)
            ns.push(payload_b)
        # join + cleanup happen automatically here
    """
    from deeptrade.core.plugin_manager import PluginManager

    session = build_notifier(db, PluginManager(db))
    try:
        yield session
    finally:
        session.join(timeout=timeout)
|