aethergraph 0.1.0a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aethergraph/__init__.py +49 -0
- aethergraph/config/__init__.py +0 -0
- aethergraph/config/config.py +121 -0
- aethergraph/config/context.py +16 -0
- aethergraph/config/llm.py +26 -0
- aethergraph/config/loader.py +60 -0
- aethergraph/config/runtime.py +9 -0
- aethergraph/contracts/errors/errors.py +44 -0
- aethergraph/contracts/services/artifacts.py +142 -0
- aethergraph/contracts/services/channel.py +72 -0
- aethergraph/contracts/services/continuations.py +23 -0
- aethergraph/contracts/services/eventbus.py +12 -0
- aethergraph/contracts/services/kv.py +24 -0
- aethergraph/contracts/services/llm.py +17 -0
- aethergraph/contracts/services/mcp.py +22 -0
- aethergraph/contracts/services/memory.py +108 -0
- aethergraph/contracts/services/resume.py +28 -0
- aethergraph/contracts/services/state_stores.py +33 -0
- aethergraph/contracts/services/wakeup.py +28 -0
- aethergraph/core/execution/base_scheduler.py +77 -0
- aethergraph/core/execution/forward_scheduler.py +777 -0
- aethergraph/core/execution/global_scheduler.py +634 -0
- aethergraph/core/execution/retry_policy.py +22 -0
- aethergraph/core/execution/step_forward.py +411 -0
- aethergraph/core/execution/step_result.py +18 -0
- aethergraph/core/execution/wait_types.py +72 -0
- aethergraph/core/graph/graph_builder.py +192 -0
- aethergraph/core/graph/graph_fn.py +219 -0
- aethergraph/core/graph/graph_io.py +67 -0
- aethergraph/core/graph/graph_refs.py +154 -0
- aethergraph/core/graph/graph_spec.py +115 -0
- aethergraph/core/graph/graph_state.py +59 -0
- aethergraph/core/graph/graphify.py +128 -0
- aethergraph/core/graph/interpreter.py +145 -0
- aethergraph/core/graph/node_handle.py +33 -0
- aethergraph/core/graph/node_spec.py +46 -0
- aethergraph/core/graph/node_state.py +63 -0
- aethergraph/core/graph/task_graph.py +747 -0
- aethergraph/core/graph/task_node.py +82 -0
- aethergraph/core/graph/utils.py +37 -0
- aethergraph/core/graph/visualize.py +239 -0
- aethergraph/core/runtime/ad_hoc_context.py +61 -0
- aethergraph/core/runtime/base_service.py +153 -0
- aethergraph/core/runtime/bind_adapter.py +42 -0
- aethergraph/core/runtime/bound_memory.py +69 -0
- aethergraph/core/runtime/execution_context.py +220 -0
- aethergraph/core/runtime/graph_runner.py +349 -0
- aethergraph/core/runtime/lifecycle.py +26 -0
- aethergraph/core/runtime/node_context.py +203 -0
- aethergraph/core/runtime/node_services.py +30 -0
- aethergraph/core/runtime/recovery.py +159 -0
- aethergraph/core/runtime/run_registration.py +33 -0
- aethergraph/core/runtime/runtime_env.py +157 -0
- aethergraph/core/runtime/runtime_registry.py +32 -0
- aethergraph/core/runtime/runtime_services.py +224 -0
- aethergraph/core/runtime/wakeup_watcher.py +40 -0
- aethergraph/core/tools/__init__.py +10 -0
- aethergraph/core/tools/builtins/channel_tools.py +194 -0
- aethergraph/core/tools/builtins/toolset.py +134 -0
- aethergraph/core/tools/toolkit.py +510 -0
- aethergraph/core/tools/waitable.py +109 -0
- aethergraph/plugins/channel/__init__.py +0 -0
- aethergraph/plugins/channel/adapters/__init__.py +0 -0
- aethergraph/plugins/channel/adapters/console.py +106 -0
- aethergraph/plugins/channel/adapters/file.py +102 -0
- aethergraph/plugins/channel/adapters/slack.py +285 -0
- aethergraph/plugins/channel/adapters/telegram.py +302 -0
- aethergraph/plugins/channel/adapters/webhook.py +104 -0
- aethergraph/plugins/channel/adapters/webui.py +134 -0
- aethergraph/plugins/channel/routes/__init__.py +0 -0
- aethergraph/plugins/channel/routes/console_routes.py +86 -0
- aethergraph/plugins/channel/routes/slack_routes.py +49 -0
- aethergraph/plugins/channel/routes/telegram_routes.py +26 -0
- aethergraph/plugins/channel/routes/webui_routes.py +136 -0
- aethergraph/plugins/channel/utils/__init__.py +0 -0
- aethergraph/plugins/channel/utils/slack_utils.py +278 -0
- aethergraph/plugins/channel/utils/telegram_utils.py +324 -0
- aethergraph/plugins/channel/websockets/slack_ws.py +68 -0
- aethergraph/plugins/channel/websockets/telegram_polling.py +151 -0
- aethergraph/plugins/mcp/fs_server.py +128 -0
- aethergraph/plugins/mcp/http_server.py +101 -0
- aethergraph/plugins/mcp/ws_server.py +180 -0
- aethergraph/plugins/net/http.py +10 -0
- aethergraph/plugins/utils/data_io.py +359 -0
- aethergraph/runner/__init__.py +5 -0
- aethergraph/runtime/__init__.py +62 -0
- aethergraph/server/__init__.py +3 -0
- aethergraph/server/app_factory.py +84 -0
- aethergraph/server/start.py +122 -0
- aethergraph/services/__init__.py +10 -0
- aethergraph/services/artifacts/facade.py +284 -0
- aethergraph/services/artifacts/factory.py +35 -0
- aethergraph/services/artifacts/fs_store.py +656 -0
- aethergraph/services/artifacts/jsonl_index.py +123 -0
- aethergraph/services/artifacts/paths.py +23 -0
- aethergraph/services/artifacts/sqlite_index.py +209 -0
- aethergraph/services/artifacts/utils.py +124 -0
- aethergraph/services/auth/dev.py +16 -0
- aethergraph/services/channel/channel_bus.py +293 -0
- aethergraph/services/channel/factory.py +44 -0
- aethergraph/services/channel/session.py +511 -0
- aethergraph/services/channel/wait_helpers.py +57 -0
- aethergraph/services/clock/clock.py +9 -0
- aethergraph/services/container/default_container.py +320 -0
- aethergraph/services/continuations/continuation.py +56 -0
- aethergraph/services/continuations/factory.py +34 -0
- aethergraph/services/continuations/stores/fs_store.py +264 -0
- aethergraph/services/continuations/stores/inmem_store.py +95 -0
- aethergraph/services/eventbus/inmem.py +21 -0
- aethergraph/services/features/static.py +10 -0
- aethergraph/services/kv/ephemeral.py +90 -0
- aethergraph/services/kv/factory.py +27 -0
- aethergraph/services/kv/layered.py +41 -0
- aethergraph/services/kv/sqlite_kv.py +128 -0
- aethergraph/services/llm/factory.py +157 -0
- aethergraph/services/llm/generic_client.py +542 -0
- aethergraph/services/llm/providers.py +3 -0
- aethergraph/services/llm/service.py +105 -0
- aethergraph/services/logger/base.py +36 -0
- aethergraph/services/logger/compat.py +50 -0
- aethergraph/services/logger/formatters.py +106 -0
- aethergraph/services/logger/std.py +203 -0
- aethergraph/services/mcp/helpers.py +23 -0
- aethergraph/services/mcp/http_client.py +70 -0
- aethergraph/services/mcp/mcp_tools.py +21 -0
- aethergraph/services/mcp/registry.py +14 -0
- aethergraph/services/mcp/service.py +100 -0
- aethergraph/services/mcp/stdio_client.py +70 -0
- aethergraph/services/mcp/ws_client.py +115 -0
- aethergraph/services/memory/bound.py +106 -0
- aethergraph/services/memory/distillers/episode.py +116 -0
- aethergraph/services/memory/distillers/rolling.py +74 -0
- aethergraph/services/memory/facade.py +633 -0
- aethergraph/services/memory/factory.py +78 -0
- aethergraph/services/memory/hotlog_kv.py +27 -0
- aethergraph/services/memory/indices.py +74 -0
- aethergraph/services/memory/io_helpers.py +72 -0
- aethergraph/services/memory/persist_fs.py +40 -0
- aethergraph/services/memory/resolver.py +152 -0
- aethergraph/services/metering/noop.py +4 -0
- aethergraph/services/prompts/file_store.py +41 -0
- aethergraph/services/rag/chunker.py +29 -0
- aethergraph/services/rag/facade.py +593 -0
- aethergraph/services/rag/index/base.py +27 -0
- aethergraph/services/rag/index/faiss_index.py +121 -0
- aethergraph/services/rag/index/sqlite_index.py +134 -0
- aethergraph/services/rag/index_factory.py +52 -0
- aethergraph/services/rag/parsers/md.py +7 -0
- aethergraph/services/rag/parsers/pdf.py +14 -0
- aethergraph/services/rag/parsers/txt.py +7 -0
- aethergraph/services/rag/utils/hybrid.py +39 -0
- aethergraph/services/rag/utils/make_fs_key.py +62 -0
- aethergraph/services/redactor/simple.py +16 -0
- aethergraph/services/registry/key_parsing.py +44 -0
- aethergraph/services/registry/registry_key.py +19 -0
- aethergraph/services/registry/unified_registry.py +185 -0
- aethergraph/services/resume/multi_scheduler_resume_bus.py +65 -0
- aethergraph/services/resume/router.py +73 -0
- aethergraph/services/schedulers/registry.py +41 -0
- aethergraph/services/secrets/base.py +7 -0
- aethergraph/services/secrets/env.py +8 -0
- aethergraph/services/state_stores/externalize.py +135 -0
- aethergraph/services/state_stores/graph_observer.py +131 -0
- aethergraph/services/state_stores/json_store.py +67 -0
- aethergraph/services/state_stores/resume_policy.py +119 -0
- aethergraph/services/state_stores/serialize.py +249 -0
- aethergraph/services/state_stores/utils.py +91 -0
- aethergraph/services/state_stores/validate.py +78 -0
- aethergraph/services/tracing/noop.py +18 -0
- aethergraph/services/waits/wait_registry.py +91 -0
- aethergraph/services/wakeup/memory_queue.py +57 -0
- aethergraph/services/wakeup/scanner_producer.py +56 -0
- aethergraph/services/wakeup/worker.py +31 -0
- aethergraph/tools/__init__.py +25 -0
- aethergraph/utils/optdeps.py +8 -0
- aethergraph-0.1.0a1.dist-info/METADATA +410 -0
- aethergraph-0.1.0a1.dist-info/RECORD +182 -0
- aethergraph-0.1.0a1.dist-info/WHEEL +5 -0
- aethergraph-0.1.0a1.dist-info/entry_points.txt +2 -0
- aethergraph-0.1.0a1.dist-info/licenses/LICENSE +176 -0
- aethergraph-0.1.0a1.dist-info/licenses/NOTICE +31 -0
- aethergraph-0.1.0a1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
|
|
5
|
+
from .std import LoggingConfig, StdLoggerService
|
|
6
|
+
|
|
7
|
+
"""For backward compatibility with v2 LoggerFactory interface. Delegates to StdLoggerService under the hood. Will deprecate in future."""
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def build_logging(force: bool = True, cfg: LoggingConfig | None = None) -> logging.Logger:
    """
    Back-compat entry point: return the base logger, built via StdLoggerService.

    `force` is accepted only for signature parity with the v2 API; handler
    reset already happens inside StdLoggerService.build().
    """
    # Delegate construction entirely to the service, then hand back its root logger.
    return StdLoggerService.build(cfg).base()
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class LoggerFactory:
    """
    Back-compat API that delegates to StdLoggerService.
    """

    def __init__(self, base: logging.Logger | None = None, *, cfg: LoggingConfig | None = None):
        if base is None:
            self._svc = StdLoggerService.build(cfg)
        else:
            self._svc = StdLoggerService(base, cfg=cfg or LoggingConfig.from_env())
        # keep original attr for callers that relied on it
        self.base = self._svc.base()

    def for_node(self, node_id: str) -> logging.Logger:
        """Logger scoped to a single node."""
        return self._svc.for_node(node_id)

    def for_inspect(self) -> logging.Logger:
        """Logger for inspection output."""
        return self._svc.for_inspect()

    def for_run(self) -> logging.Logger:
        """Logger scoped to a run."""
        return self._svc.for_run()

    def for_scheduler(self) -> logging.Logger:
        """Logger for the scheduler."""
        return self._svc.for_scheduler()

    def for_node_ctx(
        self, *, run_id: str, node_id: str, graph_id: str | None = None
    ) -> logging.Logger:
        """Node logger with run/node/graph ids bound into its records."""
        return self._svc.for_node_ctx(run_id=run_id, node_id=node_id, graph_id=graph_id)

    def for_run_ctx(self, *, run_id: str, graph_id: str | None = None) -> logging.Logger:
        """Run logger with run/graph ids bound into its records."""
        return self._svc.for_run_ctx(run_id=run_id, graph_id=graph_id)
|
|
@@ -0,0 +1,106 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import logging
|
|
5
|
+
import sys
|
|
6
|
+
import time
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class SafeFormatter(logging.Formatter):
    """
    Text formatter that won't explode if `extra` keys are missing.
    Use %(run_id)s etc. in format strings without having to always bind them.
    """

    # Context fields that %-patterns may reference; backfilled with "-".
    _CONTEXT_KEYS = ("run_id", "node_id", "graph_id", "agent_id")

    def format(self, record: logging.LogRecord) -> str:
        # Give every known context key a placeholder so %()s never KeyErrors.
        for key in self._CONTEXT_KEYS:
            if not hasattr(record, key):
                setattr(record, key, "-")
        return super().format(record)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class JsonFormatter(logging.Formatter):
    """
    Structured JSON logs; safe for missing extras.
    """

    def __init__(self, *, include_timestamp: bool = True):
        super().__init__()
        self.include_timestamp = include_timestamp

    def format(self, record: logging.LogRecord) -> str:
        doc: dict[str, Any] = {
            "level": record.levelname,
            "logger": record.name,
            "message": record.getMessage(),
        }
        if self.include_timestamp:
            # UTC, second resolution, ISO-8601 style with an explicit Z suffix.
            doc["time"] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(record.created))
        # Context fields we care about; absent ones become None and are dropped below.
        for ctx_key in ("run_id", "node_id", "graph_id", "agent_id"):
            doc[ctx_key] = getattr(record, ctx_key, None)
        if record.exc_info:
            doc["exc_info"] = self.formatException(record.exc_info)
        # Drop null entries so log lines stay compact.
        return json.dumps({k: v for k, v in doc.items() if v is not None}, ensure_ascii=False)
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
class ColorFormatter(SafeFormatter):
    """
    Console/file formatter that adds ANSI color only to:
      - the level name (INFO/WARNING/ERROR/...)
      - run_id / node_id / graph_id tokens

    Everything else stays uncolored.
    """

    RESET = "\033[0m"
    LEVEL_COLORS = {
        "DEBUG": "\033[36m",  # cyan
        "INFO": "\033[32m",  # green
        "WARNING": "\033[33m",  # yellow
        "ERROR": "\033[31m",  # red
        "CRITICAL": "\033[41m",  # red background
    }

    ID_COLOR = "\033[35m"  # magenta for IDs

    def __init__(self, fmt: str, datefmt: str | None = None, use_color: bool | None = None):
        super().__init__(fmt, datefmt=datefmt)
        # Default to colored output only when stderr is an interactive terminal.
        self.use_color = sys.stderr.isatty() if use_color is None else use_color

    def format(self, record: logging.LogRecord) -> str:
        # SafeFormatter.format backfills missing run_id/node_id/... with "-".
        text = super().format(record)

        if not self.use_color:
            return text

        # 1) Wrap only the first occurrence of the level-name token.
        level_name = record.levelname
        level_color = self.LEVEL_COLORS.get(level_name, "")
        if level_color:
            text = text.replace(level_name, f"{level_color}{level_name}{self.RESET}", 1)

        # 2) Highlight every `key=value` id token that carries a real value.
        for id_key in ("run_id", "node_id", "graph_id"):
            id_val = getattr(record, id_key, None)
            if id_val and id_val != "-":
                token = f"{id_key}={id_val}"
                text = text.replace(token, f"{self.ID_COLOR}{token}{self.RESET}")

        return text
|
|
@@ -0,0 +1,203 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from collections.abc import Mapping
|
|
4
|
+
from dataclasses import dataclass
|
|
5
|
+
import logging
|
|
6
|
+
import logging.handlers
|
|
7
|
+
import os
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
import queue
|
|
10
|
+
|
|
11
|
+
from aethergraph.config.config import AppSettings
|
|
12
|
+
|
|
13
|
+
from .base import LogContext, LoggerService
|
|
14
|
+
from .formatters import ColorFormatter, JsonFormatter, SafeFormatter
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def _ensure_dir(path: Path) -> None:
|
|
18
|
+
path.mkdir(parents=True, exist_ok=True)
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
@dataclass(frozen=True)
class LoggingConfig:
    """
    Configure sinks & formats.

    Attributes:
        root_ns: base logger name to use (`aethergraph`).
        level: default level for root logger.
        log_dir: directory for file logs (rotated).
        use_json: True => JSON logs for files; console stays text by default.
        enable_queue: True => offload file IO via QueueHandler/Listener (non-blocking).
        per_namespace_levels: optional map (e.g. {"aethergraph.node": "DEBUG"}).
        console_pattern: text format string for console.
        file_pattern: text format string for file when use_json=False.
        max_bytes / backup_count: rotation for file handlers.
        external_level: level applied to the third-party loggers in quiet_loggers.
        quiet_loggers: names of noisy external loggers to clamp to external_level.
    """

    root_ns: str = "aethergraph"
    level: str = "INFO"
    log_dir: str = "./logs"
    use_json: bool = False
    enable_queue: bool = False
    # Fixed: was annotated `Mapping[str, str] = None`; None is the default (and a
    # legal value), so the annotation must be Optional.
    per_namespace_levels: Mapping[str, str] | None = None
    console_pattern: str = (
        "%(asctime)s %(levelname)s \t%(name)s run=%(run_id)s node=%(node_id)s - %(message)s"
    )
    file_pattern: str = (
        "%(asctime)s %(levelname)s %(name)s %(run_id)s %(node_id)s %(graph_id)s %(message)s"
    )
    max_bytes: int = 10 * 1024 * 1024
    backup_count: int = 5

    # external loggers
    external_level: str = "WARNING"
    quiet_loggers: tuple[str, ...] = ("httpx", "faiss", "faiss.loader")

    @staticmethod
    def from_env() -> LoggingConfig:
        """Build a config from AETHERGRAPH_LOG_* environment variables."""
        return LoggingConfig(
            root_ns=os.getenv("AETHERGRAPH_LOG_ROOT", "aethergraph"),
            level=os.getenv("AETHERGRAPH_LOG_LEVEL", "INFO"),
            log_dir=os.getenv("AETHERGRAPH_LOG_DIR", "./logs"),
            use_json=os.getenv("AETHERGRAPH_LOG_JSON", "0") == "1",
            enable_queue=os.getenv("AETHERGRAPH_LOG_ASYNC", "0") == "1",
        )

    @staticmethod
    def from_cfg(cfg: AppSettings, log_dir: str | None = None) -> LoggingConfig:
        """Build a config from application settings (reads cfg.logging.*)."""
        return LoggingConfig(
            root_ns=cfg.logging.nspace or "aethergraph",
            level=cfg.logging.level,
            log_dir=log_dir or "./logs",
            use_json=cfg.logging.json_logs,
            enable_queue=cfg.logging.enable_queue,
            external_level=cfg.logging.external_level,
            quiet_loggers=tuple(cfg.logging.quiet_loggers),
        )
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
class _ContextAdapter(logging.LoggerAdapter):
|
|
81
|
+
"""
|
|
82
|
+
Injects contextual fields into LogRecord via `extra`.
|
|
83
|
+
Preserves original logger API (info, debug, etc.).
|
|
84
|
+
"""
|
|
85
|
+
|
|
86
|
+
def process(self, msg, kwargs):
|
|
87
|
+
extra = kwargs.get("extra") or {}
|
|
88
|
+
merged = {**self.extra, **extra}
|
|
89
|
+
kwargs["extra"] = merged
|
|
90
|
+
return msg, kwargs
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
class StdLoggerService(LoggerService):
    """
    Standard-library backed logger service.

    • text/JSON formatters
    • per-namespace levels
    • optional async file IO via QueueHandler
    • context helpers (with_context / for_*_ctx)
    """

    def __init__(self, base: logging.Logger, *, cfg: LoggingConfig):
        self._base = base
        self._cfg = cfg

    # --- LoggerService interface ---

    def base(self) -> logging.Logger:
        """Root logger this service was built around."""
        return self._base

    def for_namespace(self, ns: str) -> logging.Logger:
        """Child logger under the service's root namespace."""
        return self._base.getChild(ns)

    def with_context(self, logger: logging.Logger, ctx: LogContext) -> logging.Logger:
        """Wrap `logger` so every record carries the fields from `ctx`."""
        return _ContextAdapter(logger, ctx.as_extra())

    # Back-compat helpers
    def for_node(self, node_id: str) -> logging.Logger:
        return self.for_namespace(f"node.{node_id}")

    def for_run(self) -> logging.Logger:
        return self.for_namespace("run")

    def for_inspect(self) -> logging.Logger:
        return self.for_namespace("inspect")

    def for_scheduler(self) -> logging.Logger:
        return self.for_namespace("scheduler")

    def for_node_ctx(
        self, *, run_id: str, node_id: str, graph_id: str | None = None
    ) -> logging.Logger:
        """Node logger with run/node/graph ids bound into every record."""
        base = self.for_node(node_id)
        return self.with_context(
            base, LogContext(run_id=run_id, node_id=node_id, graph_id=graph_id)
        )

    def for_run_ctx(self, *, run_id: str, graph_id: str | None = None) -> logging.Logger:
        """Run logger with run/graph ids bound into every record."""
        base = self.for_run()
        return self.with_context(base, LogContext(run_id=run_id, graph_id=graph_id))

    # --- builder ---

    @staticmethod
    def _make_file_handler(cfg: LoggingConfig, file_path: Path, level: int) -> logging.Handler:
        """Rotating file handler with the configured formatter and level."""
        fh = logging.handlers.RotatingFileHandler(
            file_path, maxBytes=cfg.max_bytes, backupCount=cfg.backup_count, encoding="utf-8"
        )
        fh.setFormatter(JsonFormatter() if cfg.use_json else SafeFormatter(cfg.file_pattern))
        fh.setLevel(level)
        return fh

    @staticmethod
    def build(cfg: LoggingConfig | None = None) -> StdLoggerService:
        """
        Configure the root namespace logger (console + rotating file sinks) and
        return a service wrapping it. Safe to call repeatedly: existing handlers
        are removed first (idempotent server restarts).
        """
        cfg = cfg or LoggingConfig.from_env()

        root = logging.getLogger(cfg.root_ns)
        # Reset handlers if rebuilding (idempotent server restarts)
        for h in list(root.handlers):
            root.removeHandler(h)
        level = getattr(logging, cfg.level.upper(), logging.INFO)
        root.setLevel(level)
        root.propagate = False

        # Per-namespace levels
        if cfg.per_namespace_levels:
            for ns, lvl in cfg.per_namespace_levels.items():
                logging.getLogger(ns).setLevel(getattr(logging, str(lvl).upper(), logging.INFO))

        # Console handler (text)
        console = logging.StreamHandler()
        console.setLevel(level)
        console.setFormatter(ColorFormatter(cfg.console_pattern))
        root.addHandler(console)

        # File handler (rotating). Construction was previously duplicated in
        # both the queued and direct branches; build it once instead.
        _ensure_dir(Path(cfg.log_dir))
        file_path = Path(cfg.log_dir) / "aethergraph.log"
        fh = StdLoggerService._make_file_handler(cfg, file_path, level)

        if cfg.enable_queue:
            # Non-blocking file IO: records go through an unbounded queue and a
            # daemon listener thread drains them into the file handler.
            q = queue.Queue(-1)
            root.addHandler(logging.handlers.QueueHandler(q))
            listener = logging.handlers.QueueListener(q, fh, respect_handler_level=True)
            listener.daemon = True
            listener.start()
            # NOTE(review): the listener is not retained anywhere, so it can't be
            # stopped/flushed explicitly at shutdown — confirm this is intended.
        else:
            root.addHandler(fh)

        # Clamp noisy third-party loggers to the configured external level.
        ext_level = getattr(logging, cfg.external_level.upper(), logging.WARNING)
        for name in cfg.quiet_loggers:
            lg = logging.getLogger(name)
            lg.setLevel(ext_level)
            lg.propagate = True

        return StdLoggerService(root, cfg=cfg)
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
|
|
5
|
+
from aethergraph.core.runtime.node_context import NodeContext
|
|
6
|
+
|
|
7
|
+
logger = logging.getLogger("aethergraph.services.mcp.helpers")
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
async def mcp_call_logged(context: NodeContext, server: str, tool: str, params: dict | None = None):
    """
    Call MCP tool `tool` on `server` through the node's MCP client, then
    best-effort record the call (inputs + result) via the memory service.

    Returns the raw client result; memory-write failures are logged with a
    traceback but never propagate to the caller.
    """
    client = context.mcp(server)
    res = await client.call(tool, params or {})
    try:
        await context.mem().write_result(
            topic=f"mcp.{server}.{tool}",
            inputs=[{"name": "args", "kind": "json", "value": params or {}}],
            outputs=[{"name": "result", "kind": "json", "value": res}],
            tags=["mcp", "tool_call"],
            message=f"MCP {server}:{tool}",
        )
    except Exception:
        # Fixed: include the call identity and the traceback; the original bare
        # message made these failures undiagnosable.
        logger.warning(
            "Failed to log MCP tool call result for %s:%s", server, tool, exc_info=True
        )
    return res
|
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import json
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
import httpx
|
|
8
|
+
|
|
9
|
+
from aethergraph.contracts.services.mcp import MCPClientProtocol, MCPResource, MCPTool
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class HttpMCPClient(MCPClientProtocol):
    """MCP client speaking JSON-RPC 2.0 over HTTP POST to `<base_url>/rpc`.

    The underlying httpx.AsyncClient is created lazily; an asyncio.Lock is
    held across each full request/response cycle, so RPCs on one instance
    are strictly serialized.
    """

    def __init__(
        self,
        base_url: str,
        *,
        headers: dict[str, str] | None = None,
        timeout: float = 60.0,
    ):
        """
        Args:
            base_url: Server base URL; trailing slashes are stripped.
            headers: Extra HTTP headers merged over the JSON content-type default.
            timeout: Per-request timeout (seconds) for the httpx client.
        """
        self.base_url = base_url.rstrip("/")
        self.headers = {"Content-Type": "application/json"}
        if headers:
            self.headers.update(headers)
        self.timeout = timeout

        # Created lazily by open()/_ensure(); reset to None by close().
        self._client: httpx.AsyncClient | None = None
        # Monotonic JSON-RPC request id; bumped under _lock in _rpc().
        self._id = 0
        self._lock = asyncio.Lock()

    async def open(self):
        """Create the HTTP client if not already open (idempotent)."""
        if self._client is None:
            self._client = httpx.AsyncClient(timeout=self.timeout)

    async def close(self):
        """Close and discard the HTTP client; safe to call when already closed."""
        if self._client is not None:
            try:
                await self._client.aclose()
            finally:
                self._client = None

    async def _ensure(self):
        # Lazy (re)open so callers may skip an explicit open().
        if self._client is None:
            await self.open()

    async def _rpc(self, method: str, params: dict[str, Any] | None = None) -> Any:
        """Send one JSON-RPC request and return its `result` member.

        Raises on non-2xx HTTP responses (via raise_for_status) and raises
        RuntimeError when the JSON-RPC response carries an `error` member.
        """
        await self._ensure()
        # NOTE: the lock covers the whole round trip, not just the id bump —
        # concurrent callers are serialized on this one client.
        async with self._lock:
            self._id += 1
            req = {"jsonrpc": "2.0", "id": self._id, "method": method, "params": params or {}}
            assert self._client is not None
            r = await self._client.post(
                f"{self.base_url}/rpc", headers=self.headers, content=json.dumps(req)
            )
            r.raise_for_status()
            resp = r.json()
            if "error" in resp:
                raise RuntimeError(str(resp["error"]))
            return resp.get("result")

    async def list_tools(self) -> list[MCPTool]:
        """List tools exposed by the server (`tools/list`)."""
        return await self._rpc("tools/list")

    async def call(self, tool: str, params: dict[str, Any] | None = None) -> dict[str, Any]:
        """Invoke a named tool with keyword arguments (`tools/call`)."""
        return await self._rpc("tools/call", {"name": tool, "arguments": params or {}})

    async def list_resources(self) -> list[MCPResource]:
        """List resources exposed by the server (`resources/list`)."""
        return await self._rpc("resources/list")

    async def read_resource(self, uri: str) -> dict[str, Any]:
        """Read a resource by URI (`resources/read`)."""
        return await self._rpc("resources/read", {"uri": uri})
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
from aethergraph import NodeContext, tool
from aethergraph.services.mcp.helpers import mcp_call_logged
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
@tool(outputs=["result"], name="mcp.call", version="0.1.0")
async def mcp_call(server: str, tool_name: str, args: dict | None = None, *, context: NodeContext):
    """Invoke a named MCP tool (with memory logging) and confirm on the channel."""
    payload = args if args is not None else {}
    result = await mcp_call_logged(context, server, tool_name, payload)
    await context.channel().send_text(f"🔌 MCP {server}:{tool_name} ✓")
    return {"result": result}
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
@tool(outputs=["tools"], name="mcp.list_tools", version="0.1.0")
async def mcp_list_tools(server: str, *, context: NodeContext):
    """List the tools advertised by an MCP server and record them in memory."""
    listing = await context.mcp(server).list_tools()
    await context.mem().write_result(
        topic=f"mcp.{server}.list_tools",
        outputs=[{"name": "tools", "kind": "json", "value": listing}],
        tags=["mcp"],
    )
    return {"tools": listing}
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
from aethergraph.contracts.services.mcp import MCPClientProtocol
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class MCPRegistry:
    """Simple name → MCP client lookup table."""

    def __init__(self):
        self._clients: dict[str, MCPClientProtocol] = {}

    def register(self, name: str, client: MCPClientProtocol):
        """Register (or replace) the client stored under `name`."""
        self._clients[name] = client

    def get(self, name: str) -> MCPClientProtocol:
        """Return the client for `name`; KeyError if it was never registered."""
        try:
            return self._clients[name]
        except KeyError:
            raise KeyError(f"MCP server '{name}' not registered") from None
|
|
@@ -0,0 +1,100 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
from typing import Any
|
|
5
|
+
|
|
6
|
+
from aethergraph.contracts.services.mcp import MCPClientProtocol, MCPResource, MCPTool
|
|
7
|
+
|
|
8
|
+
logger = logging.getLogger("aethergraph.services.mcp")
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class MCPService:
|
|
12
|
+
"""
|
|
13
|
+
Holds many MCP clients (stdio/ws) under names, manages lifecycle, and
|
|
14
|
+
provides thin convenience helpers (open/close/call/list_tools).
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
def __init__(self, clients: dict[str, MCPClientProtocol] | None = None, *, secrets=None):
|
|
18
|
+
self._clients: dict[str, MCPClientProtocol] = clients or {}
|
|
19
|
+
self._secrets = secrets # optional (Secrets provider) Not implemented here
|
|
20
|
+
|
|
21
|
+
# ---- registration ----
|
|
22
|
+
def register(self, name: str, client: MCPClientProtocol) -> None:
|
|
23
|
+
self._clients[name] = client
|
|
24
|
+
|
|
25
|
+
def remove(self, name: str) -> None:
|
|
26
|
+
self._clients.pop(name, None)
|
|
27
|
+
|
|
28
|
+
def has(self, name: str) -> bool:
|
|
29
|
+
return name in self._clients
|
|
30
|
+
|
|
31
|
+
def names(self) -> list[str]:
|
|
32
|
+
return list(self._clients.keys())
|
|
33
|
+
|
|
34
|
+
def list_clients(self) -> list[str]:
|
|
35
|
+
return list(self._clients.keys())
|
|
36
|
+
|
|
37
|
+
def get(self, name: str = "default") -> MCPClientProtocol:
|
|
38
|
+
if name not in self._clients:
|
|
39
|
+
raise KeyError(f"Unknown MCP server '{name}'")
|
|
40
|
+
return self._clients[name]
|
|
41
|
+
|
|
42
|
+
# ---- lifecycle ----
|
|
43
|
+
async def open(self, name: str) -> None:
|
|
44
|
+
await self.get(name).open()
|
|
45
|
+
|
|
46
|
+
async def close(self, name: str) -> None:
|
|
47
|
+
try:
|
|
48
|
+
await self.get(name).close()
|
|
49
|
+
except Exception:
|
|
50
|
+
logger.warning(f"Failed to close MCP client '{name}'")
|
|
51
|
+
|
|
52
|
+
async def open_all(self) -> None:
|
|
53
|
+
for n in self._clients:
|
|
54
|
+
await self._clients[n].open()
|
|
55
|
+
|
|
56
|
+
async def close_all(self) -> None:
|
|
57
|
+
for n in self._clients:
|
|
58
|
+
try:
|
|
59
|
+
await self._clients[n].close()
|
|
60
|
+
except Exception:
|
|
61
|
+
logger.warning(f"Failed to close MCP client '{n}'")
|
|
62
|
+
|
|
63
|
+
# ---- call helpers (optional, keeps call sites tiny) ----
|
|
64
|
+
async def call(
|
|
65
|
+
self, name: str, tool: str, params: dict[str, Any] | None = None
|
|
66
|
+
) -> dict[str, Any]:
|
|
67
|
+
# lazy-open on first use; clients themselves also lazy-reconnect
|
|
68
|
+
c = self.get(name)
|
|
69
|
+
await c.open()
|
|
70
|
+
return await c.call(tool, params or {})
|
|
71
|
+
|
|
72
|
+
async def list_tools(self, name: str) -> list[MCPTool]:
|
|
73
|
+
c = self.get(name)
|
|
74
|
+
await c.open()
|
|
75
|
+
return await c.list_tools()
|
|
76
|
+
|
|
77
|
+
async def list_resources(self, name: str) -> list[MCPResource]:
    """List the resources exposed by the named server (opened lazily)."""
    client = self.get(name)
    await client.open()
    return await client.list_resources()
|
|
81
|
+
|
|
82
|
+
async def read_resource(self, name: str, uri: str) -> dict[str, Any]:
    """Read the resource at *uri* from the named server (opened lazily)."""
    client = self.get(name)
    await client.open()
    return await client.read_resource(uri)
|
|
86
|
+
|
|
87
|
+
# ---- optional secrets helpers ----
|
|
88
|
+
def set_header(self, name: str, key: str, value: str) -> None:
    """For ws clients: set/override a header at runtime (demo/notebook UX)."""
    client = self.get(name)
    # duck-typing: only clients that carry a mutable headers dict qualify
    headers = getattr(client, "headers", None)
    if isinstance(headers, dict):
        headers[key] = value
    else:
        raise RuntimeError(f"MCP '{name}' does not support headers")
|
|
96
|
+
|
|
97
|
+
def persist_secret(self, secret_name: str, value: str) -> None:
    """Store *value* under *secret_name* in the secrets provider, if writable."""
    secrets = self._secrets
    # truthiness check intentionally matches the provider contract: a missing
    # or falsy provider, or one without a `set` method, is not writable
    if not secrets or not hasattr(secrets, "set"):
        raise RuntimeError("Secrets provider is not writable")
    secrets.set(secret_name, value)  # type: ignore
|
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import json
|
|
3
|
+
import os
|
|
4
|
+
from typing import Any
|
|
5
|
+
|
|
6
|
+
from aethergraph.contracts.services.mcp import MCPClientProtocol, MCPResource, MCPTool
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class StdioMCPClient(MCPClientProtocol):
    """MCP client that talks to a subprocess over stdio using JSON-RPC 2.0.

    One request is written per line and exactly one response line is read
    back; an asyncio lock serializes callers so request/response pairs on
    the pipe cannot interleave.
    """

    def __init__(self, cmd: list[str], env: dict[str, str] | None = None, timeout: float = 60.0):
        """Create a stdio MCP client.

        Args:
            cmd: Command to start the MCP server subprocess (list of str).
            env: Optional environment variables to set for the subprocess.
            timeout: Timeout in seconds for each RPC call.
        """
        self.cmd, self.env, self.timeout = cmd, env or {}, timeout
        self.proc = None  # asyncio subprocess handle, created by open()
        self._id = 0  # monotonically increasing JSON-RPC request id
        self._lock = asyncio.Lock()  # serializes write/read pairs on the pipe

    async def open(self):
        """Start the server subprocess if it is not already running (idempotent)."""
        if self.proc:
            return
        self.proc = await asyncio.create_subprocess_exec(
            *self.cmd,
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            # inherit the parent's environment, with explicit overrides on top
            env={**os.environ, **self.env},
        )

    async def close(self):
        """Terminate the subprocess; a failed terminate is logged, not raised."""
        if not self.proc:
            return
        try:
            self.proc.terminate()
        except Exception:
            # Fix: the original used `self.logger`, which is never set on this
            # class, so the except path itself raised AttributeError and masked
            # the terminate failure. Use a module-scoped logger instead.
            import logging

            logging.getLogger(__name__).warning("mcp_stdio_terminate_failed")
        # NOTE(review): the process is dropped without `await self.proc.wait()`,
        # which can leave an unreaped child until interpreter exit -- confirm
        # whether reaping here is desired.
        self.proc = None

    async def _rpc(self, method: str, params: dict[str, Any] | None = None) -> Any:
        """Send one JSON-RPC request and return its ``result`` field.

        Raises:
            RuntimeError: if the subprocess pipes are unavailable, the server
                closed stdout, or the response carries an ``error`` member.
            TimeoutError: if no response line arrives within ``self.timeout``.
        """
        await self.open()
        async with self._lock:
            self._id += 1
            req = {"jsonrpc": "2.0", "id": self._id, "method": method, "params": params or {}}
            data = (json.dumps(req) + "\n").encode("utf-8")
            # Explicit check instead of `assert`: asserts are stripped under -O,
            # and pipe availability is a runtime condition, not an invariant.
            if not (self.proc and self.proc.stdin and self.proc.stdout):
                raise RuntimeError("MCP stdio subprocess is not open")
            self.proc.stdin.write(data)
            await self.proc.stdin.drain()
            line = await asyncio.wait_for(self.proc.stdout.readline(), timeout=self.timeout)
            if not line:
                raise RuntimeError("MCP server closed")
            resp = json.loads(line.decode("utf-8"))
            # NOTE(review): assumes the server answers strictly in order, one
            # JSON object per line, and never emits notifications -- the
            # response id is not matched to the request id. Confirm contract.
            if "error" in resp:
                raise RuntimeError(str(resp["error"]))
            return resp.get("result")

    async def list_tools(self) -> list[MCPTool]:
        """Return the server's tool descriptors via ``tools/list``."""
        return await self._rpc("tools/list")

    async def call(self, tool: str, params: dict[str, Any] | None = None) -> dict[str, Any]:
        """Invoke *tool* with *params* via ``tools/call``."""
        return await self._rpc("tools/call", {"name": tool, "arguments": params or {}})

    async def list_resources(self) -> list[MCPResource]:
        """Return the server's resource descriptors via ``resources/list``."""
        return await self._rpc("resources/list")

    async def read_resource(self, uri: str) -> dict[str, Any]:
        """Read the resource at *uri* via ``resources/read``."""
        return await self._rpc("resources/read", {"uri": uri})
|