agentscope-sdk 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentscope_sdk-0.1.0/PKG-INFO +10 -0
- agentscope_sdk-0.1.0/README.md +3 -0
- agentscope_sdk-0.1.0/agentscope/__init__.py +15 -0
- agentscope_sdk-0.1.0/agentscope/client.py +44 -0
- agentscope_sdk-0.1.0/agentscope/coding_agent.py +145 -0
- agentscope_sdk-0.1.0/agentscope/exporter.py +16 -0
- agentscope_sdk-0.1.0/agentscope/instrumentation/__init__.py +3 -0
- agentscope_sdk-0.1.0/agentscope/instrumentation/anthropic_adapter.py +100 -0
- agentscope_sdk-0.1.0/agentscope/instrumentation/http_interceptor.py +51 -0
- agentscope_sdk-0.1.0/agentscope/instrumentation/langchain_adapter.py +109 -0
- agentscope_sdk-0.1.0/agentscope/instrumentation/llm_tracer.py +160 -0
- agentscope_sdk-0.1.0/agentscope/instrumentation/openai_adapter.py +123 -0
- agentscope_sdk-0.1.0/agentscope/instrumentation/provider_detection.py +28 -0
- agentscope_sdk-0.1.0/agentscope/instrumentation/registry.py +35 -0
- agentscope_sdk-0.1.0/agentscope/instrumentation/token_usage.py +33 -0
- agentscope_sdk-0.1.0/agentscope/instrumentation/tracer.py +263 -0
- agentscope_sdk-0.1.0/agentscope/run.py +129 -0
- agentscope_sdk-0.1.0/agentscope/span.py +66 -0
- agentscope_sdk-0.1.0/agentscope_sdk.egg-info/PKG-INFO +10 -0
- agentscope_sdk-0.1.0/agentscope_sdk.egg-info/SOURCES.txt +22 -0
- agentscope_sdk-0.1.0/agentscope_sdk.egg-info/dependency_links.txt +1 -0
- agentscope_sdk-0.1.0/agentscope_sdk.egg-info/top_level.txt +1 -0
- agentscope_sdk-0.1.0/pyproject.toml +17 -0
- agentscope_sdk-0.1.0/setup.cfg +4 -0
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
from .coding_agent import coding_agent_run, instrument_coding_agent, read_file, run_command, write_file
|
|
2
|
+
from .instrumentation import auto_instrument
|
|
3
|
+
from .run import observe_run
|
|
4
|
+
from .span import observe_span
|
|
5
|
+
|
|
6
|
+
# Public names exported by ``from agentscope import *``; mirrors the
# re-exports from the submodule imports above.
__all__ = [
    "observe_run",
    "observe_span",
    "auto_instrument",
    "coding_agent_run",
    "instrument_coding_agent",
    "read_file",
    "write_file",
    "run_command",
]
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import json
|
|
3
|
+
import urllib.error
|
|
4
|
+
import urllib.request
|
|
5
|
+
from typing import Any, Dict
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class AgentScopeClient:
|
|
9
|
+
def __init__(self, base_url: str | None = None, api_key: str | None = None, timeout: float = 5.0) -> None:
|
|
10
|
+
self.base_url = (base_url or os.getenv("AGENTSCOPE_API_BASE", "http://localhost:8080")).rstrip("/")
|
|
11
|
+
self.api_key = api_key or os.getenv("AGENTSCOPE_API_KEY", "")
|
|
12
|
+
self.timeout = timeout
|
|
13
|
+
|
|
14
|
+
def ingest(self, payload: Dict[str, Any]) -> None:
|
|
15
|
+
if not self.api_key:
|
|
16
|
+
raise RuntimeError("AgentScope ingest requires an API key. Set AGENTSCOPE_API_KEY or pass api_key.")
|
|
17
|
+
|
|
18
|
+
try:
|
|
19
|
+
import requests
|
|
20
|
+
except ImportError:
|
|
21
|
+
body = json.dumps(payload).encode("utf-8")
|
|
22
|
+
request = urllib.request.Request(
|
|
23
|
+
f"{self.base_url}/v1/ingest",
|
|
24
|
+
data=body,
|
|
25
|
+
headers={
|
|
26
|
+
"Content-Type": "application/json",
|
|
27
|
+
"X-AgentScope-API-Key": self.api_key,
|
|
28
|
+
},
|
|
29
|
+
method="POST",
|
|
30
|
+
)
|
|
31
|
+
try:
|
|
32
|
+
with urllib.request.urlopen(request, timeout=self.timeout):
|
|
33
|
+
return
|
|
34
|
+
except urllib.error.HTTPError as exc:
|
|
35
|
+
raise RuntimeError(f"AgentScope ingest failed with status {exc.code}") from exc
|
|
36
|
+
return
|
|
37
|
+
|
|
38
|
+
response = requests.post(
|
|
39
|
+
f"{self.base_url}/v1/ingest",
|
|
40
|
+
json=payload,
|
|
41
|
+
headers={"X-AgentScope-API-Key": self.api_key},
|
|
42
|
+
timeout=self.timeout,
|
|
43
|
+
)
|
|
44
|
+
response.raise_for_status()
|
|
@@ -0,0 +1,145 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import difflib
|
|
4
|
+
import subprocess
|
|
5
|
+
import uuid
|
|
6
|
+
from dataclasses import dataclass
|
|
7
|
+
from functools import wraps
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import Any, Callable, TypeVar
|
|
10
|
+
|
|
11
|
+
from .instrumentation import auto_instrument
|
|
12
|
+
from .run import _current_run_state, observe_run
|
|
13
|
+
from .span import observe_span
|
|
14
|
+
|
|
15
|
+
F = TypeVar("F", bound=Callable[..., Any])
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
@dataclass
class CommandResult:
    """Outcome of a shell command executed by ``run_command``."""

    command: str  # command line as a single string (lists are space-joined)
    exit_code: int  # process return code; 0 conventionally means success
    stdout: str  # captured standard output (decoded text)
    stderr: str  # captured standard error (decoded text)
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def _append_artifact(*, span: dict[str, Any], kind: str, payload: dict[str, Any]) -> None:
    """Attach an artifact record for *span* to the active run, if one exists."""
    state = _current_run_state()
    if state is None:
        # No observed run is active; artifacts are silently dropped.
        return

    artifact = {
        "id": str(uuid.uuid4()),
        "run_id": span["run_id"],
        "span_id": span["id"],
        "kind": kind,
        "payload": payload,
    }
    state.artifacts.append(artifact)
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def _as_path(file_path: str | Path) -> Path:
|
|
43
|
+
return file_path if isinstance(file_path, Path) else Path(file_path)
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
class coding_agent_run:
    """Context manager that wraps a coding-agent session in an observed run.

    Entering enables LLM auto-instrumentation and delegates to the wrapped
    ``observe_run`` context; exiting delegates exception handling to it.
    """

    def __init__(self, agent_name: str = "coding_agent") -> None:
        self.agent_name = agent_name
        # Wrapped run context; entered/exited by __enter__/__exit__ below.
        self._run = observe_run("coding_agent", agent_name=agent_name)

    def __enter__(self) -> dict[str, Any]:
        # Patch known LLM entry points before the session begins.
        auto_instrument()
        return self._run.__enter__()

    def __exit__(self, exc_type, exc, tb) -> bool:
        # Delegates suppression decisions to the underlying run context.
        return self._run.__exit__(exc_type, exc, tb)
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def instrument_coding_agent(fn: F) -> F:
    """Decorator that executes every call to *fn* inside a coding-agent run."""

    @wraps(fn)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        run_ctx = coding_agent_run(agent_name=fn.__name__)
        with run_ctx:
            return fn(*args, **kwargs)

    return wrapper  # type: ignore[return-value]
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def read_file(file_path: str | Path, *, encoding: str = "utf-8") -> str:
    """Read *file_path* as text inside a ``file_read`` span."""
    target = _as_path(file_path)
    with observe_span("file_read") as span:
        span["metadata"] = {"file_path": str(target)}
        return target.read_text(encoding=encoding)
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
def write_file(file_path: str | Path, content: str, *, encoding: str = "utf-8") -> None:
    """Write *content* to *file_path* inside a ``file_write`` span.

    Parent directories are created as needed. A unified diff against the
    previous content and the full new content are recorded as artifacts
    (``file.diff`` and ``file.content``) on the active run.
    """
    target = _as_path(file_path)
    before = target.read_text(encoding=encoding) if target.exists() else ""

    with observe_span("file_write") as span:
        span["metadata"] = {"file_path": str(target)}
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_text(content, encoding=encoding)

        unified = difflib.unified_diff(
            before.splitlines(keepends=True),
            content.splitlines(keepends=True),
            fromfile=f"a/{target}",
            tofile=f"b/{target}",
        )
        diff_text = "".join(unified)
        _append_artifact(
            span=span,
            kind="file.diff",
            payload={"file_path": str(target), "diff": diff_text},
        )
        _append_artifact(
            span=span,
            kind="file.content",
            payload={"file_path": str(target), "content": content},
        )
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
def run_command(
    command: str | list[str],
    *,
    cwd: str | Path | None = None,
    env: dict[str, str] | None = None,
    check: bool = False,
    shell: bool | None = None,
) -> CommandResult:
    """Run *command* inside a ``command_exec`` span and capture its output.

    By default, string commands run through the shell and list commands do
    not; pass ``shell`` explicitly to override. With ``check=True`` a
    nonzero exit code raises ``subprocess.CalledProcessError``.
    """
    # NOTE(review): string commands go through the shell here -- callers must
    # not pass untrusted input as a string command.
    use_shell = isinstance(command, str) if shell is None else shell

    with observe_span("command_exec") as span:
        if isinstance(command, str):
            display = command
        else:
            display = " ".join(command)

        completed = subprocess.run(
            command,
            cwd=None if cwd is None else str(cwd),
            env=env,
            check=False,  # exit codes handled manually below
            capture_output=True,
            text=True,
            shell=use_shell,
        )

        span["metadata"] = {
            "command": display,
            "exit_code": completed.returncode,
            "stdout": completed.stdout,
            "stderr": completed.stderr,
        }

        if check and completed.returncode != 0:
            raise subprocess.CalledProcessError(
                completed.returncode,
                command,
                output=completed.stdout,
                stderr=completed.stderr,
            )

        return CommandResult(
            command=display,
            exit_code=completed.returncode,
            stdout=completed.stdout,
            stderr=completed.stderr,
        )
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
from typing import Any, Dict, List
|
|
2
|
+
|
|
3
|
+
from .client import AgentScopeClient
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class TelemetryExporter:
    """Bundles a run with its spans and artifacts and sends them for ingestion."""

    def __init__(self, client: AgentScopeClient | None = None) -> None:
        # Fall back to a default client configured from the environment.
        self.client = AgentScopeClient() if client is None else client

    def export(self, run: Dict[str, Any], spans: List[Dict[str, Any]], artifacts: List[Dict[str, Any]]) -> None:
        """Send one run plus its spans and artifacts as a single payload."""
        self.client.ingest({"run": run, "spans": spans, "artifacts": artifacts})
|
|
@@ -0,0 +1,100 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import inspect
|
|
4
|
+
from typing import Any, Callable
|
|
5
|
+
|
|
6
|
+
from .registry import ProviderAdapter, TargetSpec
|
|
7
|
+
from .token_usage import normalize_usage
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def _safe_getattr(value: Any, name: str, default: Any = None) -> Any:
|
|
11
|
+
try:
|
|
12
|
+
return getattr(value, name, default)
|
|
13
|
+
except Exception:
|
|
14
|
+
return default
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def _safe_get(value: Any, key: str, default: Any = None) -> Any:
|
|
18
|
+
if isinstance(value, dict):
|
|
19
|
+
return value.get(key, default)
|
|
20
|
+
return _safe_getattr(value, key, default)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def _extract_call_data(
|
|
24
|
+
original: Callable[..., Any], args: tuple[Any, ...], kwargs: dict[str, Any]
|
|
25
|
+
) -> dict[str, Any]:
|
|
26
|
+
try:
|
|
27
|
+
bound = inspect.signature(original).bind_partial(*args, **kwargs)
|
|
28
|
+
data = dict(bound.arguments)
|
|
29
|
+
extra_kwargs = data.pop("kwargs", None)
|
|
30
|
+
if isinstance(extra_kwargs, dict):
|
|
31
|
+
data.update(extra_kwargs)
|
|
32
|
+
return data
|
|
33
|
+
except Exception:
|
|
34
|
+
return dict(kwargs)
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def _extract_text(response: Any) -> str | None:
|
|
38
|
+
if isinstance(response, dict):
|
|
39
|
+
content = response.get("content")
|
|
40
|
+
if isinstance(content, list):
|
|
41
|
+
parts = [item.get("text") for item in content if isinstance(item, dict)]
|
|
42
|
+
parts = [part for part in parts if isinstance(part, str)]
|
|
43
|
+
return "".join(parts) if parts else None
|
|
44
|
+
return None
|
|
45
|
+
|
|
46
|
+
content = _safe_getattr(response, "content")
|
|
47
|
+
if isinstance(content, list):
|
|
48
|
+
parts = [_safe_getattr(item, "text") for item in content]
|
|
49
|
+
parts = [part for part in parts if isinstance(part, str)]
|
|
50
|
+
return "".join(parts) if parts else None
|
|
51
|
+
return None
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def _request_extractor(
    original: Callable[..., Any], args: tuple[Any, ...], kwargs: dict[str, Any]
) -> dict[str, Any]:
    """Pull model, messages, and system prompt out of a Messages.create call."""
    call_data = _extract_call_data(original, args, kwargs)
    return {
        "model": call_data.get("model"),
        "messages": call_data.get("messages"),
        # Anthropic passes the system prompt via the ``system`` argument.
        "prompt": call_data.get("system"),
    }
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def _response_extractor(response: Any) -> dict[str, Any]:
    """Summarise an Anthropic response: extracted text plus normalised usage."""
    usage = _safe_get(response, "usage")
    raw_in = _safe_get(usage, "input_tokens")
    raw_out = _safe_get(usage, "output_tokens")
    input_tokens, output_tokens, total_tokens = normalize_usage(raw_in, raw_out)
    return {
        "response_text": _extract_text(response),
        "input_tokens": input_tokens,
        "output_tokens": output_tokens,
        "total_tokens": total_tokens,
    }
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def get_adapter() -> ProviderAdapter:
    """Adapter that patches the sync and async Anthropic ``Messages.create``."""
    variants = (
        ("anthropic.messages.create", "Messages"),
        ("anthropic.messages.async_create", "AsyncMessages"),
    )
    specs = tuple(
        TargetSpec(
            key=key,
            provider="anthropic",
            module="anthropic.resources.messages.messages",
            path=(cls_name, "create"),
            request_extractor=_request_extractor,
            response_extractor=_response_extractor,
        )
        for key, cls_name in variants
    )
    return ProviderAdapter(name="anthropic", targets=specs)
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Any
|
|
4
|
+
|
|
5
|
+
from ..run import _current_run_state, observe_run
|
|
6
|
+
from .llm_tracer import trace_http_llm_call
|
|
7
|
+
from .provider_detection import detect_provider
|
|
8
|
+
|
|
9
|
+
# Idempotency flag: set once ``requests.Session.request`` has been patched.
_REQUESTS_PATCHED = False
# The original (unwrapped) ``requests.Session.request``, captured before patching.
_ORIGINAL_REQUEST: Any = None
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def instrument_requests() -> None:
    """Monkeypatch ``requests.Session.request`` to trace LLM provider calls.

    Idempotent: returns immediately if this module already patched, and also
    detects a patch applied elsewhere via the ``__agentscope_wrapped__``
    marker. Silently does nothing when ``requests`` is not installed.
    """
    global _REQUESTS_PATCHED
    global _ORIGINAL_REQUEST

    if _REQUESTS_PATCHED:
        return

    try:
        import requests
    except Exception:
        # ``requests`` not installed (or failed to import) -- nothing to patch.
        return

    current = requests.Session.request
    if getattr(current, "__agentscope_wrapped__", False):
        # Already wrapped (e.g. by another import of this module); record it.
        _REQUESTS_PATCHED = True
        return

    _ORIGINAL_REQUEST = current

    def wrapped_request(self: Any, method: str, url: str, *args: Any, **kwargs: Any) -> Any:
        # Only trace URLs recognised as known LLM provider endpoints.
        provider = detect_provider(url)
        if provider is None:
            return _ORIGINAL_REQUEST(self, method, url, *args, **kwargs)

        # NOTE(review): only the ``json=`` payload is inspected; requests made
        # with ``data=`` bodies are traced with an empty payload.
        payload = kwargs.get("json")
        if payload is None:
            payload = {}

        def request_fn() -> Any:
            return _ORIGINAL_REQUEST(self, method, url, *args, **kwargs)

        # Open an implicit run when the call happens outside any observed run.
        if _current_run_state() is None:
            with observe_run(f"{provider}_http_auto_instrumentation", agent_name=provider):
                return trace_http_llm_call(provider, url, payload, request_fn)
        return trace_http_llm_call(provider, url, payload, request_fn)

    wrapped_request.__agentscope_wrapped__ = True  # type: ignore[attr-defined]
    requests.Session.request = wrapped_request
    _REQUESTS_PATCHED = True
|
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import inspect
|
|
4
|
+
from typing import Any, Callable
|
|
5
|
+
|
|
6
|
+
from .registry import ProviderAdapter, TargetSpec
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def _safe_getattr(value: Any, name: str, default: Any = None) -> Any:
|
|
10
|
+
try:
|
|
11
|
+
return getattr(value, name, default)
|
|
12
|
+
except Exception:
|
|
13
|
+
return default
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def _extract_call_data(
|
|
17
|
+
original: Callable[..., Any], args: tuple[Any, ...], kwargs: dict[str, Any]
|
|
18
|
+
) -> dict[str, Any]:
|
|
19
|
+
try:
|
|
20
|
+
bound = inspect.signature(original).bind_partial(*args, **kwargs)
|
|
21
|
+
data = dict(bound.arguments)
|
|
22
|
+
extra_kwargs = data.pop("kwargs", None)
|
|
23
|
+
if isinstance(extra_kwargs, dict):
|
|
24
|
+
data.update(extra_kwargs)
|
|
25
|
+
return data
|
|
26
|
+
except Exception:
|
|
27
|
+
return dict(kwargs)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def _extract_text(response: Any) -> str | None:
|
|
31
|
+
if isinstance(response, str):
|
|
32
|
+
return response
|
|
33
|
+
if isinstance(response, dict):
|
|
34
|
+
content = response.get("content")
|
|
35
|
+
if isinstance(content, str):
|
|
36
|
+
return content
|
|
37
|
+
text = response.get("text")
|
|
38
|
+
if isinstance(text, str):
|
|
39
|
+
return text
|
|
40
|
+
return None
|
|
41
|
+
content = _safe_getattr(response, "content")
|
|
42
|
+
if isinstance(content, str):
|
|
43
|
+
return content
|
|
44
|
+
text = _safe_getattr(response, "text")
|
|
45
|
+
if isinstance(text, str):
|
|
46
|
+
return text
|
|
47
|
+
return None
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def _request_extractor(
    original: Callable[..., Any], args: tuple[Any, ...], kwargs: dict[str, Any]
) -> dict[str, Any]:
    """Derive model name and prompt/messages from an invoke/ainvoke call."""
    data = _extract_call_data(original, args, kwargs)
    # ``self`` is the bound model object; its identifier is usually on
    # ``model_name`` (chat models) or ``model`` (LLMs).
    model_obj = data.get("self")
    model_name = _safe_getattr(model_obj, "model_name") or _safe_getattr(model_obj, "model")
    raw_input = data.get("input") or data.get("messages") or data.get("prompt")
    return {
        "model": model_name,
        "messages": raw_input if isinstance(raw_input, list) else None,
        "prompt": raw_input if isinstance(raw_input, (str, dict)) else None,
    }
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def _response_extractor(response: Any) -> dict[str, Any]:
    """LangChain responses carry no usage info here; only the text is extracted."""
    extracted: dict[str, Any] = {"input_tokens": None, "output_tokens": None}
    extracted["response_text"] = _extract_text(response)
    return extracted
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
def get_adapter() -> ProviderAdapter:
    """Adapter patching sync/async invoke on LangChain chat models and LLMs."""
    combos = (
        ("langchain.chat.invoke", "langchain_core.language_models.chat_models", "BaseChatModel", "invoke"),
        ("langchain.chat.ainvoke", "langchain_core.language_models.chat_models", "BaseChatModel", "ainvoke"),
        ("langchain.llm.invoke", "langchain_core.language_models.llms", "BaseLLM", "invoke"),
        ("langchain.llm.ainvoke", "langchain_core.language_models.llms", "BaseLLM", "ainvoke"),
    )
    targets = tuple(
        TargetSpec(
            key=key,
            provider="langchain",
            module=module,
            path=(cls_name, method),
            request_extractor=_request_extractor,
            response_extractor=_response_extractor,
        )
        for key, module, cls_name, method in combos
    )
    return ProviderAdapter(name="langchain", targets=targets)
|
|
@@ -0,0 +1,160 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import time
|
|
4
|
+
import uuid
|
|
5
|
+
from typing import Any, Callable
|
|
6
|
+
|
|
7
|
+
from ..run import _current_run_state
|
|
8
|
+
from ..span import observe_span
|
|
9
|
+
from .token_usage import normalize_usage
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def _extract_response_text(body: Any) -> str | None:
|
|
13
|
+
if not isinstance(body, dict):
|
|
14
|
+
return None
|
|
15
|
+
|
|
16
|
+
choices = body.get("choices")
|
|
17
|
+
if isinstance(choices, list) and choices:
|
|
18
|
+
message = choices[0].get("message")
|
|
19
|
+
if isinstance(message, dict):
|
|
20
|
+
content = message.get("content")
|
|
21
|
+
if isinstance(content, str):
|
|
22
|
+
return content
|
|
23
|
+
if isinstance(content, list):
|
|
24
|
+
parts = [part.get("text") for part in content if isinstance(part, dict)]
|
|
25
|
+
text_parts = [part for part in parts if isinstance(part, str)]
|
|
26
|
+
if text_parts:
|
|
27
|
+
return "".join(text_parts)
|
|
28
|
+
|
|
29
|
+
text = choices[0].get("text")
|
|
30
|
+
if isinstance(text, str):
|
|
31
|
+
return text
|
|
32
|
+
|
|
33
|
+
content = body.get("content")
|
|
34
|
+
if isinstance(content, list) and content:
|
|
35
|
+
first = content[0]
|
|
36
|
+
if isinstance(first, dict):
|
|
37
|
+
text = first.get("text")
|
|
38
|
+
if isinstance(text, str):
|
|
39
|
+
return text
|
|
40
|
+
|
|
41
|
+
response = body.get("response")
|
|
42
|
+
if isinstance(response, str):
|
|
43
|
+
return response
|
|
44
|
+
|
|
45
|
+
output = body.get("output")
|
|
46
|
+
if isinstance(output, str):
|
|
47
|
+
return output
|
|
48
|
+
|
|
49
|
+
return None
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def _extract_usage(body: Any) -> tuple[int | None, int | None, int | None]:
    """Read token usage from a response body, accepting OpenAI or Anthropic keys."""
    usage = body.get("usage") if isinstance(body, dict) else None
    if not isinstance(usage, dict):
        return None, None, None

    # OpenAI uses prompt/completion_tokens; Anthropic uses input/output_tokens.
    prompt_side = usage.get("prompt_tokens")
    if prompt_side is None:
        prompt_side = usage.get("input_tokens")

    completion_side = usage.get("completion_tokens")
    if completion_side is None:
        completion_side = usage.get("output_tokens")

    return normalize_usage(prompt_side, completion_side)
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
def _append_artifact(*, span: dict[str, Any], kind: str, payload: dict[str, Any]) -> None:
    """Record an artifact for *span* on the active run; no-op without a run."""
    state = _current_run_state()
    if state is not None:
        state.artifacts.append(
            {
                "id": str(uuid.uuid4()),
                "run_id": span["run_id"],
                "span_id": span["id"],
                "kind": kind,
                "payload": payload,
            }
        )
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
def _extract_prompt_fields(payload: Any) -> tuple[Any, Any, Any]:
|
|
88
|
+
if not isinstance(payload, dict):
|
|
89
|
+
return None, None, None
|
|
90
|
+
|
|
91
|
+
messages = payload.get("messages")
|
|
92
|
+
prompt = payload.get("prompt")
|
|
93
|
+
if prompt is None:
|
|
94
|
+
prompt = payload.get("input")
|
|
95
|
+
return messages, prompt, payload.get("input")
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
def trace_http_llm_call(
    provider: str,
    url: str,
    payload: Any,
    request_fn: Callable[[], Any],
) -> Any:
    """Execute *request_fn* inside an ``llm_call`` span, recording artifacts.

    Emits an ``llm.prompt`` artifact before the request and an
    ``llm.response`` artifact (with latency, token usage, and HTTP status)
    afterwards. The raw response object is returned unchanged.
    """
    with observe_span("llm_call", span_type="llm_call") as span:
        span["provider"] = provider
        span["endpoint_url"] = url
        span["model"] = payload.get("model") if isinstance(payload, dict) else None
        messages, prompt, input_value = _extract_prompt_fields(payload)

        _append_artifact(
            span=span,
            kind="llm.prompt",
            payload={
                "provider": provider,
                "endpoint_url": url,
                "model": span.get("model"),
                "messages": messages,
                "prompt": prompt,
                "input": input_value,
                "payload": payload,
            },
        )

        # Bug fix: start the timer immediately before the request so
        # latency_ms measures only the HTTP call, not the span setup and
        # prompt-artifact bookkeeping above (the original sampled the clock
        # before entering the span).
        started = time.perf_counter()
        response = request_fn()
        latency_ms = int((time.perf_counter() - started) * 1000)

        response_body: Any = None
        try:
            response_body = response.json()
        except Exception:
            # Non-JSON (or streaming) bodies are recorded as None.
            response_body = None

        response_text = _extract_response_text(response_body)
        input_tokens, output_tokens, total_tokens = _extract_usage(response_body)
        http_status = getattr(response, "status_code", None)

        span["latency_ms"] = latency_ms
        span["input_tokens"] = input_tokens
        span["output_tokens"] = output_tokens
        span["total_tokens"] = total_tokens

        _append_artifact(
            span=span,
            kind="llm.response",
            payload={
                "provider": provider,
                "endpoint_url": url,
                "model": span.get("model"),
                "response": response_body,
                "response_text": response_text,
                "http_status": http_status,
                "latency_ms": latency_ms,
                "input_tokens": input_tokens,
                "output_tokens": output_tokens,
                "total_tokens": total_tokens,
            },
        )

        return response
|