ddapm-test-agent 1.39.0__py3-none-any.whl → 1.40.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ddapm_test_agent/agent.py +76 -1
- ddapm_test_agent/llmobs_event_platform.py +997 -0
- {ddapm_test_agent-1.39.0.dist-info → ddapm_test_agent-1.40.0.dist-info}/METADATA +11 -1
- {ddapm_test_agent-1.39.0.dist-info → ddapm_test_agent-1.40.0.dist-info}/RECORD +9 -8
- {ddapm_test_agent-1.39.0.dist-info → ddapm_test_agent-1.40.0.dist-info}/WHEEL +1 -1
- {ddapm_test_agent-1.39.0.dist-info → ddapm_test_agent-1.40.0.dist-info}/entry_points.txt +0 -0
- {ddapm_test_agent-1.39.0.dist-info → ddapm_test_agent-1.40.0.dist-info}/licenses/LICENSE.BSD3 +0 -0
- {ddapm_test_agent-1.39.0.dist-info → ddapm_test_agent-1.40.0.dist-info}/licenses/LICENSE.apache2 +0 -0
- {ddapm_test_agent-1.39.0.dist-info → ddapm_test_agent-1.40.0.dist-info}/top_level.txt +0 -0
ddapm_test_agent/agent.py
CHANGED
|
@@ -53,6 +53,7 @@ from .checks import CheckTrace
|
|
|
53
53
|
from .checks import Checks
|
|
54
54
|
from .checks import start_trace
|
|
55
55
|
from .integration import Integration
|
|
56
|
+
from .llmobs_event_platform import LLMObsEventPlatformAPI
|
|
56
57
|
from .logs import LOGS_ENDPOINT
|
|
57
58
|
from .logs import OTLPLogsGRPCServicer
|
|
58
59
|
from .logs import decode_logs_request
|
|
@@ -209,7 +210,7 @@ async def _prepare_and_send_request(data: bytes, request: Request, headers: Mapp
|
|
|
209
210
|
log.info("Forwarding request to agent at %r", full_agent_url)
|
|
210
211
|
log.debug(f"Using headers: {headers}")
|
|
211
212
|
|
|
212
|
-
|
|
213
|
+
client_response, body = await _forward_request(data, headers, full_agent_url)
|
|
213
214
|
return web.Response(
|
|
214
215
|
status=client_response.status,
|
|
215
216
|
headers=client_response.headers,
|
|
@@ -805,6 +806,34 @@ class Agent:
|
|
|
805
806
|
raise web.HTTPBadRequest(text=msg)
|
|
806
807
|
|
|
807
808
|
async def handle_evp_proxy_v2_api_v2_llmobs(self, request: Request) -> web.Response:
|
|
809
|
+
if request.app["disable_llmobs_data_forwarding"]:
|
|
810
|
+
return web.HTTPOk()
|
|
811
|
+
|
|
812
|
+
dd_site = request.app["dd_site"]
|
|
813
|
+
dd_api_key = request.app["dd_api_key"]
|
|
814
|
+
agent_url = request.app["agent_url"]
|
|
815
|
+
headers = request.headers.copy()
|
|
816
|
+
if agent_url:
|
|
817
|
+
url = f"{agent_url}/evp_proxy/v2/api/v2/llmobs" # use configured agent URL if provided
|
|
818
|
+
elif dd_api_key is None:
|
|
819
|
+
log.error("No DD_API_KEY set to forward LLM Observability events to Datadog. Skipping forwarding.")
|
|
820
|
+
return web.HTTPOk()
|
|
821
|
+
elif not dd_site:
|
|
822
|
+
log.error("No DD_SITE set to forward LLM Observability events to Datadog. Skipping forwarding.")
|
|
823
|
+
return web.HTTPOk()
|
|
824
|
+
else:
|
|
825
|
+
url = f"https://llmobs-intake.{dd_site}/api/v2/llmobs"
|
|
826
|
+
headers["DD-API-KEY"] = dd_api_key
|
|
827
|
+
|
|
828
|
+
async with ClientSession() as session:
|
|
829
|
+
async with session.post(url, headers=headers, data=await request.read()) as resp:
|
|
830
|
+
if not resp.ok:
|
|
831
|
+
log.warning(
|
|
832
|
+
f"Failed to forward LLM Observability events to Datadog: {resp.status} {await resp.text()}"
|
|
833
|
+
)
|
|
834
|
+
else:
|
|
835
|
+
log.info(f"Forwarded LLM Observability events to Datadog: {resp.status} {await resp.text()}")
|
|
836
|
+
|
|
808
837
|
return web.HTTPOk()
|
|
809
838
|
|
|
810
839
|
async def handle_evp_proxy_v2_llmobs_eval_metric(self, request: Request) -> web.Response:
|
|
@@ -881,6 +910,17 @@ class Agent:
|
|
|
881
910
|
return web.HTTPAccepted()
|
|
882
911
|
|
|
883
912
|
async def handle_info(self, request: Request) -> web.Response:
|
|
913
|
+
# CORS headers for cross-origin requests from Datadog UI
|
|
914
|
+
headers = {
|
|
915
|
+
"Access-Control-Allow-Origin": "*",
|
|
916
|
+
"Access-Control-Allow-Methods": "GET, OPTIONS",
|
|
917
|
+
"Access-Control-Allow-Headers": "Content-Type",
|
|
918
|
+
}
|
|
919
|
+
|
|
920
|
+
# Handle OPTIONS preflight
|
|
921
|
+
if request.method == "OPTIONS":
|
|
922
|
+
return web.Response(status=200, headers=headers)
|
|
923
|
+
|
|
884
924
|
return web.json_response(
|
|
885
925
|
{
|
|
886
926
|
"version": os.environ.get("TEST_AGENT_VERSION", "test"),
|
|
@@ -902,6 +942,7 @@ class Agent:
|
|
|
902
942
|
"peer_tags": ["db.name", "mongodb.db", "messaging.system"],
|
|
903
943
|
"span_events": True, # Advertise support for the top-level Span field for Span Events
|
|
904
944
|
},
|
|
945
|
+
headers=headers,
|
|
905
946
|
)
|
|
906
947
|
|
|
907
948
|
async def _handle_traces(self, request: Request, version: Literal["v0.4", "v0.5", "v0.7", "v1"]) -> web.Response:
|
|
@@ -1601,6 +1642,9 @@ def make_app(
|
|
|
1601
1642
|
vcr_ci_mode: bool,
|
|
1602
1643
|
vcr_provider_map: str,
|
|
1603
1644
|
vcr_ignore_headers: str,
|
|
1645
|
+
dd_site: str,
|
|
1646
|
+
dd_api_key: str | None,
|
|
1647
|
+
disable_llmobs_data_forwarding: bool,
|
|
1604
1648
|
enable_web_ui: bool = False,
|
|
1605
1649
|
) -> web.Application:
|
|
1606
1650
|
agent = Agent()
|
|
@@ -1650,6 +1694,7 @@ def make_app(
|
|
|
1650
1694
|
web.post("/evp_proxy/v2/api/v2/exposures", agent.handle_evp_proxy_v2_api_v2_exposures),
|
|
1651
1695
|
web.post("/evp_proxy/v4/api/v2/errorsintake", agent.handle_evp_proxy_v4_api_v2_errorsintake),
|
|
1652
1696
|
web.get("/info", agent.handle_info),
|
|
1697
|
+
web.options("/info", agent.handle_info),
|
|
1653
1698
|
web.get("/test/session/start", agent.handle_session_start),
|
|
1654
1699
|
web.get("/test/session/clear", agent.handle_session_clear),
|
|
1655
1700
|
web.get("/test/session/snapshot", agent.handle_snapshot),
|
|
@@ -1682,6 +1727,12 @@ def make_app(
|
|
|
1682
1727
|
),
|
|
1683
1728
|
]
|
|
1684
1729
|
)
|
|
1730
|
+
|
|
1731
|
+
# Add LLM Observability Event Platform API routes
|
|
1732
|
+
# These provide Datadog Event Platform compatible endpoints for local development
|
|
1733
|
+
llmobs_event_platform_api = LLMObsEventPlatformAPI(agent)
|
|
1734
|
+
app.add_routes(llmobs_event_platform_api.get_routes())
|
|
1735
|
+
|
|
1685
1736
|
checks = Checks(
|
|
1686
1737
|
checks=[
|
|
1687
1738
|
CheckMetaTracerVersionHeader,
|
|
@@ -1707,6 +1758,9 @@ def make_app(
|
|
|
1707
1758
|
app["snapshot_removed_attrs"] = snapshot_removed_attrs
|
|
1708
1759
|
app["snapshot_regex_placeholders"] = snapshot_regex_placeholders
|
|
1709
1760
|
app["vcr_cassettes_directory"] = vcr_cassettes_directory
|
|
1761
|
+
app["dd_site"] = dd_site
|
|
1762
|
+
app["dd_api_key"] = dd_api_key
|
|
1763
|
+
app["disable_llmobs_data_forwarding"] = disable_llmobs_data_forwarding
|
|
1710
1764
|
return app
|
|
1711
1765
|
|
|
1712
1766
|
|
|
@@ -2010,6 +2064,24 @@ def main(args: Optional[List[str]] = None) -> None:
|
|
|
2010
2064
|
default=int(os.environ.get("MAX_REQUESTS", 200)),
|
|
2011
2065
|
help="Maximum number of requests to keep in memory for the UI (default: 200). Older requests are discarded when limit is reached.",
|
|
2012
2066
|
)
|
|
2067
|
+
parser.add_argument(
|
|
2068
|
+
"--dd-site",
|
|
2069
|
+
type=str,
|
|
2070
|
+
default=os.environ.get("DD_SITE", "datadoghq.com"),
|
|
2071
|
+
help="Datadog site to use for the agent. Example: --dd-site=datadoghq.com",
|
|
2072
|
+
)
|
|
2073
|
+
parser.add_argument(
|
|
2074
|
+
"--dd-api-key",
|
|
2075
|
+
type=str,
|
|
2076
|
+
default=os.environ.get("DD_API_KEY", ""),
|
|
2077
|
+
help="Datadog API key to use for the agent. Example: --dd-api-key=1234567890",
|
|
2078
|
+
)
|
|
2079
|
+
parser.add_argument(
|
|
2080
|
+
"--disable-llmobs-data-forwarding",
|
|
2081
|
+
action="store_true",
|
|
2082
|
+
default=os.environ.get("DISABLE_LLMOBS_DATA_FORWARDING", "").lower() in ("true", "1", "yes"),
|
|
2083
|
+
help="Disable data forwarding to Datadog.",
|
|
2084
|
+
)
|
|
2013
2085
|
parsed_args = parser.parse_args(args=args)
|
|
2014
2086
|
logging.basicConfig(level=parsed_args.log_level)
|
|
2015
2087
|
|
|
@@ -2056,6 +2128,9 @@ def main(args: Optional[List[str]] = None) -> None:
|
|
|
2056
2128
|
vcr_ci_mode=parsed_args.vcr_ci_mode,
|
|
2057
2129
|
vcr_provider_map=parsed_args.vcr_provider_map,
|
|
2058
2130
|
vcr_ignore_headers=parsed_args.vcr_ignore_headers,
|
|
2131
|
+
dd_site=parsed_args.dd_site,
|
|
2132
|
+
dd_api_key=parsed_args.dd_api_key,
|
|
2133
|
+
disable_llmobs_data_forwarding=parsed_args.disable_llmobs_data_forwarding,
|
|
2059
2134
|
enable_web_ui=parsed_args.web_ui_port > 0,
|
|
2060
2135
|
)
|
|
2061
2136
|
|
|
@@ -0,0 +1,997 @@
|
|
|
1
|
+
"""LLM Observability Event Platform API."""
|
|
2
|
+
|
|
3
|
+
from datetime import datetime
|
|
4
|
+
import gzip
|
|
5
|
+
import json
|
|
6
|
+
import logging
|
|
7
|
+
import re
|
|
8
|
+
import time
|
|
9
|
+
from typing import Any
|
|
10
|
+
from typing import Awaitable
|
|
11
|
+
from typing import Callable
|
|
12
|
+
from typing import Dict
|
|
13
|
+
from typing import List
|
|
14
|
+
from typing import Optional
|
|
15
|
+
from typing import TYPE_CHECKING
|
|
16
|
+
import uuid
|
|
17
|
+
|
|
18
|
+
from aiohttp import web
|
|
19
|
+
from aiohttp.web import Request
|
|
20
|
+
import msgpack
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
if TYPE_CHECKING:
|
|
24
|
+
from .agent import Agent
|
|
25
|
+
|
|
26
|
+
log = logging.getLogger(__name__)
|
|
27
|
+
|
|
28
|
+
# CORS headers for cross-origin requests from Datadog UI
|
|
29
|
+
CORS_HEADERS = {
|
|
30
|
+
"Access-Control-Allow-Origin": "*",
|
|
31
|
+
"Access-Control-Allow-Methods": "GET, POST, OPTIONS",
|
|
32
|
+
"Access-Control-Allow-Headers": "Content-Type, Authorization, X-DD-Api-Key, X-DD-Application-Key, "
|
|
33
|
+
"X-CSRF-Token, x-csrf-token, x-web-ui-version, X-Datadog-Trace-ID, "
|
|
34
|
+
"X-Datadog-Parent-ID, X-Datadog-Origin, X-Datadog-Sampling-Priority, Accept, Origin, Referer",
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def with_cors(
|
|
39
|
+
handler: Callable[[Request], Awaitable[web.Response]],
|
|
40
|
+
) -> Callable[[Request], Awaitable[web.Response]]:
|
|
41
|
+
"""Wrap handler to add CORS headers and handle OPTIONS preflight."""
|
|
42
|
+
|
|
43
|
+
async def wrapper(request: Request) -> web.Response:
|
|
44
|
+
if request.method == "OPTIONS":
|
|
45
|
+
return web.Response(status=200, headers=CORS_HEADERS)
|
|
46
|
+
response = await handler(request)
|
|
47
|
+
response.headers.update(CORS_HEADERS)
|
|
48
|
+
return response
|
|
49
|
+
|
|
50
|
+
return wrapper
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def decode_llmobs_payload(data: bytes, content_type: str) -> List[Dict[str, Any]]:
|
|
54
|
+
"""Decode LLMObs payload (gzip+msgpack or JSON)."""
|
|
55
|
+
events = []
|
|
56
|
+
try:
|
|
57
|
+
if content_type and "gzip" in content_type.lower():
|
|
58
|
+
data = gzip.decompress(data)
|
|
59
|
+
|
|
60
|
+
if content_type and "msgpack" in content_type.lower():
|
|
61
|
+
payload = msgpack.unpackb(data, raw=False, strict_map_key=False)
|
|
62
|
+
else:
|
|
63
|
+
try:
|
|
64
|
+
payload = json.loads(data)
|
|
65
|
+
except json.JSONDecodeError:
|
|
66
|
+
payload = msgpack.unpackb(data, raw=False, strict_map_key=False)
|
|
67
|
+
|
|
68
|
+
if isinstance(payload, list):
|
|
69
|
+
events.extend(payload)
|
|
70
|
+
else:
|
|
71
|
+
events.append(payload)
|
|
72
|
+
except Exception as e:
|
|
73
|
+
log.warning(f"Failed to decode LLMObs payload: {e}")
|
|
74
|
+
return events
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
def extract_spans_from_events(events: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
|
78
|
+
"""Extract individual spans from LLMObs event payloads."""
|
|
79
|
+
spans = []
|
|
80
|
+
for event in events:
|
|
81
|
+
event_ml_app = event.get("ml_app", "")
|
|
82
|
+
event_tags = event.get("tags", [])
|
|
83
|
+
|
|
84
|
+
for span in event.get("spans", []):
|
|
85
|
+
span_tags = span.get("tags", [])
|
|
86
|
+
if event_tags:
|
|
87
|
+
span_tags = list(set(span_tags + event_tags))
|
|
88
|
+
span["tags"] = span_tags
|
|
89
|
+
span = remap_sdk_span_to_ui_format(span, event_ml_app)
|
|
90
|
+
spans.append(span)
|
|
91
|
+
return spans
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def remap_sdk_span_to_ui_format(span: Dict[str, Any], event_ml_app: str = "") -> Dict[str, Any]:
|
|
95
|
+
"""Remap span from SDK format to UI-expected format (extract ml_app, service, env from tags)."""
|
|
96
|
+
tags = span.get("tags", [])
|
|
97
|
+
extracted = extract_fields_from_tags(tags)
|
|
98
|
+
|
|
99
|
+
ml_app = extracted.get("ml_app") or event_ml_app or span.get("ml_app", "unknown")
|
|
100
|
+
span["ml_app"] = ml_app
|
|
101
|
+
|
|
102
|
+
if "service" not in span or not span["service"]:
|
|
103
|
+
span["service"] = extracted.get("service", "")
|
|
104
|
+
if "env" not in span or not span["env"]:
|
|
105
|
+
span["env"] = extracted.get("env", "")
|
|
106
|
+
|
|
107
|
+
meta = span.get("meta", {})
|
|
108
|
+
span_kind = meta.get("span", {}).get("kind", "llm")
|
|
109
|
+
|
|
110
|
+
if "meta" not in span:
|
|
111
|
+
span["meta"] = {}
|
|
112
|
+
if "span" not in span["meta"]:
|
|
113
|
+
span["meta"]["span"] = {}
|
|
114
|
+
span["meta"]["span"]["kind"] = span_kind
|
|
115
|
+
span["_ui_kind"] = span_kind
|
|
116
|
+
span["_ui_ml_app"] = ml_app
|
|
117
|
+
|
|
118
|
+
return span
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
def extract_fields_from_tags(tags: List[str]) -> Dict[str, str]:
|
|
122
|
+
"""Extract ml_app, service, env, etc. from tags array."""
|
|
123
|
+
result = {}
|
|
124
|
+
fields_to_extract = ["ml_app", "service", "env", "version", "source", "language"]
|
|
125
|
+
for tag in tags:
|
|
126
|
+
if not isinstance(tag, str) or ":" not in tag:
|
|
127
|
+
continue
|
|
128
|
+
key, value = tag.split(":", 1)
|
|
129
|
+
if key in fields_to_extract:
|
|
130
|
+
result[key] = value
|
|
131
|
+
return result
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
DURATION_MULTIPLIERS = {
|
|
135
|
+
"ns": 1,
|
|
136
|
+
"us": 1_000,
|
|
137
|
+
"μs": 1_000,
|
|
138
|
+
"ms": 1_000_000,
|
|
139
|
+
"s": 1_000_000_000,
|
|
140
|
+
"m": 60_000_000_000,
|
|
141
|
+
"h": 3_600_000_000_000,
|
|
142
|
+
}
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
def parse_duration_to_nanoseconds(duration_str: str) -> Optional[float]:
|
|
146
|
+
"""Parse duration string (e.g., '5.5s', '100ms') to nanoseconds."""
|
|
147
|
+
duration_str = duration_str.strip()
|
|
148
|
+
match = re.match(r"^([0-9]*\.?[0-9]+)(ns|μs|us|ms|s|m|h)$", duration_str, re.IGNORECASE)
|
|
149
|
+
|
|
150
|
+
if not match:
|
|
151
|
+
try:
|
|
152
|
+
return float(duration_str)
|
|
153
|
+
except ValueError:
|
|
154
|
+
return None
|
|
155
|
+
|
|
156
|
+
value = float(match.group(1))
|
|
157
|
+
unit = match.group(2).lower()
|
|
158
|
+
return value * DURATION_MULTIPLIERS.get(unit, 1)
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
def parse_filter_query(query: str) -> Dict[str, Any]:
|
|
162
|
+
"""Parse filter query string into filters and text search."""
|
|
163
|
+
result: Dict[str, Any] = {"filters": [], "text_search": ""}
|
|
164
|
+
if not query:
|
|
165
|
+
return result
|
|
166
|
+
|
|
167
|
+
remaining = query
|
|
168
|
+
|
|
169
|
+
# Range filters: @field:[min TO max]
|
|
170
|
+
for field, min_val, max_val in re.findall(r"@([\w.]+):\[([^\]]+)\s+TO\s+([^\]]+)\]", remaining, re.IGNORECASE):
|
|
171
|
+
f: Dict[str, Any] = {"field": field, "type": "facet", "operator": "range"}
|
|
172
|
+
if field == "duration":
|
|
173
|
+
min_ns, max_ns = parse_duration_to_nanoseconds(min_val.strip()), parse_duration_to_nanoseconds(
|
|
174
|
+
max_val.strip()
|
|
175
|
+
)
|
|
176
|
+
if min_ns is not None:
|
|
177
|
+
f["min"] = min_ns
|
|
178
|
+
if max_ns is not None:
|
|
179
|
+
f["max"] = max_ns
|
|
180
|
+
else:
|
|
181
|
+
try:
|
|
182
|
+
f["min"] = float(min_val.strip())
|
|
183
|
+
except ValueError:
|
|
184
|
+
f["min"] = min_val.strip()
|
|
185
|
+
try:
|
|
186
|
+
f["max"] = float(max_val.strip())
|
|
187
|
+
except ValueError:
|
|
188
|
+
f["max"] = max_val.strip()
|
|
189
|
+
result["filters"].append(f)
|
|
190
|
+
remaining = re.sub(r"@([\w.]+):\[([^\]]+)\s+TO\s+([^\]]+)\]", "", remaining, flags=re.IGNORECASE)
|
|
191
|
+
|
|
192
|
+
# Comparison filters: @field:>=value, @field:<=value, etc.
|
|
193
|
+
op_map = {">=": "gte", "<=": "lte", ">": "gt", "<": "lt"}
|
|
194
|
+
for field, op, value in re.findall(r"@([\w.]+):(>=|<=|>|<)([^\s]+)", remaining):
|
|
195
|
+
f = {"field": field, "type": "facet", "operator": op_map[op]}
|
|
196
|
+
if field == "duration":
|
|
197
|
+
parsed = parse_duration_to_nanoseconds(value)
|
|
198
|
+
if parsed is not None:
|
|
199
|
+
f["value"] = parsed
|
|
200
|
+
else:
|
|
201
|
+
try:
|
|
202
|
+
f["value"] = float(value)
|
|
203
|
+
except ValueError:
|
|
204
|
+
f["value"] = value
|
|
205
|
+
result["filters"].append(f)
|
|
206
|
+
remaining = re.sub(r"@([\w.]+):(>=|<=|>|<)([^\s]+)", "", remaining)
|
|
207
|
+
|
|
208
|
+
# Facet filters: @field:value
|
|
209
|
+
for field, value in re.findall(r"@([\w.]+):([^\s\[]+)", remaining):
|
|
210
|
+
if not value.startswith((">=", "<=", ">", "<")):
|
|
211
|
+
result["filters"].append({"field": field, "value": value, "type": "facet"})
|
|
212
|
+
remaining = re.sub(r"@([\w.]+):([^\s\[]+)", "", remaining)
|
|
213
|
+
|
|
214
|
+
# Tag filters: field:value (without @)
|
|
215
|
+
for field, value in re.findall(r"(?<!\S)([\w.]+):([^\s]+)", remaining):
|
|
216
|
+
result["filters"].append({"field": field, "value": value, "type": "tag"})
|
|
217
|
+
remaining = re.sub(r"(?<!\S)([\w.]+):([^\s]+)", "", remaining)
|
|
218
|
+
|
|
219
|
+
result["text_search"] = remaining.strip()
|
|
220
|
+
return result
|
|
221
|
+
|
|
222
|
+
|
|
223
|
+
def match_wildcard(value: str, pattern: str) -> bool:
|
|
224
|
+
"""Match value against pattern with wildcard support (*). Case-insensitive."""
|
|
225
|
+
v, p = value.lower(), pattern.lower()
|
|
226
|
+
if p == "*":
|
|
227
|
+
return True
|
|
228
|
+
if p.startswith("*") and p.endswith("*"):
|
|
229
|
+
return p[1:-1] in v
|
|
230
|
+
if p.startswith("*"):
|
|
231
|
+
return v.endswith(p[1:])
|
|
232
|
+
if p.endswith("*"):
|
|
233
|
+
return v.startswith(p[:-1])
|
|
234
|
+
return v == p
|
|
235
|
+
|
|
236
|
+
|
|
237
|
+
def apply_filters(spans: List[Dict[str, Any]], parsed_query: Dict[str, Any]) -> List[Dict[str, Any]]:
|
|
238
|
+
"""Apply filter conditions and text search to spans."""
|
|
239
|
+
filters = parsed_query.get("filters", [])
|
|
240
|
+
text_search = parsed_query.get("text_search", "").lower()
|
|
241
|
+
|
|
242
|
+
if not filters and not text_search:
|
|
243
|
+
return spans
|
|
244
|
+
|
|
245
|
+
filtered = []
|
|
246
|
+
for span in spans:
|
|
247
|
+
if not _span_matches_filters(span, filters):
|
|
248
|
+
continue
|
|
249
|
+
if text_search and not text_search_span(span, text_search):
|
|
250
|
+
continue
|
|
251
|
+
filtered.append(span)
|
|
252
|
+
return filtered
|
|
253
|
+
|
|
254
|
+
|
|
255
|
+
def _span_matches_filters(span: Dict[str, Any], filters: List[Dict[str, Any]]) -> bool:
|
|
256
|
+
"""Check if span matches all filters."""
|
|
257
|
+
for f in filters:
|
|
258
|
+
field = f["field"]
|
|
259
|
+
operator = f.get("operator")
|
|
260
|
+
span_value = get_span_field_value(span, field)
|
|
261
|
+
|
|
262
|
+
if operator == "range":
|
|
263
|
+
if span_value is None:
|
|
264
|
+
return False
|
|
265
|
+
try:
|
|
266
|
+
num = float(span_value)
|
|
267
|
+
if f.get("min") is not None and num < f["min"]:
|
|
268
|
+
return False
|
|
269
|
+
if f.get("max") is not None and num > f["max"]:
|
|
270
|
+
return False
|
|
271
|
+
except (ValueError, TypeError):
|
|
272
|
+
return False
|
|
273
|
+
|
|
274
|
+
elif operator in ("gte", "lte", "gt", "lt"):
|
|
275
|
+
if span_value is None or f.get("value") is None:
|
|
276
|
+
return False
|
|
277
|
+
try:
|
|
278
|
+
num, cmp = float(span_value), f["value"]
|
|
279
|
+
if operator == "gte" and num < cmp:
|
|
280
|
+
return False
|
|
281
|
+
if operator == "lte" and num > cmp:
|
|
282
|
+
return False
|
|
283
|
+
if operator == "gt" and num <= cmp:
|
|
284
|
+
return False
|
|
285
|
+
if operator == "lt" and num >= cmp:
|
|
286
|
+
return False
|
|
287
|
+
except (ValueError, TypeError):
|
|
288
|
+
return False
|
|
289
|
+
|
|
290
|
+
else:
|
|
291
|
+
value = f.get("value")
|
|
292
|
+
if value is None or value == "*":
|
|
293
|
+
continue
|
|
294
|
+
if span_value is None:
|
|
295
|
+
return False
|
|
296
|
+
if not match_wildcard(str(span_value), str(value)):
|
|
297
|
+
return False
|
|
298
|
+
|
|
299
|
+
return True
|
|
300
|
+
|
|
301
|
+
|
|
302
|
+
def text_search_span(span: Dict[str, Any], search_text: str) -> bool:
|
|
303
|
+
"""Check if span matches free text search (name, input, output, tags)."""
|
|
304
|
+
s = search_text.lower()
|
|
305
|
+
|
|
306
|
+
if s in span.get("name", "").lower():
|
|
307
|
+
return True
|
|
308
|
+
|
|
309
|
+
meta = span.get("meta", {})
|
|
310
|
+
for key in ("input", "output"):
|
|
311
|
+
data = meta.get(key, {})
|
|
312
|
+
if s in str(data.get("value", "")).lower():
|
|
313
|
+
return True
|
|
314
|
+
for msg in data.get("messages", []):
|
|
315
|
+
if isinstance(msg, dict) and s in str(msg.get("content", "")).lower():
|
|
316
|
+
return True
|
|
317
|
+
|
|
318
|
+
for tag in span.get("tags", []):
|
|
319
|
+
if isinstance(tag, str) and s in tag.lower():
|
|
320
|
+
return True
|
|
321
|
+
|
|
322
|
+
return False
|
|
323
|
+
|
|
324
|
+
|
|
325
|
+
def compute_children_ids(spans: List[Dict[str, Any]]) -> Dict[str, List[str]]:
|
|
326
|
+
"""Compute children_ids from parent_id relationships."""
|
|
327
|
+
children_map: Dict[str, List[str]] = {span.get("span_id", ""): [] for span in spans if span.get("span_id")}
|
|
328
|
+
|
|
329
|
+
for span in spans:
|
|
330
|
+
span_id = span.get("span_id", "")
|
|
331
|
+
parent_id = span.get("parent_id", "")
|
|
332
|
+
if parent_id and parent_id != "undefined":
|
|
333
|
+
if parent_id not in children_map:
|
|
334
|
+
children_map[parent_id] = []
|
|
335
|
+
children_map[parent_id].append(span_id)
|
|
336
|
+
|
|
337
|
+
return children_map
|
|
338
|
+
|
|
339
|
+
|
|
340
|
+
def get_span_field_value(span: Dict[str, Any], field: str) -> Optional[Any]:
|
|
341
|
+
"""Get field value from span (handles nested paths like meta.span.kind)."""
|
|
342
|
+
# Direct top-level fields
|
|
343
|
+
direct_fields = {
|
|
344
|
+
"ml_app": lambda s: s.get("ml_app", s.get("_ui_ml_app")),
|
|
345
|
+
"event_type": lambda s: "span",
|
|
346
|
+
"parent_id": lambda s: (
|
|
347
|
+
"undefined" if not s.get("parent_id") or s.get("parent_id") in ("0", "") else str(s["parent_id"])
|
|
348
|
+
),
|
|
349
|
+
"status": lambda s: s.get("status", "ok"),
|
|
350
|
+
"name": lambda s: s.get("name"),
|
|
351
|
+
"trace_id": lambda s: s.get("trace_id"),
|
|
352
|
+
"span_id": lambda s: s.get("span_id"),
|
|
353
|
+
"service": lambda s: s.get("service"),
|
|
354
|
+
"env": lambda s: s.get("env"),
|
|
355
|
+
"duration": lambda s: s.get("duration", 0),
|
|
356
|
+
}
|
|
357
|
+
if field in direct_fields:
|
|
358
|
+
return direct_fields[field](span)
|
|
359
|
+
|
|
360
|
+
meta = span.get("meta", {})
|
|
361
|
+
|
|
362
|
+
# Model fields with fallback to SDK format
|
|
363
|
+
if field == "meta.model_name":
|
|
364
|
+
return meta.get("model_name") or meta.get("metadata", {}).get("model_name")
|
|
365
|
+
if field == "meta.model_provider":
|
|
366
|
+
return meta.get("model_provider") or meta.get("metadata", {}).get("model_provider")
|
|
367
|
+
|
|
368
|
+
# Nested fields with dot notation
|
|
369
|
+
if field.startswith("meta.") or field.startswith("metrics."):
|
|
370
|
+
parts = field.split(".")
|
|
371
|
+
value = span.get(parts[0], {})
|
|
372
|
+
for part in parts[1:]:
|
|
373
|
+
if isinstance(value, dict):
|
|
374
|
+
value = value.get(part)
|
|
375
|
+
else:
|
|
376
|
+
return None
|
|
377
|
+
return value
|
|
378
|
+
|
|
379
|
+
# Check in tags
|
|
380
|
+
for tag in span.get("tags", []):
|
|
381
|
+
if isinstance(tag, str) and tag.startswith(f"{field}:"):
|
|
382
|
+
return tag.split(":", 1)[1]
|
|
383
|
+
|
|
384
|
+
return None
|
|
385
|
+
|
|
386
|
+
|
|
387
|
+
def _tags_to_dict(tags: List[str]) -> Dict[str, Any]:
|
|
388
|
+
"""Convert tags array to dict, handling multiple values per key."""
|
|
389
|
+
tag_obj: Dict[str, Any] = {}
|
|
390
|
+
for tag in tags:
|
|
391
|
+
if isinstance(tag, str) and ":" in tag:
|
|
392
|
+
k, v = tag.split(":", 1)
|
|
393
|
+
if k in tag_obj:
|
|
394
|
+
if isinstance(tag_obj[k], list):
|
|
395
|
+
tag_obj[k].append(v)
|
|
396
|
+
else:
|
|
397
|
+
tag_obj[k] = [tag_obj[k], v]
|
|
398
|
+
else:
|
|
399
|
+
tag_obj[k] = v
|
|
400
|
+
return tag_obj
|
|
401
|
+
|
|
402
|
+
|
|
403
|
+
def build_event_platform_list_response(
|
|
404
|
+
spans: List[Dict[str, Any]],
|
|
405
|
+
request_id: str,
|
|
406
|
+
limit: int = 100,
|
|
407
|
+
) -> Dict[str, Any]:
|
|
408
|
+
"""Build Event Platform list response from spans."""
|
|
409
|
+
children_map = compute_children_ids(spans[:limit])
|
|
410
|
+
events = []
|
|
411
|
+
|
|
412
|
+
for span in spans[:limit]:
|
|
413
|
+
meta = span.get("meta", {})
|
|
414
|
+
metrics = span.get("metrics", {})
|
|
415
|
+
span_id = span.get("span_id", str(uuid.uuid4()))
|
|
416
|
+
trace_id = span.get("trace_id", "")
|
|
417
|
+
status = span.get("status", "ok")
|
|
418
|
+
name = span.get("name", "")
|
|
419
|
+
duration = span.get("duration", 0)
|
|
420
|
+
start_ns = span.get("start_ns", int(time.time() * 1_000_000_000))
|
|
421
|
+
tags = span.get("tags", [])
|
|
422
|
+
span_kind = meta.get("span", {}).get("kind", "llm")
|
|
423
|
+
ml_app = span.get("ml_app", span.get("_ui_ml_app", "unknown"))
|
|
424
|
+
service = span.get("service", "")
|
|
425
|
+
env = span.get("env", "")
|
|
426
|
+
children_ids = children_map.get(span_id, [])
|
|
427
|
+
span_links = span.get("span_links", [])
|
|
428
|
+
tag_obj = _tags_to_dict(tags)
|
|
429
|
+
|
|
430
|
+
event_id = f"AZ{uuid.uuid4().hex[:20]}"
|
|
431
|
+
timestamp_ms = start_ns // 1_000_000
|
|
432
|
+
timestamp_iso = datetime.utcfromtimestamp(timestamp_ms / 1000).strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z"
|
|
433
|
+
|
|
434
|
+
# Get model metadata
|
|
435
|
+
model_metadata = meta.get("metadata", {})
|
|
436
|
+
model_name = model_metadata.get("model_name", meta.get("model_name", ""))
|
|
437
|
+
model_provider = model_metadata.get("model_provider", meta.get("model_provider", ""))
|
|
438
|
+
|
|
439
|
+
# Compute start and end timestamps in milliseconds
|
|
440
|
+
end_ns = start_ns + duration
|
|
441
|
+
start_ms = start_ns // 1_000_000
|
|
442
|
+
end_ms = end_ns // 1_000_000
|
|
443
|
+
|
|
444
|
+
# Build the custom object (this is where the actual span data lives)
|
|
445
|
+
# The UI accesses this via getMlObsAttrs(event) which returns event.custom
|
|
446
|
+
custom_data = {
|
|
447
|
+
"_dd": {
|
|
448
|
+
"apm_trace_id": trace_id,
|
|
449
|
+
"ootb_status": "success" if status == "ok" else "error",
|
|
450
|
+
"stage": "processed",
|
|
451
|
+
"document_version": 3,
|
|
452
|
+
},
|
|
453
|
+
"duration": duration,
|
|
454
|
+
"start": start_ms,
|
|
455
|
+
"end": end_ms,
|
|
456
|
+
"event_type": "span",
|
|
457
|
+
"kind": span_kind, # Also at top level for easier access
|
|
458
|
+
"meta": {
|
|
459
|
+
"span": {
|
|
460
|
+
"kind": span_kind,
|
|
461
|
+
},
|
|
462
|
+
"kind": span_kind, # Also directly in meta
|
|
463
|
+
"input": meta.get("input", {}),
|
|
464
|
+
"output": meta.get("output", {}),
|
|
465
|
+
"error": meta.get("error") if status == "error" else None,
|
|
466
|
+
"model_name": model_name,
|
|
467
|
+
"model_provider": model_provider,
|
|
468
|
+
},
|
|
469
|
+
"metrics": {
|
|
470
|
+
"input_tokens": metrics.get("input_tokens", 0),
|
|
471
|
+
"output_tokens": metrics.get("output_tokens", 0),
|
|
472
|
+
"total_tokens": metrics.get("total_tokens", 0),
|
|
473
|
+
"estimated_input_cost": 0,
|
|
474
|
+
"estimated_output_cost": 0,
|
|
475
|
+
"estimated_total_cost": 0,
|
|
476
|
+
},
|
|
477
|
+
"ml_app": ml_app,
|
|
478
|
+
"name": name,
|
|
479
|
+
"resource": name, # Usually same as name
|
|
480
|
+
"parent_id": span.get("parent_id", "undefined"),
|
|
481
|
+
"children_ids": children_ids, # Computed from parent relationships
|
|
482
|
+
"span_links": span_links, # From SDK for agentic execution graph
|
|
483
|
+
"span_id": span_id,
|
|
484
|
+
"start_ns": start_ns,
|
|
485
|
+
"status": status,
|
|
486
|
+
"error": 1 if status == "error" else 0,
|
|
487
|
+
"tags": tags,
|
|
488
|
+
"trace_id": trace_id,
|
|
489
|
+
"service": service,
|
|
490
|
+
"env": env,
|
|
491
|
+
"trace": {
|
|
492
|
+
"estimated_total_cost": 0,
|
|
493
|
+
},
|
|
494
|
+
}
|
|
495
|
+
|
|
496
|
+
# Build columns array [status, ?, ?, ml_app, service, ?, ?, duration]
|
|
497
|
+
columns = [
|
|
498
|
+
status,
|
|
499
|
+
None,
|
|
500
|
+
None,
|
|
501
|
+
ml_app,
|
|
502
|
+
service,
|
|
503
|
+
None,
|
|
504
|
+
None,
|
|
505
|
+
duration,
|
|
506
|
+
]
|
|
507
|
+
|
|
508
|
+
events.append(
|
|
509
|
+
{
|
|
510
|
+
"columns": columns,
|
|
511
|
+
"datadog.index": "llmobs",
|
|
512
|
+
"event": {
|
|
513
|
+
"custom": custom_data,
|
|
514
|
+
"discovery_timestamp": timestamp_ms,
|
|
515
|
+
"env": env,
|
|
516
|
+
"id": event_id,
|
|
517
|
+
"parent_id": span.get("parent_id", "undefined"),
|
|
518
|
+
"service": service,
|
|
519
|
+
"source": "integration",
|
|
520
|
+
"span_id": span_id,
|
|
521
|
+
"status": "info",
|
|
522
|
+
"tag": tag_obj,
|
|
523
|
+
"tags": tags,
|
|
524
|
+
"tiebreaker": hash(span_id) % 2147483647,
|
|
525
|
+
"timestamp": timestamp_iso,
|
|
526
|
+
"trace_id": trace_id,
|
|
527
|
+
"version": "",
|
|
528
|
+
},
|
|
529
|
+
"event_id": event_id,
|
|
530
|
+
"id": f"AwAAA{uuid.uuid4().hex[:40]}",
|
|
531
|
+
}
|
|
532
|
+
)
|
|
533
|
+
|
|
534
|
+
return {
|
|
535
|
+
"elapsed": 23,
|
|
536
|
+
"hitCount": len(events),
|
|
537
|
+
"requestId": request_id,
|
|
538
|
+
"result": {
|
|
539
|
+
"events": events,
|
|
540
|
+
},
|
|
541
|
+
"status": "done",
|
|
542
|
+
"type": "status",
|
|
543
|
+
}
|
|
544
|
+
|
|
545
|
+
|
|
546
|
+
class LLMObsEventPlatformAPI:
|
|
547
|
+
"""Handler for Event Platform API requests."""
|
|
548
|
+
|
|
549
|
+
def __init__(self, agent: "Agent"):
|
|
550
|
+
self.agent = agent
|
|
551
|
+
self._query_results: Dict[str, Dict[str, Any]] = {}
|
|
552
|
+
self.decoded_llmobs_span_events: Dict[int, List[Dict[str, Any]]] = {}
|
|
553
|
+
|
|
554
|
+
def get_llmobs_spans(self, token: Optional[str] = None) -> List[Dict[str, Any]]:
|
|
555
|
+
"""Get all LLMObs spans from stored requests."""
|
|
556
|
+
requests = self.agent._requests_by_session(token) if token else self.agent._requests
|
|
557
|
+
all_spans = []
|
|
558
|
+
|
|
559
|
+
for req in requests:
|
|
560
|
+
if req.path == "/evp_proxy/v2/api/v2/llmobs":
|
|
561
|
+
try:
|
|
562
|
+
data = self.agent._request_data(req)
|
|
563
|
+
content_type = req.content_type or ""
|
|
564
|
+
req_id = id(req) # only brittle if agent requests are cleared
|
|
565
|
+
if req_id not in self.decoded_llmobs_span_events:
|
|
566
|
+
events = decode_llmobs_payload(data, content_type)
|
|
567
|
+
spans = extract_spans_from_events(events)
|
|
568
|
+
self.decoded_llmobs_span_events[req_id] = spans
|
|
569
|
+
else:
|
|
570
|
+
spans = self.decoded_llmobs_span_events[req_id]
|
|
571
|
+
all_spans.extend(spans)
|
|
572
|
+
except Exception as e:
|
|
573
|
+
log.warning(f"Failed to extract spans from request: {e}")
|
|
574
|
+
|
|
575
|
+
all_spans.sort(key=lambda s: s.get("start_ns", 0), reverse=True)
|
|
576
|
+
return all_spans
|
|
577
|
+
|
|
578
|
+
async def handle_logs_analytics_list(self, request: Request) -> web.Response:
    """Serve the Event Platform "list" query over stored LLMObs spans.

    Handles POST /api/unstable/llm-obs-query-rewriter/list (and the legacy
    logs-analytics alias). The computed response is cached under its
    request id so a follow-up GET can replay it.
    """
    try:
        # Only require type=llmobs for the query-rewriter endpoint
        is_rewriter = "/llm-obs-query-rewriter/" in request.path
        if is_rewriter and request.query.get("type", "") != "llmobs":
            return web.json_response({"error": "Only llmobs queries are supported"}, status=400)

        payload = await request.json()
        list_params = payload.get("list", {})
        max_results = list_params.get("limit", 100)
        filter_query = list_params.get("search", {}).get("query", "")

        matching = self.get_llmobs_spans()
        if filter_query:
            matching = apply_filters(matching, parse_filter_query(filter_query))

        rid = str(uuid.uuid4())
        result = build_event_platform_list_response(matching, rid, max_results)
        self._query_results[rid] = result

        return web.json_response(result)
    except Exception as e:
        log.error(f"Error handling llm-obs list: {e}")
        return web.json_response({"error": str(e)}, status=500)
|
|
604
|
+
|
|
605
|
+
async def handle_logs_analytics_get(self, request: Request) -> web.Response:
    """Replay a previously computed list response by its request id.

    Handles GET /api/unstable/llm-obs-query-rewriter/list/{requestId};
    unknown ids yield 410 Gone, mirroring the real Event Platform.
    """
    try:
        rid = request.match_info.get("request_id", "")
        if rid not in self._query_results:
            return web.Response(status=410)  # Gone
        return web.json_response(self._query_results[rid])
    except Exception as e:
        log.error(f"Error handling llm-obs get: {e}")
        return web.json_response({"error": str(e)}, status=500)
|
|
615
|
+
|
|
616
|
+
async def handle_aggregate(self, request: Request) -> web.Response:
    """Serve a minimal aggregate response: one bucket counting all spans.

    Handles POST /api/unstable/llm-obs-query-rewriter/aggregate.
    """
    try:
        span_total = len(self.get_llmobs_spans())
        return web.json_response(
            {
                "elapsed": 50,
                "requestId": str(uuid.uuid4()),
                "result": {"buckets": [{"computes": {"c0": span_total}}], "status": "done"},
                "status": "done",
                "type": "aggregate",
            }
        )
    except Exception as e:
        log.error(f"Error handling aggregate: {e}")
        return web.json_response({"error": str(e)}, status=500)
|
|
631
|
+
|
|
632
|
+
async def handle_fetch_one(self, request: Request) -> web.Response:
    """Handle POST /api/unstable/llm-obs-query-rewriter/fetch_one endpoint.

    Looks up a single stored LLMObs span by the ``eventId`` in the request
    body and returns it in the Event Platform "fetch_one" response shape.
    Falls back to the most recent span when no id matches (permissive stub
    behavior for the UI); returns 404 only when no spans exist at all, and
    500 on unexpected errors.
    """
    from datetime import timezone  # local import: avoids touching the module import block

    try:
        body = await request.json()
        event_id = body.get("eventId", "")
        spans = self.get_llmobs_spans()

        found_span = None
        for span in spans:
            span_id = span.get("span_id", "")
            # Require a non-empty span_id: event_id.endswith("") is always
            # True, which would make an id-less span match any event_id.
            if span_id and (span_id == event_id or event_id.endswith(span_id)):
                found_span = span
                break

        if not found_span:
            if spans:
                found_span = spans[0]
            else:
                return web.json_response({"error": "Span not found"}, status=404)

        meta = found_span.get("meta", {})
        metrics = found_span.get("metrics", {})
        span_id = found_span.get("span_id", str(uuid.uuid4()))
        trace_id = found_span.get("trace_id", "")
        status = found_span.get("status", "ok")
        name = found_span.get("name", "")
        duration = found_span.get("duration", 0)
        start_ns = found_span.get("start_ns", int(time.time() * 1_000_000_000))
        tags = found_span.get("tags", [])
        span_kind = meta.get("span", {}).get("kind", "llm")
        ml_app = found_span.get("ml_app", found_span.get("_ui_ml_app", "unknown"))
        service = found_span.get("service", "")
        env = found_span.get("env", "")

        # Expand "key:value" tag strings into a mapping for the "tag" field.
        tag_obj = {}
        for tag in tags:
            if isinstance(tag, str) and ":" in tag:
                k, v = tag.split(":", 1)
                tag_obj[k] = v

        timestamp_ms = start_ns // 1_000_000
        # Timezone-aware replacement for the deprecated datetime.utcfromtimestamp;
        # the strftime format carries no UTC offset, so the string is unchanged.
        timestamp_iso = (
            datetime.fromtimestamp(timestamp_ms / 1000, tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z"
        )

        # Model info may live under meta.metadata (newer payloads) or
        # directly on meta (older payloads); prefer the nested form.
        model_metadata = meta.get("metadata", {})
        model_name = model_metadata.get("model_name", meta.get("model_name", ""))
        model_provider = model_metadata.get("model_provider", meta.get("model_provider", ""))

        custom_data = {
            "_dd": {
                "apm_trace_id": trace_id,
                "ootb_status": "success" if status == "ok" else "error",
                "stage": "processed",
                "document_version": 3,
            },
            "duration": duration,
            "event_type": "span",
            "kind": span_kind,
            "meta": {
                "span": {"kind": span_kind},
                "kind": span_kind,
                "input": meta.get("input", {}),
                "output": meta.get("output", {}),
                "error": meta.get("error") if status == "error" else None,
                "model_name": model_name,
                "model_provider": model_provider,
            },
            "metrics": {
                "input_tokens": metrics.get("input_tokens", 0),
                "output_tokens": metrics.get("output_tokens", 0),
                "total_tokens": metrics.get("total_tokens", 0),
            },
            "ml_app": ml_app,
            "name": name,
            "parent_id": found_span.get("parent_id", "undefined"),
            "span_id": span_id,
            "start_ns": start_ns,
            "status": status,
            "tags": tags,
            "trace_id": trace_id,
        }

        response = {
            "type": "status",
            "requestId": str(uuid.uuid4()),
            "status": "done",
            "elapsed": 21,
            "hitCount": 1,
            "result": {
                "trace_id": trace_id,
                "span_id": span_id,
                "custom": custom_data,
                "trace_id_low": trace_id,
                "source": "integration",
                # NOTE(review): hash() is per-process randomized for strings,
                # so this tiebreaker is not stable across runs — presumably
                # acceptable for a mock value; confirm if tests compare it.
                "tiebreaker": hash(span_id) % 2147483647,
                "env": env,
                "version": "",
                "discovery_timestamp": timestamp_ms,
                "tags": tags,
                "event_id": event_id,
                "service": service,
                "parent_id": found_span.get("parent_id", "undefined"),
                "datadog.index": "llmobs",
                "id": event_id,
                "tag": tag_obj,
                "timestamp": timestamp_iso,
                "status": "info",
            },
        }

        return web.json_response(response)
    except Exception as e:
        log.error(f"Error handling fetch_one: {e}")
        return web.json_response({"error": str(e)}, status=500)
|
|
745
|
+
|
|
746
|
+
async def handle_trace(self, request: Request) -> web.Response:
    """Handle GET /api/ui/llm-obs/v1/trace/{trace_id} endpoint.

    Assembles all stored spans belonging to the requested trace into the
    UI's trace-view shape: a span dict keyed by span_id plus a root_id.
    Falls back to a span_id filter match, then to the newest span, so the
    stub always has something to render when any spans exist.
    """
    try:
        trace_id = request.match_info.get("trace_id", "")
        span_id_filter = request.query.get("filter[span_id]", "")

        all_spans = self.get_llmobs_spans()
        trace_spans = [s for s in all_spans if s.get("trace_id") == trace_id]

        # Fallback 1: no trace match — try the span_id filter instead.
        if not trace_spans and span_id_filter:
            trace_spans = [s for s in all_spans if s.get("span_id") == span_id_filter]

        # Fallback 2: still nothing — serve the newest span (spans are
        # sorted newest-first by get_llmobs_spans).
        if not trace_spans and all_spans:
            trace_spans = [all_spans[0]]

        if not trace_spans:
            return web.json_response({"error": "Trace not found"}, status=404)

        # parent_id -> children lookup computed once for the whole trace.
        children_map = compute_children_ids(trace_spans)
        spans_dict = {}
        root_id = None

        for span in trace_spans:
            span_id = span.get("span_id", "")
            meta = span.get("meta", {})
            metrics = span.get("metrics", {})
            tags = span.get("tags", [])
            span_kind = meta.get("span", {}).get("kind", "llm")
            ml_app = span.get("ml_app", span.get("_ui_ml_app", "unknown"))
            service = span.get("service", "")
            children_ids = children_map.get(span_id, [])
            span_links = span.get("span_links", [])

            # First span without a real parent becomes the root.
            parent_id = span.get("parent_id", "undefined")
            if not root_id and (not parent_id or parent_id == "undefined"):
                root_id = span_id

            # Model info may live under meta.metadata or directly on meta.
            model_metadata = meta.get("metadata", {})
            model_name = model_metadata.get("model_name", meta.get("model_name", ""))
            model_provider = model_metadata.get("model_provider", meta.get("model_provider", ""))

            start_ns = span.get("start_ns", 0)
            duration_ns = span.get("duration", 0)

            spans_dict[span_id] = {
                "trace_id": span.get("trace_id", ""),
                "span_id": span_id,
                "parent_id": parent_id,
                "children_ids": children_ids,
                "span_links": span_links,
                "name": span.get("name", ""),
                "resource": span.get("name", ""),
                "tags": tags,
                "status": span.get("status", "ok"),
                "error": 1 if span.get("status") == "error" else 0,
                # start/end are milliseconds; duration stays in nanoseconds.
                "start": start_ns // 1_000_000,
                "end": (start_ns + duration_ns) // 1_000_000,
                "duration": duration_ns,
                "service": service,
                "env": span.get("env", ""),
                "ml_app": ml_app,
                "kind": span_kind,
                "meta": {
                    "ml_app": ml_app,
                    "kind": span_kind,
                    "span": {"kind": span_kind},
                    "error": meta.get("error", {}),
                    "input": meta.get("input", {}),
                    "output": meta.get("output", {}),
                    "expected_output": {},
                    "model_name": model_name,
                    "model_provider": model_provider,
                },
                "metrics": metrics,
                "_dd": {"apm_trace_id": span.get("trace_id", "")},
            }

        # No parentless span found: arbitrarily root at the first span
        # (insertion order, i.e. newest span in the trace).
        if not root_id and spans_dict:
            root_id = list(spans_dict.keys())[0]

        response = {
            "data": {
                "id": str(uuid.uuid4()),
                "type": "trace",
                "attributes": {
                    "root_id": root_id,
                    "spans": spans_dict,
                    "trace_state": {"Error": "", "warnings": None, "convention_violations": {}},
                },
            },
        }

        return web.json_response(response)
    except Exception as e:
        log.error(f"Error handling trace: {e}")
        return web.json_response({"error": str(e)}, status=500)
|
|
842
|
+
|
|
843
|
+
async def handle_facets_list(self, request: Request) -> web.Response:
    """Handle GET /api/ui/event-platform/llmobs/facets endpoint (stub)."""
    empty_facets = {"facets": {"llmobs": []}}
    return web.json_response(empty_facets)
|
|
846
|
+
|
|
847
|
+
async def handle_facet_info(self, request: Request) -> web.Response:
    """Handle POST /api/unstable/llm-obs-query-rewriter/facet_info endpoint.

    Returns facet values with counts for the specified facet path.
    Supports an optional search/filter query (restricts the spans counted)
    and an optional termSearch (restricts which facet values are returned).
    """
    try:
        body = await request.json()
        log.debug(f"facet_info request: {json.dumps(body, indent=2)[:500]}")

        facet_info = body.get("facet_info", {})
        facet_path = facet_info.get("path", "")
        limit = facet_info.get("limit", 10)
        term_search = facet_info.get("termSearch", {}).get("query", "")
        search_query = facet_info.get("search", {}).get("query", "")

        # Facet paths arrive with a leading '@'; span fields are stored without it.
        field_path = facet_path.lstrip("@")

        # Get spans, optionally filtered by search query
        spans = self.get_llmobs_spans()
        if search_query:
            parsed_query = parse_filter_query(search_query)
            spans = apply_filters(spans, parsed_query)

        # Compute facet value counts from spans
        value_counts: Dict[str, int] = {}
        for span in spans:
            value = get_span_field_value(span, field_path)
            if value is not None:
                value_str = str(value)
                value_counts[value_str] = value_counts.get(value_str, 0) + 1

        # Apply the term search BEFORE limiting: filtering after the top-N
        # cut would drop matching values that fall outside the N most
        # frequent, making term search appear to return no results.
        if term_search:
            term_lower = term_search.lower()
            value_counts = {v: c for v, c in value_counts.items() if term_lower in v.lower()}

        # Sort by count descending and limit
        sorted_values = sorted(value_counts.items(), key=lambda x: -x[1])[:limit]

        # Build response
        fields = [{"field": value, "value": count} for value, count in sorted_values]

        response = {
            "elapsed": 10,
            "requestId": str(uuid.uuid4()),
            "result": {"fields": fields, "status": "done"},
            "status": "done",
        }

        log.debug(f"facet_info response for {facet_path}: {len(fields)} values")
        return web.json_response(response)

    except Exception as e:
        log.error(f"Error handling facet info: {e}")
        return web.json_response({"error": str(e)}, status=500)
|
|
904
|
+
|
|
905
|
+
async def handle_facet_range_info(self, request: Request) -> web.Response:
    """Return the min/max of a numeric facet across stored spans.

    Handles POST /api/unstable/llm-obs-query-rewriter/facet_range_info,
    used for range facets such as duration, tokens, or cost. An optional
    search query restricts the spans the range is computed over.
    """
    try:
        body = await request.json()
        log.debug(f"facet_range_info request: {json.dumps(body, indent=2)[:500]}")

        params = body.get("facet_range_info", {})
        facet_path = params.get("path", "")
        search_query = params.get("search", {}).get("query", "")

        # Facet paths arrive with a leading '@'; span fields are stored without it.
        field_path = facet_path.lstrip("@")

        spans = self.get_llmobs_spans()
        if search_query:
            spans = apply_filters(spans, parse_filter_query(search_query))

        numeric_values = []
        for span in spans:
            raw = get_span_field_value(span, field_path)
            if raw is None:
                continue
            try:
                numeric_values.append(float(raw))
            except (ValueError, TypeError):
                pass  # non-numeric values simply do not contribute to the range

        lo = min(numeric_values) if numeric_values else 0
        hi = max(numeric_values) if numeric_values else 0

        response = {
            "elapsed": 10,
            "requestId": str(uuid.uuid4()),
            "result": {"min": lo, "max": hi, "status": "done"},
            "status": "done",
        }

        log.debug(f"facet_range_info response for {facet_path}: min={lo}, max={hi}")
        return web.json_response(response)

    except Exception as e:
        log.error(f"Error handling facet range info: {e}")
        return web.json_response({"error": str(e)}, status=500)
|
|
956
|
+
|
|
957
|
+
async def handle_query_scalar(self, request: Request) -> web.Response:
    """Handle POST /api/ui/query/scalar endpoint (stub with no columns)."""
    scalar_payload = {
        "data": [
            {
                "type": "scalar_response",
                "attributes": {
                    "columns": [],
                },
            }
        ],
    }
    return web.json_response(scalar_payload)
|
|
971
|
+
|
|
972
|
+
def get_routes(self) -> List[web.RouteDef]:
    """Return the routes for this API (all handlers wrapped with CORS support)."""
    endpoint_map = [
        # LLM Obs query rewriter endpoints
        ("/api/unstable/llm-obs-query-rewriter/list", self.handle_logs_analytics_list),
        ("/api/unstable/llm-obs-query-rewriter/list/{request_id}", self.handle_logs_analytics_get),
        ("/api/unstable/llm-obs-query-rewriter/aggregate", self.handle_aggregate),
        ("/api/unstable/llm-obs-query-rewriter/fetch_one", self.handle_fetch_one),
        ("/api/unstable/llm-obs-query-rewriter/facet_info", self.handle_facet_info),
        ("/api/unstable/llm-obs-query-rewriter/facet_range_info", self.handle_facet_range_info),
        # Facets list endpoint
        ("/api/ui/event-platform/llmobs/facets", self.handle_facets_list),
        # Legacy logs-analytics endpoints
        ("/api/v1/logs-analytics/list", self.handle_logs_analytics_list),
        ("/api/v1/logs-analytics/list/{request_id}", self.handle_logs_analytics_get),
        ("/api/v1/logs-analytics/aggregate", self.handle_aggregate),
        ("/api/v1/logs-analytics/fetch_one", self.handle_fetch_one),
        # LLM Obs trace endpoint
        ("/api/ui/llm-obs/v1/trace/{trace_id}", self.handle_trace),
        # Query scalar endpoint
        ("/api/ui/query/scalar", self.handle_query_scalar),
    ]
    # Every handler accepts any method ("*") and is wrapped for CORS.
    return [web.route("*", path, with_cors(handler)) for path, handler in endpoint_map]
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: ddapm-test-agent
|
|
3
|
-
Version: 1.
|
|
3
|
+
Version: 1.40.0
|
|
4
4
|
Summary: Test agent for Datadog APM client libraries
|
|
5
5
|
Home-page: https://github.com/Datadog/dd-apm-test-agent
|
|
6
6
|
Author: Kyle Verhoog
|
|
@@ -140,6 +140,16 @@ At the trace-level, proxying can also be disabled by including the `X-Datadog-Ag
|
|
|
140
140
|
is handled, regardless of whether an agent URL is set.
|
|
141
141
|
|
|
142
142
|
|
|
143
|
+
### LLM Observability Proxy
|
|
144
|
+
|
|
145
|
+
The test agent provides a mechanism to dual-ship LLM Observability events to Datadog, regardless of whether the Datadog agent is running.
|
|
146
|
+
If using the Datadog agent, set the `DD_AGENT_URL` environment variable or `--agent-url` command-line argument to the URL of the Datadog agent (see [Proxy](#proxy) for more details).
|
|
147
|
+
|
|
148
|
+
If not running a Datadog agent, set the `DD_SITE` environment variable or `--dd-site` command-line argument to the site of the Datadog instance to forward events to. Additionally, set the `DD_API_KEY` environment variable or `--dd-api-key` command-line argument to the API key to use for the Datadog instance.
|
|
149
|
+
|
|
150
|
+
To disable LLM Observability event forwarding, set the `DISABLE_LLMOBS_DATA_FORWARDING` environment variable or `--disable-llmobs-data-forwarding` command-line argument to `true`.
|
|
151
|
+
|
|
152
|
+
|
|
143
153
|
### Snapshot testing
|
|
144
154
|
|
|
145
155
|
The test agent provides a form of [characterization testing](https://en.wikipedia.org/wiki/Characterization_test) which
|
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
ddapm_test_agent/__init__.py,sha256=IEYMDM-xI0IoHYSYw4Eva5263puB_crrrbLstOCScRw,106
|
|
2
|
-
ddapm_test_agent/agent.py,sha256=
|
|
2
|
+
ddapm_test_agent/agent.py,sha256=rHT3e2CcvLFGM6lP2qKfzTrAFG2Ny3lDGtoyXYw5rpo,96992
|
|
3
3
|
ddapm_test_agent/apmtelemetry.py,sha256=w_9-yUDh1dgox-FfLqeOHU2C14GcjOjen-_SVagiZrc,861
|
|
4
4
|
ddapm_test_agent/checks.py,sha256=pBa4YKZQVA8qaTVJ_XgMA6TmlUZNh99YOrCFJA7fwo0,6865
|
|
5
5
|
ddapm_test_agent/client.py,sha256=ViEmiRX9Y3SQ-KBhSc-FdzBmIVIe8Ij9jj-Q6VGyzLY,7359
|
|
@@ -7,6 +7,7 @@ ddapm_test_agent/cmd.py,sha256=UL8dVGBN4j77Nyx9EJrwE9LLmoPNgru81h9f1qrZLyc,2451
|
|
|
7
7
|
ddapm_test_agent/context.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
8
8
|
ddapm_test_agent/fmt.py,sha256=BarFfTI8bd_2gFBrRc40lKgJiUP3T7VMp4NQUeJ9ZRY,3675
|
|
9
9
|
ddapm_test_agent/integration.py,sha256=DbrPb6UvyIDSHcmKe6ZJVaieNeUXalb15yd0smvhgAA,265
|
|
10
|
+
ddapm_test_agent/llmobs_event_platform.py,sha256=TvHPaXNVEdKvHAjdNC0EMvt5NnIYWHY7DzBcW1J3-4c,39045
|
|
10
11
|
ddapm_test_agent/logs.py,sha256=NDM-FPL52oCL1tL75XJ0xbBLIxyZkS99iZkQ-weOQns,2901
|
|
11
12
|
ddapm_test_agent/metrics.py,sha256=EZo7lSec2oAiH7tUqavKZ2MJM7TwbuFGE3AT3cXwmSM,3988
|
|
12
13
|
ddapm_test_agent/remoteconfig.py,sha256=nqpG5qrz-aB20Xi7SArY6HfztB_WXcjWH4qkNUjLaQs,3606
|
|
@@ -31,10 +32,10 @@ ddapm_test_agent/templates/snapshots.html,sha256=uWiYK9yDP9S88nxYNr0vLhYkWSzzj9C
|
|
|
31
32
|
ddapm_test_agent/templates/trace_detail.html,sha256=EqC6l_jo2Afrf511_-pEDgz6-TEogTjtjawMVxzw7G4,1235
|
|
32
33
|
ddapm_test_agent/templates/tracer_flares.html,sha256=wwq7Yaq7shXJineHT38A2tIHIEoirs5ryWu7raFco80,16272
|
|
33
34
|
ddapm_test_agent/templates/traces.html,sha256=-RB_Nc3NzQQXApw5Y37KbmkU4Z41ehBCGouRhgojaRo,805
|
|
34
|
-
ddapm_test_agent-1.
|
|
35
|
-
ddapm_test_agent-1.
|
|
36
|
-
ddapm_test_agent-1.
|
|
37
|
-
ddapm_test_agent-1.
|
|
38
|
-
ddapm_test_agent-1.
|
|
39
|
-
ddapm_test_agent-1.
|
|
40
|
-
ddapm_test_agent-1.
|
|
35
|
+
ddapm_test_agent-1.40.0.dist-info/licenses/LICENSE.BSD3,sha256=J9S_Tq-hhvteDV2W8R0rqht5DZHkmvgdx3gnLZg4j6Q,1493
|
|
36
|
+
ddapm_test_agent-1.40.0.dist-info/licenses/LICENSE.apache2,sha256=5V2RruBHZQIcPyceiv51DjjvdvhgsgS4pnXAOHDuZkQ,11342
|
|
37
|
+
ddapm_test_agent-1.40.0.dist-info/METADATA,sha256=L61QWt0OxilgX_MQroT-64fu2HQBth8kVI1LFPctibI,31310
|
|
38
|
+
ddapm_test_agent-1.40.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
|
|
39
|
+
ddapm_test_agent-1.40.0.dist-info/entry_points.txt,sha256=ulayVs6YJ-0Ej2kxbwn39wOHDVXbyQgFgsbRQmXydcs,250
|
|
40
|
+
ddapm_test_agent-1.40.0.dist-info/top_level.txt,sha256=A9jiKOrrg6VjFAk-mtlSVYN4wr0VsZe58ehGR6IW47U,17
|
|
41
|
+
ddapm_test_agent-1.40.0.dist-info/RECORD,,
|
|
File without changes
|
{ddapm_test_agent-1.39.0.dist-info → ddapm_test_agent-1.40.0.dist-info}/licenses/LICENSE.BSD3
RENAMED
|
File without changes
|
{ddapm_test_agent-1.39.0.dist-info → ddapm_test_agent-1.40.0.dist-info}/licenses/LICENSE.apache2
RENAMED
|
File without changes
|
|
File without changes
|