ddapm-test-agent 1.31.1__py3-none-any.whl → 1.33.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ddapm_test_agent/agent.py +239 -2
- ddapm_test_agent/client.py +63 -20
- ddapm_test_agent/logs.py +67 -0
- ddapm_test_agent/metrics.py +94 -0
- ddapm_test_agent/trace.py +399 -0
- ddapm_test_agent/vcr_proxy.py +80 -23
- {ddapm_test_agent-1.31.1.dist-info → ddapm_test_agent-1.33.0.dist-info}/METADATA +63 -2
- {ddapm_test_agent-1.31.1.dist-info → ddapm_test_agent-1.33.0.dist-info}/RECORD +13 -11
- {ddapm_test_agent-1.31.1.dist-info → ddapm_test_agent-1.33.0.dist-info}/WHEEL +0 -0
- {ddapm_test_agent-1.31.1.dist-info → ddapm_test_agent-1.33.0.dist-info}/entry_points.txt +0 -0
- {ddapm_test_agent-1.31.1.dist-info → ddapm_test_agent-1.33.0.dist-info}/licenses/LICENSE.BSD3 +0 -0
- {ddapm_test_agent-1.31.1.dist-info → ddapm_test_agent-1.33.0.dist-info}/licenses/LICENSE.apache2 +0 -0
- {ddapm_test_agent-1.31.1.dist-info → ddapm_test_agent-1.33.0.dist-info}/top_level.txt +0 -0
ddapm_test_agent/agent.py
CHANGED

@@ -1,4 +1,5 @@
 import argparse
+import asyncio
 import atexit
 import base64
 from collections import OrderedDict
@@ -12,6 +13,7 @@ import pprint
 import re
 import socket
 import sys
+from typing import Any
 from typing import Awaitable
 from typing import Callable
 from typing import DefaultDict
@@ -32,8 +34,11 @@ from aiohttp import web
 from aiohttp.web import HTTPException
 from aiohttp.web import Request
 from aiohttp.web import middleware
+from grpc import aio as grpc_aio
 from msgpack.exceptions import ExtraData as MsgPackExtraDataException
 from multidict import CIMultiDict
+from opentelemetry.proto.collector.logs.v1.logs_service_pb2_grpc import add_LogsServiceServicer_to_server
+from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2_grpc import add_MetricsServiceServicer_to_server

 from . import _get_version
 from . import trace_snapshot
@@ -44,10 +49,17 @@ from .checks import CheckTrace
 from .checks import Checks
 from .checks import start_trace
 from .integration import Integration
+from .logs import LOGS_ENDPOINT
+from .logs import OTLPLogsGRPCServicer
+from .logs import decode_logs_request
+from .metrics import METRICS_ENDPOINT
+from .metrics import OTLPMetricsGRPCServicer
+from .metrics import decode_metrics_request
 from .remoteconfig import RemoteConfigServer
 from .trace import Span
 from .trace import Trace
 from .trace import TraceMap
+from .trace import decode_v1 as trace_decode_v1
 from .trace import decode_v04 as trace_decode_v04
 from .trace import decode_v05 as trace_decode_v05
 from .trace import decode_v07 as trace_decode_v07
@@ -66,6 +78,12 @@ from .tracestats import v06StatsPayload
 from .vcr_proxy import proxy_request


+# Default ports
+DEFAULT_APM_PORT = 8126
+DEFAULT_OTLP_HTTP_PORT = 4318
+DEFAULT_OTLP_GRPC_PORT = 4317
+
+
 class NoSuchSessionException(Exception):
     pass

@@ -247,6 +265,7 @@ class Agent:
                     "/v0.4/traces",
                     "/v0.5/traces",
                     "/v0.7/traces",
+                    "/v1.0/traces",
                     "/v0.6/stats",
                     "/v0.7/config",
                     "/telemetry/proxy/api/v2/apmtelemetry",
@@ -428,6 +447,8 @@ class Agent:
             return self._decode_v05_traces(req)
         elif req.match_info.handler == self.handle_v07_traces:
             return self._decode_v07_traces(req)
+        elif req.match_info.handler == self.handle_v1_traces:
+            return self._decode_v1_traces(req)
         return []

     async def _traces_by_session(self, token: Optional[str]) -> List[Trace]:
@@ -486,6 +507,34 @@ class Agent:
                 stats.append(s)
         return stats

+    async def _logs_by_session(self, token: Optional[str]) -> List[Dict[str, Any]]:
+        """Return the logs that belong to the given session token.
+
+        If token is None or if the token was used to manually start a session
+        with /session-start then return all logs that were sent since the last
+        /session-start request was made.
+        """
+        logs: List[Dict[str, Any]] = []
+        for req in self._requests_by_session(token):
+            if req.match_info.handler == self.handle_v1_logs:
+                logs_data = self._decode_v1_logs(req)
+                logs.append(logs_data)
+        return logs
+
+    async def _metrics_by_session(self, token: Optional[str]) -> List[Dict[str, Any]]:
+        """Return the metrics that belong to the given session token.
+
+        If token is None or if the token was used to manually start a session
+        with /session-start then return all metrics that were sent since the last
+        /session-start request was made.
+        """
+        metrics: List[Dict[str, Any]] = []
+        for req in self._requests_by_session(token):
+            if req.match_info.handler == self.handle_v1_metrics:
+                metrics_data = self._decode_v1_metrics(req)
+                metrics.append(metrics_data)
+        return metrics
+
     async def _integration_requests_by_session(
         self,
         token: Optional[str],
@@ -554,10 +603,30 @@ class Agent:
         raw_data = self._request_data(request)
         return trace_decode_v07(raw_data)

+    def _decode_v1_traces(self, request: Request) -> v04TracePayload:
+        raw_data = self._request_data(request)
+        return trace_decode_v1(raw_data)
+
     def _decode_v06_tracestats(self, request: Request) -> v06StatsPayload:
         raw_data = self._request_data(request)
         return tracestats_decode_v06(raw_data)

+    def _decode_v1_logs(self, request: Request) -> Dict[str, Any]:
+        raw_data = self._request_data(request)
+        content_type = request.headers.get("Content-Type", "").lower().strip()
+        try:
+            return decode_logs_request(raw_data, content_type)
+        except Exception as e:
+            raise web.HTTPBadRequest(text=str(e))
+
+    def _decode_v1_metrics(self, request: Request) -> Dict[str, Any]:
+        raw_data = self._request_data(request)
+        content_type = request.headers.get("Content-Type", "").lower().strip()
+        try:
+            return decode_metrics_request(raw_data, content_type)
+        except Exception as e:
+            raise web.HTTPBadRequest(text=str(e))
+
     async def handle_v04_traces(self, request: Request) -> web.Response:
         return await self._handle_traces(request, version="v0.4")

@@ -567,6 +636,9 @@ class Agent:
     async def handle_v07_traces(self, request: Request) -> web.Response:
         return await self._handle_traces(request, version="v0.7")

+    async def handle_v1_traces(self, request: Request) -> web.Response:
+        return await self._handle_traces(request, version="v1")
+
     async def handle_v06_tracestats(self, request: Request) -> web.Response:
         stats = self._decode_v06_tracestats(request)
         nstats = len(stats["Stats"])
@@ -581,6 +653,36 @@ class Agent:
         log.info("received /v0.1/pipeline_stats payload")
         return web.HTTPOk()

+    async def handle_v1_logs(self, request: Request) -> web.Response:
+        logs_data = self._decode_v1_logs(request)
+        num_resource_logs = len(logs_data.get("resource_logs", []))
+        total_log_records = sum(
+            len(scope_log.get("log_records", []))
+            for resource_log in logs_data.get("resource_logs", [])
+            for scope_log in resource_log.get("scope_logs", [])
+        )
+        log.info(
+            "received /v1/logs payload with %r resource log(s) containing %r log record(s)",
+            num_resource_logs,
+            total_log_records,
+        )
+        return web.HTTPOk()
+
+    async def handle_v1_metrics(self, request: Request) -> web.Response:
+        metrics_data = self._decode_v1_metrics(request)
+        num_resource_metrics = len(metrics_data.get("resource_metrics", []))
+        total_metrics = sum(
+            len(scope_metric.get("metrics", []))
+            for resource_metric in metrics_data.get("resource_metrics", [])
+            for scope_metric in resource_metric.get("scope_metrics", [])
+        )
+        log.info(
+            "received /v1/metrics payload with %r resource metric(s) containing %r metric(s)",
+            num_resource_metrics,
+            total_metrics,
+        )
+        return web.HTTPOk()
+
     async def handle_v07_remoteconfig(self, request: Request) -> web.Response:
         """Emulates Remote Config endpoint: /v0.7/config"""
         token = _session_token(request)
@@ -732,7 +834,7 @@ class Agent:
             headers={"Datadog-Agent-State": "03e868b3ecdd62a91423cc4c3917d0d151fb9fa486736911ab7f5a0750c63824"},
         )

-    async def _handle_traces(self, request: Request, version: Literal["v0.4", "v0.5", "v0.7"]) -> web.Response:
+    async def _handle_traces(self, request: Request, version: Literal["v0.4", "v0.5", "v0.7", "v1"]) -> web.Response:
         token = request["session_token"]
         checks: Checks = request.app["checks"]
         headers = request.headers
@@ -753,6 +855,8 @@ class Agent:
             traces = self._decode_v05_traces(request)
         elif version == "v0.7":
             traces = self._decode_v07_traces(request)
+        elif version == "v1":
+            traces = self._decode_v1_traces(request)
         log.info(
             "received trace for token %r payload with %r trace chunks",
             token,
@@ -941,6 +1045,16 @@ class Agent:
         stats = await self._tracestats_by_session(token)
         return web.json_response(stats)

+    async def handle_session_logs(self, request: Request) -> web.Response:
+        token = request["session_token"]
+        logs = await self._logs_by_session(token)
+        return web.json_response(logs)
+
+    async def handle_session_metrics(self, request: Request) -> web.Response:
+        token = request["session_token"]
+        metrics = await self._metrics_by_session(token)
+        return web.json_response(metrics)
+
     async def handle_session_requests(self, request: Request) -> web.Response:
         token = request["session_token"]
         resp = []
@@ -957,6 +1071,8 @@ class Agent:
                 self.handle_v1_tracer_flare,
                 self.handle_evp_proxy_v2_api_v2_llmobs,
                 self.handle_evp_proxy_v2_llmobs_eval_metric,
+                self.handle_v1_logs,
+                self.handle_v1_metrics,
             ):
                 continue
             resp.append(
@@ -1174,6 +1290,59 @@ class Agent:
         return response


+def make_otlp_http_app(agent: Agent) -> web.Application:
+    """Create a separate HTTP application for OTLP endpoints using the shared agent instance."""
+
+    @middleware
+    async def otlp_store_request_middleware(request: Request, handler: _Handler) -> web.Response:
+        # Always store requests for OTLP endpoints
+        await agent._store_request(request)
+        return await handler(request)
+
+    app = web.Application(
+        middlewares=[
+            otlp_store_request_middleware,  # type: ignore
+            session_token_middleware,  # type: ignore
+        ],
+    )
+
+    # Add only OTLP HTTP endpoints
+    app.add_routes(
+        [
+            web.post(LOGS_ENDPOINT, agent.handle_v1_logs),
+            web.post(METRICS_ENDPOINT, agent.handle_v1_metrics),
+            web.get("/test/session/requests", agent.handle_session_requests),
+            web.get("/test/session/logs", agent.handle_session_logs),
+            web.get("/test/session/metrics", agent.handle_session_metrics),
+            web.get("/test/session/clear", agent.handle_session_clear),
+            web.get("/test/session/start", agent.handle_session_start),
+        ]
+    )
+
+    return app
+
+
+async def make_otlp_grpc_server_async(agent: Agent, http_port: int, grpc_port: int) -> Any:
+    """Create and start a separate GRPC server for OTLP endpoints that forwards to HTTP server."""
+    # Define the servicer class only when GRPC is available
+    server = grpc_aio.server()
+
+    # Add the OTLP logs servicer
+    logs_servicer = OTLPLogsGRPCServicer(http_port)
+    add_LogsServiceServicer_to_server(logs_servicer, server)
+
+    # Add the OTLP metrics servicer
+    metrics_servicer = OTLPMetricsGRPCServicer(http_port)
+    add_MetricsServiceServicer_to_server(metrics_servicer, server)
+
+    # Setup and start the server
+    listen_addr = f"[::]:{grpc_port}"
+    server.add_insecure_port(listen_addr)
+    await server.start()
+
+    return server
+
+
 def make_app(
     enabled_checks: List[str],
     log_span_fmt: str,
@@ -1209,6 +1378,8 @@ def make_app(
             web.put("/v0.5/traces", agent.handle_v05_traces),
             web.post("/v0.7/traces", agent.handle_v07_traces),
             web.put("/v0.7/traces", agent.handle_v07_traces),
+            web.post("/v1.0/traces", agent.handle_v1_traces),
+            web.put("/v1.0/traces", agent.handle_v1_traces),
             web.post("/v0.6/stats", agent.handle_v06_tracestats),
             web.post("/v0.1/pipeline_stats", agent.handle_v01_pipelinestats),
             web.put("/v0.6/stats", agent.handle_v06_tracestats),
@@ -1262,6 +1433,7 @@ def make_app(
         ],
         enabled=enabled_checks,
     )
+    app["agent"] = agent
    app["checks"] = checks
    app["snapshot_dir"] = snapshot_dir
    app["snapshot_ci_mode"] = snapshot_ci_mode
@@ -1293,6 +1465,18 @@ def main(args: Optional[List[str]] = None) -> None:
         help="Print version info and exit.",
     )
     parser.add_argument("-p", "--port", type=int, default=int(os.environ.get("PORT", 8126)))
+    parser.add_argument(
+        "--otlp-http-port",
+        type=int,
+        default=int(os.environ.get("OTLP_HTTP_PORT", 4318)),
+        help="Port to listen for OTLP HTTP requests (default: 4318)",
+    )
+    parser.add_argument(
+        "--otlp-grpc-port",
+        type=int,
+        default=int(os.environ.get("OTLP_GRPC_PORT", 4317)),
+        help="Port to listen for OTLP GRPC requests (default: 4317)",
+    )
     parser.add_argument(
         "--snapshot-dir",
         type=str,
@@ -1444,7 +1628,60 @@ def main(args: Optional[List[str]] = None) -> None:
         vcr_cassettes_directory=parsed_args.vcr_cassettes_directory,
     )

-
+    # Validate port configuration
+    if parsed_args.port == parsed_args.otlp_http_port:
+        raise ValueError("APM and OTLP HTTP ports cannot be the same")
+    if parsed_args.port == parsed_args.otlp_grpc_port:
+        raise ValueError("APM and OTLP GRPC ports cannot be the same")
+    if parsed_args.otlp_http_port == parsed_args.otlp_grpc_port:
+        raise ValueError("OTLP HTTP and GRPC ports cannot be the same")
+
+    # Get the shared agent instance from the main app
+    agent = app["agent"]
+    otlp_http_app = make_otlp_http_app(agent)
+
+    async def run_servers():
+        """Run APM and OTLP HTTP servers concurrently."""
+        # Create runners for both apps
+        apm_runner = web.AppRunner(app)
+        await apm_runner.setup()
+
+        otlp_http_runner = web.AppRunner(otlp_http_app)
+        await otlp_http_runner.setup()
+
+        # Start GRPC server if available (async creation)
+        otlp_grpc_server = await make_otlp_grpc_server_async(
+            agent, parsed_args.otlp_http_port, parsed_args.otlp_grpc_port
+        )
+
+        # Create sites for both apps
+        if apm_sock:
+            apm_site = web.SockSite(apm_runner, apm_sock)
+        else:
+            apm_site = web.TCPSite(apm_runner, port=parsed_args.port)
+
+        otlp_http_site = web.TCPSite(otlp_http_runner, port=parsed_args.otlp_http_port)
+
+        # Start both servers concurrently
+        await asyncio.gather(apm_site.start(), otlp_http_site.start())
+
+        print(f"======== Running APM server on port {parsed_args.port} ========")
+        print(f"======== Running OTLP HTTP server on port {parsed_args.otlp_http_port} ========")
+        print(f"======== Running OTLP GRPC server on port {parsed_args.otlp_grpc_port} ========")
+        print("(Press CTRL+C to quit)")
+
+        try:
+            # Keep the servers running
+            await asyncio.Event().wait()
+        except KeyboardInterrupt:
+            pass
+        finally:
+            await apm_runner.cleanup()
+            await otlp_http_runner.cleanup()
+            await otlp_grpc_server.stop(grace=5.0)
+
+    # Run the servers
+    asyncio.run(run_servers())


 if __name__ == "__main__":
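
Not part of the diff: a minimal sketch, assuming a test agent built from this version is running locally with the default ports declared above (APM on 8126, OTLP HTTP on 4318), of how the new OTLP HTTP endpoints could be exercised end to end. The payload shape and endpoint paths come from the handlers above; everything else is illustrative.

```python
# Hypothetical usage sketch, not shipped with the package: post an OTLP/JSON
# logs payload to the OTLP HTTP app and read it back through the session API.
import requests

OTLP_HTTP = "http://127.0.0.1:4318"  # default --otlp-http-port

payload = {
    "resource_logs": [
        {"scope_logs": [{"log_records": [{"body": {"string_value": "hello"}}]}]}
    ]
}

# handle_v1_logs accepts application/json (or application/x-protobuf) bodies.
resp = requests.post(f"{OTLP_HTTP}/v1/logs", json=payload)
assert resp.ok

# With no session token, all payloads since the last /test/session/start are returned.
captured = requests.get(f"{OTLP_HTTP}/test/session/logs").json()
assert captured[0]["resource_logs"][0]["scope_logs"][0]["log_records"]
```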
ddapm_test_agent/client.py
CHANGED

@@ -9,7 +9,7 @@ import requests
 from ddapm_test_agent.trace import Trace


-class TestAgentClient:
+class TestClient:
     __test__ = False

     def __init__(self, base_url: str):
@@ -19,6 +19,28 @@ class TestAgentClient:
     def _url(self, path: str) -> str:
         return urllib.parse.urljoin(self._base_url, path)

+    def requests(self, **kwargs: Any) -> List[Any]:
+        resp = self._session.get(self._url("/test/session/requests"), **kwargs)
+        json = resp.json()
+        return cast(List[Any], json)
+
+    def clear(self, **kwargs: Any) -> None:
+        self._session.get(self._url("/test/session/clear"), **kwargs)
+
+    def wait_to_start(self, num_tries: int = 50, delay: float = 0.1) -> None:
+        exc = []
+        for i in range(num_tries):
+            try:
+                self.requests()
+            except requests.exceptions.RequestException as e:
+                exc.append(e)
+                time.sleep(delay)
+            else:
+                return
+        raise AssertionError(f"Test agent did not start in time ({num_tries * delay} seconds). Got {exc[-1]}")
+
+
+class TestAgentClient(TestClient):
     def traces(self, clear: bool = False, **kwargs: Any) -> List[Trace]:
         resp = self._session.get(self._url("/test/session/traces"), **kwargs)
         if clear:
@@ -26,11 +48,6 @@ class TestAgentClient:
         json = resp.json()
         return cast(List[Trace], json)

-    def requests(self, **kwargs: Any) -> List[Any]:
-        resp = self._session.get(self._url("/test/session/requests"), **kwargs)
-        json = resp.json()
-        return cast(List[Any], json)
-
     def raw_telemetry(self, clear: bool = False) -> List[Any]:
         raw_reqs = self.requests()
         reqs = []
@@ -47,9 +64,6 @@ class TestAgentClient:
         self.clear()
         return cast(List[Any], resp.json())

-    def clear(self, **kwargs: Any) -> None:
-        self._session.get(self._url("/test/session/clear"), **kwargs)
-
     def info(self, **kwargs):
         resp = self._session.get(self._url("/info"), **kwargs)
         json = resp.json()
@@ -126,14 +140,43 @@ class TestAgentClient:
             time.sleep(0.01)
         raise AssertionError("Telemetry event %r not found" % event_name)

-
-
-
-
-
-
-
-
-
-
-
+
+class TestOTLPClient(TestClient):
+    def __init__(self, host: str = "127.0.0.1", http_port: int = 4318, scheme: str = "http"):
+        # OTLP grpc server will forward all requests to the http server
+        # so we can use the same client to receive logs for both http and grpc endpoints
+        super().__init__(f"{scheme}://{host}:{http_port}")
+
+    def logs(self, clear: bool = False, **kwargs: Any) -> List[Any]:
+        resp = self._session.get(self._url("/test/session/logs"), **kwargs)
+        if clear:
+            self.clear()
+        return cast(List[Any], resp.json())
+
+    def wait_for_num_logs(self, num: int, clear: bool = False, wait_loops: int = 30) -> List[Any]:
+        """Wait for `num` logs to be received from the test agent."""
+        for _ in range(wait_loops):
+            logs = self.logs(clear=False)
+            if len(logs) == num:
+                if clear:
+                    self.clear()
+                return logs
+            time.sleep(0.1)
+        raise ValueError("Number (%r) of logs not available from test agent, got %r" % (num, len(logs)))
+
+    def metrics(self, clear: bool = False, **kwargs: Any) -> List[Any]:
+        resp = self._session.get(self._url("/test/session/metrics"), **kwargs)
+        if clear:
+            self.clear()
+        return cast(List[Any], resp.json())
+
+    def wait_for_num_metrics(self, num: int, clear: bool = False, wait_loops: int = 30) -> List[Any]:
+        """Wait for `num` metrics to be received from the test agent."""
+        for _ in range(wait_loops):
+            metrics = self.metrics(clear=False)
+            if len(metrics) == num:
+                if clear:
+                    self.clear()
+                return metrics
+            time.sleep(0.1)
+        raise ValueError("Number (%r) of metrics not available from test agent, got %r" % (num, len(metrics)))
ddapm_test_agent/logs.py
ADDED

@@ -0,0 +1,67 @@
+"""OTLP Logs handling for the test agent."""
+
+import json
+import logging
+from typing import Any
+from typing import Dict
+
+from aiohttp import ClientSession
+from google.protobuf.json_format import MessageToDict
+from grpc import aio as grpc_aio
+from opentelemetry.proto.collector.logs.v1.logs_service_pb2 import ExportLogsServiceRequest
+from opentelemetry.proto.collector.logs.v1.logs_service_pb2 import ExportLogsServiceResponse
+from opentelemetry.proto.collector.logs.v1.logs_service_pb2_grpc import LogsServiceServicer
+
+
+LOGS_ENDPOINT = "/v1/logs"
+
+
+log = logging.getLogger(__name__)
+
+
+def decode_logs_request(request_body: bytes, content_type: str) -> Dict[str, Any]:
+    if content_type == "application/json":
+        parsed_json = json.loads(request_body)
+        if not isinstance(parsed_json, dict):
+            raise Exception("JSON payload must be an object")
+        return parsed_json
+    elif content_type == "application/x-protobuf":
+        export_request = ExportLogsServiceRequest()
+        export_request.ParseFromString(request_body)
+        return protobuf_to_dict(export_request)
+    else:
+        raise ValueError(f"Content-Type must be application/x-protobuf or application/json, got {content_type}")
+
+
+def protobuf_to_dict(pb_obj: Any) -> Dict[str, Any]:
+    return MessageToDict(pb_obj, preserving_proto_field_name=True)
+
+
+class OTLPLogsGRPCServicer(LogsServiceServicer):
+
+    def __init__(self, http_port: int):
+        self.http_url = f"http://127.0.0.1:{http_port}"
+
+    async def Export(
+        self, request: ExportLogsServiceRequest, context: grpc_aio.ServicerContext
+    ) -> ExportLogsServiceResponse:
+        try:
+            protobuf_data = request.SerializeToString()
+            headers = {"Content-Type": "application/x-protobuf"}
+            metadata = dict(context.invocation_metadata())
+            if "session-token" in metadata:
+                headers["Session-Token"] = metadata["session-token"]
+            async with ClientSession(self.http_url) as session:
+                async with session.post(LOGS_ENDPOINT, headers=headers, data=protobuf_data) as resp:
+                    context.set_trailing_metadata([("http-status", str(resp.status))])
+                    response = ExportLogsServiceResponse()
+                    if resp.status >= 400:
+                        response.partial_success.rejected_log_records = len(request.resource_logs)
+                        response.partial_success.error_message = f"HTTP {resp.status}: {await resp.text()}"
+                    return response
+        except Exception as e:
+            context.set_trailing_metadata([("http-status", "500"), ("error", str(e))])
+            response = ExportLogsServiceResponse()
+            response.partial_success.rejected_log_records = len(request.resource_logs)
+            response.partial_success.error_message = f"Forward failed: {str(e)}"
+            return response
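
Not part of the diff: a minimal sketch of the JSON branch of decode_logs_request. The snake_case field names mirror what the protobuf branch produces via MessageToDict(..., preserving_proto_field_name=True); the payload itself is illustrative.

```python
# Hypothetical example of decoding an OTLP/JSON logs body.
import json

from ddapm_test_agent.logs import decode_logs_request

body = json.dumps(
    {"resource_logs": [{"scope_logs": [{"log_records": [{"body": {"string_value": "hi"}}]}]}]}
).encode("utf-8")

decoded = decode_logs_request(body, "application/json")
assert len(decoded["resource_logs"]) == 1

# Anything other than JSON or protobuf content types is rejected with ValueError.
try:
    decode_logs_request(body, "text/plain")
except ValueError as e:
    print(e)
```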
ddapm_test_agent/metrics.py
ADDED

@@ -0,0 +1,94 @@
+"""OTLP Metrics handling for the test agent."""
+
+import json
+import logging
+from typing import Any
+from typing import Dict
+
+from aiohttp import ClientSession
+from google.protobuf.json_format import MessageToDict
+from grpc import aio as grpc_aio
+from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2 import ExportMetricsServiceRequest
+from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2 import ExportMetricsServiceResponse
+from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2_grpc import MetricsServiceServicer
+
+
+METRICS_ENDPOINT = "/v1/metrics"
+
+
+log = logging.getLogger(__name__)
+
+
+def decode_metrics_request(request_body: bytes, content_type: str) -> Dict[str, Any]:
+    if content_type == "application/json":
+        parsed_json = json.loads(request_body)
+        if not isinstance(parsed_json, dict):
+            raise Exception("JSON payload must be an object")
+        return parsed_json
+    elif content_type == "application/x-protobuf":
+        export_request = ExportMetricsServiceRequest()
+        export_request.ParseFromString(request_body)
+        return protobuf_to_dict(export_request)
+    else:
+        raise ValueError(f"Content-Type must be application/x-protobuf or application/json, got {content_type}")
+
+
+def protobuf_to_dict(pb_obj: Any) -> Dict[str, Any]:
+    return MessageToDict(pb_obj, preserving_proto_field_name=True)
+
+
+class OTLPMetricsGRPCServicer(MetricsServiceServicer):
+
+    def __init__(self, http_port: int):
+        self.http_url = f"http://127.0.0.1:{http_port}"
+
+    def _count_data_points(self, request: ExportMetricsServiceRequest) -> int:
+        return len(
+            [
+                dp
+                for rm in request.resource_metrics
+                for sm in rm.scope_metrics
+                for m in sm.metrics
+                for dp in (
+                    m.gauge.data_points
+                    if m.HasField("gauge")
+                    else (
+                        m.sum.data_points
+                        if m.HasField("sum")
+                        else (
+                            m.histogram.data_points
+                            if m.HasField("histogram")
+                            else (
+                                m.exponential_histogram.data_points
+                                if m.HasField("exponential_histogram")
+                                else m.summary.data_points if m.HasField("summary") else []
+                            )
+                        )
+                    )
+                )
+            ]
+        )
+
+    async def Export(
+        self, request: ExportMetricsServiceRequest, context: grpc_aio.ServicerContext
+    ) -> ExportMetricsServiceResponse:
+        try:
+            protobuf_data = request.SerializeToString()
+            headers = {"Content-Type": "application/x-protobuf"}
+            metadata = dict(context.invocation_metadata())
+            if "session-token" in metadata:
+                headers["Session-Token"] = metadata["session-token"]
+            async with ClientSession(self.http_url) as session:
+                async with session.post(METRICS_ENDPOINT, headers=headers, data=protobuf_data) as resp:
+                    context.set_trailing_metadata([("http-status", str(resp.status))])
+                    response = ExportMetricsServiceResponse()
+                    if resp.status >= 400:
+                        response.partial_success.rejected_data_points = self._count_data_points(request)
+                        response.partial_success.error_message = f"HTTP {resp.status}: {await resp.text()}"
+                    return response
+        except Exception as e:
+            context.set_trailing_metadata([("http-status", "500"), ("error", str(e))])
+            response = ExportMetricsServiceResponse()
+            response.partial_success.rejected_data_points = self._count_data_points(request)
+            response.partial_success.error_message = f"Forward failed: {str(e)}"
+            return response
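
Not part of the diff: a minimal sketch of the protobuf branch of decode_metrics_request, building a one-datapoint gauge request with the opentelemetry-proto message types this module already depends on. The metric name and value are illustrative.

```python
# Hypothetical example: serialize an ExportMetricsServiceRequest and decode it
# the same way handle_v1_metrics does for application/x-protobuf bodies.
from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2 import ExportMetricsServiceRequest
from opentelemetry.proto.metrics.v1.metrics_pb2 import Gauge
from opentelemetry.proto.metrics.v1.metrics_pb2 import Metric
from opentelemetry.proto.metrics.v1.metrics_pb2 import NumberDataPoint
from opentelemetry.proto.metrics.v1.metrics_pb2 import ResourceMetrics
from opentelemetry.proto.metrics.v1.metrics_pb2 import ScopeMetrics

from ddapm_test_agent.metrics import decode_metrics_request

request = ExportMetricsServiceRequest(
    resource_metrics=[
        ResourceMetrics(
            scope_metrics=[
                ScopeMetrics(
                    metrics=[Metric(name="example.gauge", gauge=Gauge(data_points=[NumberDataPoint(as_int=1)]))]
                )
            ]
        )
    ]
)

decoded = decode_metrics_request(request.SerializeToString(), "application/x-protobuf")
# MessageToDict(..., preserving_proto_field_name=True) keeps snake_case keys.
assert decoded["resource_metrics"][0]["scope_metrics"][0]["metrics"][0]["name"] == "example.gauge"
```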