agentrun-sdk 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of agentrun-sdk might be problematic. Click here for more details.

Files changed (115):
  1. agentrun_operation_sdk/cli/__init__.py +1 -0
  2. agentrun_operation_sdk/cli/cli.py +19 -0
  3. agentrun_operation_sdk/cli/common.py +21 -0
  4. agentrun_operation_sdk/cli/runtime/__init__.py +1 -0
  5. agentrun_operation_sdk/cli/runtime/commands.py +203 -0
  6. agentrun_operation_sdk/client/client.py +75 -0
  7. agentrun_operation_sdk/operations/runtime/__init__.py +8 -0
  8. agentrun_operation_sdk/operations/runtime/configure.py +101 -0
  9. agentrun_operation_sdk/operations/runtime/launch.py +82 -0
  10. agentrun_operation_sdk/operations/runtime/models.py +31 -0
  11. agentrun_operation_sdk/services/runtime.py +152 -0
  12. agentrun_operation_sdk/utils/logging_config.py +72 -0
  13. agentrun_operation_sdk/utils/runtime/config.py +94 -0
  14. agentrun_operation_sdk/utils/runtime/container.py +280 -0
  15. agentrun_operation_sdk/utils/runtime/entrypoint.py +203 -0
  16. agentrun_operation_sdk/utils/runtime/schema.py +56 -0
  17. agentrun_sdk/__init__.py +7 -0
  18. agentrun_sdk/agent/__init__.py +25 -0
  19. agentrun_sdk/agent/agent.py +696 -0
  20. agentrun_sdk/agent/agent_result.py +46 -0
  21. agentrun_sdk/agent/conversation_manager/__init__.py +26 -0
  22. agentrun_sdk/agent/conversation_manager/conversation_manager.py +88 -0
  23. agentrun_sdk/agent/conversation_manager/null_conversation_manager.py +46 -0
  24. agentrun_sdk/agent/conversation_manager/sliding_window_conversation_manager.py +179 -0
  25. agentrun_sdk/agent/conversation_manager/summarizing_conversation_manager.py +252 -0
  26. agentrun_sdk/agent/state.py +97 -0
  27. agentrun_sdk/event_loop/__init__.py +9 -0
  28. agentrun_sdk/event_loop/event_loop.py +499 -0
  29. agentrun_sdk/event_loop/streaming.py +319 -0
  30. agentrun_sdk/experimental/__init__.py +4 -0
  31. agentrun_sdk/experimental/hooks/__init__.py +15 -0
  32. agentrun_sdk/experimental/hooks/events.py +123 -0
  33. agentrun_sdk/handlers/__init__.py +10 -0
  34. agentrun_sdk/handlers/callback_handler.py +70 -0
  35. agentrun_sdk/hooks/__init__.py +49 -0
  36. agentrun_sdk/hooks/events.py +80 -0
  37. agentrun_sdk/hooks/registry.py +247 -0
  38. agentrun_sdk/models/__init__.py +10 -0
  39. agentrun_sdk/models/anthropic.py +432 -0
  40. agentrun_sdk/models/bedrock.py +649 -0
  41. agentrun_sdk/models/litellm.py +225 -0
  42. agentrun_sdk/models/llamaapi.py +438 -0
  43. agentrun_sdk/models/mistral.py +539 -0
  44. agentrun_sdk/models/model.py +95 -0
  45. agentrun_sdk/models/ollama.py +357 -0
  46. agentrun_sdk/models/openai.py +436 -0
  47. agentrun_sdk/models/sagemaker.py +598 -0
  48. agentrun_sdk/models/writer.py +449 -0
  49. agentrun_sdk/multiagent/__init__.py +22 -0
  50. agentrun_sdk/multiagent/a2a/__init__.py +15 -0
  51. agentrun_sdk/multiagent/a2a/executor.py +148 -0
  52. agentrun_sdk/multiagent/a2a/server.py +252 -0
  53. agentrun_sdk/multiagent/base.py +92 -0
  54. agentrun_sdk/multiagent/graph.py +555 -0
  55. agentrun_sdk/multiagent/swarm.py +656 -0
  56. agentrun_sdk/py.typed +1 -0
  57. agentrun_sdk/session/__init__.py +18 -0
  58. agentrun_sdk/session/file_session_manager.py +216 -0
  59. agentrun_sdk/session/repository_session_manager.py +152 -0
  60. agentrun_sdk/session/s3_session_manager.py +272 -0
  61. agentrun_sdk/session/session_manager.py +73 -0
  62. agentrun_sdk/session/session_repository.py +51 -0
  63. agentrun_sdk/telemetry/__init__.py +21 -0
  64. agentrun_sdk/telemetry/config.py +194 -0
  65. agentrun_sdk/telemetry/metrics.py +476 -0
  66. agentrun_sdk/telemetry/metrics_constants.py +15 -0
  67. agentrun_sdk/telemetry/tracer.py +563 -0
  68. agentrun_sdk/tools/__init__.py +17 -0
  69. agentrun_sdk/tools/decorator.py +569 -0
  70. agentrun_sdk/tools/executor.py +137 -0
  71. agentrun_sdk/tools/loader.py +152 -0
  72. agentrun_sdk/tools/mcp/__init__.py +13 -0
  73. agentrun_sdk/tools/mcp/mcp_agent_tool.py +99 -0
  74. agentrun_sdk/tools/mcp/mcp_client.py +423 -0
  75. agentrun_sdk/tools/mcp/mcp_instrumentation.py +322 -0
  76. agentrun_sdk/tools/mcp/mcp_types.py +63 -0
  77. agentrun_sdk/tools/registry.py +607 -0
  78. agentrun_sdk/tools/structured_output.py +421 -0
  79. agentrun_sdk/tools/tools.py +217 -0
  80. agentrun_sdk/tools/watcher.py +136 -0
  81. agentrun_sdk/types/__init__.py +5 -0
  82. agentrun_sdk/types/collections.py +23 -0
  83. agentrun_sdk/types/content.py +188 -0
  84. agentrun_sdk/types/event_loop.py +48 -0
  85. agentrun_sdk/types/exceptions.py +81 -0
  86. agentrun_sdk/types/guardrails.py +254 -0
  87. agentrun_sdk/types/media.py +89 -0
  88. agentrun_sdk/types/session.py +152 -0
  89. agentrun_sdk/types/streaming.py +201 -0
  90. agentrun_sdk/types/tools.py +258 -0
  91. agentrun_sdk/types/traces.py +5 -0
  92. agentrun_sdk-0.1.2.dist-info/METADATA +51 -0
  93. agentrun_sdk-0.1.2.dist-info/RECORD +115 -0
  94. agentrun_sdk-0.1.2.dist-info/WHEEL +5 -0
  95. agentrun_sdk-0.1.2.dist-info/entry_points.txt +2 -0
  96. agentrun_sdk-0.1.2.dist-info/top_level.txt +3 -0
  97. agentrun_wrapper/__init__.py +11 -0
  98. agentrun_wrapper/_utils/__init__.py +6 -0
  99. agentrun_wrapper/_utils/endpoints.py +16 -0
  100. agentrun_wrapper/identity/__init__.py +5 -0
  101. agentrun_wrapper/identity/auth.py +211 -0
  102. agentrun_wrapper/memory/__init__.py +6 -0
  103. agentrun_wrapper/memory/client.py +1697 -0
  104. agentrun_wrapper/memory/constants.py +103 -0
  105. agentrun_wrapper/memory/controlplane.py +626 -0
  106. agentrun_wrapper/py.typed +1 -0
  107. agentrun_wrapper/runtime/__init__.py +13 -0
  108. agentrun_wrapper/runtime/app.py +473 -0
  109. agentrun_wrapper/runtime/context.py +34 -0
  110. agentrun_wrapper/runtime/models.py +25 -0
  111. agentrun_wrapper/services/__init__.py +1 -0
  112. agentrun_wrapper/services/identity.py +192 -0
  113. agentrun_wrapper/tools/__init__.py +6 -0
  114. agentrun_wrapper/tools/browser_client.py +325 -0
  115. agentrun_wrapper/tools/code_interpreter_client.py +186 -0
@@ -0,0 +1,476 @@
1
+ """Utilities for collecting and reporting performance metrics in the SDK."""
2
+
3
+ import logging
4
+ import time
5
+ import uuid
6
+ from dataclasses import dataclass, field
7
+ from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
8
+
9
+ import opentelemetry.metrics as metrics_api
10
+ from opentelemetry.metrics import Counter, Histogram, Meter
11
+
12
+ from ..telemetry import metrics_constants as constants
13
+ from ..types.content import Message
14
+ from ..types.streaming import Metrics, Usage
15
+ from ..types.tools import ToolUse
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
+
20
class Trace:
    """A single node in the execution trace tree: one operation or step."""

    def __init__(
        self,
        name: str,
        parent_id: Optional[str] = None,
        start_time: Optional[float] = None,
        raw_name: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
        message: Optional[Message] = None,
    ) -> None:
        """Create a new trace node.

        Args:
            name: Human-readable name of the operation being traced.
            parent_id: ID of the parent trace when this is a child operation.
            start_time: Timestamp when the trace started; defaults to the
                current time when omitted.
            raw_name: System-level name.
            metadata: Additional contextual information about the trace.
            message: Message associated with the trace.
        """
        # Each node gets a fresh UUID so traces can be cross-referenced.
        self.id: str = str(uuid.uuid4())
        self.name: str = name
        self.raw_name: Optional[str] = raw_name
        self.parent_id: Optional[str] = parent_id
        self.start_time: float = time.time() if start_time is None else start_time
        self.end_time: Optional[float] = None
        self.children: List["Trace"] = []
        self.metadata: Dict[str, Any] = metadata or {}
        self.message: Optional[Message] = message

    def end(self, end_time: Optional[float] = None) -> None:
        """Mark the trace as complete.

        Args:
            end_time: Timestamp to record as the end time; defaults to the
                current time when omitted.
        """
        self.end_time = time.time() if end_time is None else end_time

    def add_child(self, child: "Trace") -> None:
        """Attach a child trace under this node.

        Args:
            child: The child trace to add.
        """
        self.children.append(child)

    def duration(self) -> Optional[float]:
        """Return the elapsed time of this trace in seconds.

        Returns:
            Seconds between start and end, or None while the trace is open.
        """
        if self.end_time is None:
            return None
        return self.end_time - self.start_time

    def add_message(self, message: Message) -> None:
        """Associate a message with this trace.

        Args:
            message: The message to attach.
        """
        self.message = message

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this trace (recursively) into a plain dictionary.

        Returns:
            A dictionary with all trace fields, suitable for serialization;
            children are serialized depth-first via their own to_dict().
        """
        serialized_children = [node.to_dict() for node in self.children]
        return {
            "id": self.id,
            "name": self.name,
            "raw_name": self.raw_name,
            "parent_id": self.parent_id,
            "start_time": self.start_time,
            "end_time": self.end_time,
            "duration": self.duration(),
            "children": serialized_children,
            "metadata": self.metadata,
            "message": self.message,
        }
104
+
105
+
106
@dataclass
class ToolMetrics:
    """Usage metrics for one tool.

    Attributes:
        tool: The tool being tracked (latest observed state).
        call_count: Number of times the tool has been called.
        success_count: Number of successful tool calls.
        error_count: Number of failed tool calls.
        total_time: Total execution time across all calls in seconds.
    """

    tool: ToolUse
    call_count: int = 0
    success_count: int = 0
    error_count: int = 0
    total_time: float = 0.0

    def add_call(
        self,
        tool: ToolUse,
        duration: float,
        success: bool,
        metrics_client: "MetricsClient",
        attributes: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Record one tool call and emit the matching OTel metrics.

        Args:
            tool: The tool that was called.
            duration: How long the call took in seconds.
            success: Whether the call was successful.
            metrics_client: The metrics client used to emit the instruments.
            attributes: Attributes to attach to the emitted metrics.
        """
        # Keep the most recent tool state for later reporting.
        self.tool = tool
        self.call_count += 1
        self.total_time += duration
        metrics_client.tool_call_count.add(1, attributes=attributes)
        metrics_client.tool_duration.record(duration, attributes=attributes)
        # Pick the outcome counter once, then bump the local tally to match.
        if success:
            self.success_count += 1
            outcome_counter = metrics_client.tool_success_count
        else:
            self.error_count += 1
            outcome_counter = metrics_client.tool_error_count
        outcome_counter.add(1, attributes=attributes)
152
+
153
+
154
@dataclass
class EventLoopMetrics:
    """Aggregated metrics for one event loop's execution.

    Attributes:
        cycle_count: Number of event loop cycles executed.
        tool_metrics: Per-tool metrics, keyed by tool name.
        cycle_durations: Duration of each cycle in seconds.
        traces: Execution traces collected so far.
        accumulated_usage: Token usage accumulated across model invocations.
        accumulated_metrics: Performance metrics accumulated across model invocations.
    """

    cycle_count: int = 0
    tool_metrics: Dict[str, ToolMetrics] = field(default_factory=dict)
    cycle_durations: List[float] = field(default_factory=list)
    traces: List[Trace] = field(default_factory=list)
    accumulated_usage: Usage = field(default_factory=lambda: Usage(inputTokens=0, outputTokens=0, totalTokens=0))
    accumulated_metrics: Metrics = field(default_factory=lambda: Metrics(latencyMs=0))

    @property
    def _metrics_client(self) -> "MetricsClient":
        """Return the process-wide singleton MetricsClient."""
        return MetricsClient()

    def start_cycle(
        self,
        attributes: Optional[Dict[str, Any]] = None,
    ) -> Tuple[float, Trace]:
        """Begin a new event loop cycle and open a trace for it.

        Args:
            attributes: Attributes to attach to the emitted metrics.

        Returns:
            A tuple of (start timestamp, trace for this cycle).
        """
        client = self._metrics_client
        client.event_loop_cycle_count.add(1, attributes=attributes)
        client.event_loop_start_cycle.add(1, attributes=attributes)
        self.cycle_count += 1
        started_at = time.time()
        cycle_trace = Trace(f"Cycle {self.cycle_count}", start_time=started_at)
        self.traces.append(cycle_trace)
        return started_at, cycle_trace

    def end_cycle(self, start_time: float, cycle_trace: Trace, attributes: Optional[Dict[str, Any]] = None) -> None:
        """Close the current cycle and record its duration.

        Args:
            start_time: Timestamp returned by start_cycle.
            cycle_trace: Trace object returned by start_cycle.
            attributes: Attributes to attach to the emitted metrics.
        """
        client = self._metrics_client
        client.event_loop_end_cycle.add(1, attributes=attributes)
        finished_at = time.time()
        elapsed = finished_at - start_time
        client.event_loop_cycle_duration.record(elapsed, attributes=attributes)
        self.cycle_durations.append(elapsed)
        cycle_trace.end(finished_at)

    def add_tool_usage(
        self,
        tool: ToolUse,
        duration: float,
        tool_trace: Trace,
        success: bool,
        message: Message,
    ) -> None:
        """Record metrics and trace details for one tool invocation.

        Args:
            tool: The tool that was used.
            duration: How long the tool call took in seconds.
            tool_trace: The trace object for this tool call.
            success: Whether the tool call was successful.
            message: The message associated with the tool call.
        """
        tool_name = tool.get("name", "unknown_tool")
        tool_use_id = tool.get("toolUseId", "unknown")

        # Annotate the trace so formatters can match it back to this tool.
        tool_trace.metadata["toolUseId"] = tool_use_id
        tool_trace.metadata["tool_name"] = tool_name
        tool_trace.raw_name = f"{tool_name} - {tool_use_id}"
        tool_trace.add_message(message)

        per_tool = self.tool_metrics.get(tool_name)
        if per_tool is None:
            per_tool = ToolMetrics(tool)
            self.tool_metrics[tool_name] = per_tool
        per_tool.add_call(
            tool,
            duration,
            success,
            self._metrics_client,
            attributes={
                "tool_name": tool_name,
                "tool_use_id": tool_use_id,
            },
        )
        tool_trace.end()

    def update_usage(self, usage: Usage) -> None:
        """Fold new token usage into the accumulated totals.

        Args:
            usage: The usage data to add.
        """
        client = self._metrics_client
        client.event_loop_input_tokens.record(usage["inputTokens"])
        client.event_loop_output_tokens.record(usage["outputTokens"])
        for token_key in ("inputTokens", "outputTokens", "totalTokens"):
            self.accumulated_usage[token_key] += usage[token_key]

    def update_metrics(self, metrics: Metrics) -> None:
        """Fold new performance metrics into the accumulated totals.

        Args:
            metrics: The metrics data to add.
        """
        latency = metrics["latencyMs"]
        self._metrics_client.event_loop_latency.record(latency)
        self.accumulated_metrics["latencyMs"] += latency

    def get_summary(self) -> Dict[str, Any]:
        """Build a comprehensive summary of all collected metrics.

        Returns:
            A dictionary with cycle statistics, per-tool usage, serialized
            traces, and accumulated usage/metrics.
        """
        total_duration = sum(self.cycle_durations)
        average_cycle_time = total_duration / self.cycle_count if self.cycle_count > 0 else 0

        tool_usage: Dict[str, Any] = {}
        for tool_name, tm in self.tool_metrics.items():
            calls = tm.call_count
            tool_usage[tool_name] = {
                "tool_info": {
                    "tool_use_id": tm.tool.get("toolUseId", "N/A"),
                    "name": tm.tool.get("name", "unknown"),
                    "input_params": tm.tool.get("input", {}),
                },
                "execution_stats": {
                    "call_count": calls,
                    "success_count": tm.success_count,
                    "error_count": tm.error_count,
                    "total_time": tm.total_time,
                    "average_time": tm.total_time / calls if calls > 0 else 0,
                    "success_rate": tm.success_count / calls if calls > 0 else 0,
                },
            }

        return {
            "total_cycles": self.cycle_count,
            "total_duration": total_duration,
            "average_cycle_time": average_cycle_time,
            "tool_usage": tool_usage,
            "traces": [trace.to_dict() for trace in self.traces],
            "accumulated_usage": self.accumulated_usage,
            "accumulated_metrics": self.accumulated_metrics,
        }
310
+
311
+
312
def _metrics_summary_to_lines(event_loop_metrics: EventLoopMetrics, allowed_names: Set[str]) -> Iterable[str]:
    """Convert event loop metrics to a series of formatted text lines.

    Args:
        event_loop_metrics: The metrics to format.
        allowed_names: Set of names that are allowed to be displayed unmodified.
            NOTE(review): only forwarded to _trace_to_lines, which does not use
            it either — confirm whether name redaction is still planned.

    Returns:
        An iterable of formatted text lines representing the metrics.
    """
    summary = event_loop_metrics.get_summary()
    # Header, then aggregate cycle/token/latency stats drawn as a tree.
    # NOTE(review): the leading whitespace inside the tree-drawing literals
    # below may have been collapsed by the diff rendering — verify the
    # intended indentation against the rendered output.
    yield "Event Loop Metrics Summary:"
    yield (
        f"├─ Cycles: total={summary['total_cycles']}, avg_time={summary['average_cycle_time']:.3f}s, "
        f"total_time={summary['total_duration']:.3f}s"
    )
    yield (
        f"├─ Tokens: in={summary['accumulated_usage']['inputTokens']}, "
        f"out={summary['accumulated_usage']['outputTokens']}, "
        f"total={summary['accumulated_usage']['totalTokens']}"
    )
    yield f"├─ Bedrock Latency: {summary['accumulated_metrics']['latencyMs']}ms"

    yield "├─ Tool Usage:"
    # One subtree per tool: stats, timing, then every recorded call.
    for tool_name, tool_data in summary.get("tool_usage", {}).items():
        # tool_info = tool_data["tool_info"]
        exec_stats = tool_data["execution_stats"]

        # Tool header - show just name for multi-call case
        yield f" └─ {tool_name}:"
        # Execution stats
        yield f" ├─ Stats: calls={exec_stats['call_count']}, success={exec_stats['success_count']}"
        yield f" │ errors={exec_stats['error_count']}, success_rate={exec_stats['success_rate']:.1%}"
        yield f" ├─ Timing: avg={exec_stats['average_time']:.3f}s, total={exec_stats['total_time']:.3f}s"
        # All tool calls with their inputs
        yield " └─ Tool Calls:"
        # Show tool use ID and input for each call from the traces.
        # Calls are matched by scanning cycle traces for children whose
        # metadata names this tool (set by EventLoopMetrics.add_tool_usage).
        for trace in event_loop_metrics.traces:
            for child in trace.children:
                if child.metadata.get("tool_name") == tool_name:
                    tool_use_id = child.metadata.get("toolUseId", "unknown")
                    # tool_input = child.metadata.get('tool_input', {})
                    yield f" ├─ {tool_use_id}: {tool_name}"
                    # yield f" │ └─ Input: {json.dumps(tool_input, sort_keys=True)}"

    yield "├─ Execution Trace:"

    # Finally, render each cycle trace recursively.
    for trace in event_loop_metrics.traces:
        yield from _trace_to_lines(trace.to_dict(), allowed_names=allowed_names, indent=1)
361
+
362
+
363
+ def _trace_to_lines(trace: Dict, allowed_names: Set[str], indent: int) -> Iterable[str]:
364
+ """Convert a trace to a series of formatted text lines.
365
+
366
+ Args:
367
+ trace: The trace dictionary to format.
368
+ allowed_names: Set of names that are allowed to be displayed unmodified.
369
+ indent: The indentation level for the output lines.
370
+
371
+ Returns:
372
+ An iterable of formatted text lines representing the trace.
373
+ """
374
+ duration = trace.get("duration", "N/A")
375
+ duration_str = f"{duration:.4f}s" if isinstance(duration, (int, float)) else str(duration)
376
+
377
+ safe_name = trace.get("raw_name", trace.get("name"))
378
+
379
+ tool_use_id = ""
380
+ # Check if this trace contains tool info with toolUseId
381
+ if trace.get("raw_name") and isinstance(safe_name, str) and " - tooluse_" in safe_name:
382
+ # Already includes toolUseId, use as is
383
+ yield f"{' ' * indent}└─ {safe_name} - Duration: {duration_str}"
384
+ else:
385
+ # Extract toolUseId if it exists in metadata
386
+ metadata = trace.get("metadata", {})
387
+ if isinstance(metadata, dict) and metadata.get("toolUseId"):
388
+ tool_use_id = f" - {metadata['toolUseId']}"
389
+ yield f"{' ' * indent}└─ {safe_name}{tool_use_id} - Duration: {duration_str}"
390
+
391
+ for child in trace.get("children", []):
392
+ yield from _trace_to_lines(child, allowed_names, indent + 1)
393
+
394
+
395
def metrics_to_string(event_loop_metrics: EventLoopMetrics, allowed_names: Optional[Set[str]] = None) -> str:
    """Render event loop metrics as one human-readable string.

    Args:
        event_loop_metrics: The metrics to format.
        allowed_names: Set of names that are allowed to be displayed unmodified.

    Returns:
        A newline-joined string representation of the metrics.
    """
    names = allowed_names or set()
    return "\n".join(_metrics_summary_to_lines(event_loop_metrics, names))
406
+
407
+
408
class MetricsClient:
    """Singleton client for managing OpenTelemetry metrics instruments.

    The actual metrics export destination (console, OTLP endpoint, etc.) is configured
    through OpenTelemetry SDK configuration by users, not by this client.
    """

    _instance: Optional["MetricsClient"] = None
    meter: Meter
    event_loop_cycle_count: Counter
    event_loop_start_cycle: Counter
    event_loop_end_cycle: Counter
    event_loop_cycle_duration: Histogram
    event_loop_latency: Histogram
    event_loop_input_tokens: Histogram
    event_loop_output_tokens: Histogram

    tool_call_count: Counter
    tool_success_count: Counter
    tool_error_count: Counter
    tool_duration: Histogram

    def __new__(cls) -> "MetricsClient":
        """Create or return the singleton instance of MetricsClient.

        Returns:
            The single MetricsClient instance.
        """
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self) -> None:
        """Set up the OpenTelemetry meter and instruments, exactly once.

        Because of the singleton __new__, __init__ runs on every
        MetricsClient() call; the `meter` attribute acts as the
        initialized-already guard.
        """
        if hasattr(self, "meter"):
            return

        logger.info("Creating Strands MetricsClient")
        meter_provider: metrics_api.MeterProvider = metrics_api.get_meter_provider()
        self.meter = meter_provider.get_meter(__name__)
        self.create_instruments()

    def create_instruments(self) -> None:
        """Create and initialize all OpenTelemetry metric instruments."""
        # (attribute, instrument kind, metric name, unit); the tuple order
        # preserves the original instrument-creation order.
        specs = (
            ("event_loop_cycle_count", "counter", constants.STRANDS_EVENT_LOOP_CYCLE_COUNT, "Count"),
            ("event_loop_start_cycle", "counter", constants.STRANDS_EVENT_LOOP_START_CYCLE, "Count"),
            ("event_loop_end_cycle", "counter", constants.STRANDS_EVENT_LOOP_END_CYCLE, "Count"),
            ("event_loop_cycle_duration", "histogram", constants.STRANDS_EVENT_LOOP_CYCLE_DURATION, "s"),
            ("event_loop_latency", "histogram", constants.STRANDS_EVENT_LOOP_LATENCY, "ms"),
            ("tool_call_count", "counter", constants.STRANDS_TOOL_CALL_COUNT, "Count"),
            ("tool_success_count", "counter", constants.STRANDS_TOOL_SUCCESS_COUNT, "Count"),
            ("tool_error_count", "counter", constants.STRANDS_TOOL_ERROR_COUNT, "Count"),
            ("tool_duration", "histogram", constants.STRANDS_TOOL_DURATION, "s"),
            ("event_loop_input_tokens", "histogram", constants.STRANDS_EVENT_LOOP_INPUT_TOKENS, "token"),
            ("event_loop_output_tokens", "histogram", constants.STRANDS_EVENT_LOOP_OUTPUT_TOKENS, "token"),
        )
        for attr, kind, metric_name, unit in specs:
            factory = self.meter.create_counter if kind == "counter" else self.meter.create_histogram
            setattr(self, attr, factory(name=metric_name, unit=unit))
@@ -0,0 +1,15 @@
1
+ """Metrics that are emitted in Strands-Agents."""
2
+
3
+ STRANDS_EVENT_LOOP_CYCLE_COUNT = "strands.event_loop.cycle_count"
4
+ STRANDS_EVENT_LOOP_START_CYCLE = "strands.event_loop.start_cycle"
5
+ STRANDS_EVENT_LOOP_END_CYCLE = "strands.event_loop.end_cycle"
6
+ STRANDS_TOOL_CALL_COUNT = "strands.tool.call_count"
7
+ STRANDS_TOOL_SUCCESS_COUNT = "strands.tool.success_count"
8
+ STRANDS_TOOL_ERROR_COUNT = "strands.tool.error_count"
9
+
10
+ # Histograms
11
+ STRANDS_EVENT_LOOP_LATENCY = "strands.event_loop.latency"
12
+ STRANDS_TOOL_DURATION = "strands.tool.duration"
13
+ STRANDS_EVENT_LOOP_CYCLE_DURATION = "strands.event_loop.cycle_duration"
14
+ STRANDS_EVENT_LOOP_INPUT_TOKENS = "strands.event_loop.input.tokens"
15
+ STRANDS_EVENT_LOOP_OUTPUT_TOKENS = "strands.event_loop.output.tokens"