fenra 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,677 @@
1
+ """Anthropic integration with auto-instrumentation via monkey patching."""
2
+
3
+ import functools
4
+ import logging
5
+ from typing import Any
6
+
7
+ from fenra._context import get_context
8
+ from fenra._core import enqueue_transaction
9
+
10
logger = logging.getLogger(__name__)

# Idempotency flags: each patch_* function sets its flag so a second call is a
# no-op, and the matching unpatch_* function clears it.
_patched_sync = False
_patched_async = False
_patched_stream_sync = False
_patched_stream_async = False

# Store original methods for unpatching
_original_create: Any = None
_original_create_async: Any = None
_original_stream: Any = None
_original_stream_async: Any = None
22
+
23
+
24
+ def _detect_web_search(response: Any, tools: list[Any] | None = None) -> dict[str, Any] | None:
25
+ """
26
+ Detect web search tool usage from response content blocks.
27
+
28
+ Returns a requests usage entry if web search was used, None otherwise.
29
+ """
30
+ if not hasattr(response, "content") or response.content is None:
31
+ return None
32
+
33
+ # Check for web_search_tool_result blocks in response content
34
+ search_count = sum(
35
+ 1
36
+ for block in response.content
37
+ if getattr(block, "type", None) == "web_search_tool_result"
38
+ )
39
+
40
+ # Fallback: check for tool_use blocks with name == "web_search"
41
+ if search_count == 0:
42
+ search_count = sum(
43
+ 1
44
+ for block in response.content
45
+ if getattr(block, "type", None) == "tool_use"
46
+ and getattr(block, "name", None) == "web_search"
47
+ )
48
+
49
+ # If still no count but web search tool was in the request, default to 1
50
+ if search_count == 0 and tools:
51
+ for tool in tools:
52
+ tool_type = tool.get("type", "") if isinstance(tool, dict) else getattr(tool, "type", "")
53
+ if "web_search" in str(tool_type):
54
+ search_count = 1
55
+ break
56
+
57
+ if search_count > 0:
58
+ return {
59
+ "type": "requests",
60
+ "metrics": {"count": search_count, "request_type": "web_search"},
61
+ }
62
+
63
+ return None
64
+
65
+
66
def _track_message_response(
    response: Any,
    model: str,
    context: dict[str, Any],
    tools: list[Any] | None = None,
) -> None:
    """Extract usage from Anthropic Message response and queue transaction."""
    usage = getattr(response, "usage", None)
    if usage is None:
        return

    prompt_tokens = getattr(usage, "input_tokens", 0) or 0
    completion_tokens = getattr(usage, "output_tokens", 0) or 0

    token_metrics: dict[str, Any] = {
        "input_tokens": prompt_tokens,
        "output_tokens": completion_tokens,
        "total_tokens": prompt_tokens + completion_tokens,
    }

    # Prompt-caching tokens, when the API reports them.
    cache_read = getattr(usage, "cache_read_input_tokens", 0) or 0
    cache_creation = getattr(usage, "cache_creation_input_tokens", 0) or 0
    if cache_read or cache_creation:
        token_metrics["cached_tokens"] = cache_read + cache_creation

    entries: list[dict[str, Any]] = [{"type": "tokens", "metrics": token_metrics}]

    # Append a requests entry when web search was detected in the response.
    search_entry = _detect_web_search(response, tools)
    if search_entry is not None:
        entries.append(search_entry)

    txn: dict[str, Any] = {
        "provider": "anthropic",
        "model": model,
        "usage": entries,
        "context": context,
    }

    # Raw provider usage aids debugging; never let it break tracking.
    try:
        txn["provider_usage_raw"] = {
            "input_tokens": getattr(usage, "input_tokens", None),
            "output_tokens": getattr(usage, "output_tokens", None),
            "cache_read_input_tokens": getattr(usage, "cache_read_input_tokens", None),
            "cache_creation_input_tokens": getattr(usage, "cache_creation_input_tokens", None),
        }
    except Exception:
        pass

    enqueue_transaction(txn)
124
+
125
+
126
def _track_stream(
    stream: Any,
    model: str,
    context: dict[str, Any],
    tools: list[Any] | None = None,
) -> Any:
    """
    Wrap a streaming response to track usage from the final chunk.

    For Anthropic's Stream[RawMessageStreamEvent], we need to accumulate
    usage from both message_start and message_delta events. The message_start
    contains initial input_tokens, while message_delta at the end contains
    the final output_tokens.
    """
    # Message object from message_start (its usage carries input_tokens).
    accumulated_message = None
    # Usage from the last message_delta seen (carries final output_tokens).
    final_usage = None

    def wrapped_stream() -> Any:
        nonlocal accumulated_message, final_usage
        try:
            for event in stream:
                if hasattr(event, "type"):
                    if event.type == "message_start" and hasattr(event, "message"):
                        accumulated_message = event.message
                    elif event.type == "message_delta" and hasattr(event, "usage"):
                        # Capture usage from delta - this has final output_tokens
                        if event.usage:
                            final_usage = event.usage
                yield event
        finally:
            # After stream is exhausted, track usage.
            # NOTE(review): finally also runs if the consumer closes/abandons
            # the generator early, so partial usage may be recorded.
            if accumulated_message:
                # Merge final_usage into accumulated_message if available
                if final_usage and hasattr(accumulated_message, "usage") and accumulated_message.usage:
                    # Update output_tokens from delta
                    output_tokens = getattr(final_usage, "output_tokens", None)
                    if output_tokens is not None:
                        accumulated_message.usage.output_tokens = output_tokens
                try:
                    _track_message_response(accumulated_message, model, context, tools)
                except Exception as e:
                    logger.error(
                        f"Error tracking Anthropic streaming usage: {e}", exc_info=True
                    )

    # Return the wrapping generator; iteration is transparent to the caller.
    return wrapped_stream()
172
+
173
+
174
async def _track_stream_async(
    stream: Any,
    model: str,
    context: dict[str, Any],
    tools: list[Any] | None = None,
) -> Any:
    """
    Wrap an async streaming response to track usage from the final chunk.

    For Anthropic's async stream, we need to accumulate usage from both
    message_start and message_delta events. The message_start contains
    initial input_tokens, while message_delta at the end contains
    the final output_tokens.
    """
    # Message object from message_start (its usage carries input_tokens).
    accumulated_message = None
    # Usage from the last message_delta seen (carries final output_tokens).
    final_usage = None

    try:
        async for event in stream:
            if hasattr(event, "type"):
                if event.type == "message_start" and hasattr(event, "message"):
                    accumulated_message = event.message
                elif event.type == "message_delta" and hasattr(event, "usage"):
                    # Capture usage from delta - this has final output_tokens
                    if event.usage:
                        final_usage = event.usage
            yield event
    finally:
        # After stream is exhausted, track usage.
        # NOTE(review): finally also runs if the consumer closes/abandons
        # the async generator early, so partial usage may be recorded.
        if accumulated_message:
            # Merge final_usage into accumulated_message if available
            if final_usage and hasattr(accumulated_message, "usage") and accumulated_message.usage:
                # Update output_tokens from delta
                output_tokens = getattr(final_usage, "output_tokens", None)
                if output_tokens is not None:
                    accumulated_message.usage.output_tokens = output_tokens
            try:
                _track_message_response(accumulated_message, model, context, tools)
            except Exception as e:
                logger.error(
                    f"Error tracking Anthropic async streaming usage: {e}", exc_info=True
                )
216
+
217
+
218
class _TrackedMessageStreamManager:
    """
    Wrapper for MessageStreamManager that tracks usage after stream completion.
    """

    def __init__(
        self,
        manager: Any,
        model: str,
        context: dict[str, Any],
        tools: list[Any] | None = None,
    ):
        # Delegate the context-manager lifecycle to the wrapped manager and
        # remember the inputs needed to attribute usage.
        self._manager = manager
        self._model = model
        self._context = context
        self._tools = tools

    def __enter__(self) -> Any:
        entered = self._manager.__enter__()
        self._stream = entered
        # Hand callers a tracking proxy instead of the raw stream.
        return _TrackedMessageStream(entered, self._model, self._context, self._tools)

    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any:
        return self._manager.__exit__(exc_type, exc_val, exc_tb)
243
+
244
+
245
class _TrackedMessageStream:
    """
    Wrapper for MessageStream that tracks usage when get_final_message is called.
    """

    def __init__(
        self,
        stream: Any,
        model: str,
        context: dict[str, Any],
        tools: list[Any] | None = None,
    ):
        self._stream = stream
        self._model = model
        self._context = context
        self._tools = tools
        # Guard so usage is enqueued at most once per stream.
        self._tracked = False

    def __iter__(self) -> Any:
        # Iteration is delegated; usage is only tracked via get_final_message.
        return iter(self._stream)

    def __getattr__(self, name: str) -> Any:
        # Only reached for names not set in __init__, so all other stream
        # attributes/methods pass through untouched.
        attr = getattr(self._stream, name)

        if name == "get_final_message" and not self._tracked:

            @functools.wraps(attr)
            def tracked_get_final_message(*args: Any, **kwargs: Any) -> Any:
                result = attr(*args, **kwargs)
                # Re-check inside the call: the wrapper may be fetched once
                # and invoked multiple times.
                if not self._tracked:
                    self._tracked = True
                    try:
                        _track_message_response(
                            result, self._model, self._context, self._tools
                        )
                    except Exception as e:
                        logger.error(
                            f"Error tracking Anthropic stream usage: {e}", exc_info=True
                        )
                return result

            return tracked_get_final_message

        return attr
289
+
290
+
291
class _TrackedAsyncMessageStreamManager:
    """
    Wrapper for AsyncMessageStreamManager that tracks usage after stream completion.
    """

    def __init__(
        self,
        manager: Any,
        model: str,
        context: dict[str, Any],
        tools: list[Any] | None = None,
    ):
        # Delegate the async context-manager lifecycle to the wrapped manager
        # and remember the inputs needed to attribute usage.
        self._manager = manager
        self._model = model
        self._context = context
        self._tools = tools

    async def __aenter__(self) -> Any:
        entered = await self._manager.__aenter__()
        self._stream = entered
        # Hand callers a tracking proxy instead of the raw stream.
        return _TrackedAsyncMessageStream(entered, self._model, self._context, self._tools)

    async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any:
        return await self._manager.__aexit__(exc_type, exc_val, exc_tb)
316
+
317
+
318
class _TrackedAsyncMessageStream:
    """
    Wrapper for AsyncMessageStream that tracks usage when get_final_message is called.
    """

    def __init__(
        self,
        stream: Any,
        model: str,
        context: dict[str, Any],
        tools: list[Any] | None = None,
    ):
        self._stream = stream
        self._model = model
        self._context = context
        self._tools = tools
        # Guard so usage is enqueued at most once per stream.
        self._tracked = False

    def __aiter__(self) -> Any:
        # Iteration is delegated; usage is only tracked via get_final_message.
        return self._stream.__aiter__()

    def __getattr__(self, name: str) -> Any:
        # Only reached for names not set in __init__, so all other stream
        # attributes/methods pass through untouched.
        attr = getattr(self._stream, name)

        if name == "get_final_message" and not self._tracked:

            @functools.wraps(attr)
            async def tracked_get_final_message(*args: Any, **kwargs: Any) -> Any:
                result = await attr(*args, **kwargs)
                # Re-check inside the call: the wrapper may be fetched once
                # and invoked multiple times.
                if not self._tracked:
                    self._tracked = True
                    try:
                        _track_message_response(
                            result, self._model, self._context, self._tools
                        )
                    except Exception as e:
                        logger.error(
                            f"Error tracking Anthropic async stream usage: {e}",
                            exc_info=True,
                        )
                return result

            return tracked_get_final_message

        return attr
363
+
364
+
365
+ # ============================================================================
366
+ # Messages.create Patching (sync)
367
+ # ============================================================================
368
+
369
+
370
def patch_anthropic() -> None:
    """
    Patch Anthropic Messages client to auto-track usage.

    This patches the synchronous `Messages.create` method.
    """
    global _patched_sync, _original_create

    if _patched_sync:
        logger.debug("Anthropic Messages sync already patched, skipping")
        return

    try:
        from anthropic.resources.messages.messages import Messages
    except ImportError:
        logger.warning(
            "Anthropic SDK not installed. Install with: pip install 'fenra[anthropic]'"
        )
        return

    original_create = Messages.create
    _original_create = original_create

    @functools.wraps(original_create)
    def patched_create(self: Messages, *args: Any, **kwargs: Any) -> Any:
        wants_stream = kwargs.get("stream", False)

        response = original_create(self, *args, **kwargs)

        # Resolve tracking inputs once; both branches need them.
        context = get_context()
        model = kwargs.get("model") or getattr(response, "model", "unknown")
        tools = kwargs.get("tools")

        if wants_stream:
            # Streaming responses report usage incrementally; wrap the iterator.
            try:
                return _track_stream(response, model, context, tools)
            except Exception as e:
                logger.error(f"Error wrapping Anthropic stream: {e}", exc_info=True)
            return response

        try:
            _track_message_response(response, model, context, tools)
        except Exception as e:
            logger.error(f"Error tracking Anthropic usage: {e}", exc_info=True)

        return response

    Messages.create = patched_create  # type: ignore[assignment]
    _patched_sync = True
    logger.info("Anthropic Messages SDK patched for auto-instrumentation")
424
+
425
+
426
+ # ============================================================================
427
+ # AsyncMessages.create Patching (async)
428
+ # ============================================================================
429
+
430
+
431
def patch_anthropic_async() -> None:
    """
    Patch Anthropic Messages async client to auto-track usage.

    This patches the asynchronous `AsyncMessages.create` method.
    """
    global _patched_async, _original_create_async

    if _patched_async:
        logger.debug("Anthropic Messages async already patched, skipping")
        return

    try:
        from anthropic.resources.messages.messages import AsyncMessages
    except ImportError:
        logger.warning(
            "Anthropic SDK not installed. Install with: pip install 'fenra[anthropic]'"
        )
        return

    original_create = AsyncMessages.create
    _original_create_async = original_create

    @functools.wraps(original_create)
    async def patched_create(self: AsyncMessages, *args: Any, **kwargs: Any) -> Any:
        wants_stream = kwargs.get("stream", False)

        response = await original_create(self, *args, **kwargs)

        # Resolve tracking inputs once; both branches need them.
        context = get_context()
        model = kwargs.get("model") or getattr(response, "model", "unknown")
        tools = kwargs.get("tools")

        if wants_stream:
            # Streaming responses report usage incrementally; wrap the iterator.
            try:
                return _track_stream_async(response, model, context, tools)
            except Exception as e:
                logger.error(
                    f"Error wrapping Anthropic async stream: {e}", exc_info=True
                )
            return response

        try:
            _track_message_response(response, model, context, tools)
        except Exception as e:
            logger.error(f"Error tracking Anthropic async usage: {e}", exc_info=True)

        return response

    AsyncMessages.create = patched_create  # type: ignore[assignment]
    _patched_async = True
    logger.info("Anthropic Messages async SDK patched for auto-instrumentation")
487
+
488
+
489
+ # ============================================================================
490
+ # Messages.stream Patching (sync)
491
+ # ============================================================================
492
+
493
+
494
def patch_anthropic_stream() -> None:
    """
    Patch Anthropic Messages.stream to auto-track usage.

    This patches the synchronous `Messages.stream` method which returns
    a MessageStreamManager context manager.
    """
    global _patched_stream_sync, _original_stream

    if _patched_stream_sync:
        logger.debug("Anthropic Messages.stream sync already patched, skipping")
        return

    try:
        from anthropic.resources.messages.messages import Messages
    except ImportError:
        logger.warning(
            "Anthropic SDK not installed. Install with: pip install 'fenra[anthropic]'"
        )
        return

    # Older SDK versions may not expose a stream method.
    if not hasattr(Messages, "stream"):
        logger.debug("Anthropic Messages has no stream method, skipping")
        return

    original_stream = Messages.stream
    _original_stream = original_stream

    @functools.wraps(original_stream)
    def patched_stream(self: Messages, *args: Any, **kwargs: Any) -> Any:
        manager = original_stream(self, *args, **kwargs)

        ctx = get_context()
        requested_model = kwargs.get("model", "unknown")
        requested_tools = kwargs.get("tools")

        try:
            # Wrap the manager so the entered stream is a tracking proxy.
            return _TrackedMessageStreamManager(manager, requested_model, ctx, requested_tools)
        except Exception as e:
            logger.error(f"Error wrapping Anthropic stream manager: {e}", exc_info=True)
            return manager

    Messages.stream = patched_stream  # type: ignore[assignment]
    _patched_stream_sync = True
    logger.info("Anthropic Messages.stream SDK patched for auto-instrumentation")
539
+
540
+
541
+ # ============================================================================
542
+ # AsyncMessages.stream Patching (async)
543
+ # ============================================================================
544
+
545
+
546
def patch_anthropic_stream_async() -> None:
    """
    Patch Anthropic AsyncMessages.stream to auto-track usage.

    This patches the asynchronous `AsyncMessages.stream` method which returns
    an AsyncMessageStreamManager context manager.
    """
    global _patched_stream_async, _original_stream_async

    if _patched_stream_async:
        logger.debug("Anthropic AsyncMessages.stream async already patched, skipping")
        return

    try:
        from anthropic.resources.messages.messages import AsyncMessages
    except ImportError:
        logger.warning(
            "Anthropic SDK not installed. Install with: pip install 'fenra[anthropic]'"
        )
        return

    # Older SDK versions may not expose a stream method.
    if not hasattr(AsyncMessages, "stream"):
        logger.debug("Anthropic AsyncMessages has no stream method, skipping")
        return

    original_stream = AsyncMessages.stream
    _original_stream_async = original_stream

    @functools.wraps(original_stream)
    def patched_stream(self: AsyncMessages, *args: Any, **kwargs: Any) -> Any:
        manager = original_stream(self, *args, **kwargs)

        ctx = get_context()
        requested_model = kwargs.get("model", "unknown")
        requested_tools = kwargs.get("tools")

        try:
            # Wrap the manager so the entered stream is a tracking proxy.
            return _TrackedAsyncMessageStreamManager(manager, requested_model, ctx, requested_tools)
        except Exception as e:
            logger.error(
                f"Error wrapping Anthropic async stream manager: {e}", exc_info=True
            )
            return manager

    AsyncMessages.stream = patched_stream  # type: ignore[assignment]
    _patched_stream_async = True
    logger.info("Anthropic AsyncMessages.stream SDK patched for auto-instrumentation")
593
+
594
+
595
+ # ============================================================================
596
+ # Unpatch Functions
597
+ # ============================================================================
598
+
599
+
600
def unpatch_anthropic() -> None:
    """Restore original Anthropic Messages create method."""
    global _patched_sync, _original_create

    # Nothing to restore unless the sync create patch was applied.
    if not (_patched_sync and _original_create is not None):
        return

    try:
        from anthropic.resources.messages.messages import Messages
    except ImportError:
        return

    Messages.create = _original_create  # type: ignore[assignment]
    _patched_sync = False
    _original_create = None
    logger.info("Anthropic Messages SDK unpatched")
616
+
617
+
618
def unpatch_anthropic_async() -> None:
    """Restore original Anthropic Messages async create method."""
    global _patched_async, _original_create_async

    # Nothing to restore unless the async create patch was applied.
    if not (_patched_async and _original_create_async is not None):
        return

    try:
        from anthropic.resources.messages.messages import AsyncMessages
    except ImportError:
        return

    AsyncMessages.create = _original_create_async  # type: ignore[assignment]
    _patched_async = False
    _original_create_async = None
    logger.info("Anthropic Messages async SDK unpatched")
634
+
635
+
636
def unpatch_anthropic_stream() -> None:
    """Restore original Anthropic Messages.stream method."""
    global _patched_stream_sync, _original_stream

    # Nothing to restore unless the sync stream patch was applied.
    if not (_patched_stream_sync and _original_stream is not None):
        return

    try:
        from anthropic.resources.messages.messages import Messages
    except ImportError:
        return

    Messages.stream = _original_stream  # type: ignore[assignment]
    _patched_stream_sync = False
    _original_stream = None
    logger.info("Anthropic Messages.stream SDK unpatched")
652
+
653
+
654
def unpatch_anthropic_stream_async() -> None:
    """Restore original Anthropic AsyncMessages.stream method."""
    global _patched_stream_async, _original_stream_async

    # Nothing to restore unless the async stream patch was applied.
    if not (_patched_stream_async and _original_stream_async is not None):
        return

    try:
        from anthropic.resources.messages.messages import AsyncMessages
    except ImportError:
        return

    AsyncMessages.stream = _original_stream_async  # type: ignore[assignment]
    _patched_stream_async = False
    _original_stream_async = None
    logger.info("Anthropic AsyncMessages.stream SDK unpatched")
670
+
671
+
672
def unpatch_anthropic_all() -> None:
    """Restore all original Anthropic SDK methods."""
    # Undo every patch in the same order patches are typically applied.
    for restore in (
        unpatch_anthropic,
        unpatch_anthropic_async,
        unpatch_anthropic_stream,
        unpatch_anthropic_stream_async,
    ):
        restore()