netra-sdk 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this version of netra-sdk has been flagged as potentially problematic.

Files changed (42)
  1. netra/__init__.py +148 -0
  2. netra/anonymizer/__init__.py +7 -0
  3. netra/anonymizer/anonymizer.py +79 -0
  4. netra/anonymizer/base.py +159 -0
  5. netra/anonymizer/fp_anonymizer.py +182 -0
  6. netra/config.py +111 -0
  7. netra/decorators.py +167 -0
  8. netra/exceptions/__init__.py +6 -0
  9. netra/exceptions/injection.py +33 -0
  10. netra/exceptions/pii.py +46 -0
  11. netra/input_scanner.py +142 -0
  12. netra/instrumentation/__init__.py +257 -0
  13. netra/instrumentation/aiohttp/__init__.py +378 -0
  14. netra/instrumentation/aiohttp/version.py +1 -0
  15. netra/instrumentation/cohere/__init__.py +446 -0
  16. netra/instrumentation/cohere/version.py +1 -0
  17. netra/instrumentation/google_genai/__init__.py +506 -0
  18. netra/instrumentation/google_genai/config.py +5 -0
  19. netra/instrumentation/google_genai/utils.py +31 -0
  20. netra/instrumentation/google_genai/version.py +1 -0
  21. netra/instrumentation/httpx/__init__.py +545 -0
  22. netra/instrumentation/httpx/version.py +1 -0
  23. netra/instrumentation/instruments.py +78 -0
  24. netra/instrumentation/mistralai/__init__.py +545 -0
  25. netra/instrumentation/mistralai/config.py +5 -0
  26. netra/instrumentation/mistralai/utils.py +30 -0
  27. netra/instrumentation/mistralai/version.py +1 -0
  28. netra/instrumentation/weaviate/__init__.py +121 -0
  29. netra/instrumentation/weaviate/version.py +1 -0
  30. netra/pii.py +757 -0
  31. netra/processors/__init__.py +4 -0
  32. netra/processors/session_span_processor.py +55 -0
  33. netra/processors/span_aggregation_processor.py +365 -0
  34. netra/scanner.py +104 -0
  35. netra/session.py +185 -0
  36. netra/session_manager.py +96 -0
  37. netra/tracer.py +99 -0
  38. netra/version.py +1 -0
  39. netra_sdk-0.1.0.dist-info/LICENCE +201 -0
  40. netra_sdk-0.1.0.dist-info/METADATA +573 -0
  41. netra_sdk-0.1.0.dist-info/RECORD +42 -0
  42. netra_sdk-0.1.0.dist-info/WHEEL +4 -0

netra/instrumentation/mistralai/__init__.py

@@ -0,0 +1,545 @@
+ """OpenTelemetry Mistral AI instrumentation"""
+
+ import json
+ import logging
+ import os
+ from typing import Any, AsyncGenerator, Callable, Collection, Dict, Generator, Optional, Tuple, Union
+
+ from mistralai import AssistantMessage, ChatCompletionChoice, UsageInfo
+ from mistralai.models import ChatCompletionResponse
+ from opentelemetry import context as context_api
+ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+ from opentelemetry.instrumentation.utils import (
+     _SUPPRESS_INSTRUMENTATION_KEY,
+     unwrap,
+ )
+ from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GEN_AI_RESPONSE_ID
+ from opentelemetry.semconv_ai import (
+     SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
+     LLMRequestTypeValues,
+     SpanAttributes,
+ )
+ from opentelemetry.trace import SpanKind, get_tracer, set_span_in_context
+ from opentelemetry.trace.status import Status, StatusCode
+ from wrapt import wrap_function_wrapper
+
+ from netra.instrumentation.mistralai.config import Config
+ from netra.instrumentation.mistralai.utils import dont_throw
+ from netra.instrumentation.mistralai.version import __version__
+
+ logger = logging.getLogger(__name__)
+
+ _instruments = ("mistralai >= 1.0.0",)
+
+ WRAPPED_METHODS = [
+     {
+         "module": "mistralai.chat",
+         "object": "Chat",
+         "method": "complete",
+         "span_name": "mistralai.chat.complete",
+         "streaming": False,
+         "is_async": False,
+     },
+     {
+         "module": "mistralai.chat",
+         "object": "Chat",
+         "method": "complete_async",
+         "span_name": "mistralai.chat.complete_async",
+         "streaming": False,
+         "is_async": True,
+     },
+     {
+         "module": "mistralai.chat",
+         "object": "Chat",
+         "method": "stream",
+         "span_name": "mistralai.chat.stream",
+         "streaming": True,
+         "is_async": False,
+     },
+     {
+         "module": "mistralai.chat",
+         "object": "Chat",
+         "method": "stream_async",
+         "span_name": "mistralai.chat.stream_async",
+         "streaming": True,
+         "is_async": True,
+     },
+     {
+         "module": "mistralai.embeddings",
+         "object": "Embeddings",
+         "method": "create",
+         "span_name": "mistralai.embeddings",
+         "streaming": False,
+         "is_async": False,
+     },
+ ]
+
+
+ def should_send_prompts() -> bool:
+     return (os.getenv("TRACELOOP_TRACE_CONTENT") or "true").lower() == "true" or context_api.get_value(
+         "override_enable_content_tracing"
+     )
+
+
+ def _set_span_attribute(span: Any, name: str, value: Any) -> None:
+     if value is not None:
+         if value != "":
+             span.set_attribute(name, value)
+     return
+
+
+ @dont_throw
+ def _set_input_attributes(
+     span: Any, llm_request_type: LLMRequestTypeValues, to_wrap: Dict[str, Any], kwargs: dict[str, Any]
+ ) -> None:
+     _set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, kwargs.get("model"))
+     _set_span_attribute(
+         span,
+         SpanAttributes.LLM_IS_STREAMING,
+         to_wrap.get("streaming"),
+     )
+
+     if should_send_prompts():
+         if llm_request_type == LLMRequestTypeValues.CHAT:
+             messages = kwargs.get("messages", [])
+             for index, message in enumerate(messages):
+                 # Handle both dict and object message formats
+                 if hasattr(message, "content"):
+                     content = message.content
+                     role = message.role
+                 else:
+                     content = message.get("content", "")
+                     role = message.get("role", "user")
+
+                 _set_span_attribute(
+                     span,
+                     f"{SpanAttributes.LLM_PROMPTS}.{index}.content",
+                     content,
+                 )
+                 _set_span_attribute(
+                     span,
+                     f"{SpanAttributes.LLM_PROMPTS}.{index}.role",
+                     role,
+                 )
+         else:
+             input_data = kwargs.get("input") or kwargs.get("inputs")
+
+             if isinstance(input_data, str):
+                 _set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.0.role", "user")
+                 _set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.0.content", input_data)
+             elif isinstance(input_data, list):
+                 for index, prompt in enumerate(input_data):
+                     _set_span_attribute(
+                         span,
+                         f"{SpanAttributes.LLM_PROMPTS}.{index}.role",
+                         "user",
+                     )
+                     _set_span_attribute(
+                         span,
+                         f"{SpanAttributes.LLM_PROMPTS}.{index}.content",
+                         str(prompt),
+                     )
+
+
+ @dont_throw
+ def _set_response_attributes(span: Any, llm_request_type: LLMRequestTypeValues, response: Any) -> None:
+     # Handle both object and dict response formats
+     response_id = getattr(response, "id", None) or (response.get("id") if hasattr(response, "get") else None)
+     _set_span_attribute(span, GEN_AI_RESPONSE_ID, response_id)
+
+     if llm_request_type == LLMRequestTypeValues.EMBEDDING:
+         return
+
+     if should_send_prompts():
+         choices = getattr(response, "choices", None) or (response.get("choices", []) if hasattr(response, "get") else [])
+         for index, choice in enumerate(choices):
+             prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
+
+             # Handle both object and dict choice formats
+             if hasattr(choice, "finish_reason"):
+                 finish_reason = choice.finish_reason
+                 message = choice.message
+             else:
+                 finish_reason = choice.get("finish_reason")
+                 message = choice.get("message", {})
+
+             _set_span_attribute(
+                 span,
+                 f"{prefix}.finish_reason",
+                 finish_reason,
+             )
+
+             # Handle message content
+             if hasattr(message, "content"):
+                 content = message.content
+                 role = message.role
+             else:
+                 content = message.get("content", "")
+                 role = message.get("role", "assistant")
+
+             _set_span_attribute(
+                 span,
+                 f"{prefix}.content",
+                 (content if isinstance(content, str) else json.dumps(content)),
+             )
+             _set_span_attribute(
+                 span,
+                 f"{prefix}.role",
+                 role,
+             )
+
+     # Handle model attribute
+     if hasattr(response, "model"):
+         model = response.model
+         _set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, model)
+
+     # Handle usage information
+     if not hasattr(response, "usage"):
+         return
+
+     usage = response.usage
+
+     if hasattr(usage, "prompt_tokens"):
+         input_tokens = usage.prompt_tokens
+         output_tokens = usage.completion_tokens or 0
+         total_tokens = usage.total_tokens
+     else:
+         input_tokens = usage.get("prompt_tokens", 0)
+         output_tokens = usage.get("completion_tokens", 0)
+         total_tokens = usage.get("total_tokens", 0)
+
+     _set_span_attribute(
+         span,
+         SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
+         total_tokens,
+     )
+     _set_span_attribute(
+         span,
+         SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
+         output_tokens,
+     )
+     _set_span_attribute(
+         span,
+         SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
+         input_tokens,
+     )
+
+
+ def _accumulate_streaming_response(
+     span: Any, llm_request_type: LLMRequestTypeValues, response: Any, token: Any = None
+ ) -> Generator[Any, None, None]:
+     accumulated_response = ChatCompletionResponse(
+         id="",
+         object="",
+         created=0,
+         model="",
+         choices=[],
+         usage=UsageInfo(prompt_tokens=0, total_tokens=0, completion_tokens=0),
+     )
+
+     try:
+         for res in response:
+             yield res
+
+             data = None
+             if hasattr(res, "data"):
+                 data = res.data
+
+             if data is not None and hasattr(data, "model") and data.model:
+                 accumulated_response.model = data.model
+             if data is not None and hasattr(data, "usage") and data.usage:
+                 accumulated_response.usage = data.usage
+             # ID is the same for all chunks, so it's safe to overwrite it every time
+             if data is not None and hasattr(data, "id") and data.id:
+                 accumulated_response.id = data.id
+
+             choices = getattr(data, "choices", [])
+             for idx, choice in enumerate(choices):
+                 if len(accumulated_response.choices) <= idx:
+                     accumulated_response.choices.append(
+                         ChatCompletionChoice(
+                             index=idx,
+                             message=AssistantMessage(role="assistant", content=""),
+                             finish_reason=choice.finish_reason,
+                         )
+                     )
+
+                 if hasattr(choice, "finish_reason"):
+                     accumulated_response.choices[idx].finish_reason = choice.finish_reason
+
+                 # Handle delta content
+                 delta = getattr(choice, "delta", None)
+                 if delta:
+                     if hasattr(delta, "content") and delta.content:
+                         accumulated_response.choices[idx].message.content += delta.content
+                     if hasattr(delta, "role") and delta.role:
+                         accumulated_response.choices[idx].message.role = delta.role
+
+         _set_response_attributes(span, llm_request_type, accumulated_response)
+         span.set_status(Status(StatusCode.OK))
+     finally:
+         span.end()
+         if token is not None:
+             context_api.detach(token)
+
+
+ async def _aaccumulate_streaming_response(
+     span: Any, llm_request_type: LLMRequestTypeValues, response: Any, token: Any = None
+ ) -> AsyncGenerator[Any, None]:
+     accumulated_response = ChatCompletionResponse(
+         id="",
+         object="",
+         created=0,
+         model="",
+         choices=[],
+         usage=UsageInfo(prompt_tokens=0, total_tokens=0, completion_tokens=0),
+     )
+
+     try:
+         async for res in response:
+             yield res
+
+             data = None
+             if hasattr(res, "data"):
+                 data = res.data
+
+             if data is not None and hasattr(data, "model") and data.model:
+                 accumulated_response.model = data.model
+             if data is not None and hasattr(data, "usage") and data.usage:
+                 accumulated_response.usage = data.usage
+             # Id is the same for all chunks, so it's safe to overwrite it every time
+             if data is not None and hasattr(data, "id") and data.id:
+                 accumulated_response.id = data.id
+
+             choices = getattr(data, "choices", [])
+             for idx, choice in enumerate(choices):
+                 if len(accumulated_response.choices) <= idx:
+                     accumulated_response.choices.append(
+                         ChatCompletionChoice(
+                             index=idx,
+                             message=AssistantMessage(role="assistant", content=""),
+                             finish_reason=choice.finish_reason,
+                         )
+                     )
+
+                 if hasattr(choice, "finish_reason"):
+                     accumulated_response.choices[idx].finish_reason = choice.finish_reason
+
+                 # Handle delta content
+                 delta = getattr(choice, "delta", None)
+                 if delta:
+                     if hasattr(delta, "content") and delta.content:
+                         accumulated_response.choices[idx].message.content += delta.content
+                     if hasattr(delta, "role") and delta.role:
+                         accumulated_response.choices[idx].message.role = delta.role
+
+         _set_response_attributes(span, llm_request_type, accumulated_response)
+         span.set_status(Status(StatusCode.OK))
+     finally:
+         span.end()
+         if token is not None:
+             context_api.detach(token)
+
+
+ def _with_tracer_wrapper(func: Callable[..., Any]) -> Callable[..., Any]:
+     """Helper for providing tracer for wrapper functions."""
+
+     def _with_tracer(tracer: Any, to_wrap: Dict[str, Any]) -> Callable[..., Any]:
+         def wrapper(wrapped: Callable[..., Any], instance: Any, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Any:
+             return func(tracer, to_wrap, wrapped, instance, args, kwargs)
+
+         return wrapper
+
+     return _with_tracer
+
+
+ def _llm_request_type_by_method(method_name: Optional[str]) -> LLMRequestTypeValues:
+     if method_name in ["complete", "complete_async", "stream", "stream_async"]:
+         return LLMRequestTypeValues.CHAT
+     elif method_name == "create":
+         # "create" is only wrapped for the Embeddings client (see WRAPPED_METHODS)
+         return LLMRequestTypeValues.EMBEDDING
+     else:
+         return LLMRequestTypeValues.UNKNOWN
+
+
+ @_with_tracer_wrapper
+ def _wrap(
+     tracer: Any,
+     to_wrap: Dict[str, Any],
+     wrapped: Callable[..., Any],
+     instance: Any,
+     args: Tuple[Any, ...],
+     kwargs: Dict[str, Any],
+ ) -> Any:
+     """Instruments and calls every function defined in TO_WRAP."""
+     if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
+         SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
+     ):
+         return wrapped(*args, **kwargs)
+
+     name = to_wrap.get("span_name")
+     llm_request_type = _llm_request_type_by_method(to_wrap.get("method"))
+
+     if to_wrap.get("streaming"):
+         span = tracer.start_span(
+             name,
+             kind=SpanKind.CLIENT,
+             attributes={
+                 SpanAttributes.LLM_SYSTEM: "MistralAI",
+                 SpanAttributes.LLM_REQUEST_TYPE: llm_request_type.value,
+             },
+         )
+
+         ctx = set_span_in_context(span)
+         token = context_api.attach(ctx)
+
+         try:
+             if span.is_recording():
+                 _set_input_attributes(span, llm_request_type, to_wrap, kwargs)
+
+             response = wrapped(*args, **kwargs)
+
+             if response:
+                 return _accumulate_streaming_response(span, llm_request_type, response, token)
+             else:
+                 span.set_status(Status(StatusCode.ERROR))
+                 span.end()
+                 context_api.detach(token)
+
+             return response
+         except Exception:
+             span.set_status(Status(StatusCode.ERROR))
+             span.end()
+             context_api.detach(token)
+             raise
+     else:
+         with tracer.start_as_current_span(
+             name,
+             kind=SpanKind.CLIENT,
+             attributes={
+                 SpanAttributes.LLM_SYSTEM: "MistralAI",
+                 SpanAttributes.LLM_REQUEST_TYPE: llm_request_type.value,
+             },
+         ) as span:
+             if span.is_recording():
+                 _set_input_attributes(span, llm_request_type, to_wrap, kwargs)
+
+             response = wrapped(*args, **kwargs)
+
+             if response:
+                 if span.is_recording():
+                     _set_response_attributes(span, llm_request_type, response)
+                     span.set_status(Status(StatusCode.OK))
+
+             return response
+
+
+ @_with_tracer_wrapper
+ async def _awrap(
+     tracer: Any,
+     to_wrap: Dict[str, Any],
+     wrapped: Callable[..., Any],
+     instance: Any,
+     args: Tuple[Any, ...],
+     kwargs: Dict[str, Any],
+ ) -> Any:
+     """Instruments and calls every function defined in TO_WRAP."""
+     if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
+         SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
+     ):
+         return await wrapped(*args, **kwargs)
+
+     name = to_wrap.get("span_name")
+     llm_request_type = _llm_request_type_by_method(to_wrap.get("method"))
+
+     if to_wrap.get("streaming"):
+         span = tracer.start_span(
+             name,
+             kind=SpanKind.CLIENT,
+             attributes={
+                 SpanAttributes.LLM_SYSTEM: "MistralAI",
+                 SpanAttributes.LLM_REQUEST_TYPE: llm_request_type.value,
+             },
+         )
+
+         ctx = set_span_in_context(span)
+         token = context_api.attach(ctx)
+
+         try:
+             if span.is_recording():
+                 _set_input_attributes(span, llm_request_type, to_wrap, kwargs)
+
+             response = await wrapped(*args, **kwargs)
+
+             if response:
+                 return _aaccumulate_streaming_response(span, llm_request_type, response, token)
+             else:
+                 span.set_status(Status(StatusCode.ERROR))
+                 span.end()
+                 context_api.detach(token)
+
+             return response
+         except Exception:
+             span.set_status(Status(StatusCode.ERROR))
+             span.end()
+             context_api.detach(token)
+             raise
+     else:
+         with tracer.start_as_current_span(
+             name,
+             kind=SpanKind.CLIENT,
+             attributes={
+                 SpanAttributes.LLM_SYSTEM: "MistralAI",
+                 SpanAttributes.LLM_REQUEST_TYPE: llm_request_type.value,
+             },
+         ) as span:
+             if span.is_recording():
+                 _set_input_attributes(span, llm_request_type, to_wrap, kwargs)
+
+             response = await wrapped(*args, **kwargs)
+
+             if response:
+                 if span.is_recording():
+                     _set_response_attributes(span, llm_request_type, response)
+                     span.set_status(Status(StatusCode.OK))
+
+             return response
+
+
+ class MistralAiInstrumentor(BaseInstrumentor):  # type: ignore[misc]
+     """An instrumentor for Mistral AI's client library."""
+
+     def __init__(self, exception_logger: Optional[Callable[[Exception], None]] = None) -> None:
+         super().__init__()
+         Config.exception_logger = exception_logger
+
+     def instrumentation_dependencies(self) -> Collection[str]:
+         return _instruments
+
+     def _instrument(self, **kwargs: Any) -> None:
+         tracer_provider = kwargs.get("tracer_provider")
+         tracer = get_tracer(__name__, __version__, tracer_provider)
+         for wrapped_method in WRAPPED_METHODS:
+             module_name = wrapped_method.get("module")
+             object_name = wrapped_method.get("object")
+             method_name = wrapped_method.get("method")
+             is_async = wrapped_method.get("is_async")
+
+             wrapper_func = _awrap if is_async else _wrap
+
+             wrap_function_wrapper(
+                 module_name,
+                 f"{object_name}.{method_name}",
+                 wrapper_func(tracer, wrapped_method),
+             )
+
+     def _uninstrument(self, **kwargs: Any) -> None:
+         for wrapped_method in WRAPPED_METHODS:
+             module_name = wrapped_method.get("module")
+             object_name = wrapped_method.get("object")
+             method_name = wrapped_method.get("method")
+
+             unwrap(
+                 f"{module_name}.{object_name}",
+                 method_name,
+             )
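
The module above wires everything through OpenTelemetry's BaseInstrumentor: calling instrument() runs _instrument(), which patches each entry in WRAPPED_METHODS with _wrap or _awrap. A minimal usage sketch follows; the exporter choice, API key, and model name are illustrative assumptions and do not appear in the package itself.

    # Sketch only: enable the instrumentor with a basic OpenTelemetry SDK pipeline.
    from opentelemetry import trace
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor

    from netra.instrumentation.mistralai import MistralAiInstrumentor

    provider = TracerProvider()
    provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
    trace.set_tracer_provider(provider)

    # instrument() invokes _instrument(), which wraps Chat.complete, Chat.stream, etc.
    MistralAiInstrumentor().instrument(tracer_provider=provider)

    from mistralai import Mistral

    client = Mistral(api_key="YOUR_API_KEY")  # placeholder key
    # This call goes through _wrap() and emits a "mistralai.chat.complete" CLIENT span;
    # prompts and completions are recorded unless TRACELOOP_TRACE_CONTENT is set to "false".
    response = client.chat.complete(
        model="mistral-small-latest",  # placeholder model name
        messages=[{"role": "user", "content": "Hello"}],
    )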

netra/instrumentation/mistralai/config.py

@@ -0,0 +1,5 @@
+ from typing import Callable, Optional
+
+
+ class Config:
+     exception_logger: Optional[Callable[[Exception], None]] = None
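
Config holds a single hook: a callable that receives exceptions swallowed by the dont_throw decorator in utils.py. The instrumentor's constructor sets it, so forwarding those errors to an application logger looks roughly like this (the logger name is a made-up example):

    # Sketch: surface swallowed tracing errors through your own logger.
    import logging

    from netra.instrumentation.mistralai import MistralAiInstrumentor

    telemetry_log = logging.getLogger("myapp.telemetry")  # hypothetical logger name

    # The constructor stores the callable on Config.exception_logger;
    # dont_throw() calls it whenever a tracing helper raises.
    MistralAiInstrumentor(
        exception_logger=lambda exc: telemetry_log.warning("mistralai tracing failed: %s", exc)
    ).instrument()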

netra/instrumentation/mistralai/utils.py

@@ -0,0 +1,30 @@
+ import logging
+ import traceback
+ from typing import Any, Callable
+
+ from netra.instrumentation.mistralai.config import Config
+
+
+ def dont_throw(func: Callable[..., Any]) -> Callable[..., Any]:
+     """
+     A decorator that wraps the passed in function and logs exceptions instead of throwing them.
+
+     @param func: The function to wrap
+     @return: The wrapper function
+     """
+     # Obtain a logger specific to the function's module
+     logger = logging.getLogger(func.__module__)
+
+     def wrapper(*args: Any, **kwargs: Any) -> Any:
+         try:
+             return func(*args, **kwargs)
+         except Exception as e:
+             logger.debug(
+                 "OpenLLMetry failed to trace in %s, error: %s",
+                 func.__name__,
+                 traceback.format_exc(),
+             )
+             if Config.exception_logger:
+                 Config.exception_logger(e)
+
+     return wrapper
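
dont_throw exists so that a failure inside a tracing helper never breaks the user's Mistral call: the exception is logged at DEBUG level, optionally forwarded to Config.exception_logger, and the decorated function simply returns None. A self-contained sketch of that behaviour (the helper name is made up):

    # Illustrative only: an exception inside a decorated helper is logged, not raised.
    from netra.instrumentation.mistralai.utils import dont_throw

    @dont_throw
    def record_attributes(span, payload):  # hypothetical helper
        raise ValueError("boom")

    result = record_attributes(None, {})  # no exception escapes the wrapper
    assert result is None                 # wrapper falls through and returns None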

netra/instrumentation/mistralai/version.py

@@ -0,0 +1 @@
+ __version__ = "1.8.2"