aitracer-0.1.0-py3-none-any.whl

python/client.py ADDED
@@ -0,0 +1,437 @@
+ """AITracer main client."""
+
+ from __future__ import annotations
+
+ import atexit
+ import os
+ import threading
+ import time
+ import uuid
+ from contextlib import contextmanager
+ from typing import TYPE_CHECKING, Any, Optional
+
+ from aitracer.pii import PIIDetector
+ from aitracer.queue import LogQueue
+ from aitracer.session import Session
+ from aitracer.trace import Trace
+ from aitracer.wrappers.anthropic_wrapper import wrap_anthropic_client
+ from aitracer.wrappers.gemini_wrapper import wrap_gemini_model
+ from aitracer.wrappers.openai_wrapper import wrap_openai_client
+
+ if TYPE_CHECKING:
+     from anthropic import Anthropic
+     from google.generativeai import GenerativeModel
+     from openai import OpenAI
+
+
+ class AITracer:
+     """
+     AITracer client for monitoring AI/LLM applications.
+
+     Usage:
+         tracer = AITracer(api_key="at-xxxxxxxx", project="my-chatbot")
+         client = tracer.wrap_openai(OpenAI())
+
+         response = client.chat.completions.create(
+             model="gpt-4o",
+             messages=[{"role": "user", "content": "Hello!"}]
+         )
+     """
+
+     _instance: Optional["AITracer"] = None
+     _lock = threading.Lock()
+
+     def __init__(
+         self,
+         api_key: Optional[str] = None,
+         project: Optional[str] = None,
+         *,
+         base_url: str = "https://api.aitracer.co",
+         sync: bool = False,
+         flush_on_exit: bool = True,
+         batch_size: int = 10,
+         flush_interval: float = 5.0,
+         enabled: bool = True,
+         # PII settings
+         pii_detection: bool = False,
+         pii_action: str = "mask",
+         pii_types: Optional[list[str]] = None,
+     ):
+         """
+         Initialize AITracer client.
+
+         Args:
+             api_key: AITracer API key. Can also be set via AITRACER_API_KEY env var.
+             project: Project name or ID. Can also be set via AITRACER_PROJECT env var.
+             base_url: AITracer API base URL.
+             sync: If True, send logs synchronously (useful for Lambda/serverless).
+             flush_on_exit: If True, flush pending logs on program exit.
+             batch_size: Number of logs to batch before sending.
+             flush_interval: Seconds between automatic flushes.
+             enabled: If False, disable all logging (useful for testing).
+             pii_detection: If True, enable PII detection and masking.
+             pii_action: Action for detected PII - "mask", "redact", "hash", "none".
+             pii_types: List of PII types to detect. Default: ["email", "phone", "credit_card", "ssn"].
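+
+         Example (a sketch; the key, project name, and PII choices below are
+         placeholders, mirroring the class-level Usage):
+             tracer = AITracer(
+                 api_key="at-xxxxxxxx",
+                 project="support-bot",
+                 pii_detection=True,
+                 pii_action="mask",
+                 pii_types=["email", "phone"],
+             )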
+         """
+         self.api_key = api_key or os.environ.get("AITRACER_API_KEY")
+         self.project = project or os.environ.get("AITRACER_PROJECT")
+         self.base_url = base_url.rstrip("/")
+         self.sync = sync
+         self.enabled = enabled
+
+         # PII detection settings
+         self.pii_detection = pii_detection
+         self.pii_action = pii_action
+         self.pii_types = pii_types or ["email", "phone", "credit_card", "ssn"]
+         self._pii_detector: Optional[PIIDetector] = None
+         if self.pii_detection:
+             self._pii_detector = PIIDetector(
+                 enabled_types=self.pii_types,
+                 action=self.pii_action,
+             )
+
+         if not self.api_key:
+             raise ValueError(
+                 "API key is required. Set it via api_key parameter or AITRACER_API_KEY env var."
+             )
+
+         if not self.project:
+             raise ValueError(
+                 "Project is required. Set it via project parameter or AITRACER_PROJECT env var."
+             )
+
+         # Log queue for async sending
+         self._queue = LogQueue(
+             api_key=self.api_key,
+             base_url=self.base_url,
+             batch_size=batch_size,
+             flush_interval=flush_interval,
+             sync=sync,
+         )
+
+         # Current trace context (thread-local)
+         self._trace_context = threading.local()
+
+         # Current session context (thread-local)
+         self._session_context = threading.local()
+
+         # Register flush on exit
+         if flush_on_exit:
+             atexit.register(self.flush)
+
+         # Set as singleton instance
+         with AITracer._lock:
+             AITracer._instance = self
+
+     @classmethod
+     def get_instance(cls) -> Optional["AITracer"]:
+         """Get the current AITracer instance."""
+         return cls._instance
+
+     def wrap_openai(self, client: "OpenAI") -> "OpenAI":
+         """
+         Wrap an OpenAI client to automatically log all API calls.
+
+         Args:
+             client: OpenAI client instance.
+
+         Returns:
+             Wrapped OpenAI client.
+         """
+         return wrap_openai_client(client, self)
+
+     def wrap_anthropic(self, client: "Anthropic") -> "Anthropic":
+         """
+         Wrap an Anthropic client to automatically log all API calls.
+
+         Args:
+             client: Anthropic client instance.
+
+         Returns:
+             Wrapped Anthropic client.
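+
+         Example (a sketch mirroring the OpenAI flow above; assumes the
+         anthropic package is installed and the model name is current):
+             from anthropic import Anthropic
+
+             client = tracer.wrap_anthropic(Anthropic())
+             message = client.messages.create(
+                 model="claude-3-5-sonnet-20241022",
+                 max_tokens=256,
+                 messages=[{"role": "user", "content": "Hello!"}],
+             )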
+         """
+         return wrap_anthropic_client(client, self)
+
+     def wrap_gemini(self, model: "GenerativeModel") -> "GenerativeModel":
+         """
+         Wrap a Google Gemini GenerativeModel to automatically log all API calls.
+
+         Args:
+             model: GenerativeModel instance.
+
+         Returns:
+             Wrapped GenerativeModel.
+
+         Example:
+             import google.generativeai as genai
+             genai.configure(api_key="your-api-key")
+             model = genai.GenerativeModel("gemini-1.5-flash")
+             model = tracer.wrap_gemini(model)
+         """
+         return wrap_gemini_model(model, self)
+
+     @contextmanager
+     def trace(
+         self,
+         trace_id: Optional[str] = None,
+         name: Optional[str] = None,
+     ):
+         """
+         Create a trace context for grouping related API calls.
+
+         Usage:
+             with tracer.trace("user-query-123") as trace:
+                 response1 = client.chat.completions.create(...)
+                 response2 = client.chat.completions.create(...)
+                 trace.set_metadata({"user_id": "user-456"})
+
+         Args:
+             trace_id: Optional trace ID. Auto-generated if not provided.
+             name: Optional name for the trace.
+
+         Yields:
+             Trace object for setting metadata.
+         """
+         trace = Trace(
+             trace_id=trace_id or str(uuid.uuid4()),
+             name=name,
+         )
+
+         # Store in thread-local context
+         previous_trace = getattr(self._trace_context, "current", None)
+         self._trace_context.current = trace
+
+         try:
+             yield trace
+         finally:
+             self._trace_context.current = previous_trace
+
+     def get_current_trace(self) -> Optional[Trace]:
+         """Get the current trace context."""
+         return getattr(self._trace_context, "current", None)
+
+     @contextmanager
+     def session(
+         self,
+         session_id: Optional[str] = None,
+         user_id: Optional[str] = None,
+         name: Optional[str] = None,
+         metadata: Optional[dict] = None,
+     ):
+         """
+         Create a session context for tracking user interactions over time.
+
+         A session groups multiple logs/traces that belong to the same user interaction flow.
+
+         Usage:
+             with tracer.session(user_id="user-123") as session:
+                 response = client.chat.completions.create(...)
+                 session.thumbs_up()  # Record positive feedback for last response
+
+         Args:
+             session_id: Optional session ID. Auto-generated if not provided.
+             user_id: Optional user identifier.
+             name: Optional name for the session.
+             metadata: Optional metadata dictionary.
+
+         Yields:
+             Session object for recording events and feedback.
+         """
+         sess = Session(
+             session_id=session_id or str(uuid.uuid4()),
+             user_id=user_id,
+             name=name,
+             metadata=metadata or {},
+             _tracer=self,
+         )
+
+         # Store in thread-local context
+         previous_session = getattr(self._session_context, "current", None)
+         self._session_context.current = sess
+
+         # Start session on server
+         if self.enabled:
+             self._start_session(sess)
+
+         try:
+             yield sess
+         finally:
+             # End session on server
+             if self.enabled:
+                 self._end_session(sess)
+             self._session_context.current = previous_session
+
+     def get_current_session(self) -> Optional[Session]:
+         """Get the current session context."""
+         return getattr(self._session_context, "current", None)
+
+     def _start_session(self, session: Session) -> None:
+         """Internal: Start a session on the server."""
+         import httpx
+
+         try:
+             with httpx.Client() as client:
+                 client.post(
+                     f"{self.base_url}/api/v1/sessions/",
+                     headers={
+                         "Authorization": f"Bearer {self.api_key}",
+                         "Content-Type": "application/json",
+                     },
+                     json={
+                         "session_id": session.session_id,
+                         "user_id": session.user_id,
+                         "name": session.name,
+                         "metadata": session.metadata,
+                         "tags": session.tags,
+                         "project": self.project,
+                     },
+                     timeout=10.0,
+                 )
+         except Exception:
+             pass  # Silently fail - don't break user code
+
+     def _end_session(self, session: Session) -> None:
+         """Internal: End a session on the server."""
+         import httpx
+
+         try:
+             with httpx.Client() as client:
+                 client.patch(
+                     f"{self.base_url}/api/v1/sessions/{session.session_id}/end/",
+                     headers={
+                         "Authorization": f"Bearer {self.api_key}",
+                         "Content-Type": "application/json",
+                     },
+                     json={
+                         "metadata": session.metadata,
+                         "tags": session.tags,
+                     },
+                     timeout=10.0,
+                 )
+         except Exception:
+             pass
+
+     def _send_session_event(self, session_id: str, event: dict) -> None:
+         """Internal: Send a session event to the server."""
+         import httpx
+
+         try:
+             with httpx.Client() as client:
+                 client.post(
+                     f"{self.base_url}/api/v1/sessions/{session_id}/events/",
+                     headers={
+                         "Authorization": f"Bearer {self.api_key}",
+                         "Content-Type": "application/json",
+                     },
+                     json=event,
+                     timeout=10.0,
+                 )
+         except Exception:
+             pass
+
+     def _send_feedback(self, session_id: str, feedback: dict) -> None:
+         """Internal: Send user feedback to the server."""
+         import httpx
+
+         try:
+             with httpx.Client() as client:
+                 client.post(
+                     f"{self.base_url}/api/v1/sessions/{session_id}/feedback/",
+                     headers={
+                         "Authorization": f"Bearer {self.api_key}",
+                         "Content-Type": "application/json",
+                     },
+                     json=feedback,
+                     timeout=10.0,
+                 )
+         except Exception:
+             pass
+
+     def log(
+         self,
+         *,
+         model: str,
+         provider: str,
+         input_data: Any,
+         output_data: Any,
+         input_tokens: int = 0,
+         output_tokens: int = 0,
+         latency_ms: int = 0,
+         status: str = "success",
+         error_message: Optional[str] = None,
+         metadata: Optional[dict] = None,
+         tags: Optional[list[str]] = None,
+         trace_id: Optional[str] = None,
+         span_id: Optional[str] = None,
+         parent_span_id: Optional[str] = None,
+     ) -> None:
+         """
+         Log an LLM API call.
+
+         This is called automatically when using wrapped clients,
+         but can also be called manually for custom logging.
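+
+         Example (a sketch with illustrative values, e.g. for a self-hosted
+         model that has no wrapper):
+             tracer.log(
+                 model="my-local-llm",
+                 provider="custom",
+                 input_data={"prompt": "Hello"},
+                 output_data={"text": "Hi there!"},
+                 input_tokens=3,
+                 output_tokens=4,
+                 latency_ms=120,
+             )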
+         """
+         if not self.enabled:
+             return
+
+         # Apply PII detection and masking if enabled
+         processed_input = input_data
+         processed_output = output_data
+         if self._pii_detector:
+             processed_input, _ = self._pii_detector.process_json(input_data)
+             if output_data:
+                 processed_output, _ = self._pii_detector.process_json(output_data)
+
+         # Get trace context
+         current_trace = self.get_current_trace()
+         if current_trace and not trace_id:
+             trace_id = current_trace.trace_id
+             if current_trace.metadata:
+                 metadata = {**(metadata or {}), **current_trace.metadata}
+             if current_trace.tags:
+                 tags = list(set((tags or []) + current_trace.tags))
+
+         # Get session context
+         session_id = None
+         current_session = self.get_current_session()
+         if current_session:
+             session_id = current_session.session_id
+             if current_session.user_id:
+                 metadata = {**(metadata or {}), "user_id": current_session.user_id}
+
+         # Generate span_id
+         generated_span_id = span_id or str(uuid.uuid4())
+
+         log_entry = {
+             "project": self.project,
+             "model": model,
+             "provider": provider,
+             "input": processed_input,
+             "output": processed_output,
+             "input_tokens": input_tokens,
+             "output_tokens": output_tokens,
+             "latency_ms": latency_ms,
+             "status": status,
+             "error_message": error_message,
+             "metadata": metadata,
+             "tags": tags,
+             "trace_id": trace_id,
+             "span_id": generated_span_id,
+             "parent_span_id": parent_span_id,
+             "session_id": session_id,
+         }
+
+         self._queue.add(log_entry)
+
+         # Update session's last log ID for feedback association
+         if current_session:
+             current_session._set_last_log_id(generated_span_id)
+
+     def flush(self) -> None:
+         """Flush all pending logs immediately."""
+         self._queue.flush()
+
+     def shutdown(self) -> None:
+         """Shutdown the client and flush pending logs."""
+         self.flush()
+         self._queue.shutdown()
@@ -0,0 +1,5 @@
+ """AITracer integrations with popular LLM frameworks."""
+
+ from .langchain import AITracerCallbackHandler
+
+ __all__ = ["AITracerCallbackHandler"]
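+
+ # Example (a sketch, not executed here): wiring the handler into a LangChain
+ # call. The import path assumes this module is importable as
+ # `aitracer.integrations`, and the constructor is assumed to accept a tracer;
+ # both are defined in `.langchain`, which is not shown in this diff.
+ #
+ #     from aitracer.integrations import AITracerCallbackHandler
+ #     handler = AITracerCallbackHandler(tracer)
+ #     llm.invoke("Hello!", config={"callbacks": [handler]})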