aitracer-0.1.0-py3-none-any.whl

python/queue.py ADDED
@@ -0,0 +1,219 @@
+ """Log queue for batching and async sending."""
+
+ from __future__ import annotations
+
+ import json
+ import logging
+ import queue
+ import threading
+ import time
+ from typing import Any, Optional
+
+ import httpx
+
+ logger = logging.getLogger("aitracer")
+
+
+ class LogQueue:
+     """
+     Queue for batching and asynchronously sending logs to the AITracer API.
+
+     Features:
+     - Batches logs for efficient sending
+     - Background thread for async sending
+     - Automatic flush on interval
+     - Graceful shutdown with flush
+     - Sync mode for serverless environments
+     """
+
+     def __init__(
+         self,
+         api_key: str,
+         base_url: str,
+         batch_size: int = 10,
+         flush_interval: float = 5.0,
+         sync: bool = False,
+         max_queue_size: int = 10000,
+         timeout: float = 30.0,
+     ):
+         """
+         Initialize the log queue.
+
+         Args:
+             api_key: AITracer API key.
+             base_url: AITracer API base URL.
+             batch_size: Number of logs to batch before sending.
+             flush_interval: Seconds between automatic flushes.
+             sync: If True, send logs synchronously.
+             max_queue_size: Maximum queue size before dropping logs.
+             timeout: HTTP request timeout in seconds.
+         """
+         self.api_key = api_key
+         self.base_url = base_url
+         self.batch_size = batch_size
+         self.flush_interval = flush_interval
+         self.sync = sync
+         self.max_queue_size = max_queue_size
+         self.timeout = timeout
+
+         self._queue: queue.Queue[dict[str, Any]] = queue.Queue(maxsize=max_queue_size)
+         self._shutdown = threading.Event()
+         self._worker_thread: Optional[threading.Thread] = None
+
+         # HTTP client
+         self._client = httpx.Client(
+             base_url=base_url,
+             headers={
+                 "Authorization": f"Bearer {api_key}",
+                 "Content-Type": "application/json",
+             },
+             timeout=timeout,
+         )
+
+         # Start background worker if async mode
+         if not sync:
+             self._start_worker()
+
+     def _start_worker(self) -> None:
+         """Start the background worker thread."""
+         self._worker_thread = threading.Thread(
+             target=self._worker_loop,
+             daemon=True,
+             name="aitracer-worker",
+         )
+         self._worker_thread.start()
+
+     def _worker_loop(self) -> None:
+         """Background worker loop for processing queued logs."""
+         batch: list[dict[str, Any]] = []
+         last_flush = time.time()
+
+         while not self._shutdown.is_set():
+             try:
+                 # Try to get an item with timeout
+                 try:
+                     item = self._queue.get(timeout=0.1)
+                     batch.append(item)
+                     self._queue.task_done()
+                 except queue.Empty:
+                     pass
+
+                 # Check if we should flush
+                 should_flush = (
+                     len(batch) >= self.batch_size
+                     or (batch and time.time() - last_flush >= self.flush_interval)
+                 )
+
+                 if should_flush and batch:
+                     self._send_batch(batch)
+                     batch = []
+                     last_flush = time.time()
+
+             except Exception as e:
+                 logger.exception("Error in worker loop: %s", e)
+                 time.sleep(1)
+
+         # Final flush on shutdown
+         if batch:
+             self._send_batch(batch)
+
+         # Drain remaining items
+         while not self._queue.empty():
+             try:
+                 item = self._queue.get_nowait()
+                 batch.append(item)
+                 self._queue.task_done()
+                 if len(batch) >= self.batch_size:
+                     self._send_batch(batch)
+                     batch = []
+             except queue.Empty:
+                 break
+
+         if batch:
+             self._send_batch(batch)
+
+     def _send_batch(self, batch: list[dict[str, Any]]) -> None:
+         """Send a batch of logs to the API."""
+         if not batch:
+             return
+
+         try:
+             response = self._client.post(
+                 "/v1/logs/batch",
+                 json={"logs": batch},
+             )
+             response.raise_for_status()
+             logger.debug("Sent %d logs successfully", len(batch))
+         except httpx.HTTPStatusError as e:
+             logger.error(
+                 "Failed to send logs: HTTP %d - %s",
+                 e.response.status_code,
+                 e.response.text,
+             )
+         except httpx.RequestError as e:
+             logger.error("Failed to send logs: %s", e)
+         except Exception as e:
+             logger.exception("Unexpected error sending logs: %s", e)
+
+     def _send_single(self, log: dict[str, Any]) -> None:
+         """Send a single log synchronously."""
+         try:
+             response = self._client.post(
+                 "/v1/logs",
+                 json=log,
+             )
+             response.raise_for_status()
+             logger.debug("Sent log successfully")
+         except httpx.HTTPStatusError as e:
+             logger.error(
+                 "Failed to send log: HTTP %d - %s",
+                 e.response.status_code,
+                 e.response.text,
+             )
+         except httpx.RequestError as e:
+             logger.error("Failed to send log: %s", e)
+         except Exception as e:
+             logger.exception("Unexpected error sending log: %s", e)
+
+     def add(self, log: dict[str, Any]) -> None:
+         """
+         Add a log to the queue.
+
+         Args:
+             log: Log entry dictionary.
+         """
+         if self.sync:
+             # Sync mode: send immediately
+             self._send_single(log)
+         else:
+             # Async mode: add to queue
+             try:
+                 self._queue.put_nowait(log)
+             except queue.Full:
+                 logger.warning("Log queue is full, dropping log")
+
+     def flush(self) -> None:
+         """Flush all pending logs immediately."""
+         if self.sync:
+             return  # Nothing to flush in sync mode
+
+         # Collect all items
+         batch: list[dict[str, Any]] = []
+         while not self._queue.empty():
+             try:
+                 item = self._queue.get_nowait()
+                 batch.append(item)
+                 self._queue.task_done()
+             except queue.Empty:
+                 break
+
+         # Send in batches
+         for i in range(0, len(batch), self.batch_size):
+             self._send_batch(batch[i : i + self.batch_size])
+
+     def shutdown(self) -> None:
+         """Shutdown the queue and wait for pending logs."""
+         self._shutdown.set()
+         if self._worker_thread:
+             self._worker_thread.join(timeout=10.0)
+         self._client.close()
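
Taken together, add() enqueues in async mode and sends one log per request in sync mode, while flush() and shutdown() cover explicit draining. A minimal usage sketch, assuming the module is importable as aitracer.queue (inferred from the file layout) and using placeholder credentials:

from aitracer.queue import LogQueue  # module path assumed from the file layout

# Placeholder key/URL for illustration only.
log_queue = LogQueue(api_key="at-example-key", base_url="https://api.example.com")

# Async mode (default): add() enqueues; the worker thread sends a batch
# once batch_size logs accumulate or flush_interval seconds elapse.
log_queue.add({"model": "gpt-4o", "status": "success", "latency_ms": 120})

log_queue.flush()     # drain the queue and POST to /v1/logs/batch now
log_queue.shutdown()  # stop the worker, send what's left, close the HTTP client
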
python/session.py ADDED
@@ -0,0 +1,144 @@
+ """Session tracking for AITracer."""
+
+ from __future__ import annotations
+
+ import uuid
+ from dataclasses import dataclass, field
+ from datetime import datetime
+ from typing import TYPE_CHECKING, Any, Optional
+
+ if TYPE_CHECKING:
+     from aitracer.client import AITracer
+
+
+ @dataclass
+ class Session:
+     """
+     Session for tracking user interactions over time.
+
+     A session groups multiple logs/traces that belong to the same user interaction flow.
+
+     Usage:
+         with tracer.session(user_id="user-123") as session:
+             response = client.chat.completions.create(...)
+             session.event("user_action", data={"action": "clicked_button"})
+             session.feedback("thumbs_up", log_id="...")
+     """
+
+     session_id: str
+     user_id: Optional[str] = None
+     name: Optional[str] = None
+     metadata: dict = field(default_factory=dict)
+     tags: list[str] = field(default_factory=list)
+
+     # Internal
+     _tracer: Optional["AITracer"] = field(default=None, repr=False)
+     _started_at: datetime = field(default_factory=datetime.utcnow)
+     _events: list[dict] = field(default_factory=list)
+     _feedbacks: list[dict] = field(default_factory=list)
+     _last_log_id: Optional[str] = field(default=None, repr=False)
+
+     def set_metadata(self, key: str, value: Any) -> None:
+         """Set a metadata value."""
+         self.metadata[key] = value
+
+     def add_tag(self, tag: str) -> None:
+         """Add a tag to the session."""
+         if tag not in self.tags:
+             self.tags.append(tag)
+
+     def event(
+         self,
+         event_type: str,
+         *,
+         name: Optional[str] = None,
+         data: Optional[dict] = None,
+         log_id: Optional[str] = None,
+     ) -> None:
+         """
+         Record a custom event in the session.
+
+         Args:
+             event_type: Type of event (e.g., "message", "response", "tool_call", "error", "custom").
+             name: Optional event name.
+             data: Optional event data.
+             log_id: Optional associated log ID.
+         """
+         event = {
+             "event_type": event_type,
+             "name": name,
+             "data": data or {},
+             "log_id": log_id or self._last_log_id,
+             "timestamp": datetime.utcnow().isoformat() + "Z",
+         }
+         self._events.append(event)
+
+         # Send event immediately if tracer is available
+         if self._tracer and self._tracer.enabled:
+             self._tracer._send_session_event(self.session_id, event)
+
+     def feedback(
+         self,
+         feedback_type: str,
+         *,
+         log_id: Optional[str] = None,
+         score: Optional[int] = None,
+         comment: Optional[str] = None,
+         tags: Optional[list[str]] = None,
+     ) -> None:
+         """
+         Record user feedback for an AI response.
+
+         Args:
+             feedback_type: Type of feedback ("thumbs_up", "thumbs_down", "rating", "text").
+             log_id: Log ID the feedback relates to. Defaults to the last log in the session.
+             score: Numeric score (1-5 for rating, 1/-1 for thumbs).
+             comment: Text comment.
+             tags: Feedback tags (e.g., ["helpful", "incorrect"]).
+         """
+         # Auto-set score for thumbs feedback
+         if score is None:
+             if feedback_type == "thumbs_up":
+                 score = 1
+             elif feedback_type == "thumbs_down":
+                 score = -1
+
+         feedback = {
+             "feedback_type": feedback_type,
+             "log_id": log_id or self._last_log_id,
+             "score": score,
+             "comment": comment,
+             "tags": tags or [],
+             "user_id": self.user_id,
+             "timestamp": datetime.utcnow().isoformat() + "Z",
+         }
+         self._feedbacks.append(feedback)
+
+         # Send feedback immediately if tracer is available
+         if self._tracer and self._tracer.enabled:
+             self._tracer._send_feedback(self.session_id, feedback)
+
+     def thumbs_up(self, log_id: Optional[str] = None, comment: Optional[str] = None) -> None:
+         """Record thumbs-up feedback."""
+         self.feedback("thumbs_up", log_id=log_id, comment=comment)
+
+     def thumbs_down(self, log_id: Optional[str] = None, comment: Optional[str] = None) -> None:
+         """Record thumbs-down feedback."""
+         self.feedback("thumbs_down", log_id=log_id, comment=comment)
+
+     def rate(self, score: int, log_id: Optional[str] = None, comment: Optional[str] = None) -> None:
+         """
+         Record a rating feedback (1-5).
+
+         Args:
+             score: Rating from 1 to 5.
+             log_id: Optional log ID.
+             comment: Optional comment.
+         """
+         if not 1 <= score <= 5:
+             raise ValueError("Score must be between 1 and 5")
+         self.feedback("rating", log_id=log_id, score=score, comment=comment)
+
+     def _set_last_log_id(self, log_id: str) -> None:
+         """Internal: Set the last log ID for feedback association."""
+         self._last_log_id = log_id
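
Per its docstring, a Session is normally obtained from tracer.session(...), but the dataclass also works standalone; with no _tracer attached, events and feedback are only accumulated locally. A short sketch, assuming the module is importable as aitracer.session (inferred from the file layout):

from aitracer.session import Session  # module path assumed from the file layout

session = Session(session_id="sess-123", user_id="user-123")
session.add_tag("beta")
session.event("message", data={"text": "hello"})  # no tracer: recorded locally only
session.thumbs_up(comment="helpful answer")       # auto-sets score=1
session.rate(5, comment="great")                  # raises ValueError outside 1-5
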
python/trace.py ADDED
@@ -0,0 +1,65 @@
+ """Trace context for grouping related API calls."""
+
+ from __future__ import annotations
+
+ from dataclasses import dataclass, field
+ from typing import Any, Optional
+
+
+ @dataclass
+ class Trace:
+     """
+     Trace context for grouping related API calls.
+
+     Usage:
+         with tracer.trace("user-query-123") as trace:
+             response1 = client.chat.completions.create(...)
+             trace.set_metadata({"user_id": "user-456"})
+             trace.add_tag("production")
+     """
+
+     trace_id: str
+     name: Optional[str] = None
+     metadata: dict[str, Any] = field(default_factory=dict)
+     tags: list[str] = field(default_factory=list)
+
+     def set_metadata(self, metadata: dict[str, Any]) -> "Trace":
+         """
+         Set metadata for this trace.
+
+         Args:
+             metadata: Dictionary of key-value pairs.
+
+         Returns:
+             Self for chaining.
+         """
+         self.metadata.update(metadata)
+         return self
+
+     def add_tag(self, tag: str) -> "Trace":
+         """
+         Add a tag to this trace.
+
+         Args:
+             tag: Tag string.
+
+         Returns:
+             Self for chaining.
+         """
+         if tag not in self.tags:
+             self.tags.append(tag)
+         return self
+
+     def add_tags(self, tags: list[str]) -> "Trace":
+         """
+         Add multiple tags to this trace.
+
+         Args:
+             tags: List of tag strings.
+
+         Returns:
+             Self for chaining.
+         """
+         for tag in tags:
+             self.add_tag(tag)
+         return self
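
Because each mutator returns self, setup calls chain. A small sketch using only the dataclass above, assuming the module is importable as aitracer.trace (inferred from the file layout):

from aitracer.trace import Trace  # module path assumed from the file layout

trace = (
    Trace(trace_id="user-query-123", name="checkout")
    .set_metadata({"user_id": "user-456"})
    .add_tags(["production", "checkout"])
)
print(trace.tags)  # ['production', 'checkout'] -- add_tag() skips duplicates
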
python/wrappers/__init__.py ADDED
@@ -0,0 +1,7 @@
+ """LLM client wrappers."""
+
+ from aitracer.wrappers.openai_wrapper import wrap_openai_client
+ from aitracer.wrappers.anthropic_wrapper import wrap_anthropic_client
+ from aitracer.wrappers.gemini_wrapper import wrap_gemini_model
+
+ __all__ = ["wrap_openai_client", "wrap_anthropic_client", "wrap_gemini_model"]
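
The package re-exports one wrapper per provider. A hedged sketch of the import surface; only wrap_anthropic_client(client, tracer) is confirmed by the code below, and the other wrappers are assumed to take the same (client_or_model, tracer) shape:

from aitracer.wrappers import wrap_anthropic_client, wrap_openai_client

# `tracer` would be an AITracer instance; its constructor is not shown in this diff.
# anthropic_client = wrap_anthropic_client(anthropic_client, tracer)
# openai_client = wrap_openai_client(openai_client, tracer)  # signature assumed
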
python/wrappers/anthropic_wrapper.py ADDED
@@ -0,0 +1,208 @@
+ """Anthropic client wrapper for automatic logging."""
+
+ from __future__ import annotations
+
+ import time
+ import uuid
+ from functools import wraps
+ from typing import TYPE_CHECKING, Any, Iterator
+
+ if TYPE_CHECKING:
+     from anthropic import Anthropic
+     from anthropic.types import Message, RawMessageStreamEvent
+     from aitracer.client import AITracer
+
+
+ def wrap_anthropic_client(client: "Anthropic", tracer: "AITracer") -> "Anthropic":
+     """
+     Wrap an Anthropic client to automatically log all API calls.
+
+     Args:
+         client: Anthropic client instance.
+         tracer: AITracer instance.
+
+     Returns:
+         Wrapped Anthropic client (same instance, modified in place).
+     """
+     # Store original method
+     original_create = client.messages.create
+
+     @wraps(original_create)
+     def wrapped_create(*args: Any, **kwargs: Any) -> Any:
+         """Wrapped messages.create method."""
+         start_time = time.time()
+         span_id = str(uuid.uuid4())
+
+         # Extract request data
+         model = kwargs.get("model", "unknown")
+         messages = kwargs.get("messages", [])
+         system = kwargs.get("system")
+         max_tokens = kwargs.get("max_tokens", 1024)
+         stream = kwargs.get("stream", False)
+
+         try:
+             response = original_create(*args, **kwargs)
+
+             if stream:
+                 # Handle streaming response
+                 return _wrap_stream_response(
+                     response=response,
+                     tracer=tracer,
+                     model=model,
+                     messages=messages,
+                     system=system,
+                     start_time=start_time,
+                     span_id=span_id,
+                 )
+             else:
+                 # Handle non-streaming response
+                 latency_ms = int((time.time() - start_time) * 1000)
+                 _log_message(
+                     tracer=tracer,
+                     model=model,
+                     messages=messages,
+                     system=system,
+                     response=response,
+                     latency_ms=latency_ms,
+                     span_id=span_id,
+                 )
+                 return response
+
+         except Exception as e:
+             # Log error
+             latency_ms = int((time.time() - start_time) * 1000)
+             tracer.log(
+                 model=model,
+                 provider="anthropic",
+                 input_data=_build_input_data(messages, system),
+                 output_data=None,
+                 latency_ms=latency_ms,
+                 status="error",
+                 error_message=str(e),
+                 span_id=span_id,
+             )
+             raise
+
+     # Replace method
+     client.messages.create = wrapped_create  # type: ignore
+
+     return client
+
+
+ def _wrap_stream_response(
+     response: Iterator["RawMessageStreamEvent"],
+     tracer: "AITracer",
+     model: str,
+     messages: list,
+     system: Any,
+     start_time: float,
+     span_id: str,
+ ) -> Iterator["RawMessageStreamEvent"]:
+     """Wrap streaming response to log after completion."""
+     content_parts: list[str] = []
+     input_tokens = 0
+     output_tokens = 0
+
+     try:
+         for event in response:
+             # Accumulate content from text deltas
+             if hasattr(event, "type"):
+                 if event.type == "content_block_delta":
+                     if hasattr(event, "delta") and hasattr(event.delta, "text"):
+                         content_parts.append(event.delta.text)
+                 elif event.type == "message_delta":
+                     if hasattr(event, "usage"):
+                         output_tokens = getattr(event.usage, "output_tokens", 0)
+                 elif event.type == "message_start":
+                     if hasattr(event, "message") and hasattr(event.message, "usage"):
+                         input_tokens = getattr(event.message.usage, "input_tokens", 0)
+
+             yield event
+
+         # Log after stream completes
+         latency_ms = int((time.time() - start_time) * 1000)
+         full_content = "".join(content_parts)
+
+         tracer.log(
+             model=model,
+             provider="anthropic",
+             input_data=_build_input_data(messages, system),
+             output_data={"content": full_content},
+             input_tokens=input_tokens,
+             output_tokens=output_tokens,
+             latency_ms=latency_ms,
+             status="success",
+             span_id=span_id,
+         )
+
+     except Exception as e:
+         latency_ms = int((time.time() - start_time) * 1000)
+         tracer.log(
+             model=model,
+             provider="anthropic",
+             input_data=_build_input_data(messages, system),
+             output_data=None,
+             latency_ms=latency_ms,
+             status="error",
+             error_message=str(e),
+             span_id=span_id,
+         )
+         raise
+
+
+ def _log_message(
+     tracer: "AITracer",
+     model: str,
+     messages: list,
+     system: Any,
+     response: "Message",
+     latency_ms: int,
+     span_id: str,
+ ) -> None:
+     """Log a non-streaming message."""
+     # Extract response content
+     output_content = None
+     if response.content:
+         text_parts = []
+         for block in response.content:
+             if hasattr(block, "text"):
+                 text_parts.append(block.text)
+         output_content = "".join(text_parts)
+
+     input_tokens = getattr(response.usage, "input_tokens", 0) if response.usage else 0
+     output_tokens = getattr(response.usage, "output_tokens", 0) if response.usage else 0
+
+     tracer.log(
+         model=model,
+         provider="anthropic",
+         input_data=_build_input_data(messages, system),
+         output_data={"content": output_content},
+         input_tokens=input_tokens,
+         output_tokens=output_tokens,
+         latency_ms=latency_ms,
+         status="success",
+         span_id=span_id,
+     )
+
+
+ def _build_input_data(messages: list, system: Any) -> dict:
+     """Build input data dictionary."""
+     input_data: dict[str, Any] = {"messages": _serialize_messages(messages)}
+     if system:
+         input_data["system"] = system if isinstance(system, str) else str(system)
+     return input_data
+
+
+ def _serialize_messages(messages: list) -> list[dict]:
+     """Serialize messages to JSON-compatible format."""
+     result = []
+     for msg in messages:
+         if isinstance(msg, dict):
+             result.append(msg)
+         elif hasattr(msg, "model_dump"):
+             result.append(msg.model_dump())
+         elif hasattr(msg, "__dict__"):
+             result.append(msg.__dict__)
+         else:
+             result.append({"content": str(msg)})
+     return result
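
End to end, wrap_anthropic_client patches client.messages.create in place, so call sites stay unchanged: non-streaming calls are logged on return, while streaming calls are logged only once the caller exhausts the wrapped iterator. A usage sketch; the AITracer constructor arguments are assumed, since that class is not shown in this diff:

from anthropic import Anthropic

from aitracer.client import AITracer  # path per the TYPE_CHECKING import above
from aitracer.wrappers.anthropic_wrapper import wrap_anthropic_client

tracer = AITracer(api_key="at-example-key")  # constructor signature assumed
client = wrap_anthropic_client(Anthropic(), tracer)

# Non-streaming: logged via _log_message() after the call returns.
message = client.messages.create(
    model="claude-sonnet-4-20250514",
    max_tokens=256,
    messages=[{"role": "user", "content": "Hello"}],
)

# Streaming: consume the wrapped iterator; the log is sent after the final
# event, with token counts pulled from message_start/message_delta events.
for event in client.messages.create(
    model="claude-sonnet-4-20250514",
    max_tokens=256,
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
):
    pass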