epi-recorder 2.1.3-py3-none-any.whl → 2.3.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- epi_analyzer/__init__.py +9 -0
- epi_analyzer/detector.py +337 -0
- epi_cli/__init__.py +4 -0
- epi_cli/__main__.py +4 -0
- epi_cli/chat.py +21 -3
- epi_cli/debug.py +107 -0
- epi_cli/keys.py +4 -0
- epi_cli/ls.py +5 -1
- epi_cli/main.py +8 -0
- epi_cli/record.py +4 -0
- epi_cli/run.py +12 -4
- epi_cli/verify.py +4 -0
- epi_cli/view.py +4 -0
- epi_core/__init__.py +5 -1
- epi_core/container.py +68 -55
- epi_core/redactor.py +4 -0
- epi_core/schemas.py +6 -2
- epi_core/serialize.py +4 -0
- epi_core/storage.py +186 -0
- epi_core/trust.py +4 -0
- epi_recorder/__init__.py +13 -1
- epi_recorder/api.py +211 -5
- epi_recorder/async_api.py +151 -0
- epi_recorder/bootstrap.py +4 -0
- epi_recorder/environment.py +4 -0
- epi_recorder/patcher.py +79 -19
- epi_recorder/test_import.py +2 -0
- epi_recorder/test_script.py +2 -0
- epi_recorder/wrappers/__init__.py +16 -0
- epi_recorder/wrappers/base.py +79 -0
- epi_recorder/wrappers/openai.py +178 -0
- epi_recorder-2.3.0.dist-info/METADATA +269 -0
- epi_recorder-2.3.0.dist-info/RECORD +41 -0
- {epi_recorder-2.1.3.dist-info → epi_recorder-2.3.0.dist-info}/WHEEL +1 -1
- epi_recorder-2.3.0.dist-info/licenses/LICENSE +21 -0
- {epi_recorder-2.1.3.dist-info → epi_recorder-2.3.0.dist-info}/top_level.txt +1 -0
- epi_viewer_static/app.js +113 -7
- epi_viewer_static/crypto.js +3 -0
- epi_viewer_static/index.html +4 -2
- epi_viewer_static/viewer_lite.css +3 -1
- epi_postinstall.py +0 -197
- epi_recorder-2.1.3.dist-info/METADATA +0 -577
- epi_recorder-2.1.3.dist-info/RECORD +0 -34
- epi_recorder-2.1.3.dist-info/licenses/LICENSE +0 -201
- {epi_recorder-2.1.3.dist-info → epi_recorder-2.3.0.dist-info}/entry_points.txt +0 -0
epi_recorder/async_api.py ADDED

@@ -0,0 +1,151 @@
+import asyncio
+import threading
+import time
+from concurrent.futures import ThreadPoolExecutor
+from contextlib import asynccontextmanager
+from typing import Optional, Dict, Any
+from datetime import datetime
+
+from epi_core.storage import EpiStorage
+from epi_core.schemas import StepModel
+
+class AsyncRecorder:
+    """
+    Async-native recorder that doesn't block the event loop.
+    Uses background thread for SQLite writes.
+    """
+
+    def __init__(self, session_name: str, output_dir: str = "."):
+        self.session_name = session_name
+        self.output_dir = output_dir
+
+        # Thread-safe queue for steps
+        self._queue = asyncio.Queue()
+
+        # Background thread executor (1 thread is enough for SQLite)
+        self._executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="epi_writer")
+
+        # Storage instance (created in background thread)
+        self._storage: Optional[EpiStorage] = None
+        self._writer_task: Optional[asyncio.Task] = None
+
+        # State tracking
+        self._step_count = 0
+        self._done = asyncio.Event()
+        self._error: Optional[Exception] = None
+
+    async def start(self):
+        """Initialize storage in background thread and start writer"""
+        # Create storage in thread (SQLite init is also blocking)
+        loop = asyncio.get_event_loop()
+        self._storage = await loop.run_in_executor(
+            self._executor,
+            lambda: EpiStorage(self.session_name, self.output_dir)
+        )
+
+        # Start background writer task
+        self._writer_task = asyncio.create_task(self._writer_loop())
+
+    async def record_step(self, step_type: str, content: dict):
+        """Non-blocking step recording"""
+        if self._error:
+            raise self._error
+
+        self._step_count += 1
+
+        # Put in queue (never blocks, just buffers in memory)
+        await self._queue.put({
+            'index': self._step_count,
+            'type': step_type,
+            'content': content,
+            'timestamp': datetime.utcnow()  # StepModel expects datetime
+        })
+
+    async def _writer_loop(self):
+        """Background task: Drains queue to SQLite"""
+        try:
+            while True:
+                # Wait for item with timeout to check for shutdown
+                try:
+                    step_data = await asyncio.wait_for(self._queue.get(), timeout=0.5)
+                except asyncio.TimeoutError:
+                    continue
+
+                if step_data is None:  # Shutdown sentinel
+                    self._queue.task_done()
+                    break
+
+                # Write to SQLite in background thread (non-blocking for async)
+                loop = asyncio.get_event_loop()
+                await loop.run_in_executor(
+                    self._executor,
+                    self._write_to_storage,
+                    step_data
+                )
+
+                self._queue.task_done()
+
+        except asyncio.CancelledError:
+            # Graceful shutdown
+            pass
+        except Exception as e:
+            self._error = e
+
+    def _write_to_storage(self, step_data: dict):
+        """Synchronous SQLite write (runs in background thread)"""
+        if self._storage:
+            # Construct StepModel
+            step = StepModel(
+                index=step_data['index'],
+                timestamp=step_data['timestamp'],
+                kind=step_data['type'],
+                content=step_data['content']
+            )
+            self._storage.add_step(step)
+
+    async def stop(self):
+        """Finalize: Drain queue, close storage"""
+        if not self._writer_task:
+            return
+
+        # Signal writer to finish
+        await self._queue.put(None)
+        await self._queue.join()
+
+        # Wait for task
+        self._writer_task.cancel()
+        try:
+            await self._writer_task
+        except asyncio.CancelledError:
+            pass
+
+        # Finalize storage in background thread
+        if self._storage:
+            loop = asyncio.get_event_loop()
+            await loop.run_in_executor(
+                self._executor,
+                self._storage.finalize
+            )
+
+        # Shutdown executor
+        self._executor.shutdown(wait=True)
+
+@asynccontextmanager
+async def record_async(session_name: str, output_dir: str = "."):
+    """
+    Async context manager for recording.
+
+    Usage:
+        async with record_async("my_agent") as rec:
+            await agent.arun("task")  # Non-blocking
+    """
+    recorder = AsyncRecorder(session_name, output_dir)
+    await recorder.start()
+    try:
+        yield recorder
+    finally:
+        await recorder.stop()
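For reference, a minimal sketch of how the new async API would be driven. The record_async and record_step signatures come from the added file above; the session name and the free-form step kinds are illustrative, not part of the package:

import asyncio
from epi_recorder.async_api import record_async

async def main() -> None:
    # Each record_step call only awaits an asyncio.Queue.put; the actual
    # SQLite write happens later on the dedicated "epi_writer" thread.
    async with record_async("demo_session", output_dir=".") as rec:
        await rec.record_step("note", {"text": "started"})
        await rec.record_step("note", {"text": "finished"})
    # On exit, stop() drains the queue, finalizes storage, and joins the thread.

asyncio.run(main())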
epi_recorder/bootstrap.py CHANGED
epi_recorder/environment.py CHANGED
epi_recorder/patcher.py CHANGED
@@ -14,6 +14,7 @@ from functools import wraps
 
 from epi_core.schemas import StepModel
 from epi_core.redactor import get_default_redactor
+from epi_core.storage import EpiStorage
 
 
 class RecordingContext:
@@ -32,7 +33,6 @@ class RecordingContext:
         enable_redaction: Whether to redact secrets (default: True)
         """
         self.output_dir = output_dir
-        # self.steps: List[StepModel] = []  # Removed for scalability
         self.step_index = 0
         self.enable_redaction = enable_redaction
         self.redactor = get_default_redactor() if enable_redaction else None
@@ -40,9 +40,16 @@ class RecordingContext:
         # Ensure output directory exists
         self.output_dir.mkdir(parents=True, exist_ok=True)
 
-        #
+        # Initialize SQLite storage (crash-safe, atomic)
+        import uuid
+        session_id = str(uuid.uuid4())[:8]
+        self.storage = EpiStorage(session_id, self.output_dir)
+
+        # Keep JSONL path for backwards compatibility
         self.steps_file = self.output_dir / "steps.jsonl"
-
+
+        # Create empty steps.jsonl file immediately (for tests and early access)
+        self.steps_file.touch(exist_ok=True)
 
     def add_step(self, kind: str, content: Dict[str, Any]) -> None:
         """
@@ -93,24 +100,36 @@ class RecordingContext:
             f.write(step.model_dump_json() + '\n')
 
 
-
-
+import contextvars
+
+# Thread-safe and async-safe recording context storage
+_recording_context: contextvars.ContextVar[Optional[RecordingContext]] = contextvars.ContextVar(
+    'epi_recording_context',
+    default=None
+)
 
 
-def set_recording_context(context: RecordingContext) -> None:
-    """
-
-
+def set_recording_context(context: Optional[RecordingContext]) -> contextvars.Token:
+    """
+    Set recording context for current execution context (thread or async task).
+
+    Args:
+        context: RecordingContext instance or None to clear
+
+    Returns:
+        Token for resetting context later
+    """
+    return _recording_context.set(context)
 
 
 def get_recording_context() -> Optional[RecordingContext]:
-    """Get the current recording context."""
-    return _recording_context
+    """Get recording context for current execution context."""
+    return _recording_context.get()
 
 
 def is_recording() -> bool:
-    """Check if recording is active."""
-    return _recording_context is not None
+    """Check if recording is active in current execution context."""
+    return _recording_context.get() is not None
 
 
 # ==================== OpenAI Patcher ====================
@@ -155,8 +174,9 @@ def _patch_openai_v1() -> bool:
     from openai import OpenAI
     from openai.resources.chat import completions
 
-    # Store original method
+    # Store original method for unpatching
    original_create = completions.Completions.create
+    _original_methods["openai.chat.completions.create"] = original_create
 
     @wraps(original_create)
     def wrapped_create(self, *args, **kwargs):
@@ -534,13 +554,53 @@ def patch_all() -> Dict[str, bool]:
     return results
 
 
+# Store original methods for unpatching
+_original_methods: Dict[str, Any] = {}
+
+
 def unpatch_all() -> None:
     """
     Unpatch all providers (restore original methods).
 
-
-
+    Restores any methods that were patched by patch_all(), patch_openai(),
+    patch_gemini(), or patch_requests().
     """
-
-
-
+    global _original_methods
+
+    # Restore OpenAI v1+ if patched
+    if "openai.chat.completions.create" in _original_methods:
+        try:
+            from openai.resources.chat import completions
+            completions.Completions.create = _original_methods["openai.chat.completions.create"]
+        except ImportError:
+            pass
+
+    # Restore OpenAI legacy if patched
+    if "openai.ChatCompletion.create" in _original_methods:
+        try:
+            import openai
+            openai.ChatCompletion.create = _original_methods["openai.ChatCompletion.create"]
+        except (ImportError, AttributeError):
+            pass
+
+    # Restore Gemini if patched
+    if "gemini.generate_content" in _original_methods:
+        try:
+            import google.generativeai as genai
+            genai.GenerativeModel.generate_content = _original_methods["gemini.generate_content"]
+        except ImportError:
+            pass
+
+    # Restore requests if patched
+    if "requests.Session.request" in _original_methods:
+        try:
+            import requests
+            requests.Session.request = _original_methods["requests.Session.request"]
+        except ImportError:
+            pass
+
+    # Clear stored originals
+    _original_methods.clear()
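The switch from a module-level global to contextvars.ContextVar is what makes is_recording() correct under concurrency: each asyncio task (and each thread) sees its own value. A self-contained illustration of that behavior using only the standard library; the variable and task names are invented for the demo:

import asyncio
import contextvars
from typing import Optional

# Stand-in for patcher.py's _recording_context; demo names only.
_ctx: contextvars.ContextVar[Optional[str]] = contextvars.ContextVar("demo_ctx", default=None)

async def task(name: str) -> None:
    token = _ctx.set(name)       # analogous to set_recording_context(ctx)
    await asyncio.sleep(0)       # yield so the other task runs in between
    assert _ctx.get() == name    # each task still sees its own value
    _ctx.reset(token)            # the returned Token undoes this task's set()

async def main() -> None:
    # asyncio runs each task in a copy of the current context, so the two
    # set() calls cannot clobber each other; a plain module global would.
    await asyncio.gather(task("a"), task("b"))

asyncio.run(main())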
epi_recorder/test_import.py CHANGED
epi_recorder/test_script.py CHANGED
epi_recorder/wrappers/__init__.py ADDED

@@ -0,0 +1,16 @@
+"""
+EPI Wrapper Clients - Proxy wrappers for LLM clients.
+
+Provides transparent tracing without monkey patching.
+"""
+
+from epi_recorder.wrappers.openai import wrap_openai, TracedOpenAI, TracedCompletions, TracedChat
+from epi_recorder.wrappers.base import TracedClientBase
+
+__all__ = [
+    "wrap_openai",
+    "TracedOpenAI",
+    "TracedCompletions",
+    "TracedChat",
+    "TracedClientBase",
+]
epi_recorder/wrappers/base.py ADDED

@@ -0,0 +1,79 @@
+"""
+Base classes for EPI traced clients.
+"""
+
+from abc import ABC, abstractmethod
+from typing import Any, Optional
+from datetime import datetime
+
+
+class TracedClientBase(ABC):
+    """
+    Base class for traced LLM client wrappers.
+
+    Provides common functionality for logging LLM calls
+    to the active EPI recording session.
+    """
+
+    def __init__(self, client: Any):
+        """
+        Initialize traced client wrapper.
+
+        Args:
+            client: The original LLM client to wrap
+        """
+        self._client = client
+
+    def _get_session(self):
+        """Get the current active EPI recording session."""
+        from epi_recorder.api import get_current_session
+        return get_current_session()
+
+    def _log_request(self, provider: str, model: str, messages: list, **kwargs) -> None:
+        """Log an LLM request to the active session."""
+        session = self._get_session()
+        if session:
+            session.log_step("llm.request", {
+                "provider": provider,
+                "model": model,
+                "messages": messages,
+                "timestamp": datetime.utcnow().isoformat(),
+                **kwargs
+            })
+
+    def _log_response(
+        self,
+        provider: str,
+        model: str,
+        content: str,
+        usage: Optional[dict] = None,
+        latency_seconds: Optional[float] = None,
+        **kwargs
+    ) -> None:
+        """Log an LLM response to the active session."""
+        session = self._get_session()
+        if session:
+            response_data = {
+                "provider": provider,
+                "model": model,
+                "choices": [{"message": {"role": "assistant", "content": content}}],
+                "timestamp": datetime.utcnow().isoformat(),
+            }
+            if usage:
+                response_data["usage"] = usage
+            if latency_seconds is not None:
+                response_data["latency_seconds"] = round(latency_seconds, 3)
+            response_data.update(kwargs)
+            session.log_step("llm.response", response_data)
+
+    def _log_error(self, provider: str, error: Exception, **kwargs) -> None:
+        """Log an LLM error to the active session."""
+        session = self._get_session()
+        if session:
+            session.log_step("llm.error", {
+                "provider": provider,
+                "error": str(error),
+                "error_type": type(error).__name__,
+                "timestamp": datetime.utcnow().isoformat(),
+                **kwargs
+            })
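Since TracedClientBase centralizes session lookup and the llm.request / llm.response / llm.error step formats, a wrapper for another provider only needs to call the _log_* helpers around its own client call. A hypothetical sketch; the complete(model=..., messages=...) client API is invented for illustration and is not part of this package:

from typing import Any
from epi_recorder.wrappers.base import TracedClientBase

class TracedSimpleClient(TracedClientBase):
    """Hypothetical wrapper for a client exposing complete(model=..., messages=...)."""

    def complete(self, model: str, messages: list, **kwargs: Any) -> Any:
        self._log_request("simple", model, messages)              # llm.request step
        try:
            response = self._client.complete(model=model, messages=messages, **kwargs)
        except Exception as e:
            self._log_error("simple", e, model=model)             # llm.error step
            raise
        self._log_response("simple", model, content=str(response))  # llm.response step
        return response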
epi_recorder/wrappers/openai.py ADDED

@@ -0,0 +1,178 @@
+"""
+OpenAI wrapper for EPI tracing.
+
+Provides a proxy wrapper that automatically logs all LLM calls
+without monkey patching.
+"""
+
+import time
+from typing import Any, Optional
+from datetime import datetime
+
+from epi_recorder.wrappers.base import TracedClientBase
+
+
+class TracedCompletions:
+    """Proxy wrapper for openai.chat.completions."""
+
+    def __init__(self, completions: Any, provider: str = "openai"):
+        self._completions = completions
+        self._provider = provider
+
+    def _get_session(self):
+        """Get the current active EPI recording session."""
+        from epi_recorder.api import get_current_session
+        return get_current_session()
+
+    def create(self, *args, **kwargs) -> Any:
+        """
+        Create a chat completion with automatic EPI tracing.
+
+        All arguments are passed through to the underlying client.
+        """
+        session = self._get_session()
+
+        # Extract request info
+        model = kwargs.get("model", "unknown")
+        messages = kwargs.get("messages", [])
+
+        # Log request if session is active
+        if session:
+            session.log_step("llm.request", {
+                "provider": self._provider,
+                "model": model,
+                "messages": messages,
+                "timestamp": datetime.utcnow().isoformat(),
+            })
+
+        # Call original method
+        start_time = time.time()
+        try:
+            response = self._completions.create(*args, **kwargs)
+            latency = time.time() - start_time
+
+            # Log response if session is active
+            if session:
+                # Extract response content
+                choices = []
+                for choice in response.choices:
+                    msg = choice.message
+                    choices.append({
+                        "message": {
+                            "role": getattr(msg, "role", "assistant"),
+                            "content": getattr(msg, "content", ""),
+                        },
+                        "finish_reason": getattr(choice, "finish_reason", None),
+                    })
+
+                # Extract usage
+                usage = None
+                if hasattr(response, "usage") and response.usage:
+                    usage = {
+                        "prompt_tokens": getattr(response.usage, "prompt_tokens", 0),
+                        "completion_tokens": getattr(response.usage, "completion_tokens", 0),
+                        "total_tokens": getattr(response.usage, "total_tokens", 0),
+                    }
+
+                session.log_step("llm.response", {
+                    "provider": self._provider,
+                    "model": model,
+                    "choices": choices,
+                    "usage": usage,
+                    "latency_seconds": round(latency, 3),
+                    "timestamp": datetime.utcnow().isoformat(),
+                })
+
+            return response
+
+        except Exception as e:
+            latency = time.time() - start_time
+
+            # Log error if session is active
+            if session:
+                session.log_step("llm.error", {
+                    "provider": self._provider,
+                    "model": model,
+                    "error": str(e),
+                    "error_type": type(e).__name__,
+                    "latency_seconds": round(latency, 3),
+                    "timestamp": datetime.utcnow().isoformat(),
+                })
+
+            raise
+
+
+class TracedChat:
+    """Proxy wrapper for openai.chat."""
+
+    def __init__(self, chat: Any, provider: str = "openai"):
+        self._chat = chat
+        self._provider = provider
+        self.completions = TracedCompletions(chat.completions, provider)
+
+
+class TracedOpenAI(TracedClientBase):
+    """
+    Traced OpenAI client wrapper.
+
+    Wraps an OpenAI client and automatically logs all LLM calls
+    to the active EPI recording session.
+
+    Usage:
+        from openai import OpenAI
+        from epi_recorder.wrappers import wrap_openai
+
+        client = wrap_openai(OpenAI())
+
+        with record("my_agent.epi"):
+            response = client.chat.completions.create(
+                model="gpt-4",
+                messages=[{"role": "user", "content": "Hello"}]
+            )
+    """
+
+    def __init__(self, client: Any, provider: str = "openai"):
+        """
+        Initialize traced OpenAI client.
+
+        Args:
+            client: OpenAI client instance
+            provider: Provider name for logging (default: "openai")
+        """
+        super().__init__(client)
+        self._provider = provider
+        self.chat = TracedChat(client.chat, provider)
+
+    def __getattr__(self, name: str) -> Any:
+        """
+        Forward attribute access to underlying client.
+
+        This allows access to non-chat APIs (embeddings, files, etc.)
+        without explicit wrapping.
+        """
+        return getattr(self._client, name)
+
+
+def wrap_openai(client: Any, provider: str = "openai") -> TracedOpenAI:
+    """
+    Wrap an OpenAI client for EPI tracing.
+
+    Args:
+        client: OpenAI client instance
+        provider: Provider name for logging (default: "openai")
+
+    Returns:
+        TracedOpenAI wrapper
+
+    Usage:
+        from openai import OpenAI
+        from epi_recorder.wrappers import wrap_openai
+
+        # Wrap the client once
+        client = wrap_openai(OpenAI())
+
+        # Use normally - calls are automatically traced when inside record()
+        with record("my_agent.epi"):
+            response = client.chat.completions.create(...)
+    """
+    return TracedOpenAI(client, provider)
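Putting the pieces together, the intended end-to-end flow looks like the sketch below. It assumes record is the synchronous context manager exported by epi_recorder, as the docstrings above imply (the import path is not shown in this diff); the model name and prompt are placeholders:

from openai import OpenAI
from epi_recorder import record                 # assumed export; see docstring usage
from epi_recorder.wrappers import wrap_openai

client = wrap_openai(OpenAI())                  # wrap once, use everywhere

with record("my_agent.epi"):
    # The proxy logs llm.request before the call and llm.response (with
    # usage and latency) after it; exceptions are logged as llm.error.
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response.choices[0].message.content)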