epi-recorder 2.2.0-py3-none-any.whl → 2.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
epi_core/schemas.py CHANGED
@@ -18,7 +18,7 @@ class ManifestModel(BaseModel):
     """

     spec_version: str = Field(
-        default="2.2.0",
+        default="2.3.0",
         description="EPI specification version"
     )

@@ -148,5 +148,5 @@ class StepModel(BaseModel):
     )


-
+

epi_recorder/__init__.py CHANGED
@@ -4,7 +4,7 @@ EPI Recorder - Runtime interception and workflow capture.
 Python API for recording AI workflows with cryptographic verification.
 """

-__version__ = "2.2.0"
+__version__ = "2.3.0"

 # Export Python API
 from epi_recorder.api import (
@@ -13,13 +13,21 @@ from epi_recorder.api import (
     get_current_session
 )

+# Export wrapper clients (new in v2.3.0)
+from epi_recorder.wrappers import (
+    wrap_openai,
+    TracedOpenAI,
+)
+
 __all__ = [
     "EpiRecorderSession",
     "record",
     "get_current_session",
+    "wrap_openai",
+    "TracedOpenAI",
     "__version__"
 ]


-
+

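With these exports in place, the wrapper API is reachable from the package root. A minimal sketch of the new top-level import surface in 2.3.0 (the asserted value simply mirrors the version bump above):

    # Names taken from the updated __all__ above.
    from epi_recorder import record, get_current_session, wrap_openai, TracedOpenAI, __version__

    assert __version__ == "2.3.0"
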
epi_recorder/api.py CHANGED
@@ -54,6 +54,8 @@ class EpiRecorderSession:
         metrics: Optional[Dict[str, Union[float, str]]] = None,
         approved_by: Optional[str] = None,
         metadata_tags: Optional[List[str]] = None,  # Renamed to avoid conflict with tags parameter
+        # Legacy mode (deprecated)
+        legacy_patching: bool = False,
     ):
         """
         Initialize EPI recording session.
@@ -70,6 +72,7 @@ class EpiRecorderSession:
             metrics: Key-value metrics for this workflow (accuracy, latency, etc.)
             approved_by: Person or entity who approved this workflow execution
             metadata_tags: Tags for categorizing this workflow (renamed from tags to avoid conflict)
+            legacy_patching: Enable deprecated monkey patching mode (default: False)
         """
         self.output_path = Path(output_path)
         self.workflow_name = workflow_name or "untitled"
@@ -85,6 +88,9 @@ class EpiRecorderSession:
         self.approved_by = approved_by
         self.metadata_tags = metadata_tags

+        # Legacy mode flag (deprecated)
+        self.legacy_patching = legacy_patching
+
         # Runtime state
         self.temp_dir: Optional[Path] = None
         self.recording_context: Optional[RecordingContext] = None
@@ -117,9 +123,17 @@ class EpiRecorderSession:
         set_recording_context(self.recording_context)
         _thread_local.active_session = self

-        # Patch LLM libraries and HTTP
-        from epi_recorder.patcher import patch_all
-        patch_all()
+        # Only patch LLM libraries if legacy mode is enabled (deprecated)
+        if self.legacy_patching:
+            import warnings
+            warnings.warn(
+                "legacy_patching is deprecated and will be removed in v3.0.0. "
+                "Use epi.log_llm_call() or wrapper clients (wrap_openai) instead.",
+                DeprecationWarning,
+                stacklevel=2
+            )
+            from epi_recorder.patcher import patch_all
+            patch_all()

         # Log session start
         self.log_step("session.start", {
@@ -250,6 +264,172 @@ class EpiRecorderSession:
             **response_payload
         })

+    def log_llm_call(
+        self,
+        response: Any,
+        messages: Optional[List[Dict[str, str]]] = None,
+        provider: str = "auto"
+    ) -> None:
+        """
+        Log a complete LLM call (request + response) from any provider.
+
+        Auto-detects OpenAI, Anthropic, and Gemini response objects.
+        This is the RECOMMENDED way to log LLM calls without monkey patching.
+
+        Args:
+            response: The LLM response object (OpenAI, Anthropic, Gemini, etc.)
+            messages: Optional original messages (for request logging)
+            provider: Provider name ("auto" to detect, or "openai", "anthropic", etc.)
+
+        Example:
+            with record("my_agent.epi") as epi:
+                response = client.chat.completions.create(
+                    model="gpt-4",
+                    messages=[{"role": "user", "content": "Hello"}]
+                )
+                epi.log_llm_call(response, messages=[{"role": "user", "content": "Hello"}])
+        """
+        if not self._entered:
+            raise RuntimeError("Cannot log LLM call outside of context manager")
+
+        # Auto-detect provider and extract data
+        model = "unknown"
+        content = ""
+        usage = None
+        choices = []
+
+        # Try OpenAI format
+        if hasattr(response, "choices") and hasattr(response, "model"):
+            provider = "openai" if provider == "auto" else provider
+            model = getattr(response, "model", "unknown")
+
+            for choice in response.choices:
+                msg = choice.message
+                choices.append({
+                    "message": {
+                        "role": getattr(msg, "role", "assistant"),
+                        "content": getattr(msg, "content", ""),
+                    },
+                    "finish_reason": getattr(choice, "finish_reason", None),
+                })
+                if not content:
+                    content = getattr(msg, "content", "")
+
+            if hasattr(response, "usage") and response.usage:
+                usage = {
+                    "prompt_tokens": getattr(response.usage, "prompt_tokens", 0),
+                    "completion_tokens": getattr(response.usage, "completion_tokens", 0),
+                    "total_tokens": getattr(response.usage, "total_tokens", 0),
+                }

+        # Try Anthropic format
+        elif hasattr(response, "content") and hasattr(response, "model"):
+            provider = "anthropic" if provider == "auto" else provider
+            model = getattr(response, "model", "unknown")
+
+            # Anthropic returns content as a list of content blocks
+            content_blocks = getattr(response, "content", [])
+            if content_blocks and hasattr(content_blocks[0], "text"):
+                content = content_blocks[0].text
+            choices = [{"message": {"role": "assistant", "content": content}}]
+
+            if hasattr(response, "usage"):
+                usage = {
+                    "input_tokens": getattr(response.usage, "input_tokens", 0),
+                    "output_tokens": getattr(response.usage, "output_tokens", 0),
+                }
+
+        # Try Gemini format
+        elif hasattr(response, "text") and hasattr(response, "candidates"):
+            provider = "gemini" if provider == "auto" else provider
+            model = "gemini"
+            content = getattr(response, "text", "")
+            choices = [{"message": {"role": "assistant", "content": content}}]
+
+        # Fallback: try to extract as dict or string
+        else:
+            provider = provider if provider != "auto" else "unknown"
+            if isinstance(response, dict):
+                content = str(response.get("content", response))
+            else:
+                content = str(response)
+            choices = [{"message": {"role": "assistant", "content": content}}]
+
+        # Log request if messages provided
+        if messages:
+            self.log_step("llm.request", {
+                "provider": provider,
+                "model": model,
+                "messages": messages,
+                "timestamp": datetime.utcnow().isoformat(),
+            })
+
+        # Log response
+        response_data = {
+            "provider": provider,
+            "model": model,
+            "choices": choices,
+            "timestamp": datetime.utcnow().isoformat(),
+        }
+        if usage:
+            response_data["usage"] = usage
+
+        self.log_step("llm.response", response_data)
+
+    def log_chat(
+        self,
+        model: str,
+        messages: List[Dict[str, str]],
+        response_content: str,
+        provider: str = "custom",
+        usage: Optional[Dict[str, int]] = None,
+        **metadata
+    ) -> None:
+        """
+        Simplified logging for chat completions.
+
+        Use this when you have the raw data instead of response objects.
+
+        Args:
+            model: Model name (e.g., "gpt-4", "claude-3")
+            messages: The messages sent to the model
+            response_content: The assistant's response text
+            provider: Provider name (default: "custom")
+            usage: Optional token usage dict
+            **metadata: Additional metadata to include
+
+        Example:
+            epi.log_chat(
+                model="gpt-4",
+                messages=[{"role": "user", "content": "Hello"}],
+                response_content="Hi there!",
+                tokens=150
+            )
+        """
+        if not self._entered:
+            raise RuntimeError("Cannot log chat outside of context manager")
+
+        # Log request
+        self.log_step("llm.request", {
+            "provider": provider,
+            "model": model,
+            "messages": messages,
+            "timestamp": datetime.utcnow().isoformat(),
+            **metadata
+        })
+
+        # Log response
+        response_data = {
+            "provider": provider,
+            "model": model,
+            "choices": [{"message": {"role": "assistant", "content": response_content}}],
+            "timestamp": datetime.utcnow().isoformat(),
+        }
+        if usage:
+            response_data["usage"] = usage
+
+        self.log_step("llm.response", response_data)
+
     def log_artifact(
         self,
         file_path: Path,
@@ -615,5 +795,5 @@ def get_current_session() -> Optional[EpiRecorderSession]:
     return getattr(_thread_local, 'active_session', None)


-
+

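The two methods added above are the patch-free logging path this release is built around. A short usage sketch assembled from their docstrings; the OpenAI client, model name, and output path are illustrative:

    from openai import OpenAI
    from epi_recorder import record

    client = OpenAI()
    messages = [{"role": "user", "content": "Hello"}]

    with record("my_agent.epi") as epi:
        # Option 1: hand over the raw response object; provider, model, and usage are auto-detected.
        response = client.chat.completions.create(model="gpt-4", messages=messages)
        epi.log_llm_call(response, messages=messages)

        # Option 2: log plain values when only the extracted text is at hand.
        epi.log_chat(
            model="gpt-4",
            messages=messages,
            response_content=response.choices[0].message.content,
            usage={"total_tokens": response.usage.total_tokens},
        )
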
epi_recorder/patcher.py CHANGED
@@ -47,6 +47,9 @@ class RecordingContext:

         # Keep JSONL path for backwards compatibility
         self.steps_file = self.output_dir / "steps.jsonl"
+
+        # Create empty steps.jsonl file immediately (for tests and early access)
+        self.steps_file.touch(exist_ok=True)

     def add_step(self, kind: str, content: Dict[str, Any]) -> None:
         """
@@ -171,8 +174,9 @@ def _patch_openai_v1() -> bool:
         from openai import OpenAI
         from openai.resources.chat import completions

-        # Store original method
+        # Store original method for unpatching
         original_create = completions.Completions.create
+        _original_methods["openai.chat.completions.create"] = original_create

         @wraps(original_create)
         def wrapped_create(self, *args, **kwargs):
@@ -550,17 +554,53 @@ def patch_all() -> Dict[str, bool]:
     return results


+# Store original methods for unpatching
+_original_methods: Dict[str, Any] = {}
+
+
 def unpatch_all() -> None:
     """
     Unpatch all providers (restore original methods).

-    Note: This is a placeholder for future implementation.
-    Full unpatching requires storing original methods.
+    Restores any methods that were patched by patch_all(), patch_openai(),
+    patch_gemini(), or patch_requests().
     """
-    # For MVP, we don't implement unpatching
-    # In production, store original methods and restore them
-    pass
+    global _original_methods
+
+    # Restore OpenAI v1+ if patched
+    if "openai.chat.completions.create" in _original_methods:
+        try:
+            from openai.resources.chat import completions
+            completions.Completions.create = _original_methods["openai.chat.completions.create"]
+        except ImportError:
+            pass
+
+    # Restore OpenAI legacy if patched
+    if "openai.ChatCompletion.create" in _original_methods:
+        try:
+            import openai
+            openai.ChatCompletion.create = _original_methods["openai.ChatCompletion.create"]
+        except (ImportError, AttributeError):
+            pass
+
+    # Restore Gemini if patched
+    if "gemini.generate_content" in _original_methods:
+        try:
+            import google.generativeai as genai
+            genai.GenerativeModel.generate_content = _original_methods["gemini.generate_content"]
+        except ImportError:
+            pass
+
+    # Restore requests if patched
+    if "requests.Session.request" in _original_methods:
+        try:
+            import requests
+            requests.Session.request = _original_methods["requests.Session.request"]
+        except ImportError:
+            pass
+
+    # Clear stored originals
+    _original_methods.clear()


-

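Combined with the `api.py` change above, recording no longer monkey patches anything unless asked to. A sketch of opting back into the deprecated mode and of the now-working unpatch path, assuming the session is used as a context manager the way the docstrings show; the output path is illustrative:

    import warnings
    from epi_recorder import EpiRecorderSession
    from epi_recorder.patcher import unpatch_all

    with warnings.catch_warnings():
        warnings.simplefilter("always", DeprecationWarning)
        # Entering the session emits a DeprecationWarning and calls patch_all().
        with EpiRecorderSession(output_path="legacy_run.epi", legacy_patching=True):
            ...  # patched OpenAI / Gemini / requests calls are captured here

    # Restores every method recorded in _original_methods, then clears the registry.
    unpatch_all()
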
epi_recorder/wrappers/__init__.py ADDED
@@ -0,0 +1,16 @@
+"""
+EPI Wrapper Clients - Proxy wrappers for LLM clients.
+
+Provides transparent tracing without monkey patching.
+"""
+
+from epi_recorder.wrappers.openai import wrap_openai, TracedOpenAI, TracedCompletions, TracedChat
+from epi_recorder.wrappers.base import TracedClientBase
+
+__all__ = [
+    "wrap_openai",
+    "TracedOpenAI",
+    "TracedCompletions",
+    "TracedChat",
+    "TracedClientBase",
+]
epi_recorder/wrappers/base.py ADDED
@@ -0,0 +1,79 @@
+"""
+Base classes for EPI traced clients.
+"""
+
+from abc import ABC, abstractmethod
+from typing import Any, Optional
+from datetime import datetime
+
+
+class TracedClientBase(ABC):
+    """
+    Base class for traced LLM client wrappers.
+
+    Provides common functionality for logging LLM calls
+    to the active EPI recording session.
+    """
+
+    def __init__(self, client: Any):
+        """
+        Initialize traced client wrapper.
+
+        Args:
+            client: The original LLM client to wrap
+        """
+        self._client = client
+
+    def _get_session(self):
+        """Get the current active EPI recording session."""
+        from epi_recorder.api import get_current_session
+        return get_current_session()
+
+    def _log_request(self, provider: str, model: str, messages: list, **kwargs) -> None:
+        """Log an LLM request to the active session."""
+        session = self._get_session()
+        if session:
+            session.log_step("llm.request", {
+                "provider": provider,
+                "model": model,
+                "messages": messages,
+                "timestamp": datetime.utcnow().isoformat(),
+                **kwargs
+            })
+
+    def _log_response(
+        self,
+        provider: str,
+        model: str,
+        content: str,
+        usage: Optional[dict] = None,
+        latency_seconds: Optional[float] = None,
+        **kwargs
+    ) -> None:
+        """Log an LLM response to the active session."""
+        session = self._get_session()
+        if session:
+            response_data = {
+                "provider": provider,
+                "model": model,
+                "choices": [{"message": {"role": "assistant", "content": content}}],
+                "timestamp": datetime.utcnow().isoformat(),
+            }
+            if usage:
+                response_data["usage"] = usage
+            if latency_seconds is not None:
+                response_data["latency_seconds"] = round(latency_seconds, 3)
+            response_data.update(kwargs)
+            session.log_step("llm.response", response_data)
+
+    def _log_error(self, provider: str, error: Exception, **kwargs) -> None:
+        """Log an LLM error to the active session."""
+        session = self._get_session()
+        if session:
+            session.log_step("llm.error", {
+                "provider": provider,
+                "error": str(error),
+                "error_type": type(error).__name__,
+                "timestamp": datetime.utcnow().isoformat(),
+                **kwargs
+            })
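Since `TracedClientBase` only depends on the active session, it can back wrappers for other providers as well. A hypothetical sketch of such a subclass; the `messages.create(...)` shape of the wrapped client and the helper name `create_message` are assumptions for illustration, not part of this package:

    import time
    from typing import Any

    from epi_recorder.wrappers.base import TracedClientBase


    class TracedMessagesClient(TracedClientBase):
        """Illustrative wrapper that reuses the base logging helpers."""

        def create_message(self, model: str, messages: list, **kwargs) -> Any:
            self._log_request("anthropic", model, messages)
            start = time.time()
            try:
                # Assumed interface on the wrapped client, for illustration only.
                response = self._client.messages.create(model=model, messages=messages, **kwargs)
            except Exception as exc:
                self._log_error("anthropic", exc, model=model)
                raise
            # Anthropic-style responses keep text in content blocks; adapt per provider.
            text = response.content[0].text if getattr(response, "content", None) else ""
            self._log_response("anthropic", model, text, latency_seconds=time.time() - start)
            return response
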
epi_recorder/wrappers/openai.py ADDED
@@ -0,0 +1,178 @@
+"""
+OpenAI wrapper for EPI tracing.
+
+Provides a proxy wrapper that automatically logs all LLM calls
+without monkey patching.
+"""
+
+import time
+from typing import Any, Optional
+from datetime import datetime
+
+from epi_recorder.wrappers.base import TracedClientBase
+
+
+class TracedCompletions:
+    """Proxy wrapper for openai.chat.completions."""
+
+    def __init__(self, completions: Any, provider: str = "openai"):
+        self._completions = completions
+        self._provider = provider
+
+    def _get_session(self):
+        """Get the current active EPI recording session."""
+        from epi_recorder.api import get_current_session
+        return get_current_session()
+
+    def create(self, *args, **kwargs) -> Any:
+        """
+        Create a chat completion with automatic EPI tracing.
+
+        All arguments are passed through to the underlying client.
+        """
+        session = self._get_session()
+
+        # Extract request info
+        model = kwargs.get("model", "unknown")
+        messages = kwargs.get("messages", [])
+
+        # Log request if session is active
+        if session:
+            session.log_step("llm.request", {
+                "provider": self._provider,
+                "model": model,
+                "messages": messages,
+                "timestamp": datetime.utcnow().isoformat(),
+            })
+
+        # Call original method
+        start_time = time.time()
+        try:
+            response = self._completions.create(*args, **kwargs)
+            latency = time.time() - start_time
+
+            # Log response if session is active
+            if session:
+                # Extract response content
+                choices = []
+                for choice in response.choices:
+                    msg = choice.message
+                    choices.append({
+                        "message": {
+                            "role": getattr(msg, "role", "assistant"),
+                            "content": getattr(msg, "content", ""),
+                        },
+                        "finish_reason": getattr(choice, "finish_reason", None),
+                    })
+
+                # Extract usage
+                usage = None
+                if hasattr(response, "usage") and response.usage:
+                    usage = {
+                        "prompt_tokens": getattr(response.usage, "prompt_tokens", 0),
+                        "completion_tokens": getattr(response.usage, "completion_tokens", 0),
+                        "total_tokens": getattr(response.usage, "total_tokens", 0),
+                    }
+
+                session.log_step("llm.response", {
+                    "provider": self._provider,
+                    "model": model,
+                    "choices": choices,
+                    "usage": usage,
+                    "latency_seconds": round(latency, 3),
+                    "timestamp": datetime.utcnow().isoformat(),
+                })
+
+            return response
+
+        except Exception as e:
+            latency = time.time() - start_time
+
+            # Log error if session is active
+            if session:
+                session.log_step("llm.error", {
+                    "provider": self._provider,
+                    "model": model,
+                    "error": str(e),
+                    "error_type": type(e).__name__,
+                    "latency_seconds": round(latency, 3),
+                    "timestamp": datetime.utcnow().isoformat(),
+                })
+
+            raise
+
+
+class TracedChat:
+    """Proxy wrapper for openai.chat."""
+
+    def __init__(self, chat: Any, provider: str = "openai"):
+        self._chat = chat
+        self._provider = provider
+        self.completions = TracedCompletions(chat.completions, provider)
+
+
+class TracedOpenAI(TracedClientBase):
+    """
+    Traced OpenAI client wrapper.
+
+    Wraps an OpenAI client and automatically logs all LLM calls
+    to the active EPI recording session.
+
+    Usage:
+        from openai import OpenAI
+        from epi_recorder.wrappers import wrap_openai
+
+        client = wrap_openai(OpenAI())
+
+        with record("my_agent.epi"):
+            response = client.chat.completions.create(
+                model="gpt-4",
+                messages=[{"role": "user", "content": "Hello"}]
+            )
+    """
+
+    def __init__(self, client: Any, provider: str = "openai"):
+        """
+        Initialize traced OpenAI client.
+
+        Args:
+            client: OpenAI client instance
+            provider: Provider name for logging (default: "openai")
+        """
+        super().__init__(client)
+        self._provider = provider
+        self.chat = TracedChat(client.chat, provider)
+
+    def __getattr__(self, name: str) -> Any:
+        """
+        Forward attribute access to underlying client.
+
+        This allows access to non-chat APIs (embeddings, files, etc.)
+        without explicit wrapping.
+        """
+        return getattr(self._client, name)
+
+
+def wrap_openai(client: Any, provider: str = "openai") -> TracedOpenAI:
+    """
+    Wrap an OpenAI client for EPI tracing.
+
+    Args:
+        client: OpenAI client instance
+        provider: Provider name for logging (default: "openai")
+
+    Returns:
+        TracedOpenAI wrapper
+
+    Usage:
+        from openai import OpenAI
+        from epi_recorder.wrappers import wrap_openai
+
+        # Wrap the client once
+        client = wrap_openai(OpenAI())
+
+        # Use normally - calls are automatically traced when inside record()
+        with record("my_agent.epi"):
+            response = client.chat.completions.create(...)
+    """
+    return TracedOpenAI(client, provider)
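
End to end, the wrapper behaves as a drop-in replacement for the plain client. A short sketch combining it with `record()`, including a call that falls through `__getattr__` to the unwrapped API; model names are illustrative:

    from openai import OpenAI
    from epi_recorder import record, wrap_openai

    client = wrap_openai(OpenAI())

    with record("my_agent.epi"):
        # Routed through TracedCompletions.create, producing llm.request / llm.response steps.
        chat = client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": "Hello"}],
        )

        # Not wrapped explicitly: __getattr__ forwards this to the underlying client,
        # so it works normally but is not logged as an llm.* step.
        embeddings = client.embeddings.create(model="text-embedding-3-small", input="Hello")

    # Outside record() there is no active session, so wrapped calls simply pass through untraced.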