epi-recorder 2.1.1__py3-none-any.whl → 2.1.3__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
epi_cli/chat.py ADDED
@@ -0,0 +1,193 @@
+ """
+ EPI CLI Chat - Interactive evidence querying with AI.
+
+ Allows users to ask natural language questions about their .epi evidence files.
+ """
+
+ import json
+ import os
+ import warnings
+ from pathlib import Path
+ from typing import Optional
+
+ import typer
+ from rich.console import Console
+ from rich.markdown import Markdown
+ from rich.panel import Panel
+ from rich.prompt import Prompt
+ import google.api_core.exceptions
+
+ from epi_core.container import EPIContainer
+
+
+ console = Console()
+
+
+ def load_steps_from_epi(epi_path: Path) -> list:
+ """Load steps from an .epi file."""
+ import tempfile
+
+ temp_dir = Path(tempfile.mkdtemp())
+ extracted = EPIContainer.unpack(epi_path, temp_dir)
+
+ steps_file = extracted / "steps.jsonl"
+ if not steps_file.exists():
+ return []
+
+ steps = []
+ with open(steps_file, 'r', encoding='utf-8') as f:
+ for line in f:
+ if line.strip():
+ steps.append(json.loads(line))
+
+ return steps
+
+
+ def chat(
+ epi_file: Path = typer.Argument(..., help="Path to .epi file to chat with"),
+ model: str = typer.Option("gemini-2.0-flash", "--model", "-m", help="Gemini model to use")
+ ):
+ """
+ Chat with your evidence file using AI.
+
+ Ask natural language questions about what happened in your recording.
+
+ Example:
+ epi chat my_recording.epi
+ """
+ # Resolve path
+ if not epi_file.exists():
+ # Try epi-recordings directory
+ recordings_dir = Path("./epi-recordings")
+ potential_path = recordings_dir / f"{epi_file.stem}.epi"
+ if potential_path.exists():
+ epi_file = potential_path
+ else:
+ console.print(f"[red]Error:[/red] File not found: {epi_file}")
+ raise typer.Exit(1)
+
+ # Check for API key
+ api_key = os.environ.get("GOOGLE_API_KEY") or os.environ.get("GEMINI_API_KEY")
+ if not api_key:
+ console.print(Panel(
+ "[yellow]No API key found![/yellow]\n\n"
+ "Set your Google AI API key:\n"
+ " [cyan]set GOOGLE_API_KEY=your-key-here[/cyan] (Windows)\n"
+ " [cyan]export GOOGLE_API_KEY=your-key-here[/cyan] (Mac/Linux)\n\n"
+ "Get a free key at: [link]https://makersuite.google.com/app/apikey[/link]",
+ title="[!] API Key Required",
+ border_style="yellow"
+ ))
+ raise typer.Exit(1)
+
+ # Load the .epi file
+ console.print(f"\n[dim]Loading evidence from:[/dim] {epi_file}")
+
+ try:
+ manifest = EPIContainer.read_manifest(epi_file)
+ steps = load_steps_from_epi(epi_file)
+ except Exception as e:
+ console.print(f"[red]Error loading .epi file:[/red] {e}")
+ raise typer.Exit(1)
+
+ # Initialize Gemini
+ try:
+ import warnings
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ import google.generativeai as genai
+
+ genai.configure(api_key=api_key)
+ ai_model = genai.GenerativeModel(model)
+ except ImportError:
+ console.print(Panel(
+ "[red]Google Generative AI package not installed![/red]\n\n"
+ "Install it with:\n"
+ " [cyan]pip install google-generativeai[/cyan]",
+ title="[X] Missing Dependency",
+ border_style="red"
+ ))
+ raise typer.Exit(1)
+ except Exception as e:
+ console.print(f"[red]Error initializing Gemini:[/red] {e}")
+ raise typer.Exit(1)
+
+ # Build context
+ context = f"""You are an expert assistant analyzing an EPI evidence recording file.
+
+ The recording contains cryptographically signed, tamper-proof evidence of an AI workflow execution.
+
+ Recording metadata:
+ - Created: {manifest.created_at}
+ - Goal: {manifest.goal or 'Not specified'}
+ - Command: {manifest.cli_command or 'Not specified'}
+ - Workflow ID: {manifest.workflow_id}
+ - Total steps: {len(steps)}
+
+ Here are the recorded steps (this is the timeline of events):
+ {json.dumps(steps[:50], indent=2, default=str)[:8000]}
+
+ When answering questions:
+ 1. Be specific and cite step indices when relevant
+ 2. Distinguish between LLM requests, responses, and other events
+ 3. If asked about security, note that API keys are automatically redacted
+ 4. Keep answers concise but informative
+ """
+
+ # Start chat session
+ chat_session = ai_model.start_chat(history=[])
+
+ # Display header
+ console.print()
+ console.print(Panel(
+ f"[bold cyan]EPI Evidence Chat[/bold cyan]\n\n"
+ f"[dim]File:[/dim] {epi_file.name}\n"
+ f"[dim]Steps:[/dim] {len(steps)}\n"
+ f"[dim]Model:[/dim] {model}\n\n"
+ f"Ask questions about this evidence recording.\n"
+ f"Type [yellow]exit[/yellow] or [yellow]quit[/yellow] to end the session.",
+ border_style="cyan"
+ ))
+ console.print()
+
+ # Chat loop
+ while True:
+ try:
+ question = Prompt.ask("[bold cyan]You[/bold cyan]")
+ except (KeyboardInterrupt, EOFError):
+ console.print("\n[dim]Goodbye![/dim]")
+ break
+
+ if question.lower() in ('exit', 'quit', 'q'):
+ console.print("[dim]Goodbye![/dim]")
+ break
+
+ if not question.strip():
+ continue
+
+ # Send to Gemini with context
+ try:
+ full_prompt = f"{context}\n\nUser question: {question}"
+ response = chat_session.send_message(full_prompt)
+
+ console.print()
+ console.print("[bold green]AI:[/bold green]")
+ console.print(Markdown(response.text))
+ console.print()
+
+ except google.api_core.exceptions.ResourceExhausted:
+ console.print(Panel(
+ "[yellow]API Quota Exceeded[/yellow]\n\n"
+ "You have hit the rate limit for the Gemini API (free tier).\n"
+ "Please wait a minute before trying again.",
+ title="[!] Rate Limit",
+ border_style="yellow"
+ ))
+ except google.api_core.exceptions.NotFound:
+ console.print(f"[red]Error:[/red] The model '{model}' was not found. Try using a different model with --model.")
+ except google.api_core.exceptions.InvalidArgument as e:
+ console.print(f"[red]Error:[/red] Invalid argument: {e}")
+ except Exception as e:
+ console.print(f"[red]Error:[/red] {e}")
+ console.print("[dim]Try asking a different question.[/dim]")
+ console.print()
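As a quick orientation, here is a minimal sketch of reusing the new load_steps_from_epi helper outside the chat loop, e.g. to skim a recording's timeline without needing a Gemini API key. The recording path is hypothetical, and the per-step JSON schema is not specified by this diff, so the sketch only prints whatever keys each step happens to have:

    from pathlib import Path

    from epi_cli.chat import load_steps_from_epi

    # Hypothetical recording path; adjust to a real .epi file.
    steps = load_steps_from_epi(Path("epi-recordings/my_recording.epi"))
    print(f"{len(steps)} steps recorded")
    for i, step in enumerate(steps[:10]):
        # Each entry is one JSON object parsed from a line of steps.jsonl;
        # the exact keys depend on the recorder version.
        print(i, list(step.keys()))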
epi_cli/main.py CHANGED
@@ -12,7 +12,9 @@ from epi_cli.keys import generate_default_keypair_if_missing
  # Create Typer app
  app = typer.Typer(
  name="epi",
- help="""EPI - Evidence Packaged Infrastructure for AI workflows
+ help="""EPI - The PDF for AI Evidence.
+
+ Cryptographic proof of what Autonomous AI Systems actually did.
 
  Commands:
  run <script.py> Record, auto-verify and open viewer. (Zero-config)
@@ -120,6 +122,10 @@ app.add_typer(view_app, name="view", help="Open recording in browser (name resol
  from epi_cli.ls import ls as ls_command
  app.command(name="ls", help="List local recordings (./epi-recordings/)")(ls_command)
 
+ # NEW: chat command (v2.1.3 - AI-powered evidence querying)
+ from epi_cli.chat import chat as chat_command
+ app.command(name="chat", help="Chat with your evidence file using AI")(chat_command)
+
  # Phase 1: keys command (for manual key management)
  @app.command()
  def keys(
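The chat command is wired in with Typer's call-the-decorator-directly style rather than an @app.command decorator at the definition site. A self-contained sketch of that registration pattern, with toy names that are not part of epi:

    import typer

    app = typer.Typer(name="demo")

    def hello(name: str = typer.Argument(..., help="Who to greet")):
        """Toy command body."""
        typer.echo(f"Hello, {name}!")

    # app.command(...) returns a decorator; calling it with the imported
    # function registers the command without touching the source module.
    app.command(name="hello", help="Say hello")(hello)

    if __name__ == "__main__":
        app()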
epi_core/__init__.py CHANGED
@@ -2,7 +2,7 @@
  EPI Core - Core data structures, serialization, and container management.
  """
 
- __version__ = "2.1.0"
+ __version__ = "2.1.3"
 
  from epi_core.schemas import ManifestModel, StepModel
  from epi_core.serialize import get_canonical_hash
epi_core/container.py CHANGED
@@ -80,6 +80,8 @@ class EPIContainer:
  # Read template and assets
  template_html = template_path.read_text(encoding="utf-8")
  app_js = app_js_path.read_text(encoding="utf-8") if app_js_path.exists() else ""
+ crypto_js_path = viewer_static_dir / "crypto.js"
+ crypto_js = crypto_js_path.read_text(encoding="utf-8") if crypto_js_path.exists() else ""
  css_styles = css_path.read_text(encoding="utf-8") if css_path.exists() else ""
 
  # Read steps from steps.jsonl
@@ -112,10 +114,16 @@ class EPIContainer:
  f'<style>{css_styles}</style>'
  )
 
- # Inline app.js
+ # Inline crypto.js and app.js
+ js_content = ""
+ if crypto_js:
+ js_content += f"<script>{crypto_js}</script>\n"
+ if app_js:
+ js_content += f"<script>{app_js}</script>"
+
  html_with_js = html_with_css.replace(
  '<script src="app.js"></script>',
- f'<script>{app_js}</script>'
+ js_content
  )
 
  return html_with_js
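A standalone sketch of the new inlining behavior, assuming only the string replacement shown above: crypto.js (when present) is emitted before app.js, so any verification helpers it defines exist by the time the viewer script runs, and a missing crypto.js degrades to the old single-script output:

    def inline_scripts(html: str, crypto_js: str, app_js: str) -> str:
        # Mirrors the replacement in EPIContainer: build the combined
        # <script> payload, crypto.js first, then swap it in for the src tag.
        js_content = ""
        if crypto_js:
            js_content += f"<script>{crypto_js}</script>\n"
        if app_js:
            js_content += f"<script>{app_js}</script>"
        return html.replace('<script src="app.js"></script>', js_content)

    page = '<body><script src="app.js"></script></body>'
    print(inline_scripts(page, "/* crypto.js */", "/* app.js */"))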
epi_core/schemas.py CHANGED
@@ -18,7 +18,7 @@ class ManifestModel(BaseModel):
  """
 
  spec_version: str = Field(
- default="1.0-keystone",
+ default="1.1-json",
  description="EPI specification version"
  )
 
@@ -47,6 +47,11 @@ class ManifestModel(BaseModel):
  description="Mapping of file paths to their SHA-256 hashes for integrity verification"
  )
 
+ public_key: Optional[str] = Field(
+ default=None,
+ description="Hex-encoded public key used for verification"
+ )
+
  signature: Optional[str] = Field(
  default=None,
  description="Ed25519 signature of the canonical CBOR hash of this manifest (excluding signature field)"
epi_core/serialize.py CHANGED
@@ -87,27 +87,56 @@ def get_canonical_hash(model: BaseModel, exclude_fields: set[str] | None = None)
  else:
  return value
 
+ # Normalize datetime and UUID fields to strings
  model_dict = normalize_value(model_dict)
 
  if exclude_fields:
  for field in exclude_fields:
  model_dict.pop(field, None)
-
+
+ # JSON Canonicalization for Spec v1.1+
+ # Check if model has spec_version and if it indicates JSON usage
+ # We default to CBOR for backward compatibility
+
+ use_json = False
+
+ # Check spec_version in model or dict
+ spec_version = model_dict.get("spec_version")
+ if spec_version and (spec_version.startswith("1.1") or "json" in spec_version):
+ use_json = True
+
+ if use_json:
+ return _get_json_canonical_hash(model_dict)
+ else:
+ return _get_cbor_canonical_hash(model_dict)
+
+
+ def _get_json_canonical_hash(data: Any) -> str:
+ """Compute canonical SHA-256 hash using JSON (RFC 8785 style)."""
+ import json
+
+ # Dump to JSON with sorted keys and no whitespace
+ json_bytes = json.dumps(
+ data,
+ sort_keys=True,
+ separators=(',', ':'),
+ ensure_ascii=False
+ ).encode("utf-8")
+
+ return hashlib.sha256(json_bytes).hexdigest()
+
+
+ def _get_cbor_canonical_hash(data: Any) -> str:
+ """Compute canonical SHA-256 hash using CBOR (Legacy v1.0)."""
  # Encode to canonical CBOR
- # canonical=True ensures:
- # - Keys are sorted lexicographically
- # - Minimal encoding is used
- # - Deterministic representation
  cbor_bytes = cbor2.dumps(
- model_dict,
+ data,
  canonical=True,
  default=_cbor_default_encoder
  )
 
  # Compute SHA-256 hash
- hash_obj = hashlib.sha256(cbor_bytes)
-
- return hash_obj.hexdigest()
+ return hashlib.sha256(cbor_bytes).hexdigest()
 
 
  def verify_hash(model: BaseModel, expected_hash: str, exclude_fields: set[str] | None = None) -> bool:
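The property _get_json_canonical_hash relies on is that sort_keys=True plus compact separators yields exactly one byte sequence per logical object, regardless of insertion order. (Note this is looser than full RFC 8785, which also normalizes number formatting.) A quick check of the key-order invariance, using only the stdlib:

    import hashlib
    import json

    def json_canonical_hash(data: dict) -> str:
        # Same recipe as _get_json_canonical_hash above.
        blob = json.dumps(data, sort_keys=True, separators=(',', ':'),
                          ensure_ascii=False).encode("utf-8")
        return hashlib.sha256(blob).hexdigest()

    a = {"spec_version": "1.1-json", "goal": "demo"}
    b = {"goal": "demo", "spec_version": "1.1-json"}
    assert json_canonical_hash(a) == json_canonical_hash(b)  # key order is irrelevant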
epi_core/trust.py CHANGED
@@ -53,6 +53,16 @@ def sign_manifest(
  SigningError: If signing fails
  """
  try:
+ # Derive public key and add to manifest
+ public_key_obj = private_key.public_key()
+ public_key_hex = public_key_obj.public_bytes(
+ encoding=serialization.Encoding.Raw,
+ format=serialization.PublicFormat.Raw
+ ).hex()
+
+ # We must update the manifest BEFORE hashing so the public key is signed
+ manifest.public_key = public_key_hex
+
  # Compute canonical hash (excluding signature field)
  manifest_hash = get_canonical_hash(manifest, exclude_fields={"signature"})
  hash_bytes = bytes.fromhex(manifest_hash)
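Because the public key is now embedded in the manifest (and covered by the signature, since it is set before hashing), a verifier no longer needs the key out of band. A counterpart sketch, assuming a manifest carrying the new hex public_key and signature fields; verify_manifest here is illustrative, not the package's actual verifier:

    from cryptography.exceptions import InvalidSignature
    from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PublicKey

    from epi_core.serialize import get_canonical_hash

    def verify_manifest(manifest) -> bool:
        # Recompute the canonical hash exactly as sign_manifest did,
        # excluding the signature field itself.
        manifest_hash = get_canonical_hash(manifest, exclude_fields={"signature"})
        public_key = Ed25519PublicKey.from_public_bytes(bytes.fromhex(manifest.public_key))
        try:
            public_key.verify(bytes.fromhex(manifest.signature), bytes.fromhex(manifest_hash))
            return True
        except InvalidSignature:
            return False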
epi_recorder/__init__.py CHANGED
@@ -4,7 +4,7 @@ EPI Recorder - Runtime interception and workflow capture.
  Python API for recording AI workflows with cryptographic verification.
  """
 
- __version__ = "2.1.0"
+ __version__ = "2.1.3"
 
  # Export Python API
  from epi_recorder.api import (
epi_recorder/patcher.py CHANGED
@@ -325,7 +325,113 @@ def _patch_openai_legacy() -> bool:
  return False
 
 
- return results
+ # ==================== Google Gemini Patcher ====================
+
+ def patch_gemini() -> bool:
+ """
+ Patch Google Generative AI library to intercept Gemini API calls.
+
+ Returns:
+ bool: True if patching succeeded, False otherwise
+ """
+ try:
+ import warnings
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ import google.generativeai as genai
+ from google.generativeai.types import GenerateContentResponse
+
+ # Get the GenerativeModel class
+ GenerativeModel = genai.GenerativeModel
+
+ # Store original method
+ original_generate_content = GenerativeModel.generate_content
+
+ @wraps(original_generate_content)
+ def wrapped_generate_content(self, *args, **kwargs):
+ """Wrapped Gemini generate_content with recording."""
+
+ # Only record if context is active
+ if not is_recording():
+ return original_generate_content(self, *args, **kwargs)
+
+ context = get_recording_context()
+ start_time = time.time()
+
+ # Extract prompt from args/kwargs
+ contents = args[0] if args else kwargs.get("contents", "")
+
+ # Capture request
+ request_data = {
+ "provider": "google",
+ "method": "GenerativeModel.generate_content",
+ "model": getattr(self, '_model_name', getattr(self, 'model_name', 'gemini')),
+ "contents": str(contents)[:2000], # Truncate long prompts
+ "generation_config": str(kwargs.get("generation_config", {})),
+ }
+
+ # Log request step
+ context.add_step("llm.request", request_data)
+
+ # Execute original call
+ try:
+ response = original_generate_content(self, *args, **kwargs)
+ elapsed = time.time() - start_time
+
+ # Capture response
+ response_text = ""
+ try:
+ if hasattr(response, 'text'):
+ response_text = response.text[:2000] # Truncate long responses
+ elif hasattr(response, 'parts'):
+ response_text = str(response.parts)[:2000]
+ except Exception:
+ response_text = "[Response text extraction failed]"
+
+ response_data = {
+ "provider": "google",
+ "model": getattr(self, '_model_name', getattr(self, 'model_name', 'gemini')),
+ "response": response_text,
+ "latency_seconds": round(elapsed, 3)
+ }
+
+ # Try to get usage info if available
+ try:
+ if hasattr(response, 'usage_metadata'):
+ usage = response.usage_metadata
+ response_data["usage"] = {
+ "prompt_tokens": getattr(usage, 'prompt_token_count', None),
+ "completion_tokens": getattr(usage, 'candidates_token_count', None),
+ "total_tokens": getattr(usage, 'total_token_count', None)
+ }
+ except Exception:
+ pass
+
+ # Log response step
+ context.add_step("llm.response", response_data)
+
+ return response
+
+ except Exception as e:
+ # Log error step
+ context.add_step("llm.error", {
+ "provider": "google",
+ "error": str(e),
+ "error_type": type(e).__name__
+ })
+ raise
+
+ # Apply patch
+ GenerativeModel.generate_content = wrapped_generate_content
+
+ return True
+
+ except ImportError:
+ # google-generativeai not installed
+ return False
+ except Exception as e:
+ print(f"Warning: Failed to patch Gemini: {e}")
+ return False
 
 
  def patch_requests() -> bool:
@@ -419,6 +525,9 @@ def patch_all() -> Dict[str, bool]:
  # Patch OpenAI
  results["openai"] = patch_openai()
 
+ # Patch Google Gemini
+ results["gemini"] = patch_gemini()
+
  # Patch generic requests (covers LangChain, Anthropic, etc.)
  results["requests"] = patch_requests()
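Taken together, patch_all now reports Gemini coverage alongside the existing patchers. A minimal sketch of what a caller sees; the printed dict is illustrative, and additional keys may be present depending on the installed SDKs:

    from epi_recorder.patcher import patch_all

    # Values depend on which provider SDKs are importable in this environment.
    results = patch_all()
    print(results)  # e.g. {'openai': True, 'gemini': False, 'requests': True}
    # 'gemini' is False rather than an exception when google-generativeai is
    # absent, because patch_gemini() catches ImportError and returns False.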