glacis-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- glacis/__init__.py +88 -0
- glacis/__main__.py +76 -0
- glacis/client.py +847 -0
- glacis/crypto.py +121 -0
- glacis/integrations/__init__.py +12 -0
- glacis/integrations/anthropic.py +222 -0
- glacis/integrations/openai.py +208 -0
- glacis/models.py +293 -0
- glacis/storage.py +331 -0
- glacis/streaming.py +363 -0
- glacis/wasm/s3p_core_wasi.wasm +0 -0
- glacis/wasm_runtime.py +519 -0
- glacis-0.1.0.dist-info/METADATA +324 -0
- glacis-0.1.0.dist-info/RECORD +16 -0
- glacis-0.1.0.dist-info/WHEEL +4 -0
- glacis-0.1.0.dist-info/licenses/LICENSE +190 -0
glacis/crypto.py
ADDED
@@ -0,0 +1,121 @@
"""
RFC 8785 Canonical JSON and SHA-256 Hashing

This module provides deterministic JSON serialization and hashing that produces
identical output to the TypeScript and Rust implementations.

The canonical JSON format follows RFC 8785:
- Object keys are sorted lexicographically by Unicode code point
- No whitespace between elements
- Numbers without unnecessary precision
- Recursive canonicalization of nested structures

Example:
    >>> from glacis.crypto import hash_payload
    >>> hash1 = hash_payload({"b": 2, "a": 1})
    >>> hash2 = hash_payload({"a": 1, "b": 2})
    >>> assert hash1 == hash2  # Keys are sorted
"""

import hashlib
import json
from typing import Any


def canonical_json(data: Any) -> str:
    """
    Serialize data to RFC 8785 canonical JSON.

    This produces deterministic JSON that is identical across all runtimes
    (Python, TypeScript, Rust).

    Args:
        data: Any JSON-serializable value

    Returns:
        Canonical JSON string

    Raises:
        ValueError: If data contains non-serializable values (NaN, Infinity)

    Example:
        >>> canonical_json({"b": 2, "a": 1})
        '{"a":1,"b":2}'
    """
    return _canonicalize_value(data)


def _canonicalize_value(value: Any) -> str:
    """Recursively canonicalize a value."""
    if value is None:
        return "null"

    if isinstance(value, bool):
        return "true" if value else "false"

    if isinstance(value, (int, float)):
        # Reject non-finite numbers (not valid in JSON)
        if isinstance(value, float):
            if value != value:  # NaN check
                raise ValueError("Cannot canonicalize NaN")
            if value == float("inf") or value == float("-inf"):
                raise ValueError("Cannot canonicalize Infinity")

        # Use Python's default number serialization: integers get no decimal
        # point, and floats use the shortest round-trip form, which agrees
        # with JavaScript's output for typical values
        return json.dumps(value)

    if isinstance(value, str):
        # Use json.dumps for proper string escaping
        return json.dumps(value)

    if isinstance(value, (list, tuple)):
        elements = [_canonicalize_value(item) for item in value]
        return "[" + ",".join(elements) + "]"

    if isinstance(value, dict):
        # Sort keys by Unicode code point (RFC 8785 specifies UTF-16 code unit
        # order; the two agree for keys within the Basic Multilingual Plane)
        sorted_keys = sorted(value.keys())
        pairs = []
        for key in sorted_keys:
            # Python has no `undefined` to skip: None values serialize as
            # JSON null and every key is emitted
            pairs.append(f"{json.dumps(key)}:{_canonicalize_value(value[key])}")
        return "{" + ",".join(pairs) + "}"

    raise ValueError(f"Cannot canonicalize value of type: {type(value).__name__}")


def hash_payload(data: Any) -> str:
    """
    Hash data using RFC 8785 canonical JSON + SHA-256.

    This is the primary hashing function for the transparency log.
    Produces identical hashes across Python, TypeScript, and Rust runtimes.

    Args:
        data: Any JSON-serializable value

    Returns:
        Hex-encoded SHA-256 hash (64 characters)

    Example:
        >>> hash_payload({"b": 2, "a": 1})
        'a1b2c3...'  # 64 hex characters
    """
    canonical = canonical_json(data)
    return hashlib.sha256(canonical.encode("utf-8")).hexdigest()


def hash_bytes(data: bytes) -> str:
    """
    Hash raw bytes using SHA-256.

    Args:
        data: Raw bytes to hash

    Returns:
        Hex-encoded SHA-256 hash (64 characters)
    """
    return hashlib.sha256(data).hexdigest()
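
To make the determinism claim concrete, here is a short usage sketch (written for this review, not code from the package) that recomputes a payload hash by hand and checks it against hash_payload:

    import hashlib

    from glacis.crypto import canonical_json, hash_payload

    payload = {"b": 2, "a": 1, "nested": {"z": [1, 2], "y": "text"}}
    reordered = {"a": 1, "nested": {"y": "text", "z": [1, 2]}, "b": 2}

    # Canonicalization sorts keys recursively, so input key order is irrelevant
    assert canonical_json(payload) == canonical_json(reordered)

    # hash_payload is SHA-256 over the UTF-8 bytes of the canonical form
    expected = hashlib.sha256(canonical_json(payload).encode("utf-8")).hexdigest()
    assert hash_payload(payload) == expected
    assert len(expected) == 64  # hex-encoded SHA-256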
glacis/integrations/__init__.py
ADDED
@@ -0,0 +1,12 @@
"""
GLACIS integrations for popular AI providers.

These integrations provide drop-in wrappers that automatically attest
all API calls to the GLACIS transparency log.

Available integrations:
- OpenAI: `from glacis.integrations.openai import attested_openai`
- Anthropic: `from glacis.integrations.anthropic import attested_anthropic`
"""

__all__ = []
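
Both wrappers share the same calling convention, so switching providers changes only the import and the provider key. A minimal side-by-side sketch (the key strings are placeholders, not real credentials):

    from glacis.integrations.anthropic import attested_anthropic
    from glacis.integrations.openai import attested_openai

    # Each factory returns the provider's own client with its create method
    # wrapped, so existing call sites keep working unchanged
    openai_client = attested_openai(
        glacis_api_key="glsk_live_xxx", openai_api_key="sk-xxx"
    )
    anthropic_client = attested_anthropic(
        glacis_api_key="glsk_live_xxx", anthropic_api_key="sk-xxx"
    )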
glacis/integrations/anthropic.py
ADDED
@@ -0,0 +1,222 @@
"""
GLACIS integration for Anthropic.

Provides an attested Anthropic client wrapper that automatically logs all
messages to the GLACIS transparency log.

Example:
    >>> from glacis.integrations.anthropic import attested_anthropic
    >>> client = attested_anthropic(glacis_api_key="glsk_live_xxx", anthropic_api_key="sk-xxx")
    >>> response = client.messages.create(
    ...     model="claude-3-opus-20240229",
    ...     messages=[{"role": "user", "content": "Hello!"}]
    ... )
    # Response is automatically attested to GLACIS
"""

from typing import TYPE_CHECKING, Any, Optional

if TYPE_CHECKING:
    from anthropic import Anthropic


def attested_anthropic(
    glacis_api_key: str,
    anthropic_api_key: Optional[str] = None,
    glacis_base_url: str = "https://api.glacis.dev",
    service_id: str = "anthropic",
    debug: bool = False,
    **anthropic_kwargs: Any,
) -> "Anthropic":
    """
    Create an attested Anthropic client.

    All messages are automatically attested to the GLACIS transparency log.
    The input (messages) and output (response) are hashed locally - the actual
    content never leaves your infrastructure.

    Args:
        glacis_api_key: GLACIS API key
        anthropic_api_key: Anthropic API key (default: from ANTHROPIC_API_KEY env var)
        glacis_base_url: GLACIS API base URL
        service_id: Service identifier for attestations
        debug: Enable debug logging
        **anthropic_kwargs: Additional arguments passed to the Anthropic client

    Returns:
        Wrapped Anthropic client

    Example:
        >>> client = attested_anthropic(
        ...     glacis_api_key="glsk_live_xxx",
        ...     anthropic_api_key="sk-xxx"
        ... )
        >>> response = client.messages.create(
        ...     model="claude-3-opus-20240229",
        ...     max_tokens=1024,
        ...     messages=[{"role": "user", "content": "Hello!"}]
        ... )
    """
    try:
        from anthropic import Anthropic
    except ImportError:
        raise ImportError(
            "Anthropic integration requires the 'anthropic' package. "
            "Install it with: pip install glacis[anthropic]"
        )

    from glacis import Glacis

    glacis = Glacis(
        api_key=glacis_api_key,
        base_url=glacis_base_url,
        debug=debug,
    )

    # Create the Anthropic client
    client_kwargs: dict[str, Any] = {**anthropic_kwargs}
    if anthropic_api_key:
        client_kwargs["api_key"] = anthropic_api_key

    client = Anthropic(**client_kwargs)

    # Wrap the messages create method
    original_create = client.messages.create

    def attested_create(*args: Any, **kwargs: Any) -> Any:
        # Extract input
        messages = kwargs.get("messages", [])
        model = kwargs.get("model", "unknown")
        system = kwargs.get("system")

        # Make the API call
        response = original_create(*args, **kwargs)

        # Attest the interaction
        try:
            input_data: dict[str, Any] = {
                "model": model,
                "messages": messages,
            }
            if system:
                input_data["system"] = system

            glacis.attest(
                service_id=service_id,
                operation_type="completion",
                input=input_data,
                output={
                    "model": response.model,
                    "content": [
                        {
                            "type": block.type,
                            "text": getattr(block, "text", None),
                        }
                        for block in response.content
                    ],
                    "stop_reason": response.stop_reason,
                    "usage": {
                        "input_tokens": response.usage.input_tokens,
                        "output_tokens": response.usage.output_tokens,
                    },
                },
                metadata={"provider": "anthropic", "model": model},
            )
        except Exception as e:
            if debug:
                print(f"[glacis] Attestation failed: {e}")

        return response

    # Replace the create method
    client.messages.create = attested_create  # type: ignore

    return client


def attested_async_anthropic(
    glacis_api_key: str,
    anthropic_api_key: Optional[str] = None,
    glacis_base_url: str = "https://api.glacis.dev",
    service_id: str = "anthropic",
    debug: bool = False,
    **anthropic_kwargs: Any,
) -> Any:
    """
    Create an attested async Anthropic client.

    Same as `attested_anthropic` but for async usage.

    Example:
        >>> client = attested_async_anthropic(glacis_api_key="glsk_live_xxx")
        >>> response = await client.messages.create(...)
    """
    try:
        from anthropic import AsyncAnthropic
    except ImportError:
        raise ImportError(
            "Anthropic integration requires the 'anthropic' package. "
            "Install it with: pip install glacis[anthropic]"
        )

    from glacis import AsyncGlacis

    glacis = AsyncGlacis(
        api_key=glacis_api_key,
        base_url=glacis_base_url,
        debug=debug,
    )

    client_kwargs: dict[str, Any] = {**anthropic_kwargs}
    if anthropic_api_key:
        client_kwargs["api_key"] = anthropic_api_key

    client = AsyncAnthropic(**client_kwargs)

    original_create = client.messages.create

    async def attested_create(*args: Any, **kwargs: Any) -> Any:
        messages = kwargs.get("messages", [])
        model = kwargs.get("model", "unknown")
        system = kwargs.get("system")

        response = await original_create(*args, **kwargs)

        try:
            input_data: dict[str, Any] = {
                "model": model,
                "messages": messages,
            }
            if system:
                input_data["system"] = system

            await glacis.attest(
                service_id=service_id,
                operation_type="completion",
                input=input_data,
                output={
                    "model": response.model,
                    "content": [
                        {
                            "type": block.type,
                            "text": getattr(block, "text", None),
                        }
                        for block in response.content
                    ],
                    "stop_reason": response.stop_reason,
                    "usage": {
                        "input_tokens": response.usage.input_tokens,
                        "output_tokens": response.usage.output_tokens,
                    },
                },
                metadata={"provider": "anthropic", "model": model},
            )
        except Exception as e:
            if debug:
                print(f"[glacis] Attestation failed: {e}")

        return response

    client.messages.create = attested_create  # type: ignore

    return client
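
The wrapper above is deliberately fail-open: the provider call runs first, and attestation errors are swallowed (surfaced only when debug is set) so logging can never break the request path. A standalone sketch of that pattern, using hypothetical names (wrap_with_attestation, attest) invented for this review:

    from typing import Any, Callable


    def wrap_with_attestation(
        original_create: Callable[..., Any],
        attest: Callable[..., Any],
        debug: bool = False,
    ) -> Callable[..., Any]:
        """Return a drop-in replacement that attests after each call."""

        def attested_create(*args: Any, **kwargs: Any) -> Any:
            # Provider call first: attestation must never block or fail it
            response = original_create(*args, **kwargs)
            try:
                attest(input=kwargs, output=response)  # best-effort logging
            except Exception as e:
                if debug:
                    print(f"[glacis] Attestation failed: {e}")
            return response

        return attested_create

The trade-off of this design is silent gaps in the transparency log when attestation fails and debug is off; the benefit is that the caller's request never degrades.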
glacis/integrations/openai.py
ADDED
@@ -0,0 +1,208 @@
"""
GLACIS integration for OpenAI.

Provides an attested OpenAI client wrapper that automatically logs all
completions to the GLACIS transparency log. Supports both online
(server-witnessed) and offline (locally-signed) modes.

Example (online):
    >>> from glacis.integrations.openai import attested_openai
    >>> client = attested_openai(glacis_api_key="glsk_live_xxx", openai_api_key="sk-xxx")
    >>> response = client.chat.completions.create(
    ...     model="gpt-4",
    ...     messages=[{"role": "user", "content": "Hello!"}]
    ... )
    # Response is automatically attested to GLACIS

Example (offline):
    >>> client = attested_openai(openai_api_key="sk-xxx", offline=True, signing_seed=seed)
    >>> response = client.chat.completions.create(
    ...     model="gpt-4o",
    ...     messages=[{"role": "user", "content": "Hello!"}],
    ... )
    >>> receipt = get_last_receipt()
"""

from __future__ import annotations

import threading
from typing import TYPE_CHECKING, Any, Optional, Union

if TYPE_CHECKING:
    from openai import OpenAI

    from glacis.models import AttestReceipt, OfflineAttestReceipt


# Thread-local storage for the last receipt
_thread_local = threading.local()


def get_last_receipt() -> Optional[Union["AttestReceipt", "OfflineAttestReceipt"]]:
    """
    Get the last attestation receipt from the current thread.

    Returns:
        The last AttestReceipt or OfflineAttestReceipt, or None if no attestation
        has been made in this thread.
    """
    return getattr(_thread_local, "last_receipt", None)


def attested_openai(
    glacis_api_key: Optional[str] = None,
    openai_api_key: Optional[str] = None,
    glacis_base_url: str = "https://api.glacis.dev",
    service_id: str = "openai",
    debug: bool = False,
    offline: bool = False,
    signing_seed: Optional[bytes] = None,
    **openai_kwargs: Any,
) -> "OpenAI":
    """
    Create an attested OpenAI client.

    All chat completions are automatically attested. Supports both online and
    offline modes. Note: Streaming is not currently supported.

    Args:
        glacis_api_key: GLACIS API key (required for online mode)
        openai_api_key: OpenAI API key (default: from OPENAI_API_KEY env var)
        glacis_base_url: GLACIS API base URL
        service_id: Service identifier for attestations
        debug: Enable debug logging
        offline: Enable offline mode (local signing, no server)
        signing_seed: 32-byte Ed25519 signing seed (required for offline mode)
        **openai_kwargs: Additional arguments passed to the OpenAI client

    Returns:
        Wrapped OpenAI client

    Example (online):
        >>> client = attested_openai(
        ...     glacis_api_key="glsk_live_xxx",
        ...     openai_api_key="sk-xxx"
        ... )
        >>> response = client.chat.completions.create(
        ...     model="gpt-4",
        ...     messages=[{"role": "user", "content": "Hello!"}]
        ... )

    Example (offline):
        >>> import os
        >>> seed = os.urandom(32)
        >>> client = attested_openai(
        ...     openai_api_key="sk-xxx",
        ...     offline=True,
        ...     signing_seed=seed,
        ... )
        >>> response = client.chat.completions.create(
        ...     model="gpt-4o",
        ...     messages=[{"role": "user", "content": "Hello!"}],
        ... )
        >>> receipt = get_last_receipt()
        >>> assert receipt.witness_status == "UNVERIFIED"
    """
    try:
        from openai import OpenAI
    except ImportError:
        raise ImportError(
            "OpenAI integration requires the 'openai' package. "
            "Install it with: pip install glacis[openai]"
        )

    from glacis import Glacis

    # Create Glacis client (online or offline)
    if offline:
        if not signing_seed:
            raise ValueError("signing_seed is required for offline mode")
        glacis = Glacis(
            mode="offline",
            signing_seed=signing_seed,
            debug=debug,
        )
    else:
        if not glacis_api_key:
            raise ValueError("glacis_api_key is required for online mode")
        glacis = Glacis(
            api_key=glacis_api_key,
            base_url=glacis_base_url,
            debug=debug,
        )

    # Create the OpenAI client
    client_kwargs: dict[str, Any] = {**openai_kwargs}
    if openai_api_key:
        client_kwargs["api_key"] = openai_api_key

    client = OpenAI(**client_kwargs)

    # Wrap the chat completions create method
    original_create = client.chat.completions.create

    def attested_create(*args: Any, **kwargs: Any) -> Any:
        # Check for streaming - not supported
        if kwargs.get("stream", False):
            raise NotImplementedError(
                "Streaming is not currently supported with attested_openai. "
                "Use stream=False for now."
            )

        # Extract input
        messages = kwargs.get("messages", [])
        model = kwargs.get("model", "unknown")

        # Make the API call
        response = original_create(*args, **kwargs)

        # Attest the response
        try:
            receipt = glacis.attest(
                service_id=service_id,
                operation_type="completion",
                input={
                    "model": model,
                    "messages": messages,
                },
                output={
                    "model": response.model,
                    "choices": [
                        {
                            "message": {
                                "role": c.message.role,
                                "content": c.message.content,
                            },
                            "finish_reason": c.finish_reason,
                        }
                        for c in response.choices
                    ],
                    "usage": {
                        "prompt_tokens": response.usage.prompt_tokens,
                        "completion_tokens": response.usage.completion_tokens,
                        "total_tokens": response.usage.total_tokens,
                    }
                    if response.usage
                    else None,
                },
                metadata={"provider": "openai", "model": model},
            )
            _thread_local.last_receipt = receipt
            if debug:
                print(f"[glacis] Attestation created: {receipt.attestation_id}")
        except Exception as e:
            if debug:
                print(f"[glacis] Attestation failed: {e}")

        return response

    # Replace the create method
    client.chat.completions.create = attested_create  # type: ignore

    return client
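
To close the loop on the offline flow, a usage sketch (written for this review; the key string is a placeholder) showing how the thread-local receipt hand-off is consumed:

    import os

    from glacis.integrations.openai import attested_openai, get_last_receipt

    seed = os.urandom(32)  # 32-byte Ed25519 signing seed, kept by the caller
    client = attested_openai(
        openai_api_key="sk-xxx",
        offline=True,
        signing_seed=seed,
    )

    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Hello!"}],
    )

    # The receipt lives in thread-local storage: read it on the same thread
    # that made the call, before the next attested request overwrites it
    receipt = get_last_receipt()
    if receipt is not None:
        print(receipt.witness_status)  # "UNVERIFIED" until server-witnessed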