glacis-0.1.3-py3-none-any.whl → glacis-0.2.0-py3-none-any.whl
- glacis/__init__.py +62 -1
- glacis/__main__.py +1 -80
- glacis/client.py +60 -31
- glacis/config.py +141 -0
- glacis/controls/__init__.py +232 -0
- glacis/controls/base.py +104 -0
- glacis/controls/jailbreak.py +224 -0
- glacis/controls/pii.py +855 -0
- glacis/crypto.py +70 -1
- glacis/integrations/__init__.py +53 -3
- glacis/integrations/anthropic.py +207 -142
- glacis/integrations/base.py +476 -0
- glacis/integrations/openai.py +156 -121
- glacis/models.py +277 -24
- glacis/storage.py +324 -8
- glacis/verify.py +154 -0
- glacis-0.2.0.dist-info/METADATA +275 -0
- glacis-0.2.0.dist-info/RECORD +21 -0
- glacis/wasm/s3p_core_wasi.wasm +0 -0
- glacis/wasm_runtime.py +0 -533
- glacis-0.1.3.dist-info/METADATA +0 -324
- glacis-0.1.3.dist-info/RECORD +0 -16
- {glacis-0.1.3.dist-info → glacis-0.2.0.dist-info}/WHEEL +0 -0
- {glacis-0.1.3.dist-info → glacis-0.2.0.dist-info}/licenses/LICENSE +0 -0
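
Before the per-file diff: a minimal sketch of the 0.2.0 API surface, assembled only from the new docstrings and signatures shown below. The sk-xxx key is a placeholder, and passing redaction="fast" is an assumption based on the documented values for the new redaction parameter ("fast", "full", True, False, None).

# Sketch of the 0.2.0 usage documented in glacis/integrations/openai.py below.
import os

from glacis.integrations.openai import (
    GlacisBlockedError,
    attested_openai,
    get_last_receipt,
)

client = attested_openai(
    openai_api_key="sk-xxx",      # placeholder key
    offline=True,                 # local signing, no GLACIS server
    signing_seed=os.urandom(32),  # 32-byte Ed25519 seed, required offline
    redaction="fast",             # assumed value; new PII/PHI control in 0.2.0
)

try:
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Hello!"}],
    )
    receipt = get_last_receipt()  # receipt for the completion just attested
except GlacisBlockedError:
    # New in 0.2.0: raised when an enabled control blocks the request.
    pass
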
glacis/integrations/openai.py
CHANGED
@@ -1,53 +1,44 @@
 """
 GLACIS integration for OpenAI.
 
-Provides an attested OpenAI client wrapper that automatically
-
-
-
-
-
-    >>>
-    >>>
-    ...
-    ...
+Provides an attested OpenAI client wrapper that automatically:
+1. Runs enabled controls (PII/PHI redaction, jailbreak detection, etc.)
+2. Logs all completions to the GLACIS transparency log
+3. Creates control plane attestations
+
+Example:
+    >>> from glacis.integrations.openai import attested_openai, get_last_receipt
+    >>> client = attested_openai(
+    ...     openai_api_key="sk-xxx",
+    ...     offline=True,
+    ...     signing_seed=os.urandom(32),
     ... )
-    # Response is automatically attested to GLACIS
-
-Example (offline):
-    >>> client = attested_openai(openai_api_key="sk-xxx", offline=True, signing_seed=seed)
     >>> response = client.chat.completions.create(
     ...     model="gpt-4o",
-    ...     messages=[{"role": "user", "content": "Hello!"}]
+    ...     messages=[{"role": "user", "content": "Hello!"}]
     ... )
     >>> receipt = get_last_receipt()
 """
 
 from __future__ import annotations
 
-import threading
-from typing import TYPE_CHECKING, Any, Optional, Union
+from typing import TYPE_CHECKING, Any, Literal, Optional, Union
+
+from glacis.integrations.base import (
+    GlacisBlockedError,
+    create_controls_runner,
+    create_glacis_client,
+    get_evidence,
+    get_last_receipt,
+    initialize_config,
+    set_last_receipt,
+    store_evidence,
+    suppress_noisy_loggers,
+)
 
 if TYPE_CHECKING:
     from openai import OpenAI
 
-    from glacis.models import AttestReceipt, OfflineAttestReceipt
-
-
-# Thread-local storage for the last receipt
-_thread_local = threading.local()
-
-
-def get_last_receipt() -> Optional[Union["AttestReceipt", "OfflineAttestReceipt"]]:
-    """
-    Get the last attestation receipt from the current thread.
-
-    Returns:
-        The last AttestReceipt or OfflineAttestReceipt, or None if no attestation
-        has been made in this thread.
-    """
-    return getattr(_thread_local, "last_receipt", None)
-
 
 def attested_openai(
     glacis_api_key: Optional[str] = None,
@@ -55,15 +46,14 @@ def attested_openai(
     glacis_base_url: str = "https://api.glacis.io",
     service_id: str = "openai",
     debug: bool = False,
-    offline: bool = False,
+    offline: Optional[bool] = None,
     signing_seed: Optional[bytes] = None,
+    redaction: Union[bool, Literal["fast", "full"], None] = None,
+    config: Optional[str] = None,
     **openai_kwargs: Any,
 ) -> "OpenAI":
     """
-    Create an attested OpenAI client.
-
-    All chat completions are automatically attested. Supports both online and offline modes.
-    Note: Streaming is not currently supported.
+    Create an attested OpenAI client with controls (PII redaction, jailbreak detection).
 
     Args:
         glacis_api_key: GLACIS API key (required for online mode)
@@ -73,36 +63,19 @@ def attested_openai(
         debug: Enable debug logging
         offline: Enable offline mode (local signing, no server)
         signing_seed: 32-byte Ed25519 signing seed (required for offline mode)
+        redaction: PII/PHI redaction mode - "fast", "full", True, False, or None
+        config: Path to glacis.yaml config file
         **openai_kwargs: Additional arguments passed to OpenAI client
 
     Returns:
         Wrapped OpenAI client
 
-    Example:
-    >>> client = attested_openai(
-    ...     glacis_api_key="glsk_live_xxx",
-    ...     openai_api_key="sk-xxx"
-    ... )
-    >>> response = client.chat.completions.create(
-    ...     model="gpt-4",
-    ...     messages=[{"role": "user", "content": "Hello!"}]
-    ... )
-
-    Example (offline):
-    >>> import os
-    >>> seed = os.urandom(32)
-    >>> client = attested_openai(
-    ...     openai_api_key="sk-xxx",
-    ...     offline=True,
-    ...     signing_seed=seed,
-    ... )
-    >>> response = client.chat.completions.create(
-    ...     model="gpt-4o",
-    ...     messages=[{"role": "user", "content": "Hello!"}],
-    ... )
-    >>> receipt = get_last_receipt()
-    >>> assert receipt.witness_status == "UNVERIFIED"
+    Raises:
+        GlacisBlockedError: If a control blocks the request
     """
+    # Suppress noisy loggers
+    suppress_noisy_loggers(["openai", "openai._base_client"])
+
     try:
         from openai import OpenAI
     except ImportError:
@@ -111,25 +84,26 @@ def attested_openai(
             "Install it with: pip install glacis[openai]"
         )
 
-    from glacis import Glacis
 
-    #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    # Initialize config and determine modes
+    cfg, effective_offline, effective_service_id = initialize_config(
+        config_path=config,
+        redaction=redaction,
+        offline=offline,
+        glacis_api_key=glacis_api_key,
+        default_service_id="openai",
+        service_id=service_id,
+    )
+
+    # Create controls runner and Glacis client
+    controls_runner = create_controls_runner(cfg, debug)
+    glacis = create_glacis_client(
+        offline=effective_offline,
+        signing_seed=signing_seed,
+        glacis_api_key=glacis_api_key,
+        glacis_base_url=glacis_base_url,
+        debug=debug,
+    )
 
     # Create the OpenAI client
     client_kwargs: dict[str, Any] = {**openai_kwargs}
@@ -142,67 +116,128 @@ def attested_openai(
     original_create = client.chat.completions.create
 
     def attested_create(*args: Any, **kwargs: Any) -> Any:
-        # Check for streaming - not supported
         if kwargs.get("stream", False):
             raise NotImplementedError(
                 "Streaming is not currently supported with attested_openai. "
                 "Use stream=False for now."
             )
 
-        # Extract input
         messages = kwargs.get("messages", [])
        model = kwargs.get("model", "unknown")
 
-        #
+        # Run controls if enabled
+        if controls_runner:
+            from glacis.integrations.base import (
+                ControlResultsAccumulator,
+                create_control_plane_attestation_from_accumulator,
+                handle_blocked_request,
+                process_text_for_controls,
+            )
+
+            accumulator = ControlResultsAccumulator()
+            processed_messages = []
+
+            # Find the last user message index (the new message to check)
+            last_user_idx = -1
+            for i, msg in enumerate(messages):
+                if isinstance(msg, dict) and msg.get("role") == "user":
+                    last_user_idx = i
+
+            for i, msg in enumerate(messages):
+                role = msg.get("role", "") if isinstance(msg, dict) else ""
+                # Only run controls on the LAST user message (the new one)
+                if (
+                    isinstance(msg, dict)
+                    and isinstance(msg.get("content"), str)
+                    and role == "user"
+                    and i == last_user_idx
+                ):
+                    content = msg["content"]
+                    final_text = process_text_for_controls(controls_runner, content, accumulator)
+                    processed_messages.append({**msg, "content": final_text})
+                else:
+                    processed_messages.append(msg)
+
+            kwargs["messages"] = processed_messages
+            messages = processed_messages
+
+            # Build control plane attestation
+            control_plane_results = create_control_plane_attestation_from_accumulator(
+                accumulator, cfg, model, "openai", "chat.completions"
+            )
+
+            # Check if we need to block BEFORE making the API call
+            if accumulator.should_block:
+                handle_blocked_request(
+                    glacis_client=glacis,
+                    service_id=effective_service_id,
+                    input_data={"model": model, "messages": messages},
+                    control_plane_results=control_plane_results,
+                    provider="openai",
+                    model=model,
+                    jailbreak_score=accumulator.jailbreak_summary.score
+                    if accumulator.jailbreak_summary
+                    else 0.0,
+                    debug=debug,
+                )
+        else:
+            control_plane_results = None
+
+        # Make the API call (only if not blocked)
         response = original_create(*args, **kwargs)
 
-        #
+        # Build input/output data
+        input_data = {"model": model, "messages": messages}
+        output_data = {
+            "model": response.model,
+            "choices": [
+                {
+                    "message": {"role": c.message.role, "content": c.message.content},
+                    "finish_reason": c.finish_reason,
+                }
+                for c in response.choices
+            ],
+            "usage": {
+                "prompt_tokens": response.usage.prompt_tokens if response.usage else 0,
+                "completion_tokens": response.usage.completion_tokens if response.usage else 0,
+                "total_tokens": response.usage.total_tokens if response.usage else 0,
+            } if response.usage else None,
+        }
+
+        # Attest and store
         try:
             receipt = glacis.attest(
-                service_id=service_id,
+                service_id=effective_service_id,
                 operation_type="completion",
-                input={
-                    "model": model,
-                    "messages": messages,
-                },
-                output={
-                    "model": response.model,
-                    "choices": [
-                        {
-                            "message": {
-                                "role": c.message.role,
-                                "content": c.message.content,
-                            },
-                            "finish_reason": c.finish_reason,
-                        }
-                        for c in response.choices
-                    ],
-                    "usage": {
-                        "prompt_tokens": (
-                            response.usage.prompt_tokens if response.usage else 0
-                        ),
-                        "completion_tokens": (
-                            response.usage.completion_tokens if response.usage else 0
-                        ),
-                        "total_tokens": (
-                            response.usage.total_tokens if response.usage else 0
-                        ),
-                    }
-                    if response.usage
-                    else None,
-                },
+                input=input_data,
+                output=output_data,
                 metadata={"provider": "openai", "model": model},
+                control_plane_results=control_plane_results,
+            )
+            set_last_receipt(receipt)
+            store_evidence(
+                receipt=receipt,
+                service_id=effective_service_id,
+                operation_type="completion",
+                input_data=input_data,
+                output_data=output_data,
+                control_plane_results=control_plane_results,
+                metadata={"provider": "openai", "model": model},
+                debug=debug,
             )
-            _thread_local.last_receipt = receipt
-            if debug:
-                print(f"[glacis] Attestation created: {receipt.attestation_id}")
        except Exception as e:
            if debug:
                print(f"[glacis] Attestation failed: {e}")
 
        return response
 
-    # Replace the create method
    client.chat.completions.create = attested_create  # type: ignore
-
    return client
+
+
+__all__ = [
+    "attested_openai",
+    "get_last_receipt",
+    "get_evidence",
+    "GlacisBlockedError",
+]
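
One behavioral detail of the new attested_create wrapper above: controls scan only the last user message, presumably because earlier turns were already checked when they were first sent. A standalone sketch of that selection logic follows; apply_controls and redact are illustrative stand-ins for the wrapper internals and process_text_for_controls, not package APIs.

# Standalone sketch: controls run only on the LAST user message,
# earlier turns pass through untouched.
from typing import Any


def redact(text: str) -> str:
    # Stand-in for the real control pass (PII redaction, jailbreak scoring).
    return text.replace("555-0100", "[REDACTED]")


def apply_controls(messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
    # Find the index of the newest user message.
    last_user_idx = -1
    for i, msg in enumerate(messages):
        if isinstance(msg, dict) and msg.get("role") == "user":
            last_user_idx = i

    processed = []
    for i, msg in enumerate(messages):
        if (
            isinstance(msg, dict)
            and isinstance(msg.get("content"), str)
            and msg.get("role") == "user"
            and i == last_user_idx
        ):
            processed.append({**msg, "content": redact(msg["content"])})
        else:
            processed.append(msg)
    return processed


history = [
    {"role": "user", "content": "earlier turn, already checked"},
    {"role": "assistant", "content": "ok"},
    {"role": "user", "content": "call me at 555-0100"},
]
assert apply_controls(history)[2]["content"] == "call me at [REDACTED]"
assert apply_controls(history)[0]["content"] == "earlier turn, already checked"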