control_zero-0.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. control_zero/__init__.py +31 -0
  2. control_zero/client.py +584 -0
  3. control_zero/integrations/crewai/__init__.py +53 -0
  4. control_zero/integrations/crewai/agent.py +267 -0
  5. control_zero/integrations/crewai/crew.py +381 -0
  6. control_zero/integrations/crewai/task.py +291 -0
  7. control_zero/integrations/crewai/tool.py +299 -0
  8. control_zero/integrations/langchain/__init__.py +58 -0
  9. control_zero/integrations/langchain/agent.py +311 -0
  10. control_zero/integrations/langchain/callbacks.py +441 -0
  11. control_zero/integrations/langchain/chain.py +319 -0
  12. control_zero/integrations/langchain/graph.py +441 -0
  13. control_zero/integrations/langchain/tool.py +271 -0
  14. control_zero/llm/__init__.py +77 -0
  15. control_zero/llm/anthropic/__init__.py +35 -0
  16. control_zero/llm/anthropic/client.py +136 -0
  17. control_zero/llm/anthropic/messages.py +375 -0
  18. control_zero/llm/base.py +551 -0
  19. control_zero/llm/cohere/__init__.py +32 -0
  20. control_zero/llm/cohere/client.py +402 -0
  21. control_zero/llm/gemini/__init__.py +34 -0
  22. control_zero/llm/gemini/client.py +486 -0
  23. control_zero/llm/groq/__init__.py +32 -0
  24. control_zero/llm/groq/client.py +330 -0
  25. control_zero/llm/mistral/__init__.py +32 -0
  26. control_zero/llm/mistral/client.py +319 -0
  27. control_zero/llm/ollama/__init__.py +31 -0
  28. control_zero/llm/ollama/client.py +439 -0
  29. control_zero/llm/openai/__init__.py +34 -0
  30. control_zero/llm/openai/chat.py +331 -0
  31. control_zero/llm/openai/client.py +182 -0
  32. control_zero/logging/__init__.py +5 -0
  33. control_zero/logging/async_logger.py +65 -0
  34. control_zero/mcp/__init__.py +5 -0
  35. control_zero/mcp/middleware.py +148 -0
  36. control_zero/policy/__init__.py +5 -0
  37. control_zero/policy/enforcer.py +99 -0
  38. control_zero/secrets/__init__.py +5 -0
  39. control_zero/secrets/manager.py +77 -0
  40. control_zero/types.py +51 -0
  41. control_zero-0.2.0.dist-info/METADATA +216 -0
  42. control_zero-0.2.0.dist-info/RECORD +44 -0
  43. control_zero-0.2.0.dist-info/WHEEL +4 -0
  44. control_zero-0.2.0.dist-info/licenses/LICENSE +17 -0
control_zero/llm/openai/chat.py
@@ -0,0 +1,331 @@
+ """
+ Governed OpenAI Chat Completions.
+
+ Provides a governance wrapper for OpenAI's chat completions API,
+ including support for function calling, streaming, and tool use.
+ """
+
+ import time
+ from typing import Any, Dict, Iterator, List, Optional, Union
+
+ from control_zero.llm.base import (
+     GovernanceAction,
+     GovernedChatMixin,
+     LLMUsageMetrics,
+     estimate_cost,
+ )
+ from control_zero.policy import PolicyDeniedError
+
+
+ class GovernedChatCompletions(GovernedChatMixin):
+     """
+     Governed wrapper for OpenAI chat completions.
+
+     Supports:
+     - Standard chat completions
+     - Streaming responses
+     - Function calling
+     - Tool use
+     - Response format (JSON mode)
+     """
+
+     def __init__(self, governed_client: Any):  # GovernedOpenAI
+         self._governed = governed_client
+         self._client = governed_client._client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, Any]],
+         functions: Optional[List[Dict[str, Any]]] = None,
+         function_call: Optional[Union[str, Dict[str, str]]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
+         tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+         stream: bool = False,
+         max_tokens: Optional[int] = None,
+         temperature: Optional[float] = None,
+         top_p: Optional[float] = None,
+         n: Optional[int] = None,
+         stop: Optional[Union[str, List[str]]] = None,
+         presence_penalty: Optional[float] = None,
+         frequency_penalty: Optional[float] = None,
+         logit_bias: Optional[Dict[str, float]] = None,
+         user: Optional[str] = None,
+         response_format: Optional[Dict[str, str]] = None,
+         seed: Optional[int] = None,
+         **kwargs,
+     ) -> Any:
+         """
+         Create a governed chat completion.
+
+         All parameters match the OpenAI API, with added governance:
+         - Model access is checked against policy
+         - Cost is estimated and checked against limits
+         - Functions/tools are filtered by policy
+         - PII is detected and optionally masked
+         - Request is logged for audit
+
+         Args:
+             model: Model to use (e.g., "gpt-4o")
+             messages: List of message dicts with role and content
+             functions: Legacy function definitions (deprecated)
+             function_call: How to handle function calls (deprecated)
+             tools: Tool definitions for function calling
+             tool_choice: How to handle tool selection
+             stream: Whether to stream the response
+             max_tokens: Maximum tokens in response
+             temperature: Sampling temperature
+             top_p: Nucleus sampling parameter
+             n: Number of completions to generate
+             stop: Stop sequences
+             presence_penalty: Presence penalty
+             frequency_penalty: Frequency penalty
+             logit_bias: Token biases
+             user: End-user ID for abuse tracking
+             response_format: Response format (e.g., {"type": "json_object"})
+             seed: Random seed for deterministic outputs
+             **kwargs: Additional parameters passed to OpenAI
+
+         Returns:
+             ChatCompletion response or stream iterator
+
+         Raises:
+             PolicyDeniedError: If the request violates governance policy
+         """
+         start_time = time.time()
+
+         # Estimate tokens for governance checks
+         estimated_input_tokens = self._estimate_message_tokens(messages)
+
+         # Determine which functions/tools to check
+         tools_to_check = tools or []
+         if functions:
+             # Convert legacy functions to tools format for checking
+             tools_to_check = [
+                 {"type": "function", "function": f} for f in functions
+             ]
+
+         # Run pre-request governance checks
+         self._governed._pre_request_checks(
+             model=model,
+             action=GovernanceAction.CHAT_COMPLETION,
+             messages=messages,
+             functions=tools_to_check,
+             estimated_tokens=estimated_input_tokens,
+         )
+
+         # Process messages according to governance policies
+         processed_messages = self._process_messages_for_governance(messages)
+
+         # Filter functions/tools according to policy
+         filtered_tools = self._filter_functions_for_governance(tools)
+         filtered_functions = None
+         if functions:
+             # Handle legacy functions
+             filtered_functions = self._filter_functions_for_governance(
+                 [{"name": f["name"], **f} for f in functions]
+             )
+             if filtered_functions:
+                 filtered_functions = [
+                     {k: v for k, v in f.items() if k != "type"}
+                     for f in filtered_functions
+                 ]
+
+         # Build request kwargs
+         request_kwargs = {
+             "model": model,
+             "messages": processed_messages,
+         }
+
+         # Add optional parameters
+         if filtered_tools is not None:
+             request_kwargs["tools"] = filtered_tools
+         if tool_choice is not None and filtered_tools:
+             request_kwargs["tool_choice"] = tool_choice
+         if filtered_functions is not None:
+             request_kwargs["functions"] = filtered_functions
+         if function_call is not None and filtered_functions:
+             request_kwargs["function_call"] = function_call
+         if max_tokens is not None:
+             # Apply governance limit
+             max_output = self._governed._config.content_policy.max_output_tokens
+             if max_output:
+                 max_tokens = min(max_tokens, max_output)
+             request_kwargs["max_tokens"] = max_tokens
+         if temperature is not None:
+             request_kwargs["temperature"] = temperature
+         if top_p is not None:
+             request_kwargs["top_p"] = top_p
+         if n is not None:
+             request_kwargs["n"] = n
+         if stop is not None:
+             request_kwargs["stop"] = stop
+         if presence_penalty is not None:
+             request_kwargs["presence_penalty"] = presence_penalty
+         if frequency_penalty is not None:
+             request_kwargs["frequency_penalty"] = frequency_penalty
+         if logit_bias is not None:
+             request_kwargs["logit_bias"] = logit_bias
+         if user is not None:
+             request_kwargs["user"] = user
+         elif self._governed._user_context.get("user_id"):
+             request_kwargs["user"] = str(self._governed._user_context["user_id"])
+         if response_format is not None:
+             request_kwargs["response_format"] = response_format
+         if seed is not None:
+             request_kwargs["seed"] = seed
+
+         # Add any extra kwargs
+         request_kwargs.update(kwargs)
+
+         # Handle streaming
+         if stream:
+             request_kwargs["stream"] = True
+             return self._create_stream(request_kwargs, start_time, model)
+
+         # Make the API call
+         try:
+             response = self._client.chat.completions.create(**request_kwargs)
+             latency_ms = int((time.time() - start_time) * 1000)
+
+             # Extract usage metrics
+             usage = getattr(response, "usage", None)
+             input_tokens = usage.prompt_tokens if usage else estimated_input_tokens
+             output_tokens = usage.completion_tokens if usage else 0
+             total_tokens = usage.total_tokens if usage else input_tokens + output_tokens
+
+             # Count function calls
+             function_call_count = 0
+             for choice in response.choices:
+                 if hasattr(choice.message, "tool_calls") and choice.message.tool_calls:
+                     function_call_count += len(choice.message.tool_calls)
+                 if hasattr(choice.message, "function_call") and choice.message.function_call:
+                     function_call_count += 1
+
+             # Record metrics
+             metrics = LLMUsageMetrics(
+                 provider="openai",
+                 model=model,
+                 action=GovernanceAction.CHAT_COMPLETION,
+                 input_tokens=input_tokens,
+                 output_tokens=output_tokens,
+                 total_tokens=total_tokens,
+                 latency_ms=latency_ms,
+                 estimated_cost=estimate_cost(model, input_tokens, output_tokens),
+                 function_calls=function_call_count,
+                 cached=getattr(usage, "cached_tokens", 0) > 0 if usage else False,
+             )
+
+             # Update tracking and log
+             self._governed._post_request_update(metrics)
+             self._governed._log_request(model, GovernanceAction.CHAT_COMPLETION, metrics)
+
+             return response
+
+         except PolicyDeniedError:
+             raise
+         except Exception as e:
+             latency_ms = int((time.time() - start_time) * 1000)
+
+             # Log error
+             metrics = LLMUsageMetrics(
+                 provider="openai",
+                 model=model,
+                 action=GovernanceAction.CHAT_COMPLETION,
+                 latency_ms=latency_ms,
+             )
+             self._governed._log_request(
+                 model, GovernanceAction.CHAT_COMPLETION, metrics,
+                 status="error", error=str(e)
+             )
+             raise
+
+     def _create_stream(
+         self,
+         request_kwargs: Dict[str, Any],
+         start_time: float,
+         model: str,
+     ) -> Iterator[Any]:
+         """
+         Create a governed streaming response.
+
+         Streams the response while tracking tokens and logging at completion.
+         """
+         total_tokens = 0
+         output_content = []
+         function_call_count = 0
+
+         try:
+             stream = self._client.chat.completions.create(**request_kwargs)
+
+             for chunk in stream:
+                 total_tokens += 1  # chunk count only; real usage is estimated below
+
+                 # Track content
+                 if chunk.choices:
+                     delta = chunk.choices[0].delta
+                     if hasattr(delta, "content") and delta.content:
+                         output_content.append(delta.content)
+                     if hasattr(delta, "tool_calls") and delta.tool_calls:
+                         function_call_count += len(delta.tool_calls)
+
+                 yield chunk
+
+             # Calculate final metrics
+             latency_ms = int((time.time() - start_time) * 1000)
+             estimated_output_tokens = len("".join(output_content).split()) * 1.3
+             input_messages = request_kwargs.get("messages", [])
+             estimated_input_tokens = self._estimate_message_tokens(input_messages)
+
+             metrics = LLMUsageMetrics(
+                 provider="openai",
+                 model=model,
+                 action=GovernanceAction.CHAT_COMPLETION,
+                 input_tokens=int(estimated_input_tokens),
+                 output_tokens=int(estimated_output_tokens),
+                 total_tokens=int(estimated_input_tokens + estimated_output_tokens),
+                 latency_ms=latency_ms,
+                 estimated_cost=estimate_cost(
+                     model, int(estimated_input_tokens), int(estimated_output_tokens)
+                 ),
+                 function_calls=function_call_count,
+             )
+
+             self._governed._post_request_update(metrics)
+             self._governed._log_request(model, GovernanceAction.CHAT_COMPLETION, metrics)
+
+         except Exception as e:
+             latency_ms = int((time.time() - start_time) * 1000)
+             metrics = LLMUsageMetrics(
+                 provider="openai",
+                 model=model,
+                 action=GovernanceAction.CHAT_COMPLETION,
+                 latency_ms=latency_ms,
+             )
+             self._governed._log_request(
+                 model, GovernanceAction.CHAT_COMPLETION, metrics,
+                 status="error", error=str(e)
+             )
+             raise
+
+     def _estimate_message_tokens(self, messages: List[Dict[str, Any]]) -> int:
+         """
+         Estimate token count for messages.
+
+         This is a rough estimate based on character count.
+         For accurate counts, use tiktoken.
+         """
+         total_chars = 0
+         for msg in messages:
+             content = msg.get("content", "")
+             if isinstance(content, str):
+                 total_chars += len(content)
+             elif isinstance(content, list):
+                 # Multi-modal content
+                 for part in content:
+                     if isinstance(part, dict) and part.get("type") == "text":
+                         total_chars += len(part.get("text", ""))
+
+         # Rough estimate: ~4 chars per token
+         return max(1, total_chars // 4)
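Taken together, the wrapper is meant as a drop-in for the OpenAI client surface. A minimal usage sketch (assuming the imports from this package and an initialized ControlZeroClient as shown in client.py below; the model name and prompts are illustrative):

    from openai import OpenAI
    from control_zero import ControlZeroClient
    from control_zero.llm.openai import GovernedOpenAI
    from control_zero.policy import PolicyDeniedError

    cz = ControlZeroClient(api_key="...")
    cz.initialize()
    governed = GovernedOpenAI(client=OpenAI(), control_zero=cz)

    try:
        # Non-streaming: metrics are recorded before the response is returned.
        response = governed.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": "Hello!"}],
        )
        # Streaming: create() returns the iterator from _create_stream; usage
        # is estimated and logged once the stream is fully consumed.
        for chunk in governed.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": "Hello!"}],
            stream=True,
        ):
            pass
    except PolicyDeniedError:
        # Raised by the pre-request checks, before any tokens are spent.
        ...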
control_zero/llm/openai/client.py
@@ -0,0 +1,182 @@
+ """
+ Governed OpenAI client wrapper.
+
+ Provides governance features for the OpenAI Python SDK including:
+ - Model access control
+ - Cost tracking and limits
+ - Function calling governance
+ - PII detection and masking
+ - Audit logging
+ """
+
+ from typing import Any, Dict, Optional
+
+ from control_zero.llm.base import (
+     GovernedLLM,
+     LLMGovernanceConfig,
+ )
+ from control_zero.llm.openai.chat import GovernedChatCompletions
+
+
+ class GovernedChat:
+     """Governed wrapper for the OpenAI chat namespace."""
+
+     def __init__(self, governed_client: "GovernedOpenAI"):
+         self._governed_client = governed_client
+         self._completions = GovernedChatCompletions(governed_client)
+
+     @property
+     def completions(self) -> GovernedChatCompletions:
+         """Access governed chat completions."""
+         return self._completions
+
+
+ class GovernedOpenAI(GovernedLLM):
+     """
+     Governed wrapper for the OpenAI Python SDK.
+
+     This class wraps an OpenAI client instance and adds governance
+     features including policy enforcement, cost tracking, and audit logging.
+
+     Example:
+         from control_zero import ControlZeroClient
+         from control_zero.llm.openai import GovernedOpenAI
+         from openai import OpenAI
+
+         cz = ControlZeroClient(api_key="...")
+         cz.initialize()
+
+         client = OpenAI()
+         governed = GovernedOpenAI(client=client, control_zero=cz)
+
+         # Configure governance
+         from control_zero.llm import LLMGovernanceConfig, ModelPolicy, CostPolicy
+
+         governed = GovernedOpenAI(
+             client=client,
+             control_zero=cz,
+             config=LLMGovernanceConfig(
+                 model_policy=ModelPolicy(
+                     allowed_models=["gpt-4o", "gpt-4o-mini"],
+                     max_tokens_per_request=4096
+                 ),
+                 cost_policy=CostPolicy(
+                     max_cost_per_day=10.00,
+                     max_requests_per_day=1000
+                 )
+             ),
+             user_context={"user_id": "user_123", "role": "developer"}
+         )
+
+         # Make a governed API call
+         response = governed.chat.completions.create(
+             model="gpt-4o",
+             messages=[{"role": "user", "content": "Hello!"}]
+         )
+     """
+
+     def __init__(
+         self,
+         client: Any,  # openai.OpenAI
+         control_zero: Any,  # ControlZeroClient
+         config: Optional[LLMGovernanceConfig] = None,
+         user_context: Optional[Dict[str, Any]] = None,
+     ):
+         """
+         Initialize a governed OpenAI client.
+
+         Args:
+             client: An OpenAI client instance
+             control_zero: Control Zero client for policy and logging
+             config: Governance configuration
+             user_context: Context about the current user
+         """
+         super().__init__(client, control_zero, config, user_context)
+
+         # Create governed namespaces
+         self._chat = GovernedChat(self)
+
+     @property
+     def provider_name(self) -> str:
+         return "openai"
+
+     @property
+     def chat(self) -> GovernedChat:
+         """Access governed chat completions."""
+         return self._chat
+
+     @property
+     def models(self):
+         """Access the underlying models endpoint (pass-through)."""
+         return self._client.models
+
+     @property
+     def embeddings(self):
+         """Access the underlying embeddings endpoint.
+
+         TODO: Add governance wrapper for embeddings.
+         """
+         return self._client.embeddings
+
+     @property
+     def files(self):
+         """Access the underlying files endpoint (pass-through)."""
+         return self._client.files
+
+     @property
+     def images(self):
+         """Access the underlying images endpoint.
+
+         TODO: Add governance wrapper for image generation.
+         """
+         return self._client.images
+
+     @property
+     def audio(self):
+         """Access the underlying audio endpoint.
+
+         TODO: Add governance wrapper for audio.
+         """
+         return self._client.audio
+
+     @property
+     def moderations(self):
+         """Access the underlying moderations endpoint (pass-through)."""
+         return self._client.moderations
+
+     def with_user_context(self, user_context: Dict[str, Any]) -> "GovernedOpenAI":
+         """
+         Create a new governed client with updated user context.
+
+         This is useful for per-request user context in multi-tenant applications.
+
+         Args:
+             user_context: New user context (merged with existing)
+
+         Returns:
+             New GovernedOpenAI instance with updated context
+         """
+         merged_context = {**self._user_context, **user_context}
+         return GovernedOpenAI(
+             client=self._client,
+             control_zero=self._cz,
+             config=self._config,
+             user_context=merged_context,
+         )
+
+     def with_config(self, config: LLMGovernanceConfig) -> "GovernedOpenAI":
+         """
+         Create a new governed client with updated configuration.
+
+         Args:
+             config: New governance configuration
+
+         Returns:
+             New GovernedOpenAI instance with updated config
+         """
+         return GovernedOpenAI(
+             client=self._client,
+             control_zero=self._cz,
+             config=config,
+             user_context=self._user_context,
+         )
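Because with_user_context and with_config return new instances that share the underlying OpenAI client, per-request scoping is cheap. A sketch for a multi-tenant request handler, continuing the example above (tenant ID and prompt are illustrative):

    base = GovernedOpenAI(client=OpenAI(), control_zero=cz)

    def handle_request(tenant_id: str, prompt: str):
        # Shares the wrapped client and config; only the merged user_context
        # differs, so the "user" field sent to OpenAI and the audit log
        # attribute usage to this tenant.
        scoped = base.with_user_context({"user_id": tenant_id})
        return scoped.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": prompt}],
        )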
control_zero/logging/__init__.py
@@ -0,0 +1,5 @@
+ """Async logging for Control Zero SDK."""
+
+ from control_zero.logging.async_logger import AsyncLogger
+
+ __all__ = ["AsyncLogger"]
control_zero/logging/async_logger.py
@@ -0,0 +1,65 @@
+ """Async batched logger for audit logs."""
+
+ import asyncio
+ from typing import Awaitable, Callable, List, Optional
+
+ from control_zero.types import AuditLogEntry
+
+
+ class AsyncLogger:
+     """
+     Async logger that batches and sends audit logs.
+
+     Logs are queued locally and flushed periodically to avoid
+     impacting the latency of tool calls.
+     """
+
+     def __init__(
+         self,
+         batch_size: int = 100,
+         flush_interval: float = 5.0,
+     ):
+         self._batch_size = batch_size
+         self._flush_interval = flush_interval
+         self._queue: asyncio.Queue[AuditLogEntry] = asyncio.Queue()
+         self._task: Optional[asyncio.Task[None]] = None
+         self._send_fn: Optional[Callable[[List[AuditLogEntry]], Awaitable[None]]] = None
+
+     async def start(
+         self, send_fn: Callable[[List[AuditLogEntry]], Awaitable[None]]
+     ) -> None:
+         """Start the background flush task."""
+         self._send_fn = send_fn
+         self._task = asyncio.create_task(self._flush_loop())
+
+     async def _flush_loop(self) -> None:
+         """Background loop that flushes logs periodically."""
+         while True:
+             await asyncio.sleep(self._flush_interval)
+             await self.flush()
+
+     async def log(self, entry: AuditLogEntry) -> None:
+         """Queue a log entry."""
+         await self._queue.put(entry)
+
+         # Flush immediately if the batch is full
+         if self._queue.qsize() >= self._batch_size:
+             await self.flush()
+
+     async def flush(self) -> None:
+         """Flush queued logs to the server."""
+         logs: List[AuditLogEntry] = []
+
+         while len(logs) < self._batch_size and not self._queue.empty():
+             logs.append(await self._queue.get())
+
+         if logs and self._send_fn is not None:
+             try:
+                 await self._send_fn(logs)
+             except Exception:
+                 # Re-queue on failure (best effort)
+                 for log in logs:
+                     await self._queue.put(log)
+
+     async def stop(self) -> None:
+         """Stop the logger and flush remaining logs."""
+         if self._task:
+             self._task.cancel()
+             try:
+                 await self._task
+             except asyncio.CancelledError:
+                 pass
+         await self.flush()
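A usage sketch for the logger in isolation (the send function here is a hypothetical transport; in the SDK a real sender is passed to start(), and AuditLogEntry's fields come from control_zero.types):

    import asyncio

    from control_zero.logging import AsyncLogger

    async def send_batch(entries):
        # Hypothetical: ship the batch to an audit endpoint.
        print(f"sending {len(entries)} audit entries")

    async def main():
        logger = AsyncLogger(batch_size=50, flush_interval=2.0)
        await logger.start(send_batch)
        # ... call logger.log(entry) from instrumented code paths ...
        await logger.stop()  # cancels the flush loop, then drains the queue

    asyncio.run(main())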
control_zero/mcp/__init__.py
@@ -0,0 +1,5 @@
+ """MCP middleware for Control Zero SDK."""
+
+ from control_zero.mcp.middleware import MCPMiddleware
+
+ __all__ = ["MCPMiddleware"]