control_zero-0.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. control_zero/__init__.py +31 -0
  2. control_zero/client.py +584 -0
  3. control_zero/integrations/crewai/__init__.py +53 -0
  4. control_zero/integrations/crewai/agent.py +267 -0
  5. control_zero/integrations/crewai/crew.py +381 -0
  6. control_zero/integrations/crewai/task.py +291 -0
  7. control_zero/integrations/crewai/tool.py +299 -0
  8. control_zero/integrations/langchain/__init__.py +58 -0
  9. control_zero/integrations/langchain/agent.py +311 -0
  10. control_zero/integrations/langchain/callbacks.py +441 -0
  11. control_zero/integrations/langchain/chain.py +319 -0
  12. control_zero/integrations/langchain/graph.py +441 -0
  13. control_zero/integrations/langchain/tool.py +271 -0
  14. control_zero/llm/__init__.py +77 -0
  15. control_zero/llm/anthropic/__init__.py +35 -0
  16. control_zero/llm/anthropic/client.py +136 -0
  17. control_zero/llm/anthropic/messages.py +375 -0
  18. control_zero/llm/base.py +551 -0
  19. control_zero/llm/cohere/__init__.py +32 -0
  20. control_zero/llm/cohere/client.py +402 -0
  21. control_zero/llm/gemini/__init__.py +34 -0
  22. control_zero/llm/gemini/client.py +486 -0
  23. control_zero/llm/groq/__init__.py +32 -0
  24. control_zero/llm/groq/client.py +330 -0
  25. control_zero/llm/mistral/__init__.py +32 -0
  26. control_zero/llm/mistral/client.py +319 -0
  27. control_zero/llm/ollama/__init__.py +31 -0
  28. control_zero/llm/ollama/client.py +439 -0
  29. control_zero/llm/openai/__init__.py +34 -0
  30. control_zero/llm/openai/chat.py +331 -0
  31. control_zero/llm/openai/client.py +182 -0
  32. control_zero/logging/__init__.py +5 -0
  33. control_zero/logging/async_logger.py +65 -0
  34. control_zero/mcp/__init__.py +5 -0
  35. control_zero/mcp/middleware.py +148 -0
  36. control_zero/policy/__init__.py +5 -0
  37. control_zero/policy/enforcer.py +99 -0
  38. control_zero/secrets/__init__.py +5 -0
  39. control_zero/secrets/manager.py +77 -0
  40. control_zero/types.py +51 -0
  41. control_zero-0.2.0.dist-info/METADATA +216 -0
  42. control_zero-0.2.0.dist-info/RECORD +44 -0
  43. control_zero-0.2.0.dist-info/WHEEL +4 -0
  44. control_zero-0.2.0.dist-info/licenses/LICENSE +17 -0
control_zero/llm/gemini/client.py
@@ -0,0 +1,486 @@
+ """
+ Governed Google Gemini client wrapper.
+
+ Provides governance features for the Google Generative AI SDK including:
+ - Model access control
+ - Cost tracking and limits
+ - Function calling governance
+ - PII detection and masking
+ - Audit logging
+ """
+
+ import time
+ from typing import Any, Dict, Iterator, List, Optional, Union
+
+ from control_zero.llm.base import (
+     GovernanceAction,
+     GovernedLLM,
+     GovernedChatMixin,
+     LLMGovernanceConfig,
+     LLMUsageMetrics,
+     estimate_cost,
+ )
+ from control_zero.policy import PolicyDeniedError
+
+
+ class GovernedGemini(GovernedLLM, GovernedChatMixin):
+     """
+     Governed wrapper for Google Gemini.
+
+     This class wraps Gemini's GenerativeModel and adds governance
+     features including policy enforcement, cost tracking, and audit logging.
+
+     Example:
+         from control_zero import ControlZeroClient
+         from control_zero.llm.base import LLMGovernanceConfig
+         from control_zero.llm.gemini import GovernedGemini
+         import google.generativeai as genai
+
+         genai.configure(api_key="...")
+
+         cz = ControlZeroClient(api_key="...")
+         cz.initialize()
+
+         governed = GovernedGemini(
+             model_name="gemini-1.5-pro",
+             control_zero=cz,
+             config=LLMGovernanceConfig(
+                 model_policy=ModelPolicy(
+                     allowed_models=["gemini-1.5-pro", "gemini-1.5-flash"]
+                 ),
+                 cost_policy=CostPolicy(
+                     max_cost_per_day=10.00
+                 ),
+             ),
+         )
+
+         # Make a governed API call
+         response = governed.generate_content("Hello!")
+     """
+
+     def __init__(
+         self,
+         model_name: str,
+         control_zero: Any,  # ControlZeroClient
+         config: Optional[LLMGovernanceConfig] = None,
+         user_context: Optional[Dict[str, Any]] = None,
+         generation_config: Optional[Dict[str, Any]] = None,
+         safety_settings: Optional[List[Dict[str, Any]]] = None,
+         tools: Optional[List[Any]] = None,
+         system_instruction: Optional[str] = None,
+     ):
+         """
+         Initialize a governed Gemini model.
+
+         Args:
+             model_name: Name of the Gemini model (e.g., "gemini-1.5-pro")
+             control_zero: Control Zero client for policy and logging
+             config: Governance configuration
+             user_context: Context about the current user
+             generation_config: Gemini generation configuration
+             safety_settings: Gemini safety settings
+             tools: Function/tool definitions
+             system_instruction: System instruction for the model
+         """
+         # Import here to avoid requiring google-generativeai at module load
+         try:
+             import google.generativeai as genai
+         except ImportError:
+             raise ImportError(
+                 "google-generativeai is required for Gemini support. "
+                 "Install it with: pip install google-generativeai"
+             )
+
+         # Create the underlying model
+         model_kwargs = {"model_name": model_name}
+         if generation_config:
+             model_kwargs["generation_config"] = generation_config
+         if safety_settings:
+             model_kwargs["safety_settings"] = safety_settings
+         if tools:
+             model_kwargs["tools"] = tools
+         if system_instruction:
+             model_kwargs["system_instruction"] = system_instruction
+
+         client = genai.GenerativeModel(**model_kwargs)
+
+         super().__init__(client, control_zero, config, user_context)
+
+         self._model_name = model_name
+         self._tools = tools or []
+         self._system_instruction = system_instruction
+
+     @property
+     def provider_name(self) -> str:
+         return "gemini"
+
+     @property
+     def model_name(self) -> str:
+         return self._model_name
+
+     def generate_content(
+         self,
+         contents: Union[str, List[Any]],
+         *,
+         generation_config: Optional[Dict[str, Any]] = None,
+         safety_settings: Optional[List[Dict[str, Any]]] = None,
+         tools: Optional[List[Any]] = None,
+         tool_config: Optional[Dict[str, Any]] = None,
+         stream: bool = False,
+         **kwargs,
+     ) -> Any:
+         """
+         Generate content with governance.
+
+         Args:
+             contents: Input content (text or parts)
+             generation_config: Override generation config
+             safety_settings: Override safety settings
+             tools: Override tools
+             tool_config: Tool configuration
+             stream: Whether to stream the response
+             **kwargs: Additional parameters
+
+         Returns:
+             GenerateContentResponse or stream iterator
+
+         Raises:
+             PolicyDeniedError: If the request violates a governance policy
+         """
+         start_time = time.time()
+
+         # Convert contents to messages format for governance checks
+         messages = self._contents_to_messages(contents)
+
+         # Estimate tokens
+         estimated_tokens = self._estimate_content_tokens(contents)
+
+         # Get tools for policy check
+         tools_to_check = tools or self._tools
+         tools_for_policy = []
+         if tools_to_check:
+             for tool in tools_to_check:
+                 if hasattr(tool, "function_declarations"):
+                     for func in tool.function_declarations:
+                         tools_for_policy.append({
+                             "name": getattr(func, "name", ""),
+                             "type": "function",
+                         })
+
+         # Run pre-request governance checks
+         self._pre_request_checks(
+             model=self._model_name,
+             action=GovernanceAction.CHAT_COMPLETION,
+             messages=messages,
+             functions=tools_for_policy,
+             estimated_tokens=estimated_tokens,
+         )
+
+         # Process content for PII if enabled
+         processed_contents = self._process_content_for_governance(contents)
+
+         # Build kwargs
+         request_kwargs = {}
+         if generation_config:
+             request_kwargs["generation_config"] = generation_config
+         if safety_settings:
+             request_kwargs["safety_settings"] = safety_settings
+         if tools:
+             request_kwargs["tools"] = tools
+         if tool_config:
+             request_kwargs["tool_config"] = tool_config
+         request_kwargs.update(kwargs)
+
+         # Handle streaming
+         if stream:
+             return self._generate_stream(processed_contents, request_kwargs, start_time)
+
+         # Make the API call
+         try:
+             response = self._client.generate_content(processed_contents, **request_kwargs)
+             latency_ms = int((time.time() - start_time) * 1000)
+
+             # Extract usage metrics
+             usage = getattr(response, "usage_metadata", None)
+             input_tokens = getattr(usage, "prompt_token_count", estimated_tokens) if usage else estimated_tokens
+             output_tokens = getattr(usage, "candidates_token_count", 0) if usage else 0
+             total_tokens = getattr(usage, "total_token_count", input_tokens + output_tokens) if usage else input_tokens + output_tokens
+
+             # Count function calls (proto parts may expose the attribute even
+             # when empty, so check for a populated value, not mere presence)
+             function_call_count = 0
+             for candidate in getattr(response, "candidates", []):
+                 content = getattr(candidate, "content", None)
+                 if content:
+                     for part in getattr(content, "parts", []):
+                         if getattr(part, "function_call", None):
+                             function_call_count += 1
+
+             # Record metrics
+             metrics = LLMUsageMetrics(
+                 provider="gemini",
+                 model=self._model_name,
+                 action=GovernanceAction.CHAT_COMPLETION,
+                 input_tokens=input_tokens,
+                 output_tokens=output_tokens,
+                 total_tokens=total_tokens,
+                 latency_ms=latency_ms,
+                 estimated_cost=estimate_cost(self._model_name, input_tokens, output_tokens),
+                 function_calls=function_call_count,
+             )
+
+             # Update tracking and log
+             self._post_request_update(metrics)
+             self._log_request(self._model_name, GovernanceAction.CHAT_COMPLETION, metrics)
+
+             return response
+
+         except PolicyDeniedError:
+             raise
+         except Exception as e:
+             latency_ms = int((time.time() - start_time) * 1000)
+             metrics = LLMUsageMetrics(
+                 provider="gemini",
+                 model=self._model_name,
+                 action=GovernanceAction.CHAT_COMPLETION,
+                 latency_ms=latency_ms,
+             )
+             self._log_request(
+                 self._model_name, GovernanceAction.CHAT_COMPLETION, metrics,
+                 status="error", error=str(e),
+             )
+             raise
+
+     def _generate_stream(
+         self,
+         contents: Any,
+         request_kwargs: Dict[str, Any],
+         start_time: float,
+     ) -> Iterator[Any]:
+         """Generate a governed streaming response."""
+         total_tokens = 0
+         function_call_count = 0
+
+         try:
+             stream = self._client.generate_content(contents, stream=True, **request_kwargs)
+
+             for chunk in stream:
+                 # Track tokens and function calls
+                 usage = getattr(chunk, "usage_metadata", None)
+                 if usage:
+                     total_tokens = getattr(usage, "total_token_count", total_tokens)
+
+                 for candidate in getattr(chunk, "candidates", []):
+                     content = getattr(candidate, "content", None)
+                     if content:
+                         for part in getattr(content, "parts", []):
+                             if getattr(part, "function_call", None):
+                                 function_call_count += 1
+
+                 yield chunk
+
+             # Calculate final metrics; the stream gives no input/output token
+             # split, so the cost estimate divides the total evenly
+             latency_ms = int((time.time() - start_time) * 1000)
+
+             metrics = LLMUsageMetrics(
+                 provider="gemini",
+                 model=self._model_name,
+                 action=GovernanceAction.CHAT_COMPLETION,
+                 total_tokens=total_tokens,
+                 latency_ms=latency_ms,
+                 estimated_cost=estimate_cost(self._model_name, total_tokens // 2, total_tokens // 2),
+                 function_calls=function_call_count,
+             )
+
+             self._post_request_update(metrics)
+             self._log_request(self._model_name, GovernanceAction.CHAT_COMPLETION, metrics)
+
+         except Exception as e:
+             latency_ms = int((time.time() - start_time) * 1000)
+             metrics = LLMUsageMetrics(
+                 provider="gemini",
+                 model=self._model_name,
+                 action=GovernanceAction.CHAT_COMPLETION,
+                 latency_ms=latency_ms,
+             )
+             self._log_request(
+                 self._model_name, GovernanceAction.CHAT_COMPLETION, metrics,
+                 status="error", error=str(e),
+             )
+             raise
+
+     def start_chat(
+         self,
+         history: Optional[List[Any]] = None,
+         **kwargs,
+     ) -> "GovernedChat":
+         """
+         Start a governed chat session.
+
+         Args:
+             history: Initial chat history
+             **kwargs: Additional parameters
+
+         Returns:
+             GovernedChat instance
+         """
+         chat = self._client.start_chat(history=history, **kwargs)
+         return GovernedChat(chat, self)
+
+     def _contents_to_messages(self, contents: Any) -> List[Dict[str, Any]]:
+         """Convert Gemini contents to standard message format."""
+         if isinstance(contents, str):
+             return [{"role": "user", "content": contents}]
+         elif isinstance(contents, list):
+             messages = []
+             for item in contents:
+                 if isinstance(item, str):
+                     messages.append({"role": "user", "content": item})
+                 elif hasattr(item, "text"):
+                     messages.append({"role": "user", "content": item.text})
+             return messages
+         return [{"role": "user", "content": str(contents)}]
+
+     def _estimate_content_tokens(self, contents: Any) -> int:
+         """Estimate token count for content (~4 characters per token)."""
+         if isinstance(contents, str):
+             return len(contents) // 4
+         elif isinstance(contents, list):
+             total = 0
+             for item in contents:
+                 if isinstance(item, str):
+                     total += len(item) // 4
+                 elif hasattr(item, "text"):
+                     total += len(item.text) // 4
+             return total
+         return len(str(contents)) // 4
+
+     def _process_content_for_governance(self, contents: Any) -> Any:
+         """Process content for PII masking if enabled."""
+         if not self._config.content_policy.enable_pii_detection:
+             return contents
+
+         if self._config.content_policy.pii_action != "mask":
+             return contents
+
+         if isinstance(contents, str):
+             return self._mask_pii(contents)
+         elif isinstance(contents, list):
+             processed = []
+             for item in contents:
+                 if isinstance(item, str):
+                     processed.append(self._mask_pii(item))
+                 else:
+                     processed.append(item)
+             return processed
+         return contents
+
+     def with_user_context(self, user_context: Dict[str, Any]) -> "GovernedGemini":
+         """Create a new governed client with updated user context."""
+         merged_context = {**self._user_context, **user_context}
+         return GovernedGemini(
+             model_name=self._model_name,
+             control_zero=self._cz,
+             config=self._config,
+             user_context=merged_context,
+             # Carry over the stored tool and system-instruction setup so the
+             # derived client behaves like the original
+             tools=self._tools or None,
+             system_instruction=self._system_instruction,
+         )
+
+
+ class GovernedChat:
+     """Governed wrapper for Gemini chat sessions."""
+
+     def __init__(self, chat: Any, governed_model: GovernedGemini):
+         self._chat = chat
+         self._governed = governed_model
+
+     def send_message(
+         self,
+         content: Union[str, List[Any]],
+         *,
+         generation_config: Optional[Dict[str, Any]] = None,
+         safety_settings: Optional[List[Dict[str, Any]]] = None,
+         stream: bool = False,
+         **kwargs,
+     ) -> Any:
+         """
+         Send a governed message in the chat.
+
+         Args:
+             content: Message content
+             generation_config: Override generation config
+             safety_settings: Override safety settings
+             stream: Whether to stream the response
+             **kwargs: Additional parameters
+
+         Returns:
+             GenerateContentResponse or stream iterator
+         """
+         start_time = time.time()
+
+         # Governance checks
+         messages = self._governed._contents_to_messages(content)
+         estimated_tokens = self._governed._estimate_content_tokens(content)
+
+         self._governed._pre_request_checks(
+             model=self._governed._model_name,
+             action=GovernanceAction.CHAT_COMPLETION,
+             messages=messages,
+             estimated_tokens=estimated_tokens,
+         )
+
+         # Process content
+         processed_content = self._governed._process_content_for_governance(content)
+
+         # Build kwargs
+         request_kwargs = {}
+         if generation_config:
+             request_kwargs["generation_config"] = generation_config
+         if safety_settings:
+             request_kwargs["safety_settings"] = safety_settings
+         request_kwargs.update(kwargs)
+
+         try:
+             if stream:
+                 response = self._chat.send_message(processed_content, stream=True, **request_kwargs)
+             else:
+                 response = self._chat.send_message(processed_content, **request_kwargs)
+
+             latency_ms = int((time.time() - start_time) * 1000)
+
+             # Extract metrics (streamed responses generally have no populated
+             # usage_metadata yet, so the pre-request estimate is the fallback)
+             usage = getattr(response, "usage_metadata", None)
+             input_tokens = getattr(usage, "prompt_token_count", estimated_tokens) if usage else estimated_tokens
+             output_tokens = getattr(usage, "candidates_token_count", 0) if usage else 0
+
+             metrics = LLMUsageMetrics(
+                 provider="gemini",
+                 model=self._governed._model_name,
+                 action=GovernanceAction.CHAT_COMPLETION,
+                 input_tokens=input_tokens,
+                 output_tokens=output_tokens,
+                 total_tokens=input_tokens + output_tokens,
+                 latency_ms=latency_ms,
+                 estimated_cost=estimate_cost(self._governed._model_name, input_tokens, output_tokens),
+             )
+
+             self._governed._post_request_update(metrics)
+             self._governed._log_request(self._governed._model_name, GovernanceAction.CHAT_COMPLETION, metrics)
+
+             return response
+
+         except Exception as e:
+             latency_ms = int((time.time() - start_time) * 1000)
+             metrics = LLMUsageMetrics(
+                 provider="gemini",
+                 model=self._governed._model_name,
+                 action=GovernanceAction.CHAT_COMPLETION,
+                 latency_ms=latency_ms,
+             )
+             self._governed._log_request(
+                 self._governed._model_name, GovernanceAction.CHAT_COMPLETION, metrics,
+                 status="error", error=str(e),
+             )
+             raise
+
+     @property
+     def history(self) -> List[Any]:
+         """Get the chat history."""
+         return self._chat.history
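
For orientation, a minimal streaming sketch against the wrapper above. It assumes valid API keys and a reachable Control Zero backend; ControlZeroClient, GovernedGemini, and generate_content(stream=True) come from the code as shipped, while the key strings and model choice are illustrative placeholders.

    import google.generativeai as genai

    from control_zero import ControlZeroClient
    from control_zero.llm.gemini import GovernedGemini

    genai.configure(api_key="...")         # placeholder
    cz = ControlZeroClient(api_key="...")  # placeholder
    cz.initialize()

    governed = GovernedGemini(model_name="gemini-1.5-flash", control_zero=cz)

    # stream=True routes through _generate_stream above; usage metrics are
    # recorded only once the stream has been fully consumed
    for chunk in governed.generate_content("One line on model governance.", stream=True):
        print(chunk.text, end="")
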
control_zero/llm/groq/__init__.py
@@ -0,0 +1,32 @@
+ """
+ Control Zero Groq Governance Wrapper.
+
+ This module provides governance wrappers for the Groq Python SDK,
+ enabling policy enforcement, cost tracking, and audit logging for
+ fast inference API calls.
+
+ Usage:
+     from control_zero import ControlZeroClient
+     from control_zero.llm.groq import GovernedGroq
+     from groq import Groq
+
+     # Initialize Control Zero
+     cz_client = ControlZeroClient(api_key="cz_live_xxx")
+     cz_client.initialize()
+
+     # Wrap Groq client with governance
+     groq_client = Groq()
+     governed = GovernedGroq(client=groq_client, control_zero=cz_client)
+
+     # All calls are now governed
+     response = governed.chat.completions.create(
+         model="llama-3.1-70b-versatile",
+         messages=[{"role": "user", "content": "Hello"}]
+     )
+ """
+
+ from control_zero.llm.groq.client import GovernedGroq
+
+ __all__ = [
+     "GovernedGroq",
+ ]
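
Finally, a short sketch of handling a policy denial. The Gemini client above runs its checks before the provider call, so, assuming GovernedGroq follows the same pattern and raises the same PolicyDeniedError from control_zero.policy, a denied request spends no tokens; the request body here reuses the docstring's own example.

    from control_zero.policy import PolicyDeniedError

    try:
        response = governed.chat.completions.create(
            model="llama-3.1-70b-versatile",
            messages=[{"role": "user", "content": "Hello"}],
        )
    except PolicyDeniedError as exc:
        # Nothing was sent to the provider; the caller decides whether to
        # retry with an allowed model or surface the denial
        print(f"Blocked by policy: {exc}")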