control_zero-0.2.0-py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
Files changed (44)
  1. control_zero/__init__.py +31 -0
  2. control_zero/client.py +584 -0
  3. control_zero/integrations/crewai/__init__.py +53 -0
  4. control_zero/integrations/crewai/agent.py +267 -0
  5. control_zero/integrations/crewai/crew.py +381 -0
  6. control_zero/integrations/crewai/task.py +291 -0
  7. control_zero/integrations/crewai/tool.py +299 -0
  8. control_zero/integrations/langchain/__init__.py +58 -0
  9. control_zero/integrations/langchain/agent.py +311 -0
  10. control_zero/integrations/langchain/callbacks.py +441 -0
  11. control_zero/integrations/langchain/chain.py +319 -0
  12. control_zero/integrations/langchain/graph.py +441 -0
  13. control_zero/integrations/langchain/tool.py +271 -0
  14. control_zero/llm/__init__.py +77 -0
  15. control_zero/llm/anthropic/__init__.py +35 -0
  16. control_zero/llm/anthropic/client.py +136 -0
  17. control_zero/llm/anthropic/messages.py +375 -0
  18. control_zero/llm/base.py +551 -0
  19. control_zero/llm/cohere/__init__.py +32 -0
  20. control_zero/llm/cohere/client.py +402 -0
  21. control_zero/llm/gemini/__init__.py +34 -0
  22. control_zero/llm/gemini/client.py +486 -0
  23. control_zero/llm/groq/__init__.py +32 -0
  24. control_zero/llm/groq/client.py +330 -0
  25. control_zero/llm/mistral/__init__.py +32 -0
  26. control_zero/llm/mistral/client.py +319 -0
  27. control_zero/llm/ollama/__init__.py +31 -0
  28. control_zero/llm/ollama/client.py +439 -0
  29. control_zero/llm/openai/__init__.py +34 -0
  30. control_zero/llm/openai/chat.py +331 -0
  31. control_zero/llm/openai/client.py +182 -0
  32. control_zero/logging/__init__.py +5 -0
  33. control_zero/logging/async_logger.py +65 -0
  34. control_zero/mcp/__init__.py +5 -0
  35. control_zero/mcp/middleware.py +148 -0
  36. control_zero/policy/__init__.py +5 -0
  37. control_zero/policy/enforcer.py +99 -0
  38. control_zero/secrets/__init__.py +5 -0
  39. control_zero/secrets/manager.py +77 -0
  40. control_zero/types.py +51 -0
  41. control_zero-0.2.0.dist-info/METADATA +216 -0
  42. control_zero-0.2.0.dist-info/RECORD +44 -0
  43. control_zero-0.2.0.dist-info/WHEEL +4 -0
  44. control_zero-0.2.0.dist-info/licenses/LICENSE +17 -0
control_zero/integrations/langchain/chain.py
@@ -0,0 +1,319 @@
+ """
+ Governed LangChain Chain wrapper.
+
+ Provides governance enforcement for LangChain chains, including:
+ - Chain-level policy checks
+ - Input/output logging
+ - Cost tracking
+ """
+
+ from __future__ import annotations
+
+ import time
+ from typing import Any, Dict, List, Optional, Union
+
+ try:
+     from langchain_core.runnables import Runnable, RunnableConfig
+     from langchain.chains.base import Chain
+     LANGCHAIN_AVAILABLE = True
+ except ImportError:
+     LANGCHAIN_AVAILABLE = False
+     class Runnable:
+         pass
+     class RunnableConfig:
+         pass
+     class Chain:
+         pass
+
+ from control_zero.client import ControlZeroClient
+ from control_zero.policy import PolicyDecision, PolicyDeniedError
+
+
+ class GovernedChain:
+     """
+     Governance wrapper for LangChain chains.
+
+     Wraps any LangChain chain (including LCEL runnables) with:
+     - Policy enforcement
+     - Input/output audit logging
+     - Error tracking
+
+     Usage:
+         from langchain_core.prompts import ChatPromptTemplate
+         from langchain_openai import ChatOpenAI
+
+         prompt = ChatPromptTemplate.from_messages([...])
+         llm = ChatOpenAI()
+         chain = prompt | llm
+
+         governed = GovernedChain(
+             chain=chain,
+             client=client,
+             chain_name="my_chain"
+         )
+
+         result = governed.invoke({"question": "Hello"})
+     """
+
+     def __init__(
+         self,
+         chain: Union[Runnable, Chain],
+         client: ControlZeroClient,
+         chain_name: str = "langchain_chain",
+         log_inputs: bool = False,
+         log_outputs: bool = False,
+     ):
+         """
+         Initialize the governed chain.
+
+         Args:
+             chain: The LangChain chain or runnable to wrap
+             client: Control Zero client
+             chain_name: Name used for logging purposes
+             log_inputs: Whether to log input content
+             log_outputs: Whether to log output content
+         """
+         if not LANGCHAIN_AVAILABLE:
+             raise ImportError(
+                 "langchain is required. Install with: pip install langchain-core"
+             )
+
+         self._chain = chain
+         self._client = client
+         self._chain_name = chain_name
+         self._log_inputs = log_inputs
+         self._log_outputs = log_outputs
+
+     def _check_policy(self, method: str) -> PolicyDecision:
+         """Check policy for chain execution."""
+         if hasattr(self._client, '_policy_cache') and self._client._policy_cache:
+             return self._client._policy_cache.evaluate(self._chain_name, method)
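+         # No policy cache configured on the client: default to allow
+         # (fail-open), so the wrapper still works without policies loaded.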
+         return PolicyDecision(effect="allow")
+
+     def invoke(
+         self,
+         input: Union[str, Dict[str, Any]],
+         config: Optional[RunnableConfig] = None,
+         **kwargs,
+     ) -> Any:
+         """
+         Invoke the chain with governance.
+
+         Args:
+             input: Chain input
+             config: Runnable configuration
+             **kwargs: Additional arguments
+
+         Returns:
+             Chain output
+         """
+         decision = self._check_policy("invoke")
+         if decision.effect == "deny":
+             self._log("invoke", 0, "denied", decision)
+             raise PolicyDeniedError(decision)
+
+         start = time.perf_counter()
+
+         try:
+             result = self._chain.invoke(input, config=config, **kwargs)
+             latency_ms = int((time.perf_counter() - start) * 1000)
+             self._log("invoke", latency_ms, "success", decision, input, result)
+             return result
+
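+         # Policy denials raised by nested governed components propagate
+         # unchanged rather than being logged again as generic errors.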
+         except PolicyDeniedError:
+             raise
+         except Exception as e:
+             latency_ms = int((time.perf_counter() - start) * 1000)
+             self._log("invoke", latency_ms, "error", decision, input, error=e)
+             raise
+
+     async def ainvoke(
+         self,
+         input: Union[str, Dict[str, Any]],
+         config: Optional[RunnableConfig] = None,
+         **kwargs,
+     ) -> Any:
+         """Async invoke with governance."""
+         decision = self._check_policy("invoke")
+         if decision.effect == "deny":
+             self._log("invoke", 0, "denied", decision)
+             raise PolicyDeniedError(decision)
+
+         start = time.perf_counter()
+
+         try:
+             result = await self._chain.ainvoke(input, config=config, **kwargs)
+             latency_ms = int((time.perf_counter() - start) * 1000)
+             self._log("invoke", latency_ms, "success", decision, input, result)
+             return result
+
+         except PolicyDeniedError:
+             raise
+         except Exception as e:
+             latency_ms = int((time.perf_counter() - start) * 1000)
+             self._log("invoke", latency_ms, "error", decision, input, error=e)
+             raise
+
+     def stream(
+         self,
+         input: Union[str, Dict[str, Any]],
+         config: Optional[RunnableConfig] = None,
+         **kwargs,
+     ):
+         """
+         Stream chain output with governance.
+
+         Args:
+             input: Chain input
+             config: Runnable configuration
+             **kwargs: Additional arguments
+
+         Yields:
+             Chain output chunks
+         """
+         decision = self._check_policy("stream")
+         if decision.effect == "deny":
+             self._log("stream", 0, "denied", decision)
+             raise PolicyDeniedError(decision)
+
+         start = time.perf_counter()
+         chunks = []
+
+         try:
+             for chunk in self._chain.stream(input, config=config, **kwargs):
+                 chunks.append(chunk)
+                 yield chunk
+
+             latency_ms = int((time.perf_counter() - start) * 1000)
+             self._log("stream", latency_ms, "success", decision, input, chunks)
+
+         except PolicyDeniedError:
+             raise
+         except Exception as e:
+             latency_ms = int((time.perf_counter() - start) * 1000)
+             self._log("stream", latency_ms, "error", decision, input, error=e)
+             raise
+
+     async def astream(
+         self,
+         input: Union[str, Dict[str, Any]],
+         config: Optional[RunnableConfig] = None,
+         **kwargs,
+     ):
+         """Async stream with governance."""
+         decision = self._check_policy("stream")
+         if decision.effect == "deny":
+             self._log("stream", 0, "denied", decision)
+             raise PolicyDeniedError(decision)
+
+         start = time.perf_counter()
+         chunks = []
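+         # Chunks are buffered in memory so the complete output can be passed
+         # to the audit log once streaming finishes.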
+
+         try:
+             async for chunk in self._chain.astream(input, config=config, **kwargs):
+                 chunks.append(chunk)
+                 yield chunk
+
+             latency_ms = int((time.perf_counter() - start) * 1000)
+             self._log("stream", latency_ms, "success", decision, input, chunks)
+
+         except PolicyDeniedError:
+             raise
+         except Exception as e:
+             latency_ms = int((time.perf_counter() - start) * 1000)
+             self._log("stream", latency_ms, "error", decision, input, error=e)
+             raise
+
+     def batch(
+         self,
+         inputs: List[Union[str, Dict[str, Any]]],
+         config: Optional[RunnableConfig] = None,
+         **kwargs,
+     ) -> List[Any]:
+         """
+         Batch invoke with governance.
+
+         Args:
+             inputs: List of inputs
+             config: Runnable configuration
+             **kwargs: Additional arguments
+
+         Returns:
+             List of outputs
+         """
+         decision = self._check_policy("batch")
+         if decision.effect == "deny":
+             self._log("batch", 0, "denied", decision)
+             raise PolicyDeniedError(decision)
+
+         start = time.perf_counter()
+
+         try:
+             results = self._chain.batch(inputs, config=config, **kwargs)
+             latency_ms = int((time.perf_counter() - start) * 1000)
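+             # Batch runs log aggregate metadata (batch size) instead of
+             # per-item inputs and outputs.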
+             self._log("batch", latency_ms, "success", decision, metadata={"batch_size": len(inputs)})
+             return results
+
+         except PolicyDeniedError:
+             raise
+         except Exception as e:
+             latency_ms = int((time.perf_counter() - start) * 1000)
+             self._log("batch", latency_ms, "error", decision, error=e)
+             raise
+
+     def _log(
+         self,
+         method: str,
+         latency_ms: int,
+         status: str,
+         decision: PolicyDecision,
+         input: Any = None,
+         output: Any = None,
+         error: Optional[Exception] = None,
+         metadata: Optional[Dict] = None,
+     ) -> None:
+         """Log chain execution."""
+         log_metadata = metadata or {}
+
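+         # Input/output payloads are opt-in and truncated to 500 characters
+         # to bound audit log size.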
+         if self._log_inputs and input:
+             log_metadata["input"] = str(input)[:500]
+         if self._log_outputs and output:
+             log_metadata["output"] = str(output)[:500]
+
+         self._client._log(
+             tool=self._chain_name,
+             method=method,
+             status=status,
+             latency_ms=latency_ms,
+             policy_decision=decision,
+             error_type=type(error).__name__ if error else None,
+             error_message=str(error) if error else None,
+             **log_metadata
+         )
+
+     @property
+     def chain(self) -> Union[Runnable, Chain]:
+         """Get the underlying chain."""
+         return self._chain
+
+     def __or__(self, other: Any) -> "GovernedChain":
+         """Support pipe operator for LCEL composition."""
+         new_chain = self._chain | other
+         return GovernedChain(
+             chain=new_chain,
+             client=self._client,
+             chain_name=self._chain_name,
+             log_inputs=self._log_inputs,
+             log_outputs=self._log_outputs,
+         )
+
+     def __ror__(self, other: Any) -> "GovernedChain":
+         """Support reverse pipe operator."""
+         new_chain = other | self._chain
+         return GovernedChain(
+             chain=new_chain,
+             client=self._client,
+             chain_name=self._chain_name,
+             log_inputs=self._log_inputs,
+             log_outputs=self._log_outputs,
+         )
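+
+ # Illustrative sketch (not part of this module): composition via the pipe
+ # operators above returns a new GovernedChain that keeps the same client and
+ # logging settings. Assuming the objects from the class docstring's usage
+ # example:
+ #
+ #     from langchain_core.output_parsers import StrOutputParser
+ #
+ #     governed = GovernedChain(chain=prompt | llm, client=client)
+ #     pipeline = governed | StrOutputParser()  # still a GovernedChain
+ #     text = pipeline.invoke({"question": "Hello"})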