axonflow-0.4.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,281 @@
+ """Google Gemini Interceptor for transparent governance.
+
+ Wraps Google Gemini GenerativeModel to automatically apply AxonFlow governance
+ without changing application code.
+
+ Example:
+     >>> import google.generativeai as genai
+     >>> from axonflow import AxonFlow
+     >>> from axonflow.interceptors.gemini import wrap_gemini_model
+     >>>
+     >>> genai.configure(api_key="your-api-key")
+     >>> model = genai.GenerativeModel('gemini-pro')
+     >>> axonflow = AxonFlow(...)
+     >>>
+     >>> # Wrap the model - governance is now automatic
+     >>> wrapped = wrap_gemini_model(model, axonflow, user_token="user-123")
+     >>>
+     >>> # Use as normal - governance happens invisibly
+     >>> response = wrapped.generate_content("What is AI governance?")
+ """
+
+ from __future__ import annotations
+
+ import asyncio
+ from functools import wraps
+ from typing import TYPE_CHECKING, Any, Callable, TypeVar
+
+ from axonflow.exceptions import PolicyViolationError
+ from axonflow.interceptors.base import BaseInterceptor
+
+ if TYPE_CHECKING:
+     from axonflow.client import AxonFlow
+
+ T = TypeVar("T")
+
+
+ class GeminiInterceptor(BaseInterceptor):
+     """Interceptor for Google Gemini GenerativeModel."""
+
+     def get_provider_name(self) -> str:
+         """Get the provider name."""
+         return "gemini"
+
+     def extract_prompt(self, *args: Any, **_kwargs: Any) -> str:
+         """Extract prompt from generate_content arguments."""
+         # First positional argument is usually the prompt or contents
+         if args:
+             content = args[0]
+             if isinstance(content, str):
+                 return content
+             if isinstance(content, list):
+                 # List of Content objects or strings
+                 texts = []
+                 for item in content:
+                     if isinstance(item, str):
+                         texts.append(item)
+                     elif hasattr(item, "parts"):
+                         for part in item.parts:
+                             if hasattr(part, "text"):
+                                 texts.append(part.text)
+                 return " ".join(texts)
+         return ""
+
+     def wrap(self, model: Any) -> Any:
+         """Wrap Gemini model with governance."""
+         return wrap_gemini_model(model, self.axonflow, user_token=self.user_token)
+
+
+ def wrap_gemini_model(
+     gemini_model: Any,
+     axonflow: AxonFlow,
+     *,
+     user_token: str = "",
+     model_name: str = "gemini-pro",
+ ) -> Any:
+     """Wrap Gemini GenerativeModel with AxonFlow governance.
+
+     Args:
+         gemini_model: Gemini GenerativeModel to wrap
+         axonflow: AxonFlow client for governance
+         user_token: User token for policy evaluation
+         model_name: Model name for audit logging
+
+     Returns:
+         Wrapped Gemini model with automatic governance
+     """
+     original_generate = gemini_model.generate_content
+     original_generate_async = getattr(gemini_model, "generate_content_async", None)
+
+     def _extract_prompt(args: tuple[Any, ...], kwargs: dict[str, Any]) -> str:
+         """Extract prompt from arguments."""
+         if args:
+             content = args[0]
+             if isinstance(content, str):
+                 return content
+             if isinstance(content, list):
+                 texts = []
+                 for item in content:
+                     if isinstance(item, str):
+                         texts.append(item)
+                     elif hasattr(item, "parts"):
+                         for part in item.parts:
+                             if hasattr(part, "text"):
+                                 texts.append(part.text)
+                 return " ".join(texts)
+         # Check kwargs for 'contents' or 'prompt'
+         contents = kwargs.get("contents", kwargs.get("prompt", ""))
+         if isinstance(contents, str):
+             return contents
+         return ""
+
+     def _get_loop() -> asyncio.AbstractEventLoop:
+         """Get or create event loop."""
+         try:
+             return asyncio.get_event_loop()
+         except RuntimeError:
+             loop = asyncio.new_event_loop()
+             asyncio.set_event_loop(loop)
+             return loop
+
+     @wraps(original_generate)
+     def sync_wrapped_generate(*args: Any, **kwargs: Any) -> Any:
+         prompt = _extract_prompt(args, kwargs)
+
+         # Check with AxonFlow (sync)
+         loop = _get_loop()
+         response = loop.run_until_complete(
+             axonflow.execute_query(
+                 user_token=user_token,
+                 query=prompt,
+                 request_type="llm_chat",
+                 context={
+                     "provider": "gemini",
+                     "model": model_name,
+                 },
+             )
+         )
+
+         if response.blocked:
+             raise PolicyViolationError(response.block_reason or "Request blocked by policy")
+
+         # Call original
+         return original_generate(*args, **kwargs)
+
+     gemini_model.generate_content = sync_wrapped_generate
+
+     # Also wrap async version if available
+     if original_generate_async:
+
+         @wraps(original_generate_async)
+         async def async_wrapped_generate(*args: Any, **kwargs: Any) -> Any:
+             prompt = _extract_prompt(args, kwargs)
+
+             # Check with AxonFlow
+             response = await axonflow.execute_query(
+                 user_token=user_token,
+                 query=prompt,
+                 request_type="llm_chat",
+                 context={
+                     "provider": "gemini",
+                     "model": model_name,
+                 },
+             )
+
+             if response.blocked:
+                 raise PolicyViolationError(response.block_reason or "Request blocked by policy")
+
+             # Call original
+             return await original_generate_async(*args, **kwargs)
+
+         gemini_model.generate_content_async = async_wrapped_generate
+
+     # Wrap start_chat to return a wrapped ChatSession
+     if hasattr(gemini_model, "start_chat"):
+         original_start_chat = gemini_model.start_chat
+
+         @wraps(original_start_chat)
+         def wrapped_start_chat(*args: Any, **kwargs: Any) -> Any:
+             chat = original_start_chat(*args, **kwargs)
+             return _wrap_chat_session(chat, axonflow, user_token, model_name)
+
+         gemini_model.start_chat = wrapped_start_chat
+
+     return gemini_model
+
+
+ def _wrap_chat_session(
+     chat_session: Any,
+     axonflow: AxonFlow,
+     user_token: str,
+     model_name: str,
+ ) -> Any:
+     """Wrap a Gemini ChatSession with governance."""
+     if not hasattr(chat_session, "send_message"):
+         return chat_session
+
+     original_send = chat_session.send_message
+     original_send_async = getattr(chat_session, "send_message_async", None)
+
+     def _get_loop() -> asyncio.AbstractEventLoop:
+         try:
+             return asyncio.get_event_loop()
+         except RuntimeError:
+             loop = asyncio.new_event_loop()
+             asyncio.set_event_loop(loop)
+             return loop
+
+     @wraps(original_send)
+     def sync_wrapped_send(content: Any, **kwargs: Any) -> Any:
+         prompt = content if isinstance(content, str) else str(content)
+
+         loop = _get_loop()
+         response = loop.run_until_complete(
+             axonflow.execute_query(
+                 user_token=user_token,
+                 query=prompt,
+                 request_type="llm_chat",
+                 context={
+                     "provider": "gemini",
+                     "model": model_name,
+                     "chat_session": True,
+                 },
+             )
+         )
+
+         if response.blocked:
+             raise PolicyViolationError(response.block_reason or "Request blocked by policy")
+
+         return original_send(content, **kwargs)
+
+     chat_session.send_message = sync_wrapped_send
+
+     if original_send_async:
+
+         @wraps(original_send_async)
+         async def async_wrapped_send(content: Any, **kwargs: Any) -> Any:
+             prompt = content if isinstance(content, str) else str(content)
+
+             response = await axonflow.execute_query(
+                 user_token=user_token,
+                 query=prompt,
+                 request_type="llm_chat",
+                 context={
+                     "provider": "gemini",
+                     "model": model_name,
+                     "chat_session": True,
+                 },
+             )
+
+             if response.blocked:
+                 raise PolicyViolationError(response.block_reason or "Request blocked by policy")
+
+             return await original_send_async(content, **kwargs)
+
+         chat_session.send_message_async = async_wrapped_send
+
+     return chat_session
+
+
+ def create_gemini_wrapper(
+     axonflow: AxonFlow,
+     user_token: str = "",
+     model_name: str = "gemini-pro",
+ ) -> Callable[[Any], Any]:
+     """Create a wrapper function for Gemini model.
+
+     Args:
+         axonflow: AxonFlow client for governance
+         user_token: User token for policy evaluation
+         model_name: Model name for audit logging
+
+     Returns:
+         Wrapper function that takes a Gemini model
+     """
+
+     def wrapper(gemini_model: Any) -> Any:
+         return wrap_gemini_model(
+             gemini_model, axonflow, user_token=user_token, model_name=model_name
+         )
+
+     return wrapper
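In practice the module above is used by wrapping an existing GenerativeModel and then calling it as usual; a policy block surfaces as PolicyViolationError before any call reaches Gemini. The sketch below illustrates that flow. It is illustrative only and assumes an AxonFlow client configured elsewhere; the API key, user token, and prompt are placeholder values, not values shipped in the package.

# Illustrative usage sketch for the Gemini interceptor above (placeholder values).
import google.generativeai as genai

from axonflow import AxonFlow
from axonflow.exceptions import PolicyViolationError
from axonflow.interceptors.gemini import wrap_gemini_model

genai.configure(api_key="your-api-key")
model = wrap_gemini_model(
    genai.GenerativeModel("gemini-pro"),
    AxonFlow(...),            # construct the governance client as in the module docstring (arguments elided there)
    user_token="user-123",    # forwarded to execute_query for policy evaluation
    model_name="gemini-pro",  # recorded in the audit context
)

try:
    response = model.generate_content("What is AI governance?")
except PolicyViolationError as exc:
    # Raised before the Gemini call when execute_query returns blocked=True.
    print(f"Blocked by policy: {exc}")

Because the synchronous wrapper drives the governance check through loop.run_until_complete, the wrapped generate_content is intended for call sites that are not already inside a running event loop; async callers should use generate_content_async, which awaits execute_query directly.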
@@ -0,0 +1,253 @@
+ """Ollama Interceptor for transparent governance.
+
+ Wraps Ollama client to automatically apply AxonFlow governance
+ without changing application code.
+
+ Ollama is a local LLM server that runs on localhost:11434 by default.
+ No authentication is required.
+
+ Example:
+     >>> from ollama import Client
+     >>> from axonflow import AxonFlow
+     >>> from axonflow.interceptors.ollama import wrap_ollama_client
+     >>>
+     >>> ollama = Client(host='http://localhost:11434')
+     >>> axonflow = AxonFlow(...)
+     >>>
+     >>> # Wrap the client - governance is now automatic
+     >>> wrapped = wrap_ollama_client(ollama, axonflow, user_token="user-123")
+     >>>
+     >>> # Use as normal - governance happens invisibly
+     >>> response = wrapped.chat(
+     ...     model='llama2',
+     ...     messages=[{'role': 'user', 'content': 'Hello!'}]
+     ... )
+ """
+
+ from __future__ import annotations
+
+ import asyncio
+ from functools import wraps
+ from typing import TYPE_CHECKING, Any, Callable, TypeVar
+
+ from axonflow.exceptions import PolicyViolationError
+ from axonflow.interceptors.base import BaseInterceptor
+
+ if TYPE_CHECKING:
+     from axonflow.client import AxonFlow
+
+ T = TypeVar("T")
+
+
+ class OllamaInterceptor(BaseInterceptor):
+     """Interceptor for Ollama client."""
+
+     def get_provider_name(self) -> str:
+         """Get the provider name."""
+         return "ollama"
+
+     def extract_prompt(self, *_args: Any, **kwargs: Any) -> str:
+         """Extract prompt from chat or generate arguments."""
+         # For chat, extract from messages
+         messages = kwargs.get("messages", [])
+         if messages:
+             return " ".join(m.get("content", "") for m in messages if isinstance(m, dict))
+         # For generate, extract from prompt
+         prompt = kwargs.get("prompt", "")
+         if prompt:
+             return str(prompt)
+         return ""
+
+     def wrap(self, client: Any) -> Any:
+         """Wrap Ollama client with governance."""
+         return wrap_ollama_client(client, self.axonflow, user_token=self.user_token)
+
+
+ def wrap_ollama_client(
+     ollama_client: Any,
+     axonflow: AxonFlow,
+     *,
+     user_token: str = "",
+ ) -> Any:
+     """Wrap Ollama client with AxonFlow governance.
+
+     Args:
+         ollama_client: Ollama client to wrap
+         axonflow: AxonFlow client for governance
+         user_token: User token for policy evaluation
+
+     Returns:
+         Wrapped Ollama client with automatic governance
+     """
+
+     def _extract_chat_prompt(kwargs: dict[str, Any]) -> str:
+         """Extract prompt from chat messages."""
+         messages = kwargs.get("messages", [])
+         return " ".join(m.get("content", "") for m in messages if isinstance(m, dict))
+
+     def _get_loop() -> asyncio.AbstractEventLoop:
+         """Get or create event loop."""
+         try:
+             return asyncio.get_event_loop()
+         except RuntimeError:
+             loop = asyncio.new_event_loop()
+             asyncio.set_event_loop(loop)
+             return loop
+
+     # Wrap chat method
+     if hasattr(ollama_client, "chat"):
+         original_chat = ollama_client.chat
+
+         @wraps(original_chat)
+         def sync_wrapped_chat(*args: Any, **kwargs: Any) -> Any:
+             prompt = _extract_chat_prompt(kwargs)
+             model = kwargs.get("model", "llama2")
+
+             loop = _get_loop()
+             response = loop.run_until_complete(
+                 axonflow.execute_query(
+                     user_token=user_token,
+                     query=prompt,
+                     request_type="llm_chat",
+                     context={
+                         "provider": "ollama",
+                         "model": model,
+                     },
+                 )
+             )
+
+             if response.blocked:
+                 raise PolicyViolationError(response.block_reason or "Request blocked by policy")
+
+             return original_chat(*args, **kwargs)
+
+         ollama_client.chat = sync_wrapped_chat
+
+     # Wrap generate method
+     if hasattr(ollama_client, "generate"):
+         original_generate = ollama_client.generate
+
+         @wraps(original_generate)
+         def sync_wrapped_generate(*args: Any, **kwargs: Any) -> Any:
+             prompt = kwargs.get("prompt", "")
+             model = kwargs.get("model", "llama2")
+
+             loop = _get_loop()
+             response = loop.run_until_complete(
+                 axonflow.execute_query(
+                     user_token=user_token,
+                     query=prompt,
+                     request_type="llm_chat",
+                     context={
+                         "provider": "ollama",
+                         "model": model,
+                     },
+                 )
+             )
+
+             if response.blocked:
+                 raise PolicyViolationError(response.block_reason or "Request blocked by policy")
+
+             return original_generate(*args, **kwargs)
+
+         ollama_client.generate = sync_wrapped_generate
+
+     return ollama_client
+
+
+ async def wrap_ollama_client_async(
+     ollama_client: Any,
+     axonflow: AxonFlow,
+     *,
+     user_token: str = "",
+ ) -> Any:
+     """Wrap async Ollama client with AxonFlow governance.
+
+     For use with ollama-python's AsyncClient.
+
+     Args:
+         ollama_client: Async Ollama client to wrap
+         axonflow: AxonFlow client for governance
+         user_token: User token for policy evaluation
+
+     Returns:
+         Wrapped async Ollama client with automatic governance
+     """
+
+     def _extract_chat_prompt(kwargs: dict[str, Any]) -> str:
+         messages = kwargs.get("messages", [])
+         return " ".join(m.get("content", "") for m in messages if isinstance(m, dict))
+
+     # Wrap async chat method
+     if hasattr(ollama_client, "chat"):
+         original_chat = ollama_client.chat
+
+         @wraps(original_chat)
+         async def async_wrapped_chat(*args: Any, **kwargs: Any) -> Any:
+             prompt = _extract_chat_prompt(kwargs)
+             model = kwargs.get("model", "llama2")
+
+             response = await axonflow.execute_query(
+                 user_token=user_token,
+                 query=prompt,
+                 request_type="llm_chat",
+                 context={
+                     "provider": "ollama",
+                     "model": model,
+                 },
+             )
+
+             if response.blocked:
+                 raise PolicyViolationError(response.block_reason or "Request blocked by policy")
+
+             return await original_chat(*args, **kwargs)
+
+         ollama_client.chat = async_wrapped_chat
+
+     # Wrap async generate method
+     if hasattr(ollama_client, "generate"):
+         original_generate = ollama_client.generate
+
+         @wraps(original_generate)
+         async def async_wrapped_generate(*args: Any, **kwargs: Any) -> Any:
+             prompt = kwargs.get("prompt", "")
+             model = kwargs.get("model", "llama2")
+
+             response = await axonflow.execute_query(
+                 user_token=user_token,
+                 query=prompt,
+                 request_type="llm_chat",
+                 context={
+                     "provider": "ollama",
+                     "model": model,
+                 },
+             )
+
+             if response.blocked:
+                 raise PolicyViolationError(response.block_reason or "Request blocked by policy")
+
+             return await original_generate(*args, **kwargs)
+
+         ollama_client.generate = async_wrapped_generate
+
+     return ollama_client
+
+
+ def create_ollama_wrapper(
+     axonflow: AxonFlow,
+     user_token: str = "",
+ ) -> Callable[[Any], Any]:
+     """Create a wrapper function for Ollama client.
+
+     Args:
+         axonflow: AxonFlow client for governance
+         user_token: User token for policy evaluation
+
+     Returns:
+         Wrapper function that takes an Ollama client
+     """
+
+     def wrapper(ollama_client: Any) -> Any:
+         return wrap_ollama_client(ollama_client, axonflow, user_token=user_token)
+
+     return wrapper
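The async variant, wrap_ollama_client_async, is itself a coroutine, so it must be awaited before the wrapped client is used. A minimal sketch, assuming ollama-python's AsyncClient and an AxonFlow client configured elsewhere; the host, model, user token, and message content are illustrative placeholders.

# Illustrative usage sketch for the async Ollama path above (placeholder values).
import asyncio

from ollama import AsyncClient

from axonflow import AxonFlow
from axonflow.exceptions import PolicyViolationError
from axonflow.interceptors.ollama import wrap_ollama_client_async


async def main() -> None:
    # The async wrapper is a coroutine: await it to get the governed client back.
    client = await wrap_ollama_client_async(
        AsyncClient(host="http://localhost:11434"),
        AxonFlow(...),  # construct the governance client as in the module docstring (arguments elided there)
        user_token="user-123",
    )
    try:
        reply = await client.chat(
            model="llama2",
            messages=[{"role": "user", "content": "Hello!"}],
        )
        print(reply)
    except PolicyViolationError as exc:
        # Raised before the Ollama call when execute_query returns blocked=True.
        print(f"Blocked by policy: {exc}")


asyncio.run(main())

Note that the chat and generate wrappers read model, messages, and prompt from keyword arguments only, so passing them positionally would leave the governance check with an empty prompt.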
@@ -0,0 +1,160 @@
+ """OpenAI Interceptor for transparent governance.
+
+ Wraps OpenAI client to automatically apply AxonFlow governance
+ without changing application code.
+
+ Example:
+     >>> from openai import OpenAI
+     >>> from axonflow import AxonFlow
+     >>> from axonflow.interceptors.openai import wrap_openai_client
+     >>>
+     >>> openai = OpenAI()
+     >>> axonflow = AxonFlow(...)
+     >>>
+     >>> # Wrap the client - governance is now automatic
+     >>> wrapped = wrap_openai_client(openai, axonflow)
+     >>>
+     >>> # Use as normal - governance happens invisibly
+     >>> response = wrapped.chat.completions.create(
+     ...     model="gpt-4",
+     ...     messages=[{"role": "user", "content": "Hello!"}]
+     ... )
+ """
+
+ from __future__ import annotations
+
+ import asyncio
+ from functools import wraps
+ from typing import TYPE_CHECKING, Any, Callable, TypeVar
+
+ from axonflow.exceptions import PolicyViolationError
+ from axonflow.interceptors.base import BaseInterceptor
+
+ if TYPE_CHECKING:
+     from axonflow.client import AxonFlow
+
+ T = TypeVar("T")
+
+
+ class OpenAIInterceptor(BaseInterceptor):
+     """Interceptor for OpenAI client."""
+
+     def get_provider_name(self) -> str:
+         """Get the provider name."""
+         return "openai"
+
+     def extract_prompt(self, *_args: Any, **kwargs: Any) -> str:
+         """Extract prompt from chat completions arguments."""
+         messages = kwargs.get("messages", [])
+         return " ".join(m.get("content", "") for m in messages if isinstance(m, dict))
+
+     def wrap(self, client: Any) -> Any:
+         """Wrap OpenAI client with governance."""
+         return wrap_openai_client(client, self.axonflow, user_token=self.user_token)
+
+
+ def wrap_openai_client(
+     openai_client: Any,
+     axonflow: AxonFlow,
+     *,
+     user_token: str = "",
+ ) -> Any:
+     """Wrap OpenAI client with AxonFlow governance.
+
+     Args:
+         openai_client: OpenAI client to wrap
+         axonflow: AxonFlow client for governance
+         user_token: User token for policy evaluation
+
+     Returns:
+         Wrapped OpenAI client with automatic governance
+     """
+     original_create = openai_client.chat.completions.create
+
+     def _extract_prompt(kwargs: dict[str, Any]) -> str:
+         """Extract prompt from messages."""
+         messages = kwargs.get("messages", [])
+         return " ".join(m.get("content", "") for m in messages if isinstance(m, dict))
+
+     def _get_loop() -> asyncio.AbstractEventLoop:
+         """Get or create event loop."""
+         try:
+             return asyncio.get_event_loop()
+         except RuntimeError:
+             loop = asyncio.new_event_loop()
+             asyncio.set_event_loop(loop)
+             return loop
+
+     if asyncio.iscoroutinefunction(original_create):
+
+         @wraps(original_create)
+         async def async_wrapped_create(*args: Any, **kwargs: Any) -> Any:
+             prompt = _extract_prompt(kwargs)
+
+             # Check with AxonFlow
+             response = await axonflow.execute_query(
+                 user_token=user_token,
+                 query=prompt,
+                 request_type="llm_chat",
+                 context={
+                     "provider": "openai",
+                     "model": kwargs.get("model", "gpt-4"),
+                     "parameters": {
+                         k: v for k, v in kwargs.items() if k not in ("messages", "model")
+                     },
+                 },
+             )
+
+             if response.blocked:
+                 raise PolicyViolationError(response.block_reason or "Request blocked by policy")
+
+             # Call original
+             return await original_create(*args, **kwargs)
+
+         openai_client.chat.completions.create = async_wrapped_create
+     else:
+
+         @wraps(original_create)
+         def sync_wrapped_create(*args: Any, **kwargs: Any) -> Any:
+             prompt = _extract_prompt(kwargs)
+
+             # Check with AxonFlow (sync)
+             loop = _get_loop()
+             response = loop.run_until_complete(
+                 axonflow.execute_query(
+                     user_token=user_token,
+                     query=prompt,
+                     request_type="llm_chat",
+                     context={
+                         "provider": "openai",
+                         "model": kwargs.get("model", "gpt-4"),
+                     },
+                 )
+             )
+
+             if response.blocked:
+                 raise PolicyViolationError(response.block_reason or "Request blocked by policy")
+
+             # Call original
+             return original_create(*args, **kwargs)
+
+         openai_client.chat.completions.create = sync_wrapped_create
+
+     return openai_client
+
+
+ def create_openai_wrapper(axonflow: AxonFlow, user_token: str = "") -> Callable[[Any], Any]:
+     """Create a wrapper function for OpenAI client.
+
+     Args:
+         axonflow: AxonFlow client for governance
+         user_token: User token for policy evaluation
+
+     Returns:
+         Wrapper function that takes an OpenAI client
+     """
+
+     def wrapper(openai_client: Any) -> Any:
+         return wrap_openai_client(openai_client, axonflow, user_token=user_token)
+
+     return wrapper
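create_openai_wrapper packages the same behaviour as a reusable factory, which is convenient when several clients share one governance configuration. A minimal sketch, assuming an AxonFlow client configured elsewhere; the model name, user token, and message are placeholders.

# Illustrative usage sketch for create_openai_wrapper above (placeholder values).
from openai import OpenAI

from axonflow import AxonFlow
from axonflow.exceptions import PolicyViolationError
from axonflow.interceptors.openai import create_openai_wrapper

govern = create_openai_wrapper(AxonFlow(...), user_token="user-123")  # AxonFlow arguments elided as in the docstrings
client = govern(OpenAI())  # chat.completions.create is now policy-checked

try:
    completion = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": "Hello!"}],
    )
except PolicyViolationError as exc:
    # Raised before the OpenAI call when execute_query returns blocked=True.
    print(f"Blocked by policy: {exc}")

As with the other synchronous paths in this diff, the sync branch runs execute_query through loop.run_until_complete, so it is meant for code that is not already inside a running event loop; when the wrapped create is a coroutine function (an async OpenAI client), the async branch is installed instead and awaits the check directly.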