axonflow-0.4.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
axonflow/exceptions.py ADDED
@@ -0,0 +1,103 @@
+ """AxonFlow SDK Exceptions.
+
+ Custom exception hierarchy for clear error handling.
+ """
+
+ from __future__ import annotations
+
+ from typing import Any
+
+
+ class AxonFlowError(Exception):
+     """Base exception for all AxonFlow errors."""
+
+     def __init__(self, message: str, details: dict[str, Any] | None = None) -> None:
+         self.message = message
+         self.details = details or {}
+         super().__init__(message)
+
+
+ class ConfigurationError(AxonFlowError):
+     """Invalid configuration."""
+
+
+ class AuthenticationError(AxonFlowError):
+     """Authentication failed."""
+
+
+ class PolicyViolationError(AxonFlowError):
+     """Request blocked by policy."""
+
+     def __init__(
+         self,
+         message: str,
+         policy: str | None = None,
+         block_reason: str | None = None,
+     ) -> None:
+         super().__init__(
+             message,
+             details={"policy": policy, "block_reason": block_reason},
+         )
+         self.policy = policy
+         self.block_reason = block_reason
+
+
+ class RateLimitError(AxonFlowError):
+     """Rate limit exceeded."""
+
+     def __init__(
+         self,
+         message: str,
+         limit: int,
+         remaining: int,
+         reset_at: str | None = None,
+     ) -> None:
+         super().__init__(
+             message,
+             details={"limit": limit, "remaining": remaining, "reset_at": reset_at},
+         )
+         self.limit = limit
+         self.remaining = remaining
+         self.reset_at = reset_at
+
+
+ class ConnectionError(AxonFlowError):
+     """Connection to AxonFlow Agent failed."""
+
+
+ class TimeoutError(AxonFlowError):
+     """Request timed out."""
+
+
+ class ConnectorError(AxonFlowError):
+     """MCP connector error."""
+
+     def __init__(
+         self,
+         message: str,
+         connector: str | None = None,
+         operation: str | None = None,
+     ) -> None:
+         super().__init__(
+             message,
+             details={"connector": connector, "operation": operation},
+         )
+         self.connector = connector
+         self.operation = operation
+
+
+ class PlanExecutionError(AxonFlowError):
+     """Multi-agent plan execution failed."""
+
+     def __init__(
+         self,
+         message: str,
+         plan_id: str | None = None,
+         step: str | None = None,
+     ) -> None:
+         super().__init__(
+             message,
+             details={"plan_id": plan_id, "step": step},
+         )
+         self.plan_id = plan_id
+         self.step = step
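
The classes above share AxonFlowError as a single root, so callers can handle specific failures first and fall back to one catch-all. A minimal usage sketch (the do_governed_call function is hypothetical, standing in for any call routed through AxonFlow):

    from axonflow.exceptions import AxonFlowError, PolicyViolationError, RateLimitError

    try:
        result = do_governed_call()  # hypothetical call routed through AxonFlow
    except PolicyViolationError as exc:
        print(f"Blocked by policy {exc.policy}: {exc.block_reason}")
    except RateLimitError as exc:
        print(f"Rate limited ({exc.remaining}/{exc.limit}); resets at {exc.reset_at}")
    except AxonFlowError as exc:
        # Catch-all: every SDK error carries .message and a .details dict.
        print(f"AxonFlow error: {exc.message}", exc.details)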
axonflow/interceptors/__init__.py ADDED
@@ -0,0 +1,20 @@
+ """AxonFlow LLM Provider Interceptors.
+
+ Interceptors allow transparent governance integration with popular LLM providers.
+ """
+
+ from axonflow.interceptors.anthropic import wrap_anthropic_client
+ from axonflow.interceptors.base import BaseInterceptor
+ from axonflow.interceptors.bedrock import wrap_bedrock_client
+ from axonflow.interceptors.gemini import wrap_gemini_model
+ from axonflow.interceptors.ollama import wrap_ollama_client
+ from axonflow.interceptors.openai import wrap_openai_client
+
+ __all__ = [
+     "BaseInterceptor",
+     "wrap_openai_client",
+     "wrap_anthropic_client",
+     "wrap_gemini_model",
+     "wrap_ollama_client",
+     "wrap_bedrock_client",
+ ]
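
This __init__ re-exports one wrap function per provider, so applications can import from axonflow.interceptors directly instead of the per-provider modules. A sketch, mirroring the Anthropic example below (the AxonFlow constructor arguments are elided, as in the module docstrings):

    from openai import OpenAI

    from axonflow import AxonFlow
    from axonflow.interceptors import wrap_openai_client

    client = wrap_openai_client(OpenAI(), AxonFlow(...))
    # client is now governed, same pattern as the Anthropic and Bedrock wrappers below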
axonflow/interceptors/anthropic.py ADDED
@@ -0,0 +1,184 @@
+ """Anthropic Interceptor for transparent governance.
+
+ Wraps Anthropic client to automatically apply AxonFlow governance
+ without changing application code.
+
+ Example:
+     >>> from anthropic import Anthropic
+     >>> from axonflow import AxonFlow
+     >>> from axonflow.interceptors.anthropic import wrap_anthropic_client
+     >>>
+     >>> anthropic = Anthropic()
+     >>> axonflow = AxonFlow(...)
+     >>>
+     >>> # Wrap the client - governance is now automatic
+     >>> wrapped = wrap_anthropic_client(anthropic, axonflow)
+     >>>
+     >>> # Use as normal - governance happens invisibly
+     >>> response = wrapped.messages.create(
+     ...     model="claude-3-sonnet-20240229",
+     ...     max_tokens=1024,
+     ...     messages=[{"role": "user", "content": "Hello!"}]
+     ... )
+ """
+
+ from __future__ import annotations
+
+ import asyncio
+ from functools import wraps
+ from typing import TYPE_CHECKING, Any, Callable, TypeVar
+
+ from axonflow.exceptions import PolicyViolationError
+ from axonflow.interceptors.base import BaseInterceptor
+
+ if TYPE_CHECKING:
+     from axonflow.client import AxonFlow
+
+ T = TypeVar("T")
+
+
+ class AnthropicInterceptor(BaseInterceptor):
+     """Interceptor for Anthropic client."""
+
+     def get_provider_name(self) -> str:
+         """Get the provider name."""
+         return "anthropic"
+
+     def extract_prompt(self, *_args: Any, **kwargs: Any) -> str:
+         """Extract prompt from messages arguments."""
+         messages = kwargs.get("messages", [])
+         parts = []
+         for m in messages:
+             if isinstance(m, dict):
+                 content = m.get("content", "")
+                 if isinstance(content, str):
+                     parts.append(content)
+                 elif isinstance(content, list):
+                     # Handle content blocks
+                     for block in content:
+                         if isinstance(block, dict) and block.get("type") == "text":
+                             parts.append(block.get("text", ""))
+         return " ".join(parts)
+
+     def wrap(self, client: Any) -> Any:
+         """Wrap Anthropic client with governance."""
+         return wrap_anthropic_client(client, self.axonflow, user_token=self.user_token)
+
+
+ def wrap_anthropic_client(
+     anthropic_client: Any,
+     axonflow: AxonFlow,
+     *,
+     user_token: str = "",
+ ) -> Any:
+     """Wrap Anthropic client with AxonFlow governance.
+
+     Args:
+         anthropic_client: Anthropic client to wrap
+         axonflow: AxonFlow client for governance
+         user_token: User token for policy evaluation
+
+     Returns:
+         Wrapped Anthropic client with automatic governance
+     """
+     original_create = anthropic_client.messages.create
+
+     def _extract_prompt(kwargs: dict[str, Any]) -> str:
+         """Extract prompt from messages."""
+         messages = kwargs.get("messages", [])
+         parts = []
+         for m in messages:
+             if isinstance(m, dict):
+                 content = m.get("content", "")
+                 if isinstance(content, str):
+                     parts.append(content)
+                 elif isinstance(content, list):
+                     for block in content:
+                         if isinstance(block, dict) and block.get("type") == "text":
+                             parts.append(block.get("text", ""))
+         return " ".join(parts)
+
+     def _get_loop() -> asyncio.AbstractEventLoop:
+         """Get or create event loop."""
+         try:
+             return asyncio.get_event_loop()
+         except RuntimeError:
+             loop = asyncio.new_event_loop()
+             asyncio.set_event_loop(loop)
+             return loop
+
+     if asyncio.iscoroutinefunction(original_create):
+
+         @wraps(original_create)
+         async def async_wrapped_create(*args: Any, **kwargs: Any) -> Any:
+             prompt = _extract_prompt(kwargs)
+
+             # Check with AxonFlow
+             response = await axonflow.execute_query(
+                 user_token=user_token,
+                 query=prompt,
+                 request_type="llm_chat",
+                 context={
+                     "provider": "anthropic",
+                     "model": kwargs.get("model", "claude-3-sonnet"),
+                     "parameters": {
+                         k: v
+                         for k, v in kwargs.items()
+                         if k not in ("messages", "model", "max_tokens")
+                     },
+                 },
+             )
+
+             if response.blocked:
+                 raise PolicyViolationError(response.block_reason or "Request blocked by policy")
+
+             # Call original
+             return await original_create(*args, **kwargs)
+
+         anthropic_client.messages.create = async_wrapped_create
+     else:
+
+         @wraps(original_create)
+         def sync_wrapped_create(*args: Any, **kwargs: Any) -> Any:
+             prompt = _extract_prompt(kwargs)
+
+             # Check with AxonFlow (sync)
+             loop = _get_loop()
+             response = loop.run_until_complete(
+                 axonflow.execute_query(
+                     user_token=user_token,
+                     query=prompt,
+                     request_type="llm_chat",
+                     context={
+                         "provider": "anthropic",
+                         "model": kwargs.get("model", "claude-3-sonnet"),
+                     },
+                 )
+             )
+
+             if response.blocked:
+                 raise PolicyViolationError(response.block_reason or "Request blocked by policy")
+
+             # Call original
+             return original_create(*args, **kwargs)
+
+         anthropic_client.messages.create = sync_wrapped_create
+
+     return anthropic_client
+
+
+ def create_anthropic_wrapper(axonflow: AxonFlow, user_token: str = "") -> Callable[[Any], Any]:
+     """Create a wrapper function for Anthropic client.
+
+     Args:
+         axonflow: AxonFlow client for governance
+         user_token: User token for policy evaluation
+
+     Returns:
+         Wrapper function that takes an Anthropic client
+     """
+
+     def wrapper(anthropic_client: Any) -> Any:
+         return wrap_anthropic_client(anthropic_client, axonflow, user_token=user_token)
+
+     return wrapper
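
create_anthropic_wrapper is the factory form of wrap_anthropic_client: configure the AxonFlow client and user token once, then apply the returned function to any number of Anthropic clients. A sketch (constructor arguments elided, as in the module docstring):

    from anthropic import Anthropic

    from axonflow import AxonFlow
    from axonflow.interceptors.anthropic import create_anthropic_wrapper

    wrap = create_anthropic_wrapper(AxonFlow(...), user_token="user-123")
    client_a = wrap(Anthropic())
    client_b = wrap(Anthropic())
    # Both clients now run every messages.create through execute_query first.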
axonflow/interceptors/base.py ADDED
@@ -0,0 +1,58 @@
+ """Base Interceptor class for LLM providers."""
+
+ from __future__ import annotations
+
+ from abc import ABC, abstractmethod
+ from typing import TYPE_CHECKING, Any
+
+ if TYPE_CHECKING:
+     from axonflow.client import AxonFlow
+
+
+ class BaseInterceptor(ABC):
+     """Base class for LLM provider interceptors.
+
+     Interceptors wrap LLM client methods to automatically apply
+     AxonFlow governance without changing application code.
+     """
+
+     def __init__(self, axonflow: AxonFlow, user_token: str = "") -> None:
+         """Initialize interceptor.
+
+         Args:
+             axonflow: AxonFlow client for governance
+             user_token: User token for policy evaluation
+         """
+         self.axonflow = axonflow
+         self.user_token = user_token
+
+     @abstractmethod
+     def wrap(self, client: Any) -> Any:
+         """Wrap an LLM client with governance.
+
+         Args:
+             client: The LLM client to wrap
+
+         Returns:
+             Wrapped client with automatic governance
+         """
+
+     @abstractmethod
+     def extract_prompt(self, *args: Any, **kwargs: Any) -> str:
+         """Extract prompt from method arguments.
+
+         Args:
+             args: Positional arguments
+             kwargs: Keyword arguments
+
+         Returns:
+             Extracted prompt string
+         """
+
+     @abstractmethod
+     def get_provider_name(self) -> str:
+         """Get the provider name.
+
+         Returns:
+             Provider name (e.g., "openai", "anthropic")
+         """
axonflow/interceptors/bedrock.py ADDED
@@ -0,0 +1,231 @@
+ """AWS Bedrock Interceptor for transparent governance.
+
+ Wraps AWS Bedrock Runtime client to automatically apply AxonFlow governance
+ without changing application code.
+
+ Bedrock uses AWS IAM authentication (no API keys required).
+ Supports multiple model providers: Anthropic Claude, Amazon Titan, Meta Llama, etc.
+
+ Example:
+     >>> import boto3
+     >>> from axonflow import AxonFlow
+     >>> from axonflow.interceptors.bedrock import wrap_bedrock_client
+     >>>
+     >>> bedrock = boto3.client('bedrock-runtime', region_name='us-east-1')
+     >>> axonflow = AxonFlow(...)
+     >>>
+     >>> # Wrap the client - governance is now automatic
+     >>> wrapped = wrap_bedrock_client(bedrock, axonflow, user_token="user-123")
+     >>>
+     >>> # Use as normal - governance happens invisibly
+     >>> response = wrapped.invoke_model(
+     ...     modelId='anthropic.claude-3-sonnet-20240229-v1:0',
+     ...     body=json.dumps({...})
+     ... )
+ """
+
+ from __future__ import annotations
+
+ import asyncio
+ import json
+ from functools import wraps
+ from typing import TYPE_CHECKING, Any, Callable, TypeVar
+
+ from axonflow.exceptions import PolicyViolationError
+ from axonflow.interceptors.base import BaseInterceptor
+
+ if TYPE_CHECKING:
+     from axonflow.client import AxonFlow
+
+ T = TypeVar("T")
+
+
+ # Common Bedrock model IDs
+ class BedrockModels:
+     """Common Bedrock model identifiers."""
+
+     # Anthropic Claude models
+     CLAUDE_3_OPUS = "anthropic.claude-3-opus-20240229-v1:0"
+     CLAUDE_3_SONNET = "anthropic.claude-3-sonnet-20240229-v1:0"
+     CLAUDE_3_HAIKU = "anthropic.claude-3-haiku-20240307-v1:0"
+     CLAUDE_2_1 = "anthropic.claude-v2:1"
+     CLAUDE_2 = "anthropic.claude-v2"
+     CLAUDE_INSTANT = "anthropic.claude-instant-v1"
+
+     # Amazon Titan models
+     TITAN_TEXT_EXPRESS = "amazon.titan-text-express-v1"
+     TITAN_TEXT_LITE = "amazon.titan-text-lite-v1"
+     TITAN_TEXT_PREMIER = "amazon.titan-text-premier-v1:0"
+
+     # Meta Llama models
+     LLAMA2_13B = "meta.llama2-13b-chat-v1"
+     LLAMA2_70B = "meta.llama2-70b-chat-v1"
+     LLAMA3_8B = "meta.llama3-8b-instruct-v1:0"
+     LLAMA3_70B = "meta.llama3-70b-instruct-v1:0"
+
+
+ class BedrockInterceptor(BaseInterceptor):
+     """Interceptor for AWS Bedrock Runtime client."""
+
+     def get_provider_name(self) -> str:
+         """Get the provider name."""
+         return "bedrock"
+
+     def extract_prompt(self, *_args: Any, **kwargs: Any) -> str:
+         """Extract prompt from invoke_model arguments."""
+         body = kwargs.get("body", "")
+         if isinstance(body, (bytes, bytearray)):
+             body = body.decode("utf-8")
+         if isinstance(body, str):
+             try:
+                 parsed = json.loads(body)
+                 # Claude format
+                 if "messages" in parsed:
+                     return " ".join(
+                         m.get("content", "") for m in parsed["messages"] if isinstance(m, dict)
+                     )
+                 # Titan format
+                 if "inputText" in parsed:
+                     return str(parsed["inputText"])
+                 # Generic prompt
+                 if "prompt" in parsed:
+                     return str(parsed["prompt"])
+             except json.JSONDecodeError:
+                 pass
+         return ""
+
+     def wrap(self, client: Any) -> Any:
+         """Wrap Bedrock client with governance."""
+         return wrap_bedrock_client(client, self.axonflow, user_token=self.user_token)
+
+
+ def wrap_bedrock_client(
+     bedrock_client: Any,
+     axonflow: AxonFlow,
+     *,
+     user_token: str = "",
+ ) -> Any:
+     """Wrap AWS Bedrock Runtime client with AxonFlow governance.
+
+     Args:
+         bedrock_client: AWS Bedrock Runtime client (boto3.client('bedrock-runtime'))
+         axonflow: AxonFlow client for governance
+         user_token: User token for policy evaluation
+
+     Returns:
+         Wrapped Bedrock client with automatic governance
+     """
+
+     def _extract_prompt(body: Any, _model_id: str) -> str:
+         """Extract prompt from request body."""
+         if isinstance(body, (bytes, bytearray)):
+             body = body.decode("utf-8")
+         if isinstance(body, str):
+             try:
+                 parsed = json.loads(body)
+                 # Claude format
+                 if "messages" in parsed:
+                     return " ".join(
+                         m.get("content", "") for m in parsed["messages"] if isinstance(m, dict)
+                     )
+                 # Titan format
+                 if "inputText" in parsed:
+                     return str(parsed["inputText"])
+                 # Generic
+                 if "prompt" in parsed:
+                     return str(parsed["prompt"])
+             except json.JSONDecodeError:
+                 pass
+         return ""
+
+     def _get_loop() -> asyncio.AbstractEventLoop:
+         """Get or create event loop."""
+         try:
+             return asyncio.get_event_loop()
+         except RuntimeError:
+             loop = asyncio.new_event_loop()
+             asyncio.set_event_loop(loop)
+             return loop
+
+     # Wrap invoke_model
+     if hasattr(bedrock_client, "invoke_model"):
+         original_invoke = bedrock_client.invoke_model
+
+         @wraps(original_invoke)
+         def sync_wrapped_invoke(*args: Any, **kwargs: Any) -> Any:
+             model_id = kwargs.get("modelId", "unknown")
+             body = kwargs.get("body", "")
+             prompt = _extract_prompt(body, model_id)
+
+             loop = _get_loop()
+             response = loop.run_until_complete(
+                 axonflow.execute_query(
+                     user_token=user_token,
+                     query=prompt,
+                     request_type="llm_chat",
+                     context={
+                         "provider": "bedrock",
+                         "model": model_id,
+                     },
+                 )
+             )
+
+             if response.blocked:
+                 raise PolicyViolationError(response.block_reason or "Request blocked by policy")
+
+             return original_invoke(*args, **kwargs)
+
+         bedrock_client.invoke_model = sync_wrapped_invoke
+
+     # Wrap invoke_model_with_response_stream
+     if hasattr(bedrock_client, "invoke_model_with_response_stream"):
+         original_stream = bedrock_client.invoke_model_with_response_stream
+
+         @wraps(original_stream)
+         def sync_wrapped_stream(*args: Any, **kwargs: Any) -> Any:
+             model_id = kwargs.get("modelId", "unknown")
+             body = kwargs.get("body", "")
+             prompt = _extract_prompt(body, model_id)
+
+             loop = _get_loop()
+             response = loop.run_until_complete(
+                 axonflow.execute_query(
+                     user_token=user_token,
+                     query=prompt,
+                     request_type="llm_chat",
+                     context={
+                         "provider": "bedrock",
+                         "model": model_id,
+                         "streaming": True,
+                     },
+                 )
+             )
+
+             if response.blocked:
+                 raise PolicyViolationError(response.block_reason or "Request blocked by policy")
+
+             return original_stream(*args, **kwargs)
+
+         bedrock_client.invoke_model_with_response_stream = sync_wrapped_stream
+
+     return bedrock_client
+
+
+ def create_bedrock_wrapper(
+     axonflow: AxonFlow,
+     user_token: str = "",
+ ) -> Callable[[Any], Any]:
+     """Create a wrapper function for Bedrock client.
+
+     Args:
+         axonflow: AxonFlow client for governance
+         user_token: User token for policy evaluation
+
+     Returns:
+         Wrapper function that takes a Bedrock client
+     """
+
+     def wrapper(bedrock_client: Any) -> Any:
+         return wrap_bedrock_client(bedrock_client, axonflow, user_token=user_token)
+
+     return wrapper
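
End to end, the Bedrock wrapper composes with the exception hierarchy from exceptions.py. A sketch (AWS credentials and AxonFlow constructor arguments elided, as in the module docstring; the request body follows the Bedrock Claude Messages format):

    import json

    import boto3

    from axonflow import AxonFlow
    from axonflow.exceptions import PolicyViolationError
    from axonflow.interceptors.bedrock import BedrockModels, wrap_bedrock_client

    bedrock = wrap_bedrock_client(
        boto3.client("bedrock-runtime", region_name="us-east-1"),
        AxonFlow(...),
        user_token="user-123",
    )

    try:
        response = bedrock.invoke_model(
            modelId=BedrockModels.CLAUDE_3_HAIKU,
            body=json.dumps({
                "anthropic_version": "bedrock-2023-05-31",
                "max_tokens": 256,
                "messages": [{"role": "user", "content": "Hello!"}],
            }),
        )
    except PolicyViolationError as exc:
        print(f"Blocked: {exc.block_reason}")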