axcent-0.1.0.tar.gz
This diff shows the contents of a publicly released package version as it appears in its public registry.
- axcent-0.1.0/LICENSE +21 -0
- axcent-0.1.0/MANIFEST.in +2 -0
- axcent-0.1.0/PKG-INFO +110 -0
- axcent-0.1.0/README.md +81 -0
- axcent-0.1.0/axcent/__init__.py +2 -0
- axcent-0.1.0/axcent/core.py +93 -0
- axcent-0.1.0/axcent/llm.py +215 -0
- axcent-0.1.0/axcent/tools.py +56 -0
- axcent-0.1.0/axcent.egg-info/PKG-INFO +110 -0
- axcent-0.1.0/axcent.egg-info/SOURCES.txt +14 -0
- axcent-0.1.0/axcent.egg-info/dependency_links.txt +1 -0
- axcent-0.1.0/axcent.egg-info/requires.txt +5 -0
- axcent-0.1.0/axcent.egg-info/top_level.txt +1 -0
- axcent-0.1.0/pyproject.toml +3 -0
- axcent-0.1.0/setup.cfg +4 -0
- axcent-0.1.0/setup.py +27 -0
axcent-0.1.0/LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2026 Mohin Uddin Shipon

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
axcent-0.1.0/MANIFEST.in
ADDED
axcent-0.1.0/PKG-INFO
ADDED
@@ -0,0 +1,110 @@
Metadata-Version: 2.4
Name: axcent
Version: 0.1.0
Summary: The easiest way to build AI agents in Python
Home-page: https://github.com/ssshiponu/axcent
Author: Mohin Uddin Shipon
Author-email: sshiponudin22@gmail.com
Classifier: Programming Language :: Python :: 3
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Requires-Python: >=3.8
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: openai>=1.0.0
Requires-Dist: python-dotenv>=1.0.0
Provides-Extra: gemini
Requires-Dist: google-genai>=1.57.0; extra == "gemini"
Dynamic: author
Dynamic: author-email
Dynamic: classifier
Dynamic: description
Dynamic: description-content-type
Dynamic: home-page
Dynamic: license-file
Dynamic: provides-extra
Dynamic: requires-dist
Dynamic: requires-python
Dynamic: summary

# Axcent

**The easiest way to build AI agents in Python.**

Axcent is a lightweight framework that lets you build powerful AI agents with tool calling, context caching, and multi-backend support in just a few lines of code.

## Installation

```bash
pip install axcent
```

To use Gemini models:

```bash
pip install "axcent[gemini]"
```

## Quick Start (OpenAI)

```python
import os
from axcent import Agent

# Set your API key
os.environ["OPENAI_API_KEY"] = "sk-..."

# Initialize the agent
agent = Agent(system_prompt="You are a helpful assistant.")

# Register a tool
@agent.tool
def get_weather(city: str) -> str:
    """Returns weather info for a city."""
    return f"The weather in {city} is sunny!"

# Ask away!
response = agent.ask("What is the weather in Tokyo?")
print(response)
```

## Features

- **Simple Tool Registration**: Just use `@agent.tool`.
- **Automatic Context Caching**: Optimizes token usage by keeping the prompt structure stable across requests.
- **Token Monitoring**: Track prompt, completion, and cached tokens via `agent.get_total_usage()`.
- **Backend Agnostic**:
  - **OpenAI**: First-class support.
  - **Google Gemini**: Support for the latest Gemini models.
  - **OpenRouter**: Use any model via OpenRouter's OpenAI-compatible API.

## Multi-Backend Usage

### Google Gemini

```python
import os
from axcent import Agent, GeminiBackend

# Set the API key (GOOGLE_API_KEY also works)
os.environ["GEMINI_API_KEY"] = "AIza..."

# Use the Gemini backend (built on the google-genai SDK)
backend = GeminiBackend(model="gemini-3-flash")
agent = Agent(system_prompt="You are a helper.", backend=backend)
```

### OpenRouter

```python
import os
from axcent import Agent

os.environ["OPENAI_API_KEY"] = "sk-or-..."
os.environ["OPENAI_BASE_URL"] = "https://openrouter.ai/api/v1"

agent = Agent(system_prompt="You are a helper.")
```

## License

MIT
axcent-0.1.0/README.md
ADDED
@@ -0,0 +1,81 @@
# Axcent

**The easiest way to build AI agents in Python.**

Axcent is a lightweight framework that lets you build powerful AI agents with tool calling, context caching, and multi-backend support in just a few lines of code.

## Installation

```bash
pip install axcent
```

To use Gemini models:

```bash
pip install "axcent[gemini]"
```

## Quick Start (OpenAI)

```python
import os
from axcent import Agent

# Set your API key
os.environ["OPENAI_API_KEY"] = "sk-..."

# Initialize the agent
agent = Agent(system_prompt="You are a helpful assistant.")

# Register a tool
@agent.tool
def get_weather(city: str) -> str:
    """Returns weather info for a city."""
    return f"The weather in {city} is sunny!"

# Ask away!
response = agent.ask("What is the weather in Tokyo?")
print(response)
```

## Features

- **Simple Tool Registration**: Just use `@agent.tool`.
- **Automatic Context Caching**: Optimizes token usage by keeping the prompt structure stable across requests.
- **Token Monitoring**: Track prompt, completion, and cached tokens via `agent.get_total_usage()`.
- **Backend Agnostic**:
  - **OpenAI**: First-class support.
  - **Google Gemini**: Support for the latest Gemini models.
  - **OpenRouter**: Use any model via OpenRouter's OpenAI-compatible API.

## Multi-Backend Usage

### Google Gemini

```python
import os
from axcent import Agent, GeminiBackend

# Set the API key (GOOGLE_API_KEY also works)
os.environ["GEMINI_API_KEY"] = "AIza..."

# Use the Gemini backend (built on the google-genai SDK)
backend = GeminiBackend(model="gemini-3-flash")
agent = Agent(system_prompt="You are a helper.", backend=backend)
```

### OpenRouter

```python
import os
from axcent import Agent

os.environ["OPENAI_API_KEY"] = "sk-or-..."
os.environ["OPENAI_BASE_URL"] = "https://openrouter.ai/api/v1"

agent = Agent(system_prompt="You are a helper.")
```

## License

MIT
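The token-monitoring feature above is backed by `Agent.usage_history` in `axcent/core.py` (shown below). A minimal sketch of how the aggregation behaves, using the offline `MockBackend` from `axcent/llm.py` and made-up token counts (not real API output):

```python
from axcent.core import Agent
from axcent.llm import MockBackend

agent = Agent(backend=MockBackend())

# Pretend two requests were made; these numbers are invented for illustration.
agent.usage_history = [
    {"prompt_tokens": 120, "completion_tokens": 30, "total_tokens": 150, "cached_tokens": 0},
    {"prompt_tokens": 140, "completion_tokens": 25, "total_tokens": 165, "cached_tokens": 100},
]

print(agent.get_total_usage())
# {'prompt_tokens': 260, 'completion_tokens': 55, 'total_tokens': 315, 'cached_tokens': 100}
```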
axcent-0.1.0/axcent/core.py
ADDED
@@ -0,0 +1,93 @@
import json
from typing import Callable, Dict, List, Any, Optional
from .tools import function_to_schema
from .llm import LLMBackend, OpenAIBackend

class Agent:
    def __init__(self, system_prompt: str = "You are a helpful assistant.", backend: LLMBackend = None, model: str = "gpt-4o-mini"):
        self.system_prompt = system_prompt
        self.backend = backend or OpenAIBackend(model=model)
        self.history: List[Dict[str, Any]] = [{"role": "system", "content": system_prompt}]
        self.tools: Dict[str, Callable] = {}
        self.tool_schemas: List[Dict[str, Any]] = []
        self.usage_history: List[Dict[str, int]] = []

    def get_total_usage(self) -> Dict[str, int]:
        """Returns the total token usage across all requests."""
        total = {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0, "cached_tokens": 0}
        for usage in self.usage_history:
            total["prompt_tokens"] += usage.get("prompt_tokens", 0)
            total["completion_tokens"] += usage.get("completion_tokens", 0)
            total["total_tokens"] += usage.get("total_tokens", 0)
            total["cached_tokens"] += usage.get("cached_tokens", 0)
        return total

    def tool(self, func: Callable):
        """Decorator to register a tool."""
        schema = function_to_schema(func)
        self.tools[func.__name__] = func
        self.tool_schemas.append(schema)
        return func

    def ask(self, query: str) -> str:
        """
        Sends a query to the agent and returns the response.
        Handles tool calls automatically.
        """
        self.history.append({"role": "user", "content": query})

        while True:
            # Sort tools by name so the prompt structure stays stable across
            # requests (this is what lets OpenAI prompt caching hit).
            current_tools = sorted(self.tool_schemas, key=lambda x: x['function']['name']) if self.tool_schemas else None
            response = self.backend.chat(self.history, tools=current_tools)
            message = response.choices[0].message

            # Track usage
            if hasattr(response, 'usage') and response.usage:
                usage_data = {
                    "prompt_tokens": response.usage.prompt_tokens,
                    "completion_tokens": response.usage.completion_tokens,
                    "total_tokens": response.usage.total_tokens,
                    "cached_tokens": 0
                }
                # Check for cached tokens (OpenAI-specific structure)
                if hasattr(response.usage, 'prompt_tokens_details') and response.usage.prompt_tokens_details:
                    if hasattr(response.usage.prompt_tokens_details, 'cached_tokens'):
                        usage_data["cached_tokens"] = response.usage.prompt_tokens_details.cached_tokens

                self.usage_history.append(usage_data)

            # Convert the message to a dict so it can be sent back on the next API call
            message_dict = {
                "role": message.role,
                "content": message.content,
            }
            if message.tool_calls:
                message_dict["tool_calls"] = message.tool_calls

            self.history.append(message_dict)

            if hasattr(message, 'tool_calls') and message.tool_calls:
                for tool_call in message.tool_calls:
                    function_name = tool_call.function.name
                    arguments = json.loads(tool_call.function.arguments)

                    if function_name in self.tools:
                        func = self.tools[function_name]
                        try:
                            result = func(**arguments)
                            content = str(result)
                        except Exception as e:
                            content = f"Error executing tool: {e}"
                    else:
                        content = f"Error: Tool {function_name} not found."

                    self.history.append({
                        "role": "tool",
                        "tool_call_id": tool_call.id,
                        "content": content
                    })
                # Loop again so the LLM can process the tool outputs
            else:
                # No tool calls; return the final content
                return message.content
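The `ask()` loop above can be exercised without network access using the `MockBackend` defined in `axcent/llm.py` below. A minimal sketch, assuming the package is importable from a source checkout:

```python
from axcent.core import Agent
from axcent.llm import MockBackend

backend = MockBackend(responses=["Hello from the mock!"])
agent = Agent(system_prompt="You are a test agent.", backend=backend)

print(agent.ask("Hi"))            # -> "Hello from the mock!"
print(len(backend.calls))         # -> 1; every chat() call is recorded
print(agent.history[-1]["role"])  # -> "assistant"
```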
axcent-0.1.0/axcent/llm.py
ADDED
@@ -0,0 +1,215 @@
import os
import json
from abc import ABC, abstractmethod
from typing import List, Dict, Any, Optional

class LLMBackend(ABC):
    @abstractmethod
    def chat(self, messages: List[Dict[str, Any]], tools: Optional[List[Dict[str, Any]]] = None) -> Any:
        pass

class OpenAIBackend(LLMBackend):
    def __init__(self, api_key: str = None, model: str = "gpt-4o-mini", base_url: str = None):
        import openai
        self.client = openai.OpenAI(
            api_key=api_key or os.environ.get("OPENAI_API_KEY"),
            base_url=base_url or os.environ.get("OPENAI_BASE_URL")
        )
        self.model = model

    def chat(self, messages: List[Dict[str, Any]], tools: Optional[List[Dict[str, Any]]] = None) -> Any:
        # 'tools' is expected to be the list of OpenAI tool schemas.
        kwargs = {
            "model": self.model,
            "messages": messages,
        }
        if tools:
            kwargs["tools"] = tools
            kwargs["tool_choice"] = "auto"

        return self.client.chat.completions.create(**kwargs)

class MockBackend(LLMBackend):
    def __init__(self, responses: List[str] = None):
        self.responses = responses or ["I am a mock agent."]
        self.calls = []

    def chat(self, messages: List[Dict[str, Any]], tools: Optional[List[Dict[str, Any]]] = None) -> Any:
        self.calls.append({"messages": messages, "tools": tools})

        # Simple mock response objects mimicking OpenAI's structure.
        # 'role' is included because Agent.ask() reads message.role.
        class MockChoice:
            def __init__(self, content):
                self.message = type('obj', (object,), {'content': content, 'tool_calls': None, 'role': 'assistant'})

        class MockResponse:
            def __init__(self, content):
                self.choices = [MockChoice(content)]

        return MockResponse(self.responses.pop(0) if self.responses else "No more mock responses.")

class GeminiBackend(LLMBackend):
    def __init__(self, api_key: str = None, model: str = "gemini-2.5-flash"):
        from google import genai
        key = api_key or os.environ.get("GEMINI_API_KEY") or os.environ.get("GOOGLE_API_KEY")
        if not key:
            raise ValueError("GEMINI_API_KEY or GOOGLE_API_KEY not found.")
        self.client = genai.Client(api_key=key)
        self.model_name = model

    def _convert_tools(self, tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Converts OpenAI tool schemas to Gemini tool definitions."""
        if not tools:
            return None

        # google-genai accepts raw callables or Tool objects; for manual schema
        # control we pass 'function_declarations' dicts, which carry the same
        # fields as OpenAI's schema, nested one level differently.
        function_declarations = []
        for tool in tools:
            fn = tool['function']
            function_declarations.append({
                "name": fn['name'],
                "description": fn.get('description'),
                "parameters": fn.get('parameters')
            })

        # The API expects: tools=[{'function_declarations': [...]}]
        return [{"function_declarations": function_declarations}]

    def chat(self, messages: List[Dict[str, Any]], tools: Optional[List[Dict[str, Any]]] = None) -> Any:
        from google.genai import types

        gemini_tools = self._convert_tools(tools)

        # Convert history: the SDK uses 'contents' with roles 'user'/'model'
        # and 'parts'; the system prompt is passed separately via the config.
        contents = []
        system_instruction = None

        # Map tool_call_id -> function name so tool results can be attributed.
        tool_map = {}
        for m in messages:
            if m['role'] == 'assistant' and 'tool_calls' in m:
                for tc in m['tool_calls']:
                    tool_map[tc.id] = tc.function.name

        for msg in messages:
            role = msg['role']
            content = msg.get('content')

            if role == "system":
                system_instruction = content
            elif role == "user":
                contents.append({"role": "user", "parts": [{"text": content}]})
            elif role == "assistant":
                parts = []
                if content:
                    parts.append({"text": content})
                if "tool_calls" in msg:
                    for tc in msg["tool_calls"]:
                        parts.append({
                            "function_call": {
                                "name": tc.function.name,
                                "args": json.loads(tc.function.arguments)
                            }
                        })
                contents.append({"role": "model", "parts": parts})
            elif role == "tool":
                # The Gemini API represents tool results as a user-side
                # 'function_response' part.
                fname = tool_map.get(msg.get('tool_call_id'), 'unknown')
                contents.append({
                    "role": "user",
                    "parts": [{
                        "function_response": {
                            "name": fname,
                            "response": {"result": content}
                        }
                    }]
                })

        config = types.GenerateContentConfig(
            system_instruction=system_instruction,
            tools=gemini_tools,
            temperature=0.7,  # default setting
        )

        response = self.client.models.generate_content(
            model=self.model_name,
            contents=contents,
            config=config
        )

        # Map the response back to OpenAI-compatible objects
        class OpenAICompatMessage:
            def __init__(self, content, tool_calls=None, role="assistant"):
                self.content = content
                self.tool_calls = tool_calls
                self.role = role

        class OpenAICompatChoice:
            def __init__(self, message):
                self.message = message

        class OpenAICompatResponse:
            def __init__(self, choices, usage=None):
                self.choices = choices
                self.usage = usage

        content_text = None
        tool_calls = []

        # Parse candidates
        if response.candidates and response.candidates[0].content.parts:
            for part in response.candidates[0].content.parts:
                if part.text:
                    content_text = (content_text or "") + part.text
                if part.function_call:
                    tool_calls.append(type('obj', (object,), {
                        'id': 'call_' + part.function_call.name,
                        'function': type('obj', (object,), {
                            'name': part.function_call.name,
                            'arguments': json.dumps(part.function_call.args)
                        })
                    }))

        # Usage: cached_content_token_count is populated when implicit caching hits.
        usage = None
        if response.usage_metadata:
            usage = type('obj', (object,), {
                'prompt_tokens': response.usage_metadata.prompt_token_count,
                'completion_tokens': response.usage_metadata.candidates_token_count,
                'total_tokens': response.usage_metadata.total_token_count,
                'prompt_tokens_details': type('obj', (object,), {
                    'cached_tokens': getattr(response.usage_metadata, 'cached_content_token_count', 0)
                })
            })

        return OpenAICompatResponse(
            choices=[OpenAICompatChoice(OpenAICompatMessage(content_text, tool_calls if tool_calls else None))],
            usage=usage
        )
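For reference, a sketch of the shape `GeminiBackend._convert_tools` produces for a single tool. The `get_weather` schema is the README's example, written out as the dict `function_to_schema` would emit; this is plain-dict illustration, not SDK code:

```python
# OpenAI-style schema, as produced by axcent.tools.function_to_schema:
openai_tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Returns weather info for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}

# _convert_tools([openai_tool]) re-nests the same fields under
# 'function_declarations', which is the shape the Gemini API expects:
gemini_tools = [{
    "function_declarations": [{
        "name": "get_weather",
        "description": "Returns weather info for a city.",
        "parameters": openai_tool["function"]["parameters"],
    }]
}]
```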
axcent-0.1.0/axcent/tools.py
ADDED
@@ -0,0 +1,56 @@
import inspect
import typing

def type_to_json_type(t: typing.Any) -> str:
    """Maps Python types to JSON Schema types."""
    if t == str:
        return "string"
    elif t == int:
        return "integer"
    elif t == float:
        return "number"
    elif t == bool:
        return "boolean"
    elif t == list or typing.get_origin(t) == list:
        return "array"
    elif t == dict or typing.get_origin(t) == dict:
        return "object"
    else:
        return "string"  # default fallback

def function_to_schema(func: typing.Callable) -> dict:
    """
    Converts a Python function into an OpenAI tool schema.
    Uses type hints and docstrings.
    """
    sig = inspect.signature(func)
    doc = inspect.getdoc(func) or "No description provided."
    name = func.__name__

    parameters = {
        "type": "object",
        "properties": {},
        "required": [],
    }

    for param_name, param in sig.parameters.items():
        if param_name == "self":
            continue

        param_type = type_to_json_type(param.annotation) if param.annotation != inspect.Parameter.empty else "string"

        parameters["properties"][param_name] = {
            "type": param_type
        }

        if param.default == inspect.Parameter.empty:
            parameters["required"].append(param_name)

    return {
        "type": "function",
        "function": {
            "name": name,
            "description": doc,
            "parameters": parameters,
        }
    }
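A sketch of `function_to_schema` applied to a small typed function (the `add` helper is hypothetical, not part of the package):

```python
from axcent.tools import function_to_schema

def add(a: int, b: float = 1.0) -> float:
    """Adds two numbers."""
    return a + b

schema = function_to_schema(add)
# {'type': 'function',
#  'function': {'name': 'add',
#               'description': 'Adds two numbers.',
#               'parameters': {'type': 'object',
#                              'properties': {'a': {'type': 'integer'},
#                                             'b': {'type': 'number'}},
#                              'required': ['a']}}}
# 'b' has a default value, so it is omitted from 'required'.
```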
axcent-0.1.0/axcent.egg-info/PKG-INFO
ADDED
@@ -0,0 +1,110 @@
(content identical to axcent-0.1.0/PKG-INFO above)
axcent-0.1.0/axcent.egg-info/SOURCES.txt
ADDED
@@ -0,0 +1,14 @@
LICENSE
MANIFEST.in
README.md
pyproject.toml
setup.py
axcent/__init__.py
axcent/core.py
axcent/llm.py
axcent/tools.py
axcent.egg-info/PKG-INFO
axcent.egg-info/SOURCES.txt
axcent.egg-info/dependency_links.txt
axcent.egg-info/requires.txt
axcent.egg-info/top_level.txt
axcent-0.1.0/axcent.egg-info/dependency_links.txt
ADDED
@@ -0,0 +1 @@

axcent-0.1.0/axcent.egg-info/top_level.txt
ADDED
@@ -0,0 +1 @@
axcent
axcent-0.1.0/setup.cfg
ADDED
axcent-0.1.0/setup.py
ADDED
@@ -0,0 +1,27 @@
import os
from setuptools import setup, find_packages

setup(
    name="axcent",
    version="0.1.0",
    description="The easiest way to build AI agents in Python",
    long_description=open("README.md").read() if os.path.exists("README.md") else "",
    long_description_content_type="text/markdown",
    author="Mohin Uddin Shipon",
    author_email="sshiponudin22@gmail.com",
    url="https://github.com/ssshiponu/axcent",
    packages=find_packages(),
    install_requires=[
        "openai>=1.0.0",
        "python-dotenv>=1.0.0",
    ],
    extras_require={
        "gemini": ["google-genai>=1.57.0"],
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.8',
)
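Given the `extras_require` above, a source checkout can be installed in editable mode with the Gemini extra as sketched below (the quotes keep shells like zsh from globbing the brackets):

```bash
git clone https://github.com/ssshiponu/axcent
cd axcent
pip install -e ".[gemini]"
```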