mbxai 0.7.2__py3-none-any.whl → 0.8.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mbxai/__init__.py +1 -1
- mbxai/examples/openrouter_example.py +45 -0
- mbxai/examples/parse_example.py +99 -0
- mbxai/examples/parse_tool_example.py +144 -0
- mbxai/examples/tool_client_example.py +126 -0
- mbxai/mcp/server.py +1 -1
- mbxai/openrouter/client.py +21 -92
- mbxai/tools/client.py +54 -76
- {mbxai-0.7.2.dist-info → mbxai-0.8.0.dist-info}/METADATA +1 -1
- mbxai-0.8.0.dist-info/RECORD +22 -0
- mbxai-0.7.2.dist-info/RECORD +0 -18
- {mbxai-0.7.2.dist-info → mbxai-0.8.0.dist-info}/WHEEL +0 -0
- {mbxai-0.7.2.dist-info → mbxai-0.8.0.dist-info}/licenses/LICENSE +0 -0
mbxai/__init__.py
CHANGED
mbxai/examples/openrouter_example.py
ADDED
@@ -0,0 +1,45 @@
+"""
+Example script demonstrating basic usage of the OpenRouterClient.
+"""
+
+import os
+import logging
+from mbxai.openrouter.client import OpenRouterClient, OpenRouterModel
+
+# Configure logging
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+)
+logger = logging.getLogger(__name__)
+
+def main():
+    # Get API token from environment variable
+    token = os.getenv("OPENROUTER_API_KEY")
+    if not token:
+        logger.error("OPENROUTER_API_KEY environment variable not set")
+        raise ValueError("Please set the OPENROUTER_API_KEY environment variable")
+
+    logger.info("Initializing OpenRouterClient with GPT-4 Turbo")
+    # Initialize the client
+    client = OpenRouterClient(
+        token=token,
+        model=OpenRouterModel.GPT4_TURBO  # Using GPT-4 Turbo as default
+    )
+
+    # Example messages
+    messages = [
+        {"role": "system", "content": "You are a helpful assistant."},
+        {"role": "user", "content": "What is the capital of France?"}
+    ]
+
+    logger.info("Sending request to OpenRouter API")
+    # Send the request
+    response = client.create(messages=messages)
+
+    # Log the response
+    logger.info("Received response from OpenRouter API")
+    logger.info(f"Response: {response}")
+
+if __name__ == "__main__":
+    main()
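Note on reading the result: the reworked create() delegates to the OpenAI SDK's chat-completions endpoint (see the mbxai/openrouter/client.py diff below), so the example's response object should carry the usual choices list. A minimal sketch, assuming that standard shape:

# Sketch only: assumes the chat-completion response shape returned by the new create().
answer = response.choices[0].message.content
print(f"Assistant: {answer}")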
mbxai/examples/parse_example.py
ADDED
@@ -0,0 +1,99 @@
+"""
+Example script demonstrating how to use the parse function with OpenRouterClient.
+"""
+
+import os
+import logging
+from typing import Any
+from pydantic import BaseModel, Field
+from mbxai.openrouter.client import OpenRouterClient, OpenRouterModel
+
+# Configure logging
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+)
+logger = logging.getLogger(__name__)
+
+# Define a Pydantic model for structured weather data
+class WeatherData(BaseModel):
+    """Weather data for a location."""
+    location: str = Field(..., description="The city name")
+    temperature: float = Field(..., description="Temperature in Celsius")
+    condition: str = Field(..., description="Weather condition (e.g., sunny, cloudy)")
+    humidity: float = Field(..., description="Humidity percentage")
+    wind_speed: float = Field(..., description="Wind speed in km/h")
+    feels_like: float = Field(..., description="Feels like temperature in Celsius")
+    precipitation_chance: float = Field(..., description="Chance of precipitation as a percentage")
+
+async def main():
+    # Get API token from environment variable
+    token = os.getenv("OPENROUTER_API_KEY")
+    if not token:
+        raise ValueError("Please set the OPENROUTER_API_KEY environment variable")
+
+    # Initialize the OpenRouter client
+    logger.info("Initializing OpenRouter client")
+    client = OpenRouterClient(
+        token=token,
+        model=OpenRouterModel.GPT41
+    )
+
+    # Example 1: Parse weather data for a single location
+    logger.info("Parsing weather data for New York")
+    messages = [
+        {
+            "role": "user",
+            "content": "What's the current weather in New York? Please provide temperature, condition, humidity, wind speed, feels like temperature, and precipitation chance."
+        }
+    ]
+
+    response = client.parse(
+        messages=messages,
+        response_format=WeatherData,
+        timeout=30.0,
+    )
+
+    weather_data = response.choices[0].message.parsed
+    print("\nWeather data for New York:")
+    print(f"Location: {weather_data.location}")
+    print(f"Temperature: {weather_data.temperature}°C")
+    print(f"Condition: {weather_data.condition}")
+    print(f"Humidity: {weather_data.humidity}%")
+    print(f"Wind Speed: {weather_data.wind_speed} km/h")
+    print(f"Feels Like: {weather_data.feels_like}°C")
+    print(f"Precipitation Chance: {weather_data.precipitation_chance}%")
+
+    # Example 2: Parse weather data for multiple locations
+    logger.info("\nParsing weather data for multiple locations")
+    messages = [
+        {
+            "role": "user",
+            "content": "Compare the weather in London and Tokyo. For each city, provide temperature, condition, humidity, wind speed, feels like temperature, and precipitation chance."
+        }
+    ]
+
+    class MultiLocationWeather(BaseModel):
+        """Weather data for multiple locations."""
+        locations: list[WeatherData] = Field(..., description="List of weather data for different locations")
+
+    response = client.parse(
+        messages=messages,
+        response_format=MultiLocationWeather,
+        timeout=30.0,
+    )
+
+    multi_weather = response.choices[0].message.parsed
+    print("\nWeather comparison:")
+    for location_data in multi_weather.locations:
+        print(f"\n{location_data.location}:")
+        print(f"Temperature: {location_data.temperature}°C")
+        print(f"Condition: {location_data.condition}")
+        print(f"Humidity: {location_data.humidity}%")
+        print(f"Wind Speed: {location_data.wind_speed} km/h")
+        print(f"Feels Like: {location_data.feels_like}°C")
+        print(f"Precipitation Chance: {location_data.precipitation_chance}%")
+
+if __name__ == "__main__":
+    import asyncio
+    asyncio.run(main())
mbxai/examples/parse_tool_example.py
ADDED
@@ -0,0 +1,144 @@
+"""
+Example script demonstrating how to use both parse and tools with OpenRouterClient.
+"""
+
+import os
+import logging
+import random
+from typing import Any
+from pydantic import BaseModel, Field
+from mbxai.openrouter.client import OpenRouterClient, OpenRouterModel
+from mbxai.tools.client import ToolClient
+
+# Configure logging
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+)
+logger = logging.getLogger(__name__)
+
+# Define a Pydantic model for structured weather data
+class WeatherData(BaseModel):
+    """Weather data for a location."""
+    location: str = Field(..., description="The city name")
+    temperature: float = Field(..., description="Temperature in Celsius")
+    condition: str = Field(..., description="Weather condition (e.g., sunny, cloudy)")
+    humidity: float = Field(..., description="Humidity percentage")
+    wind_speed: float = Field(..., description="Wind speed in km/h")
+    feels_like: float = Field(..., description="Feels like temperature in Celsius")
+    precipitation_chance: float = Field(..., description="Chance of precipitation as a percentage")
+
+# Mock weather data for demonstration
+WEATHER_DATA = {
+    "new york": {"temperature": 22.5, "condition": "sunny", "humidity": 65, "wind_speed": 12, "feels_like": 23.0, "precipitation_chance": 10},
+    "london": {"temperature": 18.2, "condition": "cloudy", "humidity": 75, "wind_speed": 8, "feels_like": 17.5, "precipitation_chance": 40},
+    "tokyo": {"temperature": 25.7, "condition": "clear", "humidity": 60, "wind_speed": 5, "feels_like": 26.0, "precipitation_chance": 5},
+    "paris": {"temperature": 20.1, "condition": "partly cloudy", "humidity": 70, "wind_speed": 10, "feels_like": 19.5, "precipitation_chance": 20},
+}
+
+def get_weather(location: str) -> dict[str, Any]:
+    """Get weather information for a location.
+
+    Args:
+        location: The city name to get weather for
+
+    Returns:
+        Weather information including temperature, condition, humidity, and wind speed
+    """
+    logger.info(f"Getting weather for location: {location}")
+
+    # Convert location to lowercase for case-insensitive matching
+    location = location.lower()
+
+    # Get weather data or generate random data for unknown locations
+    if location in WEATHER_DATA:
+        weather = WEATHER_DATA[location]
+    else:
+        logger.warning(f"No weather data for {location}, generating random data")
+        weather = {
+            "temperature": round(random.uniform(15, 30), 1),
+            "condition": random.choice(["sunny", "cloudy", "clear", "partly cloudy"]),
+            "humidity": round(random.uniform(50, 90)),
+            "wind_speed": round(random.uniform(5, 20)),
+            "feels_like": round(random.uniform(15, 30), 1),
+            "precipitation_chance": round(random.uniform(0, 100))
+        }
+
+    # Create WeatherData instance
+    weather_data = WeatherData(
+        location=location.title(),
+        temperature=weather["temperature"],
+        condition=weather["condition"],
+        humidity=weather["humidity"],
+        wind_speed=weather["wind_speed"],
+        feels_like=weather["feels_like"],
+        precipitation_chance=weather["precipitation_chance"]
+    )
+
+    logger.info(f"Weather data retrieved: {weather_data}")
+    return weather_data.model_dump()
+
+async def main():
+    # Get API token from environment variable
+    token = os.getenv("OPENROUTER_API_KEY")
+    if not token:
+        raise ValueError("Please set the OPENROUTER_API_KEY environment variable")
+
+    # Initialize the OpenRouter client
+    logger.info("Initializing OpenRouter client")
+    openrouter_client = OpenRouterClient(
+        token=token,
+        model=OpenRouterModel.GPT41
+    )
+
+    # Initialize the ToolClient
+    logger.info("Initializing ToolClient")
+    tool_client = ToolClient(openrouter_client)
+
+    # Register the weather tool
+    logger.info("Registering weather tool")
+    tool_client.register_tool(
+        name="get_weather",
+        description="Get the current weather for a location",
+        function=get_weather,
+        schema={
+            "type": "object",
+            "properties": {
+                "location": {
+                    "type": "string",
+                    "description": "The city name to get weather for"
+                }
+            },
+            "required": ["location"]
+        }
+    )
+
+    # Example 1: Get weather for a single location using tools and parse
+    logger.info("Getting weather for New York using tools and parse")
+    messages = [
+        {
+            "role": "user",
+            "content": "What's the current weather in New York? Use the get_weather tool and format the response according to the WeatherData model."
+        }
+    ]
+
+    response = tool_client.parse(
+        messages=messages,
+        response_format=WeatherData,
+        timeout=30.0,
+    )
+
+    weather_data = response.choices[0].message.parsed
+    print("\nWeather data for New York:")
+    print(f"Location: {weather_data.location}")
+    print(f"Temperature: {weather_data.temperature}°C")
+    print(f"Condition: {weather_data.condition}")
+    print(f"Humidity: {weather_data.humidity}%")
+    print(f"Wind Speed: {weather_data.wind_speed} km/h")
+    print(f"Feels Like: {weather_data.feels_like}°C")
+    print(f"Precipitation Chance: {weather_data.precipitation_chance}%")
+
+
+if __name__ == "__main__":
+    import asyncio
+    asyncio.run(main())
mbxai/examples/tool_client_example.py
ADDED
@@ -0,0 +1,126 @@
+"""
+Example script demonstrating how to use the ToolClient with a custom Weather Tool.
+"""
+
+import os
+import logging
+import random
+from typing import Any
+from pydantic import BaseModel
+from mbxai.openrouter.client import OpenRouterClient, OpenRouterModel
+from mbxai.tools.client import ToolClient
+
+# Configure logging
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+)
+logger = logging.getLogger(__name__)
+
+# Define the weather data model
+class WeatherData(BaseModel):
+    """Weather data for a location."""
+    location: str
+    temperature: float
+    condition: str
+    humidity: float
+    wind_speed: float
+
+# Mock weather data for demonstration
+WEATHER_DATA = {
+    "new york": {"temperature": 22.5, "condition": "sunny", "humidity": 65, "wind_speed": 12},
+    "london": {"temperature": 18.2, "condition": "cloudy", "humidity": 75, "wind_speed": 8},
+    "tokyo": {"temperature": 25.7, "condition": "clear", "humidity": 60, "wind_speed": 5},
+    "paris": {"temperature": 20.1, "condition": "partly cloudy", "humidity": 70, "wind_speed": 10},
+}
+
+def get_weather(location: str) -> dict[str, Any]:
+    """Get weather information for a location.
+
+    Args:
+        location: The city name to get weather for
+
+    Returns:
+        Weather information including temperature, condition, humidity, and wind speed
+    """
+    logger.info(f"Getting weather for location: {location}")
+
+    # Convert location to lowercase for case-insensitive matching
+    location = location.lower()
+
+    # Get weather data or generate random data for unknown locations
+    if location in WEATHER_DATA:
+        weather = WEATHER_DATA[location]
+    else:
+        logger.warning(f"No weather data for {location}, generating random data")
+        weather = {
+            "temperature": round(random.uniform(15, 30), 1),
+            "condition": random.choice(["sunny", "cloudy", "clear", "partly cloudy"]),
+            "humidity": round(random.uniform(50, 90)),
+            "wind_speed": round(random.uniform(5, 20))
+        }
+
+    # Create WeatherData instance
+    weather_data = WeatherData(
+        location=location.title(),
+        temperature=weather["temperature"],
+        condition=weather["condition"],
+        humidity=weather["humidity"],
+        wind_speed=weather["wind_speed"]
+    )
+
+    logger.info(f"Weather data retrieved: {weather_data}")
+    return weather_data.model_dump()
+
+async def main():
+    # Get API token from environment variable
+    token = os.getenv("OPENROUTER_API_KEY")
+    if not token:
+        raise ValueError("Please set the OPENROUTER_API_KEY environment variable")
+
+    # Initialize the OpenRouter client
+    logger.info("Initializing OpenRouter client")
+    openrouter_client = OpenRouterClient(
+        token=token,
+        model=OpenRouterModel.GPT35_TURBO
+    )
+
+    # Initialize the ToolClient
+    logger.info("Initializing ToolClient")
+    tool_client = ToolClient(openrouter_client)
+
+    # Register the weather tool
+    logger.info("Registering weather tool")
+    tool_client.register_tool(
+        name="get_weather",
+        description="Get the current weather for a location",
+        function=get_weather,
+        schema={
+            "type": "object",
+            "properties": {
+                "location": {
+                    "type": "string",
+                    "description": "The city name to get weather for"
+                }
+            },
+            "required": ["location"]
+        }
+    )
+
+    # Example 1: Simple weather query
+    logger.info("Sending weather query for New York")
+    messages = [
+        {"role": "user", "content": "What's the weather like in New York?"}
+    ]
+
+    response = tool_client.chat(
+        messages,
+        timeout=30.0,
+    )
+    logger.info("Received response from model")
+    print("\nResponse for New York weather:")
+    print(response)
+
+if __name__ == "__main__":
+    import asyncio
+    asyncio.run(main())
mbxai/mcp/server.py
CHANGED
mbxai/openrouter/client.py
CHANGED
@@ -4,7 +4,6 @@ OpenRouter client implementation.
 
 from typing import Any, Optional, Union
 from openai import OpenAI, OpenAIError
-from pydantic import BaseModel, TypeAdapter, Field
 from .models import OpenRouterModel, OpenRouterModelRegistry
 from .config import OpenRouterConfig
 import logging
@@ -95,7 +94,7 @@ class OpenRouterClient:
     def __init__(
         self,
         token: str,
-        model: Union[str, OpenRouterModel] = OpenRouterModel.
+        model: Union[str, OpenRouterModel] = OpenRouterModel.GPT35_TURBO,
         base_url: Optional[str] = None,
         default_headers: Optional[dict[str, str]] = None,
         max_retries: int = 3,
@@ -106,7 +105,7 @@ class OpenRouterClient:
 
         Args:
             token: The OpenRouter API token
-            model: The model to use (default:
+            model: The model to use (default: GPT35_TURBO)
            base_url: Optional custom base URL for the API
            default_headers: Optional default headers for API requests
            max_retries: Maximum number of retry attempts (default: 3)
@@ -187,7 +186,7 @@ class OpenRouterClient:
         self.model = value
 
     @with_retry()
-    def
+    def create(
         self,
         messages: list[dict[str, Any]],
         *,
@@ -205,19 +204,15 @@ class OpenRouterClient:
             total_size = sum(len(str(msg)) for msg in messages)
             logger.info(f"Total message size: {total_size} bytes")
 
-
-
-
-            stream
+            request = {
+                "model": model or self.model,
+                "messages": messages,
+                "stream": stream,
                 **kwargs,
-
+            }
 
-
-            logger.info("Received response from OpenRouter")
-            if hasattr(response, 'output'):
-                logger.info(f"Response output length: {len(response.output) if response.output else 0}")
-            if hasattr(response, 'output_text'):
-                logger.info(f"Response output_text length: {len(response.output_text) if response.output_text else 0}")
+            response = self._client.chat.completions.create(**request)
+            logger.info(f"Received response from OpenRouter: {len(response.choices)} choices")
 
             return response
 
@@ -235,31 +230,15 @@ class OpenRouterClient:
             self._handle_api_error("chat completion", e)
 
     @with_retry()
-    def
+    def parse(
         self,
         messages: list[dict[str, Any]],
-        response_format:
+        response_format: object,
         *,
-        model:
+        model: str | None = None,
         **kwargs: Any,
     ) -> Any:
-        """
-
-        Args:
-            messages: list of messages
-            response_format: Pydantic model to parse the response into
-            model: Optional model override
-            **kwargs: Additional parameters
-
-        Returns:
-            Parsed completion response with output and output_parsed fields
-
-        Raises:
-            OpenRouterConnectionError: For connection issues
-            OpenRouterAPIError: For API errors
-            OpenRouterError: For other errors
-            ValueError: If response parsing fails
-        """
+        """Get a chat completion from OpenRouter."""
        try:
            # Log the request details
            logger.info(f"Sending chat completion request to OpenRouter with model: {model or self.model}")
@@ -269,26 +248,15 @@ class OpenRouterClient:
             total_size = sum(len(str(msg)) for msg in messages)
             logger.info(f"Total message size: {total_size} bytes")
 
-
-
-
-
-                text_format=response_format,
+            request = {
+                "model": model or self.model,
+                "messages": messages,
+                "response_format": response_format,
                 **kwargs,
-
-
-            if not response:
-                logger.error(f"Full response content: {response}")
-                raise OpenRouterAPIError("Invalid response from OpenRouter: empty response")
+            }
 
-
-            logger.info("Received response from OpenRouter")
-            if hasattr(response, 'output'):
-                logger.info(f"Response output length: {len(response.output) if response.output else 0}")
-            if hasattr(response, 'output_parsed'):
-                logger.info("Response includes parsed output")
-            if hasattr(response, 'tool_calls'):
-                logger.info(f"Response includes {len(response.tool_calls)} tool calls")
+            response = self._client.beta.chat.completions.parse(**request)
+            logger.info(f"Received response from OpenRouter: {len(response.choices)} choices")
 
             return response
 
@@ -305,45 +273,6 @@ class OpenRouterClient:
                 logger.error("Could not read response content")
             self._handle_api_error("chat completion", e)
 
-    @with_retry()
-    def embeddings(
-        self,
-        input: Union[str, list[str]],
-        *,
-        model: Optional[Union[str, OpenRouterModel]] = None,
-        **kwargs: Any,
-    ) -> Any:
-        """Create embeddings.
-
-        Args:
-            input: Text to embed
-            model: Optional model override
-            **kwargs: Additional parameters
-
-        Returns:
-            Embeddings response
-
-        Raises:
-            OpenRouterConnectionError: For connection issues
-            OpenRouterAPIError: For API errors
-            OpenRouterError: For other errors
-        """
-        try:
-            # Remove any incompatible parameters
-            kwargs.pop("parse", None)  # Remove parse parameter if present
-
-            # Use text-embedding-ada-002 for embeddings
-            embeddings_model = "openai/text-embedding-ada-002"
-
-            return self._client.embeddings.create(
-                model=str(model or embeddings_model),
-                input=input if isinstance(input, list) else [input],
-                encoding_format="float",  # Use float format instead of base64
-                **kwargs,
-            )
-        except Exception as e:
-            self._handle_api_error("embeddings", e)
-
     @classmethod
     def register_model(cls, name: str, value: str) -> None:
         """Register a new model.
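Taken together, these hunks move the client onto the OpenAI SDK's chat-completions API: create() now builds a plain request dict and calls self._client.chat.completions.create(**request), parse() forwards the Pydantic response_format to self._client.beta.chat.completions.parse(**request), the default model becomes GPT35_TURBO, and the embeddings() helper is dropped. A minimal usage sketch based only on what the diff shows; the City model and message text here are illustrative, not part of the package:

from pydantic import BaseModel
from mbxai.openrouter.client import OpenRouterClient, OpenRouterModel

class City(BaseModel):  # illustrative response model, not shipped with mbxai
    name: str
    country: str

client = OpenRouterClient(token="...", model=OpenRouterModel.GPT35_TURBO)  # new default model

# Plain completion: a chat-completion object with .choices
chat_response = client.create(messages=[{"role": "user", "content": "Name a European capital."}])
print(chat_response.choices[0].message.content)

# Structured output: the parsed Pydantic instance is on .choices[0].message.parsed
parsed_response = client.parse(
    messages=[{"role": "user", "content": "Name a European capital and its country."}],
    response_format=City,
)
print(parsed_response.choices[0].message.parsed)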
mbxai/tools/client.py
CHANGED
@@ -2,14 +2,13 @@
 ToolClient implementation for MBX AI.
 """
 
-from typing import Any, Callable, TypeVar
+from typing import Any, Callable, TypeVar
 import logging
 import inspect
 import json
 from pydantic import BaseModel
 from ..openrouter import OpenRouterClient
-from .types import Tool
-import asyncio
+from .types import Tool
 
 logger = logging.getLogger(__name__)
 
@@ -196,7 +195,7 @@ class ToolClient:
         # Log the messages we're about to send
         self._log_messages(messages, validate_responses=False)
 
-
+    def chat(
         self,
         messages: list[dict[str, Any]],
         *,
@@ -213,7 +212,7 @@ class ToolClient:
 
         while True:
             # Get the model's response
-            response = self._client.
+            response = self._client.create(
                 messages=messages,
                 model=model,
                 stream=stream,
@@ -222,49 +221,43 @@ class ToolClient:
 
             if stream:
                 return response
-
-            # Parse the response output
-            output = response.output if response.output else response.output_text
 
-
-
-            if isinstance(output, list):
-                tool_calls = [
-                    tc for tc in output
-                    if isinstance(tc, dict) and tc.get("type") == "function_call"
-                ]
+            if not hasattr(response, 'choices'):
+                raise ValueError("No choices found in response")
 
+            if len(response.choices) == 0:
+                raise ValueError("No choices found in response")
+
+            messages.append(response.choices[0].message.dict())
+
+            # Get all function calls from the output
+            tool_calls = response.choices[0].message.tool_calls
+            logger.info(f"Tool calls: {tool_calls}")
             # Process function calls if any
             if tool_calls:
                 logger.info(f"Processing {len(tool_calls)} function calls")
 
                 # Process each function call
                 for tool_call in tool_calls:
-                    logger.info(f"Processing tool call: {tool_call
+                    logger.info(f"Processing tool call: {tool_call.function.name}")
 
                     # Get the tool
-                    tool = self._tools.get(tool_call
+                    tool = self._tools.get(tool_call.function.name)
                     if not tool:
-                        raise ValueError(f"Unknown tool: {tool_call
+                        raise ValueError(f"Unknown tool: {tool_call.function.name}")
 
                     # Parse arguments
                     try:
-                        arguments = json.loads(tool_call
+                        arguments = json.loads(tool_call.function.arguments)
                     except json.JSONDecodeError as e:
                         logger.error(f"Failed to parse tool arguments: {e}")
-                        raise ValueError(f"Invalid tool arguments format: {tool_call
+                        raise ValueError(f"Invalid tool arguments format: {tool_call.function.arguments}")
 
                     # Call the tool
                     logger.info(f"Calling tool: {tool.name} with args: {self._truncate_dict(arguments)}")
                     try:
-
-                            result = await asyncio.wait_for(tool.function(**arguments), timeout=300.0)
-                        else:
-                            result = tool.function(**arguments)
+                        result = tool.function(**arguments)
                         logger.info(f"Tool {tool.name} completed successfully")
-                    except asyncio.TimeoutError:
-                        logger.error(f"Tool {tool.name} timed out after 5 minutes")
-                        result = {"error": "Tool execution timed out after 5 minutes"}
                     except Exception as e:
                         logger.error(f"Error calling tool {tool.name}: {str(e)}")
                         result = {"error": f"Tool execution failed: {str(e)}"}
@@ -274,40 +267,31 @@ class ToolClient:
                         result = json.dumps(result)
 
                     # Append the function call and result to messages
-                    messages.append(tool_call)  # Append the model's function call
                     messages.append({
-                        "
-                        "
-                        "
+                        "role": "tool",
+                        "tool_call_id": tool_call.id,
+                        "name": tool_call.function.name,
+                        "content": result,
                     })
 
-                    logger.info(f"Added function call and output for {tool_call
+                    logger.info(f"Added function call and output for {tool_call.function.name}")
 
                 # Continue the conversation after processing all calls
                 continue
             else:
                 logger.info("Final response")
                 return response
-
-
+
+
+    def parse(
         self,
         messages: list[dict[str, Any]],
-        response_format:
+        response_format: object,
         *,
         model: str | None = None,
         **kwargs: Any,
     ) -> Any:
-        """Chat with the model
-
-        Args:
-            messages: The conversation messages
-            response_format: The Pydantic model to parse the response into
-            model: Optional model override
-            **kwargs: Additional parameters for the chat completion
-
-        Returns:
-            The parsed response from the model
-        """
+        """Chat with the model, handling tool calls."""
         tools = [tool.to_openai_function() for tool in self._tools.values()]
 
         if tools:
@@ -316,55 +300,49 @@ class ToolClient:
 
         while True:
             # Get the model's response
-            response = self._client.
+            response = self._client.parse(
                 messages=messages,
                 response_format=response_format,
                 model=model,
                 **kwargs,
             )
-
-            # Parse the response output
-            output = response.output
 
-
-
-            if isinstance(output, list):
-                tool_calls = [
-                    tc for tc in output
-                    if isinstance(tc, dict) and tc.get("type") == "function_call"
-                ]
+            if not hasattr(response, 'choices'):
+                raise ValueError("No choices found in response")
 
+            if len(response.choices) == 0:
+                raise ValueError("No choices found in response")
+
+            messages.append(response.choices[0].message.dict())
+
+            # Get all function calls from the output
+            tool_calls = response.choices[0].message.tool_calls
+            logger.info(f"Tool calls: {tool_calls}")
             # Process function calls if any
             if tool_calls:
                 logger.info(f"Processing {len(tool_calls)} function calls")
 
                 # Process each function call
                 for tool_call in tool_calls:
-                    logger.info(f"Processing tool call: {tool_call
+                    logger.info(f"Processing tool call: {tool_call.function.name}")
 
                     # Get the tool
-                    tool = self._tools.get(tool_call
+                    tool = self._tools.get(tool_call.function.name)
                     if not tool:
-                        raise ValueError(f"Unknown tool: {tool_call
+                        raise ValueError(f"Unknown tool: {tool_call.function.name}")
 
                     # Parse arguments
                     try:
-                        arguments = json.loads(tool_call
+                        arguments = json.loads(tool_call.function.arguments)
                     except json.JSONDecodeError as e:
                         logger.error(f"Failed to parse tool arguments: {e}")
-                        raise ValueError(f"Invalid tool arguments format: {tool_call
+                        raise ValueError(f"Invalid tool arguments format: {tool_call.function.arguments}")
 
                     # Call the tool
                     logger.info(f"Calling tool: {tool.name} with args: {self._truncate_dict(arguments)}")
                     try:
-
-                            result = await asyncio.wait_for(tool.function(**arguments), timeout=300.0)
-                        else:
-                            result = tool.function(**arguments)
+                        result = tool.function(**arguments)
                         logger.info(f"Tool {tool.name} completed successfully")
-                    except asyncio.TimeoutError:
-                        logger.error(f"Tool {tool.name} timed out after 5 minutes")
-                        result = {"error": "Tool execution timed out after 5 minutes"}
                     except Exception as e:
                         logger.error(f"Error calling tool {tool.name}: {str(e)}")
                         result = {"error": f"Tool execution failed: {str(e)}"}
@@ -374,17 +352,17 @@ class ToolClient:
                         result = json.dumps(result)
 
                     # Append the function call and result to messages
-                    messages.append(tool_call)  # Append the model's function call
                     messages.append({
-                        "
-                        "
-                        "
+                        "role": "tool",
+                        "tool_call_id": tool_call.id,
+                        "name": tool_call.function.name,
+                        "content": result,
                    })
 
-                    logger.info(f"Added function call and output for {tool_call
+                    logger.info(f"Added function call and output for {tool_call.function.name}")
 
                 # Continue the conversation after processing all calls
                 continue
             else:
-                logger.info("
-                return response
+                logger.info("Final response")
+                return response
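For orientation, both chat() and parse() now share the same loop: read tool calls from response.choices[0].message.tool_calls, run the registered function synchronously (the asyncio timeout handling is gone), and append a role "tool" message before continuing the conversation. A condensed sketch of that loop, assuming the tool_call objects follow the OpenAI SDK shape used in the diff:

import json

# Condensed sketch of the tool-handling loop introduced above (not the full ToolClient code).
def handle_tool_calls(response, tools, messages):
    message = response.choices[0].message
    messages.append(message.dict())  # keep the assistant turn in the history
    for tool_call in message.tool_calls or []:
        tool = tools[tool_call.function.name]  # registered tool lookup
        arguments = json.loads(tool_call.function.arguments)
        try:
            result = tool.function(**arguments)  # synchronous call
        except Exception as e:
            result = {"error": f"Tool execution failed: {e}"}
        if not isinstance(result, str):
            result = json.dumps(result)
        messages.append({
            "role": "tool",
            "tool_call_id": tool_call.id,
            "name": tool_call.function.name,
            "content": result,
        })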
mbxai-0.8.0.dist-info/RECORD
ADDED
@@ -0,0 +1,22 @@
+mbxai/__init__.py,sha256=3ljCLa8XDn3XjVIZkyCOi5pgS_0Fr2GEkziz7LY6Hrc,47
+mbxai/core.py,sha256=WMvmU9TTa7M_m-qWsUew4xH8Ul6xseCZ2iBCXJTW-Bs,196
+mbxai/examples/openrouter_example.py,sha256=-grXHKMmFLoh-yUIEMc31n8Gg1S7uSazBWCIOWxgbyQ,1317
+mbxai/examples/parse_example.py,sha256=eCKMJoOl6qwo8sDP6Trc6ncgjPlgTqi5tPE2kB5_P0k,3821
+mbxai/examples/parse_tool_example.py,sha256=duHN8scI9ZK6XZ5hdiz1Adzyc-_7tH9Ls9qP4S0bf5s,5477
+mbxai/examples/tool_client_example.py,sha256=9DNaejXLA85dPbExMiv5y76qlFhzOJF9E5EnMOsy_Dc,3993
+mbxai/mcp/__init__.py,sha256=_ek9iYdYqW5saKetj4qDci11jxesQDiHPJRpHMKkxgU,175
+mbxai/mcp/client.py,sha256=m3FBMqewv6b8ZJ7V9TnHqHxJ3acHDQDHvbuYhweI9-g,5283
+mbxai/mcp/example.py,sha256=oaol7AvvZnX86JWNz64KvPjab5gg1VjVN3G8eFSzuaE,2350
+mbxai/mcp/server.py,sha256=ZeN9Q6AMwF0eFg_n8Uu-nz0B7eO7jFomUwiWe8VrxHo,3462
+mbxai/openrouter/__init__.py,sha256=Ito9Qp_B6q-RLGAQcYyTJVWwR2YAZvNqE-HIYXxhtD8,298
+mbxai/openrouter/client.py,sha256=GhjyQ5YPDkUsOqlHF0ufkhDtU6g00hLAA5r7qKFU9co,10971
+mbxai/openrouter/config.py,sha256=Ia93s-auim9Sq71eunVDbn9ET5xX2zusXpV4JBdHAzs,3251
+mbxai/openrouter/models.py,sha256=b3IjjtZAjeGOf2rLsdnCD1HacjTnS8jmv_ZXorc-KJQ,2604
+mbxai/tools/__init__.py,sha256=ogxrHvgJ7OR62Lmd5x9Eh5d2C0jqWyQis7Zy3yKpZ78,218
+mbxai/tools/client.py,sha256=h_1fxVDBq57f_OXNsj-TBp6-r367sv6Z5nk1qLFcLO8,14951
+mbxai/tools/example.py,sha256=1HgKK39zzUuwFbnp3f0ThyWVfA_8P28PZcTwaUw5K78,2232
+mbxai/tools/types.py,sha256=PEJ2AxBqywbJCp689QZhG87rDHWNuKGnmB5CCQsAMlw,5251
+mbxai-0.8.0.dist-info/METADATA,sha256=GEPKzzsNonL8Ynf237ZsaMBzSBNA018rCw8zfOXdCMU,4147
+mbxai-0.8.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+mbxai-0.8.0.dist-info/licenses/LICENSE,sha256=hEyhc4FxwYo3NQ40yNgZ7STqwVk-1_XcTXOnAPbGJAw,1069
+mbxai-0.8.0.dist-info/RECORD,,
mbxai-0.7.2.dist-info/RECORD
DELETED
@@ -1,18 +0,0 @@
-mbxai/__init__.py,sha256=RXotiHhBVgcPh5XJj8qg6yQv3s_7UODMTOhZ3LVLsKI,47
-mbxai/core.py,sha256=WMvmU9TTa7M_m-qWsUew4xH8Ul6xseCZ2iBCXJTW-Bs,196
-mbxai/mcp/__init__.py,sha256=_ek9iYdYqW5saKetj4qDci11jxesQDiHPJRpHMKkxgU,175
-mbxai/mcp/client.py,sha256=m3FBMqewv6b8ZJ7V9TnHqHxJ3acHDQDHvbuYhweI9-g,5283
-mbxai/mcp/example.py,sha256=oaol7AvvZnX86JWNz64KvPjab5gg1VjVN3G8eFSzuaE,2350
-mbxai/mcp/server.py,sha256=FEmuSOALGSOJsuE8ywjWIaIBbvNwXafw3i5GU5hbukM,3462
-mbxai/openrouter/__init__.py,sha256=Ito9Qp_B6q-RLGAQcYyTJVWwR2YAZvNqE-HIYXxhtD8,298
-mbxai/openrouter/client.py,sha256=K8ytBeAnjHyxoN6DMH4yc1gaPFYmBk08ZLF95YhPXMU,13745
-mbxai/openrouter/config.py,sha256=Ia93s-auim9Sq71eunVDbn9ET5xX2zusXpV4JBdHAzs,3251
-mbxai/openrouter/models.py,sha256=b3IjjtZAjeGOf2rLsdnCD1HacjTnS8jmv_ZXorc-KJQ,2604
-mbxai/tools/__init__.py,sha256=ogxrHvgJ7OR62Lmd5x9Eh5d2C0jqWyQis7Zy3yKpZ78,218
-mbxai/tools/client.py,sha256=KEFs-r97c4YD-DIZnJNUoSnIWQL-cwIZ4YUYqT6OIoo,16041
-mbxai/tools/example.py,sha256=1HgKK39zzUuwFbnp3f0ThyWVfA_8P28PZcTwaUw5K78,2232
-mbxai/tools/types.py,sha256=PEJ2AxBqywbJCp689QZhG87rDHWNuKGnmB5CCQsAMlw,5251
-mbxai-0.7.2.dist-info/METADATA,sha256=KtOisBQNvvBiCf_eB6nYcGe_v5nea5LzIvOMtXBKdNw,4147
-mbxai-0.7.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-mbxai-0.7.2.dist-info/licenses/LICENSE,sha256=hEyhc4FxwYo3NQ40yNgZ7STqwVk-1_XcTXOnAPbGJAw,1069
-mbxai-0.7.2.dist-info/RECORD,,
{mbxai-0.7.2.dist-info → mbxai-0.8.0.dist-info}/WHEEL
File without changes
{mbxai-0.7.2.dist-info → mbxai-0.8.0.dist-info}/licenses/LICENSE
File without changes