openrouter-provider 0.1.1__py3-none-any.whl → 1.0.1__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- OpenRouterProvider/LLMs.py → llms.py +15 -3
- message.py +99 -0
- OpenRouterProvider/Chatbot_manager.py → openrouter.py +74 -78
- openrouter_provider-1.0.1.dist-info/METADATA +430 -0
- openrouter_provider-1.0.1.dist-info/RECORD +10 -0
- openrouter_provider-1.0.1.dist-info/top_level.txt +6 -0
- openrouter_provider.py +232 -0
- OpenRouterProvider/Tool.py → tool.py +9 -12
- OpenRouterProvider/Chat_message.py +0 -142
- OpenRouterProvider/OpenRouterProvider.py +0 -266
- openrouter_provider-0.1.1.dist-info/METADATA +0 -232
- openrouter_provider-0.1.1.dist-info/RECORD +0 -10
- openrouter_provider-0.1.1.dist-info/top_level.txt +0 -2
- {openrouter_provider-0.1.1.dist-info → openrouter_provider-1.0.1.dist-info}/WHEEL +0 -0
OpenRouterProvider/LLMs.py → llms.py

@@ -15,16 +15,25 @@ gpt_4_1_mini = LLMModel(name='openai/gpt-4.1-mini', input_cost=0.4, output_cost=
 gpt_4_1_nano = LLMModel(name='openai/gpt-4.1-nano', input_cost=0.1, output_cost=0.4)
 o4_mini = LLMModel(name='openai/o4-mini', input_cost=1.1, output_cost=4.4)
 o4_mini_high = LLMModel(name='openai/o4-mini-high', input_cost=1.1, output_cost=4.4)
-o3 = LLMModel(name='openai/o3', input_cost=
+o3 = LLMModel(name='openai/o3', input_cost=2, output_cost=8)
+gpt_5_nano = LLMModel(name='openai/gpt-5-nano', input_cost=0.05, output_cost=0.4)
+gpt_5_mini = LLMModel(name='openai/gpt-5-mini', input_cost=0.25, output_cost=2)
+gpt_5 = LLMModel(name='openai/gpt-5', input_cost=1.25, output_cost=10)
+gpt_oss_20B_free = LLMModel(name='openai/gpt-oss-20b:free', input_cost=0, output_cost=0)
+gpt_oss_20B = LLMModel(name='openai/gpt-oss-20b', input_cost=0.06, output_cost=0.2)
+gpt_oss_120B = LLMModel(name='openai/gpt-oss-120b', input_cost=0.25, output_cost=0.69)
 
 # Anthropic
 claude_3_7_sonnet = LLMModel(name='anthropic/claude-3.7-sonnet', input_cost=3.0, output_cost=15.0)
 claude_3_7_sonnet_thinking = LLMModel(name='anthropic/claude-3.7-sonnet:thinking', input_cost=3.0, output_cost=15.0)
 claude_3_5_haiku = LLMModel(name='anthropic/claude-3.5-haiku', input_cost=0.8, output_cost=4.0)
+claude_4_sonnet = LLMModel(name='anthropic/claude-sonnet-4', input_cost=3.0, output_cost=15.0)
+claude_4_opus = LLMModel(name='anthropic/claude-opus-4', input_cost=15, output_cost=75)
+claude_4_1_opus = LLMModel(name='anthropic/claude-opus-4.1', input_cost=15, output_cost=75)
 
 # Google
 gemini_2_0_flash = LLMModel(name='google/gemini-2.0-flash-001', input_cost=0.1, output_cost=0.4)
-
+gemini_2_5_flash_lite = LLMModel(name='google/gemini-2.5-flash-lite', input_cost=0.1, output_cost=0.4)
 gemini_2_5_flash = LLMModel(name='google/gemini-2.5-flash-preview', input_cost=0.15, output_cost=0.60)
 gemini_2_5_flash_thinking = LLMModel(name='google/gemini-2.5-flash-preview:thinking', input_cost=0.15, output_cost=3.5)
 gemini_2_5_pro = LLMModel(name='google/gemini-2.5-pro-preview-03-25', input_cost=1.25, output_cost=10)
@@ -34,10 +43,12 @@ deepseek_v3_free = LLMModel(name='deepseek/deepseek-chat-v3-0324:free', input_co
 deepseek_v3 = LLMModel(name='deepseek/deepseek-chat-v3-0324', input_cost=0.3, output_cost=1.2)
 deepseek_r1_free = LLMModel(name='deepseek/deepseek-r1:free', input_cost=0, output_cost=0)
 deepseek_r1 = LLMModel(name='deepseek/deepseek-r1', input_cost=0.5, output_cost=2.2)
+deepseek_v3_1 = LLMModel(name='deepseek/deepseek-chat-v3.1', input_cost=0.55, output_cost=1.68)
 
 # xAI
 grok_3_mini = LLMModel(name='x-ai/grok-3-mini-beta', input_cost=0.3, output_cost=0.5)
 grok_3 = LLMModel(name='x-ai/grok-3-beta', input_cost=3, output_cost=15)
+grok_4 = LLMModel(name='x-ai/grok-4', input_cost=3, output_cost=15)
 
 # Microsoft
 mai_ds_r1_free = LLMModel(name="microsoft/mai-ds-r1:free", input_cost=0, output_cost=0)
@@ -45,4 +56,5 @@ mai_ds_r1_free = LLMModel(name="microsoft/mai-ds-r1:free", input_cost=0, output_
 # Others
 llama_4_maverick_free = LLMModel(name="meta-llama/llama-4-maverick:free", input_cost=0, output_cost=0)
 llama_4_scout = LLMModel(name="meta-llama/llama-4-scout", input_cost=0.11, output_cost=0.34)
-mistral_small_3_1_24B_free = LLMModel(name="mistralai/mistral-small-3.1-24b-instruct:free", input_cost=0, output_cost=0)
+mistral_small_3_1_24B_free = LLMModel(name="mistralai/mistral-small-3.1-24b-instruct:free", input_cost=0, output_cost=0)
+
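The catalog above pairs each OpenRouter model ID with an input_cost and output_cost. The units are not stated in this diff; the values line up with USD per million tokens, so the sketch below makes that assumption. The LLMModel stand-in and estimate_cost_usd helper are hypothetical, defined here only to keep the example self-contained:

    from dataclasses import dataclass

    @dataclass
    class LLMModel:  # hypothetical stand-in mirroring the fields used in llms.py
        name: str
        input_cost: float
        output_cost: float

    gpt_5_mini = LLMModel(name='openai/gpt-5-mini', input_cost=0.25, output_cost=2)

    def estimate_cost_usd(model: LLMModel, input_tokens: int, output_tokens: int) -> float:
        # Assumes costs are USD per 1M tokens; the package does not state the unit.
        return (input_tokens * model.input_cost + output_tokens * model.output_cost) / 1_000_000

    print(estimate_cost_usd(gpt_5_mini, 1_000, 500))  # 0.00125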
message.py
ADDED
@@ -0,0 +1,99 @@
+from __future__ import annotations
+from dataclasses import dataclass
+from enum import Enum
+from io import BytesIO
+import base64
+from typing import Optional, Any
+
+from PIL import Image
+from openai.types.chat import ChatCompletion
+
+from .llms import LLMModel
+
+
+class Role(Enum):
+    system = "system"
+    user = "user"
+    ai = "assistant"
+    agent = "agent"
+    tool = "tool"
+
+
+@dataclass
+class ToolCall:
+    id: str
+    name: str
+    arguments: dict
+    result: Any = ""
+
+
+class Message:
+    def __init__(
+        self,
+        text: str,
+        images: Optional[list[Image.Image]] = None,
+        role: Role = Role.user,
+        answered_by: Optional[LLMModel] = None,
+        raw_response: Optional[ChatCompletion] = None
+    ) -> None:
+        self.role = role
+        self.text = text
+        self.images = self._process_image(images)
+        self.answered_by = answered_by
+        self.tool_calls: list[ToolCall] = []
+        self.raw_response = raw_response
+
+
+    def __str__(self) -> str:
+        BLUE = "\033[34m"
+        GREEN = "\033[32m"
+        RESET = "\033[0m"
+
+        message = ""
+
+        if self.role == Role.system:
+            message = "---------------------- System ----------------------\n"
+        elif self.role == Role.user:
+            message = BLUE + "----------------------- User -----------------------\n" + RESET
+        elif self.role == Role.ai:
+            message = GREEN + "--------------------- Assistant --------------------\n" + RESET
+
+        message += self.text + RESET + "\n"
+
+        return message
+
+    def _process_image(self, images: Optional[list[Image.Image]]) -> Optional[list[str]]:
+        if images is None:
+            return None
+
+        base64_images = []
+        for image in images:
+            if image.mode == "RGBA":
+                image = image.convert("RGB")
+
+            image = self._resize_image_aspect_ratio(image)
+            image = self._convert_to_base64(image)
+            base64_images.append(image)
+
+        return base64_images
+
+    def _convert_to_base64(self, image: Image.Image) -> str:
+        buffered = BytesIO()
+        format_type = image.format if image.format else 'JPEG'
+        image.save(buffered, format=format_type)
+        img_bytes = buffered.getvalue()
+        img_base64 = base64.b64encode(img_bytes).decode('utf-8')
+
+        return img_base64
+
+    def _resize_image_aspect_ratio(self, image: Image.Image, target_length: int = 1024) -> Image.Image:
+        width, height = image.size
+
+        if width > height:
+            new_width = target_length
+            new_height = int((target_length / width) * height)
+        else:
+            new_height = target_length
+            new_width = int((target_length / height) * width)
+
+        return image.resize((new_width, new_height))
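A short usage sketch for the new Message class, based only on the code above. The bare `from message import ...` path is an assumption from the wheel's new top-level layout; note the module itself uses a relative `from .llms import LLMModel`, so the working import path may differ in practice:

    from PIL import Image
    from message import Message, Role  # import path assumed

    img = Image.open("photo.jpg")  # any PIL image; RGBA inputs are converted to RGB
    msg = Message(text="What is in this picture?", images=[img], role=Role.user)

    # __str__ renders a colored role banner followed by the text.
    print(msg)

    # Images are stored as base64 strings, resized so the longer side is 1024 px.
    print(len(msg.images))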
OpenRouterProvider/Chatbot_manager.py → openrouter.py

@@ -1,13 +1,16 @@
-from
-from .OpenRouterProvider import *
-from .LLMs import LLMModel
-
-from dotenv import load_dotenv
-import time
+from __future__ import annotations
 import json
+import time
 from typing import Iterator, AsyncIterator
+
+from dotenv import load_dotenv
 from pydantic import BaseModel
 
+from .llms import *
+from .message import *
+from .openrouter_provider import *
+from .tool import *
+
 
 _base_system_prompt = """
 It's [TIME] today.
@@ -18,79 +21,71 @@ You are an intelligent AI. You must follow the system_instruction below, which i
 </system_instruction>
 """
 
-class Chatbot_manager:
-    def __init__(self, system_prompt:str="", tools:list[tool_model]=
+class OpenRouterClient:
+    def __init__(self, system_prompt: str = "", tools: list[tool_model] = None) -> None:
         load_dotenv()
 
-        self._memory: list[
-        self.tools: list[tool_model] = tools
-        self.set_system_prompt(
+        self._memory: list[Message] = []
+        self.tools: list[tool_model] = tools or []
+        self.set_system_prompt(system_prompt)
 
-    def set_system_prompt(self, prompt: str):
-
+    def set_system_prompt(self, prompt: str) -> None:
+        month, day, year = time.localtime()[:3]
 
         system_prompt = _base_system_prompt
-        system_prompt = system_prompt.replace("[TIME]", f"{
+        system_prompt = system_prompt.replace("[TIME]", f"{month}/{day}/{year}")
         system_prompt = system_prompt.replace("[SYSTEM_INSTRUCTION]", prompt)
 
-        self._system_prompt =
+        self._system_prompt = Message(text=system_prompt, role=Role.system)
 
-    def clear_memory(self):
+    def clear_memory(self) -> None:
         self._memory = []
 
-    def print_memory(self):
-
-
-
+    def print_memory(self) -> None:
+        from tqdm import tqdm
+
+        reset_code = "\033[0m"
+
         for message in self._memory:
             role = message.role.value
             text = message.text.strip()
 
-            reset_code = "\033[0m"
             role_str = f"{role.ljust(9)}:"
             indent = " " * len(role_str)
             lines = text.splitlines()
 
-
-
-
-
-
-
-
-
+            color_codes = {
+                "user": "\033[94m",
+                "assistant": "\033[92m",
+                "tool": "\033[93m",
+                "default": "\033[0m"
+            }
+
+            color_code = color_codes.get(role, color_codes["default"])
 
-
-            color_code = "\033[92m"  # green
+            if role in ["user", "assistant"]:
                 if lines:
                     print(f"{color_code}{role_str}{reset_code} {lines[0]}")
                     for line in lines[1:]:
                         print(f"{color_code}{indent}{reset_code} {line}")
                 else:
                     print(f"{color_code}{role_str}{reset_code}")
-
+
             elif role == "tool":
-                color_code = "\033[93m"  # orange
                 print(f"{color_code}{role_str}{reset_code} ", end="")
-
                 for tool in message.tool_calls:
                     print(f"{tool.name}({json.loads(tool.arguments)}), ", end="")
                 print()
-
-            else:
-                color_code = "\033[0m"  # default color
-                print("Print error: The role is invalid.")
-
-        print("----------------------------------------------------------\n")
 
     def invoke(
-        self,
-        model: LLMModel,
-        query:
-        tools: list[tool_model]=
-        provider:ProviderConfig=None,
-        temperature: float=0.3
-    ) ->
+        self,
+        model: LLMModel,
+        query: Message,
+        tools: list[tool_model] = None,
+        provider: ProviderConfig = None,
+        temperature: float = 0.3
+    ) -> Message:
+        tools = tools or []
         self._memory.append(query)
         client = OpenRouterProvider()
         reply = client.invoke(
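One pattern worth noting in the rewritten signatures above: list parameters such as tools now default to None and are normalized with `tools = tools or []` at the top of each method. This avoids Python's shared-mutable-default pitfall, illustrated below with hypothetical functions (not package code):

    def broken(items: list = []):  # the default list is created once, at def time
        items.append(1)
        return items

    broken()  # [1]
    broken()  # [1, 1] -- state leaks between calls

    def fixed(items: list | None = None):
        items = items or []  # fresh list on every call
        items.append(1)
        return items

    fixed()  # [1]
    fixed()  # [1], every time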
@@ -101,7 +96,7 @@ class Chatbot_manager:
             tools=self.tools + tools,
             provider=provider,
         )
-        reply.
+        reply.answered_by = model
         self._memory.append(reply)
 
         if reply.tool_calls:
@@ -116,7 +111,6 @@ class Chatbot_manager:
                     requested_tool.result = result
                     break
                 else:
-                    print("Tool Not found", requested_tool.name)
                     return reply
 
         reply = client.invoke(
@@ -127,19 +121,20 @@ class Chatbot_manager:
             provider=provider
         )
 
-        reply.
+        reply.answered_by = model
         self._memory.append(reply)
 
         return reply
 
     def invoke_stream(
-        self,
-        model: LLMModel,
-        query:
-        tools: list[tool_model]=
-        provider:ProviderConfig=None,
-        temperature: float=0.3
+        self,
+        model: LLMModel,
+        query: Message,
+        tools: list[tool_model] = None,
+        provider: ProviderConfig = None,
+        temperature: float = 0.3
     ) -> Iterator[str]:
+        tools = tools or []
         self._memory.append(query)
         client = OpenRouterProvider()
         generator = client.invoke_stream(
@@ -156,16 +151,17 @@ class Chatbot_manager:
                 text += token.choices[0].delta.content
                 yield token.choices[0].delta.content
 
-        self._memory.append(
+        self._memory.append(Message(text=text, role=Role.ai, answered_by=model))
 
     async def async_invoke(
-        self,
-        model: LLMModel,
-        query:
-        tools: list[tool_model] =
+        self,
+        model: LLMModel,
+        query: Message,
+        tools: list[tool_model] = None,
         provider: ProviderConfig = None,
-        temperature: float=0.3
-    ) ->
+        temperature: float = 0.3
+    ) -> Message:
+        tools = tools or []
         self._memory.append(query)
         client = OpenRouterProvider()
         reply = await client.async_invoke(
@@ -176,7 +172,7 @@ class Chatbot_manager:
             tools=self.tools + tools,
             provider=provider
         )
-        reply.
+        reply.answered_by = model
         self._memory.append(reply)
 
         if reply.tool_calls:
@@ -191,7 +187,6 @@ class Chatbot_manager:
                     requested_tool.result = result
                     break
                 else:
-                    print("Tool Not found", requested_tool.name)
                     return reply
 
         reply = await client.async_invoke(
@@ -201,19 +196,20 @@ class Chatbot_manager:
             tools=self.tools + tools,
             provider=provider
         )
-        reply.
+        reply.answered_by = model
         self._memory.append(reply)
 
         return reply
 
     async def async_invoke_stream(
-        self,
-        model: LLMModel,
-        query:
-        tools: list[tool_model] =
+        self,
+        model: LLMModel,
+        query: Message,
+        tools: list[tool_model] = None,
         provider: ProviderConfig = None,
-        temperature: float=0.3
+        temperature: float = 0.3
     ) -> AsyncIterator[str]:
+        tools = tools or []
         self._memory.append(query)
         client = OpenRouterProvider()
 
@@ -232,15 +228,15 @@ class Chatbot_manager:
                 text += delta
                 yield delta
 
-        self._memory.append(
+        self._memory.append(Message(text=text, role=Role.ai, answered_by=model))
 
     def structured_output(
-        self,
-        model: LLMModel,
-        query:
-        provider:ProviderConfig=None,
-        json_schema: BaseModel=None,
-        temperature: float=0.3
+        self,
+        model: LLMModel,
+        query: Message,
+        provider: ProviderConfig = None,
+        json_schema: BaseModel = None,
+        temperature: float = 0.3
     ) -> BaseModel:
         self._memory.append(query)
         client = OpenRouterProvider()