openrouter-provider 0.0.5__py3-none-any.whl → 1.0.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
openrouter/__init__.py ADDED
@@ -0,0 +1,5 @@
+ from .llms import *
+ from .message import *
+ from .openrouter import *
+ from .openrouter_provider import *
+ from .tool import *
openrouter/llms.py CHANGED
@@ -15,29 +15,39 @@ gpt_4_1_mini = LLMModel(name='openai/gpt-4.1-mini', input_cost=0.4, output_cost=
  gpt_4_1_nano = LLMModel(name='openai/gpt-4.1-nano', input_cost=0.1, output_cost=0.4)
  o4_mini = LLMModel(name='openai/o4-mini', input_cost=1.1, output_cost=4.4)
  o4_mini_high = LLMModel(name='openai/o4-mini-high', input_cost=1.1, output_cost=4.4)
- o3 = LLMModel(name='openai/o3', input_cost=10, output_cost=40)
+ o3 = LLMModel(name='openai/o3', input_cost=2, output_cost=8)
+ gpt_5_nano = LLMModel(name='openai/gpt-5-nano', input_cost=0.05, output_cost=0.4)
+ gpt_5_mini = LLMModel(name='openai/gpt-5-mini', input_cost=0.25, output_cost=2)
+ gpt_5 = LLMModel(name='openai/gpt-5', input_cost=1.25, output_cost=10)
+ gpt_oss_20B_free = LLMModel(name='openai/gpt-oss-20b:free', input_cost=0, output_cost=0)
+ gpt_oss_20B = LLMModel(name='openai/gpt-oss-20b', input_cost=0.06, output_cost=0.2)
+ gpt_oss_120B = LLMModel(name='openai/gpt-oss-120b', input_cost=0.25, output_cost=0.69)
 
  # Anthropic
  claude_3_7_sonnet = LLMModel(name='anthropic/claude-3.7-sonnet', input_cost=3.0, output_cost=15.0)
  claude_3_7_sonnet_thinking = LLMModel(name='anthropic/claude-3.7-sonnet:thinking', input_cost=3.0, output_cost=15.0)
  claude_3_5_haiku = LLMModel(name='anthropic/claude-3.5-haiku', input_cost=0.8, output_cost=4.0)
+ claude_4_sonnet = LLMModel(name='anthropic/claude-sonnet-4', input_cost=3.0, output_cost=15.0)
+ claude_4_opus = LLMModel(name='anthropic/claude-opus-4', input_cost=15, output_cost=75)
+ claude_4_1_opus = LLMModel(name='anthropic/claude-opus-4.1', input_cost=15, output_cost=75)
 
  # Google
  gemini_2_0_flash = LLMModel(name='google/gemini-2.0-flash-001', input_cost=0.1, output_cost=0.4)
- gemini_2_0_flash_free = LLMModel(name='google/gemini-2.0-flash-exp:free', input_cost=0.1, output_cost=0.4)
- gemini_2_5_flash = LLMModel(name='google/gemini-2.5-flash-preview', input_cost=0.15, output_cost=0.60)
- gemini_2_5_flash_thinking = LLMModel(name='google/gemini-2.5-flash-preview:thinking', input_cost=0.15, output_cost=3.5)
- gemini_2_5_pro = LLMModel(name='google/gemini-2.5-pro-preview-03-25', input_cost=1.25, output_cost=10)
+ gemini_2_5_flash_lite = LLMModel(name='google/gemini-2.5-flash-lite', input_cost=0.1, output_cost=0.4)
+ gemini_2_5_flash = LLMModel(name='google/gemini-2.5-flash', input_cost=0.3, output_cost=2.5)
+ gemini_2_5_pro = LLMModel(name='google/gemini-2.5-pro', input_cost=1.25, output_cost=10)
 
  # Deepseek
  deepseek_v3_free = LLMModel(name='deepseek/deepseek-chat-v3-0324:free', input_cost=0, output_cost=0)
  deepseek_v3 = LLMModel(name='deepseek/deepseek-chat-v3-0324', input_cost=0.3, output_cost=1.2)
  deepseek_r1_free = LLMModel(name='deepseek/deepseek-r1:free', input_cost=0, output_cost=0)
  deepseek_r1 = LLMModel(name='deepseek/deepseek-r1', input_cost=0.5, output_cost=2.2)
+ deepseek_v3_1 = LLMModel(name='deepseek/deepseek-chat-v3.1', input_cost=0.55, output_cost=1.68)
 
  # xAI
  grok_3_mini = LLMModel(name='x-ai/grok-3-mini-beta', input_cost=0.3, output_cost=0.5)
  grok_3 = LLMModel(name='x-ai/grok-3-beta', input_cost=3, output_cost=15)
+ grok_4 = LLMModel(name='x-ai/grok-4', input_cost=3, output_cost=15)
 
  # Microsoft
  mai_ds_r1_free = LLMModel(name="microsoft/mai-ds-r1:free", input_cost=0, output_cost=0)
@@ -45,4 +55,5 @@ mai_ds_r1_free = LLMModel(name="microsoft/mai-ds-r1:free", input_cost=0, output_
  # Others
  llama_4_maverick_free = LLMModel(name="meta-llama/llama-4-maverick:free", input_cost=0, output_cost=0)
  llama_4_scout = LLMModel(name="meta-llama/llama-4-scout", input_cost=0.11, output_cost=0.34)
- mistral_small_3_1_24B_free = LLMModel(name="mistralai/mistral-small-3.1-24b-instruct:free", input_cost=0, output_cost=0)
+ mistral_small_3_1_24B_free = LLMModel(name="mistralai/mistral-small-3.1-24b-instruct:free", input_cost=0, output_cost=0)
+
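The input_cost/output_cost values above follow OpenRouter's convention of USD per million tokens. As a minimal sketch of how these constants could be used, assuming the fields are the plain floats the constructors suggest (estimate_cost is a hypothetical helper, not part of the package):

    from openrouter.llms import gpt_5_mini

    def estimate_cost(model, prompt_tokens: int, completion_tokens: int) -> float:
        # input_cost/output_cost are assumed to be USD per 1M tokens
        return (prompt_tokens * model.input_cost
                + completion_tokens * model.output_cost) / 1_000_000

    print(estimate_cost(gpt_5_mini, 10_000, 2_000))  # 0.0065 USD
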
openrouter/message.py ADDED
@@ -0,0 +1,102 @@
+ from __future__ import annotations
+ from dataclasses import dataclass
+ from enum import Enum
+ from io import BytesIO
+ import base64
+ import uuid
+ from typing import Optional, Any
+
+ from PIL import Image
+ from openai.types.chat import ChatCompletion
+
+ from openrouter.llms import LLMModel
+
+
+ class Role(Enum):
+     system = "system"
+     user = "user"
+     ai = "assistant"
+     agent = "agent"
+     tool = "tool"
+
+
+ @dataclass
+ class ToolCall:
+     id: str
+     name: str
+     arguments: dict
+     result: Any = ""
+
+
+ class Message:
+     def __init__(
+         self,
+         text: str,
+         images: Optional[list[Image.Image]] = None,
+         role: Role = Role.user,
+         answered_by: Optional[LLMModel] = None,
+         raw_response: Optional[ChatCompletion] = None,
+         id: Optional[str] = None
+     ) -> None:
+         self.id = id if id is not None else str(uuid.uuid4())
+         self.role = role
+         self.text = text
+         self.images = self._process_image(images)
+         self.answered_by = answered_by
+         self.tool_calls: list[ToolCall] = []
+         self.raw_response = raw_response
+
+
+     def __str__(self) -> str:
+         BLUE = "\033[34m"
+         GREEN = "\033[32m"
+         RESET = "\033[0m"
+
+         message = ""
+
+         if self.role == Role.system:
+             message = "---------------------- System ----------------------\n"
+         elif self.role == Role.user:
+             message = BLUE + "----------------------- User -----------------------\n" + RESET
+         elif self.role == Role.ai:
+             message = GREEN + "--------------------- Assistant --------------------\n" + RESET
+
+         message += self.text + RESET + "\n"
+
+         return message
+
+     def _process_image(self, images: Optional[list[Image.Image]]) -> Optional[list[str]]:
+         if images is None:
+             return None
+
+         base64_images = []
+         for image in images:
+             if image.mode == "RGBA":
+                 image = image.convert("RGB")
+
+             image = self._resize_image_aspect_ratio(image)
+             image = self._convert_to_base64(image)
+             base64_images.append(image)
+
+         return base64_images
+
+     def _convert_to_base64(self, image: Image.Image) -> str:
+         buffered = BytesIO()
+         format_type = image.format if image.format else 'JPEG'
+         image.save(buffered, format=format_type)
+         img_bytes = buffered.getvalue()
+         img_base64 = base64.b64encode(img_bytes).decode('utf-8')
+
+         return img_base64
+
+     def _resize_image_aspect_ratio(self, image: Image.Image, target_length: int = 1024) -> Image.Image:
+         width, height = image.size
+
+         if width > height:
+             new_width = target_length
+             new_height = int((target_length / width) * height)
+         else:
+             new_height = target_length
+             new_width = int((target_length / height) * width)
+
+         return image.resize((new_width, new_height))
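Given the Message class added above, a typical construction looks like the sketch below (photo.jpg is a hypothetical file; any Pillow image works, and RGBA input is converted to RGB and resized to a 1024-pixel long edge before base64 encoding):

    from PIL import Image
    from openrouter.message import Message, Role

    img = Image.open("photo.jpg")  # hypothetical path
    msg = Message(text="What is in this picture?", images=[img], role=Role.user)

    print(msg.id)   # a fresh uuid4 string when no id is passed
    print(msg)      # colorized, transcript-style rendering via __str__
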
openrouter/openrouter.py ADDED
@@ -0,0 +1,287 @@
+ from __future__ import annotations
+ import json
+ import time
+ from copy import deepcopy
+ from typing import Iterator, AsyncIterator
+
+ from dotenv import load_dotenv
+ from pydantic import BaseModel
+
+ from openrouter.llms import *
+ from openrouter.message import *
+ from openrouter.openrouter_provider import *
+ from openrouter.tool import *
+
+
+ _base_system_prompt = """
+ It's [TIME] today.
+ You are an intelligent AI. You must follow the system_instruction below, which is provided by the user.
+
+ <system_instruction>
+ [SYSTEM_INSTRUCTION]
+ </system_instruction>
+ """
+
+ class OpenRouterClient:
+     def __init__(self, system_prompt: str = "", tools: list[tool_model] = None) -> None:
+         load_dotenv()
+
+         self._memory: list[Message] = []
+         self.tools: list[tool_model] = tools or []
+         self.set_system_prompt(system_prompt)
+
+     def set_system_prompt(self, prompt: str) -> None:
+         year, month, day = time.localtime()[:3]  # localtime() yields (year, month, day, ...)
+
+         system_prompt = _base_system_prompt
+         system_prompt = system_prompt.replace("[TIME]", f"{month}/{day}/{year}")
+         system_prompt = system_prompt.replace("[SYSTEM_INSTRUCTION]", prompt)
+
+         self._system_prompt = Message(text=system_prompt, role=Role.system)
+
+     def _execute_tools(self, reply: Message, tools: list[tool_model]) -> Message:
+         if not reply.tool_calls:
+             return reply
+
+         reply_copy = deepcopy(reply)
+
+         for requested_tool in reply_copy.tool_calls:
+             args = requested_tool.arguments
+             if isinstance(args, str):
+                 args = json.loads(args)
+
+             for tool in tools:
+                 if tool.name == requested_tool.name:
+                     result = tool(**args)
+                     requested_tool.result = result
+                     break
+
+         return reply_copy
+
+     def execute_tool(self, reply: Message, tool_index: int, tools: list[tool_model] = []) -> Message:
+         if not reply.tool_calls:
+             return reply
+
+         if tool_index < 0 or tool_index >= len(reply.tool_calls):
+             raise IndexError(f"Tool index {tool_index} is out of range. Available tools: {len(reply.tool_calls)}")
+
+         requested_tool = reply.tool_calls[tool_index]
+
+         args = requested_tool.arguments
+         if isinstance(args, str):
+             args = json.loads(args)
+
+         all_tools = self.tools + tools
+         for tool in all_tools:
+             if tool.name == requested_tool.name:
+                 result = tool(**args)
+                 requested_tool.result = result
+                 break
+         else:
+             raise ValueError(f"Tool '{requested_tool.name}' not found in registered tools")
+
+         for i, msg in enumerate(self._memory):
+             if msg.id == reply.id:
+                 self._memory[i] = reply
+                 break
+
+         return reply
+
+     def clear_memory(self) -> None:
+         self._memory = []
+
+     def print_memory(self) -> None:
+         reset_code = "\033[0m"
+
+         for message in self._memory:
+             role = message.role.value
+             text = message.text.strip()
+
+             role_str = f"{role.ljust(9)}:"
+             indent = " " * len(role_str)
+             lines = text.splitlines()
+
+             color_codes = {
+                 "user": "\033[94m",
+                 "assistant": "\033[92m",
+                 "tool": "\033[93m",
+                 "default": "\033[0m"
+             }
+
+             color_code = color_codes.get(role, color_codes["default"])
+
+             if role in ["user", "assistant"]:
+                 if lines:
+                     print(f"{color_code}{role_str}{reset_code} {lines[0]}")
+                     for line in lines[1:]:
+                         print(f"{color_code}{indent}{reset_code} {line}")
+                 else:
+                     print(f"{color_code}{role_str}{reset_code}")
+
+             elif role == "tool":
+                 print(f"{color_code}{role_str}{reset_code} ", end="")
+                 for tool in message.tool_calls:
+                     args = tool.arguments if isinstance(tool.arguments, dict) else json.loads(tool.arguments)
+                     print(f"{tool.name}({args}), ", end="")
+                 print()
+
+     def invoke(
+         self,
+         model: LLMModel,
+         query: Message = None,
+         tools: list[tool_model] = None,
+         provider: ProviderConfig = None,
+         temperature: float = 0.3,
+         auto_tool_exec: bool = True
+     ) -> Message:
+         tools = tools or []
+         if query is not None:
+             self._memory.append(query)
+         client = OpenRouterProvider()
+
+         reply = client.invoke(
+             model=model,
+             temperature=temperature,
+             system_prompt=self._system_prompt,
+             querys=self._memory,
+             tools=self.tools + tools,
+             provider=provider,
+         )
+         reply.answered_by = model
+         self._memory.append(reply)
+
+         if auto_tool_exec and reply.tool_calls:
+             reply = self._execute_tools(reply, self.tools + tools)
+             self._memory[-1] = reply
+
+             reply = client.invoke(
+                 model=model,
+                 temperature=temperature,
+                 system_prompt=self._system_prompt,
+                 querys=self._memory,
+                 provider=provider
+             )
+             reply.answered_by = model
+             self._memory.append(reply)
+
+         return reply
+
+     def invoke_stream(
+         self,
+         model: LLMModel,
+         query: Message,
+         tools: list[tool_model] = None,
+         provider: ProviderConfig = None,
+         temperature: float = 0.3
+     ) -> Iterator[str]:
+         tools = tools or []
+         self._memory.append(query)
+         client = OpenRouterProvider()
+         generator = client.invoke_stream(
+             model=model,
+             temperature=temperature,
+             system_prompt=self._system_prompt,
+             querys=self._memory,
+             tools=self.tools + tools,
+             provider=provider
+         )
+
+         text = ""
+         for token in generator:
+             delta = token.choices[0].delta.content or ""  # content can be None on some chunks
+             text += delta
+             yield delta
+
+         self._memory.append(Message(text=text, role=Role.ai, answered_by=model))
+
+     async def async_invoke(
+         self,
+         model: LLMModel,
+         query: Message = None,
+         tools: list[tool_model] = None,
+         provider: ProviderConfig = None,
+         temperature: float = 0.3,
+         auto_tool_exec: bool = True
+     ) -> Message:
+         tools = tools or []
+         if query is not None:
+             self._memory.append(query)
+         client = OpenRouterProvider()
+         reply = await client.async_invoke(
+             model=model,
+             temperature=temperature,
+             system_prompt=self._system_prompt,
+             querys=self._memory,
+             tools=self.tools + tools,
+             provider=provider
+         )
+         reply.answered_by = model
+         self._memory.append(reply)
+
+         if auto_tool_exec and reply.tool_calls:
+             reply = self._execute_tools(reply, self.tools + tools)
+             self._memory[-1] = reply
+
+             reply = await client.async_invoke(
+                 model=model,
+                 system_prompt=self._system_prompt,
+                 querys=self._memory,
+                 tools=self.tools + tools,
+                 provider=provider
+             )
+             reply.answered_by = model
+             self._memory.append(reply)
+
+         return reply
+
+     async def async_invoke_stream(
+         self,
+         model: LLMModel,
+         query: Message,
+         tools: list[tool_model] = None,
+         provider: ProviderConfig = None,
+         temperature: float = 0.3
+     ) -> AsyncIterator[str]:
+         tools = tools or []
+         self._memory.append(query)
+         client = OpenRouterProvider()
+
+         stream = client.async_invoke_stream(
+             model=model,
+             temperature=temperature,
+             system_prompt=self._system_prompt,
+             querys=self._memory,
+             tools=self.tools + tools,
+             provider=provider
+         )
+
+         text = ""
+         async for chunk in stream:
+             delta = chunk.choices[0].delta.content or ""
+             text += delta
+             yield delta
+
+         self._memory.append(Message(text=text, role=Role.ai, answered_by=model))
+
+     def structured_output(
+         self,
+         model: LLMModel,
+         query: Message,
+         provider: ProviderConfig = None,
+         json_schema: BaseModel = None,
+         temperature: float = 0.3
+     ) -> BaseModel:
+         self._memory.append(query)
+         client = OpenRouterProvider()
+         reply = client.structured_output(
+             model=model,
+             temperature=temperature,
+             system_prompt=self._system_prompt,
+             querys=self._memory,
+             provider=provider,
+             json_schema=json_schema
+         )
+
+         self._memory.append(Message(text=reply.model_dump_json(), role=Role.ai, answered_by=model))
+
+         return reply
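
Taken together, a minimal end-to-end use of the client added in 1.0.10 might look like the sketch below. It assumes an OpenRouter API key is available via the environment or a .env file (the env var name is an assumption; load_dotenv() in __init__ suggests .env is supported) and that the star-imports in openrouter/__init__.py re-export OpenRouterClient and Message:

    from openrouter import OpenRouterClient, Message
    from openrouter.llms import gpt_4_1_nano

    client = OpenRouterClient(system_prompt="You are a concise assistant.")
    reply = client.invoke(model=gpt_4_1_nano, query=Message(text="Hello!"))
    print(reply.text)

    client.print_memory()  # dump the accumulated conversation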