openrouter-provider 0.1.1__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,10 +1,11 @@
+from __future__ import annotations
 import inspect
 from functools import wraps
-from typing import get_type_hints, get_origin, get_args
+from typing import get_type_hints, get_origin, get_args, Callable, Any, Dict


 class tool_model:
-    def __init__(self, func):
+    def __init__(self, func: Callable) -> None:
         self.func = func
         self.name = func.__name__
@@ -12,22 +13,20 @@ class tool_model:
         type_hints = get_type_hints(func)

         type_map = {
-            int: "integer",
-            str: "string",
+            int: "integer",
+            str: "string",
             float: "number",
-            bool: "boolean",
+            bool: "boolean",
         }

-        properties = {}
-        required = []
+        properties: Dict[str, Dict[str, Any]] = {}
+        required: list[str] = []

-        # Build JSON schema properties
         for name, param in sig.parameters.items():
             anno = type_hints.get(name, None)
             origin = get_origin(anno)

             if origin is list:
-                # Handle list element types
                 (elem_type,) = get_args(anno)
                 json_item_type = type_map.get(elem_type, "string")
                 schema = {
@@ -36,7 +35,6 @@ class tool_model:
                     "description": name,
                 }
             else:
-                # Primitive types
                 json_type = type_map.get(anno, "string")
                 schema = {
                     "type": json_type,
@@ -47,7 +45,6 @@ class tool_model:
             if param.default is inspect._empty:
                 required.append(name)

-        # Attach tool definition metadata
         self.tool_definition = {
             "type": "function",
             "function": {
@@ -65,7 +62,7 @@ class tool_model:

         wraps(func)(self)

-    def __call__(self, *args, **kwargs):
+    def __call__(self, *args: Any, **kwargs: Any) -> Any:
         return self.func(*args, **kwargs)

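Read together, the hunks above show what the annotated `tool_model` produces: it inspects a function's signature and type hints, maps `int`/`str`/`float`/`bool` to `"integer"`/`"string"`/`"number"`/`"boolean"` (defaulting to `"string"`), treats `list[T]` as an array of the mapped element type, and marks parameters without defaults as required. A minimal sketch of the decorator in use; `set_brightness` is a hypothetical function of ours, and the printed shape is abridged and partly inferred, since the diff shows the schema only in fragments:

```python
from OpenRouterProvider.Tool import tool_model

@tool_model
def set_brightness(level: int, rooms: list[str], dry_run: bool = False):
    """Hypothetical tool: set brightness for the given rooms."""
    return f"brightness={level} for {rooms} (dry_run={dry_run})"

# int -> "integer", list[str] -> array of "string", bool -> "boolean".
# `level` and `rooms` have no defaults, so both should land in "required".
print(set_brightness.tool_definition)
# Expected shape (abridged, partly inferred from the hunks above):
# {"type": "function",
#  "function": {"name": "set_brightness", ...
#      "properties": {"level": {"type": "integer", "description": "level"},
#                     "rooms": {... array of "string" ...},
#                     "dry_run": {"type": "boolean", "description": "dry_run"}},
#      "required": ["level", "rooms"]}}
```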
@@ -1,142 +0,0 @@
-from .LLMs import LLMModel
-
-from enum import Enum
-from PIL import Image
-import base64
-from io import BytesIO
-from dataclasses import dataclass
-
-from openai.types.chat import ChatCompletion
-
-
-class Role(Enum):
-    system = "system"
-    user = "user"
-    ai = "assistant"
-    agent = "agent"
-    tool = "tool"
-
-
-@dataclass
-class ToolCall:
-    id: str
-    name: str
-    arguments: dict
-    result: any = ""
-
-
-class Chat_message:
-    def __init__(self,
-                 text: str,
-                 images: list[Image.Image]=None,
-                 role: Role=Role.user,
-                 answerdBy: LLMModel=None,
-                 raw_response: ChatCompletion=None
-                 ) -> None:
-        self.role = role
-        self.text = text
-        self.images = self._process_image(images=images)
-        self.answeredBy: LLMModel = answerdBy
-
-        self.tool_calls: list[ToolCall] = []
-        self.raw_resoonse: ChatCompletion = raw_response
-
-
-    def __str__(self) -> str:
-        # ANSI color codes for blue, green, and reset (to default)
-        BLUE = "\033[34m"
-        GREEN = "\033[32m"
-        RESET = "\033[0m"
-
-        message = ""
-
-        if self.role == Role.system:
-            message = "---------------------- System ----------------------\n"
-        elif self.role == Role.user:
-            message = BLUE + "----------------------- User -----------------------\n" + RESET
-        elif self.role == Role.ai:
-            message = GREEN + "--------------------- Assistant --------------------\n" + RESET
-
-        # Append text and reset color formatting at the end
-        message += self.text + RESET + "\n"
-
-        return message
-
-    def _process_image(self, images: list):
-        """
-        Process a list of images by resizing them to maintain aspect ratio and then converting them to base64 format.
-
-        Args:
-            images (list): A list of image objects to be processed.
-
-        Returns:
-            list: A list of base64-encoded image strings if input is not None/empty, otherwise `None`.
-
-        Note:
-            - Images should be provided as a "list" even if there is only a single image to process.
-        """
-        if images == None:
-            return None
-
-        base64_images = []
-        for image in images:
-            if image.mode == "RGBA":
-                image = image.convert("RGB")
-
-            image = self._resize_image_aspect_ratio(image=image)
-            image = self._convert_to_base64(image=image)
-            base64_images.append(image)
-
-        return base64_images
-
-    def _convert_to_base64(self, image: Image) -> str:
-        """
-        Convert an image to a base64-encoded string.
-
-        Args:
-            image (Image): The image object to be converted to base64 format.
-
-        Returns:
-            str: The base64-encoded string representation of the image.
-
-        Note:
-            - The image format will default to 'JPEG' if the format is not specified.
-        """
-        buffered = BytesIO()
-        format = image.format if image.format else 'JPEG'
-        image.save(buffered, format=format)
-        img_bytes = buffered.getvalue()
-        img_base64 = base64.b64encode(img_bytes).decode('utf-8')
-
-        return img_base64
-
-    def _resize_image_aspect_ratio(self, image: Image, target_length=1024):
-        """
-        Resize an image to a target length while maintaining its aspect ratio.
-
-        Args:
-            image (Image): The image object to be resized.
-            target_length (int, optional): The target length for the larger dimension (default is 1024).
-
-        Returns:
-            Image: The resized image object with maintained aspect ratio.
-
-        Note:
-            - The smaller dimension is scaled proportionally based on the larger dimension to maintain aspect ratio.
-            - If the image's aspect ratio is non-square, the target_length is applied to the larger dimension.
-        """
-
-        width, height = image.size
-
-        if width > height:
-            new_width = target_length
-            new_height = int((target_length / width) * height)
-        else:
-            new_height = target_length
-            new_width = int((target_length / height) * width)
-
-        resized_image = image.resize((new_width, new_height))
-
-        return resized_image
-
-
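A note on the removed `Chat_message` module above: `_resize_image_aspect_ratio` pins the longer side to `target_length` (1024 by default) and scales the shorter side proportionally. A standalone sketch of that arithmetic, with sample dimensions of our choosing:

```python
# Standalone restatement of the removed resize logic; the sample sizes are ours.
def resize_dims(width: int, height: int, target_length: int = 1024) -> tuple[int, int]:
    if width > height:
        return target_length, int((target_length / width) * height)
    return int((target_length / height) * width), target_length

print(resize_dims(4032, 3024))  # (1024, 768): landscape, long side pinned to 1024
print(resize_dims(1000, 2000))  # (512, 1024): portrait, height pinned to 1024
```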
@@ -1,266 +0,0 @@
-
-# structured output
-# https://note.com/brave_quince241/n/n60a5759c8f05
-
-import logging
-from .Chat_message import *
-from .Tool import tool_model
-from .LLMs import *
-
-from openai import OpenAI, AsyncOpenAI
-from openai.types.chat import ChatCompletionChunk
-from dotenv import load_dotenv
-import os, time
-from dataclasses import dataclass, field, asdict
-from typing import List, Optional, Literal, Iterator, AsyncIterator
-from pprint import pprint
-from pydantic import BaseModel
-
-
-# Show errors only, with detailed tracebacks
-logging.basicConfig(level=logging.ERROR, format="%(asctime)s - %(levelname)s - %(message)s")
-logger = logging.getLogger(__name__)
-
-
-@dataclass
-class ProviderConfig:
-    order: Optional[List[str]] = None
-    allow_fallbacks: bool = None
-    require_parameters: bool = None
-    data_collection: Literal["allow", "deny"] = None
-    only: Optional[List[str]] = None
-    ignore: Optional[List[str]] = None
-    quantizations: Optional[List[str]] = None
-    sort: Optional[Literal["price", "throughput"]] = None
-    max_price: Optional[dict] = None
-
-    def to_dict(self) -> dict:
-        return {k: v for k, v in asdict(self).items() if v is not None}
-
-
-class OpenRouterProvider:
-    def __init__(self) -> None:
-        load_dotenv()
-        api_key = os.getenv("OPENROUTER_API_KEY")
-        if not api_key:
-            logger.error("OPENROUTER_API_KEY is not set in environment variables.")
-        self.client = OpenAI(
-            base_url="https://openrouter.ai/api/v1",
-            api_key=api_key,
-        )
-        self.async_client = AsyncOpenAI(
-            base_url="https://openrouter.ai/api/v1",
-            api_key=api_key,
-        )
-
-    def make_prompt(
-        self,
-        system_prompt: Chat_message,
-        querys: list[Chat_message]
-    ) -> list[dict]:
-        messages = [{"role": "system", "content": system_prompt.text}]
-
-        for query in querys:
-            if query.role == Role.user:
-                if query.images is None:
-                    messages.append({"role": "user", "content": query.text})
-                else:
-                    content = [{"type": "text", "text": query.text}]
-                    for img in query.images[:50]:
-                        content.append(
-                            {"type": "image_url",
-                             "image_url": {"url": f"data:image/jpeg;base64,{img}"}})
-                    messages.append({"role": "user", "content": content})
-
-            elif query.role == Role.ai or query.role == Role.tool:
-                assistant_msg = {"role": "assistant"}
-                assistant_msg["content"] = query.text or None
-
-                if query.tool_calls:
-                    assistant_msg["tool_calls"] = [
-                        {
-                            "id": str(t.id),
-                            "type": "function",
-                            "function": {
-                                "name": t.name,
-                                "arguments": t.arguments
-                            }
-                        }
-                        for t in query.tool_calls
-                    ]
-                messages.append(assistant_msg)
-
-                for t in query.tool_calls:
-                    messages.append({
-                        "role": "tool",
-                        "tool_call_id": str(t.id),
-                        "content": str(t.result)
-                    })
-
-        return messages
-
-    def invoke(
-        self,
-        model: LLMModel,
-        system_prompt: Chat_message,
-        querys: list[Chat_message],
-        tools: list[tool_model] = [],
-        provider: ProviderConfig = None,
-        temperature: float = 0.3
-    ) -> Chat_message:
-        try:
-            messages = self.make_prompt(system_prompt, querys)
-
-            tool_defs = [tool.tool_definition for tool in tools] if tools else None
-            provider_dict = provider.to_dict() if provider else None
-
-            response = self.client.chat.completions.create(
-                model=model.name,
-                temperature=temperature,
-                messages=messages,
-                tools=tool_defs,
-                extra_body={"provider": provider_dict},
-            )
-
-            reply = Chat_message(text=response.choices[0].message.content, role=Role.ai, raw_response=response)
-
-            if response.choices[0].message.tool_calls:
-                reply.role = Role.tool
-                for tool in response.choices[0].message.tool_calls:
-                    reply.tool_calls.append(ToolCall(id=tool.id, name=tool.function.name, arguments=tool.function.arguments))
-            return reply
-
-        except Exception as e:
-            logger.exception(f"An error occurred while invoking the model: {e.__class__.__name__}: {str(e)}")
-            return Chat_message(text="Fail to get response. Please see the error message.", role=Role.ai, raw_response=None)
-
-    def invoke_stream(
-        self,
-        model: LLMModel,
-        system_prompt: Chat_message,
-        querys: list[Chat_message],
-        tools: list[tool_model] = [],
-        provider: ProviderConfig = None,
-        temperature: float = 0.3
-    ) -> Iterator[ChatCompletionChunk]:
-        # chunk example
-        # ChatCompletionChunk(id='gen-1746748260-mdKZLTs9QY7MmUxWKb8V', choices=[Choice(delta=ChoiceDelta(content='!', function_call=None, refusal=None, role='assistant', tool_calls=None), finish_reason=None, index=0, logprobs=None, native_finish_reason=None)], created=1746748260, model='openai/gpt-4o-mini', object='chat.completion.chunk', service_tier=None, system_fingerprint='fp_e2f22fdd96', usage=None, provider='OpenAI')
-
-        # ChatCompletionChunk(id='gen-1746748260-mdKZLTs9QY7MmUxWKb8V', choices=[Choice(delta=ChoiceDelta(content='', function_call=None, refusal=None, role='assistant', tool_calls=None), finish_reason='stop', index=0, logprobs=None, native_finish_reason='stop')], created=1746748260, model='openai/gpt-4o-mini', object='chat.completion.chunk', service_tier=None, system_fingerprint='fp_e2f22fdd96', usage=None, provider='OpenAI')
-
-        # ChatCompletionChunk(id='gen-1746748260-mdKZLTs9QY7MmUxWKb8V', choices=[Choice(delta=ChoiceDelta(content='', function_call=None, refusal=None, role='assistant', tool_calls=None), finish_reason=None, index=0, logprobs=None, native_finish_reason=None)], created=1746748260, model='openai/gpt-4o-mini', object='chat.completion.chunk', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=54, prompt_tokens=61, total_tokens=115, completion_tokens_details=CompletionTokensDetails(reasoning_tokens=0), prompt_tokens_details={'cached_tokens': 0}), provider='OpenAI')
-
-        try:
-            messages = self.make_prompt(system_prompt, querys)
-
-            tool_defs = [tool.tool_definition for tool in tools] if tools else None
-            provider_dict = provider.to_dict() if provider else None
-
-            response = self.client.chat.completions.create(
-                model=model.name,
-                temperature=temperature,
-                messages=messages,
-                tools=tool_defs,
-                extra_body={"provider": provider_dict},
-                stream=True
-            )
-
-            return response
-
-        except Exception as e:
-            logger.exception(f"An error occurred while invoking the model: {e.__class__.__name__}: {str(e)}")
-            return Chat_message(text="Fail to get response. Please see the error message.", role=Role.ai, raw_response=None)
-
-    async def async_invoke(
-        self, model: LLMModel,
-        system_prompt: Chat_message,
-        querys: list[Chat_message],
-        tools: list[tool_model] = [],
-        provider: ProviderConfig = None,
-        temperature: float = 0.3
-    ) -> Chat_message:
-        try:
-            messages = self.make_prompt(system_prompt, querys)
-
-            tool_defs = [tool.tool_definition for tool in tools] if tools else None
-            provider_dict = provider.to_dict() if provider else None
-
-            response = await self.async_client.chat.completions.create(
-                model=model.name,
-                temperature=temperature,
-                messages=messages,
-                tools=tool_defs,
-                extra_body={"provider": provider_dict}
-            )
-
-            reply = Chat_message(text=response.choices[0].message.content, role=Role.ai, raw_response=response)
-
-            if response.choices[0].message.tool_calls:
-                reply.role = Role.tool
-                for tool in response.choices[0].message.tool_calls:
-                    reply.tool_calls.append(ToolCall(id=tool.id, name=tool.function.name, arguments=tool.function.arguments))
-            return reply
-
-        except Exception as e:
-            logger.exception(f"An error occurred while asynchronously invoking the model: {e.__class__.__name__}: {str(e)}")
-            return Chat_message(text="Fail to get response. Please see the error message.", role=Role.ai, raw_response=None)
-
-    async def async_invoke_stream(
-        self,
-        model: LLMModel,
-        system_prompt: Chat_message,
-        querys: list[Chat_message],
-        tools: list[tool_model] = [],
-        provider: ProviderConfig = None,
-        temperature: float = 0.3
-    ) -> AsyncIterator[ChatCompletionChunk]:
-        try:
-            messages = self.make_prompt(system_prompt, querys)
-
-            tool_defs = [tool.tool_definition for tool in tools] if tools else None
-            provider_dict = provider.to_dict() if provider else None
-
-            response = await self.async_client.chat.completions.create(
-                model=model.name,
-                temperature=temperature,
-                messages=messages,
-                tools=tool_defs,
-                extra_body={"provider": provider_dict},
-                stream=True
-            )
-
-            async for chunk in response:
-                yield chunk
-
-        except Exception as e:
-            logger.exception(f"An error occurred while asynchronously streaming the model: {e.__class__.__name__}: {str(e)}")
-            return
-
-    def structured_output(
-        self,
-        model: LLMModel,
-        system_prompt: Chat_message,
-        querys: list[Chat_message],
-        provider: ProviderConfig = None,
-        json_schema: BaseModel = None,
-        temperature: float = 0.3
-    ) -> BaseModel:
-        try:
-            messages = self.make_prompt(system_prompt, querys)
-            provider_dict = provider.to_dict() if provider else None
-
-            response = self.client.chat.completions.create(
-                model=model.name,
-                temperature=temperature,
-                messages=messages,
-                response_format={"type": "json_schema", "json_schema": {"name": json_schema.__name__, "schema": json_schema.model_json_schema()}},
-                extra_body={"provider": provider_dict},
-            )
-
-            return json_schema.model_validate_json(response.choices[0].message.content)
-
-        except Exception as e:
-            logger.exception(f"An error occurred while invoking structured output: {e.__class__.__name__}: {str(e)}")
-            return Chat_message(text="Fail to get response. Please see the error message.", role=Role.ai, raw_response=None)
-
-
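For orientation, the `structured_output` method removed above passed a Pydantic model's JSON schema through `response_format` and validated the reply back into that model. A hedged sketch of how the 0.1.1 API could have been called; the `Recipe` schema, the module path in the first import, and the model choice are our assumptions:

```python
from pydantic import BaseModel

from OpenRouterProvider.OpenRouterProvider import OpenRouterProvider  # assumed module path
from OpenRouterProvider.Chatbot_manager import Chat_message
from OpenRouterProvider.LLMs import gpt_4o_mini

class Recipe(BaseModel):  # illustrative schema, not part of the package
    title: str
    ingredients: list[str]

provider = OpenRouterProvider()  # reads OPENROUTER_API_KEY via load_dotenv()
result = provider.structured_output(
    model=gpt_4o_mini,
    system_prompt=Chat_message(text="You are a cooking assistant."),
    querys=[Chat_message(text="Give me a simple pancake recipe.")],
    json_schema=Recipe,
)
print(result.title, result.ingredients)  # a validated Recipe instance on success
```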
@@ -1,232 +0,0 @@
-Metadata-Version: 2.4
-Name: openrouter-provider
-Version: 0.1.1
-Summary: This is an unofficial wrapper of OpenRouter.
-Author-email: Keisuke Miyamto <aichiboyhighschool@gmail.com>
-Requires-Python: >=3.7
-Description-Content-Type: text/markdown
-Requires-Dist: annotated-types
-Requires-Dist: anyio
-Requires-Dist: certifi
-Requires-Dist: distro
-Requires-Dist: h11
-Requires-Dist: httpcore
-Requires-Dist: httpx
-Requires-Dist: idna
-Requires-Dist: jiter
-Requires-Dist: openai
-Requires-Dist: pillow
-Requires-Dist: pydantic
-Requires-Dist: pydantic_core
-Requires-Dist: python-dotenv
-Requires-Dist: sniffio
-Requires-Dist: tqdm
-Requires-Dist: typing-inspection
-Requires-Dist: typing_extensions
-
-## Introduction
-
-Welcome to **openrouter-provider**, an unofficial Python wrapper for the OpenRouter API. This library lets you easily integrate OpenRouter models, manage chat sessions, process images, and call tools within your Python application.
-
-
-## Features
-
-* Simple chat interface with system, user, assistant, and tool roles
-* Automatic image resizing and Base64 encoding
-* Built-in tool decorator for defining custom functions
-
-
-## Installation
-
-### From PyPI
-
-```bash
-pip3 install openrouter-provider
-```
-
-### From Source
-
-```bash
-git clone https://github.com/yourusername/openrouter-provider.git
-cd openrouter-provider
-pip3 install .
-```
-
-
-## Configuration
-
-1. Create a `.env` file in your project root.
-2. Add your OpenRouter API key:
-
-```bash
-OPENROUTER_API_KEY=your_api_key_here
-```
-
-
-## Usage
-
-### Basic chat bot
-Chat history is automatically sent by `Chatbot_manager`. If you want to delete the chat history, use the `clear_memory` method.
-
-```python
-from OpenRouterProvider.Chatbot_manager import Chat_message, Chatbot_manager
-from OpenRouterProvider.LLMs import gpt_4o_mini
-
-# Declare the chat bot
-ai = Chatbot_manager(system_prompt="Please answer in English.")
-
-# Send a query
-query = Chat_message(text="Introduce yourself, please.")
-response = ai.invoke(model=gpt_4o_mini, query=query)
-print(response.text)
-
-# Send the next query. Chatbot_manager automatically handles the chat history.
-query = Chat_message(text="Tell me a short story.")
-response = ai.invoke(model=gpt_4o_mini, query=query)
-print(response.text)
-
-# Print all chat history
-ai.print_memory()
-
-# Delete all chat history
-ai.clear_memory()
-```
-
-### Chat bot with images
-You can use images in the chat.
-
-```python
-from OpenRouterProvider.Chatbot_manager import Chat_message, Chatbot_manager
-from OpenRouterProvider.LLMs import gpt_4o_mini
-from PIL import Image
-
-dog = Image.open("dog.jpg")
-cat = Image.open("cat.jpg")
-
-# Send a query with images
-ai = Chatbot_manager(system_prompt="Please answer in English.")
-query = Chat_message(text="What can you see in the images?", images=[dog, cat])
-response = ai.invoke(model=gpt_4o_mini, query=query)
-print(response.text)
-```
-
-### With tools
-
-Use the `@tool_model` decorator to expose Python functions as callable tools in the chat. Tool calls are processed automatically by `Chatbot_manager`, so you don't need to handle them yourself.
-
-```python
-from OpenRouterProvider.Chatbot_manager import Chat_message, Chatbot_manager
-from OpenRouterProvider.LLMs import gpt_4o_mini
-from OpenRouterProvider.Tool import tool_model
-
-@tool_model
-def get_user_info():
-    """
-    Return user's personal info: name, age, and address.
-    """
-    return "name: Alice\nage: 30\naddress: Wonderland"
-
-ai = Chatbot_manager(system_prompt="Please answer in English.", tools=[get_user_info])
-query = Chat_message(text="What is the name, age, address of the user?")
-response = ai.invoke(model=gpt_4o_mini, query=query)
-ai.print_memory()
-```
-
-## Advanced Usage
-### Prebuilt and Custom Model Usage
-
-You can use the prebuilt models this library defines, or easily declare your own custom models.
-This library provides many ready-to-use models from OpenAI, Anthropic, Google, and others.
-
-```python
-from OpenRouterProvider.Chatbot_manager import Chat_message, Chatbot_manager
-from OpenRouterProvider.LLMs import gpt_4o, claude_3_7_sonnet
-
-# Use OpenAI GPT-4o
-ai = Chatbot_manager(system_prompt="Please answer in English.")
-query = Chat_message(text="Tell me a joke.")
-response = ai.invoke(model=gpt_4o, query=query)
-print(response.text)
-
-# Use Anthropic Claude 3.7 Sonnet
-query = Chat_message(text="Summarize the story of Hamlet.")
-response = ai.invoke(model=claude_3_7_sonnet, query=query)
-print(response.text)
-```
-
-Available prebuilt models include:
-
-#### **OpenAI**
-
-* `gpt_4o`
-* `gpt_4o_mini`
-* `gpt_4_1`
-* `gpt_4_1_mini`
-* `gpt_4_1_nano`
-* `o4_mini`
-* `o4_mini_high`
-* `o3`
-
-#### **Anthropic**
-
-* `claude_3_7_sonnet`
-* `claude_3_7_sonnet_thinking`
-* `claude_3_5_haiku`
-
-#### **Google**
-
-* `gemini_2_0_flash`
-* `gemini_2_0_flash_free`
-* `gemini_2_5_flash`
-* `gemini_2_5_flash_thinking`
-* `gemini_2_5_pro`
-
-#### **Deepseek**
-
-* `deepseek_v3_free`
-* `deepseek_v3`
-* `deepseek_r1_free`
-* `deepseek_r1`
-
-#### **xAI**
-
-* `grok_3_mini`
-* `grok_3`
-
-#### **Microsoft**
-
-* `mai_ds_r1_free`
-
-#### **Others**
-
-* `llama_4_maverick_free`
-* `llama_4_scout`
-* `mistral_small_3_1_24B_free`
-
-All of them are instances of `LLMModel`, which includes the model name and cost settings.
-
-### Using Custom Models
-
-You can define and use your own custom model if it's available on OpenRouter.
-
-```python
-from OpenRouterProvider.Chatbot_manager import Chat_message, Chatbot_manager
-from OpenRouterProvider.LLMs import LLMModel
-
-# Declare a custom model
-my_model = LLMModel(
-    name="my-org/my-custom-model",  # Model name as used on OpenRouter
-    input_cost=0.5,   # Optional: cost per 1M input tokens
-    output_cost=2.0   # Optional: cost per 1M output tokens
-)
-
-# Use the custom model
-ai = Chatbot_manager(system_prompt="Please answer in English.")
-query = Chat_message(text="Explain black holes simply.")
-response = ai.invoke(model=my_model, query=query)
-print(response.text)
-```
-
-You only need to know the model name as used on OpenRouter. `input_cost` and `output_cost` are optional and are not currently used by this library; support is planned for a future update.