openrouter-provider 0.0.5__py3-none-any.whl → 1.0.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openrouter/__init__.py +5 -0
- OpenRouterProvider/LLMs.py → openrouter/llms.py +17 -6
- openrouter/message.py +102 -0
- openrouter/openrouter.py +287 -0
- openrouter/openrouter_provider.py +279 -0
- OpenRouterProvider/Tool.py → openrouter/tool.py +9 -12
- openrouter_provider-1.0.10.dist-info/METADATA +430 -0
- openrouter_provider-1.0.10.dist-info/RECORD +10 -0
- {openrouter_provider-0.0.5.dist-info → openrouter_provider-1.0.10.dist-info}/WHEEL +1 -1
- openrouter_provider-1.0.10.dist-info/top_level.txt +1 -0
- OpenRouterProvider/Chat_message.py +0 -142
- OpenRouterProvider/Chatbot_manager.py +0 -123
- OpenRouterProvider/OpenRouterProvider.py +0 -112
- __init__.py +0 -0
- openrouter_provider-0.0.5.dist-info/METADATA +0 -232
- openrouter_provider-0.0.5.dist-info/RECORD +0 -10
- openrouter_provider-0.0.5.dist-info/top_level.txt +0 -2
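The most visible change for downstream code is the rename from the CamelCase `OpenRouterProvider` package to a flat `openrouter` package with lower-case module names. As a hedged sketch only — inferred from the renamed paths above, since this diff does not show what the new modules re-export — imports would migrate roughly like this:

```python
# Hypothetical migration shim, NOT taken from the 1.0.10 docs: module paths
# come from the renamed files above; the symbol names are assumptions.
try:
    from openrouter.llms import LLMModel    # was OpenRouterProvider/LLMs.py
    from openrouter.tool import tool_model  # was OpenRouterProvider/Tool.py
except ImportError:
    # Fall back to the 0.0.5 layout shown in the deleted files below.
    from OpenRouterProvider.LLMs import LLMModel
    from OpenRouterProvider.Tool import tool_model
```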
OpenRouterProvider/Chatbot_manager.py
DELETED

@@ -1,123 +0,0 @@
-from .Chat_message import *
-from .OpenRouterProvider import *
-from .LLMs import LLMModel
-
-from dotenv import load_dotenv
-import time
-import json
-
-_base_system_prompt = """
-It's [TIME] today.
-You are an intelligent AI. You must follow the system_instruction below, which is provided by the user.
-
-<system_instruction>
-[SYSTEM_INSTRUCTION]
-</system_instruction>
-"""
-
-class Chatbot_manager:
-    def __init__(self, system_prompt:str="", tools:list[tool_model]=[]) -> None:
-        load_dotenv()
-
-        self._memory: list[Chat_message] = []
-        self.tools: list[tool_model] = tools
-        self.set_system_prompt(prompt=system_prompt)
-
-    def set_system_prompt(self, prompt: str):
-        m, d, y = time.localtime()[:3]
-
-        system_prompt = _base_system_prompt
-        system_prompt = system_prompt.replace("[TIME]", f"{m}/{d}/{y}")
-        system_prompt = system_prompt.replace("[SYSTEM_INSTRUCTION]", prompt)
-
-        self._system_prompt = Chat_message(text=system_prompt, role=Role.system)
-
-    def clear_memory(self):
-        self._memory = []
-
-    def print_memory(self):
-        print("\n--------------------- Chatbot memory ---------------------")
-        print(f"system : {self._system_prompt.text}")
-
-        for message in self._memory:
-            role = message.role.value
-            text = message.text.strip()
-
-            reset_code = "\033[0m"
-            role_str = f"{role.ljust(9)}:"
-            indent = " " * len(role_str)
-            lines = text.splitlines()
-
-            if role == "user":
-                color_code = "\033[94m"  # blue
-                if lines:
-                    print(f"{color_code}{role_str}{reset_code} {lines[0]}")
-                    for line in lines[1:]:
-                        print(f"{color_code}{indent}{reset_code} {line}")
-                else:
-                    print(f"{color_code}{role_str}{reset_code}")
-
-            elif role == "assistant":
-                color_code = "\033[92m"  # green
-                if lines:
-                    print(f"{color_code}{role_str}{reset_code} {lines[0]}")
-                    for line in lines[1:]:
-                        print(f"{color_code}{indent}{reset_code} {line}")
-                else:
-                    print(f"{color_code}{role_str}{reset_code}")
-
-            elif role == "tool":
-                color_code = "\033[93m"  # orange
-                print(f"{color_code}{role_str}{reset_code} ", end="")
-
-                for tool in message.tool_calls:
-                    print(f"{tool.name}({json.loads(tool.arguments)}), ", end="")
-                print()
-
-            else:
-                color_code = "\033[0m"  # default color
-                print("Print error: The role is invalid.")
-
-        print("----------------------------------------------------------\n")
-
-    def invoke(self, model: LLMModel, query: Chat_message, tools: list[tool_model]=[], provider:ProviderConfig=None) -> Chat_message:
-        self._memory.append(query)
-        client = OpenRouterProvider()
-        reply = client.invoke(
-            model=model,
-            system_prompt=self._system_prompt,
-            querys=self._memory,
-            tools=self.tools + tools,
-            provider=provider
-        )
-        reply.answeredBy = model
-        self._memory.append(reply)
-
-        if reply.tool_calls:
-            for requested_tool in reply.tool_calls:
-                args = requested_tool.arguments
-                if isinstance(args, str):
-                    args = json.loads(args)
-
-                for tool in (self.tools + tools):
-                    if tool.name == requested_tool.name:
-                        result = tool(**args)
-                        requested_tool.result = result
-                        break
-                else:
-                    print("Tool Not found", requested_tool.name)
-                    return reply
-
-            reply = client.invoke(
-                model=model,
-                system_prompt=self._system_prompt,
-                querys=self._memory,
-                tools=self.tools + tools,
-                provider=provider
-            )
-
-            reply.answeredBy = model
-            self._memory.append(reply)
-
-        return reply
-
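The deleted `Chatbot_manager` above owned the whole tool loop: `invoke()` appends the query to memory, calls the model, runs any requested tools, then re-invokes the model with the tool results. A minimal usage sketch against that 0.0.5 API (the `get_time` tool is a hypothetical example, not part of the package):

```python
import datetime

from OpenRouterProvider.Chatbot_manager import Chat_message, Chatbot_manager
from OpenRouterProvider.LLMs import gpt_4o_mini
from OpenRouterProvider.Tool import tool_model

@tool_model
def get_time():
    """Return the current local time as an ISO-8601 string."""
    return datetime.datetime.now().isoformat()

ai = Chatbot_manager(system_prompt="Answer briefly.")
# Per-call tools are merged with the manager's own tools inside invoke();
# if the model requests get_time, invoke() runs it and re-queries the model.
reply = ai.invoke(model=gpt_4o_mini,
                  query=Chat_message(text="What time is it?"),
                  tools=[get_time])
print(reply.text)
```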
OpenRouterProvider/OpenRouterProvider.py
DELETED

@@ -1,112 +0,0 @@
-import logging
-from .Chat_message import *
-from .Tool import tool_model
-from .LLMs import *
-
-from openai import OpenAI
-from dotenv import load_dotenv
-import os
-from dataclasses import dataclass, field, asdict
-from typing import List, Optional, Literal
-import json
-
-# Show errors only, with detailed tracebacks
-logging.basicConfig(level=logging.ERROR, format="%(asctime)s - %(levelname)s - %(message)s")
-logger = logging.getLogger(__name__)
-
-
-@dataclass
-class ProviderConfig:
-    order: Optional[List[str]] = None
-    allow_fallbacks: bool = None
-    require_parameters: bool = None
-    data_collection: Literal["allow", "deny"] = None
-    only: Optional[List[str]] = None
-    ignore: Optional[List[str]] = None
-    quantizations: Optional[List[str]] = None
-    sort: Optional[Literal["price", "throughput"]] = None
-    max_price: Optional[dict] = None
-
-    def to_dict(self) -> dict:
-        return {k: v for k, v in asdict(self).items() if v is not None}
-
-
-class OpenRouterProvider:
-    def __init__(self) -> None:
-        load_dotenv()
-        api_key = os.getenv("OPENROUTER_API_KEY")
-        if not api_key:
-            logger.error("OPENROUTER_API_KEY is not set in environment variables.")
-        self.client = OpenAI(
-            base_url="https://openrouter.ai/api/v1",
-            api_key=api_key,
-        )
-
-    def make_prompt(self, system_prompt: Chat_message,
-                    querys: list[Chat_message]) -> list[dict]:
-        messages = [{"role": "system", "content": system_prompt.text}]
-
-        for query in querys:
-            if query.role == Role.user:
-                if query.images is None:
-                    messages.append({"role": "user", "content": query.text})
-                else:
-                    content = [{"type": "text", "text": query.text}]
-                    for img in query.images[:50]:
-                        content.append(
-                            {"type": "image_url",
-                             "image_url": {"url": f"data:image/jpeg;base64,{img}"}})
-                    messages.append({"role": "user", "content": content})
-
-            elif query.role == Role.ai or query.role == Role.tool:
-                assistant_msg = {"role": "assistant"}
-                assistant_msg["content"] = query.text or None
-
-                if query.tool_calls:
-                    assistant_msg["tool_calls"] = [
-                        {
-                            "id": str(t.id),
-                            "type": "function",
-                            "function": {
-                                "name": t.name,
-                                "arguments": t.arguments
-                            }
-                        }
-                        for t in query.tool_calls
-                    ]
-                messages.append(assistant_msg)
-
-                for t in query.tool_calls:
-                    messages.append({
-                        "role": "tool",
-                        "tool_call_id": str(t.id),
-                        "content": str(t.result)
-                    })
-
-        return messages
-
-    def invoke(self, model: LLMModel, system_prompt: Chat_message, querys: list[Chat_message], tools: list[tool_model] = [], provider: ProviderConfig = None) -> Chat_message:
-        try:
-            messages = self.make_prompt(system_prompt, querys)
-
-            tool_defs = [tool.tool_definition for tool in tools] if tools else None
-            provider_dict = provider.to_dict() if provider else None
-
-            response = self.client.chat.completions.create(
-                model=model.name,
-                messages=messages,
-                tools=tool_defs,
-                extra_body={"provider": provider_dict}
-            )
-
-            reply = Chat_message(text=response.choices[0].message.content, role=Role.ai, raw_response=response)
-
-            if response.choices[0].message.tool_calls:
-                reply.role = Role.tool
-                for tool in response.choices[0].message.tool_calls:
-                    reply.tool_calls.append(ToolCall(id=tool.id, name=tool.function.name, arguments=tool.function.arguments))
-            return reply
-
-        except Exception as e:
-            logger.exception(f"An error occurred while invoking the model: {e.__class__.__name__}: {str(e)}")
-            return Chat_message(text="Fail to get response. Please see the error message.", role=Role.ai, raw_response=None)
__init__.py
DELETED

File without changes
openrouter_provider-0.0.5.dist-info/METADATA
DELETED

@@ -1,232 +0,0 @@
-Metadata-Version: 2.4
-Name: openrouter-provider
-Version: 0.0.5
-Summary: This is an unofficial wrapper of OpenRouter.
-Author-email: Keisuke Miyamto <aichiboyhighschool@gmail.com>
-Requires-Python: >=3.7
-Description-Content-Type: text/markdown
-Requires-Dist: annotated-types
-Requires-Dist: anyio
-Requires-Dist: certifi
-Requires-Dist: distro
-Requires-Dist: h11
-Requires-Dist: httpcore
-Requires-Dist: httpx
-Requires-Dist: idna
-Requires-Dist: jiter
-Requires-Dist: openai
-Requires-Dist: pillow
-Requires-Dist: pydantic
-Requires-Dist: pydantic_core
-Requires-Dist: python-dotenv
-Requires-Dist: sniffio
-Requires-Dist: tqdm
-Requires-Dist: typing-inspection
-Requires-Dist: typing_extensions
-
-## Introduction
-
-Welcome to **openrouter-provider**, an unofficial Python wrapper for the OpenRouter API. This library lets you easily integrate with OpenRouter models, manage chat sessions, process images, and call tools within your Python application.
-
-
-## Features
-
-* Simple chat interface with system, user, assistant, and tool roles
-* Automatic image resizing and Base64 encoding
-* Built-in tool decorator for defining custom functions
-
-
-## Installation
-
-### From PyPI
-
-```bash
-pip3 install openrouter-provider
-```
-
-### From Source
-
-```bash
-git clone https://github.com/yourusername/openrouter-provider.git
-cd openrouter-provider
-pip3 install .
-```
-
-
-
-## Configuration
-
-1. Create a `.env` file in your project root.
-2. Add your OpenRouter API key:
-
-```bash
-OPENROUTER_API_KEY=your_api_key_here
-```
-
-
-
-## Usage
-
-### Basic chat bot
-Chat history is automatically sent by Chatbot_manager. If you want to delete the chat history, use the `clear_memory` method.
-
-```python
-from OpenRouterProvider.Chatbot_manager import Chat_message, Chatbot_manager
-from OpenRouterProvider.LLMs import gpt_4o_mini
-
-# Declare chat bot
-ai = Chatbot_manager(system_prompt="Please answer in English.")
-
-# Send query
-query = Chat_message(text="Introduce yourself, please.")
-response = ai.invoke(model=gpt_4o_mini, query=query)
-print(response.text)
-
-# Send the next query. Chatbot_manager automatically handles chat history.
-query = Chat_message(text="Tell me a short story.")
-response = ai.invoke(model=gpt_4o_mini, query=query)
-print(response.text)
-
-# Print all chat history
-ai.print_memory()
-
-# Delete all chat history
-ai.clear_memory()
-```
-
-### Chat bot with images
-You can use images in the chat.
-
-```python
-from OpenRouterProvider.Chatbot_manager import Chat_message, Chatbot_manager
-from OpenRouterProvider.LLMs import gpt_4o_mini
-from PIL import Image
-
-dog = Image.open("dog.jpg")
-cat = Image.open("cat.jpg")
-
-# Send query with images
-ai = Chatbot_manager(system_prompt="Please answer in English.")
-query = Chat_message(text="What can you see in the images?", images=[dog, cat])
-response = ai.invoke(model=gpt_4o_mini, query=query)
-print(response.text)
-```
-
-### With tools
-
-Use the `@tool_model` decorator to expose Python functions as callable tools in the chat. Tool calls are processed automatically by Chatbot_manager, so you don't need to handle them yourself.
-
-```python
-from OpenRouterProvider.Chatbot_manager import Chat_message, Chatbot_manager
-from OpenRouterProvider.LLMs import gpt_4o_mini
-from OpenRouterProvider.Tool import tool_model
-
-@tool_model
-def get_user_info():
-    """
-    Return user's personal info: name, age, and address.
-    """
-    return "name: Alice\nage: 30\naddress: Wonderland"
-
-ai = Chatbot_manager(system_prompt="Please answer in English.", tools=[get_user_info])
-query = Chat_message(text="What is the name, age, and address of the user?")
-response = ai.invoke(model=gpt_4o_mini, query=query)
-ai.print_memory()
-```
-
-## Advanced Usage
-### Prebuilt and Custom Model Usage
-
-You can use the prebuilt models or easily declare your own custom models.
-This library provides many ready-to-use models from OpenAI, Anthropic, Google, and others.
-
-```python
-from OpenRouterProvider.Chatbot_manager import Chat_message, Chatbot_manager
-from OpenRouterProvider.LLMs import gpt_4o, claude_3_7_sonnet
-
-# Use OpenAI GPT-4o
-ai = Chatbot_manager(system_prompt="Please answer in English.")
-query = Chat_message(text="Tell me a joke.")
-response = ai.invoke(model=gpt_4o, query=query)
-print(response.text)
-
-# Use Anthropic Claude 3.7 Sonnet
-query = Chat_message(text="Summarize the story of Hamlet.")
-response = ai.invoke(model=claude_3_7_sonnet, query=query)
-print(response.text)
-```
-
-Available prebuilt models include:
-
-#### **OpenAI**
-
-* `gpt_4o`
-* `gpt_4o_mini`
-* `gpt_4_1`
-* `gpt_4_1_mini`
-* `gpt_4_1_nano`
-* `o4_mini`
-* `o4_mini_high`
-* `o3`
-
-#### **Anthropic**
-
-* `claude_3_7_sonnet`
-* `claude_3_7_sonnet_thinking`
-* `claude_3_5_haiku`
-
-#### **Google**
-
-* `gemini_2_0_flash`
-* `gemini_2_0_flash_free`
-* `gemini_2_5_flash`
-* `gemini_2_5_flash_thinking`
-* `gemini_2_5_pro`
-
-#### **Deepseek**
-
-* `deepseek_v3_free`
-* `deepseek_v3`
-* `deepseek_r1_free`
-* `deepseek_r1`
-
-#### **xAI**
-
-* `grok_3_mini`
-* `grok_3`
-
-#### **Microsoft**
-
-* `mai_ds_r1_free`
-
-#### **Others**
-
-* `llama_4_maverick_free`
-* `llama_4_scout`
-* `mistral_small_3_1_24B_free`
-
-All of them are instances of `LLMModel`, which includes cost and model name settings.
-
-### Using Custom Models
-
-You can define and use your own custom model if it's available on OpenRouter.
-
-```python
-from OpenRouterProvider.Chatbot_manager import Chat_message, Chatbot_manager
-from OpenRouterProvider.LLMs import LLMModel
-
-# Declare a custom model
-my_model = LLMModel(
-    name="my-org/my-custom-model",  # Model name for OpenRouter
-    input_cost=0.5,   # Optional: cost per 1M input tokens
-    output_cost=2.0   # Optional: cost per 1M output tokens
-)
-
-# Use the custom model
-ai = Chatbot_manager(system_prompt="Please answer in English.")
-query = Chat_message(text="Explain black holes simply.")
-response = ai.invoke(model=my_model, query=query)
-print(response.text)
-```
-
-You only need to know the model name as used on OpenRouter. `input_cost` and `output_cost` are optional and are not currently used by this library; please wait for a future update.
openrouter_provider-0.0.5.dist-info/RECORD
DELETED

@@ -1,10 +0,0 @@
-__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-OpenRouterProvider/Chat_message.py,sha256=lQd8bFp7OHOgeOrcpcVZMdkV2Mb4reUsv5Ixo6WecYY,4424
-OpenRouterProvider/Chatbot_manager.py,sha256=EpLWhxx7xnRa-q7xqP2Ur9dmYb9Mzv_UF6BChwpcbYk,4357
-OpenRouterProvider/LLMs.py,sha256=-0ELd6fqmdDvsdaPIElRsluiK85-Y6USwvQb2b4M8TA,2607
-OpenRouterProvider/OpenRouterProvider.py,sha256=4k87D5kKNPgRJ-7qMv9oPWY7P2psrNaNOALmyPMuNsw,4397
-OpenRouterProvider/Tool.py,sha256=QeeWOD2oaYjB9tjF-Jvcjd_G_qSUIuKwFgyh20Ne06I,2010
-openrouter_provider-0.0.5.dist-info/METADATA,sha256=_H9lXm0ohRX57GdvJMJNIyn6pyWblQQTDHOWv7EM6GE,5995
-openrouter_provider-0.0.5.dist-info/WHEEL,sha256=0CuiUZ_p9E4cD6NyLD6UG80LBXYyiSYZOKDm5lp32xk,91
-openrouter_provider-0.0.5.dist-info/top_level.txt,sha256=I5BMEzkQFEnEYTqOY1Ktmnp7r1rrZQyeWdclKyyyHKs,28
-openrouter_provider-0.0.5.dist-info/RECORD,,