openrouter-provider 0.0.5__tar.gz → 0.0.7__tar.gz

This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.

Files changed (16)
  1. {openrouter_provider-0.0.5 → openrouter_provider-0.0.7}/PKG-INFO +1 -1
  2. {openrouter_provider-0.0.5 → openrouter_provider-0.0.7}/pyproject.toml +1 -1
  3. {openrouter_provider-0.0.5 → openrouter_provider-0.0.7}/src/OpenRouterProvider/Chatbot_manager.py +82 -1
  4. openrouter_provider-0.0.7/src/OpenRouterProvider/OpenRouterProvider.py +202 -0
  5. {openrouter_provider-0.0.5 → openrouter_provider-0.0.7}/src/openrouter_provider.egg-info/PKG-INFO +1 -1
  6. openrouter_provider-0.0.5/src/OpenRouterProvider/OpenRouterProvider.py +0 -112
  7. {openrouter_provider-0.0.5 → openrouter_provider-0.0.7}/README.md +0 -0
  8. {openrouter_provider-0.0.5 → openrouter_provider-0.0.7}/setup.cfg +0 -0
  9. {openrouter_provider-0.0.5 → openrouter_provider-0.0.7}/src/OpenRouterProvider/Chat_message.py +0 -0
  10. {openrouter_provider-0.0.5 → openrouter_provider-0.0.7}/src/OpenRouterProvider/LLMs.py +0 -0
  11. {openrouter_provider-0.0.5 → openrouter_provider-0.0.7}/src/OpenRouterProvider/Tool.py +0 -0
  12. {openrouter_provider-0.0.5 → openrouter_provider-0.0.7}/src/__init__.py +0 -0
  13. {openrouter_provider-0.0.5 → openrouter_provider-0.0.7}/src/openrouter_provider.egg-info/SOURCES.txt +0 -0
  14. {openrouter_provider-0.0.5 → openrouter_provider-0.0.7}/src/openrouter_provider.egg-info/dependency_links.txt +0 -0
  15. {openrouter_provider-0.0.5 → openrouter_provider-0.0.7}/src/openrouter_provider.egg-info/requires.txt +0 -0
  16. {openrouter_provider-0.0.5 → openrouter_provider-0.0.7}/src/openrouter_provider.egg-info/top_level.txt +0 -0
--- openrouter_provider-0.0.5/PKG-INFO
+++ openrouter_provider-0.0.7/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openrouter-provider
-Version: 0.0.5
+Version: 0.0.7
 Summary: This is an unofficial wrapper of OpenRouter.
 Author-email: Keisuke Miyamto <aichiboyhighschool@gmail.com>
 Requires-Python: >=3.7
--- openrouter_provider-0.0.5/pyproject.toml
+++ openrouter_provider-0.0.7/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openrouter-provider"
-version = "0.0.5"
+version = "0.0.7"
 description = "This is an unofficial wrapper of OpenRouter."
 readme = "README.md"
 requires-python = ">=3.7"
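
The only change to pyproject.toml is the version bump, so consumers just repin the release (assuming the usual PyPI install path for this package name):

    pip install --upgrade openrouter-provider==0.0.7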
--- openrouter_provider-0.0.5/src/OpenRouterProvider/Chatbot_manager.py
+++ openrouter_provider-0.0.7/src/OpenRouterProvider/Chatbot_manager.py
@@ -5,6 +5,8 @@ from .LLMs import LLMModel
 from dotenv import load_dotenv
 import time
 import json
+from typing import Iterator, AsyncIterator
+
 
 _base_system_prompt = """
 It's [TIME] today.
@@ -120,4 +122,83 @@ class Chatbot_manager:
         self._memory.append(reply)
 
         return reply
-
+
+    def invoke_stream(self, model: LLMModel, query: Chat_message, tools: list[tool_model]=[], provider:ProviderConfig=None) -> Iterator[str]:
+        self._memory.append(query)
+        client = OpenRouterProvider()
+        generator = client.invoke_stream(
+            model=model,
+            system_prompt=self._system_prompt,
+            querys=self._memory,
+            tools=self.tools + tools,
+            provider=provider
+        )
+
+        text = ""
+        for token in generator:
+            text += token.choices[0].delta.content
+            yield token.choices[0].delta.content
+
+        self._memory.append(Chat_message(text=text, role=Role.ai, answerdBy=LLMModel))
+
+    async def async_invoke(self, model: LLMModel, query: Chat_message, tools: list[tool_model] = [], provider: ProviderConfig = None) -> Chat_message:
+        self._memory.append(query)
+        client = OpenRouterProvider()
+        reply = await client.async_invoke(
+            model=model,
+            system_prompt=self._system_prompt,
+            querys=self._memory,
+            tools=self.tools + tools,
+            provider=provider
+        )
+        reply.answeredBy = model
+        self._memory.append(reply)
+
+        if reply.tool_calls:
+            for requested_tool in reply.tool_calls:
+                args = requested_tool.arguments
+                if isinstance(args, str):
+                    args = json.loads(args)
+
+                for tool in (self.tools + tools):
+                    if tool.name == requested_tool.name:
+                        result = tool(**args)
+                        requested_tool.result = result
+                        break
+                else:
+                    print("Tool Not found", requested_tool.name)
+                    return reply
+
+            reply = await client.async_invoke(
+                model=model,
+                system_prompt=self._system_prompt,
+                querys=self._memory,
+                tools=self.tools + tools,
+                provider=provider
+            )
+            reply.answeredBy = model
+            self._memory.append(reply)
+
+        return reply
+
+    async def async_invoke_stream(self, model: LLMModel, query: Chat_message, tools: list[tool_model] = [], provider: ProviderConfig = None) -> AsyncIterator[str]:
+        self._memory.append(query)
+        client = OpenRouterProvider()
+
+        stream = client.async_invoke_stream(
+            model=model,
+            system_prompt=self._system_prompt,
+            querys=self._memory,
+            tools=self.tools + tools,
+            provider=provider
+        )
+
+        text = ""
+        async for chunk in stream:
+            delta = chunk.choices[0].delta.content or ""
+            text += delta
+            yield delta
+
+        self._memory.append(Chat_message(text=text, role=Role.ai, answerdBy=model))
+
+
--- /dev/null
+++ openrouter_provider-0.0.7/src/OpenRouterProvider/OpenRouterProvider.py
@@ -0,0 +1,202 @@
+import logging
+from .Chat_message import *
+from .Tool import tool_model
+from .LLMs import *
+
+from openai import OpenAI, AsyncOpenAI
+from openai.types.chat import ChatCompletionChunk
+from dotenv import load_dotenv
+import os, time
+from dataclasses import dataclass, field, asdict
+from typing import List, Optional, Literal, Iterator, AsyncIterator
+from pprint import pprint
+
+# Show errors only, with a detailed traceback
+logging.basicConfig(level=logging.ERROR, format="%(asctime)s - %(levelname)s - %(message)s")
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class ProviderConfig:
+    order: Optional[List[str]] = None
+    allow_fallbacks: bool = None
+    require_parameters: bool = None
+    data_collection: Literal["allow", "deny"] = None
+    only: Optional[List[str]] = None
+    ignore: Optional[List[str]] = None
+    quantizations: Optional[List[str]] = None
+    sort: Optional[Literal["price", "throughput"]] = None
+    max_price: Optional[dict] = None
+
+    def to_dict(self) -> dict:
+        return {k: v for k, v in asdict(self).items() if v is not None}
+
+
+class OpenRouterProvider:
+    def __init__(self) -> None:
+        load_dotenv()
+        api_key = os.getenv("OPENROUTER_API_KEY")
+        if not api_key:
+            logger.error("OPENROUTER_API_KEY is not set in environment variables.")
+        self.client = OpenAI(
+            base_url="https://openrouter.ai/api/v1",
+            api_key=api_key,
+        )
+        self.async_client = AsyncOpenAI(
+            base_url="https://openrouter.ai/api/v1",
+            api_key=api_key,
+        )
+
+    def make_prompt(self, system_prompt: Chat_message,
+                    querys: list[Chat_message]) -> list[dict]:
+        messages = [{"role": "system", "content": system_prompt.text}]
+
+        for query in querys:
+            if query.role == Role.user:
+                if query.images is None:
+                    messages.append({"role": "user", "content": query.text})
+                else:
+                    content = [{"type": "text", "text": query.text}]
+                    for img in query.images[:50]:
+                        content.append(
+                            {"type": "image_url",
+                             "image_url": {"url": f"data:image/jpeg;base64,{img}"}})
+                    messages.append({"role": "user", "content": content})
+
+            elif query.role == Role.ai or query.role == Role.tool:
+                assistant_msg = {"role": "assistant"}
+                assistant_msg["content"] = query.text or None
+
+                if query.tool_calls:
+                    assistant_msg["tool_calls"] = [
+                        {
+                            "id": str(t.id),
+                            "type": "function",
+                            "function": {
+                                "name": t.name,
+                                "arguments": t.arguments
+                            }
+                        }
+                        for t in query.tool_calls
+                    ]
+                messages.append(assistant_msg)
+
+                for t in query.tool_calls:
+                    messages.append({
+                        "role": "tool",
+                        "tool_call_id": str(t.id),
+                        "content": str(t.result)
+                    })
+
+        return messages
+
+    def invoke(self, model: LLMModel, system_prompt: Chat_message, querys: list[Chat_message], tools: list[tool_model] = [], provider: ProviderConfig = None) -> Chat_message:
+        try:
+            messages = self.make_prompt(system_prompt, querys)
+
+            tool_defs = [tool.tool_definition for tool in tools] if tools else None
+            provider_dict = provider.to_dict() if provider else None
+
+            response = self.client.chat.completions.create(
+                model=model.name,
+                messages=messages,
+                tools=tool_defs,
+                extra_body={"provider": provider_dict}
+            )
+
+            reply = Chat_message(text=response.choices[0].message.content, role=Role.ai, raw_response=response)
+
+            if response.choices[0].message.tool_calls:
+                reply.role = Role.tool
+                for tool in response.choices[0].message.tool_calls:
+                    reply.tool_calls.append(ToolCall(id=tool.id, name=tool.function.name, arguments=tool.function.arguments))
+            return reply
+
+        except Exception as e:
+            logger.exception(f"An error occurred while invoking the model: {e.__class__.__name__}: {str(e)}")
+            return Chat_message(text="Fail to get response. Please see the error message.", role=Role.ai, raw_response=None)
+
+    def invoke_stream(self, model: LLMModel, system_prompt: Chat_message, querys: list[Chat_message], tools: list[tool_model] = [], provider: ProviderConfig = None) -> Iterator[ChatCompletionChunk]:
+        # chunk examples:
+        # ChatCompletionChunk(id='gen-1746748260-mdKZLTs9QY7MmUxWKb8V', choices=[Choice(delta=ChoiceDelta(content='!', function_call=None, refusal=None, role='assistant', tool_calls=None), finish_reason=None, index=0, logprobs=None, native_finish_reason=None)], created=1746748260, model='openai/gpt-4o-mini', object='chat.completion.chunk', service_tier=None, system_fingerprint='fp_e2f22fdd96', usage=None, provider='OpenAI')
+        # ChatCompletionChunk(id='gen-1746748260-mdKZLTs9QY7MmUxWKb8V', choices=[Choice(delta=ChoiceDelta(content='', function_call=None, refusal=None, role='assistant', tool_calls=None), finish_reason='stop', index=0, logprobs=None, native_finish_reason='stop')], created=1746748260, model='openai/gpt-4o-mini', object='chat.completion.chunk', service_tier=None, system_fingerprint='fp_e2f22fdd96', usage=None, provider='OpenAI')
+        # ChatCompletionChunk(id='gen-1746748260-mdKZLTs9QY7MmUxWKb8V', choices=[Choice(delta=ChoiceDelta(content='', function_call=None, refusal=None, role='assistant', tool_calls=None), finish_reason=None, index=0, logprobs=None, native_finish_reason=None)], created=1746748260, model='openai/gpt-4o-mini', object='chat.completion.chunk', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=54, prompt_tokens=61, total_tokens=115, completion_tokens_details=CompletionTokensDetails(reasoning_tokens=0), prompt_tokens_details={'cached_tokens': 0}), provider='OpenAI')
+
+        try:
+            messages = self.make_prompt(system_prompt, querys)
+
+            tool_defs = [tool.tool_definition for tool in tools] if tools else None
+            provider_dict = provider.to_dict() if provider else None
+
+            response = self.client.chat.completions.create(
+                model=model.name,
+                messages=messages,
+                tools=tool_defs,
+                extra_body={"provider": provider_dict},
+                stream=True
+            )
+
+            return response
+
+        except Exception as e:
+            logger.exception(f"An error occurred while invoking the model: {e.__class__.__name__}: {str(e)}")
+            return Chat_message(text="Fail to get response. Please see the error message.", role=Role.ai, raw_response=None)
+
+    async def async_invoke(self, model: LLMModel, system_prompt: Chat_message, querys: list[Chat_message], tools: list[tool_model] = [], provider: ProviderConfig = None) -> Chat_message:
+        try:
+            messages = self.make_prompt(system_prompt, querys)
+
+            tool_defs = [tool.tool_definition for tool in tools] if tools else None
+            provider_dict = provider.to_dict() if provider else None
+
+            response = await self.async_client.chat.completions.create(
+                model=model.name,
+                messages=messages,
+                tools=tool_defs,
+                extra_body={"provider": provider_dict}
+            )
+
+            reply = Chat_message(text=response.choices[0].message.content, role=Role.ai, raw_response=response)
+
+            if response.choices[0].message.tool_calls:
+                reply.role = Role.tool
+                for tool in response.choices[0].message.tool_calls:
+                    reply.tool_calls.append(ToolCall(id=tool.id, name=tool.function.name, arguments=tool.function.arguments))
+            return reply
+
+        except Exception as e:
+            logger.exception(f"An error occurred while asynchronously invoking the model: {e.__class__.__name__}: {str(e)}")
+            return Chat_message(text="Fail to get response. Please see the error message.", role=Role.ai, raw_response=None)
+
+    async def async_invoke_stream(
+        self,
+        model: LLMModel,
+        system_prompt: Chat_message,
+        querys: list[Chat_message],
+        tools: list[tool_model] = [],
+        provider: ProviderConfig = None
+    ) -> AsyncIterator[ChatCompletionChunk]:
+        try:
+            messages = self.make_prompt(system_prompt, querys)
+
+            tool_defs = [tool.tool_definition for tool in tools] if tools else None
+            provider_dict = provider.to_dict() if provider else None
+
+            response = await self.async_client.chat.completions.create(
+                model=model.name,
+                messages=messages,
+                tools=tool_defs,
+                extra_body={"provider": provider_dict},
+                stream=True
+            )
+
+            async for chunk in response:
+                yield chunk
+
+        except Exception as e:
+            logger.exception(f"An error occurred while asynchronously streaming the model: {e.__class__.__name__}: {str(e)}")
+            return
+
+
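
ProviderConfig.to_dict() drops unset fields, and the result is passed through extra_body={"provider": ...}, which matches OpenRouter's provider-routing request object. Below is a sketch of driving the new class directly, assuming OPENROUTER_API_KEY is set in the environment and reusing the hypothetical LLMModel constructor and import paths from the earlier sketch:

    from OpenRouterProvider.OpenRouterProvider import OpenRouterProvider, ProviderConfig
    from OpenRouterProvider.Chat_message import Chat_message, Role
    from OpenRouterProvider.LLMs import LLMModel  # hypothetical constructor, as above

    # Pin routing to a single provider, cheapest endpoint first, no fallbacks.
    config = ProviderConfig(order=["OpenAI"], allow_fallbacks=False, sort="price")

    client = OpenRouterProvider()
    reply = client.invoke(
        model=LLMModel(name="openai/gpt-4o-mini"),
        # make_prompt only reads .text from the system prompt, so the role
        # value passed here is immaterial.
        system_prompt=Chat_message(text="You are a helpful assistant.", role=Role.user),
        querys=[Chat_message(text="Hello!", role=Role.user)],
        provider=config,
    )
    print(reply.text)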
--- openrouter_provider-0.0.5/src/openrouter_provider.egg-info/PKG-INFO
+++ openrouter_provider-0.0.7/src/openrouter_provider.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openrouter-provider
-Version: 0.0.5
+Version: 0.0.7
 Summary: This is an unofficial wrapper of OpenRouter.
 Author-email: Keisuke Miyamto <aichiboyhighschool@gmail.com>
 Requires-Python: >=3.7
--- openrouter_provider-0.0.5/src/OpenRouterProvider/OpenRouterProvider.py
+++ /dev/null
@@ -1,112 +0,0 @@
-import logging
-from .Chat_message import *
-from .Tool import tool_model
-from .LLMs import *
-
-from openai import OpenAI
-from dotenv import load_dotenv
-import os
-from dataclasses import dataclass, field, asdict
-from typing import List, Optional, Literal
-import json
-
-# Show errors only, with a detailed traceback
-logging.basicConfig(level=logging.ERROR, format="%(asctime)s - %(levelname)s - %(message)s")
-logger = logging.getLogger(__name__)
-
-
-@dataclass
-class ProviderConfig:
-    order: Optional[List[str]] = None
-    allow_fallbacks: bool = None
-    require_parameters: bool = None
-    data_collection: Literal["allow", "deny"] = None
-    only: Optional[List[str]] = None
-    ignore: Optional[List[str]] = None
-    quantizations: Optional[List[str]] = None
-    sort: Optional[Literal["price", "throughput"]] = None
-    max_price: Optional[dict] = None
-
-    def to_dict(self) -> dict:
-        return {k: v for k, v in asdict(self).items() if v is not None}
-
-
-class OpenRouterProvider:
-    def __init__(self) -> None:
-        load_dotenv()
-        api_key = os.getenv("OPENROUTER_API_KEY")
-        if not api_key:
-            logger.error("OPENROUTER_API_KEY is not set in environment variables.")
-        self.client = OpenAI(
-            base_url="https://openrouter.ai/api/v1",
-            api_key=api_key,
-        )
-
-    def make_prompt(self, system_prompt: Chat_message,
-                    querys: list[Chat_message]) -> list[dict]:
-        messages = [{"role": "system", "content": system_prompt.text}]
-
-        for query in querys:
-            if query.role == Role.user:
-                if query.images is None:
-                    messages.append({"role": "user", "content": query.text})
-                else:
-                    content = [{"type": "text", "text": query.text}]
-                    for img in query.images[:50]:
-                        content.append(
-                            {"type": "image_url",
-                             "image_url": {"url": f"data:image/jpeg;base64,{img}"}})
-                    messages.append({"role": "user", "content": content})
-
-            elif query.role == Role.ai or query.role == Role.tool:
-                assistant_msg = {"role": "assistant"}
-                assistant_msg["content"] = query.text or None
-
-                if query.tool_calls:
-                    assistant_msg["tool_calls"] = [
-                        {
-                            "id": str(t.id),
-                            "type": "function",
-                            "function": {
-                                "name": t.name,
-                                "arguments": t.arguments
-                            }
-                        }
-                        for t in query.tool_calls
-                    ]
-                messages.append(assistant_msg)
-
-                for t in query.tool_calls:
-                    messages.append({
-                        "role": "tool",
-                        "tool_call_id": str(t.id),
-                        "content": str(t.result)
-                    })
-
-        return messages
-
-    def invoke(self, model: LLMModel, system_prompt: Chat_message, querys: list[Chat_message], tools: list[tool_model] = [], provider: ProviderConfig = None) -> Chat_message:
-        try:
-            messages = self.make_prompt(system_prompt, querys)
-
-            tool_defs = [tool.tool_definition for tool in tools] if tools else None
-            provider_dict = provider.to_dict() if provider else None
-
-            response = self.client.chat.completions.create(
-                model=model.name,
-                messages=messages,
-                tools=tool_defs,
-                extra_body={"provider": provider_dict}
-            )
-
-            reply = Chat_message(text=response.choices[0].message.content, role=Role.ai, raw_response=response)
-
-            if response.choices[0].message.tool_calls:
-                reply.role = Role.tool
-                for tool in response.choices[0].message.tool_calls:
-                    reply.tool_calls.append(ToolCall(id=tool.id, name=tool.function.name, arguments=tool.function.arguments))
-            return reply
-
-        except Exception as e:
-            logger.exception(f"An error occurred while invoking the model: {e.__class__.__name__}: {str(e)}")
-            return Chat_message(text="Fail to get response. Please see the error message.", role=Role.ai, raw_response=None)