openrouter-provider 0.0.5-py3-none-any.whl → 0.0.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of openrouter-provider might be problematic.

OpenRouterProvider/Chatbot_manager.py
@@ -5,6 +5,8 @@ from .LLMs import LLMModel
 from dotenv import load_dotenv
 import time
 import json
+from typing import Iterator, AsyncIterator
+
 
 _base_system_prompt = """
 It's [TIME] today.
@@ -120,4 +122,83 @@ class Chatbot_manager:
         self._memory.append(reply)
 
         return reply
-
+
+    def invoke_stream(self, model: LLMModel, query: Chat_message, tools: list[tool_model] = [], provider: ProviderConfig = None) -> Iterator[str]:
+        self._memory.append(query)
+        client = OpenRouterProvider()
+        generator = client.invoke_stream(
+            model=model,
+            system_prompt=self._system_prompt,
+            querys=self._memory,
+            tools=self.tools + tools,
+            provider=provider
+        )
+
+        text = ""
+        for token in generator:
+            # Tool-call and final chunks can carry a None delta; treat them as "".
+            delta = token.choices[0].delta.content or ""
+            text += delta
+            yield delta
+
+        self._memory.append(Chat_message(text=text, role=Role.ai, answerdBy=model))
+
+    async def async_invoke(self, model: LLMModel, query: Chat_message, tools: list[tool_model] = [], provider: ProviderConfig = None) -> Chat_message:
+        self._memory.append(query)
+        client = OpenRouterProvider()
+        reply = await client.async_invoke(
+            model=model,
+            system_prompt=self._system_prompt,
+            querys=self._memory,
+            tools=self.tools + tools,
+            provider=provider
+        )
+        reply.answeredBy = model
+        self._memory.append(reply)
+
+        if reply.tool_calls:
+            # Execute each requested tool, then re-invoke the model with the results.
+            for requested_tool in reply.tool_calls:
+                args = requested_tool.arguments
+                if isinstance(args, str):
+                    args = json.loads(args)
+
+                for tool in (self.tools + tools):
+                    if tool.name == requested_tool.name:
+                        requested_tool.result = tool(**args)
+                        break
+                else:
+                    print("Tool not found:", requested_tool.name)
+                    return reply
+
+            reply = await client.async_invoke(
+                model=model,
+                system_prompt=self._system_prompt,
+                querys=self._memory,
+                tools=self.tools + tools,
+                provider=provider
+            )
+            reply.answeredBy = model
+            self._memory.append(reply)
+
+        return reply
+
+    async def async_invoke_stream(self, model: LLMModel, query: Chat_message, tools: list[tool_model] = [], provider: ProviderConfig = None) -> AsyncIterator[str]:
+        self._memory.append(query)
+        client = OpenRouterProvider()
+
+        stream = client.async_invoke_stream(
+            model=model,
+            system_prompt=self._system_prompt,
+            querys=self._memory,
+            tools=self.tools + tools,
+            provider=provider
+        )
+
+        text = ""
+        async for chunk in stream:
+            delta = chunk.choices[0].delta.content or ""
+            text += delta
+            yield delta
+
+        self._memory.append(Chat_message(text=text, role=Role.ai, answerdBy=model))
+
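For orientation, here is a minimal usage sketch of the new Chatbot_manager streaming entry points. Import paths follow the module layout in RECORD; the Role members and the LLMModel constant `gpt_4o_mini` are illustrative assumptions, since the diff does not show them:

import asyncio

from OpenRouterProvider.Chatbot_manager import Chatbot_manager
from OpenRouterProvider.Chat_message import Chat_message, Role
from OpenRouterProvider.LLMs import LLMModel

manager = Chatbot_manager()  # assuming a no-argument constructor

# Synchronous streaming: invoke_stream yields text deltas as they arrive.
for delta in manager.invoke_stream(
    model=LLMModel.gpt_4o_mini,  # hypothetical member name
    query=Chat_message(text="Hello!", role=Role.user),  # Role.user is assumed
):
    print(delta, end="", flush=True)

# Asynchronous streaming: async_invoke_stream is an async generator.
async def main() -> None:
    async for delta in manager.async_invoke_stream(
        model=LLMModel.gpt_4o_mini,
        query=Chat_message(text="Same again, but async.", role=Role.user),
    ):
        print(delta, end="", flush=True)

asyncio.run(main())

Both variants append the fully accumulated reply to the manager's memory once the stream is exhausted, so later turns see the streamed answer as normal history.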
OpenRouterProvider/OpenRouterProvider.py
@@ -3,12 +3,13 @@ from .Chat_message import *
 from .Tool import tool_model
 from .LLMs import *
 
-from openai import OpenAI
+from openai import OpenAI, AsyncOpenAI
+from openai.types.chat import ChatCompletionChunk
 from dotenv import load_dotenv
-import os
+import os, time
 from dataclasses import dataclass, field, asdict
-from typing import List, Optional, Literal
-import json
+from typing import List, Optional, Literal, Iterator, AsyncIterator
+from pprint import pprint
 
 # Log errors only, with detailed tracebacks.
 logging.basicConfig(level=logging.ERROR, format="%(asctime)s - %(levelname)s - %(message)s")
@@ -41,6 +42,10 @@ class OpenRouterProvider:
             base_url="https://openrouter.ai/api/v1",
             api_key=api_key,
         )
+        self.async_client = AsyncOpenAI(
+            base_url="https://openrouter.ai/api/v1",
+            api_key=api_key,
+        )
 
     def make_prompt(self, system_prompt: Chat_message,
                     querys: list[Chat_message]) -> list[dict]:
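OpenRouter exposes an OpenAI-compatible API, so the new `async_client` only needs the alternate `base_url` plus an OpenRouter key, exactly like the existing synchronous client. A standalone sketch of the same pattern against the plain openai SDK (the `OPENROUTER_API_KEY` variable name is an assumption; the package loads its key via dotenv):

import asyncio
import os

from openai import AsyncOpenAI

async def main() -> None:
    client = AsyncOpenAI(
        base_url="https://openrouter.ai/api/v1",
        api_key=os.environ["OPENROUTER_API_KEY"],  # assumed env var name
    )
    response = await client.chat.completions.create(
        model="openai/gpt-4o-mini",  # model id taken from the chunk examples below
        messages=[{"role": "user", "content": "ping"}],
    )
    print(response.choices[0].message.content)

asyncio.run(main())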
@@ -110,3 +115,88 @@ class OpenRouterProvider:
         except Exception as e:
             logger.exception(f"An error occurred while invoking the model: {e.__class__.__name__}: {str(e)}")
             return Chat_message(text="Fail to get response. Please see the error message.", role=Role.ai, raw_response=None)
+
+    def invoke_stream(self, model: LLMModel, system_prompt: Chat_message, querys: list[Chat_message], tools: list[tool_model] = [], provider: ProviderConfig = None) -> Iterator[ChatCompletionChunk]:
+        # Chunk examples:
+        # ChatCompletionChunk(id='gen-1746748260-mdKZLTs9QY7MmUxWKb8V', choices=[Choice(delta=ChoiceDelta(content='!', function_call=None, refusal=None, role='assistant', tool_calls=None), finish_reason=None, index=0, logprobs=None, native_finish_reason=None)], created=1746748260, model='openai/gpt-4o-mini', object='chat.completion.chunk', service_tier=None, system_fingerprint='fp_e2f22fdd96', usage=None, provider='OpenAI')
+        #
+        # ChatCompletionChunk(id='gen-1746748260-mdKZLTs9QY7MmUxWKb8V', choices=[Choice(delta=ChoiceDelta(content='', function_call=None, refusal=None, role='assistant', tool_calls=None), finish_reason='stop', index=0, logprobs=None, native_finish_reason='stop')], created=1746748260, model='openai/gpt-4o-mini', object='chat.completion.chunk', service_tier=None, system_fingerprint='fp_e2f22fdd96', usage=None, provider='OpenAI')
+        #
+        # ChatCompletionChunk(id='gen-1746748260-mdKZLTs9QY7MmUxWKb8V', choices=[Choice(delta=ChoiceDelta(content='', function_call=None, refusal=None, role='assistant', tool_calls=None), finish_reason=None, index=0, logprobs=None, native_finish_reason=None)], created=1746748260, model='openai/gpt-4o-mini', object='chat.completion.chunk', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=54, prompt_tokens=61, total_tokens=115, completion_tokens_details=CompletionTokensDetails(reasoning_tokens=0), prompt_tokens_details={'cached_tokens': 0}), provider='OpenAI')
+
+        try:
+            messages = self.make_prompt(system_prompt, querys)
+
+            tool_defs = [tool.tool_definition for tool in tools] if tools else None
+            provider_dict = provider.to_dict() if provider else None
+
+            response = self.client.chat.completions.create(
+                model=model.name,
+                messages=messages,
+                tools=tool_defs,
+                extra_body={"provider": provider_dict},
+                stream=True
+            )
+
+            return response
+
+        except Exception as e:
+            logger.exception(f"An error occurred while invoking the model: {e.__class__.__name__}: {str(e)}")
+            return Chat_message(text="Fail to get response. Please see the error message.", role=Role.ai, raw_response=None)
+
+    async def async_invoke(self, model: LLMModel, system_prompt: Chat_message, querys: list[Chat_message], tools: list[tool_model] = [], provider: ProviderConfig = None) -> Chat_message:
+        try:
+            messages = self.make_prompt(system_prompt, querys)
+
+            tool_defs = [tool.tool_definition for tool in tools] if tools else None
+            provider_dict = provider.to_dict() if provider else None
+
+            response = await self.async_client.chat.completions.create(
+                model=model.name,
+                messages=messages,
+                tools=tool_defs,
+                extra_body={"provider": provider_dict}
+            )
+
+            reply = Chat_message(text=response.choices[0].message.content, role=Role.ai, raw_response=response)
+
+            if response.choices[0].message.tool_calls:
+                reply.role = Role.tool
+                for tool in response.choices[0].message.tool_calls:
+                    reply.tool_calls.append(ToolCall(id=tool.id, name=tool.function.name, arguments=tool.function.arguments))
+            return reply
+
+        except Exception as e:
+            logger.exception(f"An error occurred while asynchronously invoking the model: {e.__class__.__name__}: {str(e)}")
+            return Chat_message(text="Fail to get response. Please see the error message.", role=Role.ai, raw_response=None)
+
+    async def async_invoke_stream(
+        self,
+        model: LLMModel,
+        system_prompt: Chat_message,
+        querys: list[Chat_message],
+        tools: list[tool_model] = [],
+        provider: ProviderConfig = None
+    ) -> AsyncIterator[ChatCompletionChunk]:
+        try:
+            messages = self.make_prompt(system_prompt, querys)
+
+            tool_defs = [tool.tool_definition for tool in tools] if tools else None
+            provider_dict = provider.to_dict() if provider else None
+
+            response = await self.async_client.chat.completions.create(
+                model=model.name,
+                messages=messages,
+                tools=tool_defs,
+                extra_body={"provider": provider_dict},
+                stream=True
+            )
+
+            async for chunk in response:
+                yield chunk
+
+        except Exception as e:
+            logger.exception(f"An error occurred while asynchronously streaming the model: {e.__class__.__name__}: {str(e)}")
+            return
+
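Callers that use OpenRouterProvider.invoke_stream directly receive raw ChatCompletionChunk objects rather than joined text, so they must unpack the deltas themselves, as Chatbot_manager does above. A minimal consumer sketch under the same assumptions as the earlier example (the Role members and the model constant are illustrative):

from OpenRouterProvider.OpenRouterProvider import OpenRouterProvider
from OpenRouterProvider.Chat_message import Chat_message, Role
from OpenRouterProvider.LLMs import LLMModel

provider = OpenRouterProvider()
system_message = Chat_message(text="You are a concise assistant.", role=Role.system)  # Role.system is assumed

stream = provider.invoke_stream(
    model=LLMModel.gpt_4o_mini,  # hypothetical member name
    system_prompt=system_message,
    querys=[Chat_message(text="Stream me a haiku.", role=Role.user)],  # history, oldest first
)

text = ""
for chunk in stream:
    # Content arrives in delta chunks; the final chunk has finish_reason='stop',
    # and a usage-only chunk may follow (see the chunk examples above).
    delta = chunk.choices[0].delta.content or ""
    text += delta
    print(delta, end="", flush=True)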
openrouter_provider-0.0.7.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openrouter-provider
-Version: 0.0.5
+Version: 0.0.7
 Summary: This is an unofficial wrapper of OpenRouter.
 Author-email: Keisuke Miyamto <aichiboyhighschool@gmail.com>
 Requires-Python: >=3.7
openrouter_provider-0.0.7.dist-info/RECORD
@@ -0,0 +1,10 @@
+__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+OpenRouterProvider/Chat_message.py,sha256=lQd8bFp7OHOgeOrcpcVZMdkV2Mb4reUsv5Ixo6WecYY,4424
+OpenRouterProvider/Chatbot_manager.py,sha256=2kWnyf0SFWACfK4tA0wr2rzlqSnF8AQ3QC-sKpoMFhI,7274
+OpenRouterProvider/LLMs.py,sha256=-0ELd6fqmdDvsdaPIElRsluiK85-Y6USwvQb2b4M8TA,2607
+OpenRouterProvider/OpenRouterProvider.py,sha256=5mKrGbRWOsjKTcE7-WbWYcwy3avTT7-9H5cy6N25d0M,9572
+OpenRouterProvider/Tool.py,sha256=QeeWOD2oaYjB9tjF-Jvcjd_G_qSUIuKwFgyh20Ne06I,2010
+openrouter_provider-0.0.7.dist-info/METADATA,sha256=j9FsXCm8g5SRkiwqlwv3ptc3i5NnfhL7CvweoOG-_kE,5995
+openrouter_provider-0.0.7.dist-info/WHEEL,sha256=DnLRTWE75wApRYVsjgc6wsVswC54sMSJhAEd4xhDpBk,91
+openrouter_provider-0.0.7.dist-info/top_level.txt,sha256=I5BMEzkQFEnEYTqOY1Ktmnp7r1rrZQyeWdclKyyyHKs,28
+openrouter_provider-0.0.7.dist-info/RECORD,,
openrouter_provider-0.0.7.dist-info/WHEEL
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (80.3.1)
+Generator: setuptools (80.4.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
 
openrouter_provider-0.0.5.dist-info/RECORD
@@ -1,10 +0,0 @@
-__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-OpenRouterProvider/Chat_message.py,sha256=lQd8bFp7OHOgeOrcpcVZMdkV2Mb4reUsv5Ixo6WecYY,4424
-OpenRouterProvider/Chatbot_manager.py,sha256=EpLWhxx7xnRa-q7xqP2Ur9dmYb9Mzv_UF6BChwpcbYk,4357
-OpenRouterProvider/LLMs.py,sha256=-0ELd6fqmdDvsdaPIElRsluiK85-Y6USwvQb2b4M8TA,2607
-OpenRouterProvider/OpenRouterProvider.py,sha256=4k87D5kKNPgRJ-7qMv9oPWY7P2psrNaNOALmyPMuNsw,4397
-OpenRouterProvider/Tool.py,sha256=QeeWOD2oaYjB9tjF-Jvcjd_G_qSUIuKwFgyh20Ne06I,2010
-openrouter_provider-0.0.5.dist-info/METADATA,sha256=_H9lXm0ohRX57GdvJMJNIyn6pyWblQQTDHOWv7EM6GE,5995
-openrouter_provider-0.0.5.dist-info/WHEEL,sha256=0CuiUZ_p9E4cD6NyLD6UG80LBXYyiSYZOKDm5lp32xk,91
-openrouter_provider-0.0.5.dist-info/top_level.txt,sha256=I5BMEzkQFEnEYTqOY1Ktmnp7r1rrZQyeWdclKyyyHKs,28
-openrouter_provider-0.0.5.dist-info/RECORD,,