openrouter-provider 0.0.6.tar.gz → 0.0.8.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of openrouter-provider has been flagged as potentially problematic.

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openrouter-provider
-Version: 0.0.6
+Version: 0.0.8
 Summary: This is an unofficial wrapper of OpenRouter.
 Author-email: Keisuke Miyamto <aichiboyhighschool@gmail.com>
 Requires-Python: >=3.7
@@ -1,6 +1,6 @@
 [project]
 name = "openrouter-provider"
-version = "0.0.6"
+version = "0.0.8"
 description = "This is an unofficial wrapper of OpenRouter."
 readme = "README.md"
 requires-python = ">=3.7"
@@ -5,7 +5,8 @@ from .LLMs import LLMModel
 from dotenv import load_dotenv
 import time
 import json
-from typing import Iterator
+from typing import Iterator, AsyncIterator
+from pydantic import BaseModel
 
 
 _base_system_prompt = """
@@ -90,7 +91,7 @@ class Chatbot_manager:
             system_prompt=self._system_prompt,
             querys=self._memory,
             tools=self.tools + tools,
-            provider=provider
+            provider=provider,
         )
         reply.answeredBy = model
         self._memory.append(reply)
@@ -141,4 +142,74 @@ class Chatbot_manager:
 
         self._memory.append(Chat_message(text=text, role=Role.ai, answerdBy=LLMModel))
 
-
+    async def async_invoke(self, model: LLMModel, query: Chat_message, tools: list[tool_model] = [], provider: ProviderConfig = None) -> Chat_message:
+        self._memory.append(query)
+        client = OpenRouterProvider()
+        reply = await client.async_invoke(
+            model=model,
+            system_prompt=self._system_prompt,
+            querys=self._memory,
+            tools=self.tools + tools,
+            provider=provider
+        )
+        reply.answeredBy = model
+        self._memory.append(reply)
+
+        if reply.tool_calls:
+            for requested_tool in reply.tool_calls:
+                args = requested_tool.arguments
+                if isinstance(args, str):
+                    args = json.loads(args)
+
+                for tool in (self.tools + tools):
+                    if tool.name == requested_tool.name:
+                        result = tool(**args)
+                        requested_tool.result = result
+                        break
+                else:
+                    print("Tool Not found", requested_tool.name)
+                    return reply
+
+            reply = await client.async_invoke(
+                model=model,
+                system_prompt=self._system_prompt,
+                querys=self._memory,
+                tools=self.tools + tools,
+                provider=provider
+            )
+            reply.answeredBy = model
+            self._memory.append(reply)
+
+        return reply
+
+    async def async_invoke_stream(self, model: LLMModel, query: Chat_message, tools: list[tool_model] = [], provider: ProviderConfig = None) -> AsyncIterator[str]:
+        self._memory.append(query)
+        client = OpenRouterProvider()
+
+        stream = client.async_invoke_stream(
+            model=model,
+            system_prompt=self._system_prompt,
+            querys=self._memory,
+            tools=self.tools + tools,
+            provider=provider
+        )
+
+        text = ""
+        async for chunk in stream:
+            delta = chunk.choices[0].delta.content or ""
+            text += delta
+            yield delta
+
+        self._memory.append(Chat_message(text=text, role=Role.ai, answerdBy=model))
+
+    def structured_output(self, model: LLMModel, query: Chat_message, provider: ProviderConfig = None, json_schema: BaseModel = None) -> BaseModel:
+        self._memory.append(query)
+        client = OpenRouterProvider()
+        reply = client.structured_output(
+            model=model,
+            system_prompt=self._system_prompt,
+            querys=self._memory,
+            provider=provider,
+            json_schema=json_schema
+        )
+        return reply
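The hunk above adds async_invoke, async_invoke_stream, and structured_output to Chatbot_manager. As a rough orientation, here is a hedged usage sketch; the import path openrouter_provider, the Chatbot_manager() and Chat_message(text=..., role=Role.user) constructors, and the reply.text attribute are assumptions not confirmed by this diff.

```python
# Hedged usage sketch for the new async chat methods (not taken from the package docs).
from openrouter_provider import Chatbot_manager, Chat_message, Role  # hypothetical import path


async def demo(model):  # `model` is an LLMModel value; the available members are not shown in this diff
    manager = Chatbot_manager()  # assumed default constructor

    # async_invoke appends the query to memory, calls the model, runs any requested tools,
    # then re-invokes the model once with the tool results appended.
    reply = await manager.async_invoke(model=model, query=Chat_message(text="Hello!", role=Role.user))
    print(reply.text)  # assumed attribute on Chat_message

    # async_invoke_stream yields text deltas and stores the full reply in memory afterwards.
    async for delta in manager.async_invoke_stream(
        model=model, query=Chat_message(text="Tell me a joke.", role=Role.user)
    ):
        print(delta, end="", flush=True)

# Run with: asyncio.run(demo(some_model)), where some_model is an LLMModel defined by the library.
```

Note that async_invoke resolves requested tools itself and re-invokes the model once with their results, mirroring the synchronous flow shown earlier in the diff.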
@@ -1,15 +1,21 @@
+
+# structured output
+# https://note.com/brave_quince241/n/n60a5759c8f05
+
 import logging
 from .Chat_message import *
 from .Tool import tool_model
 from .LLMs import *
 
-from openai import OpenAI
+from openai import OpenAI, AsyncOpenAI
 from openai.types.chat import ChatCompletionChunk
 from dotenv import load_dotenv
 import os, time
 from dataclasses import dataclass, field, asdict
-from typing import List, Optional, Literal, Iterator
+from typing import List, Optional, Literal, Iterator, AsyncIterator
 from pprint import pprint
+from pydantic import BaseModel
+
 
 # Show errors only, with detailed tracebacks
 logging.basicConfig(level=logging.ERROR, format="%(asctime)s - %(levelname)s - %(message)s")
@@ -42,6 +48,10 @@ class OpenRouterProvider:
             base_url="https://openrouter.ai/api/v1",
             api_key=api_key,
         )
+        self.async_client = AsyncOpenAI(
+            base_url="https://openrouter.ai/api/v1",
+            api_key=api_key,
+        )
 
     def make_prompt(self, system_prompt: Chat_message,
                     querys: list[Chat_message]) -> list[dict]:
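This hunk registers a second client, AsyncOpenAI, alongside the existing synchronous one, both pointed at OpenRouter's OpenAI-compatible endpoint. For context, the same pattern works standalone with the openai SDK; the OPENROUTER_API_KEY variable name and the model slug below are illustrative assumptions.

```python
# Minimal standalone sketch of the async-client-against-OpenRouter pattern the hunk enables.
import asyncio
import os

from openai import AsyncOpenAI


async def main():
    client = AsyncOpenAI(
        base_url="https://openrouter.ai/api/v1",          # OpenRouter's OpenAI-compatible endpoint
        api_key=os.environ["OPENROUTER_API_KEY"],         # assumed environment variable name
    )
    response = await client.chat.completions.create(
        model="openai/gpt-4o-mini",                        # any OpenRouter model slug
        messages=[{"role": "user", "content": "Say hi"}],
    )
    print(response.choices[0].message.content)


asyncio.run(main())
```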
@@ -83,7 +93,7 @@ class OpenRouterProvider:
                 "tool_call_id": str(t.id),
                 "content": str(t.result)
             })
-
+
         return messages
 
     def invoke(self, model: LLMModel, system_prompt: Chat_message, querys: list[Chat_message], tools: list[tool_model] = [], provider: ProviderConfig = None) -> Chat_message:
@@ -92,12 +102,12 @@ class OpenRouterProvider:
 
         tool_defs = [tool.tool_definition for tool in tools] if tools else None
         provider_dict = provider.to_dict() if provider else None
-
+
         response = self.client.chat.completions.create(
             model=model.name,
             messages=messages,
             tools=tool_defs,
-            extra_body={"provider": provider_dict}
+            extra_body={"provider": provider_dict},
         )
 
         reply = Chat_message(text=response.choices[0].message.content, role=Role.ai, raw_response=response)
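The functional detail in this hunk is that the preferences from ProviderConfig.to_dict() are passed through extra_body, which the openai SDK merges into the request JSON so OpenRouter receives a top-level "provider" object. The sketch below illustrates the shape; the exact keys ProviderConfig emits are not visible in this diff, so the "order" and "allow_fallbacks" fields are assumptions drawn from OpenRouter's provider-routing options.

```python
# Standalone sketch of the request shape behind extra_body={"provider": provider_dict}.
import os

from openai import OpenAI

client = OpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key=os.environ["OPENROUTER_API_KEY"],   # assumed environment variable name
)

provider_preferences = {
    "order": ["openai", "together"],  # preferred upstream providers, tried in order (illustrative)
    "allow_fallbacks": False,         # do not fall back to providers outside the list (illustrative)
}

response = client.chat.completions.create(
    model="openai/gpt-4o-mini",        # any OpenRouter model slug
    messages=[{"role": "user", "content": "Hello"}],
    # extra_body is merged into the JSON body, so OpenRouter sees a top-level "provider" object.
    extra_body={"provider": provider_preferences},
)
print(response.choices[0].message.content)
```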
@@ -139,3 +149,77 @@ class OpenRouterProvider:
         except Exception as e:
             logger.exception(f"An error occurred while invoking the model: {e.__class__.__name__}: {str(e)}")
             return Chat_message(text="Fail to get response. Please see the error message.", role=Role.ai, raw_response=None)
+
+    async def async_invoke(self, model: LLMModel, system_prompt: Chat_message, querys: list[Chat_message], tools: list[tool_model] = [], provider: ProviderConfig = None) -> Chat_message:
+        try:
+            messages = self.make_prompt(system_prompt, querys)
+
+            tool_defs = [tool.tool_definition for tool in tools] if tools else None
+            provider_dict = provider.to_dict() if provider else None
+
+            response = await self.async_client.chat.completions.create(
+                model=model.name,
+                messages=messages,
+                tools=tool_defs,
+                extra_body={"provider": provider_dict}
+            )
+
+            reply = Chat_message(text=response.choices[0].message.content, role=Role.ai, raw_response=response)
+
+            if response.choices[0].message.tool_calls:
+                reply.role = Role.tool
+                for tool in response.choices[0].message.tool_calls:
+                    reply.tool_calls.append(ToolCall(id=tool.id, name=tool.function.name, arguments=tool.function.arguments))
+            return reply
+
+        except Exception as e:
+            logger.exception(f"An error occurred while asynchronously invoking the model: {e.__class__.__name__}: {str(e)}")
+            return Chat_message(text="Fail to get response. Please see the error message.", role=Role.ai, raw_response=None)
+
+    async def async_invoke_stream(
+        self,
+        model: LLMModel,
+        system_prompt: Chat_message,
+        querys: list[Chat_message],
+        tools: list[tool_model] = [],
+        provider: ProviderConfig = None
+    ) -> AsyncIterator[ChatCompletionChunk]:
+        try:
+            messages = self.make_prompt(system_prompt, querys)
+
+            tool_defs = [tool.tool_definition for tool in tools] if tools else None
+            provider_dict = provider.to_dict() if provider else None
+
+            response = await self.async_client.chat.completions.create(
+                model=model.name,
+                messages=messages,
+                tools=tool_defs,
+                extra_body={"provider": provider_dict},
+                stream=True
+            )
+
+            async for chunk in response:
+                yield chunk
+
+        except Exception as e:
+            logger.exception(f"An error occurred while asynchronously streaming the model: {e.__class__.__name__}: {str(e)}")
+            return
+
+    def structured_output(self, model: LLMModel, system_prompt: Chat_message, querys: list[Chat_message], provider: ProviderConfig = None, json_schema: BaseModel = None) -> BaseModel:
+        try:
+            messages = self.make_prompt(system_prompt, querys)
+            provider_dict = provider.to_dict() if provider else None
+
+            response = self.client.chat.completions.create(
+                model=model.name,
+                messages=messages,
+                response_format={"type": "json_schema", "json_schema": {"name": json_schema.__name__, "schema": json_schema.model_json_schema()}},
+                extra_body={"provider": provider_dict},
+            )
+
+            return json_schema.model_validate_json(response.choices[0].message.content)
+
+        except Exception as e:
+            logger.exception(f"An error occurred while invoking structured output: {e.__class__.__name__}: {str(e)}")
+            return Chat_message(text="Fail to get response. Please see the error message.", role=Role.ai, raw_response=None)
+
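The new structured_output method builds an OpenAI-style json_schema response_format from a pydantic model and validates the reply back into that model. A standalone sketch of the same request, using a hypothetical MovieReview schema; model_json_schema() and model_validate_json() are pydantic v2 APIs:

```python
# Hedged, standalone sketch of the structured-output request the new method builds.
import os

from openai import OpenAI
from pydantic import BaseModel


class MovieReview(BaseModel):  # hypothetical schema for illustration
    title: str
    rating: int      # e.g. 1-10
    summary: str


client = OpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key=os.environ["OPENROUTER_API_KEY"],   # assumed environment variable name
)

response = client.chat.completions.create(
    model="openai/gpt-4o-mini",   # must be a model that supports structured outputs
    messages=[{"role": "user", "content": "Review the movie 'Inception' as JSON."}],
    # Same response_format shape as in the diff above, built from the pydantic model.
    response_format={
        "type": "json_schema",
        "json_schema": {"name": MovieReview.__name__, "schema": MovieReview.model_json_schema()},
    },
)

review = MovieReview.model_validate_json(response.choices[0].message.content)
print(review.rating, review.summary)
```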
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openrouter-provider
-Version: 0.0.6
+Version: 0.0.8
 Summary: This is an unofficial wrapper of OpenRouter.
 Author-email: Keisuke Miyamto <aichiboyhighschool@gmail.com>
 Requires-Python: >=3.7