openrouter-provider 0.0.7.tar.gz → 0.0.9.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of openrouter-provider might be problematic.

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openrouter-provider
-Version: 0.0.7
+Version: 0.0.9
 Summary: This is an unofficial wrapper of OpenRouter.
 Author-email: Keisuke Miyamto <aichiboyhighschool@gmail.com>
 Requires-Python: >=3.7
@@ -1,6 +1,6 @@
 [project]
 name = "openrouter-provider"
-version = "0.0.7"
+version = "0.0.9"
 description = "This is an unofficial wrapper of OpenRouter."
 readme = "README.md"
 requires-python = ">=3.7"
@@ -6,6 +6,7 @@ from dotenv import load_dotenv
 import time
 import json
 from typing import Iterator, AsyncIterator
+from pydantic import BaseModel


 _base_system_prompt = """
@@ -82,15 +83,23 @@ class Chatbot_manager:

         print("----------------------------------------------------------\n")

-    def invoke(self, model: LLMModel, query: Chat_message, tools: list[tool_model]=[], provider:ProviderConfig=None) -> Chat_message:
+    def invoke(
+        self,
+        model: LLMModel,
+        query: Chat_message,
+        tools: list[tool_model]=[],
+        provider:ProviderConfig=None,
+        temperature: float=0.3
+    ) -> Chat_message:
         self._memory.append(query)
         client = OpenRouterProvider()
         reply = client.invoke(
             model=model,
+            temperature=temperature,
             system_prompt=self._system_prompt,
             querys=self._memory,
             tools=self.tools + tools,
-            provider=provider
+            provider=provider,
         )
         reply.answeredBy = model
         self._memory.append(reply)
@@ -123,11 +132,19 @@ class Chatbot_manager:

         return reply

-    def invoke_stream(self, model: LLMModel, query: Chat_message, tools: list[tool_model]=[], provider:ProviderConfig=None) -> Iterator[str]:
+    def invoke_stream(
+        self,
+        model: LLMModel,
+        query: Chat_message,
+        tools: list[tool_model]=[],
+        provider:ProviderConfig=None,
+        temperature: float=0.3
+    ) -> Iterator[str]:
         self._memory.append(query)
         client = OpenRouterProvider()
         generator = client.invoke_stream(
             model=model,
+            temperature=temperature,
             system_prompt=self._system_prompt,
             querys=self._memory,
             tools=self.tools + tools,
@@ -141,11 +158,19 @@ class Chatbot_manager:

         self._memory.append(Chat_message(text=text, role=Role.ai, answerdBy=LLMModel))

-    async def async_invoke(self, model: LLMModel, query: Chat_message, tools: list[tool_model] = [], provider: ProviderConfig = None) -> Chat_message:
+    async def async_invoke(
+        self,
+        model: LLMModel,
+        query: Chat_message,
+        tools: list[tool_model] = [],
+        provider: ProviderConfig = None,
+        temperature: float=0.3
+    ) -> Chat_message:
         self._memory.append(query)
         client = OpenRouterProvider()
         reply = await client.async_invoke(
             model=model,
+            temperature=temperature,
             system_prompt=self._system_prompt,
             querys=self._memory,
             tools=self.tools + tools,
@@ -181,12 +206,20 @@ class Chatbot_manager:

         return reply

-    async def async_invoke_stream(self, model: LLMModel, query: Chat_message, tools: list[tool_model] = [], provider: ProviderConfig = None) -> AsyncIterator[str]:
+    async def async_invoke_stream(
+        self,
+        model: LLMModel,
+        query: Chat_message,
+        tools: list[tool_model] = [],
+        provider: ProviderConfig = None,
+        temperature: float=0.3
+    ) -> AsyncIterator[str]:
         self._memory.append(query)
         client = OpenRouterProvider()

         stream = client.async_invoke_stream(
             model=model,
+            temperature=temperature,
             system_prompt=self._system_prompt,
             querys=self._memory,
             tools=self.tools + tools,
@@ -201,4 +234,22 @@ class Chatbot_manager:

         self._memory.append(Chat_message(text=text, role=Role.ai, answerdBy=model))

-
+    def structured_output(
+        self,
+        model: LLMModel,
+        query: Chat_message,
+        provider:ProviderConfig=None,
+        json_schema: BaseModel=None,
+        temperature: float=0.3
+    ) -> BaseModel:
+        self._memory.append(query)
+        client = OpenRouterProvider()
+        reply = client.structured_output(
+            model=model,
+            temperature=temperature,
+            system_prompt=self._system_prompt,
+            querys=self._memory,
+            provider=provider,
+            json_schema=json_schema
+        )
+        return reply
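The Chatbot_manager changes above add a temperature keyword (default 0.3) to invoke, invoke_stream, async_invoke, and async_invoke_stream, plus a new structured_output method that forwards a pydantic schema to OpenRouterProvider. A minimal usage sketch follows (not part of the package diff); the import path, the no-argument Chatbot_manager() construction, the model constant gpt_4o_mini, and Role.user are illustrative assumptions not confirmed by this diff.

    from pydantic import BaseModel
    # Hypothetical import -- the real module path is not shown in this diff.
    # from openrouter_provider import Chatbot_manager, Chat_message, Role, gpt_4o_mini

    class WeatherReport(BaseModel):
        city: str
        summary: str

    manager = Chatbot_manager()  # assumed constructor
    question = Chat_message(text="How is the weather in Tokyo?", role=Role.user)

    # temperature is new in 0.0.9; it defaults to 0.3 when omitted.
    reply = manager.invoke(model=gpt_4o_mini, query=question, temperature=0.1)

    # structured_output is new in 0.0.9 and parses the reply into the given schema.
    report = manager.structured_output(model=gpt_4o_mini, query=question, json_schema=WeatherReport)
    print(report.city, report.summary)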
@@ -1,3 +1,7 @@
+
+# structured output
+# https://note.com/brave_quince241/n/n60a5759c8f05
+
 import logging
 from .Chat_message import *
 from .Tool import tool_model
@@ -10,6 +14,8 @@ import os, time
 from dataclasses import dataclass, field, asdict
 from typing import List, Optional, Literal, Iterator, AsyncIterator
 from pprint import pprint
+from pydantic import BaseModel
+

 # エラーのみ表示、詳細なトレースバック付き
 logging.basicConfig(level=logging.ERROR, format="%(asctime)s - %(levelname)s - %(message)s")
@@ -47,8 +53,11 @@ class OpenRouterProvider:
             api_key=api_key,
         )

-    def make_prompt(self, system_prompt: Chat_message,
-                    querys: list[Chat_message]) -> list[dict]:
+    def make_prompt(
+        self,
+        system_prompt: Chat_message,
+        querys: list[Chat_message]
+    ) -> list[dict]:
         messages = [{"role": "system", "content": system_prompt.text}]

         for query in querys:
@@ -87,21 +96,30 @@ class OpenRouterProvider:
                     "tool_call_id": str(t.id),
                     "content": str(t.result)
                 })
-
+
         return messages

-    def invoke(self, model: LLMModel, system_prompt: Chat_message, querys: list[Chat_message], tools: list[tool_model] = [], provider: ProviderConfig = None) -> Chat_message:
+    def invoke(
+        self,
+        model: LLMModel,
+        system_prompt: Chat_message,
+        querys: list[Chat_message],
+        tools: list[tool_model] = [],
+        provider: ProviderConfig = None,
+        temperature: float = 0.3
+    ) -> Chat_message:
         try:
             messages = self.make_prompt(system_prompt, querys)

             tool_defs = [tool.tool_definition for tool in tools] if tools else None
             provider_dict = provider.to_dict() if provider else None
-
+
             response = self.client.chat.completions.create(
                 model=model.name,
+                temperature=temperature,
                 messages=messages,
                 tools=tool_defs,
-                extra_body={"provider": provider_dict}
+                extra_body={"provider": provider_dict},
             )

             reply = Chat_message(text=response.choices[0].message.content, role=Role.ai, raw_response=response)
@@ -116,7 +134,15 @@ class OpenRouterProvider:
             logger.exception(f"An error occurred while invoking the model: {e.__class__.__name__}: {str(e)}")
             return Chat_message(text="Fail to get response. Please see the error message.", role=Role.ai, raw_response=None)

-    def invoke_stream(self, model: LLMModel, system_prompt: Chat_message, querys: list[Chat_message], tools: list[tool_model] = [], provider: ProviderConfig = None) -> Iterator[ChatCompletionChunk]:
+    def invoke_stream(
+        self,
+        model: LLMModel,
+        system_prompt: Chat_message,
+        querys: list[Chat_message],
+        tools: list[tool_model] = [],
+        provider: ProviderConfig = None,
+        temperature: float = 0.3
+    ) -> Iterator[ChatCompletionChunk]:
         # chunk example
         # ChatCompletionChunk(id='gen-1746748260-mdKZLTs9QY7MmUxWKb8V', choices=[Choice(delta=ChoiceDelta(content='!', function_call=None, refusal=None, role='assistant', tool_calls=None), finish_reason=None, index=0, logprobs=None, native_finish_reason=None)], created=1746748260, model='openai/gpt-4o-mini', object='chat.completion.chunk', service_tier=None, system_fingerprint='fp_e2f22fdd96', usage=None, provider='OpenAI')

@@ -132,6 +158,7 @@ class OpenRouterProvider:

             response = self.client.chat.completions.create(
                 model=model.name,
+                temperature=temperature,
                 messages=messages,
                 tools=tool_defs,
                 extra_body={"provider": provider_dict},
@@ -144,7 +171,14 @@ class OpenRouterProvider:
             logger.exception(f"An error occurred while invoking the model: {e.__class__.__name__}: {str(e)}")
             return Chat_message(text="Fail to get response. Please see the error message.", role=Role.ai, raw_response=None)

-    async def async_invoke(self, model: LLMModel, system_prompt: Chat_message, querys: list[Chat_message], tools: list[tool_model] = [], provider: ProviderConfig = None) -> Chat_message:
+    async def async_invoke(
+        self, model: LLMModel,
+        system_prompt: Chat_message,
+        querys: list[Chat_message],
+        tools: list[tool_model] = [],
+        provider: ProviderConfig = None,
+        temperature: float = 0.3
+    ) -> Chat_message:
         try:
             messages = self.make_prompt(system_prompt, querys)

@@ -153,6 +187,7 @@ class OpenRouterProvider:

             response = await self.async_client.chat.completions.create(
                 model=model.name,
+                temperature=temperature,
                 messages=messages,
                 tools=tool_defs,
                 extra_body={"provider": provider_dict}
@@ -176,7 +211,8 @@ class OpenRouterProvider:
         system_prompt: Chat_message,
         querys: list[Chat_message],
         tools: list[tool_model] = [],
-        provider: ProviderConfig = None
+        provider: ProviderConfig = None,
+        temperature: float = 0.3
     ) -> AsyncIterator[ChatCompletionChunk]:
         try:
             messages = self.make_prompt(system_prompt, querys)
@@ -186,6 +222,7 @@ class OpenRouterProvider:

             response = await self.async_client.chat.completions.create(
                 model=model.name,
+                temperature=temperature,
                 messages=messages,
                 tools=tool_defs,
                 extra_body={"provider": provider_dict},
@@ -199,4 +236,31 @@ class OpenRouterProvider:
             logger.exception(f"An error occurred while asynchronously streaming the model: {e.__class__.__name__}: {str(e)}")
             return

-
+    def structured_output(
+        self,
+        model: LLMModel,
+        system_prompt: Chat_message,
+        querys: list[Chat_message],
+        provider: ProviderConfig = None,
+        json_schema: BaseModel = None,
+        temperature: float = 0.3
+    ) -> BaseModel:
+        try:
+            messages = self.make_prompt(system_prompt, querys)
+            provider_dict = provider.to_dict() if provider else None
+
+            response = self.client.chat.completions.create(
+                model=model.name,
+                temperature=temperature,
+                messages=messages,
+                response_format={"type": "json_schema", "json_schema": {"name": json_schema.__name__, "schema": json_schema.model_json_schema()}},
+                extra_body={"provider": provider_dict},
+            )
+
+            return json_schema.model_validate_json(response.choices[0].message.content)
+
+        except Exception as e:
+            logger.exception(f"An error occurred while invoking structured output: {e.__class__.__name__}: {str(e)}")
+            return Chat_message(text="Fail to get response. Please see the error message.", role=Role.ai, raw_response=None)
+
+
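The new OpenRouterProvider.structured_output builds an OpenAI-style json_schema response_format from a pydantic model and validates the model's JSON reply back into that schema (pydantic v2 is implied, since the diff calls model_json_schema and model_validate_json). The fragment below isolates just that conversion with plain pydantic and no network call; the Person model and the raw string stand in for a real schema and a real response.

    from pydantic import BaseModel

    class Person(BaseModel):
        name: str
        age: int

    # Shape of the response_format payload that structured_output sends:
    response_format = {
        "type": "json_schema",
        "json_schema": {
            "name": Person.__name__,               # "Person"
            "schema": Person.model_json_schema(),  # JSON Schema dict generated by pydantic
        },
    }

    # Stand-in for response.choices[0].message.content returned by the model.
    raw = '{"name": "Ada", "age": 36}'
    person = Person.model_validate_json(raw)       # -> Person(name='Ada', age=36)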
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openrouter-provider
-Version: 0.0.7
+Version: 0.0.9
 Summary: This is an unofficial wrapper of OpenRouter.
 Author-email: Keisuke Miyamto <aichiboyhighschool@gmail.com>
 Requires-Python: >=3.7