openrouter-provider 0.0.8-py3-none-any.whl → 0.1.0-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of openrouter-provider might be problematic.

--- OpenRouterProvider/Chatbot_manager.py
+++ OpenRouterProvider/Chatbot_manager.py
@@ -83,11 +83,19 @@ class Chatbot_manager:
 
         print("----------------------------------------------------------\n")
 
-    def invoke(self, model: LLMModel, query: Chat_message, tools: list[tool_model]=[], provider:ProviderConfig=None) -> Chat_message:
+    def invoke(
+        self,
+        model: LLMModel,
+        query: Chat_message,
+        tools: list[tool_model]=[],
+        provider:ProviderConfig=None,
+        temperature: float=0.3
+    ) -> Chat_message:
         self._memory.append(query)
         client = OpenRouterProvider()
         reply = client.invoke(
             model=model,
+            temperature=temperature,
             system_prompt=self._system_prompt,
             querys=self._memory,
             tools=self.tools + tools,
@@ -124,11 +132,19 @@ class Chatbot_manager:
 
         return reply
 
-    def invoke_stream(self, model: LLMModel, query: Chat_message, tools: list[tool_model]=[], provider:ProviderConfig=None) -> Iterator[str]:
+    def invoke_stream(
+        self,
+        model: LLMModel,
+        query: Chat_message,
+        tools: list[tool_model]=[],
+        provider:ProviderConfig=None,
+        temperature: float=0.3
+    ) -> Iterator[str]:
         self._memory.append(query)
         client = OpenRouterProvider()
         generator = client.invoke_stream(
             model=model,
+            temperature=temperature,
             system_prompt=self._system_prompt,
             querys=self._memory,
             tools=self.tools + tools,
@@ -142,11 +158,19 @@ class Chatbot_manager:
 
         self._memory.append(Chat_message(text=text, role=Role.ai, answerdBy=LLMModel))
 
-    async def async_invoke(self, model: LLMModel, query: Chat_message, tools: list[tool_model] = [], provider: ProviderConfig = None) -> Chat_message:
+    async def async_invoke(
+        self,
+        model: LLMModel,
+        query: Chat_message,
+        tools: list[tool_model] = [],
+        provider: ProviderConfig = None,
+        temperature: float=0.3
+    ) -> Chat_message:
         self._memory.append(query)
         client = OpenRouterProvider()
         reply = await client.async_invoke(
             model=model,
+            temperature=temperature,
             system_prompt=self._system_prompt,
             querys=self._memory,
             tools=self.tools + tools,
@@ -182,12 +206,20 @@ class Chatbot_manager:
 
         return reply
 
-    async def async_invoke_stream(self, model: LLMModel, query: Chat_message, tools: list[tool_model] = [], provider: ProviderConfig = None) -> AsyncIterator[str]:
+    async def async_invoke_stream(
+        self,
+        model: LLMModel,
+        query: Chat_message,
+        tools: list[tool_model] = [],
+        provider: ProviderConfig = None,
+        temperature: float=0.3
+    ) -> AsyncIterator[str]:
         self._memory.append(query)
         client = OpenRouterProvider()
 
         stream = client.async_invoke_stream(
             model=model,
+            temperature=temperature,
             system_prompt=self._system_prompt,
             querys=self._memory,
             tools=self.tools + tools,
@@ -202,11 +234,19 @@ class Chatbot_manager:
 
         self._memory.append(Chat_message(text=text, role=Role.ai, answerdBy=model))
 
-    def structured_output(self, model: LLMModel, query: Chat_message, provider:ProviderConfig=None, json_schema: BaseModel=None) -> BaseModel:
+    def structured_output(
+        self,
+        model: LLMModel,
+        query: Chat_message,
+        provider:ProviderConfig=None,
+        json_schema: BaseModel=None,
+        temperature: float=0.3
+    ) -> BaseModel:
         self._memory.append(query)
         client = OpenRouterProvider()
         reply = client.structured_output(
             model=model,
+            temperature=temperature,
             system_prompt=self._system_prompt,
             querys=self._memory,
             provider=provider,
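
Every public entry point on Chatbot_manager now threads an optional temperature (default 0.3) through to the underlying OpenRouterProvider call; 0.0.8 sent no temperature at all, so providers fell back to their own defaults (typically 1.0 for OpenAI-compatible endpoints). A minimal usage sketch of the new keyword, assuming only what this diff shows; `gpt_4o_mini` and `Role.user` are illustrative guesses at names defined in `OpenRouterProvider.LLMs` and `OpenRouterProvider.Chat_message`, and the constructor arguments of Chatbot_manager are omitted:

```python
# Hedged sketch of the 0.1.0 API; hypothetical names are marked below.
from OpenRouterProvider.Chatbot_manager import Chatbot_manager
from OpenRouterProvider.Chat_message import Chat_message, Role  # Role.user assumed
from OpenRouterProvider.LLMs import gpt_4o_mini  # hypothetical predefined LLMModel entry

manager = Chatbot_manager()
reply = manager.invoke(
    model=gpt_4o_mini,
    query=Chat_message(text="Hello!", role=Role.user),
    temperature=0.0,  # new in 0.1.0; defaults to 0.3 when omitted
)
print(reply.text)
```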
--- OpenRouterProvider/OpenRouterProvider.py
+++ OpenRouterProvider/OpenRouterProvider.py
@@ -53,8 +53,11 @@ class OpenRouterProvider:
             api_key=api_key,
         )
 
-    def make_prompt(self, system_prompt: Chat_message,
-                    querys: list[Chat_message]) -> list[dict]:
+    def make_prompt(
+        self,
+        system_prompt: Chat_message,
+        querys: list[Chat_message]
+    ) -> list[dict]:
         messages = [{"role": "system", "content": system_prompt.text}]
 
         for query in querys:
@@ -96,7 +99,15 @@ class OpenRouterProvider:
 
         return messages
 
-    def invoke(self, model: LLMModel, system_prompt: Chat_message, querys: list[Chat_message], tools: list[tool_model] = [], provider: ProviderConfig = None) -> Chat_message:
+    def invoke(
+        self,
+        model: LLMModel,
+        system_prompt: Chat_message,
+        querys: list[Chat_message],
+        tools: list[tool_model] = [],
+        provider: ProviderConfig = None,
+        temperature: float = 0.3
+    ) -> Chat_message:
         try:
             messages = self.make_prompt(system_prompt, querys)
 
@@ -105,6 +116,7 @@ class OpenRouterProvider:
 
             response = self.client.chat.completions.create(
                 model=model.name,
+                temperature=temperature,
                 messages=messages,
                 tools=tool_defs,
                 extra_body={"provider": provider_dict},
@@ -122,7 +134,15 @@ class OpenRouterProvider:
             logger.exception(f"An error occurred while invoking the model: {e.__class__.__name__}: {str(e)}")
             return Chat_message(text="Fail to get response. Please see the error message.", role=Role.ai, raw_response=None)
 
-    def invoke_stream(self, model: LLMModel, system_prompt: Chat_message, querys: list[Chat_message], tools: list[tool_model] = [], provider: ProviderConfig = None) -> Iterator[ChatCompletionChunk]:
+    def invoke_stream(
+        self,
+        model: LLMModel,
+        system_prompt: Chat_message,
+        querys: list[Chat_message],
+        tools: list[tool_model] = [],
+        provider: ProviderConfig = None,
+        temperature: float = 0.3
+    ) -> Iterator[ChatCompletionChunk]:
         # chunk example
         # ChatCompletionChunk(id='gen-1746748260-mdKZLTs9QY7MmUxWKb8V', choices=[Choice(delta=ChoiceDelta(content='!', function_call=None, refusal=None, role='assistant', tool_calls=None), finish_reason=None, index=0, logprobs=None, native_finish_reason=None)], created=1746748260, model='openai/gpt-4o-mini', object='chat.completion.chunk', service_tier=None, system_fingerprint='fp_e2f22fdd96', usage=None, provider='OpenAI')
 
@@ -138,6 +158,7 @@ class OpenRouterProvider:
 
             response = self.client.chat.completions.create(
                 model=model.name,
+                temperature=temperature,
                 messages=messages,
                 tools=tool_defs,
                 extra_body={"provider": provider_dict},
@@ -150,7 +171,14 @@ class OpenRouterProvider:
             logger.exception(f"An error occurred while invoking the model: {e.__class__.__name__}: {str(e)}")
             return Chat_message(text="Fail to get response. Please see the error message.", role=Role.ai, raw_response=None)
 
-    async def async_invoke(self, model: LLMModel, system_prompt: Chat_message, querys: list[Chat_message], tools: list[tool_model] = [], provider: ProviderConfig = None) -> Chat_message:
+    async def async_invoke(
+        self, model: LLMModel,
+        system_prompt: Chat_message,
+        querys: list[Chat_message],
+        tools: list[tool_model] = [],
+        provider: ProviderConfig = None,
+        temperature: float = 0.3
+    ) -> Chat_message:
         try:
             messages = self.make_prompt(system_prompt, querys)
 
@@ -159,6 +187,7 @@ class OpenRouterProvider:
 
             response = await self.async_client.chat.completions.create(
                 model=model.name,
+                temperature=temperature,
                 messages=messages,
                 tools=tool_defs,
                 extra_body={"provider": provider_dict}
@@ -182,7 +211,8 @@ class OpenRouterProvider:
         system_prompt: Chat_message,
         querys: list[Chat_message],
         tools: list[tool_model] = [],
-        provider: ProviderConfig = None
+        provider: ProviderConfig = None,
+        temperature: float = 0.3
     ) -> AsyncIterator[ChatCompletionChunk]:
         try:
             messages = self.make_prompt(system_prompt, querys)
@@ -192,6 +222,7 @@ class OpenRouterProvider:
 
             response = await self.async_client.chat.completions.create(
                 model=model.name,
+                temperature=temperature,
                 messages=messages,
                 tools=tool_defs,
                 extra_body={"provider": provider_dict},
@@ -205,13 +236,22 @@ class OpenRouterProvider:
             logger.exception(f"An error occurred while asynchronously streaming the model: {e.__class__.__name__}: {str(e)}")
             return
 
-    def structured_output(self, model: LLMModel, system_prompt: Chat_message, querys: list[Chat_message], provider: ProviderConfig = None, json_schema: BaseModel = None) -> BaseModel:
+    def structured_output(
+        self,
+        model: LLMModel,
+        system_prompt: Chat_message,
+        querys: list[Chat_message],
+        provider: ProviderConfig = None,
+        json_schema: BaseModel = None,
+        temperature: float = 0.3
+    ) -> BaseModel:
         try:
             messages = self.make_prompt(system_prompt, querys)
             provider_dict = provider.to_dict() if provider else None
 
             response = self.client.chat.completions.create(
                 model=model.name,
+                temperature=temperature,
                 messages=messages,
                 response_format={"type": "json_schema", "json_schema": {"name": json_schema.__name__, "schema": json_schema.model_json_schema()}},
                 extra_body={"provider": provider_dict},
@@ -222,4 +262,5 @@ class OpenRouterProvider:
         except Exception as e:
             logger.exception(f"An error occurred while invoking structured output: {e.__class__.__name__}: {str(e)}")
             return Chat_message(text="Fail to get response. Please see the error message.", role=Role.ai, raw_response=None)
-
+
+
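
On the provider side, the same keyword lands directly on `chat.completions.create`. A sketch of the updated `structured_output` path, assuming only what the diff shows: the method expects a Pydantic `BaseModel` subclass (it calls `json_schema.model_json_schema()`), and now accepts `temperature`. The schema class, message roles, and `gpt_4o_mini` model entry are illustrative:

```python
# Hedged sketch; hypothetical names are marked below.
from pydantic import BaseModel
from OpenRouterProvider.OpenRouterProvider import OpenRouterProvider
from OpenRouterProvider.Chat_message import Chat_message, Role  # Role.user assumed
from OpenRouterProvider.LLMs import gpt_4o_mini  # hypothetical predefined LLMModel entry

class Sentiment(BaseModel):
    label: str
    score: float

client = OpenRouterProvider()
result = client.structured_output(
    model=gpt_4o_mini,
    system_prompt=Chat_message(text="Classify the sentiment of the user message."),
    querys=[Chat_message(text="I love this library!", role=Role.user)],
    json_schema=Sentiment,  # the class itself, not an instance
    temperature=0.0,        # new in 0.1.0; defaults to 0.3
)
print(result)
```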
--- openrouter_provider-0.0.8.dist-info/METADATA
+++ openrouter_provider-0.1.0.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openrouter-provider
-Version: 0.0.8
+Version: 0.1.0
 Summary: This is an unofficial wrapper of OpenRouter.
 Author-email: Keisuke Miyamto <aichiboyhighschool@gmail.com>
 Requires-Python: >=3.7
--- /dev/null
+++ openrouter_provider-0.1.0.dist-info/RECORD
@@ -0,0 +1,10 @@
+__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+OpenRouterProvider/Chat_message.py,sha256=lQd8bFp7OHOgeOrcpcVZMdkV2Mb4reUsv5Ixo6WecYY,4424
+OpenRouterProvider/Chatbot_manager.py,sha256=sknAIzZbj7R3MalLaEViH6tOxuwCXJA0yu4njGHhb0w,8354
+OpenRouterProvider/LLMs.py,sha256=-0ELd6fqmdDvsdaPIElRsluiK85-Y6USwvQb2b4M8TA,2607
+OpenRouterProvider/OpenRouterProvider.py,sha256=5vKBdLP7349fgop1Pn1JQWmIZtYI30IBFZ0ETyRN0bo,11372
+OpenRouterProvider/Tool.py,sha256=QeeWOD2oaYjB9tjF-Jvcjd_G_qSUIuKwFgyh20Ne06I,2010
+openrouter_provider-0.1.0.dist-info/METADATA,sha256=6v_qOVud7xD6cv3UDQ-pmXsHFF883ulLOLAHmT0yn3U,5995
+openrouter_provider-0.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+openrouter_provider-0.1.0.dist-info/top_level.txt,sha256=I5BMEzkQFEnEYTqOY1Ktmnp7r1rrZQyeWdclKyyyHKs,28
+openrouter_provider-0.1.0.dist-info/RECORD,,
--- openrouter_provider-0.0.8.dist-info/WHEEL
+++ openrouter_provider-0.1.0.dist-info/WHEEL
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (80.7.1)
+Generator: setuptools (80.9.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
 
@@ -1,10 +0,0 @@
1
- __init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
- OpenRouterProvider/Chat_message.py,sha256=lQd8bFp7OHOgeOrcpcVZMdkV2Mb4reUsv5Ixo6WecYY,4424
3
- OpenRouterProvider/Chatbot_manager.py,sha256=BAp1ZENnouzNJii7Mry8NejmudQcVxLKnny3493nZFU,7759
4
- OpenRouterProvider/LLMs.py,sha256=-0ELd6fqmdDvsdaPIElRsluiK85-Y6USwvQb2b4M8TA,2607
5
- OpenRouterProvider/OpenRouterProvider.py,sha256=oemGsQY4KJafNp-fCPWp5MPAYWez2KSvrnuHOXzU_84,10754
6
- OpenRouterProvider/Tool.py,sha256=QeeWOD2oaYjB9tjF-Jvcjd_G_qSUIuKwFgyh20Ne06I,2010
7
- openrouter_provider-0.0.8.dist-info/METADATA,sha256=420W5e1NvkI1jTvjWEFD6RNv4lfFdLeS6wbYHwPuK68,5995
8
- openrouter_provider-0.0.8.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
9
- openrouter_provider-0.0.8.dist-info/top_level.txt,sha256=I5BMEzkQFEnEYTqOY1Ktmnp7r1rrZQyeWdclKyyyHKs,28
10
- openrouter_provider-0.0.8.dist-info/RECORD,,
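
The RECORD digests above use the standard wheel format: urlsafe base64 of the raw SHA-256, with `=` padding stripped. A short sketch for recomputing one locally when auditing a release like this; the file path is illustrative:

```python
import base64
import hashlib

def record_digest(path: str) -> str:
    # Wheel RECORD format: urlsafe base64 of the raw SHA-256, '=' padding stripped.
    with open(path, "rb") as f:
        raw = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(raw).rstrip(b"=").decode("ascii")

# Tool.py is unchanged between 0.0.8 and 0.1.0, so both RECORDs list the same digest:
# record_digest("OpenRouterProvider/Tool.py")
# -> "sha256=QeeWOD2oaYjB9tjF-Jvcjd_G_qSUIuKwFgyh20Ne06I"
```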