openrouter-provider 0.0.5__py3-none-any.whl → 1.0.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,279 @@
+ from __future__ import annotations
+ import json
+ import os
+ from dataclasses import dataclass, asdict
+ from typing import List, Optional, Literal, Iterator, AsyncIterator
+
+ from dotenv import load_dotenv
+ from openai import OpenAI, AsyncOpenAI
+ from openai.types.chat import ChatCompletionChunk
+ from pydantic import BaseModel, ValidationError
+
+ from openrouter.message import Message, Role, ToolCall
+ from openrouter.tool import tool_model
+ from openrouter.llms import LLMModel
+
+
+ @dataclass
+ class ProviderConfig:
+     order: Optional[List[str]] = None
+     allow_fallbacks: Optional[bool] = None
+     require_parameters: Optional[bool] = None
+     data_collection: Optional[Literal["allow", "deny"]] = None
+     only: Optional[List[str]] = None
+     ignore: Optional[List[str]] = None
+     quantizations: Optional[List[str]] = None
+     sort: Optional[Literal["price", "throughput"]] = None
+     max_price: Optional[dict] = None
+
+     def to_dict(self) -> dict:
+         return {k: v for k, v in asdict(self).items() if v is not None}
+
+
+ class OpenRouterProvider:
+     def __init__(self) -> None:
+         load_dotenv()
+         api_key = os.getenv("OPENROUTER_API_KEY")
+         if not api_key:
+             raise ValueError("OPENROUTER_API_KEY is not set in environment variables.")
+         self.client = OpenAI(
+             base_url="https://openrouter.ai/api/v1",
+             api_key=api_key,
+         )
+         self.async_client = AsyncOpenAI(
+             base_url="https://openrouter.ai/api/v1",
+             api_key=api_key,
+         )
+
+     def make_prompt(
+         self,
+         system_prompt: Message,
+         querys: list[Message]
+     ) -> list[dict]:
+         messages = [{"role": "system", "content": system_prompt.text}]
+
+         for query in querys:
+             if query.role == Role.user:
+                 if query.images is None:
+                     messages.append({"role": "user", "content": query.text})
+                 else:
+                     content = [{"type": "text", "text": query.text}]
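+                     # Each image is a base64 string; at most 50 are attached as data-URL image parts.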
+                     for img in query.images[:50]:
+                         content.append(
+                             {"type": "image_url",
+                              "image_url": {"url": f"data:image/jpeg;base64,{img}"}})
+                     messages.append({"role": "user", "content": content})
+
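+             # Assistant turns are replayed with their tool calls, followed by one
+             # "tool" message per recorded result.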
+             elif query.role == Role.ai or query.role == Role.tool:
+                 assistant_msg = {"role": "assistant"}
+                 assistant_msg["content"] = query.text or None
+
+                 if query.tool_calls:
+                     assistant_msg["tool_calls"] = [
+                         {
+                             "id": str(t.id),
+                             "type": "function",
+                             "function": {
+                                 "name": t.name,
+                                 "arguments": t.arguments
+                             }
+                         }
+                         for t in query.tool_calls
+                     ]
+                 messages.append(assistant_msg)
+
+                 for t in query.tool_calls:
+                     messages.append({
+                         "role": "tool",
+                         "tool_call_id": str(t.id),
+                         "content": str(t.result)
+                     })
+
+         return messages
+
+     def invoke(
+         self,
+         model: LLMModel,
+         system_prompt: Message,
+         querys: list[Message],
+         tools: Optional[list[tool_model]] = None,
+         provider: Optional[ProviderConfig] = None,
+         temperature: float = 0.3
+     ) -> Message:
+         tools = tools or []
+         messages = self.make_prompt(system_prompt, querys)
+
+         tool_defs = [tool.tool_definition for tool in tools] if tools else None
+         provider_dict = provider.to_dict() if provider else None
+
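+         # OpenRouter reads routing preferences from the non-standard "provider"
+         # field, which the OpenAI SDK passes through via extra_body.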
+         response = self.client.chat.completions.create(
+             model=model.name,
+             temperature=temperature,
+             messages=messages,
+             tools=tool_defs,
+             extra_body={"provider": provider_dict},
+         )
+
+         reply = Message(text=response.choices[0].message.content, role=Role.ai, raw_response=response)
+
+         if response.choices[0].message.tool_calls:
+             reply.role = Role.tool
+             for tool in response.choices[0].message.tool_calls:
+                 reply.tool_calls.append(ToolCall(id=tool.id, name=tool.function.name, arguments=tool.function.arguments))
+         return reply
+
+     def invoke_stream(
+         self,
+         model: LLMModel,
+         system_prompt: Message,
+         querys: list[Message],
+         tools: Optional[list[tool_model]] = None,
+         provider: Optional[ProviderConfig] = None,
+         temperature: float = 0.3
+     ) -> Iterator[ChatCompletionChunk]:
+         tools = tools or []
+         messages = self.make_prompt(system_prompt, querys)
+
+         tool_defs = [tool.tool_definition for tool in tools] if tools else None
+         provider_dict = provider.to_dict() if provider else None
+
+         response = self.client.chat.completions.create(
+             model=model.name,
+             temperature=temperature,
+             messages=messages,
+             tools=tool_defs,
+             extra_body={"provider": provider_dict},
+             stream=True
+         )
+
+         return response
+
+     async def async_invoke(
+         self,
+         model: LLMModel,
+         system_prompt: Message,
+         querys: list[Message],
+         tools: Optional[list[tool_model]] = None,
+         provider: Optional[ProviderConfig] = None,
+         temperature: float = 0.3
+     ) -> Message:
+         tools = tools or []
+         messages = self.make_prompt(system_prompt, querys)
+
+         tool_defs = [tool.tool_definition for tool in tools] if tools else None
+         provider_dict = provider.to_dict() if provider else None
+
+         response = await self.async_client.chat.completions.create(
+             model=model.name,
+             temperature=temperature,
+             messages=messages,
+             tools=tool_defs,
+             extra_body={"provider": provider_dict}
+         )
+
+         reply = Message(text=response.choices[0].message.content, role=Role.ai, raw_response=response)
+
+         if response.choices[0].message.tool_calls:
+             reply.role = Role.tool
+             for tool in response.choices[0].message.tool_calls:
+                 reply.tool_calls.append(ToolCall(id=tool.id, name=tool.function.name, arguments=tool.function.arguments))
+         return reply
+
+     async def async_invoke_stream(
+         self,
+         model: LLMModel,
+         system_prompt: Message,
+         querys: list[Message],
+         tools: Optional[list[tool_model]] = None,
+         provider: Optional[ProviderConfig] = None,
+         temperature: float = 0.3
+     ) -> AsyncIterator[ChatCompletionChunk]:
+         tools = tools or []
+         messages = self.make_prompt(system_prompt, querys)
+
+         tool_defs = [tool.tool_definition for tool in tools] if tools else None
+         provider_dict = provider.to_dict() if provider else None
+
+         response = await self.async_client.chat.completions.create(
+             model=model.name,
+             temperature=temperature,
+             messages=messages,
+             tools=tool_defs,
+             extra_body={"provider": provider_dict},
+             stream=True
+         )
+
+         async for chunk in response:
+             yield chunk
+
+     def structured_output(
+         self,
+         model: LLMModel,
+         system_prompt: Message,
+         querys: list[Message],
+         provider: Optional[ProviderConfig] = None,
+         json_schema: Optional[type[BaseModel]] = None,
+         temperature: float = 0.3
+     ) -> BaseModel:
+         messages = self.make_prompt(system_prompt, querys)
+         provider_dict = provider.to_dict() if provider else None
+
+         schema = json_schema.model_json_schema()
+
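+         # Strict JSON-schema mode expects every object to forbid unknown keys and to
+         # list all of its properties as required, so patch the generated schema in place.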
+         def add_additional_properties_false(obj):
+             if isinstance(obj, dict):
+                 if "properties" in obj:
+                     obj["additionalProperties"] = False
+                 for value in obj.values():
+                     add_additional_properties_false(value)
+             elif isinstance(obj, list):
+                 for item in obj:
+                     add_additional_properties_false(item)
+
+         def ensure_required_properties(obj):
+             if isinstance(obj, dict):
+                 properties = obj.get("properties")
+                 if isinstance(properties, dict):
+                     keys = list(properties.keys())
+                     existing_required = obj.get("required")
+                     if isinstance(existing_required, list):
+                         required_set = set(existing_required)
+                     else:
+                         required_set = set()
+                     required_set.update(keys)
+                     obj["required"] = list(required_set)
+                 for value in obj.values():
+                     ensure_required_properties(value)
+             elif isinstance(obj, list):
+                 for item in obj:
+                     ensure_required_properties(item)
+
+         add_additional_properties_false(schema)
+         ensure_required_properties(schema)
+
+         response = self.client.chat.completions.create(
+             model=model.name,
+             temperature=temperature,
+             messages=messages,
+             response_format={"type": "json_schema", "json_schema": {"name": json_schema.__name__, "schema": schema}},
+             extra_body={"provider": provider_dict},
+         )
+
+         content = response.choices[0].message.content
+
+         try:
+             return json_schema.model_validate_json(content)
+         except ValidationError:
+             formatted_content = content
+             try:
+                 parsed = json.loads(content)
+                 formatted_content = json.dumps(parsed, indent=2, ensure_ascii=False)
+             except json.JSONDecodeError:
+                 pass
+             print("structured_output validation failed, response content:")
+             print(formatted_content)
+             raise
+
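Taken together, a minimal usage sketch of the classes added above. The `LLMModel(...)` construction and the model slug are illustrative assumptions (the diff only shows that `model.name` is read); `Message` and `Role` come from the package's own modules, and the provider itself only needs OPENROUTER_API_KEY in the environment:

    from pydantic import BaseModel
    from openrouter.message import Message, Role
    from openrouter.llms import LLMModel

    client = OpenRouterProvider()  # raises ValueError unless OPENROUTER_API_KEY is set
    routing = ProviderConfig(sort="price", allow_fallbacks=False)
    # to_dict() drops unset fields, so only {"sort": "price", "allow_fallbacks": False} is sent.

    system = Message(role=Role.user, text="You are a terse assistant.")  # only .text is read for the system turn
    question = Message(role=Role.user, text="What does provider routing do?")
    model = LLMModel(name="openai/gpt-4o-mini")  # hypothetical handle; invoke() only reads model.name

    reply = client.invoke(model=model, system_prompt=system, querys=[question], provider=routing)
    print(reply.text)

    # Streaming yields raw ChatCompletionChunk objects from the OpenAI SDK:
    for chunk in client.invoke_stream(model=model, system_prompt=system, querys=[question]):
        delta = chunk.choices[0].delta.content
        if delta:
            print(delta, end="")

    # Structured output validates the model's JSON against a Pydantic class:
    class Answer(BaseModel):
        summary: str

    answer = client.structured_output(model=model, system_prompt=system,
                                      querys=[question], json_schema=Answer)
    print(answer.summary)
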
@@ -1,10 +1,11 @@
+ from __future__ import annotations
  import inspect
  from functools import wraps
- from typing import get_type_hints, get_origin, get_args
+ from typing import get_type_hints, get_origin, get_args, Callable, Any, Dict


  class tool_model:
-     def __init__(self, func):
+     def __init__(self, func: Callable) -> None:
          self.func = func
          self.name = func.__name__

@@ -12,22 +13,20 @@ class tool_model:
          type_hints = get_type_hints(func)

          type_map = {
-             int: "integer",
-             str: "string",
+             int: "integer",
+             str: "string",
              float: "number",
-             bool: "boolean",
+             bool: "boolean",
          }

-         properties = {}
-         required = []
+         properties: Dict[str, Dict[str, Any]] = {}
+         required: list[str] = []

-         # Build JSON schema properties
          for name, param in sig.parameters.items():
              anno = type_hints.get(name, None)
              origin = get_origin(anno)

              if origin is list:
-                 # Handle list element types
                  (elem_type,) = get_args(anno)
                  json_item_type = type_map.get(elem_type, "string")
                  schema = {
@@ -36,7 +35,6 @@ class tool_model:
                      "description": name,
                  }
              else:
-                 # Primitive types
                  json_type = type_map.get(anno, "string")
                  schema = {
                      "type": json_type,
@@ -47,7 +45,6 @@ class tool_model:
              if param.default is inspect._empty:
                  required.append(name)

-         # Attach tool definition metadata
          self.tool_definition = {
              "type": "function",
              "function": {
@@ -65,7 +62,7 @@ class tool_model:

          wraps(func)(self)

-     def __call__(self, *args, **kwargs):
+     def __call__(self, *args: Any, **kwargs: Any) -> Any:
          return self.func(*args, **kwargs)

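For reference, a sketch of the decorator in use. The function below is hypothetical, and only behavior visible in this diff is assumed: JSON types mapped from annotations, `required` built from parameters without defaults, and a pass-through `__call__`:

    @tool_model
    def search_hotels(city: str, nights: int = 1) -> str:
        # Hypothetical tool: returns a canned string.
        return f"{nights} night(s) in {city}"

    # "city" has no default, so it lands in required; "nights" does not.
    # Annotations map via type_map (int -> "integer", str -> "string", ...).
    print(search_hotels.name)             # "search_hotels"
    print(search_hotels.tool_definition)  # {"type": "function", "function": {...}}
    print(search_hotels("Kyoto"))         # __call__ forwards to the wrapped function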