openrouter-provider 0.0.4.tar.gz → 0.0.6.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of openrouter-provider might be problematic.

Files changed (16)
  1. {openrouter_provider-0.0.4 → openrouter_provider-0.0.6}/PKG-INFO +1 -1
  2. {openrouter_provider-0.0.4 → openrouter_provider-0.0.6}/pyproject.toml +1 -1
  3. {openrouter_provider-0.0.4 → openrouter_provider-0.0.6}/src/OpenRouterProvider/Chat_message.py +11 -3
  4. {openrouter_provider-0.0.4 → openrouter_provider-0.0.6}/src/OpenRouterProvider/Chatbot_manager.py +22 -1
  5. openrouter_provider-0.0.6/src/OpenRouterProvider/OpenRouterProvider.py +141 -0
  6. {openrouter_provider-0.0.4 → openrouter_provider-0.0.6}/src/openrouter_provider.egg-info/PKG-INFO +1 -1
  7. openrouter_provider-0.0.4/src/OpenRouterProvider/OpenRouterProvider.py +0 -101
  8. {openrouter_provider-0.0.4 → openrouter_provider-0.0.6}/README.md +0 -0
  9. {openrouter_provider-0.0.4 → openrouter_provider-0.0.6}/setup.cfg +0 -0
  10. {openrouter_provider-0.0.4 → openrouter_provider-0.0.6}/src/OpenRouterProvider/LLMs.py +0 -0
  11. {openrouter_provider-0.0.4 → openrouter_provider-0.0.6}/src/OpenRouterProvider/Tool.py +0 -0
  12. {openrouter_provider-0.0.4 → openrouter_provider-0.0.6}/src/__init__.py +0 -0
  13. {openrouter_provider-0.0.4 → openrouter_provider-0.0.6}/src/openrouter_provider.egg-info/SOURCES.txt +0 -0
  14. {openrouter_provider-0.0.4 → openrouter_provider-0.0.6}/src/openrouter_provider.egg-info/dependency_links.txt +0 -0
  15. {openrouter_provider-0.0.4 → openrouter_provider-0.0.6}/src/openrouter_provider.egg-info/requires.txt +0 -0
  16. {openrouter_provider-0.0.4 → openrouter_provider-0.0.6}/src/openrouter_provider.egg-info/top_level.txt +0 -0
{openrouter_provider-0.0.4 → openrouter_provider-0.0.6}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openrouter-provider
-Version: 0.0.4
+Version: 0.0.6
 Summary: This is an unofficial wrapper of OpenRouter.
 Author-email: Keisuke Miyamto <aichiboyhighschool@gmail.com>
 Requires-Python: >=3.7
{openrouter_provider-0.0.4 → openrouter_provider-0.0.6}/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openrouter-provider"
-version = "0.0.4"
+version = "0.0.6"
 description = "This is an unofficial wrapper of OpenRouter."
 readme = "README.md"
 requires-python = ">=3.7"
{openrouter_provider-0.0.4 → openrouter_provider-0.0.6}/src/OpenRouterProvider/Chat_message.py
@@ -6,6 +6,8 @@ import base64
 from io import BytesIO
 from dataclasses import dataclass
 
+from openai.types.chat import ChatCompletion
+
 
 class Role(Enum):
     system = "system"
@@ -24,15 +26,21 @@ class ToolCall:
 
 
 class Chat_message:
-    def __init__(self, text: str, images: list[Image.Image]=None, role: Role=Role.user, answerdBy: LLMModel=None, token :int=0, cost: float=0) -> None:
+    def __init__(self,
+                 text: str,
+                 images: list[Image.Image]=None,
+                 role: Role=Role.user,
+                 answerdBy: LLMModel=None,
+                 raw_response: ChatCompletion=None
+                 ) -> None:
         self.role = role
         self.text = text
         self.images = self._process_image(images=images)
-        self.token = token
-        self.cost = cost
         self.answeredBy: LLMModel = answerdBy
 
         self.tool_calls: list[ToolCall] = []
+        self.raw_resoonse: ChatCompletion = raw_response
+
 
     def __str__(self) -> str:
         # ANSI color codes for blue, green, and reset (to default)
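In 0.0.6 the per-message token and cost fields are gone; the full ChatCompletion object is kept instead, under the attribute raw_resoonse (sic, as shipped). A minimal sketch of reading usage data back out of it, assuming a reply obtained from OpenRouterProvider.invoke() as set up in the examples further down:

    # Sketch: recovering token usage in 0.0.6, which replaced the old
    # `token`/`cost` fields with the raw ChatCompletion response.
    reply = client.invoke(model=model, system_prompt=system, querys=history)
    raw = reply.raw_resoonse  # sic: attribute name as spelled in this release
    if raw is not None and raw.usage is not None:
        print(raw.usage.prompt_tokens, raw.usage.completion_tokens)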
{openrouter_provider-0.0.4 → openrouter_provider-0.0.6}/src/OpenRouterProvider/Chatbot_manager.py
@@ -5,6 +5,8 @@ from .LLMs import LLMModel
 from dotenv import load_dotenv
 import time
 import json
+from typing import Iterator
+
 
 _base_system_prompt = """
 It's [TIME] today.
@@ -120,4 +122,23 @@ class Chatbot_manager:
         self._memory.append(reply)
 
         return reply
-
+
+    def invoke_stream(self, model: LLMModel, query: Chat_message, tools: list[tool_model]=[], provider:ProviderConfig=None) -> Iterator[str]:
+        self._memory.append(query)
+        client = OpenRouterProvider()
+        generator = client.invoke_stream(
+            model=model,
+            system_prompt=self._system_prompt,
+            querys=self._memory,
+            tools=self.tools + tools,
+            provider=provider
+        )
+
+        text = ""
+        for token in generator:
+            text += token.choices[0].delta.content
+            yield token.choices[0].delta.content
+
+        self._memory.append(Chat_message(text=text, role=Role.ai, answerdBy=LLMModel))
+
+
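The new invoke_stream buffers the streamed text back into conversation memory while yielding each delta to the caller. It concatenates token.choices[0].delta.content directly; the sample chunks recorded in the new module below show empty strings for the closing delta, but the OpenAI client types delta.content as optional, so a defensive caller may want to skip falsy pieces. A minimal consumption sketch, assuming a configured Chatbot_manager named manager and an LLMModel named model:

    # Sketch: consuming Chatbot_manager.invoke_stream() from 0.0.6.
    query = Chat_message(text="Summarize this diff.")
    for piece in manager.invoke_stream(model=model, query=query):
        if piece:  # delta.content may be empty (or None) on the final chunk
            print(piece, end="", flush=True)
    print()

Also worth noting: the closing line records the reply with answerdBy=LLMModel, i.e. the class itself rather than the model instance that produced the reply.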
openrouter_provider-0.0.6/src/OpenRouterProvider/OpenRouterProvider.py (new file)
@@ -0,0 +1,141 @@
+import logging
+from .Chat_message import *
+from .Tool import tool_model
+from .LLMs import *
+
+from openai import OpenAI
+from openai.types.chat import ChatCompletionChunk
+from dotenv import load_dotenv
+import os, time
+from dataclasses import dataclass, field, asdict
+from typing import List, Optional, Literal, Iterator
+from pprint import pprint
+
+# Show errors only, with detailed tracebacks
+logging.basicConfig(level=logging.ERROR, format="%(asctime)s - %(levelname)s - %(message)s")
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class ProviderConfig:
+    order: Optional[List[str]] = None
+    allow_fallbacks: bool = None
+    require_parameters: bool = None
+    data_collection: Literal["allow", "deny"] = None
+    only: Optional[List[str]] = None
+    ignore: Optional[List[str]] = None
+    quantizations: Optional[List[str]] = None
+    sort: Optional[Literal["price", "throughput"]] = None
+    max_price: Optional[dict] = None
+
+    def to_dict(self) -> dict:
+        return {k: v for k, v in asdict(self).items() if v is not None}
+
+
+class OpenRouterProvider:
+    def __init__(self) -> None:
+        load_dotenv()
+        api_key = os.getenv("OPENROUTER_API_KEY")
+        if not api_key:
+            logger.error("OPENROUTER_API_KEY is not set in environment variables.")
+        self.client = OpenAI(
+            base_url="https://openrouter.ai/api/v1",
+            api_key=api_key,
+        )
+
+    def make_prompt(self, system_prompt: Chat_message,
+                    querys: list[Chat_message]) -> list[dict]:
+        messages = [{"role": "system", "content": system_prompt.text}]
+
+        for query in querys:
+            if query.role == Role.user:
+                if query.images is None:
+                    messages.append({"role": "user", "content": query.text})
+                else:
+                    content = [{"type": "text", "text": query.text}]
+                    for img in query.images[:50]:
+                        content.append(
+                            {"type": "image_url",
+                             "image_url": {"url": f"data:image/jpeg;base64,{img}"}})
+                    messages.append({"role": "user", "content": content})
+
+            elif query.role == Role.ai or query.role == Role.tool:
+                assistant_msg = {"role": "assistant"}
+                assistant_msg["content"] = query.text or None
+
+                if query.tool_calls:
+                    assistant_msg["tool_calls"] = [
+                        {
+                            "id": str(t.id),
+                            "type": "function",
+                            "function": {
+                                "name": t.name,
+                                "arguments": t.arguments
+                            }
+                        }
+                        for t in query.tool_calls
+                    ]
+                messages.append(assistant_msg)
+
+                for t in query.tool_calls:
+                    messages.append({
+                        "role": "tool",
+                        "tool_call_id": str(t.id),
+                        "content": str(t.result)
+                    })
+
+        return messages
+
+    def invoke(self, model: LLMModel, system_prompt: Chat_message, querys: list[Chat_message], tools: list[tool_model] = [], provider: ProviderConfig = None) -> Chat_message:
+        try:
+            messages = self.make_prompt(system_prompt, querys)
+
+            tool_defs = [tool.tool_definition for tool in tools] if tools else None
+            provider_dict = provider.to_dict() if provider else None
+
+            response = self.client.chat.completions.create(
+                model=model.name,
+                messages=messages,
+                tools=tool_defs,
+                extra_body={"provider": provider_dict}
+            )
+
+            reply = Chat_message(text=response.choices[0].message.content, role=Role.ai, raw_response=response)
+
+            if response.choices[0].message.tool_calls:
+                reply.role = Role.tool
+                for tool in response.choices[0].message.tool_calls:
+                    reply.tool_calls.append(ToolCall(id=tool.id, name=tool.function.name, arguments=tool.function.arguments))
+            return reply
+
+        except Exception as e:
+            logger.exception(f"An error occurred while invoking the model: {e.__class__.__name__}: {str(e)}")
+            return Chat_message(text="Fail to get response. Please see the error message.", role=Role.ai, raw_response=None)
+
+    def invoke_stream(self, model: LLMModel, system_prompt: Chat_message, querys: list[Chat_message], tools: list[tool_model] = [], provider: ProviderConfig = None) -> Iterator[ChatCompletionChunk]:
+        # chunk examples:
+        # ChatCompletionChunk(id='gen-1746748260-mdKZLTs9QY7MmUxWKb8V', choices=[Choice(delta=ChoiceDelta(content='!', function_call=None, refusal=None, role='assistant', tool_calls=None), finish_reason=None, index=0, logprobs=None, native_finish_reason=None)], created=1746748260, model='openai/gpt-4o-mini', object='chat.completion.chunk', service_tier=None, system_fingerprint='fp_e2f22fdd96', usage=None, provider='OpenAI')
+
+        # ChatCompletionChunk(id='gen-1746748260-mdKZLTs9QY7MmUxWKb8V', choices=[Choice(delta=ChoiceDelta(content='', function_call=None, refusal=None, role='assistant', tool_calls=None), finish_reason='stop', index=0, logprobs=None, native_finish_reason='stop')], created=1746748260, model='openai/gpt-4o-mini', object='chat.completion.chunk', service_tier=None, system_fingerprint='fp_e2f22fdd96', usage=None, provider='OpenAI')
+
+        # ChatCompletionChunk(id='gen-1746748260-mdKZLTs9QY7MmUxWKb8V', choices=[Choice(delta=ChoiceDelta(content='', function_call=None, refusal=None, role='assistant', tool_calls=None), finish_reason=None, index=0, logprobs=None, native_finish_reason=None)], created=1746748260, model='openai/gpt-4o-mini', object='chat.completion.chunk', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=54, prompt_tokens=61, total_tokens=115, completion_tokens_details=CompletionTokensDetails(reasoning_tokens=0), prompt_tokens_details={'cached_tokens': 0}), provider='OpenAI')
+
+        try:
+            messages = self.make_prompt(system_prompt, querys)
+
+            tool_defs = [tool.tool_definition for tool in tools] if tools else None
+            provider_dict = provider.to_dict() if provider else None
+
+            response = self.client.chat.completions.create(
+                model=model.name,
+                messages=messages,
+                tools=tool_defs,
+                extra_body={"provider": provider_dict},
+                stream=True
+            )
+
+            return response
+
+        except Exception as e:
+            logger.exception(f"An error occurred while invoking the model: {e.__class__.__name__}: {str(e)}")
+            return Chat_message(text="Fail to get response. Please see the error message.", role=Role.ai, raw_response=None)
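Taken together, the new module wires ProviderConfig into the request's extra_body and wraps both calls in try/except with logging. A minimal direct-usage sketch; the import paths follow the package layout in the file list above, and the model constant is an assumption (LLMs.py is unchanged in this diff and its contents are not shown):

    # Sketch: direct use of OpenRouterProvider 0.0.6 with provider routing.
    # Assumes OPENROUTER_API_KEY is set in the environment or a .env file.
    from OpenRouterProvider.OpenRouterProvider import OpenRouterProvider, ProviderConfig
    from OpenRouterProvider.Chat_message import Chat_message, Role

    client = OpenRouterProvider()
    config = ProviderConfig(order=["OpenAI"], allow_fallbacks=False)  # only keys set here are sent
    system = Chat_message(text="You are a helpful assistant.", role=Role.system)
    history = [Chat_message(text="Hello!")]

    reply = client.invoke(model=some_model, system_prompt=system, querys=history, provider=config)
    print(reply.text)

where some_model stands in for an LLMModel defined in LLMs.py.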
{openrouter_provider-0.0.4 → openrouter_provider-0.0.6}/src/openrouter_provider.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openrouter-provider
-Version: 0.0.4
+Version: 0.0.6
 Summary: This is an unofficial wrapper of OpenRouter.
 Author-email: Keisuke Miyamto <aichiboyhighschool@gmail.com>
 Requires-Python: >=3.7
openrouter_provider-0.0.4/src/OpenRouterProvider/OpenRouterProvider.py (deleted)
@@ -1,101 +0,0 @@
-from .Chat_message import *
-from .Tool import tool_model
-from .LLMs import *
-
-from openai import OpenAI
-from dotenv import load_dotenv
-import os
-from dataclasses import dataclass, field, asdict
-from typing import List, Optional, Literal
-import json
-
-
-@dataclass
-class ProviderConfig:
-    order: Optional[List[str]] = None
-    allow_fallbacks: bool = None
-    require_parameters: bool = None
-    data_collection: Literal["allow", "deny"] = None
-    only: Optional[List[str]] = None
-    ignore: Optional[List[str]] = None
-    quantizations: Optional[List[str]] = None
-    sort: Optional[Literal["price", "throughput"]] = None
-    max_price: Optional[dict] = None
-
-    def to_dict(self) -> dict:
-        return {k: v for k, v in asdict(self).items() if v is not None}
-
-
-class OpenRouterProvider:
-    def __init__(self) -> None:
-        load_dotenv()
-        self.client = OpenAI(
-            base_url="https://openrouter.ai/api/v1",
-            api_key=os.getenv("OPENROUTER_API_KEY"),
-        )
-
-    def make_prompt(self, system_prompt: Chat_message,
-                    querys: list[Chat_message]) -> list[dict]:
-        messages = [{"role": "system", "content": system_prompt.text}]
-
-        for query in querys:
-            # ----- USER -----
-            if query.role == Role.user:
-                if query.images is None:
-                    messages.append({"role": "user", "content": query.text})
-                else:
-                    content = [{"type": "text", "text": query.text}]
-                    for img in query.images[:50]:
-                        content.append(
-                            {"type": "image_url",
-                             "image_url": {"url": f"data:image/jpeg;base64,{img}"}})
-                    messages.append({"role": "user", "content": content})
-
-            # ----- ASSISTANT -----
-            elif query.role == Role.ai or query.role == Role.tool:
-                assistant_msg = {"role": "assistant"}
-                assistant_msg["content"] = query.text or None  # content must be set explicitly
-
-                # (1) attach tool_calls (if any)
-                if query.tool_calls:
-                    assistant_msg["tool_calls"] = [
-                        {
-                            "id": str(t.id),
-                            "type": "function",
-                            "function": {
-                                "name": t.name,
-                                "arguments": t.arguments  # JSON string
-                            }
-                        }
-                        for t in query.tool_calls
-                    ]
-                messages.append(assistant_msg)
-
-                # (2) place tool messages immediately after the assistant message
-                for t in query.tool_calls:
-                    messages.append({
-                        "role": "tool",
-                        "tool_call_id": str(t.id),
-                        "content": str(t.result)  # execution result (as a string)
-                    })
-        return messages
-
-
-    def invoke(self, model: LLMModel, system_prompt: Chat_message, querys: list[Chat_message], tools:list[tool_model]=[], provider:ProviderConfig=None) -> Chat_message:
-        response = self.client.chat.completions.create(
-            model=model.name,
-            messages=self.make_prompt(system_prompt, querys),
-            tools=[tool.tool_definition for tool in tools] if tools else None,
-            extra_body={
-                "provider": provider.to_dict() if provider else None
-            }
-        )
-        reply = Chat_message(text=response.choices[0].message.content, role=Role.ai)
-
-        if response.choices[0].message.tool_calls:
-            reply.role = Role.tool
-            for tool in response.choices[0].message.tool_calls:
-                reply.tool_calls.append(ToolCall(id=tool.id, name=tool.function.name, arguments=tool.function.arguments))
-
-        return reply
-
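The deleted 0.0.4 module above let exceptions from invoke propagate to the caller; the 0.0.6 replacement logs them and returns a placeholder Chat_message instead. A caller that prefers the old fail-fast behavior can test for that placeholder; a sketch, using the fallback text copied from the new module:

    # Sketch: restoring fail-fast semantics on top of 0.0.6's fallback reply.
    FALLBACK_TEXT = "Fail to get response. Please see the error message."

    def invoke_or_raise(client, **kwargs):
        reply = client.invoke(**kwargs)
        if reply.text == FALLBACK_TEXT and reply.raw_resoonse is None:
            raise RuntimeError("OpenRouter call failed; see the logged traceback")
        return reply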