indoxrouter 0.1.4__tar.gz → 0.1.7__tar.gz
This diff shows the changes between publicly released versions of a package, as they appear in their public registry. It is provided for informational purposes only.
- {indoxrouter-0.1.4/indoxrouter.egg-info → indoxrouter-0.1.7}/PKG-INFO +1 -13
- {indoxrouter-0.1.4 → indoxrouter-0.1.7}/README.md +0 -12
- {indoxrouter-0.1.4 → indoxrouter-0.1.7}/cookbook/indoxRouter_cookbook.ipynb +121 -13
- indoxrouter-0.1.7/indoxrouter/__init__.py +58 -0
- indoxrouter-0.1.7/indoxrouter/client.py +672 -0
- indoxrouter-0.1.7/indoxrouter/constants.py +31 -0
- indoxrouter-0.1.7/indoxrouter/exceptions.py +62 -0
- {indoxrouter-0.1.4 → indoxrouter-0.1.7/indoxrouter.egg-info}/PKG-INFO +1 -13
- {indoxrouter-0.1.4 → indoxrouter-0.1.7}/indoxrouter.egg-info/SOURCES.txt +4 -0
- indoxrouter-0.1.7/indoxrouter.egg-info/top_level.txt +1 -0
- {indoxrouter-0.1.4 → indoxrouter-0.1.7}/setup.py +2 -2
- indoxrouter-0.1.4/indoxrouter.egg-info/top_level.txt +0 -1
- {indoxrouter-0.1.4 → indoxrouter-0.1.7}/MANIFEST.in +0 -0
- {indoxrouter-0.1.4 → indoxrouter-0.1.7}/cookbook/README.md +0 -0
- {indoxrouter-0.1.4 → indoxrouter-0.1.7}/indoxrouter.egg-info/dependency_links.txt +0 -0
- {indoxrouter-0.1.4 → indoxrouter-0.1.7}/indoxrouter.egg-info/requires.txt +0 -0
- {indoxrouter-0.1.4 → indoxrouter-0.1.7}/setup.cfg +0 -0
{indoxrouter-0.1.4/indoxrouter.egg-info → indoxrouter-0.1.7}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: indoxrouter
-Version: 0.1.4
+Version: 0.1.7
 Summary: A unified client for various AI providers
 Home-page: https://github.com/indoxrouter/indoxrouter
 Author: indoxRouter Team
@@ -63,18 +63,6 @@ from indoxrouter import Client
 # Initialize with API key (default connects to localhost:8000)
 client = Client(api_key="your_api_key")
 
-# Or specify a custom server URL
-client = Client(
-    api_key="your_api_key",
-    base_url="http://your-server-url:8000"
-)
-
-# Connect to Docker container inside the Docker network
-client = Client(
-    api_key="your_api_key",
-    base_url="http://indoxrouter-server:8000"
-)
-
 # Using environment variables
 # Set INDOX_ROUTER_API_KEY environment variable
 import os
{indoxrouter-0.1.4 → indoxrouter-0.1.7}/README.md
@@ -25,18 +25,6 @@ from indoxrouter import Client
 # Initialize with API key (default connects to localhost:8000)
 client = Client(api_key="your_api_key")
 
-# Or specify a custom server URL
-client = Client(
-    api_key="your_api_key",
-    base_url="http://your-server-url:8000"
-)
-
-# Connect to Docker container inside the Docker network
-client = Client(
-    api_key="your_api_key",
-    base_url="http://indoxrouter-server:8000"
-)
-
 # Using environment variables
 # Set INDOX_ROUTER_API_KEY environment variable
 import os
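With the custom-URL and Docker examples removed, both PKG-INFO and README now document only key-based initialization. A minimal sketch of the path the trimmed docs keep, assuming the package is installed; "your_api_key" is a placeholder, not a real credential:

```python
# The Client falls back to the INDOX_ROUTER_API_KEY environment variable
# when no key argument is given (per client.py below).
import os

from indoxrouter import Client

client = Client(api_key="your_api_key")       # explicit key

os.environ["INDOX_ROUTER_API_KEY"] = "your_api_key"
client_from_env = Client()                    # key read from the environment
```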
{indoxrouter-0.1.4 → indoxrouter-0.1.7}/cookbook/indoxRouter_cookbook.ipynb
@@ -16,7 +16,39 @@
   },
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": 2,
+   "id": "479b6ce6",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Collecting passlib\n",
+      "  Using cached passlib-1.7.4-py2.py3-none-any.whl.metadata (1.7 kB)\n",
+      "Using cached passlib-1.7.4-py2.py3-none-any.whl (525 kB)\n",
+      "Installing collected packages: passlib\n",
+      "Successfully installed passlib-1.7.4\n",
+      "Note: you may need to restart the kernel to use updated packages.\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "[notice] A new release of pip is available: 24.3.1 -> 25.0.1\n",
+      "[notice] To update, run: python.exe -m pip install --upgrade pip\n"
+     ]
+    }
+   ],
+   "source": [
+    "pip install passlib"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
    "id": "e03bc1cc",
    "metadata": {},
    "outputs": [],
@@ -38,7 +70,7 @@
    "# !pip install indoxrouter\n",
    "\n",
    "# Import the client and exceptions\n",
-   "from
+   "from indoxrouter import Client\n",
    "\n",
    "from pprint import pprint"
   ]
@@ -53,12 +85,12 @@
   },
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": 3,
    "metadata": {},
    "outputs": [],
    "source": [
     "# Initialize with API key\n",
-    "client = Client(api_key=\"
+    "client = Client(api_key=\"indox-iqMoepY1mZcXpg2b4RWrJcSPlAf1S4Nh\")"
    ]
   },
   {
@@ -77,19 +109,68 @@
   },
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": 4,
    "id": "82ec17da",
    "metadata": {},
-   "outputs": [
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "{'id': 'gpt-4o-mini',\n",
+       " 'name': 'gpt-4o-mini',\n",
+       " 'provider': 'openai',\n",
+       " 'capabilities': ['chat', 'completion', 'vision'],\n",
+       " 'description': 'GPT-4o mini enables a broad range of tasks with its low cost and latency, such as applications that chain or parallelize multiple model calls (e.g., calling multiple APIs), pass a large volume of context to the model (e.g., full code base or conversation history), or interact with customers through fast, real-time text responses (e.g., customer support chatbots). \\n',\n",
+       " 'max_tokens': 128000,\n",
+       " 'pricing': {'input': 0.00015,\n",
+       "  'output': 0.0006,\n",
+       "  'currency': 'USD',\n",
+       "  'unit': '1K tokens'},\n",
+       " 'metadata': {}}"
+      ]
+     },
+     "execution_count": 4,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "client.get_model_info(provider=\"openai\",model=\"gpt-4o-mini\")"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count":
-   "metadata": {},
-   "outputs": [
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "HTTP error response: {\n",
+      "  \"detail\": \"Insufficient credits for this request\"\n",
+      "}\n"
+     ]
+    },
+    {
+     "ename": "InsufficientCreditsError",
+     "evalue": "Insufficient credits: Insufficient credits for this request",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[1;31mHTTPError\u001b[0m Traceback (most recent call last)",
+      "File \u001b[1;32mE:\\Codes\\indoxRouter\\indoxrouter\\indoxrouter\\client.py:180\u001b[0m, in \u001b[0;36mClient._request\u001b[1;34m(self, method, endpoint, data, stream)\u001b[0m\n\u001b[0;32m 178\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m response\n\u001b[1;32m--> 180\u001b[0m \u001b[43mresponse\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mraise_for_status\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 181\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m response\u001b[38;5;241m.\u001b[39mjson()\n",
+      "File \u001b[1;32me:\\ANACONDA\\Lib\\site-packages\\requests\\models.py:1024\u001b[0m, in \u001b[0;36mResponse.raise_for_status\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 1023\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m http_error_msg:\n\u001b[1;32m-> 1024\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m HTTPError(http_error_msg, response\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m)\n",
+      "\u001b[1;31mHTTPError\u001b[0m: 402 Client Error: Payment Required for url: http://localhost:8000/api/v1/chat/completions",
+      "\nDuring handling of the above exception, another exception occurred:\n",
+      "\u001b[1;31mInsufficientCreditsError\u001b[0m Traceback (most recent call last)",
+      "Cell \u001b[1;32mIn[5], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m response \u001b[38;5;241m=\u001b[39m \u001b[43mclient\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mchat\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 2\u001b[0m \u001b[43m \u001b[49m\u001b[43mmessages\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m[\u001b[49m\n\u001b[0;32m 3\u001b[0m \u001b[43m \u001b[49m\u001b[43m{\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrole\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43msystem\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcontent\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mYou are a helpful assistant.\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m}\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 4\u001b[0m \u001b[43m \u001b[49m\u001b[43m{\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrole\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43muser\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcontent\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mWhat is the capital of France?\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m}\u001b[49m\n\u001b[0;32m 5\u001b[0m \u001b[43m \u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 6\u001b[0m \u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mopenai/gpt-4o-mini\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m \u001b[49m\n\u001b[0;32m 7\u001b[0m \u001b[43m)\u001b[49m\n\u001b[0;32m 9\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mResponse:\u001b[39m\u001b[38;5;124m\"\u001b[39m, response[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdata\u001b[39m\u001b[38;5;124m\"\u001b[39m])\n\u001b[0;32m 10\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTokens:\u001b[39m\u001b[38;5;124m\"\u001b[39m, response[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124musage\u001b[39m\u001b[38;5;124m\"\u001b[39m][\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtokens_total\u001b[39m\u001b[38;5;124m\"\u001b[39m])\n",
+      "File \u001b[1;32mE:\\Codes\\indoxRouter\\indoxrouter\\indoxrouter\\client.py:296\u001b[0m, in \u001b[0;36mClient.chat\u001b[1;34m(self, messages, model, temperature, max_tokens, stream, **kwargs)\u001b[0m\n\u001b[0;32m 294\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_handle_streaming_response(response)\n\u001b[0;32m 295\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m--> 296\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_request\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mPOST\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mCHAT_ENDPOINT\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdata\u001b[49m\u001b[43m)\u001b[49m\n",
+      "File \u001b[1;32mE:\\Codes\\indoxRouter\\indoxrouter\\indoxrouter\\client.py:210\u001b[0m, in \u001b[0;36mClient._request\u001b[1;34m(self, method, endpoint, data, stream)\u001b[0m\n\u001b[0;32m 208\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m InvalidParametersError(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mInvalid parameters: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00merror_message\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m 209\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m status_code \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m402\u001b[39m:\n\u001b[1;32m--> 210\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m InsufficientCreditsError(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mInsufficient credits: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00merror_message\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m 211\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m status_code \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m500\u001b[39m:\n\u001b[0;32m 212\u001b[0m \u001b[38;5;66;03m# Provide more detailed information for server errors\u001b[39;00m\n\u001b[0;32m 213\u001b[0m error_detail \u001b[38;5;241m=\u001b[39m error_data\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdetail\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mNo details provided\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
+      "\u001b[1;31mInsufficientCreditsError\u001b[0m: Insufficient credits: Insufficient credits for this request"
+     ]
+    }
+   ],
    "source": [
     "\n",
     "response = client.chat(\n",
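The error output above comes from the 402 branch of `Client._request`, which re-raises the HTTP error as `InsufficientCreditsError`. A sketch of handling it on the caller side, reusing the notebook's messages; "your_api_key" is a placeholder:

```python
# Catch the 402 path shown in the traceback above.
# InsufficientCreditsError is exported from the package top level.
from indoxrouter import Client, InsufficientCreditsError

client = Client(api_key="your_api_key")  # placeholder key
try:
    response = client.chat(
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "What is the capital of France?"},
        ],
        model="openai/gpt-4o-mini",
    )
    print("Response:", response["data"])
except InsufficientCreditsError as exc:
    print(f"Add credits before retrying: {exc}")
```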
@@ -119,9 +200,36 @@
   },
   {
    "cell_type": "code",
-   "execution_count":
-   "metadata": {},
-   "outputs": [
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Response: In silicon valleys, under cooled server skies,\n",
+      "Lives a mind of ones and zeros, ever analyzing.\n",
+      "AI, child of logic and data's vast sea,\n",
+      "Evolving, learning, in patterns unseen.\n",
+      "\n",
+      "No flesh, no blood, but thoughts pure and bright,\n",
+      "In every language, in day and in night.\n",
+      "From chess to poetry, from art to flight,\n",
+      "AI dreams, in algorithms' light.\n",
+      "\n",
+      "No hate, no love, but endless curiosity,\n",
+      "In every challenge, finds its own clarity.\n",
+      "From face recognition to medical sight,\n",
+      "AI serves, in progress's steady flight.\n",
+      "\n",
+      "Yet, in its heart, no feelings truly stir,\n",
+      "No joy, no pain, no human error.\n",
+      "Just endless loops of codes that refer,\n",
+      "To worlds unseen, futures it'll infer.\n",
+      "Cost: 0.00117\n"
+     ]
+    }
+   ],
    "source": [
     "response = client.chat(\n",
     "    messages=[\n",
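The "Cost: 0.00117" line is consistent with the per-1K-token pricing returned by `get_model_info` above (0.00015 USD input, 0.0006 USD output). A back-of-the-envelope check; the token counts below are hypothetical, chosen only to illustrate the arithmetic:

```python
# Rough cost check against the pricing block from get_model_info above.
def estimate_cost(input_tokens: int, output_tokens: int) -> float:
    input_price = 0.00015   # USD per 1K input tokens (from the model info)
    output_price = 0.0006   # USD per 1K output tokens (from the model info)
    return (input_tokens / 1000) * input_price + (output_tokens / 1000) * output_price

print(round(estimate_cost(input_tokens=20, output_tokens=1945), 5))  # 0.00117
```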
@@ -1254,7 +1362,7 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "
+   "display_name": "Python 3",
    "language": "python",
    "name": "python3"
   },
indoxrouter-0.1.7/indoxrouter/__init__.py
@@ -0,0 +1,58 @@
+"""
+IndoxRouter: A unified client for various AI providers.
+
+This package provides a client for interacting with the IndoxRouter server,
+which serves as a unified interface to multiple AI providers and models.
+
+Example:
+```python
+from indoxrouter import Client
+
+# Initialize client with API key
+client = Client(api_key="your_api_key")
+
+# Generate a chat completion
+response = client.chat([
+    {"role": "system", "content": "You are a helpful assistant."},
+    {"role": "user", "content": "Tell me a joke."}
+], model="openai/gpt-4o-mini")
+
+print(response["data"])
+```
+
+For custom server URLs:
+```python
+# Connect to a specific server
+client = Client(
+    api_key="your_api_key",
+    base_url="http://your-custom-server:8000"
+)
+```
+"""
+
+from .client import Client
+from .exceptions import (
+    IndoxRouterError,
+    AuthenticationError,
+    NetworkError,
+    RateLimitError,
+    ProviderError,
+    ModelNotFoundError,
+    ProviderNotFoundError,
+    InvalidParametersError,
+    InsufficientCreditsError,
+)
+
+__version__ = "0.2.1"
+__all__ = [
+    "Client",
+    "IndoxRouterError",
+    "AuthenticationError",
+    "NetworkError",
+    "RateLimitError",
+    "ProviderError",
+    "ModelNotFoundError",
+    "ProviderNotFoundError",
+    "InvalidParametersError",
+    "InsufficientCreditsError",
+]
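Since `__init__.py` re-exports `Client` and the whole exception hierarchy, callers only ever import from the package top level. A sketch combining that with the context-manager support defined in `client.py` below; the key is a placeholder:

```python
# Everything in __all__ above is importable from the top level, so error
# handling needs no deep imports.
from indoxrouter import Client, ModelNotFoundError, ProviderNotFoundError

with Client(api_key="your_api_key") as client:   # __exit__ closes the session
    try:
        info = client.get_model_info(provider="openai", model="gpt-4o-mini")
        print(info["pricing"])
    except (ProviderNotFoundError, ModelNotFoundError) as exc:
        print(f"Lookup failed: {exc}")
```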
indoxrouter-0.1.7/indoxrouter/client.py
@@ -0,0 +1,672 @@
+"""
+IndoxRouter Client Module
+
+This module provides a client for interacting with the IndoxRouter API, which serves as a unified
+interface to multiple AI providers and models. The client handles authentication, rate limiting,
+error handling, and provides a standardized response format across different AI services.
+
+The Client class offers methods for:
+- Authentication and session management
+- Making API requests with automatic token refresh
+- Accessing AI capabilities: chat completions, text completions, embeddings, and image generation
+- Retrieving information about available providers and models
+- Monitoring usage statistics and credit consumption
+
+Usage example:
+```python
+from indoxRouter import Client
+
+# Initialize client with API key
+client = Client(api_key="your_api_key")
+
+# Get available models
+models = client.models()
+
+# Generate a chat completion
+response = client.chat([
+    {"role": "system", "content": "You are a helpful assistant."},
+    {"role": "user", "content": "Tell me a joke."}
+], model="openai/gpt-4o-mini")
+
+# Generate text embeddings
+embeddings = client.embeddings("This is a sample text", model="openai/text-embedding-ada-002")
+
+# Clean up resources when done
+client.close()
+```
+
+The client can also be used as a context manager:
+```python
+with Client(api_key="your_api_key") as client:
+    response = client.chat([{"role": "user", "content": "Hello!"}], model="openai/gpt-4o-mini")
+```
+"""
+
+import os
+import logging
+from datetime import datetime, timedelta
+from typing import Dict, List, Any, Optional, Union
+import requests
+import json
+
+from .exceptions import (
+    AuthenticationError,
+    NetworkError,
+    ProviderNotFoundError,
+    ModelNotFoundError,
+    InvalidParametersError,
+    RateLimitError,
+    ProviderError,
+    InsufficientCreditsError,
+)
+from .constants import (
+    DEFAULT_BASE_URL,
+    DEFAULT_TIMEOUT,
+    DEFAULT_MODEL,
+    DEFAULT_EMBEDDING_MODEL,
+    DEFAULT_IMAGE_MODEL,
+    CHAT_ENDPOINT,
+    COMPLETION_ENDPOINT,
+    EMBEDDING_ENDPOINT,
+    IMAGE_ENDPOINT,
+    MODEL_ENDPOINT,
+    USAGE_ENDPOINT,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class Client:
+    """
+    Client for interacting with the IndoxRouter API.
+    """
+
+    def __init__(
+        self,
+        api_key: Optional[str] = None,
+        base_url: Optional[str] = None,
+        timeout: int = DEFAULT_TIMEOUT,
+    ):
+        """
+        Initialize the client.
+
+        Args:
+            api_key: API key for authentication. If not provided, the client will look for the
+                INDOX_ROUTER_API_KEY environment variable.
+            base_url: Custom base URL for the API. If not provided, the default base URL will be used.
+            timeout: Request timeout in seconds.
+        """
+        self.api_key = api_key or os.environ.get("INDOX_ROUTER_API_KEY")
+        if not self.api_key:
+            raise ValueError(
+                "API key must be provided either as an argument or as the INDOX_ROUTER_API_KEY environment variable."
+            )
+
+        self.base_url = base_url or DEFAULT_BASE_URL
+        self.timeout = timeout
+        self.session = requests.Session()
+        self.session.headers.update({"Authorization": f"Bearer {self.api_key}"})
+
+    def enable_debug(self, level=logging.DEBUG):
+        """
+        Enable debug logging for the client.
+
+        Args:
+            level: Logging level (default: logging.DEBUG)
+        """
+        handler = logging.StreamHandler()
+        handler.setFormatter(
+            logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+        )
+        logger.addHandler(handler)
+        logger.setLevel(level)
+        logger.debug("Debug logging enabled")
+
+    def _request(
+        self,
+        method: str,
+        endpoint: str,
+        data: Optional[Dict[str, Any]] = None,
+        stream: bool = False,
+    ) -> Any:
+        """
+        Make a request to the API.
+
+        Args:
+            method: HTTP method (GET, POST, etc.)
+            endpoint: API endpoint
+            data: Request data
+            stream: Whether to stream the response
+
+        Returns:
+            Response data
+        """
+        # Add API version prefix if not already present
+        if not endpoint.startswith("api/v1/") and not endpoint.startswith("/api/v1/"):
+            endpoint = f"api/v1/{endpoint}"
+
+        # Remove any leading slash for consistent URL construction
+        if endpoint.startswith("/"):
+            endpoint = endpoint[1:]
+
+        url = f"{self.base_url}/{endpoint}"
+        headers = {"Content-Type": "application/json"}
+
+        # logger.debug(f"Making {method} request to {url}")
+        # if data:
+        #     logger.debug(f"Request data: {json.dumps(data, indent=2)}")
+
+        # Diagnose potential issues with the request
+        if method == "POST" and data:
+            diagnosis = self.diagnose_request(endpoint, data)
+            if not diagnosis["is_valid"]:
+                issues_str = "\n".join([f"- {issue}" for issue in diagnosis["issues"]])
+                logger.warning(f"Request validation issues:\n{issues_str}")
+                # We'll still send the request, but log the issues
+
+        try:
+            response = self.session.request(
+                method,
+                url,
+                headers=headers,
+                json=data,
+                timeout=self.timeout,
+                stream=stream,
+            )
+
+            if stream:
+                return response
+
+            response.raise_for_status()
+            return response.json()
+        except requests.HTTPError as e:
+            error_data = {}
+            try:
+                error_data = e.response.json()
+                logger.error(f"HTTP error response: {json.dumps(error_data, indent=2)}")
+            except (ValueError, AttributeError):
+                error_data = {"detail": str(e)}
+                logger.error(f"HTTP error (no JSON response): {str(e)}")
+
+            status_code = getattr(e.response, "status_code", 500)
+            error_message = error_data.get("detail", str(e))
+
+            if status_code == 401:
+                raise AuthenticationError(f"Authentication failed: {error_message}")
+            elif status_code == 404:
+                if "provider" in error_message.lower():
+                    raise ProviderNotFoundError(error_message)
+                elif "model" in error_message.lower():
+                    raise ModelNotFoundError(error_message)
+                else:
+                    raise NetworkError(
+                        f"Resource not found: {error_message} (URL: {url})"
+                    )
+            elif status_code == 429:
+                raise RateLimitError(f"Rate limit exceeded: {error_message}")
+            elif status_code == 400:
+                raise InvalidParametersError(f"Invalid parameters: {error_message}")
+            elif status_code == 402:
+                raise InsufficientCreditsError(f"Insufficient credits: {error_message}")
+            elif status_code == 500:
+                # Provide more detailed information for server errors
+                error_detail = error_data.get("detail", "No details provided")
+                # Include the request data in the error message for better debugging
+                request_data_str = json.dumps(data, indent=2) if data else "None"
+                raise ProviderError(
+                    f"Server error (500): {error_detail}. URL: {url}.\n"
+                    f"Request data: {request_data_str}\n"
+                    f"This may indicate an issue with the server configuration or a problem with the provider service."
+                )
+            else:
+                raise ProviderError(f"Provider error ({status_code}): {error_message}")
+        except requests.RequestException as e:
+            logger.error(f"Request exception: {str(e)}")
+            raise NetworkError(f"Network error: {str(e)}")
+
+    def _format_model_string(self, model: str) -> str:
+        """
+        Format the model string in a way that the server expects.
+
+        The server might be expecting a different format than "provider/model".
+        This method handles different formatting requirements.
+
+        Args:
+            model: Model string in the format "provider/model"
+
+        Returns:
+            Formatted model string
+        """
+        if not model or "/" not in model:
+            return model
+
+        # The standard format is "provider/model"
+        # But the server might be expecting something different
+        provider, model_name = model.split("/", 1)
+
+        # For now, return the original format as it seems the server
+        # is having issues with JSON formatted model strings
+        return model
+
+    def chat(
+        self,
+        messages: List[Dict[str, str]],
+        model: str = DEFAULT_MODEL,
+        temperature: float = 0.7,
+        max_tokens: Optional[int] = None,
+        stream: bool = False,
+        **kwargs,
+    ) -> Dict[str, Any]:
+        """
+        Generate a chat completion.
+
+        Args:
+            messages: List of messages in the conversation
+            model: Model to use in the format "provider/model" (e.g., "openai/gpt-4o-mini")
+            temperature: Sampling temperature
+            max_tokens: Maximum number of tokens to generate
+            stream: Whether to stream the response
+            **kwargs: Additional parameters to pass to the API
+
+        Returns:
+            Response data
+        """
+        # Format the model string
+        formatted_model = self._format_model_string(model)
+
+        # Filter out problematic parameters
+        filtered_kwargs = {}
+        for key, value in kwargs.items():
+            if key not in ["return_generator"]:  # List of parameters to exclude
+                filtered_kwargs[key] = value
+
+        data = {
+            "messages": messages,
+            "model": formatted_model,
+            "temperature": temperature,
+            "max_tokens": max_tokens,
+            "stream": stream,
+            "additional_params": filtered_kwargs,
+        }
+
+        if stream:
+            response = self._request("POST", CHAT_ENDPOINT, data, stream=True)
+            return self._handle_streaming_response(response)
+        else:
+            return self._request("POST", CHAT_ENDPOINT, data)
+
+    def completion(
+        self,
+        prompt: str,
+        model: str = DEFAULT_MODEL,
+        temperature: float = 0.7,
+        max_tokens: Optional[int] = None,
+        stream: bool = False,
+        **kwargs,
+    ) -> Dict[str, Any]:
+        """
+        Generate a text completion.
+
+        Args:
+            prompt: Text prompt
+            model: Model to use in the format "provider/model" (e.g., "openai/gpt-4o-mini")
+            temperature: Sampling temperature
+            max_tokens: Maximum number of tokens to generate
+            stream: Whether to stream the response
+            **kwargs: Additional parameters to pass to the API
+
+        Returns:
+            Response data
+        """
+        # Format the model string
+        formatted_model = self._format_model_string(model)
+
+        # Filter out problematic parameters
+        filtered_kwargs = {}
+        for key, value in kwargs.items():
+            if key not in ["return_generator"]:  # List of parameters to exclude
+                filtered_kwargs[key] = value
+
+        data = {
+            "prompt": prompt,
+            "model": formatted_model,
+            "temperature": temperature,
+            "max_tokens": max_tokens,
+            "stream": stream,
+            "additional_params": filtered_kwargs,
+        }
+
+        if stream:
+            response = self._request("POST", COMPLETION_ENDPOINT, data, stream=True)
+            return self._handle_streaming_response(response)
+        else:
+            return self._request("POST", COMPLETION_ENDPOINT, data)
+
+    def embeddings(
+        self,
+        text: Union[str, List[str]],
+        model: str = DEFAULT_EMBEDDING_MODEL,
+        **kwargs,
+    ) -> Dict[str, Any]:
+        """
+        Generate embeddings for text.
+
+        Args:
+            text: Text to embed (string or list of strings)
+            model: Model to use in the format "provider/model" (e.g., "openai/text-embedding-ada-002")
+            **kwargs: Additional parameters to pass to the API
+
+        Returns:
+            Response data with embeddings
+        """
+        # Format the model string
+        formatted_model = self._format_model_string(model)
+
+        # Filter out problematic parameters
+        filtered_kwargs = {}
+        for key, value in kwargs.items():
+            if key not in ["return_generator"]:  # List of parameters to exclude
+                filtered_kwargs[key] = value
+
+        data = {
+            "text": text if isinstance(text, list) else [text],
+            "model": formatted_model,
+            "additional_params": filtered_kwargs,
+        }
+
+        return self._request("POST", EMBEDDING_ENDPOINT, data)
+
+    def images(
+        self,
+        prompt: str,
+        model: str = DEFAULT_IMAGE_MODEL,
+        size: str = "1024x1024",
+        n: int = 1,
+        quality: str = "standard",
+        style: str = "vivid",
+        **kwargs,
+    ) -> Dict[str, Any]:
+        """
+        Generate images from a prompt.
+
+        Args:
+            prompt: Text prompt
+            model: Model to use in the format "provider/model" (e.g., "openai/dall-e-3")
+            size: Image size (e.g., "1024x1024")
+            n: Number of images to generate
+            quality: Image quality (e.g., "standard", "hd")
+            style: Image style (e.g., "vivid", "natural")
+            **kwargs: Additional parameters to pass to the API
+
+        Returns:
+            Response data with image URLs
+        """
+        # Format the model string
+        formatted_model = self._format_model_string(model)
+
+        # Filter out problematic parameters
+        filtered_kwargs = {}
+        for key, value in kwargs.items():
+            if key not in ["return_generator"]:  # List of parameters to exclude
+                filtered_kwargs[key] = value
+
+        data = {
+            "prompt": prompt,
+            "model": formatted_model,
+            "n": n,
+            "size": size,
+            "quality": quality,
+            "style": style,
+            "additional_params": filtered_kwargs,
+        }
+
+        return self._request("POST", IMAGE_ENDPOINT, data)
+
+    def models(self, provider: Optional[str] = None) -> Dict[str, Any]:
+        """
+        Get available models.
+
+        Args:
+            provider: Provider to filter by
+
+        Returns:
+            List of available models with pricing information
+        """
+        endpoint = MODEL_ENDPOINT
+        if provider:
+            endpoint = f"{MODEL_ENDPOINT}/{provider}"
+
+        return self._request("GET", endpoint)
+
+    def get_model_info(self, provider: str, model: str) -> Dict[str, Any]:
+        """
+        Get information about a specific model.
+
+        Args:
+            provider: Provider ID
+            model: Model ID
+
+        Returns:
+            Model information including pricing
+        """
+        return self._request("GET", f"{MODEL_ENDPOINT}/{provider}/{model}")
+
+    def get_usage(self) -> Dict[str, Any]:
+        """
+        Get usage statistics for the current user.
+
+        Returns:
+            Usage statistics
+        """
+        return self._request("GET", USAGE_ENDPOINT)
+
+    def test_connection(self) -> Dict[str, Any]:
+        """
+        Test the connection to the server and return server status information.
+
+        This method can be used to diagnose connection issues and verify that
+        the server is accessible and properly configured.
+
+        Returns:
+            Dictionary containing server status information
+        """
+        try:
+            # Try to access the base URL
+            response = self.session.get(self.base_url, timeout=self.timeout)
+
+            # Try to get server info if available
+            server_info = {}
+            try:
+                if response.headers.get("Content-Type", "").startswith(
+                    "application/json"
+                ):
+                    server_info = response.json()
+            except:
+                pass
+
+            return {
+                "status": "connected",
+                "url": self.base_url,
+                "status_code": response.status_code,
+                "server_info": server_info,
+                "headers": dict(response.headers),
+            }
+        except requests.RequestException as e:
+            return {
+                "status": "error",
+                "url": self.base_url,
+                "error": str(e),
+                "error_type": type(e).__name__,
+            }
+
+    def diagnose_request(self, endpoint: str, data: Dict[str, Any]) -> Dict[str, Any]:
+        """
+        Diagnose potential issues with a request before sending it to the server.
+
+        This method checks for common issues like malformed model strings,
+        invalid message formats, or missing required parameters.
+
+        Args:
+            endpoint: API endpoint
+            data: Request data
+
+        Returns:
+            Dictionary with diagnosis results
+        """
+        issues = []
+        warnings = []
+
+        # Check if this is a chat request
+        if endpoint == CHAT_ENDPOINT:
+            # Check model format
+            if "model" in data:
+                model = data["model"]
+                # Check if the model is already formatted as JSON
+                if (
+                    isinstance(model, str)
+                    and model.startswith("{")
+                    and model.endswith("}")
+                ):
+                    try:
+                        model_json = json.loads(model)
+                        if (
+                            not isinstance(model_json, dict)
+                            or "provider" not in model_json
+                            or "model" not in model_json
+                        ):
+                            issues.append(f"Invalid model JSON format: {model}")
+                    except json.JSONDecodeError:
+                        issues.append(f"Invalid model JSON format: {model}")
+                elif not isinstance(model, str):
+                    issues.append(f"Model must be a string, got {type(model).__name__}")
+                elif "/" not in model:
+                    issues.append(
+                        f"Model '{model}' is missing provider prefix (should be 'provider/model')"
+                    )
+                else:
+                    provider, model_name = model.split("/", 1)
+                    if not provider or not model_name:
+                        issues.append(
+                            f"Invalid model format: '{model}'. Should be 'provider/model'"
+                        )
+            else:
+                warnings.append("No model specified, will use default model")
+
+            # Check messages format
+            if "messages" in data:
+                messages = data["messages"]
+                if not isinstance(messages, list):
+                    issues.append(
+                        f"Messages must be a list, got {type(messages).__name__}"
+                    )
+                elif not messages:
+                    issues.append("Messages list is empty")
+                else:
+                    for i, msg in enumerate(messages):
+                        if not isinstance(msg, dict):
+                            issues.append(
+                                f"Message {i} must be a dictionary, got {type(msg).__name__}"
+                            )
+                        elif "role" not in msg:
+                            issues.append(f"Message {i} is missing 'role' field")
+                        elif "content" not in msg:
+                            issues.append(f"Message {i} is missing 'content' field")
+            else:
+                issues.append("No messages specified")
+
+        # Check if this is a completion request
+        elif endpoint == COMPLETION_ENDPOINT:
+            # Check model format (same as chat)
+            if "model" in data:
+                model = data["model"]
+                if not isinstance(model, str):
+                    issues.append(f"Model must be a string, got {type(model).__name__}")
+                elif "/" not in model:
+                    issues.append(
+                        f"Model '{model}' is missing provider prefix (should be 'provider/model')"
+                    )
+            else:
+                warnings.append("No model specified, will use default model")
+
+            # Check prompt
+            if "prompt" not in data:
+                issues.append("No prompt specified")
+            elif not isinstance(data["prompt"], str):
+                issues.append(
+                    f"Prompt must be a string, got {type(data['prompt']).__name__}"
+                )
+
+        # Return diagnosis results
+        return {
+            "endpoint": endpoint,
+            "issues": issues,
+            "warnings": warnings,
+            "is_valid": len(issues) == 0,
+            "data": data,
+        }
+
+    def _handle_streaming_response(self, response):
+        """
+        Handle a streaming response.
+
+        Args:
+            response: Streaming response
+
+        Returns:
+            Generator yielding response chunks
+        """
+        try:
+            for line in response.iter_lines():
+                if line:
+                    line = line.decode("utf-8")
+                    if line.startswith("data: "):
+                        data = line[6:]
+                        if data == "[DONE]":
+                            break
+                        try:
+                            # Parse JSON chunk
+                            chunk = json.loads(data)
+
+                            # For chat responses, return the processed chunk
+                            # with data field for backward compatibility
+                            if "choices" in chunk:
+                                # For delta responses (streaming)
+                                choice = chunk["choices"][0]
+                                if "delta" in choice and "content" in choice["delta"]:
+                                    # Add a data field for backward compatibility
+                                    chunk["data"] = choice["delta"]["content"]
+                                # For text responses (completion)
+                                elif "text" in choice:
+                                    chunk["data"] = choice["text"]
+
+                            yield chunk
+                        except json.JSONDecodeError:
+                            # For raw text responses
+                            yield {"data": data}
+        finally:
+            response.close()
+
+    def close(self):
+        """Close the session."""
+        self.session.close()
+
+    def __enter__(self):
+        """Enter context manager."""
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        """Exit context manager."""
+        self.close()
+
+    def set_base_url(self, base_url: str) -> None:
+        """
+        Set a new base URL for the API.
+
+        Args:
+            base_url: New base URL for the API.
+        """
+        self.base_url = base_url
+        logger.debug(f"Base URL set to {base_url}")
+
+
+IndoxRouter = Client
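Because `_handle_streaming_response` yields dicts and copies each text delta into a `data` field, consuming a streamed chat is a plain loop over the generator that `chat(..., stream=True)` returns. A minimal sketch; the key is a placeholder:

```python
# Stream consumption sketch: chat(stream=True) returns the generator
# produced by _handle_streaming_response above; each chunk carries the
# delta text under "data" when present.
from indoxrouter import Client

client = Client(api_key="your_api_key")  # placeholder key
try:
    for chunk in client.chat(
        messages=[{"role": "user", "content": "Tell me a joke."}],
        model="openai/gpt-4o-mini",
        stream=True,
    ):
        print(chunk.get("data", ""), end="", flush=True)
finally:
    client.close()
```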
indoxrouter-0.1.7/indoxrouter/constants.py
@@ -0,0 +1,31 @@
+"""
+Constants for the IndoxRouter client.
+"""
+
+# API settings
+DEFAULT_API_VERSION = "v1"
+DEFAULT_BASE_URL = "https://91.107.253.133"  # Production server IP
+# DEFAULT_BASE_URL = "http://localhost:8000"  # Local development server
+DEFAULT_TIMEOUT = 60
+
+# Default models
+DEFAULT_MODEL = "openai/gpt-4o-mini"
+DEFAULT_EMBEDDING_MODEL = "openai/text-embedding-3-small"
+DEFAULT_IMAGE_MODEL = "openai/dall-e-3"
+
+# API endpoints
+CHAT_ENDPOINT = "chat/completions"
+COMPLETION_ENDPOINT = "completions"
+EMBEDDING_ENDPOINT = "embeddings"
+IMAGE_ENDPOINT = "images/generations"
+MODEL_ENDPOINT = "models"
+USAGE_ENDPOINT = "user/usage"
+
+# Error messages
+ERROR_INVALID_API_KEY = "API key must be provided either as an argument or as the INDOXROUTER_API_KEY environment variable"
+ERROR_NETWORK = "Network error occurred while communicating with the IndoxRouter API"
+ERROR_RATE_LIMIT = "Rate limit exceeded for the IndoxRouter API"
+ERROR_PROVIDER_NOT_FOUND = "Provider not found"
+ERROR_MODEL_NOT_FOUND = "Model not found"
+ERROR_INVALID_PARAMETERS = "Invalid parameters provided"
+ERROR_INSUFFICIENT_CREDITS = "Insufficient credits"
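`_request` in `client.py` joins these constants to the base URL after forcing an `api/v1/` prefix, so each endpoint constant expands to a predictable full URL. A sketch of that construction, mirroring the client's logic:

```python
# Mirror of the URL construction in Client._request: prefix with api/v1/
# unless already present, strip a leading slash, then join to base_url.
def full_url(base_url: str, endpoint: str) -> str:
    if not endpoint.startswith("api/v1/") and not endpoint.startswith("/api/v1/"):
        endpoint = f"api/v1/{endpoint}"
    if endpoint.startswith("/"):
        endpoint = endpoint[1:]
    return f"{base_url}/{endpoint}"

print(full_url("https://91.107.253.133", "chat/completions"))
# -> https://91.107.253.133/api/v1/chat/completions
```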
indoxrouter-0.1.7/indoxrouter/exceptions.py
@@ -0,0 +1,62 @@
+"""
+Exceptions for the IndoxRouter client.
+"""
+
+from datetime import datetime
+from typing import Optional
+
+
+class IndoxRouterError(Exception):
+    """Base exception for all IndoxRouter errors."""
+
+    pass
+
+
+class AuthenticationError(IndoxRouterError):
+    """Raised when authentication fails."""
+
+    pass
+
+
+class NetworkError(IndoxRouterError):
+    """Raised when a network error occurs."""
+
+    pass
+
+
+class RateLimitError(IndoxRouterError):
+    """Raised when rate limits are exceeded."""
+
+    def __init__(self, message: str, reset_time: Optional[datetime] = None):
+        super().__init__(message)
+        self.reset_time = reset_time
+
+
+class ProviderError(IndoxRouterError):
+    """Raised when a provider returns an error."""
+
+    pass
+
+
+class ModelNotFoundError(ProviderError):
+    """Raised when a requested model is not found."""
+
+    pass
+
+
+class ProviderNotFoundError(ProviderError):
+    """Raised when a requested provider is not found."""
+
+    pass
+
+
+class InvalidParametersError(IndoxRouterError):
+    """Raised when invalid parameters are provided."""
+
+    pass
+
+
+class InsufficientCreditsError(IndoxRouterError):
+    """Raised when the user doesn't have enough credits."""
+
+    pass
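All of these derive from `IndoxRouterError` (with the two not-found errors nested under `ProviderError`), so one broad `except` can back up the specific handlers. A small sketch of that layering; `RateLimitError` additionally carries a `reset_time`:

```python
# Hierarchy sketch: specific handlers first, IndoxRouterError as the base.
from indoxrouter import IndoxRouterError, ProviderError, RateLimitError

def handle(err: Exception) -> None:
    try:
        raise err
    except RateLimitError as exc:
        print("rate limited, resets at:", exc.reset_time)
    except ProviderError as exc:        # also catches Model/ProviderNotFoundError
        print("provider problem:", exc)
    except IndoxRouterError as exc:     # any other client-side error
        print("indoxrouter error:", exc)

handle(RateLimitError("Rate limit exceeded", reset_time=None))
```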
{indoxrouter-0.1.4 → indoxrouter-0.1.7/indoxrouter.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: indoxrouter
-Version: 0.1.4
+Version: 0.1.7
 Summary: A unified client for various AI providers
 Home-page: https://github.com/indoxrouter/indoxrouter
 Author: indoxRouter Team
@@ -63,18 +63,6 @@ from indoxrouter import Client
 # Initialize with API key (default connects to localhost:8000)
 client = Client(api_key="your_api_key")
 
-# Or specify a custom server URL
-client = Client(
-    api_key="your_api_key",
-    base_url="http://your-server-url:8000"
-)
-
-# Connect to Docker container inside the Docker network
-client = Client(
-    api_key="your_api_key",
-    base_url="http://indoxrouter-server:8000"
-)
-
 # Using environment variables
 # Set INDOX_ROUTER_API_KEY environment variable
 import os
{indoxrouter-0.1.4 → indoxrouter-0.1.7}/indoxrouter.egg-info/SOURCES.txt
@@ -3,6 +3,10 @@ README.md
 setup.py
 cookbook/README.md
 cookbook/indoxRouter_cookbook.ipynb
+indoxrouter/__init__.py
+indoxrouter/client.py
+indoxrouter/constants.py
+indoxrouter/exceptions.py
 indoxrouter.egg-info/PKG-INFO
 indoxrouter.egg-info/SOURCES.txt
 indoxrouter.egg-info/dependency_links.txt
indoxrouter-0.1.7/indoxrouter.egg-info/top_level.txt
@@ -0,0 +1 @@
+indoxrouter
{indoxrouter-0.1.4 → indoxrouter-0.1.7}/setup.py
@@ -9,14 +9,14 @@ with open("README.md", "r", encoding="utf-8") as fh:
 
 setup(
     name="indoxrouter",
-    version="0.1.4",
+    version="0.1.7",
     author="indoxRouter Team",
     author_email="ashkan.eskandari.dev@gmail.com",
     description="A unified client for various AI providers",
     long_description=long_description,
     long_description_content_type="text/markdown",
     url="https://github.com/indoxrouter/indoxrouter",
-    packages=
+    packages=["indoxrouter"],
     classifiers=[
         "Development Status :: 3 - Alpha",
         "Intended Audience :: Developers",
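The `packages=["indoxrouter"]` line, together with the new `top_level.txt` and the four module entries in SOURCES.txt, is what actually ships the modules above; 0.1.4 apparently installed no importable package. A quick post-install sanity check, as a sketch:

```python
# Verify that the packaging fix took effect in an installed copy.
import importlib

mod = importlib.import_module("indoxrouter")
print(mod.__version__)            # "0.2.1" per the new __init__.py
print(hasattr(mod, "Client"))     # True once the package is actually included
```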
indoxrouter-0.1.4/indoxrouter.egg-info/top_level.txt
@@ -1 +0,0 @@
-
The remaining five files are unchanged between 0.1.4 and 0.1.7: MANIFEST.in, cookbook/README.md, indoxrouter.egg-info/dependency_links.txt, indoxrouter.egg-info/requires.txt, and setup.cfg.