indoxrouter 0.1.15.tar.gz → 0.1.17.tar.gz

This diff shows the contents of publicly available package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two released versions as they appear in the public registry.
@@ -1,9 +1,13 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: indoxrouter
3
- Version: 0.1.15
3
+ Version: 0.1.17
4
4
  Summary: A unified client for various AI providers
5
5
  Author-email: indoxRouter Team <ashkan.eskandari.dev@gmail.com>
6
+ License: MIT
6
7
  Project-URL: Homepage, https://github.com/indoxrouter/indoxrouter
8
+ Project-URL: Repository, https://github.com/indoxrouter/indoxrouter
9
+ Project-URL: Issues, https://github.com/indoxrouter/indoxrouter/issues
10
+ Keywords: ai,api,client,openai,anthropic,google,mistral,xai,imagen,grok,image-generation
7
11
  Classifier: Development Status :: 3 - Alpha
8
12
  Classifier: Intended Audience :: Developers
9
13
  Classifier: Programming Language :: Python :: 3
@@ -129,13 +133,43 @@ print(f"First embedding: {response['data'][0]['embedding'][:5]}...")
129
133
  ### Image Generation
130
134
 
131
135
  ```python
136
+ # OpenAI Image Generation
132
137
  response = client.images(
133
138
  prompt="A serene landscape with mountains and a lake",
134
139
  model="openai/dall-e-3",
135
- size="1024x1024"
140
+ size="1024x1024",
141
+ quality="standard", # Options: standard, hd
142
+ style="vivid" # Options: vivid, natural
136
143
  )
137
144
 
138
145
  print(f"Image URL: {response['data'][0]['url']}")
146
+
147
+ # Google Imagen Image Generation
148
+ from indoxrouter.constants import GOOGLE_IMAGE_MODEL
149
+
150
+ response = client.images(
151
+ prompt="A robot holding a red skateboard in a futuristic city",
152
+ model=GOOGLE_IMAGE_MODEL,
153
+ n=2, # Generate 2 images
154
+ negative_prompt="broken, damaged, low quality",
155
+ guidance_scale=7.5, # Control adherence to prompt
156
+ seed=42, # For reproducible results
157
+ )
158
+
159
+ # xAI Grok Image Generation
160
+ from indoxrouter.constants import XAI_IMAGE_MODEL
161
+
162
+ response = client.images(
163
+ prompt="A cat in a tree",
164
+ model=XAI_IMAGE_MODEL,
165
+ n=1,
166
+ response_format="b64_json" # Get base64 encoded image
167
+ )
168
+
169
+ # Access base64 encoded image data
170
+ if "b64_json" in response["data"][0]:
171
+ b64_data = response["data"][0]["b64_json"]
172
+ # Use the base64 data (e.g., to display in HTML or save to file)
139
173
  ```
140
174
 
141
175
  ### Streaming Responses
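
Continuing the xAI example in the snippet above, a returned `b64_json` payload can be written to disk with the standard library. A minimal sketch, assuming the response shape shown there (`response["data"][0]["b64_json"]`); the output filename is illustrative.

```python
# Decode the base64 image from the xAI example above and save it to a file.
# Assumes `response` is the images() result from the README snippet;
# the filename "grok_cat.png" is illustrative.
import base64

item = response["data"][0]
if "b64_json" in item:
    with open("grok_cat.png", "wb") as f:
        f.write(base64.b64decode(item["b64_json"]))
```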
@@ -1,22 +1,3 @@
1
- Metadata-Version: 2.4
2
- Name: indoxrouter
3
- Version: 0.1.15
4
- Summary: A unified client for various AI providers
5
- Author-email: indoxRouter Team <ashkan.eskandari.dev@gmail.com>
6
- Project-URL: Homepage, https://github.com/indoxrouter/indoxrouter
7
- Classifier: Development Status :: 3 - Alpha
8
- Classifier: Intended Audience :: Developers
9
- Classifier: Programming Language :: Python :: 3
10
- Classifier: Programming Language :: Python :: 3.8
11
- Classifier: Programming Language :: Python :: 3.9
12
- Classifier: Programming Language :: Python :: 3.10
13
- Classifier: Programming Language :: Python :: 3.11
14
- Classifier: Programming Language :: Python :: 3.12
15
- Requires-Python: >=3.8
16
- Description-Content-Type: text/markdown
17
- Requires-Dist: requests>=2.25.0
18
- Requires-Dist: python-dotenv>=1.0.0
19
-
20
1
  # IndoxRouter Client
21
2
 
22
3
  A unified client for various AI providers, including OpenAI, Anthropic, Google, and Mistral.
@@ -129,13 +110,43 @@ print(f"First embedding: {response['data'][0]['embedding'][:5]}...")
129
110
  ### Image Generation
130
111
 
131
112
  ```python
113
+ # OpenAI Image Generation
132
114
  response = client.images(
133
115
  prompt="A serene landscape with mountains and a lake",
134
116
  model="openai/dall-e-3",
135
- size="1024x1024"
117
+ size="1024x1024",
118
+ quality="standard", # Options: standard, hd
119
+ style="vivid" # Options: vivid, natural
136
120
  )
137
121
 
138
122
  print(f"Image URL: {response['data'][0]['url']}")
123
+
124
+ # Google Imagen Image Generation
125
+ from indoxrouter.constants import GOOGLE_IMAGE_MODEL
126
+
127
+ response = client.images(
128
+ prompt="A robot holding a red skateboard in a futuristic city",
129
+ model=GOOGLE_IMAGE_MODEL,
130
+ n=2, # Generate 2 images
131
+ negative_prompt="broken, damaged, low quality",
132
+ guidance_scale=7.5, # Control adherence to prompt
133
+ seed=42, # For reproducible results
134
+ )
135
+
136
+ # xAI Grok Image Generation
137
+ from indoxrouter.constants import XAI_IMAGE_MODEL
138
+
139
+ response = client.images(
140
+ prompt="A cat in a tree",
141
+ model=XAI_IMAGE_MODEL,
142
+ n=1,
143
+ response_format="b64_json" # Get base64 encoded image
144
+ )
145
+
146
+ # Access base64 encoded image data
147
+ if "b64_json" in response["data"][0]:
148
+ b64_data = response["data"][0]["b64_json"]
149
+ # Use the base64 data (e.g., to display in HTML or save to file)
139
150
  ```
140
151
 
141
152
  ### Streaming Responses
@@ -14,52 +14,6 @@
14
14
  "\n"
15
15
  ]
16
16
  },
17
- {
18
- "cell_type": "code",
19
- "execution_count": 2,
20
- "id": "479b6ce6",
21
- "metadata": {},
22
- "outputs": [
23
- {
24
- "name": "stdout",
25
- "output_type": "stream",
26
- "text": [
27
- "Collecting passlib\n",
28
- " Using cached passlib-1.7.4-py2.py3-none-any.whl.metadata (1.7 kB)\n",
29
- "Using cached passlib-1.7.4-py2.py3-none-any.whl (525 kB)\n",
30
- "Installing collected packages: passlib\n",
31
- "Successfully installed passlib-1.7.4\n",
32
- "Note: you may need to restart the kernel to use updated packages.\n"
33
- ]
34
- },
35
- {
36
- "name": "stderr",
37
- "output_type": "stream",
38
- "text": [
39
- "\n",
40
- "[notice] A new release of pip is available: 24.3.1 -> 25.0.1\n",
41
- "[notice] To update, run: python.exe -m pip install --upgrade pip\n"
42
- ]
43
- }
44
- ],
45
- "source": [
46
- "pip install passlib"
47
- ]
48
- },
49
- {
50
- "cell_type": "code",
51
- "execution_count": 1,
52
- "id": "e03bc1cc",
53
- "metadata": {},
54
- "outputs": [],
55
- "source": [
56
- "import sys\n",
57
- "import os\n",
58
- "module_path = os.path.abspath('E:/Codes/indoxRouter/')\n",
59
- "if module_path not in sys.path:\n",
60
- " sys.path.append(module_path)"
61
- ]
62
- },
63
17
  {
64
18
  "cell_type": "code",
65
19
  "execution_count": null,
@@ -252,14 +206,15 @@
252
206
  "metadata": {},
253
207
  "outputs": [],
254
208
  "source": [
209
+ "from indoxrouter import Client\n",
210
+ "client = Client(api_key=\"your-api-key\")\n",
211
+ "\n",
255
212
  "response = client.chat(\n",
256
213
  " messages=[\n",
257
214
  " {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
258
- " {\"role\": \"user\", \"content\": \"Write a short poem about AI.\"}\n",
215
+ " {\"role\": \"user\", \"content\": \"What is the capital of France?\"}\n",
259
216
  " ],\n",
260
- " model=\"deepseek/deepseek-chat\",\n",
261
- " temperature=0.8,\n",
262
- " max_tokens=500\n",
217
+ " model=\"openai/gpt-4o-mini\" \n",
263
218
  ")\n",
264
219
  "\n",
265
220
  "print(\"Response:\", response[\"data\"])\n",
@@ -1358,6 +1313,207 @@
1358
1313
  "\n",
1359
1314
  "For more information, refer to the [IndoxRouter documentation](https://docs.indoxrouter.com).\n"
1360
1315
  ]
1316
+ },
1317
+ {
1318
+ "cell_type": "code",
1319
+ "execution_count": 1,
1320
+ "id": "60ca0fd8",
1321
+ "metadata": {},
1322
+ "outputs": [
1323
+ {
1324
+ "name": "stdout",
1325
+ "output_type": "stream",
1326
+ "text": [
1327
+ "Requirement already satisfied: openai in e:\\anaconda\\lib\\site-packages (1.52.2)\n",
1328
+ "Requirement already satisfied: anyio<5,>=3.5.0 in e:\\anaconda\\lib\\site-packages (from openai) (4.2.0)\n",
1329
+ "Requirement already satisfied: distro<2,>=1.7.0 in e:\\anaconda\\lib\\site-packages (from openai) (1.9.0)\n",
1330
+ "Requirement already satisfied: httpx<1,>=0.23.0 in e:\\anaconda\\lib\\site-packages (from openai) (0.27.0)\n",
1331
+ "Requirement already satisfied: jiter<1,>=0.4.0 in e:\\anaconda\\lib\\site-packages (from openai) (0.6.1)\n",
1332
+ "Requirement already satisfied: pydantic<3,>=1.9.0 in c:\\users\\ashkan\\appdata\\roaming\\python\\python312\\site-packages (from openai) (2.9.2)\n",
1333
+ "Requirement already satisfied: sniffio in e:\\anaconda\\lib\\site-packages (from openai) (1.3.0)\n",
1334
+ "Requirement already satisfied: tqdm>4 in e:\\anaconda\\lib\\site-packages (from openai) (4.66.4)\n",
1335
+ "Requirement already satisfied: typing-extensions<5,>=4.11 in e:\\anaconda\\lib\\site-packages (from openai) (4.11.0)\n",
1336
+ "Requirement already satisfied: idna>=2.8 in e:\\anaconda\\lib\\site-packages (from anyio<5,>=3.5.0->openai) (3.7)\n",
1337
+ "Requirement already satisfied: certifi in e:\\anaconda\\lib\\site-packages (from httpx<1,>=0.23.0->openai) (2024.6.2)\n",
1338
+ "Requirement already satisfied: httpcore==1.* in e:\\anaconda\\lib\\site-packages (from httpx<1,>=0.23.0->openai) (1.0.5)\n",
1339
+ "Requirement already satisfied: h11<0.15,>=0.13 in e:\\anaconda\\lib\\site-packages (from httpcore==1.*->httpx<1,>=0.23.0->openai) (0.14.0)\n",
1340
+ "Requirement already satisfied: annotated-types>=0.6.0 in e:\\anaconda\\lib\\site-packages (from pydantic<3,>=1.9.0->openai) (0.6.0)\n",
1341
+ "Requirement already satisfied: pydantic-core==2.23.4 in e:\\anaconda\\lib\\site-packages (from pydantic<3,>=1.9.0->openai) (2.23.4)\n",
1342
+ "Requirement already satisfied: colorama in e:\\anaconda\\lib\\site-packages (from tqdm>4->openai) (0.4.6)\n"
1343
+ ]
1344
+ },
1345
+ {
1346
+ "name": "stderr",
1347
+ "output_type": "stream",
1348
+ "text": [
1349
+ "\n",
1350
+ "[notice] A new release of pip is available: 24.3.1 -> 25.1.1\n",
1351
+ "[notice] To update, run: python.exe -m pip install --upgrade pip\n"
1352
+ ]
1353
+ }
1354
+ ],
1355
+ "source": [
1356
+ "!pip install openai"
1357
+ ]
1358
+ },
1359
+ {
1360
+ "cell_type": "code",
1361
+ "execution_count": 2,
1362
+ "id": "4da1a40d",
1363
+ "metadata": {},
1364
+ "outputs": [],
1365
+ "source": [
1366
+ "from openai import OpenAI\n",
1367
+ "client = OpenAI(\n",
1368
+ " base_url=\"https://api.indoxrouter.com\",\n",
1369
+ " api_key=\"indox-vgfI5PEfUoZ6qufR2z6QqXFV4BHgxrez\",\n",
1370
+ ")\n"
1371
+ ]
1372
+ },
1373
+ {
1374
+ "cell_type": "code",
1375
+ "execution_count": 5,
1376
+ "id": "648eff2d",
1377
+ "metadata": {},
1378
+ "outputs": [],
1379
+ "source": [
1380
+ "from openai import OpenAI\n",
1381
+ "client = OpenAI(\n",
1382
+ " base_url=\"https://api.indoxrouter.com\",\n",
1383
+ " api_key=\"indox-vgfI5PEfUoZ6qufR2z6QqXFV4BHgxrez\",\n",
1384
+ ")\n",
1385
+ "\n",
1386
+ "completion = client.chat.completions.create(\n",
1387
+ " model=\"deepseek/deepseek-chat\",\n",
1388
+ " messages=[\n",
1389
+ " {\n",
1390
+ " \"role\": \"user\",\n",
1391
+ " \"content\": \"What is the meaning of life?\"\n",
1392
+ " }\n",
1393
+ " ]\n",
1394
+ ")"
1395
+ ]
1396
+ },
1397
+ {
1398
+ "cell_type": "code",
1399
+ "execution_count": 6,
1400
+ "id": "becbf4fa",
1401
+ "metadata": {},
1402
+ "outputs": [
1403
+ {
1404
+ "data": {
1405
+ "text/plain": [
1406
+ "ChatCompletion(id=None, choices=None, created=None, model='deepseek-chat', object=None, service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=None, prompt_tokens=None, total_tokens=None, completion_tokens_details=None, prompt_tokens_details=None, tokens_prompt=10, tokens_completion=348, tokens_total=358, cost=0.000706, latency=19.672869205474854, timestamp='2025-05-19T06:05:02.416455'), request_id='5ce677cf-0e30-49e3-a9d9-bf99f91edbb6', created_at='2025-05-19T06:05:02.434101', duration_ms=21821.611166000366, provider='deepseek', success=True, message='', raw_response=None, data='The meaning of life is one of the most profound and debated questions in philosophy, religion, and science. Different perspectives offer various answers:\\n\\n1. **Philosophical Perspectives:** \\n - **Existentialism (e.g., Sartre, Camus):** Life has no inherent meaning—we must create our own purpose through choices and actions. \\n - **Absurdism (Camus):** The search for meaning in a meaningless universe is absurd, but we must embrace life anyway. \\n - **Stoicism:** Meaning comes from virtue, wisdom, and living in harmony with nature. \\n\\n2. **Religious/Spiritual Views:** \\n - **Theistic (Christianity, Islam, etc.):** Life’s purpose is to serve, love, or unite with God (or the divine). \\n - **Eastern Traditions (Buddhism, Hinduism):** Meaning may involve enlightenment (breaking the cycle of suffering) or fulfilling dharma (duty). \\n\\n3. **Scientific Perspectives:** \\n - **Biological:** Life’s \"purpose\" is survival, reproduction, and passing on genes (evolutionary biology). \\n - **Cosmic:** From a physics standpoint, life may be a rare, fleeting phenomenon in an indifferent universe. \\n\\n4. **Personal/Subjective Meaning:** \\n Many find purpose in relationships, creativity, helping others, or personal growth. Viktor Frankl (Holocaust survivor) argued that meaning arises from suffering, love, and work. \\n\\n**Short answer?** There’s no universal meaning—it’s up to you to define it. What gives *your* life purpose? \\n\\nWould you like to explore a specific perspective?', finish_reason=None)"
1407
+ ]
1408
+ },
1409
+ "execution_count": 6,
1410
+ "metadata": {},
1411
+ "output_type": "execute_result"
1412
+ }
1413
+ ],
1414
+ "source": [
1415
+ "completion"
1416
+ ]
1417
+ },
1418
+ {
1419
+ "cell_type": "code",
1420
+ "execution_count": 7,
1421
+ "id": "410449d5",
1422
+ "metadata": {},
1423
+ "outputs": [
1424
+ {
1425
+ "name": "stdout",
1426
+ "output_type": "stream",
1427
+ "text": [
1428
+ "ChatCompletion(id=None, choices=None, created=None, model='deepseek-chat', object=None, service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=None, prompt_tokens=None, total_tokens=None, completion_tokens_details=None, prompt_tokens_details=None, tokens_prompt=10, tokens_completion=348, tokens_total=358, cost=0.000706, latency=19.672869205474854, timestamp='2025-05-19T06:05:02.416455'), request_id='5ce677cf-0e30-49e3-a9d9-bf99f91edbb6', created_at='2025-05-19T06:05:02.434101', duration_ms=21821.611166000366, provider='deepseek', success=True, message='', raw_response=None, data='The meaning of life is one of the most profound and debated questions in philosophy, religion, and science. Different perspectives offer various answers:\\n\\n1. **Philosophical Perspectives:** \\n - **Existentialism (e.g., Sartre, Camus):** Life has no inherent meaning—we must create our own purpose through choices and actions. \\n - **Absurdism (Camus):** The search for meaning in a meaningless universe is absurd, but we must embrace life anyway. \\n - **Stoicism:** Meaning comes from virtue, wisdom, and living in harmony with nature. \\n\\n2. **Religious/Spiritual Views:** \\n - **Theistic (Christianity, Islam, etc.):** Life’s purpose is to serve, love, or unite with God (or the divine). \\n - **Eastern Traditions (Buddhism, Hinduism):** Meaning may involve enlightenment (breaking the cycle of suffering) or fulfilling dharma (duty). \\n\\n3. **Scientific Perspectives:** \\n - **Biological:** Life’s \"purpose\" is survival, reproduction, and passing on genes (evolutionary biology). \\n - **Cosmic:** From a physics standpoint, life may be a rare, fleeting phenomenon in an indifferent universe. \\n\\n4. **Personal/Subjective Meaning:** \\n Many find purpose in relationships, creativity, helping others, or personal growth. Viktor Frankl (Holocaust survivor) argued that meaning arises from suffering, love, and work. \\n\\n**Short answer?** There’s no universal meaning—it’s up to you to define it. What gives *your* life purpose? \\n\\nWould you like to explore a specific perspective?', finish_reason=None)\n"
1429
+ ]
1430
+ }
1431
+ ],
1432
+ "source": [
1433
+ "from pprint import pprint\n",
1434
+ "pprint(completion)"
1435
+ ]
1436
+ },
1437
+ {
1438
+ "cell_type": "code",
1439
+ "execution_count": 8,
1440
+ "id": "ff185a0a",
1441
+ "metadata": {},
1442
+ "outputs": [
1443
+ {
1444
+ "name": "stdout",
1445
+ "output_type": "stream",
1446
+ "text": [
1447
+ "Note: you may need to restart the kernel to use updated packages.Collecting indoxrouter\n",
1448
+ " Using cached indoxrouter-0.1.15-py3-none-any.whl.metadata (5.4 kB)\n",
1449
+ "Requirement already satisfied: requests>=2.25.0 in e:\\anaconda\\lib\\site-packages (from indoxrouter) (2.32.3)\n",
1450
+ "Requirement already satisfied: python-dotenv>=1.0.0 in e:\\anaconda\\lib\\site-packages (from indoxrouter) (1.0.1)\n",
1451
+ "Requirement already satisfied: charset-normalizer<4,>=2 in e:\\anaconda\\lib\\site-packages (from requests>=2.25.0->indoxrouter) (3.3.2)\n",
1452
+ "Requirement already satisfied: idna<4,>=2.5 in e:\\anaconda\\lib\\site-packages (from requests>=2.25.0->indoxrouter) (3.7)\n",
1453
+ "Requirement already satisfied: urllib3<3,>=1.21.1 in e:\\anaconda\\lib\\site-packages (from requests>=2.25.0->indoxrouter) (2.2.2)\n",
1454
+ "Requirement already satisfied: certifi>=2017.4.17 in e:\\anaconda\\lib\\site-packages (from requests>=2.25.0->indoxrouter) (2024.6.2)\n",
1455
+ "Using cached indoxrouter-0.1.15-py3-none-any.whl (11 kB)\n",
1456
+ "Installing collected packages: indoxrouter\n",
1457
+ "Successfully installed indoxrouter-0.1.15\n",
1458
+ "\n"
1459
+ ]
1460
+ },
1461
+ {
1462
+ "name": "stderr",
1463
+ "output_type": "stream",
1464
+ "text": [
1465
+ "\n",
1466
+ "[notice] A new release of pip is available: 24.3.1 -> 25.1.1\n",
1467
+ "[notice] To update, run: python.exe -m pip install --upgrade pip\n"
1468
+ ]
1469
+ }
1470
+ ],
1471
+ "source": [
1472
+ "pip install indoxrouter"
1473
+ ]
1474
+ },
1475
+ {
1476
+ "cell_type": "code",
1477
+ "execution_count": 9,
1478
+ "id": "b584d7c4",
1479
+ "metadata": {},
1480
+ "outputs": [
1481
+ {
1482
+ "data": {
1483
+ "text/plain": [
1484
+ "{'request_id': 'c08cc108-6b0d-48bd-a660-546143f1b9fa',\n",
1485
+ " 'created_at': '2025-05-19T06:07:38.077269',\n",
1486
+ " 'duration_ms': 9664.651870727539,\n",
1487
+ " 'provider': 'deepseek',\n",
1488
+ " 'model': 'deepseek-chat',\n",
1489
+ " 'success': True,\n",
1490
+ " 'message': '',\n",
1491
+ " 'usage': {'tokens_prompt': 15,\n",
1492
+ " 'tokens_completion': 107,\n",
1493
+ " 'tokens_total': 122,\n",
1494
+ " 'cost': 0.000229,\n",
1495
+ " 'latency': 9.487398862838745,\n",
1496
+ " 'timestamp': '2025-05-19T06:07:38.065330'},\n",
1497
+ " 'raw_response': None,\n",
1498
+ " 'data': 'In a bustling city, a small cleaning robot named Pip discovered a lost kitten in an alley. Despite its simple programming, Pip felt a strange urge to protect the tiny creature. It carried the kitten through busy streets, dodging humans and traffic, until it found a kind woman who gasped in delight. The woman took the kitten home, and Pip watched from the sidewalk, its circuits humming with something like happiness. That night, as it returned to its charging station, Pip replayed the memory—its first act of kindness beyond code.',\n",
1499
+ " 'finish_reason': None}"
1500
+ ]
1501
+ },
1502
+ "execution_count": 9,
1503
+ "metadata": {},
1504
+ "output_type": "execute_result"
1505
+ }
1506
+ ],
1507
+ "source": [
1508
+ "from indoxrouter import Client\n",
1509
+ "client= Client(api_key=\"indox-vgfI5PEfUoZ6qufR2z6QqXFV4BHgxrez\")\n",
1510
+ "client.chat(\n",
1511
+ " messages=[\n",
1512
+ " {\"role\": \"user\", \"content\": \"Tell me a story about a robot in 5 sentences.\"}\n",
1513
+ " ],\n",
1514
+ " model=\"deepseek/deepseek-chat\",\n",
1515
+ ")"
1516
+ ]
1361
1517
  }
1362
1518
  ],
1363
1519
  "metadata": {
@@ -30,7 +30,7 @@ For custom server URLs:
30
30
  ```
31
31
  """
32
32
 
33
- from .client import Client
33
+ from .client import Client, IndoxRouter
34
34
  from .exceptions import (
35
35
  IndoxRouterError,
36
36
  AuthenticationError,
@@ -39,13 +39,18 @@ from .exceptions import (
39
39
  ProviderError,
40
40
  ModelNotFoundError,
41
41
  ProviderNotFoundError,
42
+ ModelNotAvailableError,
42
43
  InvalidParametersError,
44
+ RequestError,
43
45
  InsufficientCreditsError,
46
+ ValidationError,
47
+ APIError,
44
48
  )
45
49
 
46
50
  __version__ = "0.2.1"
47
51
  __all__ = [
48
52
  "Client",
53
+ "IndoxRouter",
49
54
  "IndoxRouterError",
50
55
  "AuthenticationError",
51
56
  "NetworkError",
@@ -53,6 +58,10 @@ __all__ = [
53
58
  "ProviderError",
54
59
  "ModelNotFoundError",
55
60
  "ProviderNotFoundError",
61
+ "ModelNotAvailableError",
56
62
  "InvalidParametersError",
63
+ "RequestError",
57
64
  "InsufficientCreditsError",
65
+ "ValidationError",
66
+ "APIError",
58
67
  ]
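
The export list above adds `IndoxRouter` and four exception classes to the package's public API. A minimal import sketch; whether `IndoxRouter` is an alias of `Client` or a separate entry point is not visible in this diff:

```python
# Names newly exported from the package root after this change.
from indoxrouter import (
    Client,
    IndoxRouter,             # new export; relationship to Client not shown in this diff
    ModelNotAvailableError,  # new
    RequestError,            # new
    ValidationError,         # new
    APIError,                # new
)

client = Client(api_key="your-api-key")  # placeholder key
```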
@@ -57,10 +57,14 @@ from .exceptions import (
57
57
  NetworkError,
58
58
  ProviderNotFoundError,
59
59
  ModelNotFoundError,
60
+ ModelNotAvailableError,
60
61
  InvalidParametersError,
61
62
  RateLimitError,
62
63
  ProviderError,
64
+ RequestError,
63
65
  InsufficientCreditsError,
66
+ ValidationError,
67
+ APIError,
64
68
  )
65
69
  from .constants import (
66
70
  DEFAULT_BASE_URL,
@@ -316,29 +320,57 @@ class Client:
316
320
  if "provider" in error_message.lower():
317
321
  raise ProviderNotFoundError(error_message)
318
322
  elif "model" in error_message.lower():
319
- raise ModelNotFoundError(error_message)
323
+ # Check if it's a model not found vs model not available
324
+ if (
325
+ "not supported" in error_message.lower()
326
+ or "disabled" in error_message.lower()
327
+ or "unavailable" in error_message.lower()
328
+ ):
329
+ raise ModelNotAvailableError(error_message)
330
+ else:
331
+ raise ModelNotFoundError(error_message)
320
332
  else:
321
- raise NetworkError(
322
- f"Resource not found: {error_message} (URL: {url})"
323
- )
333
+ raise APIError(f"Resource not found: {error_message} (URL: {url})")
324
334
  elif status_code == 429:
325
335
  raise RateLimitError(f"Rate limit exceeded: {error_message}")
326
336
  elif status_code == 400:
327
- raise InvalidParametersError(f"Invalid parameters: {error_message}")
337
+ # Check if it's a validation error or invalid parameters
338
+ if (
339
+ "validation" in error_message.lower()
340
+ or "invalid format" in error_message.lower()
341
+ ):
342
+ raise ValidationError(f"Request validation failed: {error_message}")
343
+ else:
344
+ raise InvalidParametersError(f"Invalid parameters: {error_message}")
328
345
  elif status_code == 402:
329
346
  raise InsufficientCreditsError(f"Insufficient credits: {error_message}")
347
+ elif status_code == 422:
348
+ # Unprocessable Entity - typically validation errors
349
+ raise ValidationError(f"Request validation failed: {error_message}")
350
+ elif status_code == 503:
351
+ # Service Unavailable - model might be temporarily unavailable
352
+ if "model" in error_message.lower():
353
+ raise ModelNotAvailableError(
354
+ f"Model temporarily unavailable: {error_message}"
355
+ )
356
+ else:
357
+ raise APIError(f"Service unavailable: {error_message}")
330
358
  elif status_code == 500:
331
359
  # Provide more detailed information for server errors
332
360
  error_detail = error_data.get("detail", "No details provided")
333
361
  # Include the request data in the error message for better debugging
334
362
  request_data_str = json.dumps(data, indent=2) if data else "None"
335
- raise ProviderError(
363
+ raise RequestError(
336
364
  f"Server error (500): {error_detail}. URL: {url}.\n"
337
365
  f"Request data: {request_data_str}\n"
338
366
  f"This may indicate an issue with the server configuration or a problem with the provider service."
339
367
  )
368
+ elif status_code >= 400 and status_code < 500:
369
+ # Client errors
370
+ raise APIError(f"Client error ({status_code}): {error_message}")
340
371
  else:
341
- raise ProviderError(f"Provider error ({status_code}): {error_message}")
372
+ # Server errors
373
+ raise RequestError(f"Server error ({status_code}): {error_message}")
342
374
  except requests.RequestException as e:
343
375
  logger.error(f"Request exception: {str(e)}")
344
376
  raise NetworkError(f"Network error: {str(e)}")
@@ -503,6 +535,26 @@ class Client:
503
535
  n: int = 1,
504
536
  quality: str = "standard",
505
537
  style: str = "vivid",
538
+ # Standard parameters
539
+ response_format: Optional[str] = None,
540
+ user: Optional[str] = None,
541
+ # OpenAI-specific parameters
542
+ background: Optional[str] = None,
543
+ moderation: Optional[str] = None,
544
+ output_compression: Optional[int] = None,
545
+ output_format: Optional[str] = None,
546
+ # Google-specific parameters
547
+ negative_prompt: Optional[str] = None,
548
+ guidance_scale: Optional[float] = None,
549
+ seed: Optional[int] = None,
550
+ safety_filter_level: Optional[str] = None,
551
+ person_generation: Optional[str] = None,
552
+ include_safety_attributes: Optional[bool] = None,
553
+ include_rai_reason: Optional[bool] = None,
554
+ language: Optional[str] = None,
555
+ output_mime_type: Optional[str] = None,
556
+ add_watermark: Optional[bool] = None,
557
+ enhance_prompt: Optional[bool] = None,
506
558
  **kwargs,
507
559
  ) -> Dict[str, Any]:
508
560
  """
@@ -510,11 +562,35 @@ class Client:
510
562
 
511
563
  Args:
512
564
  prompt: Text prompt
513
- model: Model to use in the format "provider/model" (e.g., "openai/dall-e-3")
565
+ model: Model to use in the format "provider/model" (e.g., "openai/dall-e-3", "google/imagen-3.0-generate-002")
514
566
  size: Image size (e.g., "1024x1024")
515
567
  n: Number of images to generate
516
568
  quality: Image quality (e.g., "standard", "hd")
517
569
  style: Image style (e.g., "vivid", "natural")
570
+
571
+ # Standard parameters
572
+ response_format: Format of the response - "url" or "b64_json"
573
+ user: A unique identifier for the end-user
574
+
575
+ # OpenAI-specific parameters
576
+ background: Background style - "transparent", "opaque", or "auto"
577
+ moderation: Moderation level - "low" or "auto"
578
+ output_compression: Compression quality for output images (0-100)
579
+ output_format: Output format - "png", "jpeg", or "webp"
580
+
581
+ # Google-specific parameters
582
+ negative_prompt: Description of what to discourage in the generated images
583
+ guidance_scale: Controls how much the model adheres to the prompt
584
+ seed: Random seed for image generation
585
+ safety_filter_level: Filter level for safety filtering
586
+ person_generation: Controls generation of people ("dont_allow", "allow_adult", "allow_all")
587
+ include_safety_attributes: Whether to report safety scores of generated images
588
+ include_rai_reason: Whether to include filter reason if the image is filtered
589
+ language: Language of the text in the prompt
590
+ output_mime_type: MIME type of the generated image
591
+ add_watermark: Whether to add a watermark to the generated images
592
+ enhance_prompt: Whether to use prompt rewriting logic
593
+
518
594
  **kwargs: Additional parameters to pass to the API
519
595
 
520
596
  Returns:
@@ -529,6 +605,7 @@ class Client:
529
605
  if key not in ["return_generator"]: # List of parameters to exclude
530
606
  filtered_kwargs[key] = value
531
607
 
608
+ # Create the base request data
532
609
  data = {
533
610
  "prompt": prompt,
534
611
  "model": formatted_model,
@@ -536,9 +613,52 @@ class Client:
536
613
  "size": size,
537
614
  "quality": quality,
538
615
  "style": style,
539
- "additional_params": filtered_kwargs,
540
616
  }
541
617
 
618
+ # Add standard parameters if provided
619
+ if response_format is not None:
620
+ data["response_format"] = response_format
621
+ if user is not None:
622
+ data["user"] = user
623
+
624
+ # Add OpenAI-specific parameters if provided
625
+ if background is not None:
626
+ data["background"] = background
627
+ if moderation is not None:
628
+ data["moderation"] = moderation
629
+ if output_compression is not None:
630
+ data["output_compression"] = output_compression
631
+ if output_format is not None:
632
+ data["output_format"] = output_format
633
+
634
+ # Add Google-specific parameters if provided
635
+ if negative_prompt is not None:
636
+ data["negative_prompt"] = negative_prompt
637
+ if guidance_scale is not None:
638
+ data["guidance_scale"] = guidance_scale
639
+ if seed is not None:
640
+ data["seed"] = seed
641
+ if safety_filter_level is not None:
642
+ data["safety_filter_level"] = safety_filter_level
643
+ if person_generation is not None:
644
+ data["person_generation"] = person_generation
645
+ if include_safety_attributes is not None:
646
+ data["include_safety_attributes"] = include_safety_attributes
647
+ if include_rai_reason is not None:
648
+ data["include_rai_reason"] = include_rai_reason
649
+ if language is not None:
650
+ data["language"] = language
651
+ if output_mime_type is not None:
652
+ data["output_mime_type"] = output_mime_type
653
+ if add_watermark is not None:
654
+ data["add_watermark"] = add_watermark
655
+ if enhance_prompt is not None:
656
+ data["enhance_prompt"] = enhance_prompt
657
+
658
+ # Add any remaining parameters
659
+ if filtered_kwargs:
660
+ data["additional_params"] = filtered_kwargs
661
+
542
662
  return self._request("POST", IMAGE_ENDPOINT, data)
543
663
 
544
664
  def models(self, provider: Optional[str] = None) -> Dict[str, Any]:
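
The parameter handling above forwards each non-`None` named argument as a top-level field of the request payload and nests any remaining keyword arguments under `additional_params`. A minimal sketch of that mapping; `custom_flag` and the user identifier are illustrative, and which fields a given model actually honors is provider-dependent:

```python
from indoxrouter import Client

client = Client(api_key="your-api-key")  # placeholder key

response = client.images(
    prompt="A watercolor lighthouse at dawn",
    model="openai/dall-e-3",
    size="1024x1024",
    response_format="url",   # sent as a top-level field
    user="demo-user-123",     # top-level field; illustrative identifier
    output_format="png",      # OpenAI-specific parameter, top-level field
    custom_flag=True,         # unknown kwarg -> data["additional_params"]["custom_flag"]
)
print(response["data"][0].get("url"))
```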
@@ -13,6 +13,10 @@ USE_COOKIES = True # Always use cookie-based authentication
13
13
  DEFAULT_MODEL = "openai/gpt-4o-mini"
14
14
  DEFAULT_EMBEDDING_MODEL = "openai/text-embedding-3-small"
15
15
  DEFAULT_IMAGE_MODEL = "openai/dall-e-3"
16
+ GOOGLE_IMAGE_MODEL = "google/imagen-3.0-generate-002"
17
+ XAI_IMAGE_MODEL = "xai/grok-2-image"
18
+ XAI_IMAGE_LATEST_MODEL = "xai/grok-2-image-latest"
19
+ XAI_IMAGE_SPECIFIC_MODEL = "xai/grok-2-image-1212"
16
20
 
17
21
  # API endpoints
18
22
  CHAT_ENDPOINT = "chat/completions"
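
The new constants give named handles for the Google and xAI image models; judging by their values, the two extra xAI names point at the "latest" tag and the pinned "1212" build. A short usage sketch (client construction as in the README):

```python
from indoxrouter import Client
from indoxrouter.constants import (
    GOOGLE_IMAGE_MODEL,        # "google/imagen-3.0-generate-002"
    XAI_IMAGE_MODEL,           # "xai/grok-2-image"
    XAI_IMAGE_LATEST_MODEL,    # "xai/grok-2-image-latest"
    XAI_IMAGE_SPECIFIC_MODEL,  # "xai/grok-2-image-1212"
)

client = Client(api_key="your-api-key")  # placeholder key
response = client.images(prompt="A lighthouse in a storm", model=XAI_IMAGE_LATEST_MODEL)
```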
@@ -38,14 +38,20 @@ class ProviderError(IndoxRouterError):
38
38
  pass
39
39
 
40
40
 
41
+ class ProviderNotFoundError(ProviderError):
42
+ """Raised when a requested provider is not found."""
43
+
44
+ pass
45
+
46
+
41
47
  class ModelNotFoundError(ProviderError):
42
48
  """Raised when a requested model is not found."""
43
49
 
44
50
  pass
45
51
 
46
52
 
47
- class ProviderNotFoundError(ProviderError):
48
- """Raised when a requested provider is not found."""
53
+ class ModelNotAvailableError(ProviderError):
54
+ """Raised when a model is disabled or not supported by the provider."""
49
55
 
50
56
  pass
51
57
 
@@ -56,7 +62,25 @@ class InvalidParametersError(IndoxRouterError):
56
62
  pass
57
63
 
58
64
 
65
+ class RequestError(IndoxRouterError):
66
+ """Raised when a request to a provider fails."""
67
+
68
+ pass
69
+
70
+
59
71
  class InsufficientCreditsError(IndoxRouterError):
60
72
  """Raised when the user doesn't have enough credits."""
61
73
 
62
74
  pass
75
+
76
+
77
+ class ValidationError(IndoxRouterError):
78
+ """Raised when request validation fails."""
79
+
80
+ pass
81
+
82
+
83
+ class APIError(IndoxRouterError):
84
+ """Raised when the API returns an error."""
85
+
86
+ pass
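
In the reordered hierarchy above, `ProviderNotFoundError`, `ModelNotFoundError`, and the new `ModelNotAvailableError` all remain subclasses of `ProviderError`, while `RequestError`, `ValidationError`, and `APIError` subclass `IndoxRouterError` directly. A small sketch of what that implies for broad `except` clauses:

```python
# Subclass relationships as defined in exceptions.py above.
from indoxrouter.exceptions import (
    IndoxRouterError,
    ProviderError,
    ModelNotAvailableError,
    ValidationError,
    APIError,
)

assert issubclass(ModelNotAvailableError, ProviderError)   # still caught by `except ProviderError`
assert issubclass(ValidationError, IndoxRouterError)
assert not issubclass(APIError, ProviderError)             # needs its own handler or the base class
```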
@@ -1,3 +1,26 @@
1
+ Metadata-Version: 2.4
2
+ Name: indoxrouter
3
+ Version: 0.1.17
4
+ Summary: A unified client for various AI providers
5
+ Author-email: indoxRouter Team <ashkan.eskandari.dev@gmail.com>
6
+ License: MIT
7
+ Project-URL: Homepage, https://github.com/indoxrouter/indoxrouter
8
+ Project-URL: Repository, https://github.com/indoxrouter/indoxrouter
9
+ Project-URL: Issues, https://github.com/indoxrouter/indoxrouter/issues
10
+ Keywords: ai,api,client,openai,anthropic,google,mistral,xai,imagen,grok,image-generation
11
+ Classifier: Development Status :: 3 - Alpha
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: Programming Language :: Python :: 3
14
+ Classifier: Programming Language :: Python :: 3.8
15
+ Classifier: Programming Language :: Python :: 3.9
16
+ Classifier: Programming Language :: Python :: 3.10
17
+ Classifier: Programming Language :: Python :: 3.11
18
+ Classifier: Programming Language :: Python :: 3.12
19
+ Requires-Python: >=3.8
20
+ Description-Content-Type: text/markdown
21
+ Requires-Dist: requests>=2.25.0
22
+ Requires-Dist: python-dotenv>=1.0.0
23
+
1
24
  # IndoxRouter Client
2
25
 
3
26
  A unified client for various AI providers, including OpenAI, Anthropic, Google, and Mistral.
@@ -110,13 +133,43 @@ print(f"First embedding: {response['data'][0]['embedding'][:5]}...")
110
133
  ### Image Generation
111
134
 
112
135
  ```python
136
+ # OpenAI Image Generation
113
137
  response = client.images(
114
138
  prompt="A serene landscape with mountains and a lake",
115
139
  model="openai/dall-e-3",
116
- size="1024x1024"
140
+ size="1024x1024",
141
+ quality="standard", # Options: standard, hd
142
+ style="vivid" # Options: vivid, natural
117
143
  )
118
144
 
119
145
  print(f"Image URL: {response['data'][0]['url']}")
146
+
147
+ # Google Imagen Image Generation
148
+ from indoxrouter.constants import GOOGLE_IMAGE_MODEL
149
+
150
+ response = client.images(
151
+ prompt="A robot holding a red skateboard in a futuristic city",
152
+ model=GOOGLE_IMAGE_MODEL,
153
+ n=2, # Generate 2 images
154
+ negative_prompt="broken, damaged, low quality",
155
+ guidance_scale=7.5, # Control adherence to prompt
156
+ seed=42, # For reproducible results
157
+ )
158
+
159
+ # xAI Grok Image Generation
160
+ from indoxrouter.constants import XAI_IMAGE_MODEL
161
+
162
+ response = client.images(
163
+ prompt="A cat in a tree",
164
+ model=XAI_IMAGE_MODEL,
165
+ n=1,
166
+ response_format="b64_json" # Get base64 encoded image
167
+ )
168
+
169
+ # Access base64 encoded image data
170
+ if "b64_json" in response["data"][0]:
171
+ b64_data = response["data"][0]["b64_json"]
172
+ # Use the base64 data (e.g., to display in HTML or save to file)
120
173
  ```
121
174
 
122
175
  ### Streaming Responses
@@ -1,16 +1,18 @@
1
1
  [build-system]
2
- requires = ["setuptools>=42", "wheel"]
2
+ requires = ["setuptools>=61.0", "wheel"]
3
3
  build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "indoxrouter"
7
- version = "0.1.15"
7
+ version = "0.1.17"
8
8
  authors = [
9
9
  {name = "indoxRouter Team", email = "ashkan.eskandari.dev@gmail.com"},
10
10
  ]
11
11
  description = "A unified client for various AI providers"
12
12
  readme = "README.md"
13
13
  requires-python = ">=3.8"
14
+ license = {text = "MIT"}
15
+ keywords = ["ai", "api", "client", "openai", "anthropic", "google", "mistral", "xai", "imagen", "grok", "image-generation"]
14
16
  classifiers = [
15
17
  "Development Status :: 3 - Alpha",
16
18
  "Intended Audience :: Developers",
@@ -28,7 +30,11 @@ dependencies = [
28
30
 
29
31
  [project.urls]
30
32
  Homepage = "https://github.com/indoxrouter/indoxrouter"
33
+ Repository = "https://github.com/indoxrouter/indoxrouter"
34
+ Issues = "https://github.com/indoxrouter/indoxrouter/issues"
31
35
 
32
- [tool.setuptools]
33
- packages = ["indoxrouter"]
36
+ [tool.setuptools.packages.find]
37
+ where = ["."]
38
+ include = ["indoxrouter*"]
39
+ exclude = ["tests*"]
34
40
 
File without changes
File without changes