indoxrouter 0.1.3__tar.gz → 0.1.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: indoxrouter
- Version: 0.1.3
+ Version: 0.1.4
  Summary: A unified client for various AI providers
  Home-page: https://github.com/indoxrouter/indoxrouter
  Author: indoxRouter Team
@@ -13,6 +13,7 @@ Classifier: Programming Language :: Python :: 3.8
  Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
  Requires-Python: >=3.8
  Description-Content-Type: text/markdown
  Requires-Dist: requests>=2.25.0
@@ -37,7 +38,7 @@ Dynamic: summary

  # IndoxRouter Client

- A unified client for various AI providers, including OpenAI, Anthropic, Cohere, Google, and Mistral.
+ A unified client for various AI providers, including OpenAI, Anthropic, Google, and Mistral.

  ## Features

@@ -59,19 +60,26 @@ pip install indoxrouter
  ```python
  from indoxrouter import Client

- # Initialize with API key
- client = Client(api_key="your_api_key", base_url="http://your-server-url:8000")
+ # Initialize with API key (default connects to localhost:8000)
+ client = Client(api_key="your_api_key")

- # Or initialize with username and password
+ # Or specify a custom server URL
  client = Client(
-     username="your_username",
-     password="your_password",
+     api_key="your_api_key",
      base_url="http://your-server-url:8000"
  )

+ # Connect to Docker container inside the Docker network
+ client = Client(
+     api_key="your_api_key",
+     base_url="http://indoxrouter-server:8000"
+ )
+
  # Using environment variables
- # Set INDOXROUTER_API_KEY or INDOXROUTER_USERNAME and INDOXROUTER_PASSWORD
- client = Client(base_url="http://your-server-url:8000")
+ # Set INDOX_ROUTER_API_KEY environment variable
+ import os
+ os.environ["INDOX_ROUTER_API_KEY"] = "your_api_key"
+ client = Client()
  ```
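Read as a whole, this hunk drops username/password authentication and adds a localhost default for `base_url`. A minimal sketch of the three 0.1.4 initialization paths shown above, with placeholder credentials (the `indoxrouter-server` hostname assumes a Docker Compose service of that name):

```python
import os

from indoxrouter import Client

# 1. API key only: the client falls back to http://localhost:8000
client = Client(api_key="your_api_key")

# 2. API key plus an explicit server URL (remote host or Docker service name)
client = Client(api_key="your_api_key", base_url="http://indoxrouter-server:8000")

# 3. Key supplied via the environment instead of source code
os.environ["INDOX_ROUTER_API_KEY"] = "your_api_key"
client = Client()
```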

  ### Chat Completions
@@ -82,8 +90,7 @@ response = client.chat(
          {"role": "system", "content": "You are a helpful assistant."},
          {"role": "user", "content": "Tell me a joke."}
      ],
-     provider="openai",
-     model="gpt-3.5-turbo",
+     model="openai/gpt-4o-mini", # Provider/model format
      temperature=0.7
  )

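0.1.4 folds the separate `provider=` and `model=` arguments into one `provider/model` string; the completion, embeddings, and image hunks below make the same change. A sketch of the new call shape, assuming a running server and a valid key:

```python
# 0.1.3: client.chat(..., provider="openai", model="gpt-3.5-turbo")
# 0.1.4: one combined identifier
response = client.chat(
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me a joke."},
    ],
    model="openai/gpt-4o-mini",  # provider and model joined by "/"
    temperature=0.7,
)
print(response["choices"][0]["message"]["content"])
```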
@@ -95,8 +102,7 @@ print(response["choices"][0]["message"]["content"])
  ```python
  response = client.completion(
      prompt="Once upon a time,",
-     provider="openai",
-     model="gpt-3.5-turbo-instruct",
+     model="openai/gpt-4o-mini",
      max_tokens=100
  )

@@ -108,12 +114,11 @@ print(response["choices"][0]["text"])
  ```python
  response = client.embeddings(
      text=["Hello world", "AI is amazing"],
-     provider="openai",
-     model="text-embedding-ada-002"
+     model="openai/text-embedding-3-small"
  )

- print(f"Dimensions: {response['dimensions']}")
- print(f"First embedding: {response['embeddings'][0][:5]}...")
+ print(f"Dimensions: {len(response['data'][0]['embedding'])}")
+ print(f"First embedding: {response['data'][0]['embedding'][:5]}...")
  ```
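Besides the model rename, the embeddings response moves from top-level `dimensions`/`embeddings` keys to an OpenAI-style `data` list. Note that the README reads each vector as `data[i]['embedding']`, while the notebook hunks later in this diff index `data[i]` directly, so treat the exact item shape as version-dependent. A sketch following the README's accessors:

```python
response = client.embeddings(
    text=["Hello world", "AI is amazing"],
    model="openai/text-embedding-3-small",
)
for i, item in enumerate(response["data"]):
    vector = item["embedding"]  # one vector per input string
    print(f"text {i}: {len(vector)} dimensions, first values {vector[:3]}")
```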

  ### Image Generation
@@ -121,12 +126,11 @@ print(f"First embedding: {response['embeddings'][0][:5]}...")
  ```python
  response = client.images(
      prompt="A serene landscape with mountains and a lake",
-     provider="openai",
-     model="dall-e-3",
+     model="openai/dall-e-3",
      size="1024x1024"
  )

- print(f"Image URL: {response['images'][0]['url']}")
+ print(f"Image URL: {response['data'][0]['url']}")
  ```

  ### Streaming Responses
@@ -134,9 +138,10 @@ print(f"Image URL: {response['images'][0]['url']}")
  ```python
  for chunk in client.chat(
      messages=[{"role": "user", "content": "Write a short story."}],
+     model="openai/gpt-4o-mini",
      stream=True
  ):
-     if "choices" in chunk and len(chunk["choices"]) > 0:
+     if chunk.get("choices") and len(chunk["choices"]) > 0:
          content = chunk["choices"][0].get("delta", {}).get("content", "")
          print(content, end="", flush=True)
  ```
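The new guard `chunk.get("choices")` is stricter than the old membership test: it also rejects chunks where `choices` is present but empty or `None`. The difference can be checked offline with plain dicts:

```python
chunks = [
    {},                                            # no "choices" key at all
    {"choices": None},                             # key present but null
    {"choices": []},                               # key present but empty
    {"choices": [{"delta": {"content": "Hi"}}]},   # real content chunk
]
for chunk in chunks:
    # Only the last chunk passes: .get() returns a truthy, non-empty list
    if chunk.get("choices") and len(chunk["choices"]) > 0:
        print(chunk["choices"][0].get("delta", {}).get("content", ""))
```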
@@ -149,7 +154,7 @@ providers = client.models()
  for provider in providers:
      print(f"Provider: {provider['name']}")
      for model in provider["models"]:
-         print(f" - {model['id']}: {model['name']}")
+         print(f" - {model['id']}: {model['description'] or ''}")

  # Get models for a specific provider
  openai_provider = client.models("openai")
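The listing now prints `model['description']`, with `or ''` covering models whose description is null. A self-contained sketch of that loop over an illustrative payload (the sample entries are made up; only the field names come from the hunk above):

```python
providers = [
    {
        "name": "OpenAI",
        "models": [
            {"id": "gpt-4o-mini", "description": "Small, fast chat model"},
            {"id": "text-embedding-3-small", "description": None},  # prints blank
        ],
    },
]
for provider in providers:
    print(f"Provider: {provider['name']}")
    for model in provider["models"]:
        print(f" - {model['id']}: {model['description'] or ''}")
```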
@@ -162,11 +167,10 @@ print(f"OpenAI models: {[m['id'] for m in openai_provider['models']]}")
  from indoxrouter import Client, ModelNotFoundError, ProviderError

  try:
-     client = Client(api_key="your_api_key", base_url="http://your-server-url:8000")
+     client = Client(api_key="your_api_key")
      response = client.chat(
          messages=[{"role": "user", "content": "Hello"}],
-         provider="nonexistent",
-         model="nonexistent-model"
+         model="nonexistent-provider/nonexistent-model"
      )
  except ModelNotFoundError as e:
      print(f"Model not found: {e}")
@@ -177,8 +181,11 @@ except ProviderError as e:
  ## Context Manager

  ```python
- with Client(api_key="your_api_key", base_url="http://your-server-url:8000") as client:
-     response = client.chat([{"role": "user", "content": "Hello!"}])
+ with Client(api_key="your_api_key") as client:
+     response = client.chat(
+         messages=[{"role": "user", "content": "Hello!"}],
+         model="openai/gpt-4o-mini"
+     )
      print(response["choices"][0]["message"]["content"])
      # Client is automatically closed when exiting the block
  ```
@@ -1,6 +1,6 @@
  # IndoxRouter Client

- A unified client for various AI providers, including OpenAI, Anthropic, Cohere, Google, and Mistral.
+ A unified client for various AI providers, including OpenAI, Anthropic, Google, and Mistral.

  ## Features

@@ -22,19 +22,26 @@ pip install indoxrouter
  ```python
  from indoxrouter import Client

- # Initialize with API key
- client = Client(api_key="your_api_key", base_url="http://your-server-url:8000")
+ # Initialize with API key (default connects to localhost:8000)
+ client = Client(api_key="your_api_key")

- # Or initialize with username and password
+ # Or specify a custom server URL
  client = Client(
-     username="your_username",
-     password="your_password",
+     api_key="your_api_key",
      base_url="http://your-server-url:8000"
  )

+ # Connect to Docker container inside the Docker network
+ client = Client(
+     api_key="your_api_key",
+     base_url="http://indoxrouter-server:8000"
+ )
+
  # Using environment variables
- # Set INDOXROUTER_API_KEY or INDOXROUTER_USERNAME and INDOXROUTER_PASSWORD
- client = Client(base_url="http://your-server-url:8000")
+ # Set INDOX_ROUTER_API_KEY environment variable
+ import os
+ os.environ["INDOX_ROUTER_API_KEY"] = "your_api_key"
+ client = Client()
  ```

  ### Chat Completions
@@ -45,8 +52,7 @@ response = client.chat(
          {"role": "system", "content": "You are a helpful assistant."},
          {"role": "user", "content": "Tell me a joke."}
      ],
-     provider="openai",
-     model="gpt-3.5-turbo",
+     model="openai/gpt-4o-mini", # Provider/model format
      temperature=0.7
  )

@@ -58,8 +64,7 @@ print(response["choices"][0]["message"]["content"])
  ```python
  response = client.completion(
      prompt="Once upon a time,",
-     provider="openai",
-     model="gpt-3.5-turbo-instruct",
+     model="openai/gpt-4o-mini",
      max_tokens=100
  )

@@ -71,12 +76,11 @@ print(response["choices"][0]["text"])
  ```python
  response = client.embeddings(
      text=["Hello world", "AI is amazing"],
-     provider="openai",
-     model="text-embedding-ada-002"
+     model="openai/text-embedding-3-small"
  )

- print(f"Dimensions: {response['dimensions']}")
- print(f"First embedding: {response['embeddings'][0][:5]}...")
+ print(f"Dimensions: {len(response['data'][0]['embedding'])}")
+ print(f"First embedding: {response['data'][0]['embedding'][:5]}...")
  ```

  ### Image Generation
@@ -84,12 +88,11 @@ print(f"First embedding: {response['embeddings'][0][:5]}...")
  ```python
  response = client.images(
      prompt="A serene landscape with mountains and a lake",
-     provider="openai",
-     model="dall-e-3",
+     model="openai/dall-e-3",
      size="1024x1024"
  )

- print(f"Image URL: {response['images'][0]['url']}")
+ print(f"Image URL: {response['data'][0]['url']}")
  ```

  ### Streaming Responses
@@ -97,9 +100,10 @@ print(f"Image URL: {response['images'][0]['url']}")
  ```python
  for chunk in client.chat(
      messages=[{"role": "user", "content": "Write a short story."}],
+     model="openai/gpt-4o-mini",
      stream=True
  ):
-     if "choices" in chunk and len(chunk["choices"]) > 0:
+     if chunk.get("choices") and len(chunk["choices"]) > 0:
          content = chunk["choices"][0].get("delta", {}).get("content", "")
          print(content, end="", flush=True)
  ```
@@ -112,7 +116,7 @@ providers = client.models()
  for provider in providers:
      print(f"Provider: {provider['name']}")
      for model in provider["models"]:
-         print(f" - {model['id']}: {model['name']}")
+         print(f" - {model['id']}: {model['description'] or ''}")

  # Get models for a specific provider
  openai_provider = client.models("openai")
@@ -125,11 +129,10 @@ print(f"OpenAI models: {[m['id'] for m in openai_provider['models']]}")
  from indoxrouter import Client, ModelNotFoundError, ProviderError

  try:
-     client = Client(api_key="your_api_key", base_url="http://your-server-url:8000")
+     client = Client(api_key="your_api_key")
      response = client.chat(
          messages=[{"role": "user", "content": "Hello"}],
-         provider="nonexistent",
-         model="nonexistent-model"
+         model="nonexistent-provider/nonexistent-model"
      )
  except ModelNotFoundError as e:
      print(f"Model not found: {e}")
@@ -140,8 +143,11 @@ except ProviderError as e:
  ## Context Manager

  ```python
- with Client(api_key="your_api_key", base_url="http://your-server-url:8000") as client:
-     response = client.chat([{"role": "user", "content": "Hello!"}])
+ with Client(api_key="your_api_key") as client:
+     response = client.chat(
+         messages=[{"role": "user", "content": "Hello!"}],
+         model="openai/gpt-4o-mini"
+     )
      print(response["choices"][0]["message"]["content"])
      # Client is automatically closed when exiting the block
  ```
@@ -16,7 +16,7 @@
  },
  {
  "cell_type": "code",
- "execution_count": 1,
+ "execution_count": null,
  "id": "e03bc1cc",
  "metadata": {},
  "outputs": [],
@@ -30,7 +30,7 @@
  },
  {
  "cell_type": "code",
- "execution_count": 2,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -38,7 +38,9 @@
  "# !pip install indoxrouter\n",
  "\n",
  "# Import the client and exceptions\n",
- "from indoxRouter_client import Client"
+ "from indoxRouter import Client\n",
+ "\n",
+ "from pprint import pprint"
  ]
  },
  {
@@ -51,19 +53,12 @@
  },
  {
  "cell_type": "code",
- "execution_count": 3,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
  "# Initialize with API key\n",
- "client = Client(api_key=\"indox_iCifK9DShPRcFW0RbWuMb6vPzwDEtQz2YkU40uu_DDY\")\n",
- "\n",
- "# Or use environment variable\n",
- "# export INDOX_ROUTER_API_KEY=your_api_key\n",
- "# client = Client()\n",
- "\n",
- "# You can also set a custom timeout\n",
- "# client = Client(api_key=\"your_api_key\", timeout=30) # 30 seconds timeout"
+ "client = Client(api_key=\"indox_4mfg24GRj_qGaZ2-qXw-mXYqaNqMkyGkG1lncGUrRkA\")"
  ]
  },
  {
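Both versions of this cell embed a literal `indox_…` key in the notebook source. The README's environment-variable pattern avoids that; a sketch using the notebook's own import name (assumes `INDOX_ROUTER_API_KEY` is set in the shell before launching Jupyter):

```python
import os

from indoxRouter import Client  # import name as used in this notebook

api_key = os.environ.get("INDOX_ROUTER_API_KEY")
if not api_key:
    raise RuntimeError("Set INDOX_ROUTER_API_KEY before running this notebook")
client = Client(api_key=api_key)
```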
@@ -82,49 +77,32 @@
  },
  {
  "cell_type": "code",
- "execution_count": 4,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Response: The capital of France is Paris.\n",
- "Cost: 0.0032\n",
- "Tokens used: 32\n"
- ]
- }
- ],
+ "execution_count": null,
+ "id": "82ec17da",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "client.get_model_info(provider=\"openai\",model=\"gpt-4o-mini\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
  "source": [
+ "\n",
  "response = client.chat(\n",
  "    messages=[\n",
  "        {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
  "        {\"role\": \"user\", \"content\": \"What is the capital of France?\"}\n",
  "    ],\n",
- "    model=\"openai/gpt-4o-mini\"\n",
+ "    model=\"openai/gpt-4o-mini\" \n",
  ")\n",
  "\n",
  "print(\"Response:\", response[\"data\"])\n",
- "print(\"Cost:\", response[\"usage\"][\"cost\"])\n",
- "print(\"Tokens used:\", response[\"usage\"][\"tokens_total\"])"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "id": "fb681be0",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "{'request_id': '8f7a1a49-2df2-4716-b5d8-621dfbaaaad1', 'created_at': '2025-04-05T17:31:53.680969', 'duration_ms': 4284.862756729126, 'provider': 'openai', 'model': 'openai/gpt-4o-mini', 'success': True, 'message': '', 'usage': {'tokens_prompt': 24, 'tokens_completion': 8, 'tokens_total': 32, 'cost': 0.0032, 'latency': 2.7800915241241455, 'timestamp': '2025-04-05T17:31:53.541938'}, 'raw_response': None, 'data': 'The capital of France is Paris.', 'finish_reason': None}\n"
- ]
- }
- ],
- "source": [
- "print(response)"
+ "print(\"Tokens:\", response[\"usage\"][\"tokens_total\"])\n",
+ "\n"
  ]
  },
  {
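The deleted cell output above is useful documentation in itself: the chat response is an envelope whose `data` field holds the text and whose `usage` field carries `tokens_prompt`, `tokens_completion`, `tokens_total`, and `cost`. A sketch that reads those fields (names taken from that captured output; assumes a successful call):

```python
response = client.chat(
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    model="openai/gpt-4o-mini",
)
usage = response["usage"]
print("Text:  ", response["data"])
print("Tokens:", usage["tokens_prompt"], "+", usage["tokens_completion"],
      "=", usage["tokens_total"])
print("Cost:  ", usage["cost"])
```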
@@ -150,7 +128,28 @@
  "        {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
  "        {\"role\": \"user\", \"content\": \"Write a short poem about AI.\"}\n",
  "    ],\n",
- "    model=\"anthropic/claude-3-haiku\",\n",
+ "    model=\"mistral/mistral-large-latest\",\n",
+ "    temperature=0.8,\n",
+ "    max_tokens=500\n",
+ ")\n",
+ "\n",
+ "print(\"Response:\", response[\"data\"])\n",
+ "print(\"Cost:\", response[\"usage\"][\"cost\"])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d530c4ed",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "response = client.chat(\n",
+ "    messages=[\n",
+ "        {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
+ "        {\"role\": \"user\", \"content\": \"Write a short poem about AI.\"}\n",
+ "    ],\n",
+ "    model=\"deepseek/deepseek-chat\",\n",
  "    temperature=0.8,\n",
  "    max_tokens=500\n",
  ")\n",
@@ -177,11 +176,13 @@
  "metadata": {},
  "outputs": [],
  "source": [
+ "# Initialize conversation with system prompt and first user message\n",
  "conversation = [\n",
- "    {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
- "    {\"role\": \"user\", \"content\": \"Hello, how are you?\"}\n",
+ "    {\"role\": \"system\", \"content\": \"You are a helpful assistant with expertise in programming.\"},\n",
+ "    {\"role\": \"user\", \"content\": \"Hello, I'm learning about APIs. Can you help me?\"}\n",
  "]\n",
  "\n",
+ "# Get first response from the model\n",
  "response = client.chat(\n",
  "    messages=conversation,\n",
  "    model=\"openai/gpt-4o-mini\"\n",
@@ -189,16 +190,41 @@
  "\n",
  "# Add the assistant's response to the conversation\n",
  "conversation.append({\"role\": \"assistant\", \"content\": response[\"data\"]})\n",
+ "print(f\"Response 1: {response['data'][:50]}...\") # Print preview of response\n",
  "\n",
- "# Continue the conversation\n",
- "conversation.append({\"role\": \"user\", \"content\": \"Tell me a joke about programming.\"})\n",
+ "# Second turn - asking a specific question\n",
+ "conversation.append({\"role\": \"user\", \"content\": \"What's the difference between REST and GraphQL APIs?\"})\n",
  "\n",
+ "# Get second response\n",
  "response = client.chat(\n",
  "    messages=conversation,\n",
  "    model=\"openai/gpt-4o-mini\"\n",
  ")\n",
+ "conversation.append({\"role\": \"assistant\", \"content\": response[\"data\"]})\n",
+ "print(f\"Response 2: {response['data'][:50]}...\")\n",
  "\n",
- "print(\"Response:\", response[\"data\"])"
+ "# Third turn - follow-up question to test context retention\n",
+ "conversation.append({\"role\": \"user\", \"content\": \"Can you give me a simple example of each?\"})\n",
+ "\n",
+ "# Get third response\n",
+ "response = client.chat(\n",
+ "    messages=conversation,\n",
+ "    model=\"openai/gpt-4o-mini\"\n",
+ ")\n",
+ "conversation.append({\"role\": \"assistant\", \"content\": response[\"data\"]})\n",
+ "print(f\"Response 3: {response['data'][:50]}...\")\n",
+ "\n",
+ "# Fourth turn - testing memory of previous discussion\n",
+ "conversation.append({\"role\": \"user\", \"content\": \"Which one would you recommend for a beginner building a small blog site?\"})\n",
+ "\n",
+ "# Get fourth response\n",
+ "response = client.chat(\n",
+ "    messages=conversation,\n",
+ "    model=\"openai/gpt-4o-mini\"\n",
+ ")\n",
+ "print(f\"Response 4: {response['data'][:50]}...\")\n",
+ "\n",
+ "pprint(conversation)"
  ]
  },
  {
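The four turns above repeat an append/call/append cycle by hand. A small helper keeps the history bookkeeping in one place; it relies only on the `client.chat(messages=..., model=...)` call and the `response["data"]` field used throughout this notebook (the `ask` function itself is ours, not part of the package):

```python
conversation = [
    {"role": "system", "content": "You are a helpful assistant with expertise in programming."},
]

def ask(question, model="openai/gpt-4o-mini"):
    """Append the user turn, call the router, record and return the reply."""
    conversation.append({"role": "user", "content": question})
    response = client.chat(messages=conversation, model=model)
    reply = response["data"]
    conversation.append({"role": "assistant", "content": reply})
    return reply

print(ask("Hello, I'm learning about APIs. Can you help me?")[:50], "...")
print(ask("What's the difference between REST and GraphQL APIs?")[:50], "...")
```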
@@ -224,7 +250,7 @@
  "    messages=[\n",
  "        {\"role\": \"user\", \"content\": \"Tell me a story about a robot in 5 sentences.\"}\n",
  "    ],\n",
- "    model=\"openai/gpt-4o-mini\",\n",
+ "    model=\"mistral/mistral-large-latest\",\n",
  "    stream=True\n",
  "):\n",
  "    if isinstance(chunk, dict) and \"data\" in chunk:\n",
@@ -315,6 +341,22 @@
  "print(\"Cost:\", response[\"usage\"][\"cost\"])"
  ]
  },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "17b3ca0a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "response = client.completion(\n",
+ "    prompt=\"Once upon a time\",\n",
+ "    model=\"deepseek/deepseek-chat\",\n",
+ ")\n",
+ "\n",
+ "print(\"Response:\", response[\"data\"])\n",
+ "print(\"Cost:\", response[\"usage\"][\"cost\"])"
+ ]
+ },
  {
  "cell_type": "markdown",
  "metadata": {},
@@ -335,7 +377,7 @@
  "source": [
  "response = client.completion(\n",
  "    prompt=\"Write a recipe for chocolate cake\",\n",
- "    model=\"anthropic/claude-3-haiku\",\n",
+ "    model=\"mistral/mistral-large-latest\",\n",
  "    temperature=0.7,\n",
  "    max_tokens=1000\n",
  ")\n",
@@ -364,7 +406,7 @@
  "print(\"Streaming response:\")\n",
  "for chunk in client.completion(\n",
  "    prompt=\"Explain quantum computing in simple terms\",\n",
- "    model=\"openai/gpt-4o-mini\",\n",
+ "    model=\"mistral/mistral-large-latest\",\n",
  "    stream=True\n",
  "):\n",
  "    if isinstance(chunk, dict) and \"data\" in chunk:\n",
@@ -390,18 +432,9 @@
  },
  {
  "cell_type": "code",
- "execution_count": 4,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Embedding dimensions: 1536\n",
- "First 5 values: [-0.019193023443222046, -0.025299284607172012, -0.0016930076526477933, 0.018802976235747337, -0.03383997827768326]\n"
- ]
- }
- ],
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
  "source": [
  "response = client.embeddings(\n",
  "    text=\"Hello, world!\", \n",
@@ -432,12 +465,11 @@
  "source": [
  "response = client.embeddings(\n",
  "    text=[\"Hello, world!\", \"How are you?\", \"IndoxRouter is awesome!\"],\n",
- "    model=\"openai/text-embedding-ada-002\"\n",
+ "    model=\"openai/text-embedding-3-small\"\n",
  ")\n",
  "\n",
  "print(\"Number of embeddings:\", len(response[\"data\"]))\n",
- "print(\"Dimensions of each embedding:\", len(response[\"data\"][0]))\n",
- "print(\"Cost:\", response[\"usage\"][\"cost\"])"
+ "print(\"Dimensions of each embedding:\", len(response[\"data\"][0]))"
  ]
  },
  {
@@ -461,11 +493,11 @@
  "# Cohere embeddings\n",
  "response = client.embeddings(\n",
  "    text=\"Hello, world!\",\n",
- "    model=\"cohere/embed-english-v3.0\"\n",
+ "    model=\"mistral-embed\"\n",
  ")\n",
  "\n",
- "print(\"Cohere embedding dimensions:\", len(response[\"data\"][0]))\n",
- "print(\"Cost:\", response[\"usage\"][\"cost\"])"
+ "print(\"Mistral embedding dimensions:\", len(response[\"data\"][0]))\n",
+ "print(\"First 5 values:\", response[\"data\"][0][:5])\n"
  ]
  },
  {
@@ -592,7 +624,7 @@
  "models = client.models()\n",
  "\n",
  "print(\"Available providers:\")\n",
- "for provider in models[\"providers\"]:\n",
+ "for provider in models:\n",
  "    print(f\"- {provider['name']} ({provider['id']})\")\n",
  "    print(f\" Capabilities: {', '.join(provider['capabilities'])}\")\n",
  "    print(f\" Models: {len(provider['models'])}\")"
@@ -1122,7 +1154,7 @@
  "outputs": [],
  "source": [
  "# First few characters of the API key (for security)\n",
- " print(f\"API key starts with: {client.api_key[:5]}...\")"
+ "print(f\"API key starts with: {client.api_key[:5]}...\")"
  ]
  },
  {
@@ -1149,7 +1181,7 @@
  "outputs": [],
  "source": [
  "connection_info = client.test_connection()\n",
- " print(f\"Connection status: {connection_info}\")"
+ "print(f\"Connection status: {connection_info}\")"
  ]
  },
  {
@@ -1178,7 +1210,7 @@
  "try:\n",
  "    usage = client.get_usage()\n",
  "    print(f\"Remaining credits: ${usage['remaining_credits']}\")\n",
- " except Exception as e:\n",
+ "except Exception as e:\n",
  "    print(f\"Error getting usage: {e}\")"
  ]
  },