orbitalsai 1.0.0__tar.gz → 1.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27) hide show
  1. {orbitalsai-1.0.0/orbitalsai.egg-info → orbitalsai-1.1.0}/PKG-INFO +68 -16
  2. {orbitalsai-1.0.0 → orbitalsai-1.1.0}/README.md +67 -15
  3. orbitalsai-1.1.0/examples/model_selection.py +81 -0
  4. orbitalsai-1.1.0/examples/model_selection_async.py +52 -0
  5. {orbitalsai-1.0.0 → orbitalsai-1.1.0}/examples/simple_transcribe.py +2 -2
  6. {orbitalsai-1.0.0 → orbitalsai-1.1.0}/orbitalsai/__init__.py +2 -1
  7. {orbitalsai-1.0.0 → orbitalsai-1.1.0}/orbitalsai/async_client.py +25 -1
  8. {orbitalsai-1.0.0 → orbitalsai-1.1.0}/orbitalsai/client.py +26 -50
  9. {orbitalsai-1.0.0 → orbitalsai-1.1.0}/orbitalsai/models.py +10 -0
  10. {orbitalsai-1.0.0 → orbitalsai-1.1.0/orbitalsai.egg-info}/PKG-INFO +68 -16
  11. {orbitalsai-1.0.0 → orbitalsai-1.1.0}/orbitalsai.egg-info/SOURCES.txt +3 -0
  12. {orbitalsai-1.0.0 → orbitalsai-1.1.0}/pyproject.toml +1 -1
  13. {orbitalsai-1.0.0 → orbitalsai-1.1.0}/setup.py +1 -1
  14. orbitalsai-1.1.0/test_model_update.py +129 -0
  15. {orbitalsai-1.0.0 → orbitalsai-1.1.0}/tests/test_client.py +1 -1
  16. {orbitalsai-1.0.0 → orbitalsai-1.1.0}/.gitignore +0 -0
  17. {orbitalsai-1.0.0 → orbitalsai-1.1.0}/LICENSE +0 -0
  18. {orbitalsai-1.0.0 → orbitalsai-1.1.0}/examples/async_transcribe.py +0 -0
  19. {orbitalsai-1.0.0 → orbitalsai-1.1.0}/examples/error_handling.py +0 -0
  20. {orbitalsai-1.0.0 → orbitalsai-1.1.0}/examples/manage_balance.py +0 -0
  21. {orbitalsai-1.0.0 → orbitalsai-1.1.0}/examples/with_srt.py +0 -0
  22. {orbitalsai-1.0.0 → orbitalsai-1.1.0}/orbitalsai/exceptions.py +0 -0
  23. {orbitalsai-1.0.0 → orbitalsai-1.1.0}/orbitalsai/utils.py +0 -0
  24. {orbitalsai-1.0.0 → orbitalsai-1.1.0}/orbitalsai.egg-info/dependency_links.txt +0 -0
  25. {orbitalsai-1.0.0 → orbitalsai-1.1.0}/orbitalsai.egg-info/requires.txt +0 -0
  26. {orbitalsai-1.0.0 → orbitalsai-1.1.0}/orbitalsai.egg-info/top_level.txt +0 -0
  27. {orbitalsai-1.0.0 → orbitalsai-1.1.0}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: orbitalsai
3
- Version: 1.0.0
3
+ Version: 1.1.0
4
4
  Summary: A simple and powerful Python SDK for the OrbitalsAI API
5
5
  Home-page: https://github.com/orbitalsai/orbitalsai-python-sdk
6
6
  Author: OrbitalsAI
@@ -138,27 +138,61 @@ print(transcript.text)
138
138
  print(transcript.srt_content) # SRT subtitle content
139
139
  ```
140
140
 
141
- ### Don't Wait (Advanced)
141
+ ## 🤖 AI Model Selection
142
+
143
+ Choose which AI model to use for transcription. Different models may have different pricing and capabilities.
144
+
145
+ ### List Available Models
142
146
 
143
147
  ```python
144
148
  import orbitalsai
145
149
 
146
150
  client = orbitalsai.Client(api_key="your_api_key_here")
147
151
 
148
- # Start transcription without waiting
149
- task = client.transcribe("audio.mp3", wait=False)
150
- print(f"Task started: {task.task_id}")
152
+ # Get all available models
153
+ models = client.get_models()
154
+
155
+ for model in models:
156
+ print(f"{model.model_name}: ${model.transcription_rate_per_hour:.2f}/hour")
157
+ ```
158
+
159
+ ### Transcribe with Specific Model
151
160
 
152
- # Check status later
153
- status = client.get_task(task.task_id)
154
- if status.status == "completed":
155
- print(status.result_text)
161
+ ```python
162
+ import orbitalsai
163
+
164
+ client = orbitalsai.Client(api_key="your_api_key_here")
165
+
166
+ # Transcribe with Perigee-1 model
167
+ transcript = client.transcribe(
168
+ "audio.mp3",
169
+ language="hausa",
170
+ model_name="Perigee-1" # Specify the model
171
+ )
156
172
 
157
- # Or wait for completion
158
- transcript = client.wait_for_task(task.task_id)
159
173
  print(transcript.text)
160
174
  ```
161
175
 
176
+ ### Choose Model Based on Budget
177
+
178
+ ```python
179
+ import orbitalsai
180
+
181
+ client = orbitalsai.Client(api_key="your_api_key_here")
182
+
183
+ # Get the cheapest available model
184
+ models = client.get_models()
185
+ cheapest_model = min(models, key=lambda m: m.transcription_rate_per_hour)
186
+
187
+ print(f"Using {cheapest_model.model_name} at ${cheapest_model.transcription_rate_per_hour:.2f}/hour")
188
+
189
+ transcript = client.transcribe(
190
+ "audio.mp3",
191
+ language="english",
192
+ model_name=cheapest_model.model_name
193
+ )
194
+ ```
195
+
162
196
  ## 🔄 Async Usage
163
197
 
164
198
  Perfect for processing multiple files or building web applications.
@@ -169,11 +203,15 @@ import orbitalsai
169
203
 
170
204
  async def main():
171
205
  async with orbitalsai.AsyncClient(api_key="your_api_key_here") as client:
206
+ # List available models
207
+ models = await client.get_models()
208
+ print(f"Available models: {[m.model_name for m in models]}")
209
+
172
210
  # Transcribe multiple files concurrently
173
211
  tasks = await asyncio.gather(
174
- client.transcribe("audio1.mp3"),
175
- client.transcribe("audio2.wav"),
176
- client.transcribe("audio3.m4a")
212
+ client.transcribe("audio1.mp3", model_name="Perigee-1"),
213
+ client.transcribe("audio2.wav", model_name="Perigee-1"),
214
+ client.transcribe("audio3.m4a", model_name="Perigee-1")
177
215
  )
178
216
 
179
217
  for transcript in tasks:
@@ -202,7 +240,7 @@ print(f"Last updated: {balance.last_updated}")
202
240
  import orbitalsai
203
241
  from datetime import date, timedelta
204
242
 
205
- client = orbitalsai.Client(api_key="[REDACTED-LEAKED-API-KEY]")  <!-- SECURITY: a live-looking API key was published here in 1.0.0; credential redacted in this review copy — the key must be revoked/rotated -->
243
+ client = orbitalsai.Client(api_key="your_api_key_here")
206
244
 
207
245
  # Get last 7 days of usage
208
246
  end_date = date.today()
@@ -280,7 +318,13 @@ except Exception as e:
280
318
 
281
319
  ### Client Methods
282
320
 
283
- #### `transcribe(file_path, language="english", generate_srt=False, wait=True, timeout=300, poll_interval=5)`
321
+ #### `get_models()`
322
+
323
+ Get all available AI models with their pricing information.
324
+
325
+ **Returns:** List of `Model` objects
326
+
327
+ #### `transcribe(file_path, language="english", generate_srt=False, model_name="Perigee-1", wait=True, timeout=300, poll_interval=5)`
284
328
 
285
329
  Transcribe an audio file.
286
330
 
@@ -288,6 +332,7 @@ Transcribe an audio file.
288
332
  - `file_path` (str): Path to the audio file
289
333
  - `language` (str): Language code (default: "english")
290
334
  - `generate_srt` (bool): Generate SRT subtitles (default: False)
335
+ - `model_name` (str): AI model to use (default: "Perigee-1")
291
336
  - `wait` (bool): Wait for completion (default: True)
292
337
  - `timeout` (int): Maximum wait time in seconds (default: 300)
293
338
  - `poll_interval` (int): Seconds between status checks (default: 5)
@@ -367,6 +412,13 @@ Get current user details.
367
412
  - `balance` (float): Current balance in credits
368
413
  - `last_updated` (datetime): Last update timestamp
369
414
 
415
+ #### `Model`
416
+ - `id` (int): Model ID
417
+ - `model_name` (str): Name of the model (e.g., "Perigee-1")
418
+ - `transcription_rate_per_second` (float): Cost per second of audio
419
+ - `transcription_rate_per_hour` (float): Cost per hour of audio
420
+ - `is_active` (bool): Whether the model is currently available
421
+
370
422
  ## 🌍 Supported Languages
371
423
 
372
424
  - **English** (`english`)
@@ -95,27 +95,61 @@ print(transcript.text)
95
95
  print(transcript.srt_content) # SRT subtitle content
96
96
  ```
97
97
 
98
- ### Don't Wait (Advanced)
98
+ ## 🤖 AI Model Selection
99
+
100
+ Choose which AI model to use for transcription. Different models may have different pricing and capabilities.
101
+
102
+ ### List Available Models
99
103
 
100
104
  ```python
101
105
  import orbitalsai
102
106
 
103
107
  client = orbitalsai.Client(api_key="your_api_key_here")
104
108
 
105
- # Start transcription without waiting
106
- task = client.transcribe("audio.mp3", wait=False)
107
- print(f"Task started: {task.task_id}")
109
+ # Get all available models
110
+ models = client.get_models()
111
+
112
+ for model in models:
113
+ print(f"{model.model_name}: ${model.transcription_rate_per_hour:.2f}/hour")
114
+ ```
115
+
116
+ ### Transcribe with Specific Model
108
117
 
109
- # Check status later
110
- status = client.get_task(task.task_id)
111
- if status.status == "completed":
112
- print(status.result_text)
118
+ ```python
119
+ import orbitalsai
120
+
121
+ client = orbitalsai.Client(api_key="your_api_key_here")
122
+
123
+ # Transcribe with Perigee-1 model
124
+ transcript = client.transcribe(
125
+ "audio.mp3",
126
+ language="hausa",
127
+ model_name="Perigee-1" # Specify the model
128
+ )
113
129
 
114
- # Or wait for completion
115
- transcript = client.wait_for_task(task.task_id)
116
130
  print(transcript.text)
117
131
  ```
118
132
 
133
+ ### Choose Model Based on Budget
134
+
135
+ ```python
136
+ import orbitalsai
137
+
138
+ client = orbitalsai.Client(api_key="your_api_key_here")
139
+
140
+ # Get the cheapest available model
141
+ models = client.get_models()
142
+ cheapest_model = min(models, key=lambda m: m.transcription_rate_per_hour)
143
+
144
+ print(f"Using {cheapest_model.model_name} at ${cheapest_model.transcription_rate_per_hour:.2f}/hour")
145
+
146
+ transcript = client.transcribe(
147
+ "audio.mp3",
148
+ language="english",
149
+ model_name=cheapest_model.model_name
150
+ )
151
+ ```
152
+
119
153
  ## 🔄 Async Usage
120
154
 
121
155
  Perfect for processing multiple files or building web applications.
@@ -126,11 +160,15 @@ import orbitalsai
126
160
 
127
161
  async def main():
128
162
  async with orbitalsai.AsyncClient(api_key="your_api_key_here") as client:
163
+ # List available models
164
+ models = await client.get_models()
165
+ print(f"Available models: {[m.model_name for m in models]}")
166
+
129
167
  # Transcribe multiple files concurrently
130
168
  tasks = await asyncio.gather(
131
- client.transcribe("audio1.mp3"),
132
- client.transcribe("audio2.wav"),
133
- client.transcribe("audio3.m4a")
169
+ client.transcribe("audio1.mp3", model_name="Perigee-1"),
170
+ client.transcribe("audio2.wav", model_name="Perigee-1"),
171
+ client.transcribe("audio3.m4a", model_name="Perigee-1")
134
172
  )
135
173
 
136
174
  for transcript in tasks:
@@ -159,7 +197,7 @@ print(f"Last updated: {balance.last_updated}")
159
197
  import orbitalsai
160
198
  from datetime import date, timedelta
161
199
 
162
- client = orbitalsai.Client(api_key="[REDACTED-LEAKED-API-KEY]")  <!-- SECURITY: same leaked key as in PKG-INFO, published in the 1.0.0 README; redacted here — rotate the credential -->
200
+ client = orbitalsai.Client(api_key="your_api_key_here")
163
201
 
164
202
  # Get last 7 days of usage
165
203
  end_date = date.today()
@@ -237,7 +275,13 @@ except Exception as e:
237
275
 
238
276
  ### Client Methods
239
277
 
240
- #### `transcribe(file_path, language="english", generate_srt=False, wait=True, timeout=300, poll_interval=5)`
278
+ #### `get_models()`
279
+
280
+ Get all available AI models with their pricing information.
281
+
282
+ **Returns:** List of `Model` objects
283
+
284
+ #### `transcribe(file_path, language="english", generate_srt=False, model_name="Perigee-1", wait=True, timeout=300, poll_interval=5)`
241
285
 
242
286
  Transcribe an audio file.
243
287
 
@@ -245,6 +289,7 @@ Transcribe an audio file.
245
289
  - `file_path` (str): Path to the audio file
246
290
  - `language` (str): Language code (default: "english")
247
291
  - `generate_srt` (bool): Generate SRT subtitles (default: False)
292
+ - `model_name` (str): AI model to use (default: "Perigee-1")
248
293
  - `wait` (bool): Wait for completion (default: True)
249
294
  - `timeout` (int): Maximum wait time in seconds (default: 300)
250
295
  - `poll_interval` (int): Seconds between status checks (default: 5)
@@ -324,6 +369,13 @@ Get current user details.
324
369
  - `balance` (float): Current balance in credits
325
370
  - `last_updated` (datetime): Last update timestamp
326
371
 
372
+ #### `Model`
373
+ - `id` (int): Model ID
374
+ - `model_name` (str): Name of the model (e.g., "Perigee-1")
375
+ - `transcription_rate_per_second` (float): Cost per second of audio
376
+ - `transcription_rate_per_hour` (float): Cost per hour of audio
377
+ - `is_active` (bool): Whether the model is currently available
378
+
327
379
  ## 🌍 Supported Languages
328
380
 
329
381
  - **English** (`english`)
@@ -0,0 +1,81 @@
1
+ """
2
+ Example: AI Model Selection
3
+
4
+ This example demonstrates how to:
5
+ 1. List available AI models
6
+ 2. View model pricing
7
+ 3. Select a specific model for transcription
8
+ """
9
+
10
+ import orbitalsai
11
+
12
+ # Initialize client
13
+ client = orbitalsai.Client(api_key="your_api_key_here")
14
+
15
+ # Example 1: List all available models
16
+ print("=" * 60)
17
+ print("Available AI Models")
18
+ print("=" * 60)
19
+
20
+ models = client.get_models()
21
+ for model in models:
22
+ print(f"\nModel: {model.model_name}")
23
+ print(f" Active: {model.is_active}")
24
+ print(f" Pricing per second: ${model.transcription_rate_per_second:.9f}")
25
+ print(f" Pricing per hour: ${model.transcription_rate_per_hour:.2f}")
26
+
27
+ # Example 2: Transcribe with default model (Perigee-1)
28
+ print("\n" + "=" * 60)
29
+ print("Transcribing with Default Model")
30
+ print("=" * 60)
31
+
32
+ transcript = client.transcribe(
33
+ "audio.mp3",
34
+ language="english"
35
+ )
36
+ print(f"Transcription: {transcript.text}")
37
+
38
+ # Example 3: Transcribe with a specific model
39
+ print("\n" + "=" * 60)
40
+ print("Transcribing with Specific Model")
41
+ print("=" * 60)
42
+
43
+ transcript = client.transcribe(
44
+ "audio.mp3",
45
+ language="hausa",
46
+ model_name="Perigee-1" # Explicitly specify model
47
+ )
48
+ print(f"Transcription: {transcript.text}")
49
+
50
+ # Example 4: Choose model based on pricing
51
+ print("\n" + "=" * 60)
52
+ print("Choosing Model Based on Budget")
53
+ print("=" * 60)
54
+
55
+ # Get models and sort by price
56
+ models = client.get_models()
57
+ cheapest_model = min(models, key=lambda m: m.transcription_rate_per_hour)
58
+
59
+ print(f"Using cheapest model: {cheapest_model.model_name}")
60
+ print(f"Cost: ${cheapest_model.transcription_rate_per_hour:.2f}/hour")
61
+
62
+ transcript = client.transcribe(
63
+ "audio.mp3",
64
+ language="english",
65
+ model_name=cheapest_model.model_name
66
+ )
67
+ print(f"Transcription: {transcript.text}")
68
+
69
+ # Example 5: Estimate transcription cost
70
+ print("\n" + "=" * 60)
71
+ print("Estimating Transcription Cost")
72
+ print("=" * 60)
73
+
74
+ # Get audio duration (in seconds) - you would get this from your audio file
75
+ audio_duration_seconds = 120 # 2 minutes
76
+
77
+ for model in models:
78
+ estimated_cost = model.transcription_rate_per_second * audio_duration_seconds
79
+ print(f"\n{model.model_name}:")
80
+ print(f" For {audio_duration_seconds}s audio: ${estimated_cost:.4f}")
81
+
@@ -0,0 +1,52 @@
1
+ """
2
+ Example: AI Model Selection (Async)
3
+
4
+ This example demonstrates how to use AI model selection asynchronously.
5
+ """
6
+
7
+ import asyncio
8
+ import orbitalsai
9
+
10
+ async def main():
11
+ async with orbitalsai.AsyncClient(api_key="your_api_key_here") as client:
12
+ # List all available models
13
+ print("=" * 60)
14
+ print("Available AI Models")
15
+ print("=" * 60)
16
+
17
+ models = await client.get_models()
18
+ for model in models:
19
+ print(f"\nModel: {model.model_name}")
20
+ print(f" Active: {model.is_active}")
21
+ print(f" Pricing per second: ${model.transcription_rate_per_second:.9f}")
22
+ print(f" Pricing per hour: ${model.transcription_rate_per_hour:.2f}")
23
+
24
+ # Transcribe with specific model
25
+ print("\n" + "=" * 60)
26
+ print("Transcribing with Specific Model")
27
+ print("=" * 60)
28
+
29
+ transcript = await client.transcribe(
30
+ "audio.mp3",
31
+ language="hausa",
32
+ model_name="Perigee-1"
33
+ )
34
+ print(f"Transcription: {transcript.text}")
35
+
36
+ # Process multiple files with different models concurrently
37
+ print("\n" + "=" * 60)
38
+ print("Processing Multiple Files")
39
+ print("=" * 60)
40
+
41
+ tasks = await asyncio.gather(
42
+ client.transcribe("audio1.mp3", language="english", model_name="Perigee-1"),
43
+ client.transcribe("audio2.mp3", language="hausa", model_name="Perigee-1"),
44
+ client.transcribe("audio3.mp3", language="yoruba", model_name="Perigee-1")
45
+ )
46
+
47
+ for i, transcript in enumerate(tasks, 1):
48
+ print(f"\nFile {i}: {transcript.text[:100]}...")
49
+
50
+ if __name__ == "__main__":
51
+ asyncio.run(main())
52
+
@@ -8,11 +8,11 @@ import orbitalsai
8
8
 
9
9
  def main():
10
10
  # Initialize the client
11
- client = orbitalsai.Client(api_key="your_api_key_here")
11
+ client = orbitalsai.Client(api_key="[REDACTED-LEAKED-API-KEY]")  <!-- SECURITY: the 1.1.0 release REPLACES the placeholder with a new hardcoded credential in examples/simple_transcribe.py — an active secret leak in the published package. Redacted here; the key must be revoked immediately and the example restored to "your_api_key_here". Note this hunk also hardcodes a developer-machine absolute path ("/home/azureuser/…"), indicating a local test file was published by mistake. -->
12
12
 
13
13
  # Transcribe an audio file (waits automatically)
14
14
  print("Transcribing audio file...")
15
- transcript = client.transcribe("path/to/your/audio.mp3")
15
+ transcript = client.transcribe("/home/azureuser/orbitalsai-python-sdk/f8302363-ee30-4ab2-8f69-216fe0ca08ed.wav", model_name="Perigee-1")
16
16
 
17
17
  # Print the result
18
18
  print(f"Transcription: {transcript.text}")
@@ -20,7 +20,7 @@ Example:
20
20
  from .client import Client
21
21
  from .async_client import AsyncClient
22
22
  from .models import (
23
- TranscriptTask, Transcript, Balance, UsageHistory, DailyUsage, User,
23
+ TranscriptTask, Transcript, Balance, UsageHistory, DailyUsage, User, Model,
24
24
  SUPPORTED_LANGUAGES, SUPPORTED_AUDIO_FORMATS, SUPPORTED_AUDIO_MIMETYPES
25
25
  )
26
26
  from .exceptions import (
@@ -45,6 +45,7 @@ __all__ = [
45
45
  "UsageHistory",
46
46
  "DailyUsage",
47
47
  "User",
48
+ "Model",
48
49
 
49
50
  # Constants
50
51
  "SUPPORTED_LANGUAGES",
@@ -11,7 +11,7 @@ from datetime import datetime, date
11
11
 
12
12
  from .models import (
13
13
  TranscriptTask, Transcript, Balance, UsageHistory, DailyUsage,
14
- User, APIKey, UsageRecord, DailyUsageRecord
14
+ User, APIKey, UsageRecord, DailyUsageRecord, Model
15
15
  )
16
16
  from .exceptions import (
17
17
  OrbitalsAIError, AuthenticationError, InsufficientBalanceError,
@@ -101,11 +101,33 @@ class AsyncClient:
101
101
  except aiohttp.ClientError as e:
102
102
  raise OrbitalsAIError(f"Request failed: {str(e)}")
103
103
 
104
+ async def get_models(self) -> List[Model]:
105
+ """
106
+ Get all available AI models with their pricing information.
107
+
108
+ Returns:
109
+ List of Model objects with pricing details
110
+ """
111
+ response = await self._make_request("GET", "/models")
112
+
113
+ models = []
114
+ for model_data in response:
115
+ models.append(Model(
116
+ id=model_data["id"],
117
+ model_name=model_data["model_name"],
118
+ transcription_rate_per_second=model_data["transcription_rate_per_second"],
119
+ transcription_rate_per_hour=model_data["transcription_rate_per_hour"],
120
+ is_active=model_data["is_active"]
121
+ ))
122
+
123
+ return models
124
+
104
125
  async def transcribe(
105
126
  self,
106
127
  file_path: str,
107
128
  language: str = "english",
108
129
  generate_srt: bool = False,
130
+ model_name: str = "Perigee-1",
109
131
  wait: bool = True,
110
132
  timeout: int = 300,
111
133
  poll_interval: int = 5
@@ -117,6 +139,7 @@ class AsyncClient:
117
139
  file_path: Path to the audio file
118
140
  language: Language of the audio (default: "english")
119
141
  generate_srt: Whether to generate SRT subtitles (default: False)
142
+ model_name: AI model to use for transcription (default: "Perigee-1")
120
143
  wait: Whether to wait for completion (default: True)
121
144
  timeout: Maximum time to wait in seconds (default: 300)
122
145
  poll_interval: Seconds to wait between status checks (default: 5)
@@ -141,6 +164,7 @@ class AsyncClient:
141
164
  data.add_field('file', f, filename=file_path.split('/')[-1], content_type='audio/mpeg')
142
165
  data.add_field('language', language)
143
166
  data.add_field('generate_srt', str(generate_srt).lower())
167
+ data.add_field('model_name', model_name)
144
168
 
145
169
  # Upload file
146
170
  response = await self._make_request("POST", "/audio/upload", data=data)
@@ -12,7 +12,7 @@ from pathlib import Path
12
12
 
13
13
  from .models import (
14
14
  TranscriptTask, Transcript, Balance, UsageHistory, DailyUsage,
15
- User, APIKey, UsageRecord, DailyUsageRecord
15
+ User, APIKey, UsageRecord, DailyUsageRecord, Model
16
16
  )
17
17
  from .exceptions import (
18
18
  OrbitalsAIError, AuthenticationError, InsufficientBalanceError,
@@ -89,11 +89,33 @@ class Client:
89
89
  except requests.exceptions.RequestException as e:
90
90
  raise OrbitalsAIError(f"Request failed: {str(e)}")
91
91
 
92
+ def get_models(self) -> List[Model]:
93
+ """
94
+ Get all available AI models with their pricing information.
95
+
96
+ Returns:
97
+ List of Model objects with pricing details
98
+ """
99
+ response = self._make_request("GET", "/models")
100
+
101
+ models = []
102
+ for model_data in response:
103
+ models.append(Model(
104
+ id=model_data["id"],
105
+ model_name=model_data["model_name"],
106
+ transcription_rate_per_second=model_data["transcription_rate_per_second"],
107
+ transcription_rate_per_hour=model_data["transcription_rate_per_hour"],
108
+ is_active=model_data["is_active"]
109
+ ))
110
+
111
+ return models
112
+
92
113
  def transcribe(
93
114
  self,
94
115
  file_path: str,
95
116
  language: str = "english",
96
117
  generate_srt: bool = False,
118
+ model_name: str = "Perigee-1",
97
119
  wait: bool = True,
98
120
  timeout: int = 300,
99
121
  poll_interval: int = 5
@@ -105,6 +127,7 @@ class Client:
105
127
  file_path: Path to the audio file
106
128
  language: Language of the audio (default: "english")
107
129
  generate_srt: Whether to generate SRT subtitles (default: False)
130
+ model_name: AI model to use for transcription (default: "Perigee-1")
108
131
  wait: Whether to wait for completion (default: True)
109
132
  timeout: Maximum time to wait in seconds (default: 300)
110
133
  poll_interval: Seconds to wait between status checks (default: 5)
@@ -128,7 +151,8 @@ class Client:
128
151
  files = {"file": (Path(file_path).name, f, "audio/mpeg")}
129
152
  data = {
130
153
  "language": language,
131
- "generate_srt": str(generate_srt).lower()
154
+ "generate_srt": str(generate_srt).lower(),
155
+ "model_name": model_name
132
156
  }
133
157
 
134
158
  # Upload file
@@ -249,54 +273,6 @@ class Client:
249
273
  last_updated=datetime.fromisoformat(response["last_updated"].replace('Z', '+00:00'))
250
274
  )
251
275
 
252
- def get_usage_history(
253
- self,
254
- start_date: Optional[datetime] = None,
255
- end_date: Optional[datetime] = None,
256
- page: int = 1,
257
- page_size: int = 50
258
- ) -> UsageHistory:
259
- """
260
- Get usage history for the current user.
261
-
262
- Args:
263
- start_date: Start date for the history (default: 30 days ago)
264
- end_date: End date for the history (default: now)
265
- page: Page number (default: 1)
266
- page_size: Number of records per page (default: 50)
267
-
268
- Returns:
269
- UsageHistory object with usage records
270
- """
271
- params = {"page": page, "page_size": page_size}
272
- if start_date:
273
- params["start_date"] = start_date.isoformat()
274
- if end_date:
275
- params["end_date"] = end_date.isoformat()
276
-
277
- response = self._make_request("GET", "/billing/usage-history", params=params)
278
-
279
- records = []
280
- for record_data in response["records"]:
281
- records.append(UsageRecord(
282
- id=record_data["id"],
283
- service_type=record_data["service_type"],
284
- usage_amount=record_data["total_audio_usage"],
285
- cost=record_data["cost"],
286
- timestamp=datetime.fromisoformat(record_data["timestamp"].replace('Z', '+00:00')),
287
- api_key_id=record_data.get("api_key_id")
288
- ))
289
-
290
- return UsageHistory(
291
- records=records,
292
- total_records=response["total_records"],
293
- total_pages=response["total_pages"],
294
- current_page=response["current_page"],
295
- start_date=datetime.fromisoformat(response["start_date"].replace('Z', '+00:00')),
296
- end_date=datetime.fromisoformat(response["end_date"].replace('Z', '+00:00')),
297
- period_summary=response["period_summary"]
298
- )
299
-
300
276
  def get_daily_usage(
301
277
  self,
302
278
  start_date: Optional[date] = None,
@@ -114,6 +114,16 @@ class APIKey:
114
114
  last_used: Optional[datetime] = None
115
115
 
116
116
 
117
+ @dataclass
118
+ class Model:
119
+ """Represents an AI model with pricing information."""
120
+ id: int
121
+ model_name: str
122
+ transcription_rate_per_second: float
123
+ transcription_rate_per_hour: float
124
+ is_active: bool
125
+
126
+
117
127
  # Supported languages and file formats
118
128
  SUPPORTED_LANGUAGES = [
119
129
  "english", "hausa", "igbo", "yoruba", "swahili", "pidgin", "kinyarwanda"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: orbitalsai
3
- Version: 1.0.0
3
+ Version: 1.1.0
4
4
  Summary: A simple and powerful Python SDK for the OrbitalsAI API
5
5
  Home-page: https://github.com/orbitalsai/orbitalsai-python-sdk
6
6
  Author: OrbitalsAI
@@ -138,27 +138,61 @@ print(transcript.text)
138
138
  print(transcript.srt_content) # SRT subtitle content
139
139
  ```
140
140
 
141
- ### Don't Wait (Advanced)
141
+ ## 🤖 AI Model Selection
142
+
143
+ Choose which AI model to use for transcription. Different models may have different pricing and capabilities.
144
+
145
+ ### List Available Models
142
146
 
143
147
  ```python
144
148
  import orbitalsai
145
149
 
146
150
  client = orbitalsai.Client(api_key="your_api_key_here")
147
151
 
148
- # Start transcription without waiting
149
- task = client.transcribe("audio.mp3", wait=False)
150
- print(f"Task started: {task.task_id}")
152
+ # Get all available models
153
+ models = client.get_models()
154
+
155
+ for model in models:
156
+ print(f"{model.model_name}: ${model.transcription_rate_per_hour:.2f}/hour")
157
+ ```
158
+
159
+ ### Transcribe with Specific Model
151
160
 
152
- # Check status later
153
- status = client.get_task(task.task_id)
154
- if status.status == "completed":
155
- print(status.result_text)
161
+ ```python
162
+ import orbitalsai
163
+
164
+ client = orbitalsai.Client(api_key="your_api_key_here")
165
+
166
+ # Transcribe with Perigee-1 model
167
+ transcript = client.transcribe(
168
+ "audio.mp3",
169
+ language="hausa",
170
+ model_name="Perigee-1" # Specify the model
171
+ )
156
172
 
157
- # Or wait for completion
158
- transcript = client.wait_for_task(task.task_id)
159
173
  print(transcript.text)
160
174
  ```
161
175
 
176
+ ### Choose Model Based on Budget
177
+
178
+ ```python
179
+ import orbitalsai
180
+
181
+ client = orbitalsai.Client(api_key="your_api_key_here")
182
+
183
+ # Get the cheapest available model
184
+ models = client.get_models()
185
+ cheapest_model = min(models, key=lambda m: m.transcription_rate_per_hour)
186
+
187
+ print(f"Using {cheapest_model.model_name} at ${cheapest_model.transcription_rate_per_hour:.2f}/hour")
188
+
189
+ transcript = client.transcribe(
190
+ "audio.mp3",
191
+ language="english",
192
+ model_name=cheapest_model.model_name
193
+ )
194
+ ```
195
+
162
196
  ## 🔄 Async Usage
163
197
 
164
198
  Perfect for processing multiple files or building web applications.
@@ -169,11 +203,15 @@ import orbitalsai
169
203
 
170
204
  async def main():
171
205
  async with orbitalsai.AsyncClient(api_key="your_api_key_here") as client:
206
+ # List available models
207
+ models = await client.get_models()
208
+ print(f"Available models: {[m.model_name for m in models]}")
209
+
172
210
  # Transcribe multiple files concurrently
173
211
  tasks = await asyncio.gather(
174
- client.transcribe("audio1.mp3"),
175
- client.transcribe("audio2.wav"),
176
- client.transcribe("audio3.m4a")
212
+ client.transcribe("audio1.mp3", model_name="Perigee-1"),
213
+ client.transcribe("audio2.wav", model_name="Perigee-1"),
214
+ client.transcribe("audio3.m4a", model_name="Perigee-1")
177
215
  )
178
216
 
179
217
  for transcript in tasks:
@@ -202,7 +240,7 @@ print(f"Last updated: {balance.last_updated}")
202
240
  import orbitalsai
203
241
  from datetime import date, timedelta
204
242
 
205
- client = orbitalsai.Client(api_key="QDOBQ2PJ.4ThV4fwk-27hpBvh8pYVvAdOOQlA4Lk1fJQdI6EL9Yk")
243
+ client = orbitalsai.Client(api_key="your_api_key_here")
206
244
 
207
245
  # Get last 7 days of usage
208
246
  end_date = date.today()
@@ -280,7 +318,13 @@ except Exception as e:
280
318
 
281
319
  ### Client Methods
282
320
 
283
- #### `transcribe(file_path, language="english", generate_srt=False, wait=True, timeout=300, poll_interval=5)`
321
+ #### `get_models()`
322
+
323
+ Get all available AI models with their pricing information.
324
+
325
+ **Returns:** List of `Model` objects
326
+
327
+ #### `transcribe(file_path, language="english", generate_srt=False, model_name="Perigee-1", wait=True, timeout=300, poll_interval=5)`
284
328
 
285
329
  Transcribe an audio file.
286
330
 
@@ -288,6 +332,7 @@ Transcribe an audio file.
288
332
  - `file_path` (str): Path to the audio file
289
333
  - `language` (str): Language code (default: "english")
290
334
  - `generate_srt` (bool): Generate SRT subtitles (default: False)
335
+ - `model_name` (str): AI model to use (default: "Perigee-1")
291
336
  - `wait` (bool): Wait for completion (default: True)
292
337
  - `timeout` (int): Maximum wait time in seconds (default: 300)
293
338
  - `poll_interval` (int): Seconds between status checks (default: 5)
@@ -367,6 +412,13 @@ Get current user details.
367
412
  - `balance` (float): Current balance in credits
368
413
  - `last_updated` (datetime): Last update timestamp
369
414
 
415
+ #### `Model`
416
+ - `id` (int): Model ID
417
+ - `model_name` (str): Name of the model (e.g., "Perigee-1")
418
+ - `transcription_rate_per_second` (float): Cost per second of audio
419
+ - `transcription_rate_per_hour` (float): Cost per hour of audio
420
+ - `is_active` (bool): Whether the model is currently available
421
+
370
422
  ## 🌍 Supported Languages
371
423
 
372
424
  - **English** (`english`)
@@ -3,9 +3,12 @@ LICENSE
3
3
  README.md
4
4
  pyproject.toml
5
5
  setup.py
6
+ test_model_update.py
6
7
  examples/async_transcribe.py
7
8
  examples/error_handling.py
8
9
  examples/manage_balance.py
10
+ examples/model_selection.py
11
+ examples/model_selection_async.py
9
12
  examples/simple_transcribe.py
10
13
  examples/with_srt.py
11
14
  orbitalsai/__init__.py
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "orbitalsai"
7
- version = "1.0.0"
7
+ version = "1.1.0"
8
8
  description = "A simple and powerful Python SDK for the OrbitalsAI API"
9
9
  readme = "README.md"
10
10
  license = {text = "MIT"}
@@ -9,7 +9,7 @@ with open("README.md", "r", encoding="utf-8") as fh:
9
9
 
10
10
  setup(
11
11
  name="orbitalsai",
12
- version="1.0.0",
12
+ version="1.1.0",
13
13
  author="OrbitalsAI",
14
14
  author_email="support@orbitalsai.com",
15
15
  description="A simple and powerful Python SDK for the OrbitalsAI API",
@@ -0,0 +1,129 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Quick test script to verify model selection updates
4
+
5
+ This script tests:
6
+ 1. Model dataclass can be imported
7
+ 2. Client has get_models method
8
+ 3. AsyncClient has get_models method
9
+ 4. transcribe method accepts model_name parameter
10
+ """
11
+
12
+ import sys
13
+
14
+ def test_imports():
15
+ """Test that all imports work"""
16
+ print("Testing imports...")
17
+ try:
18
+ from orbitalsai import Client, AsyncClient, Model
19
+ from orbitalsai.models import Model as ModelDirect
20
+ print("✅ All imports successful")
21
+ return True
22
+ except ImportError as e:
23
+ print(f"❌ Import failed: {e}")
24
+ return False
25
+
26
+ def test_model_dataclass():
27
+ """Test Model dataclass"""
28
+ print("\nTesting Model dataclass...")
29
+ try:
30
+ from orbitalsai import Model
31
+
32
+ model = Model(
33
+ id=1,
34
+ model_name="Perigee-1",
35
+ transcription_rate_per_second=0.000069444,
36
+ transcription_rate_per_hour=0.25,
37
+ is_active=True
38
+ )
39
+
40
+ assert model.model_name == "Perigee-1"
41
+ assert model.transcription_rate_per_hour == 0.25
42
+ assert model.is_active is True
43
+
44
+ print("✅ Model dataclass works correctly")
45
+ return True
46
+ except Exception as e:
47
+ print(f"❌ Model dataclass test failed: {e}")
48
+ return False
49
+
50
+ def test_client_methods():
51
+ """Test that Client has new methods"""
52
+ print("\nTesting Client methods...")
53
+ try:
54
+ from orbitalsai import Client
55
+ import inspect
56
+
57
+ # Check get_models method exists
58
+ assert hasattr(Client, 'get_models')
59
+ print(" ✅ Client.get_models() method exists")
60
+
61
+ # Check transcribe accepts model_name
62
+ sig = inspect.signature(Client.transcribe)
63
+ params = sig.parameters
64
+ assert 'model_name' in params
65
+ assert params['model_name'].default == "Perigee-1"
66
+ print(" ✅ Client.transcribe() accepts model_name parameter")
67
+
68
+ return True
69
+ except Exception as e:
70
+ print(f"❌ Client method test failed: {e}")
71
+ return False
72
+
73
+ def test_async_client_methods():
74
+ """Test that AsyncClient has new methods"""
75
+ print("\nTesting AsyncClient methods...")
76
+ try:
77
+ from orbitalsai import AsyncClient
78
+ import inspect
79
+
80
+ # Check get_models method exists
81
+ assert hasattr(AsyncClient, 'get_models')
82
+ print(" ✅ AsyncClient.get_models() method exists")
83
+
84
+ # Check transcribe accepts model_name
85
+ sig = inspect.signature(AsyncClient.transcribe)
86
+ params = sig.parameters
87
+ assert 'model_name' in params
88
+ assert params['model_name'].default == "Perigee-1"
89
+ print(" ✅ AsyncClient.transcribe() accepts model_name parameter")
90
+
91
+ return True
92
+ except Exception as e:
93
+ print(f"❌ AsyncClient method test failed: {e}")
94
+ return False
95
+
96
+ def main():
97
+ """Run all tests"""
98
+ print("=" * 60)
99
+ print("OrbitalsAI SDK - Model Selection Update Tests")
100
+ print("=" * 60)
101
+
102
+ tests = [
103
+ test_imports,
104
+ test_model_dataclass,
105
+ test_client_methods,
106
+ test_async_client_methods,
107
+ ]
108
+
109
+ results = []
110
+ for test in tests:
111
+ results.append(test())
112
+
113
+ print("\n" + "=" * 60)
114
+ print("Test Summary")
115
+ print("=" * 60)
116
+ passed = sum(results)
117
+ total = len(results)
118
+ print(f"Passed: {passed}/{total}")
119
+
120
+ if passed == total:
121
+ print("\n✅ All tests passed! SDK update is working correctly.")
122
+ return 0
123
+ else:
124
+ print(f"\n❌ {total - passed} test(s) failed. Please review the errors above.")
125
+ return 1
126
+
127
+ if __name__ == "__main__":
128
+ sys.exit(main())
129
+
@@ -5,7 +5,7 @@ Basic tests for the OrbitalsAI SDK
5
5
  import pytest
6
6
  from unittest.mock import Mock, patch
7
7
  from orbitalsai import Client, AsyncClient
8
- from orbitalsai.exceptions import AuthenticationError, UnsupportedFileError
8
+ from orbitalsai.exceptions import AuthenticationError
9
9
 
10
10
 
11
11
  class TestClient:
File without changes
File without changes
File without changes