orbitalsai 1.0.0__py3-none-any.whl → 1.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- orbitalsai/__init__.py +2 -1
- orbitalsai/async_client.py +25 -1
- orbitalsai/client.py +26 -50
- orbitalsai/models.py +10 -0
- {orbitalsai-1.0.0.dist-info → orbitalsai-1.1.0.dist-info}/METADATA +68 -16
- orbitalsai-1.1.0.dist-info/RECORD +11 -0
- orbitalsai-1.0.0.dist-info/RECORD +0 -11
- {orbitalsai-1.0.0.dist-info → orbitalsai-1.1.0.dist-info}/WHEEL +0 -0
- {orbitalsai-1.0.0.dist-info → orbitalsai-1.1.0.dist-info}/licenses/LICENSE +0 -0
- {orbitalsai-1.0.0.dist-info → orbitalsai-1.1.0.dist-info}/top_level.txt +0 -0
orbitalsai/__init__.py
CHANGED
@@ -20,7 +20,7 @@ Example:
 from .client import Client
 from .async_client import AsyncClient
 from .models import (
-    TranscriptTask, Transcript, Balance, UsageHistory, DailyUsage, User,
+    TranscriptTask, Transcript, Balance, UsageHistory, DailyUsage, User, Model,
     SUPPORTED_LANGUAGES, SUPPORTED_AUDIO_FORMATS, SUPPORTED_AUDIO_MIMETYPES
 )
 from .exceptions import (
@@ -45,6 +45,7 @@ __all__ = [
     "UsageHistory",
     "DailyUsage",
     "User",
+    "Model",
 
     # Constants
     "SUPPORTED_LANGUAGES",
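Together these hunks make `Model` part of the package's public root namespace. A minimal, illustrative sketch of what that enables (placeholder API key; not taken from the package's own docs):

```python
# Illustrative only: import the new Model type and the existing Client from
# the package root, then list the models the account can use.
from orbitalsai import Client, Model

client = Client(api_key="your_api_key_here")

models = client.get_models()  # returns a list of Model objects (new in 1.1.0)
for model in models:
    print(model.model_name, model.is_active)
```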
orbitalsai/async_client.py
CHANGED
@@ -11,7 +11,7 @@ from datetime import datetime, date
 
 from .models import (
     TranscriptTask, Transcript, Balance, UsageHistory, DailyUsage,
-    User, APIKey, UsageRecord, DailyUsageRecord
+    User, APIKey, UsageRecord, DailyUsageRecord, Model
 )
 from .exceptions import (
     OrbitalsAIError, AuthenticationError, InsufficientBalanceError,
@@ -101,11 +101,33 @@ class AsyncClient:
         except aiohttp.ClientError as e:
             raise OrbitalsAIError(f"Request failed: {str(e)}")
 
+    async def get_models(self) -> List[Model]:
+        """
+        Get all available AI models with their pricing information.
+
+        Returns:
+            List of Model objects with pricing details
+        """
+        response = await self._make_request("GET", "/models")
+
+        models = []
+        for model_data in response:
+            models.append(Model(
+                id=model_data["id"],
+                model_name=model_data["model_name"],
+                transcription_rate_per_second=model_data["transcription_rate_per_second"],
+                transcription_rate_per_hour=model_data["transcription_rate_per_hour"],
+                is_active=model_data["is_active"]
+            ))
+
+        return models
+
     async def transcribe(
         self,
         file_path: str,
         language: str = "english",
         generate_srt: bool = False,
+        model_name: str = "Perigee-1",
         wait: bool = True,
         timeout: int = 300,
         poll_interval: int = 5
@@ -117,6 +139,7 @@ class AsyncClient:
             file_path: Path to the audio file
             language: Language of the audio (default: "english")
             generate_srt: Whether to generate SRT subtitles (default: False)
+            model_name: AI model to use for transcription (default: "Perigee-1")
             wait: Whether to wait for completion (default: True)
             timeout: Maximum time to wait in seconds (default: 300)
             poll_interval: Seconds to wait between status checks (default: 5)
@@ -141,6 +164,7 @@ class AsyncClient:
             data.add_field('file', f, filename=file_path.split('/')[-1], content_type='audio/mpeg')
             data.add_field('language', language)
             data.add_field('generate_srt', str(generate_srt).lower())
+            data.add_field('model_name', model_name)
 
         # Upload file
         response = await self._make_request("POST", "/audio/upload", data=data)
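For orientation, a hedged sketch of the new async surface, assuming a valid API key and that `transcribe()` with the default `wait=True` returns a completed `Transcript` as in the synchronous client:

```python
# Sketch of AsyncClient.get_models() plus the new model_name parameter.
import asyncio
import orbitalsai

async def main():
    async with orbitalsai.AsyncClient(api_key="your_api_key_here") as client:
        models = await client.get_models()
        active = [m.model_name for m in models if m.is_active]
        print(f"Active models: {active}")

        transcript = await client.transcribe(
            "audio.mp3",
            language="english",
            model_name="Perigee-1",  # new parameter in 1.1.0
        )
        print(transcript.text)

asyncio.run(main())
```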
orbitalsai/client.py
CHANGED
@@ -12,7 +12,7 @@ from pathlib import Path
 
 from .models import (
     TranscriptTask, Transcript, Balance, UsageHistory, DailyUsage,
-    User, APIKey, UsageRecord, DailyUsageRecord
+    User, APIKey, UsageRecord, DailyUsageRecord, Model
 )
 from .exceptions import (
     OrbitalsAIError, AuthenticationError, InsufficientBalanceError,
@@ -89,11 +89,33 @@ class Client:
         except requests.exceptions.RequestException as e:
             raise OrbitalsAIError(f"Request failed: {str(e)}")
 
+    def get_models(self) -> List[Model]:
+        """
+        Get all available AI models with their pricing information.
+
+        Returns:
+            List of Model objects with pricing details
+        """
+        response = self._make_request("GET", "/models")
+
+        models = []
+        for model_data in response:
+            models.append(Model(
+                id=model_data["id"],
+                model_name=model_data["model_name"],
+                transcription_rate_per_second=model_data["transcription_rate_per_second"],
+                transcription_rate_per_hour=model_data["transcription_rate_per_hour"],
+                is_active=model_data["is_active"]
+            ))
+
+        return models
+
     def transcribe(
         self,
         file_path: str,
         language: str = "english",
         generate_srt: bool = False,
+        model_name: str = "Perigee-1",
         wait: bool = True,
         timeout: int = 300,
         poll_interval: int = 5
@@ -105,6 +127,7 @@ class Client:
             file_path: Path to the audio file
             language: Language of the audio (default: "english")
             generate_srt: Whether to generate SRT subtitles (default: False)
+            model_name: AI model to use for transcription (default: "Perigee-1")
             wait: Whether to wait for completion (default: True)
             timeout: Maximum time to wait in seconds (default: 300)
             poll_interval: Seconds to wait between status checks (default: 5)
@@ -128,7 +151,8 @@ class Client:
             files = {"file": (Path(file_path).name, f, "audio/mpeg")}
             data = {
                 "language": language,
-                "generate_srt": str(generate_srt).lower()
+                "generate_srt": str(generate_srt).lower(),
+                "model_name": model_name
             }
 
         # Upload file
@@ -249,54 +273,6 @@ class Client:
             last_updated=datetime.fromisoformat(response["last_updated"].replace('Z', '+00:00'))
         )
 
-    def get_usage_history(
-        self,
-        start_date: Optional[datetime] = None,
-        end_date: Optional[datetime] = None,
-        page: int = 1,
-        page_size: int = 50
-    ) -> UsageHistory:
-        """
-        Get usage history for the current user.
-
-        Args:
-            start_date: Start date for the history (default: 30 days ago)
-            end_date: End date for the history (default: now)
-            page: Page number (default: 1)
-            page_size: Number of records per page (default: 50)
-
-        Returns:
-            UsageHistory object with usage records
-        """
-        params = {"page": page, "page_size": page_size}
-        if start_date:
-            params["start_date"] = start_date.isoformat()
-        if end_date:
-            params["end_date"] = end_date.isoformat()
-
-        response = self._make_request("GET", "/billing/usage-history", params=params)
-
-        records = []
-        for record_data in response["records"]:
-            records.append(UsageRecord(
-                id=record_data["id"],
-                service_type=record_data["service_type"],
-                usage_amount=record_data["total_audio_usage"],
-                cost=record_data["cost"],
-                timestamp=datetime.fromisoformat(record_data["timestamp"].replace('Z', '+00:00')),
-                api_key_id=record_data.get("api_key_id")
-            ))
-
-        return UsageHistory(
-            records=records,
-            total_records=response["total_records"],
-            total_pages=response["total_pages"],
-            current_page=response["current_page"],
-            start_date=datetime.fromisoformat(response["start_date"].replace('Z', '+00:00')),
-            end_date=datetime.fromisoformat(response["end_date"].replace('Z', '+00:00')),
-            period_summary=response["period_summary"]
-        )
-
     def get_daily_usage(
         self,
         start_date: Optional[date] = None,
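Besides the additions, this file also loses `Client.get_usage_history()` while `get_daily_usage()` stays. A hedged compatibility sketch for callers written against 1.0.0 (the two methods return different shapes, so treat it only as a starting point):

```python
# Guard 1.0.0-era call sites: get_usage_history() no longer exists in 1.1.0.
from datetime import date, timedelta

import orbitalsai

client = orbitalsai.Client(api_key="your_api_key_here")

if hasattr(client, "get_usage_history"):
    usage = client.get_usage_history()  # 1.0.0 only: per-request UsageHistory
else:
    # Still present in 1.1.0: daily aggregates instead of individual records.
    usage = client.get_daily_usage(start_date=date.today() - timedelta(days=7))

print(usage)
```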
orbitalsai/models.py
CHANGED
@@ -114,6 +114,16 @@ class APIKey:
     last_used: Optional[datetime] = None
 
 
+@dataclass
+class Model:
+    """Represents an AI model with pricing information."""
+    id: int
+    model_name: str
+    transcription_rate_per_second: float
+    transcription_rate_per_hour: float
+    is_active: bool
+
+
 # Supported languages and file formats
 SUPPORTED_LANGUAGES = [
     "english", "hausa", "igbo", "yoruba", "swahili", "pidgin", "kinyarwanda"
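For reference, a small sketch of the new `Model` dataclass on its own; the numeric rates below are made up purely for illustration (real instances come from `get_models()`):

```python
# Construct the new Model dataclass directly; the rates are hypothetical.
from orbitalsai.models import Model

model = Model(
    id=1,
    model_name="Perigee-1",
    transcription_rate_per_second=0.0005,  # hypothetical rate
    transcription_rate_per_hour=1.80,      # hypothetical rate (0.0005 * 3600)
    is_active=True,
)

audio_seconds = 90
estimated_cost = audio_seconds * model.transcription_rate_per_second
print(f"Estimated cost for {audio_seconds}s of audio: ${estimated_cost:.4f}")
```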
{orbitalsai-1.0.0.dist-info → orbitalsai-1.1.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: orbitalsai
-Version: 1.0.0
+Version: 1.1.0
 Summary: A simple and powerful Python SDK for the OrbitalsAI API
 Home-page: https://github.com/orbitalsai/orbitalsai-python-sdk
 Author: OrbitalsAI
@@ -138,27 +138,61 @@ print(transcript.text)
 print(transcript.srt_content) # SRT subtitle content
 ```
 
-
+## 🤖 AI Model Selection
+
+Choose which AI model to use for transcription. Different models may have different pricing and capabilities.
+
+### List Available Models
 
 ```python
 import orbitalsai
 
 client = orbitalsai.Client(api_key="your_api_key_here")
 
-#
-
-
+# Get all available models
+models = client.get_models()
+
+for model in models:
+    print(f"{model.model_name}: ${model.transcription_rate_per_hour:.2f}/hour")
+```
+
+### Transcribe with Specific Model
 
-
-
-
-
+```python
+import orbitalsai
+
+client = orbitalsai.Client(api_key="your_api_key_here")
+
+# Transcribe with Perigee-1 model
+transcript = client.transcribe(
+    "audio.mp3",
+    language="hausa",
+    model_name="Perigee-1" # Specify the model
+)
 
-# Or wait for completion
-transcript = client.wait_for_task(task.task_id)
 print(transcript.text)
 ```
 
+### Choose Model Based on Budget
+
+```python
+import orbitalsai
+
+client = orbitalsai.Client(api_key="your_api_key_here")
+
+# Get the cheapest available model
+models = client.get_models()
+cheapest_model = min(models, key=lambda m: m.transcription_rate_per_hour)
+
+print(f"Using {cheapest_model.model_name} at ${cheapest_model.transcription_rate_per_hour:.2f}/hour")
+
+transcript = client.transcribe(
+    "audio.mp3",
+    language="english",
+    model_name=cheapest_model.model_name
+)
+```
+
 ## 🔄 Async Usage
 
 Perfect for processing multiple files or building web applications.
@@ -169,11 +203,15 @@ import orbitalsai
 
 async def main():
     async with orbitalsai.AsyncClient(api_key="your_api_key_here") as client:
+        # List available models
+        models = await client.get_models()
+        print(f"Available models: {[m.model_name for m in models]}")
+
         # Transcribe multiple files concurrently
         tasks = await asyncio.gather(
-            client.transcribe("audio1.mp3"),
-            client.transcribe("audio2.wav"),
-            client.transcribe("audio3.m4a")
+            client.transcribe("audio1.mp3", model_name="Perigee-1"),
+            client.transcribe("audio2.wav", model_name="Perigee-1"),
+            client.transcribe("audio3.m4a", model_name="Perigee-1")
         )
 
         for transcript in tasks:
@@ -202,7 +240,7 @@ print(f"Last updated: {balance.last_updated}")
 import orbitalsai
 from datetime import date, timedelta
 
-client = orbitalsai.Client(api_key="
+client = orbitalsai.Client(api_key="your_api_key_here")
 
 # Get last 7 days of usage
 end_date = date.today()
@@ -280,7 +318,13 @@ except Exception as e:
 
 ### Client Methods
 
-#### `
+#### `get_models()`
+
+Get all available AI models with their pricing information.
+
+**Returns:** List of `Model` objects
+
+#### `transcribe(file_path, language="english", generate_srt=False, model_name="Perigee-1", wait=True, timeout=300, poll_interval=5)`
 
 Transcribe an audio file.
 
@@ -288,6 +332,7 @@ Transcribe an audio file.
 - `file_path` (str): Path to the audio file
 - `language` (str): Language code (default: "english")
 - `generate_srt` (bool): Generate SRT subtitles (default: False)
+- `model_name` (str): AI model to use (default: "Perigee-1")
 - `wait` (bool): Wait for completion (default: True)
 - `timeout` (int): Maximum wait time in seconds (default: 300)
 - `poll_interval` (int): Seconds between status checks (default: 5)
@@ -367,6 +412,13 @@ Get current user details.
 - `balance` (float): Current balance in credits
 - `last_updated` (datetime): Last update timestamp
 
+#### `Model`
+- `id` (int): Model ID
+- `model_name` (str): Name of the model (e.g., "Perigee-1")
+- `transcription_rate_per_second` (float): Cost per second of audio
+- `transcription_rate_per_hour` (float): Cost per hour of audio
+- `is_active` (bool): Whether the model is currently available
+
 ## 🌍 Supported Languages
 
 - **English** (`english`)
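One combination the new README examples stop short of: filtering on `is_active` before applying the budget heuristic. A sketch, assuming at least one active model is returned:

```python
# Pick the cheapest *active* model before transcribing.
import orbitalsai

client = orbitalsai.Client(api_key="your_api_key_here")

active_models = [m for m in client.get_models() if m.is_active]
cheapest = min(active_models, key=lambda m: m.transcription_rate_per_hour)

transcript = client.transcribe(
    "audio.mp3",
    language="english",
    model_name=cheapest.model_name,
)
print(transcript.text)
```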
orbitalsai-1.1.0.dist-info/RECORD
ADDED
@@ -0,0 +1,11 @@
+orbitalsai/__init__.py,sha256=l1PSfVe3tVHU53VWOMc7c6qx3iBP_SPyRFN98-taCo4,1634
+orbitalsai/async_client.py,sha256=zW8eJrGTnu7Z0mDKo6y8b732s_oGpM9zXWWukj_QQhQ,14983
+orbitalsai/client.py,sha256=5pUFDVWXfRbrMVUx7Pw7lV7fvEJfaCqexmNk8aTOHjo,12352
+orbitalsai/exceptions.py,sha256=5jdLYiWUXsbpifFV4-rY4FRZtmRqnqvqXTlunh5DxgE,1390
+orbitalsai/models.py,sha256=ktmZJoYRnEuCZ1bGhR8vy6eBXkXC-wNMdA11CalZ458,3373
+orbitalsai/utils.py,sha256=HtRCWfg7xmk3c_KPqm2-P5kx178QZYxy2hDGv1IS9hM,2791
+orbitalsai-1.1.0.dist-info/licenses/LICENSE,sha256=C7Eee8rAV6XiweKYlvcabxiMvFX75B_0arFlrLnfnf0,1067
+orbitalsai-1.1.0.dist-info/METADATA,sha256=wcuxtbOZRljBf1Y4DRtJO_GetzmRJILwiOzHNQDmSoo,13278
+orbitalsai-1.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+orbitalsai-1.1.0.dist-info/top_level.txt,sha256=KZ4ueNRg-PyNeu3RKTOpe5PRCSidh3ieL70-YGf62ro,11
+orbitalsai-1.1.0.dist-info/RECORD,,
orbitalsai-1.0.0.dist-info/RECORD
REMOVED
@@ -1,11 +0,0 @@
-orbitalsai/__init__.py,sha256=cpl690jw9L93AYPqOFV_1kKVdfZI6UUiVl2av5KLtPg,1614
-orbitalsai/async_client.py,sha256=4-_AtYLNtSZ97xgLfiVebxTRLY9I9xglmFWDT9rVteA,14054
-orbitalsai/client.py,sha256=G7zuiyeQatWKqfIbiSMJ2wtCjQdf-aingeQ2UE9vDk4,13344
-orbitalsai/exceptions.py,sha256=5jdLYiWUXsbpifFV4-rY4FRZtmRqnqvqXTlunh5DxgE,1390
-orbitalsai/models.py,sha256=8SEo8zfxTdO4Rlhrhey21bILv2Vq4qwFQDIzhwPp9aA,3156
-orbitalsai/utils.py,sha256=HtRCWfg7xmk3c_KPqm2-P5kx178QZYxy2hDGv1IS9hM,2791
-orbitalsai-1.0.0.dist-info/licenses/LICENSE,sha256=C7Eee8rAV6XiweKYlvcabxiMvFX75B_0arFlrLnfnf0,1067
-orbitalsai-1.0.0.dist-info/METADATA,sha256=ysRwoHUQEzZe8X67cTtj9Jkn-ZQAjX4hA9T2E0l2Bxo,11853
-orbitalsai-1.0.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-orbitalsai-1.0.0.dist-info/top_level.txt,sha256=KZ4ueNRg-PyNeu3RKTOpe5PRCSidh3ieL70-YGf62ro,11
-orbitalsai-1.0.0.dist-info/RECORD,,
{orbitalsai-1.0.0.dist-info → orbitalsai-1.1.0.dist-info}/WHEEL
File without changes

{orbitalsai-1.0.0.dist-info → orbitalsai-1.1.0.dist-info}/licenses/LICENSE
File without changes

{orbitalsai-1.0.0.dist-info → orbitalsai-1.1.0.dist-info}/top_level.txt
File without changes