livellm 1.1.0__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
livellm/livellm.py CHANGED
@@ -1,7 +1,9 @@
  """LiveLLM Client - Python client for the LiveLLM Proxy and Realtime APIs."""
+ import asyncio
  import httpx
  import json
- from typing import List, Optional, AsyncIterator, Union
+ import warnings
+ from typing import List, Optional, AsyncIterator, Union, overload
  from .models.common import Settings, SuccessResponse
  from .models.agent.agent import AgentRequest, AgentResponse
  from .models.audio.speak import SpeakRequest
@@ -119,15 +121,16 @@ class LivellmClient:
                  error_response = error_response.decode("utf-8")
              raise Exception(f"Failed to post to {endpoint}: {error_response}")
          if expect_stream:
-             async def stream_response() -> AsyncIterator[Union[dict, bytes]]:
+             async def json_stream_response() -> AsyncIterator[dict]:
                  async for chunk in response.aiter_lines():
-                     if expect_json:
-                         chunk = chunk.strip()
-                         if not chunk:
-                             continue
-                         yield json.loads(chunk)
-                     else:
-                         yield chunk
+                     chunk = chunk.strip()
+                     if not chunk:
+                         continue
+                     yield json.loads(chunk)
+             async def bytes_stream_response() -> AsyncIterator[bytes]:
+                 async for chunk in response.aiter_bytes():
+                     yield chunk
+             stream_response = json_stream_response if expect_json else bytes_stream_response
              return stream_response()
          else:
              if expect_json:
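
For orientation, a sketch of what the refactor above means for callers of post(): with expect_json=True the returned iterator yields dicts parsed from non-empty newline-delimited JSON lines, and with expect_json=False it yields raw byte chunks. The payload variable below is a placeholder, not part of this diff.

    # JSON streaming: one dict per non-empty JSON line
    stream = await client.post(payload, "agent/run_stream", expect_stream=True, expect_json=True)
    async for item in stream:
        ...  # item is a dict

    # Binary streaming: raw chunks from response.aiter_bytes()
    stream = await client.post(payload, "audio/speak_stream", expect_stream=True, expect_json=False)
    async for chunk in stream:
        ...  # chunk is bytes
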
@@ -164,6 +167,7 @@ class LivellmClient:
          Should be called when you're done using the client.
          """
          for config in self.settings:
+             config: Settings = config
              await self.delete_config(config.uid)
          await self.client.aclose()
 
@@ -175,43 +179,473 @@ class LivellmClient:
          """Async context manager exit."""
          await self.cleanup()
 
+     def __del__(self):
+         """
+         Destructor invoked when the client is garbage collected.
+         Makes a best-effort attempt to delete provider configs if cleanup wasn't called.
+         Note: It's recommended to use the async context manager or call cleanup() explicitly.
+         """
+         # Warn user if cleanup wasn't called
+         if self.settings:
+             warnings.warn(
+                 "LivellmClient is being garbage collected without explicit cleanup. "
+                 "Provider configs may not be deleted from the server. "
+                 "Consider using 'async with' or calling 'await client.cleanup()' explicitly.",
+                 ResourceWarning,
+                 stacklevel=2
+             )
+ 
+         # Delete the configs with a temporary synchronous client:
+         # the async client cannot be awaited from a destructor
+         try:
+             with httpx.Client(base_url=self.base_url) as client:
+                 for config in self.settings:
+                     config: Settings = config
+                     client.delete(f"providers/config/{config.uid}", headers=self.headers)
+         except Exception:
+             # Silently fail - we're in a destructor
+             pass
 
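
Given the new ResourceWarning, the reliable pattern is still explicit cleanup; the destructor is only a best-effort fallback. A minimal sketch, with constructor arguments left as placeholders since they are not shown in this diff:

    # Preferred: the context manager guarantees cleanup() runs
    async with LivellmClient(...) as client:
        ...

    # Equivalent explicit form
    client = LivellmClient(...)
    try:
        ...
    finally:
        await client.cleanup()  # deletes provider configs, closes the HTTP client
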
+     @overload
+     async def agent_run(
+         self,
+         request: Union[AgentRequest, AgentFallbackRequest],
+     ) -> AgentResponse:
+         ...
+ 
+     @overload
+     async def agent_run(
+         self,
+         *,
+         provider_uid: str,
+         model: str,
+         messages: list,
+         tools: Optional[list] = None,
+         **kwargs
+     ) -> AgentResponse:
+         ...
+ 
      async def agent_run(
          self,
-         request: Union[AgentRequest, AgentFallbackRequest]
+         request: Optional[Union[AgentRequest, AgentFallbackRequest]] = None,
+         *,
+         provider_uid: Optional[str] = None,
+         model: Optional[str] = None,
+         messages: Optional[list] = None,
+         tools: Optional[list] = None,
+         **kwargs
      ) -> AgentResponse:
-         result = await self.post(request.model_dump(), "agent/run", expect_json=True)
+         """
+         Run an agent request.
+ 
+         Can be called in two ways:
+ 
+         1. With a request object:
+             await client.agent_run(AgentRequest(...))
+             await client.agent_run(AgentFallbackRequest(...))
+ 
+         2. With individual parameters (keyword arguments):
+             await client.agent_run(
+                 provider_uid="...",
+                 model="gpt-4",
+                 messages=[TextMessage(...)],
+                 tools=[]
+             )
+ 
+         Args:
+             request: An AgentRequest or AgentFallbackRequest object
+             provider_uid: The provider UID string
+             model: The model to use
+             messages: List of messages
+             tools: Optional list of tools
+             **kwargs: Extra keyword arguments collected into gen_config
+ 
+         Returns:
+             AgentResponse with the agent's output
+         """
+         # Check if first argument is a request object
+         if request is not None:
+             if not isinstance(request, (AgentRequest, AgentFallbackRequest)):
+                 raise TypeError(
+                     f"First positional argument must be AgentRequest or AgentFallbackRequest, got {type(request)}"
+                 )
+             result = await self.post(request.model_dump(), "agent/run", expect_json=True)
+             return AgentResponse(**result)
+ 
+         # Otherwise, use keyword arguments
+         if provider_uid is None or model is None or messages is None:
+             raise ValueError(
+                 "provider_uid, model, and messages are required. "
+                 "Alternatively, pass an AgentRequest object as the first positional argument."
+             )
+ 
+         agent_request = AgentRequest(
+             provider_uid=provider_uid,
+             model=model,
+             messages=messages,
+             tools=tools or [],
+             gen_config=kwargs or None
+         )
+         result = await self.post(agent_request.model_dump(), "agent/run", expect_json=True)
          return AgentResponse(**result)
 
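
One detail worth illustrating: in the keyword form, any extra keyword arguments are folded into gen_config on the constructed AgentRequest. A sketch, where the provider UID and generation option are hypothetical values:

    response = await client.agent_run(
        provider_uid="my-openai",  # hypothetical UID
        model="gpt-4",
        messages=[TextMessage(role="user", content="Hi")],
        temperature=0.2,  # not a named parameter, so it lands in gen_config={"temperature": 0.2}
    )
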
+     @overload
+     def agent_run_stream(
+         self,
+         request: Union[AgentRequest, AgentFallbackRequest],
+     ) -> AsyncIterator[AgentResponse]:
+         ...
+ 
+     @overload
+     def agent_run_stream(
+         self,
+         *,
+         provider_uid: str,
+         model: str,
+         messages: list,
+         tools: Optional[list] = None,
+         **kwargs
+     ) -> AsyncIterator[AgentResponse]:
+         ...
+ 
      async def agent_run_stream(
          self,
-         request: Union[AgentRequest, AgentFallbackRequest]
+         request: Optional[Union[AgentRequest, AgentFallbackRequest]] = None,
+         *,
+         provider_uid: Optional[str] = None,
+         model: Optional[str] = None,
+         messages: Optional[list] = None,
+         tools: Optional[list] = None,
+         **kwargs
      ) -> AsyncIterator[AgentResponse]:
-         stream = await self.post(request.model_dump(), "agent/run_stream", expect_stream=True, expect_json=True)
-         async for chunk in stream:
-             yield AgentResponse(**chunk)
+         """
+         Run an agent request with streaming response.
+ 
+         Can be called in two ways:
+ 
+         1. With a request object:
+             async for chunk in client.agent_run_stream(AgentRequest(...)):
+                 ...
+             async for chunk in client.agent_run_stream(AgentFallbackRequest(...)):
+                 ...
+ 
+         2. With individual parameters (keyword arguments):
+             async for chunk in client.agent_run_stream(
+                 provider_uid="...",
+                 model="gpt-4",
+                 messages=[TextMessage(...)],
+                 tools=[]
+             ):
+                 ...
+ 
+         Args:
+             request: An AgentRequest or AgentFallbackRequest object
+             provider_uid: The provider UID string
+             model: The model to use
+             messages: List of messages
+             tools: Optional list of tools
+             **kwargs: Extra keyword arguments collected into gen_config
+ 
+         Returns:
+             AsyncIterator of AgentResponse chunks
+         """
+         # Check if first argument is a request object
+         if request is not None:
+             if not isinstance(request, (AgentRequest, AgentFallbackRequest)):
+                 raise TypeError(
+                     f"First positional argument must be AgentRequest or AgentFallbackRequest, got {type(request)}"
+                 )
+             stream = await self.post(request.model_dump(), "agent/run_stream", expect_stream=True, expect_json=True)
+             async for chunk in stream:
+                 yield AgentResponse(**chunk)
+         else:
+             # Otherwise, use keyword arguments
+             if provider_uid is None or model is None or messages is None:
+                 raise ValueError(
+                     "provider_uid, model, and messages are required. "
+                     "Alternatively, pass an AgentRequest object as the first positional argument."
+                 )
+ 
+             agent_request = AgentRequest(
+                 provider_uid=provider_uid,
+                 model=model,
+                 messages=messages,
+                 tools=tools or [],
+                 gen_config=kwargs or None
+             )
+             stream = await self.post(agent_request.model_dump(), "agent/run_stream", expect_stream=True, expect_json=True)
+             async for chunk in stream:
+                 yield AgentResponse(**chunk)
+ 
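
A sketch of consuming the stream in the keyword form; each yielded chunk is already validated into an AgentResponse (values are placeholders):

    async for chunk in client.agent_run_stream(
        provider_uid="my-openai",  # hypothetical UID
        model="gpt-4",
        messages=[TextMessage(role="user", content="Tell me a joke")],
    ):
        print(chunk)
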
+     @overload
+     async def speak(
+         self,
+         request: Union[SpeakRequest, AudioFallbackRequest],
+     ) -> bytes:
+         ...
+ 
+     @overload
+     async def speak(
+         self,
+         *,
+         provider_uid: str,
+         model: str,
+         text: str,
+         voice: str,
+         mime_type: str,
+         sample_rate: int,
+         chunk_size: int = 20,
+         **kwargs
+     ) -> bytes:
+         ...
+ 
      async def speak(
          self,
-         request: Union[SpeakRequest, AudioFallbackRequest]
+         request: Optional[Union[SpeakRequest, AudioFallbackRequest]] = None,
+         *,
+         provider_uid: Optional[str] = None,
+         model: Optional[str] = None,
+         text: Optional[str] = None,
+         voice: Optional[str] = None,
+         mime_type: Optional[str] = None,
+         sample_rate: Optional[int] = None,
+         chunk_size: int = 20,
+         **kwargs
      ) -> bytes:
-         return await self.post(request.model_dump(), "audio/speak", expect_json=False)
+         """
+         Generate speech from text.
+ 
+         Can be called in two ways:
+ 
+         1. With a request object:
+             await client.speak(SpeakRequest(...))
+             await client.speak(AudioFallbackRequest(...))
+ 
+         2. With individual parameters (keyword arguments):
+             await client.speak(
+                 provider_uid="...",
+                 model="tts-1",
+                 text="Hello, world!",
+                 voice="alloy",
+                 mime_type="audio/pcm",
+                 sample_rate=24000
+             )
+ 
+         Args:
+             request: A SpeakRequest or AudioFallbackRequest object
+             provider_uid: The provider UID string
+             model: The model to use for TTS
+             text: The text to convert to speech
+             voice: The voice to use
+             mime_type: The MIME type of the output audio
+             sample_rate: The sample rate of the output audio
+             chunk_size: Chunk size in milliseconds (default: 20ms)
+             **kwargs: Extra keyword arguments collected into gen_config
+ 
+         Returns:
+             Audio data as bytes
+         """
+         # Check if first argument is a request object
+         if request is not None:
+             if not isinstance(request, (SpeakRequest, AudioFallbackRequest)):
+                 raise TypeError(
+                     f"First positional argument must be SpeakRequest or AudioFallbackRequest, got {type(request)}"
+                 )
+             return await self.post(request.model_dump(), "audio/speak", expect_json=False)
+ 
+         # Otherwise, use keyword arguments
+         if provider_uid is None or model is None or text is None or voice is None or mime_type is None or sample_rate is None:
+             raise ValueError(
+                 "provider_uid, model, text, voice, mime_type, and sample_rate are required. "
+                 "Alternatively, pass a SpeakRequest object as the first positional argument."
+             )
+ 
+         speak_request = SpeakRequest(
+             provider_uid=provider_uid,
+             model=model,
+             text=text,
+             voice=voice,
+             mime_type=mime_type,
+             sample_rate=sample_rate,
+             chunk_size=chunk_size,
+             gen_config=kwargs or None
+         )
+         return await self.post(speak_request.model_dump(), "audio/speak", expect_json=False)
+ 
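
Since the non-streaming endpoint returns the full audio as bytes, saving it is a one-liner. A sketch with placeholder values:

    audio = await client.speak(
        provider_uid="my-tts",  # hypothetical UID
        model="tts-1",
        text="Hello, world!",
        voice="alloy",
        mime_type="audio/wav",
        sample_rate=24000,
    )
    with open("hello.wav", "wb") as f:
        f.write(audio)
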
+     @overload
+     def speak_stream(
+         self,
+         request: Union[SpeakRequest, AudioFallbackRequest],
+     ) -> AsyncIterator[bytes]:
+         ...
+ 
+     @overload
+     def speak_stream(
+         self,
+         *,
+         provider_uid: str,
+         model: str,
+         text: str,
+         voice: str,
+         mime_type: str,
+         sample_rate: int,
+         chunk_size: int = 20,
+         **kwargs
+     ) -> AsyncIterator[bytes]:
+         ...
+ 
      async def speak_stream(
          self,
-         request: Union[SpeakRequest, AudioFallbackRequest]
+         request: Optional[Union[SpeakRequest, AudioFallbackRequest]] = None,
+         *,
+         provider_uid: Optional[str] = None,
+         model: Optional[str] = None,
+         text: Optional[str] = None,
+         voice: Optional[str] = None,
+         mime_type: Optional[str] = None,
+         sample_rate: Optional[int] = None,
+         chunk_size: int = 20,
+         **kwargs
      ) -> AsyncIterator[bytes]:
-         return await self.post(request.model_dump(), "audio/speak_stream", expect_stream=True, expect_json=False)
+         """
+         Generate speech from text with streaming response.
+ 
+         Can be called in two ways:
+ 
+         1. With a request object:
+             async for chunk in client.speak_stream(SpeakRequest(...)):
+                 ...
+             async for chunk in client.speak_stream(AudioFallbackRequest(...)):
+                 ...
+ 
+         2. With individual parameters (keyword arguments):
+             async for chunk in client.speak_stream(
+                 provider_uid="...",
+                 model="tts-1",
+                 text="Hello, world!",
+                 voice="alloy",
+                 mime_type="audio/pcm",
+                 sample_rate=24000
+             ):
+                 ...
+ 
+         Args:
+             request: A SpeakRequest or AudioFallbackRequest object
+             provider_uid: The provider UID string
+             model: The model to use for TTS
+             text: The text to convert to speech
+             voice: The voice to use
+             mime_type: The MIME type of the output audio
+             sample_rate: The sample rate of the output audio
+             chunk_size: Chunk size in milliseconds (default: 20ms)
+             **kwargs: Extra keyword arguments collected into gen_config
+ 
+         Returns:
+             AsyncIterator of audio data chunks as bytes
+         """
+         # Check if first argument is a request object
+         if request is not None:
+             if not isinstance(request, (SpeakRequest, AudioFallbackRequest)):
+                 raise TypeError(
+                     f"First positional argument must be SpeakRequest or AudioFallbackRequest, got {type(request)}"
+                 )
+             speak_stream = await self.post(request.model_dump(), "audio/speak_stream", expect_stream=True, expect_json=False)
+             async for chunk in speak_stream:
+                 yield chunk
+         else:
+             # Otherwise, use keyword arguments
+             if provider_uid is None or model is None or text is None or voice is None or mime_type is None or sample_rate is None:
+                 raise ValueError(
+                     "provider_uid, model, text, voice, mime_type, and sample_rate are required. "
+                     "Alternatively, pass a SpeakRequest object as the first positional argument."
+                 )
+ 
+             speak_request = SpeakRequest(
+                 provider_uid=provider_uid,
+                 model=model,
+                 text=text,
+                 voice=voice,
+                 mime_type=mime_type,
+                 sample_rate=sample_rate,
+                 chunk_size=chunk_size,
+                 gen_config=kwargs or None
+             )
+             speak_stream = await self.post(speak_request.model_dump(), "audio/speak_stream", expect_stream=True, expect_json=False)
+             async for chunk in speak_stream:
+                 yield chunk
 
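
The streaming variant yields audio incrementally, sized by chunk_size in milliseconds, so it can be written or played as it arrives. A sketch with placeholder values:

    with open("hello.pcm", "wb") as f:
        async for chunk in client.speak_stream(
            provider_uid="my-tts",  # hypothetical UID
            model="tts-1",
            text="Hello, world!",
            voice="alloy",
            mime_type="audio/pcm",
            sample_rate=24000,
        ):
            f.write(chunk)  # roughly 20 ms of audio per chunk at the default chunk_size
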
- 
+     @overload
+     async def transcribe(
+         self,
+         request: Union[TranscribeRequest, TranscribeFallbackRequest],
+     ) -> TranscribeResponse:
+         ...
+ 
+     @overload
      async def transcribe(
          self,
+         *,
          provider_uid: str,
          file: File,
          model: str,
          language: Optional[str] = None,
-         gen_config: Optional[dict] = None
+         **kwargs
      ) -> TranscribeResponse:
+         ...
+ 
+     async def transcribe(
+         self,
+         request: Optional[Union[TranscribeRequest, TranscribeFallbackRequest]] = None,
+         *,
+         provider_uid: Optional[str] = None,
+         file: Optional[File] = None,
+         model: Optional[str] = None,
+         language: Optional[str] = None,
+         **kwargs
+     ) -> TranscribeResponse:
+         """
+         Transcribe audio to text.
+ 
+         Can be called in two ways:
+ 
+         1. With a request object:
+             await client.transcribe(TranscribeRequest(...))
+ 
+         2. With individual parameters (keyword arguments):
+             await client.transcribe(
+                 provider_uid="...",
+                 file=("filename", audio_bytes, "audio/wav"),
+                 model="whisper-1"
+             )
+ 
+         Args:
+             request: A TranscribeRequest or TranscribeFallbackRequest object
+             provider_uid: The provider UID string
+             file: The audio file as a tuple (filename, content, content_type)
+             model: The model to use for transcription
+             language: Optional language code
+             **kwargs: Extra keyword arguments, JSON-encoded into the gen_config form field
+ 
+         Returns:
+             TranscribeResponse with transcription text and detected language
+         """
+         # Check if first argument is a request object
+         if request is not None:
+             if not isinstance(request, (TranscribeRequest, TranscribeFallbackRequest)):
+                 raise TypeError(
+                     f"First positional argument must be TranscribeRequest or TranscribeFallbackRequest, got {type(request)}"
+                 )
+             # JSON-based request
+             result = await self.post(request.model_dump(), "audio/transcribe_json", expect_json=True)
+             return TranscribeResponse(**result)
+ 
+         # Otherwise, use keyword arguments with a multipart form-data request
+         if provider_uid is None or file is None or model is None:
+             raise ValueError(
+                 "provider_uid, file, and model are required. "
+                 "Alternatively, pass a TranscribeRequest object as the first positional argument."
+             )
+ 
          files = {
              "file": file
          }
@@ -219,17 +653,10 @@ class LivellmClient:
              "provider_uid": provider_uid,
              "model": model,
              "language": language,
-             "gen_config": json.dumps(gen_config) if gen_config else None
+             "gen_config": json.dumps(kwargs) if kwargs else None
          }
          result = await self.post_multipart(files, data, "audio/transcribe")
          return TranscribeResponse(**result)
- 
-     async def transcribe_json(
-         self,
-         request: Union[TranscribeRequest, TranscribeFallbackRequest]
-     ) -> TranscribeResponse:
-         result = await self.post(request.model_dump(), "audio/transcribe_json", expect_json=True)
-         return TranscribeResponse(**result)
 
 
 
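
With the separate transcribe_json method removed, both call styles now go through transcribe(): a request object is posted as JSON to audio/transcribe_json, while keyword arguments are sent as multipart form-data to audio/transcribe. A sketch with placeholder values; the .text attribute is assumed from the docstring, not shown in this diff:

    with open("speech.wav", "rb") as f:
        result = await client.transcribe(
            provider_uid="my-stt",  # hypothetical UID
            file=("speech.wav", f.read(), "audio/wav"),
            model="whisper-1",
        )
    print(result.text)  # assumed field name
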
livellm/models/agent/agent.py CHANGED
@@ -1,6 +1,6 @@
  # models for full run: AgentRequest, AgentResponse
 
- from pydantic import BaseModel, Field
+ from pydantic import BaseModel, Field, field_validator
  from typing import Optional, List, Union
  from .chat import TextMessage, BinaryMessage
  from .tools import WebSearchInput, MCPStreamableServerInput
@@ -9,11 +9,10 @@ from ..common import BaseRequest
 
  class AgentRequest(BaseRequest):
      model: str = Field(..., description="The model to use")
-     messages: List[Union[TextMessage, BinaryMessage]]
-     tools: List[Union[WebSearchInput, MCPStreamableServerInput]]
+     messages: List[Union[TextMessage, BinaryMessage]] = Field(..., description="The messages to use")
+     tools: List[Union[WebSearchInput, MCPStreamableServerInput]] = Field(default_factory=list, description="The tools to use")
      gen_config: Optional[dict] = Field(default=None, description="The configuration for the generation")
 
-
  class AgentResponseUsage(BaseModel):
      input_tokens: int = Field(..., description="The number of input tokens used")
      output_tokens: int = Field(..., description="The number of output tokens used")
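
Because tools now uses default_factory=list, an AgentRequest can omit it entirely, which is what the client's keyword path relies on. A sketch with placeholder values:

    request = AgentRequest(
        provider_uid="my-openai",  # hypothetical UID
        model="gpt-4",
        messages=[TextMessage(role="user", content="Hi")],
        # tools omitted: defaults to []
    )
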
livellm/models/agent/chat.py CHANGED
@@ -1,18 +1,17 @@
  # models for chat messages
- from pydantic import BaseModel, Field, model_validator
+ from pydantic import BaseModel, Field, model_validator, field_serializer
  from enum import Enum
- from typing import Optional
+ from typing import Optional, Union
 
- class MessageRole(Enum):
+ class MessageRole(str, Enum):
      USER = "user"
      MODEL = "model"
      SYSTEM = "system"
 
 
  class Message(BaseModel):
-     role: MessageRole = Field(..., description="The role of the message")
-
-
+     role: Union[MessageRole, str] = Field(..., description="The role of the message")
+ 
  class TextMessage(Message):
      content: str = Field(..., description="The content of the message")
 
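
Subclassing str makes the enum members compare and serialize as plain strings, and the widened Union lets callers pass raw role strings. A quick sketch:

    assert MessageRole.USER == "user"             # str-enum equals its value
    msg = TextMessage(role="user", content="Hi")  # a plain string role is accepted
    print(msg.model_dump_json())                  # role serializes as "user"
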
livellm/models/agent/tools.py CHANGED
@@ -3,7 +3,7 @@ from pydantic import BaseModel, Field, field_validator
  from typing import Literal
  from enum import Enum
 
- class ToolKind(Enum):
+ class ToolKind(str, Enum):
      WEB_SEARCH = "web_search"
      MCP_STREAMABLE_SERVER = "mcp_streamable_server"
 
livellm/models/audio/speak.py CHANGED
@@ -6,7 +6,7 @@ from ..common import BaseRequest
  SpeakStreamResponse: TypeAlias = Tuple[AsyncIterator[bytes], str, int]
 
 
- class SpeakMimeType(Enum):
+ class SpeakMimeType(str, Enum):
      PCM = "audio/pcm"
      WAV = "audio/wav"
      MP3 = "audio/mpeg"
livellm/models/audio/transcribe.py CHANGED
@@ -30,17 +30,18 @@ class TranscribeRequest(BaseRequest):
 
-         # If content is already bytes, return as-is
+         # If content is bytes, base64-encode it so the tuple survives JSON serialization
          if isinstance(content, bytes):
-             return (filename, content, content_type)
+             try:
+                 encoded_content = base64.b64encode(content).decode("utf-8")  # base64 encode the content
+                 return (filename, encoded_content, content_type)
+             except Exception as e:
+                 raise ValueError(f"Failed to encode base64 content: {str(e)}")
 
          # If content is a string, assume it's base64 encoded
          elif isinstance(content, str):
-             try:
-                 decoded_content = base64.b64decode(content)
-                 return (filename, decoded_content, content_type)
-             except Exception as e:
-                 raise ValueError(f"Failed to decode base64 content: {str(e)}")
+             # assume it's already base64 encoded
+             return (filename, content, content_type)
          else:
              raise ValueError(f"file content must be either bytes or base64 string, got {type(content)}")
 
 
  class TranscribeResponse(BaseModel):
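
The conversion now runs in the opposite direction: bytes are base64-encoded into text (so the file tuple survives JSON serialization for the transcribe_json endpoint), and strings pass through on the assumption they are already base64. A sketch:

    import base64

    raw = b"\x00\x01\x02"
    encoded = base64.b64encode(raw).decode("utf-8")  # "AAEC"
    # Both forms now produce the same wire content:
    #   file=("a.wav", raw, "audio/wav")    -> ("a.wav", "AAEC", "audio/wav")
    #   file=("a.wav", "AAEC", "audio/wav") -> unchanged
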
livellm/models/fallback.py CHANGED
@@ -1,4 +1,4 @@
- from pydantic import BaseModel, Field
+ from pydantic import BaseModel, Field, model_validator
  from typing import List
  from .common import BaseRequest
  from .audio.speak import SpeakRequest
@@ -6,7 +6,7 @@ from .audio.transcribe import TranscribeRequest
  from .agent.agent import AgentRequest
  from enum import Enum
 
- class FallbackStrategy(Enum):
+ class FallbackStrategy(str, Enum):
      SEQUENTIAL = "sequential"
      PARALLEL = "parallel"
 
@@ -14,7 +14,7 @@ class FallbackRequest(BaseModel):
      requests: List[BaseRequest] = Field(..., description="List of requests to try as fallbacks")
      strategy: FallbackStrategy = Field(FallbackStrategy.SEQUENTIAL, description="The strategy to use for fallback")
      timeout_per_request: int = Field(default=360, description="The timeout to use for each request")
-
+ 
  class AgentFallbackRequest(FallbackRequest):
      requests: List[AgentRequest] = Field(..., description="List of agent requests to try as fallbacks")
 