livellm 1.1.1__py3-none-any.whl → 1.2.0__py3-none-any.whl

livellm/livellm.py CHANGED
@@ -3,7 +3,7 @@ import asyncio
  import httpx
  import json
  import warnings
- from typing import List, Optional, AsyncIterator, Union
+ from typing import List, Optional, AsyncIterator, Union, overload
  from .models.common import Settings, SuccessResponse
  from .models.agent.agent import AgentRequest, AgentResponse
  from .models.audio.speak import SpeakRequest
@@ -121,15 +121,16 @@ class LivellmClient:
              error_response = error_response.decode("utf-8")
              raise Exception(f"Failed to post to {endpoint}: {error_response}")
          if expect_stream:
-             async def stream_response() -> AsyncIterator[Union[dict, bytes]]:
+             async def json_stream_response() -> AsyncIterator[dict]:
                  async for chunk in response.aiter_lines():
-                     if expect_json:
-                         chunk = chunk.strip()
-                         if not chunk:
-                             continue
-                         yield json.loads(chunk)
-                     else:
-                         yield chunk
+                     chunk = chunk.strip()
+                     if not chunk:
+                         continue
+                     yield json.loads(chunk)
+             async def bytes_stream_response() -> AsyncIterator[bytes]:
+                 async for chunk in response.aiter_bytes():
+                     yield chunk
+             stream_response = json_stream_response if expect_json else bytes_stream_response
              return stream_response()
          else:
              if expect_json:
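The refactor above replaces one generator that branched on expect_json per chunk with two precisely typed generators, selected once. A standalone sketch of the resulting behavior (FakeResponse and make_stream are illustrative stand-ins, not livellm APIs):

    import asyncio
    import json
    from typing import AsyncIterator

    class FakeResponse:
        """Stand-in for the httpx response consumed by LivellmClient.post."""
        def __init__(self, lines, raw: bytes):
            self._lines, self._raw = lines, raw

        async def aiter_lines(self):
            for line in self._lines:
                yield line

        async def aiter_bytes(self):
            for i in range(0, len(self._raw), 4):
                yield self._raw[i:i + 4]

    def make_stream(response, expect_json: bool):
        # Same selection logic as the new code: pick one typed generator once.
        async def json_stream_response() -> AsyncIterator[dict]:
            async for chunk in response.aiter_lines():
                chunk = chunk.strip()
                if not chunk:          # skip keep-alive blank lines
                    continue
                yield json.loads(chunk)

        async def bytes_stream_response() -> AsyncIterator[bytes]:
            async for chunk in response.aiter_bytes():
                yield chunk

        return (json_stream_response if expect_json else bytes_stream_response)()

    async def main():
        resp = FakeResponse(['{"content": "hi"}', "", '{"content": "bye"}'], b"\x00\x01\x02\x03")
        async for event in make_stream(resp, expect_json=True):
            print(event)               # parsed dicts from JSON lines
        async for chunk in make_stream(resp, expect_json=False):
            print(chunk)               # raw bytes chunks

    asyncio.run(main())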
@@ -205,42 +206,446 @@ class LivellmClient:
              # Silently fail - we're in a destructor
              pass

+     @overload
      async def agent_run(
          self,
-         request: Union[AgentRequest, AgentFallbackRequest]
+         request: Union[AgentRequest, AgentFallbackRequest],
      ) -> AgentResponse:
-         result = await self.post(request.model_dump(), "agent/run", expect_json=True)
+         ...
+
+     @overload
+     async def agent_run(
+         self,
+         *,
+         provider_uid: str,
+         model: str,
+         messages: list,
+         tools: Optional[list] = None,
+         **kwargs
+     ) -> AgentResponse:
+         ...
+
+     async def agent_run(
+         self,
+         request: Optional[Union[AgentRequest, AgentFallbackRequest]] = None,
+         *,
+         provider_uid: Optional[str] = None,
+         model: Optional[str] = None,
+         messages: Optional[list] = None,
+         tools: Optional[list] = None,
+         **kwargs
+     ) -> AgentResponse:
+         """
+         Run an agent request.
+
+         Can be called in two ways:
+
+         1. With a request object:
+             await client.agent_run(AgentRequest(...))
+             await client.agent_run(AgentFallbackRequest(...))
+
+         2. With individual parameters (keyword arguments):
+             await client.agent_run(
+                 provider_uid="...",
+                 model="gpt-4",
+                 messages=[TextMessage(...)],
+                 tools=[]
+             )
+
+         Args:
+             request: An AgentRequest or AgentFallbackRequest object
+             provider_uid: The provider UID string
+             model: The model to use
+             messages: List of messages
+             tools: Optional list of tools
+             gen_config: Optional generation configuration
+
+         Returns:
+             AgentResponse with the agent's output
+         """
+         # Check if first argument is a request object
+         if request is not None:
+             if not isinstance(request, (AgentRequest, AgentFallbackRequest)):
+                 raise TypeError(
+                     f"First positional argument must be AgentRequest or AgentFallbackRequest, got {type(request)}"
+                 )
+             result = await self.post(request.model_dump(), "agent/run", expect_json=True)
+             return AgentResponse(**result)
+
+         # Otherwise, use keyword arguments
+         if provider_uid is None or model is None or messages is None:
+             raise ValueError(
+                 "provider_uid, model, and messages are required. "
+                 "Alternatively, pass an AgentRequest object as the first positional argument."
+             )
+
+         agent_request = AgentRequest(
+             provider_uid=provider_uid,
+             model=model,
+             messages=messages,
+             tools=tools or [],
+             gen_config=kwargs or None
+         )
+         result = await self.post(agent_request.model_dump(), "agent/run", expect_json=True)
          return AgentResponse(**result)

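Note the keyword-argument path above: there is no named gen_config parameter in the implementation signature; any extra keyword arguments are collected by **kwargs and forwarded as gen_config (gen_config=kwargs or None), which is what the docstring's gen_config entry refers to. A hedged usage sketch (the provider UID value and TextMessage fields are assumptions, not confirmed by this diff):

    # Assumes an initialized LivellmClient `client` and a configured provider.
    response = await client.agent_run(
        provider_uid="my-openai",                               # assumed provider UID
        model="gpt-4",
        messages=[TextMessage(role="user", content="Hello")],   # assumed fields
        temperature=0.2,   # not a named parameter: collected into **kwargs
        max_tokens=256,    # forwarded as gen_config={"temperature": 0.2, "max_tokens": 256}
    )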
+     @overload
+     def agent_run_stream(
+         self,
+         request: Union[AgentRequest, AgentFallbackRequest],
+     ) -> AsyncIterator[AgentResponse]:
+         ...
+
+     @overload
+     def agent_run_stream(
+         self,
+         *,
+         provider_uid: str,
+         model: str,
+         messages: list,
+         tools: Optional[list] = None,
+         **kwargs
+     ) -> AsyncIterator[AgentResponse]:
+         ...
+
      async def agent_run_stream(
          self,
-         request: Union[AgentRequest, AgentFallbackRequest]
+         request: Optional[Union[AgentRequest, AgentFallbackRequest]] = None,
+         *,
+         provider_uid: Optional[str] = None,
+         model: Optional[str] = None,
+         messages: Optional[list] = None,
+         tools: Optional[list] = None,
+         **kwargs
      ) -> AsyncIterator[AgentResponse]:
-         stream = await self.post(request.model_dump(), "agent/run_stream", expect_stream=True, expect_json=True)
-         async for chunk in stream:
-             yield AgentResponse(**chunk)
+         """
+         Run an agent request with streaming response.
+
+         Can be called in two ways:
+
+         1. With a request object:
+             async for chunk in client.agent_run_stream(AgentRequest(...)):
+                 ...
+             async for chunk in client.agent_run_stream(AgentFallbackRequest(...)):
+                 ...
+
+         2. With individual parameters (keyword arguments):
+             async for chunk in client.agent_run_stream(
+                 provider_uid="...",
+                 model="gpt-4",
+                 messages=[TextMessage(...)],
+                 tools=[]
+             ):
+                 ...
+
+         Args:
+             request: An AgentRequest or AgentFallbackRequest object
+             provider_uid: The provider UID string
+             model: The model to use
+             messages: List of messages
+             tools: Optional list of tools
+             gen_config: Optional generation configuration
+
+         Returns:
+             AsyncIterator of AgentResponse chunks
+         """
+         # Check if first argument is a request object
+         if request is not None:
+             if not isinstance(request, (AgentRequest, AgentFallbackRequest)):
+                 raise TypeError(
+                     f"First positional argument must be AgentRequest or AgentFallbackRequest, got {type(request)}"
+                 )
+             stream = await self.post(request.model_dump(), "agent/run_stream", expect_stream=True, expect_json=True)
+             async for chunk in stream:
+                 yield AgentResponse(**chunk)
+         else:
+             # Otherwise, use keyword arguments
+             if provider_uid is None or model is None or messages is None:
+                 raise ValueError(
+                     "provider_uid, model, and messages are required. "
+                     "Alternatively, pass an AgentRequest object as the first positional argument."
+                 )
+
+             agent_request = AgentRequest(
+                 provider_uid=provider_uid,
+                 model=model,
+                 messages=messages,
+                 tools=tools or [],
+                 gen_config=kwargs or None
+             )
+             stream = await self.post(agent_request.model_dump(), "agent/run_stream", expect_stream=True, expect_json=True)
+             async for chunk in stream:
+                 yield AgentResponse(**chunk)

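A typing detail in the block above: the overloads are declared as plain def returning AsyncIterator[AgentResponse], while the implementation is an async def generator. Both describe the same caller experience, because calling an async generator function returns an async iterator without awaiting:

    # Assumes an initialized LivellmClient `client`; argument values as in agent_run.
    async for chunk in client.agent_run_stream(   # note: no `await` on the call itself
        provider_uid="my-openai",                 # assumed provider UID
        model="gpt-4",
        messages=[TextMessage(role="user", content="Hello")],
    ):
        print(chunk)                              # each chunk is an AgentResponse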
+     @overload
      async def speak(
          self,
-         request: Union[SpeakRequest, AudioFallbackRequest]
+         request: Union[SpeakRequest, AudioFallbackRequest],
      ) -> bytes:
-         return await self.post(request.model_dump(), "audio/speak", expect_json=False)
+         ...
+
+     @overload
+     async def speak(
+         self,
+         *,
+         provider_uid: str,
+         model: str,
+         text: str,
+         voice: str,
+         mime_type: str,
+         sample_rate: int,
+         chunk_size: int = 20,
+         **kwargs
+     ) -> bytes:
+         ...
+
+     async def speak(
+         self,
+         request: Optional[Union[SpeakRequest, AudioFallbackRequest]] = None,
+         *,
+         provider_uid: Optional[str] = None,
+         model: Optional[str] = None,
+         text: Optional[str] = None,
+         voice: Optional[str] = None,
+         mime_type: Optional[str] = None,
+         sample_rate: Optional[int] = None,
+         chunk_size: int = 20,
+         **kwargs
+     ) -> bytes:
+         """
+         Generate speech from text.
+
+         Can be called in two ways:
+
+         1. With a request object:
+             await client.speak(SpeakRequest(...))
+             await client.speak(AudioFallbackRequest(...))
+
+         2. With individual parameters (keyword arguments):
+             await client.speak(
+                 provider_uid="...",
+                 model="tts-1",
+                 text="Hello, world!",
+                 voice="alloy",
+                 mime_type="audio/pcm",
+                 sample_rate=24000
+             )
+
+         Args:
+             request: A SpeakRequest or AudioFallbackRequest object
+             provider_uid: The provider UID string
+             model: The model to use for TTS
+             text: The text to convert to speech
+             voice: The voice to use
+             mime_type: The MIME type of the output audio
+             sample_rate: The sample rate of the output audio
+             chunk_size: Chunk size in milliseconds (default: 20ms)
+             gen_config: Optional generation configuration
+
+         Returns:
+             Audio data as bytes
+         """
+         # Check if first argument is a request object
+         if request is not None:
+             if not isinstance(request, (SpeakRequest, AudioFallbackRequest)):
+                 raise TypeError(
+                     f"First positional argument must be SpeakRequest or AudioFallbackRequest, got {type(request)}"
+                 )
+             return await self.post(request.model_dump(), "audio/speak", expect_json=False)
+
+         # Otherwise, use keyword arguments
+         if provider_uid is None or model is None or text is None or voice is None or mime_type is None or sample_rate is None:
+             raise ValueError(
+                 "provider_uid, model, text, voice, mime_type, and sample_rate are required. "
+                 "Alternatively, pass a SpeakRequest object as the first positional argument."
+             )
+
+         speak_request = SpeakRequest(
+             provider_uid=provider_uid,
+             model=model,
+             text=text,
+             voice=voice,
+             mime_type=mime_type,
+             sample_rate=sample_rate,
+             chunk_size=chunk_size,
+             gen_config=kwargs or None
+         )
+         return await self.post(speak_request.model_dump(), "audio/speak", expect_json=False)
+
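A hedged end-to-end sketch for the non-streaming path above, writing the returned bytes to disk (the provider UID, model, and voice values are assumptions):

    # Assumes an initialized LivellmClient `client` and a configured TTS provider.
    audio = await client.speak(
        provider_uid="my-tts",   # assumed provider UID
        model="tts-1",
        text="Hello, world!",
        voice="alloy",
        mime_type="audio/pcm",
        sample_rate=24000,
    )
    with open("hello.pcm", "wb") as f:
        f.write(audio)           # raw audio bytes, format per mime_type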
+     @overload
+     def speak_stream(
+         self,
+         request: Union[SpeakRequest, AudioFallbackRequest],
+     ) -> AsyncIterator[bytes]:
+         ...
+
+     @overload
+     def speak_stream(
+         self,
+         *,
+         provider_uid: str,
+         model: str,
+         text: str,
+         voice: str,
+         mime_type: str,
+         sample_rate: int,
+         chunk_size: int = 20,
+         **kwargs
+     ) -> AsyncIterator[bytes]:
+         ...

      async def speak_stream(
          self,
-         request: Union[SpeakRequest, AudioFallbackRequest]
+         request: Optional[Union[SpeakRequest, AudioFallbackRequest]] = None,
+         *,
+         provider_uid: Optional[str] = None,
+         model: Optional[str] = None,
+         text: Optional[str] = None,
+         voice: Optional[str] = None,
+         mime_type: Optional[str] = None,
+         sample_rate: Optional[int] = None,
+         chunk_size: int = 20,
+         **kwargs
      ) -> AsyncIterator[bytes]:
-         return await self.post(request.model_dump(), "audio/speak_stream", expect_stream=True, expect_json=False)
+         """
+         Generate speech from text with streaming response.
+
+         Can be called in two ways:
+
+         1. With a request object:
+             async for chunk in client.speak_stream(SpeakRequest(...)):
+                 ...
+             async for chunk in client.speak_stream(AudioFallbackRequest(...)):
+                 ...
+
+         2. With individual parameters (keyword arguments):
+             async for chunk in client.speak_stream(
+                 provider_uid="...",
+                 model="tts-1",
+                 text="Hello, world!",
+                 voice="alloy",
+                 mime_type="audio/pcm",
+                 sample_rate=24000
+             ):
+                 ...
+
+         Args:
+             request: A SpeakRequest or AudioFallbackRequest object
+             provider_uid: The provider UID string
+             model: The model to use for TTS
+             text: The text to convert to speech
+             voice: The voice to use
+             mime_type: The MIME type of the output audio
+             sample_rate: The sample rate of the output audio
+             chunk_size: Chunk size in milliseconds (default: 20ms)
+             gen_config: Optional generation configuration
+
+         Returns:
+             AsyncIterator of audio data chunks as bytes
+         """
+         # Check if first argument is a request object
+         if request is not None:
+             if not isinstance(request, (SpeakRequest, AudioFallbackRequest)):
+                 raise TypeError(
+                     f"First positional argument must be SpeakRequest or AudioFallbackRequest, got {type(request)}"
+                 )
+             speak_stream = await self.post(request.model_dump(), "audio/speak_stream", expect_stream=True, expect_json=False)
+             async for chunk in speak_stream:
+                 yield chunk
+         else:
+             # Otherwise, use keyword arguments
+             if provider_uid is None or model is None or text is None or voice is None or mime_type is None or sample_rate is None:
+                 raise ValueError(
+                     "provider_uid, model, text, voice, mime_type, and sample_rate are required. "
+                     "Alternatively, pass a SpeakRequest object as the first positional argument."
+                 )
+
+             speak_request = SpeakRequest(
+                 provider_uid=provider_uid,
+                 model=model,
+                 text=text,
+                 voice=voice,
+                 mime_type=mime_type,
+                 sample_rate=sample_rate,
+                 chunk_size=chunk_size,
+                 gen_config=kwargs or None
+             )
+             speak_stream = await self.post(speak_request.model_dump(), "audio/speak_stream", expect_stream=True, expect_json=False)
+             async for chunk in speak_stream:
+                 yield chunk

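The streaming variant yields the same audio as incremental bytes chunks (sized by chunk_size in milliseconds, per the docstring), so output can be written or played as it arrives. A hedged sketch with assumed provider values:

    # Assumes an initialized LivellmClient `client` and a configured TTS provider.
    with open("hello.pcm", "wb") as f:
        async for chunk in client.speak_stream(
            provider_uid="my-tts",  # assumed provider UID
            model="tts-1",
            text="Hello, world!",
            voice="alloy",
            mime_type="audio/pcm",
            sample_rate=24000,
            chunk_size=20,          # ~20 ms of audio per chunk
        ):
            f.write(chunk)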
-
+     @overload
+     async def transcribe(
+         self,
+         request: Union[TranscribeRequest, TranscribeFallbackRequest],
+     ) -> TranscribeResponse:
+         ...
+
+     @overload
      async def transcribe(
          self,
+         *,
          provider_uid: str,
          file: File,
          model: str,
          language: Optional[str] = None,
-         gen_config: Optional[dict] = None
+         **kwargs
+     ) -> TranscribeResponse:
+         ...
+
+     async def transcribe(
+         self,
+         request: Optional[Union[TranscribeRequest, TranscribeFallbackRequest]] = None,
+         *,
+         provider_uid: Optional[str] = None,
+         file: Optional[File] = None,
+         model: Optional[str] = None,
+         language: Optional[str] = None,
+         **kwargs
      ) -> TranscribeResponse:
+         """
+         Transcribe audio to text.
+
+         Can be called in two ways:
+
+         1. With a request object:
+             await client.transcribe(TranscribeRequest(...))
+
+         2. With individual parameters (keyword arguments):
+             await client.transcribe(
+                 provider_uid="...",
+                 file=("filename", audio_bytes, "audio/wav"),
+                 model="whisper-1"
+             )
+
+         Args:
+             request: A TranscribeRequest or TranscribeFallbackRequest object
+             provider_uid: The provider UID string
+             file: The audio file as a tuple (filename, content, content_type)
+             model: The model to use for transcription
+             language: Optional language code
+             gen_config: Optional generation configuration
+
+         Returns:
+             TranscribeResponse with transcription text and detected language
+         """
+         # Check if first argument is a request object
+         if request is not None:
+             if not isinstance(request, (TranscribeRequest, TranscribeFallbackRequest)):
+                 raise TypeError(
+                     f"First positional argument must be TranscribeRequest or TranscribeFallbackRequest, got {type(request)}"
+                 )
+             # JSON-based request
+             result = await self.post(request.model_dump(), "audio/transcribe_json", expect_json=True)
+             return TranscribeResponse(**result)
+
+         # Otherwise, use keyword arguments with multipart form-data request
+         if provider_uid is None or file is None or model is None:
+             raise ValueError(
+                 "provider_uid, file, and model are required. "
+                 "Alternatively, pass a TranscribeRequest object as the first positional argument."
+             )
+
          files = {
              "file": file
          }
@@ -248,17 +653,10 @@ class LivellmClient:
              "provider_uid": provider_uid,
              "model": model,
              "language": language,
-             "gen_config": json.dumps(gen_config) if gen_config else None
+             "gen_config": json.dumps(kwargs) if kwargs else None
          }
          result = await self.post_multipart(files, data, "audio/transcribe")
          return TranscribeResponse(**result)
-
-     async def transcribe_json(
-         self,
-         request: Union[TranscribeRequest, TranscribeFallbackRequest]
-     ) -> TranscribeResponse:
-         result = await self.post(request.model_dump(), "audio/transcribe_json", expect_json=True)
-         return TranscribeResponse(**result)



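With this hunk, the separate transcribe_json method is gone: transcribe now routes a request object as JSON to audio/transcribe_json, and routes the keyword form as multipart form-data to audio/transcribe, with any extra **kwargs JSON-encoded into the gen_config form field. A hedged sketch of the keyword path (the provider UID and model values are assumptions):

    # Assumes an initialized LivellmClient `client` and a configured STT provider.
    with open("speech.wav", "rb") as f:
        audio_bytes = f.read()
    result = await client.transcribe(
        provider_uid="my-stt",                         # assumed provider UID
        file=("speech.wav", audio_bytes, "audio/wav"),
        model="whisper-1",
        language="en",
    )
    print(result)  # TranscribeResponse with transcription text and detected language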
livellm/models/agent/agent.py CHANGED
@@ -1,6 +1,6 @@
  # models for full run: AgentRequest, AgentResponse

- from pydantic import BaseModel, Field
+ from pydantic import BaseModel, Field, field_validator
  from typing import Optional, List, Union
  from .chat import TextMessage, BinaryMessage
  from .tools import WebSearchInput, MCPStreamableServerInput
@@ -9,11 +9,10 @@ from ..common import BaseRequest

  class AgentRequest(BaseRequest):
      model: str = Field(..., description="The model to use")
-     messages: List[Union[TextMessage, BinaryMessage]]
-     tools: List[Union[WebSearchInput, MCPStreamableServerInput]]
+     messages: List[Union[TextMessage, BinaryMessage]] = Field(..., description="The messages to use")
+     tools: List[Union[WebSearchInput, MCPStreamableServerInput]] = Field(default_factory=list, description="The tools to use")
      gen_config: Optional[dict] = Field(default=None, description="The configuration for the generation")

-
  class AgentResponseUsage(BaseModel):
      input_tokens: int = Field(..., description="The number of input tokens used")
      output_tokens: int = Field(..., description="The number of output tokens used")
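The tools field now has default_factory=list, so 1.2.0 callers can omit it where 1.1.1 required an explicit list. A minimal sketch (provider_uid is taken from the client code's usage of AgentRequest; TextMessage fields are assumed):

    request = AgentRequest(
        provider_uid="my-openai",                           # assumed provider UID
        model="gpt-4",
        messages=[TextMessage(role="user", content="Hi")],  # assumed fields
        # tools omitted: defaults to []
    )
    print(request.model_dump()["tools"])  # -> []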
livellm/models/audio/transcribe.py CHANGED
@@ -30,17 +30,16 @@ class TranscribeRequest(BaseRequest):

          # If content is already bytes, return as-is
          if isinstance(content, bytes):
-             return (filename, content, content_type)
+             try:
+                 encoded_content = base64.b64encode(content).decode("utf-8") # base64 encode the content
+                 return (filename, encoded_content, content_type)
+             except Exception as e:
+                 raise ValueError(f"Failed to encode base64 content: {str(e)}")

          # If content is a string, assume it's base64 encoded
          elif isinstance(content, str):
-             try:
-                 decoded_content = base64.b64decode(content)
-                 return (filename, decoded_content, content_type)
-             except Exception as e:
-                 raise ValueError(f"Failed to decode base64 content: {str(e)}")
-         else:
-             raise ValueError(f"file content must be either bytes or base64 string, got {type(content)}")
+             # assume it's already base64 encoded
+             return (filename, content, content_type)


  class TranscribeResponse(BaseModel):
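This inverts the 1.1.1 behavior: bytes are now base64-encoded into a string (so the file tuple survives JSON serialization in model_dump()), and strings pass through on the assumption they are already base64. A standalone sketch mirroring the new validator logic (normalize_file is illustrative, not a livellm API):

    import base64

    def normalize_file(filename: str, content, content_type: str):
        # Mirrors the 1.2.0 validator: bytes -> base64 str, str passes through.
        if isinstance(content, bytes):
            return (filename, base64.b64encode(content).decode("utf-8"), content_type)
        elif isinstance(content, str):
            return (filename, content, content_type)

    print(normalize_file("a.wav", b"\x00\x01", "audio/wav"))
    # ('a.wav', 'AAE=', 'audio/wav')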
livellm/models/fallback.py CHANGED
@@ -1,4 +1,4 @@
- from pydantic import BaseModel, Field
+ from pydantic import BaseModel, Field, model_validator
  from typing import List
  from .common import BaseRequest
  from .audio.speak import SpeakRequest
@@ -6,7 +6,7 @@ from .audio.transcribe import TranscribeRequest
  from .agent.agent import AgentRequest
  from enum import Enum

- class FallbackStrategy(Enum):
+ class FallbackStrategy(str, Enum):
      SEQUENTIAL = "sequential"
      PARALLEL = "parallel"

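Mixing in str matters for serialization: a plain Enum member is rejected by json.dumps and compares unequal to its string value, while a str-mixin enum behaves as its value. A quick self-contained check:

    import json
    from enum import Enum

    class FallbackStrategy(str, Enum):
        SEQUENTIAL = "sequential"
        PARALLEL = "parallel"

    print(json.dumps({"strategy": FallbackStrategy.SEQUENTIAL}))  # {"strategy": "sequential"}
    print(FallbackStrategy.PARALLEL == "parallel")                # True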
@@ -14,7 +14,7 @@ class FallbackRequest(BaseModel):
      requests: List[BaseRequest] = Field(..., description="List of requests to try as fallbacks")
      strategy: FallbackStrategy = Field(FallbackStrategy.SEQUENTIAL, description="The strategy to use for fallback")
      timeout_per_request: int = Field(default=360, description="The timeout to use for each request")
-
+
  class AgentFallbackRequest(FallbackRequest):
      requests: List[AgentRequest] = Field(..., description="List of agent requests to try as fallbacks")