langroid 0.25.0__py3-none-any.whl → 0.26.1__py3-none-any.whl
This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
- langroid/agent/batch.py +33 -0
- langroid/agent/callbacks/chainlit.py +57 -129
- {langroid-0.25.0.dist-info → langroid-0.26.1.dist-info}/METADATA +3 -3
- {langroid-0.25.0.dist-info → langroid-0.26.1.dist-info}/RECORD +7 -7
- pyproject.toml +4 -4
- {langroid-0.25.0.dist-info → langroid-0.26.1.dist-info}/LICENSE +0 -0
- {langroid-0.25.0.dist-info → langroid-0.26.1.dist-info}/WHEEL +0 -0
langroid/agent/batch.py
CHANGED
@@ -363,3 +363,36 @@ def agent_response_batch(
         sequential=sequential,
         stop_on_first_result=stop_on_first_result,
     )
+
+
+def run_batch_function(
+    function: Callable[[T], U],
+    items: list[T],
+    sequential: bool = True,
+    batch_size: Optional[int] = None,
+) -> List[U]:
+    async def _do_task(item: T) -> U:
+        return function(item)
+
+    async def _do_all(items: Iterable[T]) -> List[U]:
+        if sequential:
+            results = []
+            for item in items:
+                result = await _do_task(item)
+                results.append(result)
+            return results
+
+        return await asyncio.gather(*(_do_task(item) for item in items))
+
+    results: List[U] = []
+
+    if batch_size is None:
+        with status(f"[bold green]Running {len(items)} tasks:"):
+            results = asyncio.run(_do_all(items))
+    else:
+        batches = batched(items, batch_size)
+        for batch in batches:
+            with status(f"[bold green]Running batch of {len(batch)} tasks:"):
+                results.extend(asyncio.run(_do_all(batch)))
+
+    return results
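The new run_batch_function runs a plain (synchronous) function over a list of items, either strictly in order or gathered concurrently via asyncio, optionally split into fixed-size batches with a status spinner per batch. A minimal usage sketch; the square function and inputs are illustrative, not part of the package:

from langroid.agent.batch import run_batch_function

def square(x: int) -> int:
    return x * x

# Default: process items one at a time, under a single status spinner.
results = run_batch_function(square, [1, 2, 3, 4])  # [1, 4, 9, 16]

# Split into batches of 2; within each batch, calls are awaited via asyncio.gather.
results = run_batch_function(square, [1, 2, 3, 4], sequential=False, batch_size=2)
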
langroid/agent/callbacks/chainlit.py
CHANGED
@@ -16,7 +16,6 @@ except ImportError:
     raise LangroidImportError("chainlit", "chainlit")
 
 from chainlit import run_sync
-from chainlit.config import config
 from chainlit.logger import logger
 
 import langroid as lr
@@ -227,7 +226,6 @@ class ChainlitAgentCallbacks:
     def __init__(
         self,
         agent: lr.Agent,
-        msg: cl.Message = None,
         config: ChainlitCallbackConfig = ChainlitCallbackConfig(),
     ):
         """Add callbacks to the agent, and save the initial message,
@@ -240,6 +238,7 @@ class ChainlitAgentCallbacks:
         agent.callbacks.show_llm_response = self.show_llm_response
         agent.callbacks.show_agent_response = self.show_agent_response
         agent.callbacks.get_user_response = self.get_user_response
+        agent.callbacks.get_user_response_async = self.get_user_response_async
         agent.callbacks.get_last_step = self.get_last_step
         agent.callbacks.set_parent_agent = self.set_parent_agent
         agent.callbacks.show_error_message = self.show_error_message
@@ -250,8 +249,6 @@ class ChainlitAgentCallbacks:
         # We don't want to suppress LLM output in async + streaming,
         # since we often use chainlit async callbacks to display LLM output
         self.agent.llm.config.async_stream_quiet = False
-        if msg is not None:
-            self.show_first_user_message(msg)
 
     def _get_parent_id(self) -> str | None:
         """Get step id under which we need to nest the current step:
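
With the msg parameter removed and get_user_response_async registered (see the hunks above), attaching callbacks to a bare agent needs only the agent itself, plus an optional config. A minimal sketch, assuming it runs inside an active chainlit session; the agent name is illustrative:

import langroid as lr
from langroid.agent.callbacks.chainlit import (
    ChainlitAgentCallbacks,
    ChainlitCallbackConfig,
)

agent = lr.ChatAgent(lr.ChatAgentConfig(name="Assistant"))
# The initial cl.Message argument is gone; the callbacks now need only the agent.
ChainlitAgentCallbacks(agent, config=ChainlitCallbackConfig())
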
@@ -281,10 +278,11 @@ class ChainlitAgentCallbacks:
 
     def start_llm_stream(self) -> Callable[[str], None]:
         """Returns a streaming fn that can be passed to the LLM class"""
-        self.stream = cl.
+        self.stream = cl.Message(
+            content="",
             id=self.curr_step.id if self.curr_step is not None else None,
-
-            type="
+            author=self._entity_name("llm"),
+            type="assistant_message",
             parent_id=self._get_parent_id(),
         )
         self.last_step = self.stream
@@ -296,7 +294,6 @@ class ChainlitAgentCallbacks:
             under parent {self._get_parent_id()}
             """
         )
-        run_sync(self.stream.send())  # type: ignore
 
         def stream_token(t: str) -> None:
             if self.stream is None:
@@ -307,10 +304,11 @@ class ChainlitAgentCallbacks:
 
     async def start_llm_stream_async(self) -> Callable[[str], None]:
         """Returns a streaming fn that can be passed to the LLM class"""
-        self.stream = cl.
+        self.stream = cl.Message(
+            content="",
             id=self.curr_step.id if self.curr_step is not None else None,
-
-            type="
+            author=self._entity_name("llm"),
+            type="assistant_message",
             parent_id=self._get_parent_id(),
         )
         self.last_step = self.stream
@@ -320,9 +318,8 @@ class ChainlitAgentCallbacks:
             Starting LLM stream for {self.agent.config.name}
             id = {self.stream.id}
             under parent {self._get_parent_id()}
-
+            """
         )
-        await self.stream.send()  # type: ignore
 
         async def stream_token(t: str) -> None:
             if self.stream is None:
@@ -346,14 +343,14 @@ class ChainlitAgentCallbacks:
         else:
            run_sync(self.stream.update())  # type: ignore
         stream_id = self.stream.id if content else None
-        step = cl.
+        step = cl.Message(
+            content=textwrap.dedent(content) or NO_ANSWER,
             id=stream_id,
-
-            type="
+            author=self._entity_name("llm", tool=is_tool),
+            type="assistant_message",
             parent_id=self._get_parent_id(),
             language="json" if is_tool else None,
         )
-        step.output = textwrap.dedent(content) or NO_ANSWER
         logger.info(
             f"""
             Finish STREAM LLM response for {self.agent.config.name}
@@ -371,16 +368,16 @@ class ChainlitAgentCallbacks:
         language: str | None = None,
     ) -> None:
         """Show non-streaming LLM response."""
-        step = cl.
+        step = cl.Message(
+            content=textwrap.dedent(content) or NO_ANSWER,
             id=self.curr_step.id if self.curr_step is not None else None,
-
-            type="
-            parent_id=self._get_parent_id(),
+            author=self._entity_name("llm", tool=is_tool, cached=cached),
+            type="assistant_message",
             language=language or ("json" if is_tool else None),
+            parent_id=self._get_parent_id(),
         )
         self.last_step = step
         self.curr_step = None
-        step.output = textwrap.dedent(content) or NO_ANSWER
         logger.info(
             f"""
             Showing NON-STREAM LLM response for {self.agent.config.name}
@@ -391,34 +388,31 @@ class ChainlitAgentCallbacks:
         run_sync(step.send())  # type: ignore
 
     def show_error_message(self, error: str) -> None:
-        """Show error message
-        step = cl.
-
+        """Show error message."""
+        step = cl.Message(
+            content=error,
+            author=self.agent.config.name + f"({ERROR})",
             type="run",
-            parent_id=self._get_parent_id(),
             language="text",
+            parent_id=self._get_parent_id(),
         )
         self.last_step = step
-        step.output = error
         run_sync(step.send())
 
     def show_agent_response(self, content: str, language="text") -> None:
-        """Show message from agent (typically tool handler).
-
-
-
-
+        """Show message from agent (typically tool handler)."""
+        if language == "text":
+            content = wrap_text_preserving_structure(content, width=90)
+        step = cl.Message(
+            content=content,
             id=self.curr_step.id if self.curr_step is not None else None,
-
+            author=self._entity_name("agent"),
             type="tool",
-            parent_id=self._get_parent_id(),
             language=language,
+            parent_id=self._get_parent_id(),
         )
-        if language == "text":
-            content = wrap_text_preserving_structure(content, width=90)
         self.last_step = step
         self.curr_step = None
-        step.output = content
         logger.info(
             f"""
             Showing AGENT response for {self.agent.config.name}
@@ -433,13 +427,13 @@ class ChainlitAgentCallbacks:
         so that the UI displays a spinner while the process is running."""
         if self.curr_step is not None:
             run_sync(self.curr_step.remove())  # type: ignore
-        step = cl.
-
+        step = cl.Message(
+            content="",
+            author=self._entity_name(entity),
             type="run",
             parent_id=self._get_parent_id(),
             language="text",
         )
-        step.output = ""
         self.last_step = step
         self.curr_step = step
         logger.info(
@@ -503,54 +497,23 @@ class ChainlitAgentCallbacks:
         return ""  # process the "feedback" case here
 
     def get_user_response(self, prompt: str) -> str:
-        """Ask for user response, wait for it, and return it
-        as a cl.Step rather than as a cl.Message so we can nest it
-        under the parent step.
-        """
-        return run_sync(self.ask_user_step(prompt=prompt, suppress_values=["c"]))
+        """Ask for user response, wait for it, and return it"""
 
-
-        """Show user response as a step."""
-        step = cl.Step(
-            id=cl.context.current_step.id,
-            name=self._entity_name("user"),
-            type="run",
-            parent_id=self._get_parent_id(),
-        )
-        step.output = message
-        logger.info(
-            f"""
-            Showing USER response for {self.agent.config.name}
-            id = {step.id}
-            under parent {self._get_parent_id()}
-            """
-        )
-        run_sync(step.send())
+        return run_sync(self.ask_user(prompt=prompt, suppress_values=["c"]))
 
-    def
-        """
-
-
-            name=self._entity_name("user"),
-            type="run",
-            parent_id=self._get_parent_id(),
-        )
-        self.last_step = step
-        step.output = msg.content
-        run_sync(step.update())
+    async def get_user_response_async(self, prompt: str) -> str:
+        """Ask for user response, wait for it, and return it"""
+
+        return await self.ask_user(prompt=prompt, suppress_values=["c"])
 
-    async def
+    async def ask_user(
         self,
         prompt: str,
         timeout: int = USER_TIMEOUT,
         suppress_values: List[str] = ["c"],
     ) -> str:
         """
-        Ask user for input
-        Rather than rely entirely on AskUserMessage (which doesn't let us
-        nest the question + answer under a step), we instead create fake
-        steps for the question and answer, and only rely on AskUserMessage
-        with an empty prompt to await user response.
+        Ask user for input.
 
         Args:
             prompt (str): Prompt to display to user
@@ -561,31 +524,16 @@ class ChainlitAgentCallbacks:
         Returns:
             str: User response
         """
-
-
-
-
-
-        # force hide_cot to False so that the user question + response is visible
-        config.ui.hide_cot = False
-
-        if prompt != "":
-            # Create a question step to ask user
-            question_step = cl.Step(
-                name=f"{self.agent.config.name} (AskUser ❓)",
-                type="run",
-                parent_id=self._get_parent_id(),
-            )
-            question_step.output = prompt
-            await question_step.send()  # type: ignore
-
-        # Use AskUserMessage to await user response,
-        # but with an empty prompt so the question is not visible,
-        # but still pauses for user input in the input box.
-        res = await cl.AskUserMessage(
-            content="",
+        ask_msg = cl.AskUserMessage(
+            content=prompt,
+            author=f"{self.agent.config.name}(Awaiting user input...)",
+            type="assistant_message",
             timeout=timeout,
-        )
+        )
+        res = await ask_msg.send()
+        if prompt == "":
+            # if there was no actual prompt, clear the row from the UI for clarity.
+            await ask_msg.remove()
 
         if res is None:
             run_sync(
@@ -595,31 +543,10 @@ class ChainlitAgentCallbacks:
             )
             return "x"
 
-        # The above will try to display user response in res
-        # but we create fake step with same id as res and
-        # erase it using empty output so it's not displayed
-        step = cl.Step(
-            id=res["id"],
-            name="TempUserResponse",
-            type="run",
-            parent_id=self._get_parent_id(),
-        )
-        step.output = ""
-        await step.update()  # type: ignore
-
         # Finally, reproduce the user response at right nesting level
         if res["output"] in suppress_values:
-            config.ui.hide_cot = hide_cot  # restore original value
             return ""
 
-        step = cl.Step(
-            name=self._entity_name(entity="user"),
-            type="run",
-            parent_id=self._get_parent_id(),
-        )
-        step.output = res["output"]
-        await step.send()  # type: ignore
-        config.ui.hide_cot = hide_cot  # restore original value
         return res["output"]
 
 
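Since get_user_response_async is now exposed on agent.callbacks (see the constructor hunk above), code running inside chainlit's event loop can await user input directly instead of going through run_sync. A hypothetical helper, not part of the package, sketching that call:

import langroid as lr

async def confirm(agent: lr.Agent) -> bool:
    # Assumes ChainlitAgentCallbacks has already been attached to this agent,
    # so the async callback attribute is populated.
    reply = await agent.callbacks.get_user_response_async("Proceed? (y/n)")
    return reply.strip().lower().startswith("y")
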
@@ -632,13 +559,12 @@ class ChainlitTaskCallbacks(ChainlitAgentCallbacks):
     def __init__(
         self,
         task: lr.Task,
-        msg: cl.Message = None,
         config: ChainlitCallbackConfig = ChainlitCallbackConfig(),
     ):
         """Inject callbacks recursively, ensuring msg is passed to the
         top-level agent"""
 
-        super().__init__(task.agent,
+        super().__init__(task.agent, config)
         self._inject_callbacks(task)
         self.task = task
         if config.show_subtask_response:
@@ -659,12 +585,14 @@ class ChainlitTaskCallbacks(ChainlitAgentCallbacks):
         """Show sub-task response as a step, nested at the right level."""
 
         # The step should nest under the calling agent's last step
-        step = cl.
-
+        step = cl.Message(
+            content=content or NO_ANSWER,
+            author=(
+                self.task.agent.config.name + f"( ⏎ From {task.agent.config.name})"
+            ),
             type="run",
             parent_id=self._get_parent_id(),
             language="json" if is_tool else None,
         )
-        step.output = content or NO_ANSWER
         self.last_step = step
         run_sync(step.send())
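
Taken together, ChainlitTaskCallbacks is now constructed from just the task (and an optional config). A minimal sketch of wiring a langroid Task into a chainlit message handler under these changes; the handler body and agent name are illustrative:

import chainlit as cl
import langroid as lr
from langroid.agent.callbacks.chainlit import ChainlitTaskCallbacks

@cl.on_message
async def on_message(message: cl.Message):
    agent = lr.ChatAgent(lr.ChatAgentConfig(name="Assistant"))
    task = lr.Task(agent, interactive=True)
    ChainlitTaskCallbacks(task)  # no msg argument after this change
    await task.run_async(message.content)
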
{langroid-0.25.0.dist-info → langroid-0.26.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langroid
-Version: 0.25.0
+Version: 0.26.1
 Summary: Harness LLMs with Multi-Agent Programming
 License: MIT
 Author: Prasad Chalasani
@@ -42,7 +42,7 @@ Requires-Dist: arango-datasets (>=1.2.2,<2.0.0) ; extra == "all" or extra == "ar
 Requires-Dist: async-generator (>=1.10,<2.0)
 Requires-Dist: bs4 (>=0.0.1,<0.0.2)
 Requires-Dist: cerebras-cloud-sdk (>=1.1.0,<2.0.0)
-Requires-Dist: chainlit (
+Requires-Dist: chainlit (>=1.3.2,<2.0.0) ; extra == "all" or extra == "chainlit"
 Requires-Dist: chromadb (>=0.4.21,<=0.4.23) ; extra == "vecdbs" or extra == "all" or extra == "chromadb"
 Requires-Dist: colorlog (>=6.7.0,<7.0.0)
 Requires-Dist: docstring-parser (>=0.15,<0.16)
@@ -54,7 +54,7 @@ Requires-Dist: fire (>=0.5.0,<0.6.0)
 Requires-Dist: gitpython (>=3.1.43,<4.0.0)
 Requires-Dist: google-api-python-client (>=2.95.0,<3.0.0)
 Requires-Dist: google-generativeai (>=0.5.2,<0.6.0)
-Requires-Dist: groq (>=0.
+Requires-Dist: groq (>=0.13.0,<0.14.0)
 Requires-Dist: grpcio (>=1.62.1,<2.0.0)
 Requires-Dist: halo (>=0.0.31,<0.0.32)
 Requires-Dist: huggingface-hub (>=0.21.2,<0.22.0) ; extra == "hf-transformers" or extra == "all" or extra == "transformers"
{langroid-0.25.0.dist-info → langroid-0.26.1.dist-info}/RECORD
CHANGED
@@ -1,9 +1,9 @@
 langroid/__init__.py,sha256=z_fCOLQJPOw3LLRPBlFB5-2HyCjpPgQa4m4iY5Fvb8Y,1800
 langroid/agent/__init__.py,sha256=ll0Cubd2DZ-fsCMl7e10hf9ZjFGKzphfBco396IKITY,786
 langroid/agent/base.py,sha256=jAt7tbyPIWoGJDe6Xi75nthl-JY47yWB9Q5O1m9QJq0,76798
-langroid/agent/batch.py,sha256=
+langroid/agent/batch.py,sha256=qK3ph6VNj_1sOhfXCZY4r6gh035DglDKU751p8BU0tY,14665
 langroid/agent/callbacks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langroid/agent/callbacks/chainlit.py,sha256=
+langroid/agent/callbacks/chainlit.py,sha256=C6zzzYC30qC4eMA7al7eFpRoTgoe3475kaMKyXgQM0Q,20695
 langroid/agent/chat_agent.py,sha256=jZaeMOQsGZlrnWj7RBT7RR17Bd0zR9H4D6n_F4rPUn0,79517
 langroid/agent/chat_document.py,sha256=xPUMGzR83rn4iAEXIw2jy5LQ6YJ6Y0TiZ78XRQeDnJQ,17778
 langroid/agent/helpers.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -142,8 +142,8 @@ langroid/vector_store/meilisearch.py,sha256=6frB7GFWeWmeKzRfLZIvzRjllniZ1cYj3Hmh
 langroid/vector_store/momento.py,sha256=qR-zBF1RKVHQZPZQYW_7g-XpTwr46p8HJuYPCkfJbM4,10534
 langroid/vector_store/qdrant_cloud.py,sha256=3im4Mip0QXLkR6wiqVsjV1QvhSElfxdFSuDKddBDQ-4,188
 langroid/vector_store/qdrantdb.py,sha256=v88lqFkepADvlN6lByUj9I4NEKa9X9lWH16uTPPbYrE,17457
-pyproject.toml,sha256=
-langroid-0.
-langroid-0.
-langroid-0.
-langroid-0.
+pyproject.toml,sha256=eBqM11UEFyExp9V3T3mzm3ecLHYfwjlcMZ_p9iihz8I,7496
+langroid-0.26.1.dist-info/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
+langroid-0.26.1.dist-info/METADATA,sha256=MA5FyOkURNni69GpGG3yEHoUufurJh4pwtENzET6WsI,57521
+langroid-0.26.1.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+langroid-0.26.1.dist-info/RECORD,,
pyproject.toml
CHANGED
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langroid"
-version = "0.25.0"
+version = "0.26.1"
 description = "Harness LLMs with Multi-Agent Programming"
 authors = ["Prasad Chalasani <pchalasani@gmail.com>"]
 readme = "README.md"
@@ -22,7 +22,7 @@ pymysql = {version = "^1.1.0", optional = true}
 meilisearch-python-sdk = {version="^2.2.3", optional=true}
 litellm = {version = "^1.30.1", optional = true}
 metaphor-python = {version = "^0.1.23", optional = true}
-chainlit = {version = "1.
+chainlit = {version = "^1.3.2", optional = true}
 python-socketio = {version="^5.11.0", optional=true}
 neo4j = {version = "^5.14.1", optional = true}
 huggingface-hub = {version="^0.21.2", optional=true}
@@ -82,7 +82,7 @@ grpcio = "^1.62.1"
 duckduckgo-search = "^6.0.0"
 
 google-generativeai = "^0.5.2"
-groq = "^0.
+groq = "^0.13.0"
 nest-asyncio = "^1.6.0"
 async-generator = "^1.10"
 
@@ -242,7 +242,7 @@ lint.select = [
 lint.exclude = ["docs/**", ".venv", "venv", "examples/**", "examples_dev", "langroid/utils/web", "notebooks", "__init__.py", "langroid/embedding_models/protoc/*"]
 lint.fixable = ["A", "B", "C", "D", "E", "F", "G", "I", "N", "Q", "S", "T", "W", "ANN", "ARG", "BLE", "COM", "DJ", "DTZ", "EM", "ERA", "EXE", "FBT", "ICN", "INP", "ISC", "NPY", "PD", "PGH", "PIE", "PL", "PT", "PTH", "PYI", "RET", "RSE", "RUF", "SIM", "SLF", "TCH", "TID", "TRY", "UP", "YTT"]
 lint.unfixable = []
-lint.extend-ignore = ["F821","F401"]
+lint.extend-ignore = ["F821", "F401"]
 
 [tool.pytest.ini_options]
 filterwarnings = ["ignore::DeprecationWarning"]
{langroid-0.25.0.dist-info → langroid-0.26.1.dist-info}/LICENSE
File without changes
{langroid-0.25.0.dist-info → langroid-0.26.1.dist-info}/WHEEL
File without changes