langroid 0.23.2__py3-none-any.whl → 0.24.1__py3-none-any.whl

This diff shows the changes between publicly released versions of this package, as they appear in the supported public registries. It is provided for informational purposes only.
@@ -39,6 +39,7 @@ from langroid.language_models.base import (
     LLMMessage,
     LLMResponse,
     LLMTokenUsage,
+    OpenAIJsonSchemaSpec,
     OpenAIToolCall,
     OpenAIToolSpec,
     Role,
@@ -71,6 +72,9 @@ GLHF_BASE_URL = "https://glhf.chat/api/openai/v1"
 OLLAMA_API_KEY = "ollama"
 DUMMY_API_KEY = "xxx"
 
+VLLM_API_KEY = os.environ.get("VLLM_API_KEY", DUMMY_API_KEY)
+LLAMACPP_API_KEY = os.environ.get("LLAMA_API_KEY", DUMMY_API_KEY)
+
 
 class AnthropicModel(str, Enum):
     """Enum for Anthropic models"""
@@ -88,7 +92,7 @@ class OpenAIChatModel(str, Enum):
     GPT4 = "gpt-4"
     GPT4_32K = "gpt-4-32k"
     GPT4_TURBO = "gpt-4-turbo"
-    GPT4o = "gpt-4o-2024-08-06"
+    GPT4o = "gpt-4o"
     GPT4o_MINI = "gpt-4o-mini"
     O1_PREVIEW = "o1-preview"
     O1_MINI = "o1-mini"
@@ -158,6 +162,11 @@ openAICompletionModelPreferenceList = [
     OpenAICompletionModel.TEXT_DA_VINCI_003,
 ]
 
+openAIStructuredOutputList = [
+    OpenAIChatModel.GPT4o_MINI,
+    OpenAIChatModel.GPT4o,
+]
+
 NON_STREAMING_MODELS = [
     OpenAIChatModel.O1_MINI,
     OpenAIChatModel.O1_PREVIEW,
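
This new list drives the default capability detection added later in the same file: when `api_base` is left unset (i.e. the OpenAI endpoint is used), `supports_strict_tools` is enabled, while `supports_json_schema` is enabled only for models in `openAIStructuredOutputList`. A minimal sketch of what that implies (the model name is illustrative, and the printed values assume no global chat-model override is in effect):

from langroid.language_models.openai_gpt import OpenAIGPT, OpenAIGPTConfig

llm = OpenAIGPT(OpenAIGPTConfig(chat_model="gpt-4o-mini"))  # default OpenAI api_base
print(llm.supports_strict_tools, llm.supports_json_schema)  # expected: True True
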
@@ -230,6 +239,15 @@ def gpt_3_5_warning() -> None:
     )
 
 
+@cache
+def parallel_strict_warning() -> None:
+    logging.warning(
+        "OpenAI tool calling in strict mode is not supported when "
+        "parallel tool calls are made. Disable parallel tool calling "
+        "to ensure correct behavior."
+    )
+
+
 def noop() -> None:
     """Does nothing."""
     return None
@@ -288,6 +306,14 @@ class OpenAIGPTConfig(LLMConfig):
     chat_model: str = defaultOpenAIChatModel
     completion_model: str = defaultOpenAICompletionModel
     run_on_first_use: Callable[[], None] = noop
+    parallel_tool_calls: Optional[bool] = None
+    # Supports constrained decoding which enforces that the output of the LLM
+    # adheres to a JSON schema
+    supports_json_schema: Optional[bool] = None
+    # Supports strict decoding for the generation of tool calls with
+    # the OpenAI Tools API; this ensures that the generated tools
+    # adhere to the provided schema.
+    supports_strict_tools: Optional[bool] = None
     # a string that roughly matches a HuggingFace chat_template,
     # e.g. "mistral-instruct-v0.2 (a fuzzy search is done to find the closest match)
     formatter: str | None = None
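
These three new `OpenAIGPTConfig` fields are ordinary optional settings; the runtime capability flags are then derived in `OpenAIGPT.__init__` from the chosen backend, as shown further down in this diff. They are most useful for backends whose capabilities langroid cannot infer. A minimal sketch of setting them explicitly (the chat_model value and flag settings are placeholders, not taken from the diff):

from langroid.language_models.openai_gpt import OpenAIGPTConfig

# e.g. a self-hosted OpenAI-compatible server whose capabilities must be declared
config = OpenAIGPTConfig(
    chat_model="local/localhost:8000/v1",
    parallel_tool_calls=False,   # strict tool calls warn if parallel calls stay enabled
    supports_json_schema=True,   # server enforces JSON-schema-constrained decoding
    supports_strict_tools=True,  # server honors strict tool schemas
)
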
@@ -297,7 +323,7 @@ class OpenAIGPTConfig(LLMConfig):
         local_model = "api_base" in kwargs and kwargs["api_base"] is not None
 
         chat_model = kwargs.get("chat_model", "")
-        local_prefixes = ["local/", "litellm/", "ollama/"]
+        local_prefixes = ["local/", "litellm/", "ollama/", "vllm/", "llamacpp/"]
         if any(chat_model.startswith(prefix) for prefix in local_prefixes):
             local_model = True
 
@@ -454,6 +480,9 @@ class OpenAIGPT(LanguageModel):
                 HFPromptFormatterConfig(model_name=self.config.formatter)
             )
 
+        self.supports_json_schema: bool = self.config.supports_json_schema or False
+        self.supports_strict_tools: bool = self.config.supports_strict_tools or False
+
         # if model name starts with "litellm",
         # set the actual model name by stripping the "litellm/" prefix
         # and set the litellm flag to True
@@ -484,8 +513,30 @@ class OpenAIGPT(LanguageModel):
             self.api_base = self.config.api_base or OLLAMA_BASE_URL
             self.api_key = OLLAMA_API_KEY
             self.config.chat_model = self.config.chat_model.replace("ollama/", "")
+        elif self.config.chat_model.startswith("vllm/"):
+            self.supports_json_schema = True
+            self.config.chat_model = self.config.chat_model.replace("vllm/", "")
+            self.api_key = VLLM_API_KEY
+            self.api_base = self.config.api_base or "http://localhost:8000/v1"
+            if not self.api_base.startswith("http"):
+                self.api_base = "http://" + self.api_base
+            if not self.api_base.endswith("/v1"):
+                self.api_base = self.api_base + "/v1"
+        elif self.config.chat_model.startswith("llamacpp/"):
+            self.supports_json_schema = True
+            self.api_base = self.config.chat_model.split("/", 1)[1]
+            if not self.api_base.startswith("http"):
+                self.api_base = "http://" + self.api_base
+            self.api_key = LLAMACPP_API_KEY
         else:
             self.api_base = self.config.api_base
+            # If api_base is unset we use OpenAI's endpoint, which supports
+            # these features (with JSON schema restricted to a limited set of models)
+            self.supports_strict_tools = self.api_base is None
+            self.supports_json_schema = (
+                self.api_base is None
+                and self.config.chat_model in openAIStructuredOutputList
+            )
 
         if settings.chat_model != "":
             # if we're overriding chat model globally, set completion model to same
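
Together with the `VLLM_API_KEY`/`LLAMACPP_API_KEY` constants added earlier, these branches introduce two new local-model prefixes. A rough usage sketch (model names and addresses are placeholders; the API-key environment variables are read when the module is imported, so export them beforehand):

from langroid.language_models.openai_gpt import OpenAIGPT, OpenAIGPTConfig

# "vllm/" is stripped from the model name; api_base defaults to http://localhost:8000/v1
vllm_llm = OpenAIGPT(OpenAIGPTConfig(chat_model="vllm/meta-llama/Llama-3.1-8B-Instruct"))

# for "llamacpp/", everything after the prefix is treated as the server address
llamacpp_llm = OpenAIGPT(OpenAIGPTConfig(chat_model="llamacpp/localhost:8080"))
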
@@ -1420,6 +1471,7 @@ class OpenAIGPT(LanguageModel):
         tool_choice: ToolChoiceTypes | Dict[str, str | Dict[str, str]] = "auto",
         functions: Optional[List[LLMFunctionSpec]] = None,
         function_call: str | Dict[str, str] = "auto",
+        response_format: Optional[OpenAIJsonSchemaSpec] = None,
     ) -> LLMResponse:
         self.run_on_first_use()
 
@@ -1453,7 +1505,13 @@
             return self.generate(prompt=prompt, max_tokens=max_tokens)
         try:
             return self._chat(
-                messages, max_tokens, tools, tool_choice, functions, function_call
+                messages,
+                max_tokens,
+                tools,
+                tool_choice,
+                functions,
+                function_call,
+                response_format,
             )
         except Exception as e:
             # log and re-raise exception
@@ -1468,6 +1526,7 @@
         tool_choice: ToolChoiceTypes | Dict[str, str | Dict[str, str]] = "auto",
         functions: Optional[List[LLMFunctionSpec]] = None,
         function_call: str | Dict[str, str] = "auto",
+        response_format: Optional[OpenAIJsonSchemaSpec] = None,
     ) -> LLMResponse:
         self.run_on_first_use()
 
@@ -1515,6 +1574,7 @@
                 tool_choice,
                 functions,
                 function_call,
+                response_format,
             )
             return result
         except Exception as e:
@@ -1582,6 +1642,7 @@
         tool_choice: ToolChoiceTypes | Dict[str, str | Dict[str, str]] = "auto",
         functions: Optional[List[LLMFunctionSpec]] = None,
         function_call: str | Dict[str, str] = "auto",
+        response_format: Optional[OpenAIJsonSchemaSpec] = None,
     ) -> Dict[str, Any]:
         """Prepare args for LLM chat-completion API call"""
         if isinstance(messages, str):
@@ -1622,18 +1683,30 @@
                 )
             )
         if tools is not None:
+            if self.config.parallel_tool_calls is not None:
+                args["parallel_tool_calls"] = self.config.parallel_tool_calls
+
+            if any(t.strict for t in tools) and (
+                self.config.parallel_tool_calls is None
+                or self.config.parallel_tool_calls
+            ):
+                parallel_strict_warning()
             args.update(
                 dict(
                     tools=[
                         dict(
                             type="function",
-                            function=t.function.dict(),
+                            function=t.function.dict()
+                            | ({"strict": t.strict} if t.strict is not None else {}),
                         )
                         for t in tools
                     ],
                     tool_choice=tool_choice,
                 )
             )
+        if response_format is not None:
+            args["response_format"] = response_format.to_dict()
+
         for p in self.unsupported_params():
             # some models e.g. o1-mini (as of sep 2024) don't support some params,
             # like temperature and stream, so we need to remove them.
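
To make the effect of this hunk concrete, here is roughly the shape of the assembled chat-completion kwargs when a strict tool and a JSON-schema response format are supplied. This is an illustrative sketch only: the exact dictionary produced by `OpenAIJsonSchemaSpec.to_dict()` is not shown in this diff, so the OpenAI-style `response_format` payload below is an assumption.

args = dict(
    model="gpt-4o",
    messages=[{"role": "user", "content": "What is the sentiment of this review?"}],
    parallel_tool_calls=False,  # copied from config.parallel_tool_calls when set
    tools=[
        {
            "type": "function",
            "function": {
                "name": "sentiment",
                "description": "Report the sentiment of the text",
                "parameters": {
                    "type": "object",
                    "properties": {"sentiment": {"type": "string"}},
                    "required": ["sentiment"],
                    "additionalProperties": False,
                },
                "strict": True,  # merged in because t.strict is not None
            },
        }
    ],
    tool_choice="auto",
    response_format={  # assumed result of response_format.to_dict()
        "type": "json_schema",
        "json_schema": {
            "name": "sentiment_output",
            "schema": {
                "type": "object",
                "properties": {"sentiment": {"type": "string"}},
                "required": ["sentiment"],
                "additionalProperties": False,
            },
            "strict": True,
        },
    },
)
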
@@ -1728,6 +1801,7 @@
         tool_choice: ToolChoiceTypes | Dict[str, str | Dict[str, str]] = "auto",
         functions: Optional[List[LLMFunctionSpec]] = None,
         function_call: str | Dict[str, str] = "auto",
+        response_format: Optional[OpenAIJsonSchemaSpec] = None,
     ) -> LLMResponse:
         """
         ChatCompletion API call to OpenAI.
@@ -1754,6 +1828,7 @@
             tool_choice,
             functions,
             function_call,
+            response_format,
         )
         cached, hashed_key, response = self._chat_completions_with_backoff(**args)
         if self.get_stream() and not cached:
@@ -1774,6 +1849,7 @@
         tool_choice: ToolChoiceTypes | Dict[str, str | Dict[str, str]] = "auto",
         functions: Optional[List[LLMFunctionSpec]] = None,
         function_call: str | Dict[str, str] = "auto",
+        response_format: Optional[OpenAIJsonSchemaSpec] = None,
     ) -> LLMResponse:
         """
         Async version of _chat(). See that function for details.
@@ -1785,6 +1861,7 @@
             tool_choice,
             functions,
             function_call,
+            response_format,
         )
         cached, hashed_key, response = await self._achat_completions_with_backoff(
             **args
@@ -589,3 +589,14 @@ def extend_document_class(d: Document) -> Type[Document]:
     )
 
     return NewDocumentClass
+
+
+class PydanticWrapper(BaseModel):
+    value: Any
+
+
+def get_pydantic_wrapper(value_type: type) -> type[PydanticWrapper]:
+    class WrappedValue(PydanticWrapper):
+        value: value_type  # type: ignore
+
+    return WrappedValue
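
These helpers appear to land in `langroid/utils/pydantic_utils.py` (the module that defines `extend_document_class`, per the RECORD changes below). A small usage sketch, assuming that import path: they wrap a scalar type in a Pydantic model so that a non-object type can still be described by a top-level JSON-object schema.

from langroid.utils.pydantic_utils import PydanticWrapper, get_pydantic_wrapper

IntWrapper = get_pydantic_wrapper(int)   # dynamically creates a PydanticWrapper subclass
wrapped = IntWrapper(value=42)
assert isinstance(wrapped, PydanticWrapper)
print(wrapped.value)  # 42
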
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langroid
-Version: 0.23.2
+Version: 0.24.1
 Summary: Harness LLMs with Multi-Agent Programming
 License: MIT
 Author: Prasad Chalasani
@@ -249,6 +249,8 @@ teacher_task.run()
 <summary> <b>Click to expand</b></summary>
 
 - **Nov 2024:**
+  - **[0.24.0](https://langroid.github.io/langroid/notes/structured-output/)**:
+    Enables support for `Agent`s with strict JSON schema output format on compatible LLMs and strict mode for the OpenAI tools API.
   - **[0.23.0](https://langroid.github.io/langroid/tutorials/local-llm-setup/#local-llms-hosted-on-glhfchat)**:
     support for LLMs (e.g. `Qwen2.5-Coder-32b-Instruct`) hosted on glhf.chat
   - **[0.22.0](https://langroid.github.io/langroid/notes/large-tool-results/)**:
@@ -1,14 +1,14 @@
 langroid/__init__.py,sha256=z_fCOLQJPOw3LLRPBlFB5-2HyCjpPgQa4m4iY5Fvb8Y,1800
 langroid/agent/__init__.py,sha256=ll0Cubd2DZ-fsCMl7e10hf9ZjFGKzphfBco396IKITY,786
-langroid/agent/base.py,sha256=bYfVh_F-lYDecMpL_7SXzBetAZriT7ZfQE4vXHwI0xI,67945
+langroid/agent/base.py,sha256=QTmWvt1FzrZpIN2JS1cdhcPNwcXMZkBRDF30L9IF2mo,69412
 langroid/agent/batch.py,sha256=QZdlt1563hx4l3AXrCaGovE-PNG93M3DsvQAbDzdiS8,13705
 langroid/agent/callbacks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/agent/callbacks/chainlit.py,sha256=JJXI3UGTyTDg2FFath4rqY1GyUo_0pbVBt8CZpvdtn4,23289
-langroid/agent/chat_agent.py,sha256=DU_wtNFAxSJJFRkxpWNNqvI1P5QHkdi529As-22-Aog,51955
-langroid/agent/chat_document.py,sha256=FZ_PkeKU5OVp1IUlMvspfqxIXzlyd7J_F32DSYrxQ7E,17651
+langroid/agent/chat_agent.py,sha256=0xezOwMZeF_gXwq5-XbK67fdTr8ujTzyiRxUbYspRG0,78809
+langroid/agent/chat_document.py,sha256=xPUMGzR83rn4iAEXIw2jy5LQ6YJ6Y0TiZ78XRQeDnJQ,17778
 langroid/agent/helpers.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/agent/junk,sha256=LxfuuW7Cijsg0szAzT81OjWWv1PMNI-6w_-DspVIO2s,339
-langroid/agent/openai_assistant.py,sha256=2rjCZw45ysNBEGNzQM4uf0bTC4KkatGYAWcVcW4xcek,34337
+langroid/agent/openai_assistant.py,sha256=JkAcs02bIrgPNVvUWVR06VCthc5-ulla2QMBzux_q6o,34340
 langroid/agent/special/__init__.py,sha256=gik_Xtm_zV7U9s30Mn8UX3Gyuy4jTjQe9zjiE3HWmEo,1273
 langroid/agent/special/arangodb/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/agent/special/arangodb/arangodb_agent.py,sha256=12Y54c84c9qXV-YXRBcI5HaqyiY75JR4TmqlURYKJAM,25851
@@ -30,7 +30,7 @@ langroid/agent/special/neo4j/tools.py,sha256=Vw3HvtDfG2c4_bUHgt4_ZbJq48lpIQstbjj
 langroid/agent/special/relevance_extractor_agent.py,sha256=zIx8GUdVo1aGW6ASla0NPQjYYIpmriK_TYMijqAx3F8,4796
 langroid/agent/special/retriever_agent.py,sha256=lvMvf-u9rSosg4YASuFdUbGLgkzLPknXAbJZfZ1LZCc,1868
 langroid/agent/special/sql/__init__.py,sha256=mWfmm1QpXCezpFOS2eI57M0L_Ok3q5_ukG8tXBnBrEA,319
-langroid/agent/special/sql/sql_chat_agent.py,sha256=EsoF5_kheqhpiJw2wZs_6sgPfD0Or1YvfR5v2h6z74E,24094
+langroid/agent/special/sql/sql_chat_agent.py,sha256=IsVyFLpMinXsPd_HzUIyPC2wIdVc8SLuqNX04X0jyfs,24618
 langroid/agent/special/sql/utils/__init__.py,sha256=JFif6CRTrN-bc91uuAI4K9fe2ndIWSNMVxJ0WA68--M,446
 langroid/agent/special/sql/utils/description_extractors.py,sha256=cX8TIpmTPXZXQTMpIi3OUFwFsPywxFFdurpx717Kq0I,6529
 langroid/agent/special/sql/utils/populate_metadata.py,sha256=1J22UsyEPKzwK0XlJZtYn9r6kYc0FXIr8-lZrndYlhc,3131
@@ -38,20 +38,20 @@ langroid/agent/special/sql/utils/system_message.py,sha256=qKLHkvQWRQodTtPLPxr1GS
 langroid/agent/special/sql/utils/tools.py,sha256=ovCePzq5cmbqw0vsVPBzxdZpUcSUIfTiDSMGXustZW8,1749
 langroid/agent/special/table_chat_agent.py,sha256=d9v2wsblaRx7oMnKhLV7uO_ujvk9gh59pSGvBXyeyNc,9659
 langroid/agent/structured_message.py,sha256=y7pud1EgRNeTFZlJmBkLmwME3yQJ_IYik-Xds9kdZbY,282
-langroid/agent/task.py,sha256=D7mGWdZ8H71AG2ZPLK6RIiG29Kegn_3lTEBfsBU0I_8,87397
-langroid/agent/tool_message.py,sha256=noPvn2PxFY_xJvJXJzv-n5RVgy3CjH-Y_FZ5jEik5pQ,11422
+langroid/agent/task.py,sha256=r3sKXTD3QTEQoNV2YEvn5CPpCRQzHjueUcr9zg4ytFM,89685
+langroid/agent/tool_message.py,sha256=aaJSypRy2sC8b2qOszc7efgr5TrQva51SUGnxTwKXEg,14406
 langroid/agent/tools/__init__.py,sha256=IMgCte-_ZIvCkozGQmvMqxIw7_nKLKzD78ccJL1bnQU,804
 langroid/agent/tools/duckduckgo_search_tool.py,sha256=NhsCaGZkdv28nja7yveAhSK_w6l_Ftym8agbrdzqgfo,1935
 langroid/agent/tools/file_tools.py,sha256=GjPB5YDILucYapElnvvoYpGJuZQ25ecLs2REv7edPEo,7292
 langroid/agent/tools/google_search_tool.py,sha256=y7b-3FtgXf0lfF4AYxrZ3K5pH2dhidvibUOAGBE--WI,1456
 langroid/agent/tools/metaphor_search_tool.py,sha256=qj4gt453cLEX3EGW7nVzVu6X7LCdrwjSlcNY0qJW104,2489
-langroid/agent/tools/orchestration.py,sha256=jBQOfSuN218i6jF1Ege8O-CpFqe7-7_4WAw_J_E5xQY,10984
+langroid/agent/tools/orchestration.py,sha256=IwAZrSQMM0iMNqs5tcmkSBUIIKaLmH3wtcdyuYaY5uI,11004
 langroid/agent/tools/recipient_tool.py,sha256=dr0yTxgNEIoxUYxH6TtaExC4G_8WdJ0xGohIa4dFLhY,9808
 langroid/agent/tools/retrieval_tool.py,sha256=zcAV20PP_6VzSd-UE-IJcabaBseFL_QNz59Bnig8-lE,946
 langroid/agent/tools/rewind_tool.py,sha256=XAXL3BpNhCmBGYq_qi_sZfHJuIw7NY2jp4wnojJ7WRs,5606
 langroid/agent/tools/segment_extract_tool.py,sha256=__srZ_VGYLVOdPrITUM8S0HpmX4q7r5FHWMDdHdEv8w,1440
 langroid/agent/typed_task.py,sha256=oxja0Z3uLTv0BcR1xIMqDpo85MIGOruz4XsZ4ghjsW4,689
-langroid/agent/xml_tool_message.py,sha256=4Haxz_B2hyqKQmnHSf_BlloSqCNVjmBPeHzQaXlJq5c,14614
+langroid/agent/xml_tool_message.py,sha256=6SshYZJKIfi4mkE-gIoSwjkEYekQ8GwcSiCv7a5uO9E,15054
 langroid/agent_config.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/cachedb/__init__.py,sha256=icAT2s7Vhf-ZGUeqpDQGNU6ob6o0aFEyjwcxxUGRFjg,225
 langroid/cachedb/base.py,sha256=ztVjB1DtN6pLCujCWnR6xruHxwVj3XkYniRTYAKKqk0,1354
@@ -71,11 +71,11 @@ langroid/exceptions.py,sha256=G60UVDChkUlBDVWHFr_43zUUszZHSejoU00tX_dfD68,2322
 langroid/language_models/.chainlit/config.toml,sha256=1t5lHORGzc2E6dkaO9P15jYHu2w-4Kl9pYjpDPc84vs,3716
 langroid/language_models/.chainlit/translations/en-US.json,sha256=DAFz2HjOFFfboCStrUfKFg2BpplJPK_OOtixwF_GivY,9931
 langroid/language_models/__init__.py,sha256=8o8D8Lxaq961_oxVpB_bC2iEJ1GXJqYXMlwUcn6OJb8,976
-langroid/language_models/azure_openai.py,sha256=G4le3j4YLHV7IwgB2C37hO3MKijZ1KjynbYlEvpIF7Y,6214
-langroid/language_models/base.py,sha256=xMFg8syIHiA7ABRNWPXeFM1vGeY_1EN84ki8C3dycfw,22722
+langroid/language_models/azure_openai.py,sha256=l3A7_ZUld05ndUZ0MnBevj2ySjLTKDNCLEFTh5itavA,6579
+langroid/language_models/base.py,sha256=Fz_mosaDxUuoXhMf5_bjNEtoPuQIFuiwJRwp55zAFas,23410
 langroid/language_models/config.py,sha256=9Q8wk5a7RQr8LGMT_0WkpjY8S4ywK06SalVRjXlfCiI,378
-langroid/language_models/mock_lm.py,sha256=HuiAvjHiCfffYF5xjFJUq945HVTW0QPbeUUctOnNCzQ,3868
-langroid/language_models/openai_gpt.py,sha256=DwyRaJRwuY5fRqS0tT3iOk2zg6j3UpjD-iw7TUlxTEk,69934
+langroid/language_models/mock_lm.py,sha256=5BgHKDVRWFbUwDT_PFgTZXz9-k8wJSA2e3PZmyDgQ1k,4022
+langroid/language_models/openai_gpt.py,sha256=ezPvbWhoB0W4yyRbbHATZwfLMr6d0xS8ZOJfFSYOmA4,73346
 langroid/language_models/prompt_formatter/__init__.py,sha256=2-5cdE24XoFDhifOLl8yiscohil1ogbP1ECkYdBlBsk,372
 langroid/language_models/prompt_formatter/base.py,sha256=eDS1sgRNZVnoajwV_ZIha6cba5Dt8xjgzdRbPITwx3Q,1221
 langroid/language_models/prompt_formatter/hf_formatter.py,sha256=PVJppmjRvD-2DF-XNC6mE05vTZ9wbu37SmXwZBQhad0,5055
@@ -129,7 +129,7 @@ langroid/utils/output/citations.py,sha256=PSY2cpti8W-ZGFMAgj1lYoEIZy0lsniLpCliMs
 langroid/utils/output/printing.py,sha256=yzPJZN-8_jyOJmI9N_oLwEDfjMwVgk3IDiwnZ4eK_AE,2962
 langroid/utils/output/status.py,sha256=rzbE7mDJcgNNvdtylCseQcPGCGghtJvVq3lB-OPJ49E,1049
 langroid/utils/pandas_utils.py,sha256=UctS986Jtl_MvU5rA7-GfrjEHXP7MNu8ePhepv0bTn0,755
-langroid/utils/pydantic_utils.py,sha256=iRy7uQhHhQmIDZTTPNX5jXb6fqefMe9N67p3fPfOmTI,20624
+langroid/utils/pydantic_utils.py,sha256=R7Ps8VP56-eSo-LYHWllFo-SJ2zDmdItuuYpUq2gGJ8,20854
 langroid/utils/system.py,sha256=AiEehQy0K9c9qHdKsZRCscRrazDzuh5Tv3GRQsA0Cxg,8455
 langroid/utils/types.py,sha256=4GrOnU3HLWh-UwaUPp7LlB3V413q3K5OSzc0ggDoQ6A,2510
 langroid/utils/web/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -142,8 +142,8 @@ langroid/vector_store/meilisearch.py,sha256=6frB7GFWeWmeKzRfLZIvzRjllniZ1cYj3Hmh
 langroid/vector_store/momento.py,sha256=qR-zBF1RKVHQZPZQYW_7g-XpTwr46p8HJuYPCkfJbM4,10534
 langroid/vector_store/qdrant_cloud.py,sha256=3im4Mip0QXLkR6wiqVsjV1QvhSElfxdFSuDKddBDQ-4,188
 langroid/vector_store/qdrantdb.py,sha256=v88lqFkepADvlN6lByUj9I4NEKa9X9lWH16uTPPbYrE,17457
-pyproject.toml,sha256=L8FRAfEnw4iobtDdufbCBvb4JoxH-oSYMhRcePfzii0,7488
-langroid-0.23.2.dist-info/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
-langroid-0.23.2.dist-info/METADATA,sha256=U_J275x-bp2csL8EL6xK41m1r3q_aLDywDTBeutWtk0,57300
-langroid-0.23.2.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
-langroid-0.23.2.dist-info/RECORD,,
+pyproject.toml,sha256=FRIhmvm-hdPUnPTa3_3nollKCbdObvmUdb9KihV8aNE,7495
+langroid-0.24.1.dist-info/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
+langroid-0.24.1.dist-info/METADATA,sha256=6nB1o-vQeCT3ULDeZHZn4iIx7xPfeRwFw8ZOjoKnaW8,57514
+langroid-0.24.1.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+langroid-0.24.1.dist-info/RECORD,,
pyproject.toml CHANGED
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langroid"
-version = "0.23.2"
+version = "0.24.1"
 description = "Harness LLMs with Multi-Agent Programming"
 authors = ["Prasad Chalasani <pchalasani@gmail.com>"]
 readme = "README.md"
@@ -242,7 +242,7 @@ lint.select = [
 lint.exclude = ["docs/**", ".venv", "venv", "examples/**", "examples_dev", "langroid/utils/web", "notebooks", "__init__.py", "langroid/embedding_models/protoc/*"]
 lint.fixable = ["A", "B", "C", "D", "E", "F", "G", "I", "N", "Q", "S", "T", "W", "ANN", "ARG", "BLE", "COM", "DJ", "DTZ", "EM", "ERA", "EXE", "FBT", "ICN", "INP", "ISC", "NPY", "PD", "PGH", "PIE", "PL", "PT", "PTH", "PYI", "RET", "RSE", "RUF", "SIM", "SLF", "TCH", "TID", "TRY", "UP", "YTT"]
 lint.unfixable = []
-lint.extend-ignore = ["F821"]
+lint.extend-ignore = ["F821","F401"]
 
 [tool.pytest.ini_options]
 filterwarnings = ["ignore::DeprecationWarning"]