langroid 0.56.7__py3-none-any.whl → 0.56.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -149,8 +149,8 @@ class DocChatAgentConfig(ChatAgentConfig):
149
149
  n_fuzzy_neighbor_words: int = 100 # num neighbor words to retrieve for fuzzy match
150
150
  use_fuzzy_match: bool = True
151
151
  use_bm25_search: bool = True
152
- use_reciprocal_rank_fusion: bool = True # ignored if using cross-encoder reranking
153
- cross_encoder_reranking_model: str = (
152
+ use_reciprocal_rank_fusion: bool = False
153
+ cross_encoder_reranking_model: str = ( # ignored if use_reciprocal_rank_fusion=True
154
154
  "cross-encoder/ms-marco-MiniLM-L-6-v2" if has_sentence_transformers else ""
155
155
  )
156
156
  rerank_diversity: bool = True # rerank to maximize diversity?
@@ -249,11 +249,10 @@ class DocChatAgent(ChatAgent):
249
249
  ):
250
250
  logger.warning(
251
251
  """
252
- You have set `use_reciprocal_rank_fusion` to True,
253
- but it will be ignored since you have also set
254
- `cross_encoder_reranking_model` to a non-empty value.
255
- To use RRF (Reciprocal Rank Fusion), set
256
- `cross_encoder_reranking_model` to an empty string.
252
+ Ignoring `cross_encoder_reranking_model` since you have set
253
+ `use_reciprocal_rank_fusion` to True.
254
+ To use cross-encoder reranking, set
255
+ `use_reciprocal_rank_fusion` to False.
257
256
  """
258
257
  )
259
258
 
@@ -1113,7 +1112,7 @@ class DocChatAgent(ChatAgent):
1113
1112
  key=lambda x: x[0],
1114
1113
  reverse=True,
1115
1114
  )
1116
- passages = [d for _, d in sorted_pairs[: self.config.n_similar_chunks]]
1115
+ passages = [d for _, d in sorted_pairs]
1117
1116
  return passages
1118
1117
 
1119
1118
  def rerank_with_diversity(self, passages: List[Document]) -> List[Document]:
@@ -1320,10 +1319,7 @@ class DocChatAgent(ChatAgent):
1320
1319
  # TODO: Add score threshold in config
1321
1320
  docs_scores = self.get_similar_chunks_bm25(query, retrieval_multiple)
1322
1321
  id2doc.update({d.id(): d for d, _ in docs_scores})
1323
- if (
1324
- self.config.cross_encoder_reranking_model == ""
1325
- and self.config.use_reciprocal_rank_fusion
1326
- ):
1322
+ if self.config.use_reciprocal_rank_fusion:
1327
1323
  # if we're not re-ranking with a cross-encoder, and have RRF enabled,
1328
1324
  # instead of accumulating the bm25 results into passages,
1329
1325
  # we collect these ranks for Reciprocal Rank Fusion down below.
@@ -1338,10 +1334,7 @@ class DocChatAgent(ChatAgent):
1338
1334
  if self.config.use_fuzzy_match:
1339
1335
  # TODO: Add score threshold in config
1340
1336
  fuzzy_match_doc_scores = self.get_fuzzy_matches(query, retrieval_multiple)
1341
- if (
1342
- self.config.cross_encoder_reranking_model == ""
1343
- and self.config.use_reciprocal_rank_fusion
1344
- ):
1337
+ if self.config.use_reciprocal_rank_fusion:
1345
1338
  # if we're not re-ranking with a cross-encoder,
1346
1339
  # instead of accumulating the fuzzy match results into passages,
1347
1340
  # we collect these ranks for Reciprocal Rank Fusion down below.
@@ -1357,10 +1350,8 @@ class DocChatAgent(ChatAgent):
1357
1350
  # eliminate duplicate ids
1358
1351
  passages = [id2doc[id] for id in id2doc.keys()]
1359
1352
 
1360
- if (
1361
- self.config.cross_encoder_reranking_model == ""
1362
- and self.config.use_reciprocal_rank_fusion
1363
- and (self.config.use_bm25_search or self.config.use_fuzzy_match)
1353
+ if self.config.use_reciprocal_rank_fusion and (
1354
+ self.config.use_bm25_search or self.config.use_fuzzy_match
1364
1355
  ):
1365
1356
  # Since we're not using cross-encoder re-ranking,
1366
1357
  # we need to re-order the retrieved chunks from potentially three
@@ -1382,9 +1373,9 @@ class DocChatAgent(ChatAgent):
1382
1373
  # Use max_rank instead of infinity to avoid bias against
1383
1374
  # single-method docs
1384
1375
  max_rank = self.config.n_similar_chunks * retrieval_multiple
1385
- rank_semantic = id2_rank_semantic.get(id_, max_rank)
1386
- rank_bm25 = id2_rank_bm25.get(id_, max_rank)
1387
- rank_fuzzy = id2_rank_fuzzy.get(id_, max_rank)
1376
+ rank_semantic = id2_rank_semantic.get(id_, max_rank + 1)
1377
+ rank_bm25 = id2_rank_bm25.get(id_, max_rank + 1)
1378
+ rank_fuzzy = id2_rank_fuzzy.get(id_, max_rank + 1)
1388
1379
  c = self.config.reciprocal_rank_fusion_constant
1389
1380
  reciprocal_fusion_score = (
1390
1381
  1 / (rank_semantic + c) + 1 / (rank_bm25 + c) + 1 / (rank_fuzzy + c)
@@ -1421,10 +1412,14 @@ class DocChatAgent(ChatAgent):
1421
1412
  passages_scores = self.add_context_window(passages_scores)
1422
1413
  passages = [p for p, _ in passages_scores]
1423
1414
  # now passages can potentially have a lot of doc chunks,
1424
- # so we re-rank them using a cross-encoder scoring model,
1415
+ # so we re-rank them using a cross-encoder scoring model
1416
+ # (provided that `reciprocal_rank_fusion` is not enabled),
1425
1417
  # and pick top k where k = config.n_similar_chunks
1426
1418
  # https://www.sbert.net/examples/applications/retrieve_rerank
1427
- if self.config.cross_encoder_reranking_model != "":
1419
+ if (
1420
+ self.config.cross_encoder_reranking_model != ""
1421
+ and not self.config.use_reciprocal_rank_fusion
1422
+ ):
1428
1423
  passages = self.rerank_with_cross_encoder(query, passages)
1429
1424
 
1430
1425
  if self.config.rerank_diversity:
@@ -24,6 +24,7 @@ class TaskTool(ToolMessage):
24
24
  will be generated.
25
25
  """
26
26
 
27
+ # TODO: setting up termination conditions of sub-task needs to be improved
27
28
  request: str = "task_tool"
28
29
  purpose: str = """
29
30
  <HowToUse>
@@ -77,6 +78,7 @@ class TaskTool(ToolMessage):
77
78
  To disable all tools, set it to a singleton list containing 'NONE'
78
79
  """,
79
80
  )
81
+ # TODO: ensure valid model name
80
82
  model: str = Field(
81
83
  default=None,
82
84
  description="""
@@ -183,5 +185,7 @@ class TaskTool(ToolMessage):
183
185
  """
184
186
  task = self._set_up_task(agent)
185
187
  # Run the task on the prompt, and return the result
188
+ # TODO eventually allow the various task setup configs,
189
+ # including termination conditions
186
190
  result = await task.run_async(self.prompt, turns=self.max_iterations or 10)
187
191
  return result
@@ -78,7 +78,7 @@ class LLMConfig(BaseSettings):
78
78
  completion_model: str = ""
79
79
  temperature: float = 0.0
80
80
  chat_context_length: int | None = None
81
- async_stream_quiet: bool = True # suppress streaming output in async mode?
81
+ async_stream_quiet: bool = False # suppress streaming output in async mode?
82
82
  completion_context_length: int | None = None
83
83
  # if input length + max_output_tokens > context length of model,
84
84
  # we will try shortening requested output
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: langroid
3
- Version: 0.56.7
3
+ Version: 0.56.9
4
4
  Summary: Harness LLMs with Multi-Agent Programming
5
5
  Author-email: Prasad Chalasani <pchalasani@gmail.com>
6
6
  License: MIT
@@ -15,7 +15,7 @@ langroid/agent/xml_tool_message.py,sha256=oeBKnJNoGaKdtz39XoWGMTNlVyXew2MWH5lgtY
15
15
  langroid/agent/callbacks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
16
16
  langroid/agent/callbacks/chainlit.py,sha256=4rJw07NIIVTIVvksVY08h5PdLE_kRoJItjbQM0UjRn0,20962
17
17
  langroid/agent/special/__init__.py,sha256=gik_Xtm_zV7U9s30Mn8UX3Gyuy4jTjQe9zjiE3HWmEo,1273
18
- langroid/agent/special/doc_chat_agent.py,sha256=9Evo3d8iniCmU6Liw0lq2HElPcavHA_M9I7_kUMhrRk,68860
18
+ langroid/agent/special/doc_chat_agent.py,sha256=s3cQTUzbfuLiDOG8x5aRyZd1Ql-b1alHWOiQ4b5wgF8,68628
19
19
  langroid/agent/special/doc_chat_task.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
20
20
  langroid/agent/special/lance_doc_chat_agent.py,sha256=6pIqi2DF-MvYYN3-blsdUgulYnOBTl7I21T7wPAt1zM,10413
21
21
  langroid/agent/special/lance_tools.py,sha256=qS8x4wi8mrqfbYV2ztFzrcxyhHQ0ZWOc-zkYiH7awj0,2105
@@ -54,7 +54,7 @@ langroid/agent/tools/recipient_tool.py,sha256=dr0yTxgNEIoxUYxH6TtaExC4G_8WdJ0xGo
54
54
  langroid/agent/tools/retrieval_tool.py,sha256=zcAV20PP_6VzSd-UE-IJcabaBseFL_QNz59Bnig8-lE,946
55
55
  langroid/agent/tools/rewind_tool.py,sha256=XAXL3BpNhCmBGYq_qi_sZfHJuIw7NY2jp4wnojJ7WRs,5606
56
56
  langroid/agent/tools/segment_extract_tool.py,sha256=__srZ_VGYLVOdPrITUM8S0HpmX4q7r5FHWMDdHdEv8w,1440
57
- langroid/agent/tools/task_tool.py,sha256=VOHWv8uFPRczf6qp8YZPpUirkCqE6YVhN6jgXCvdpe0,7102
57
+ langroid/agent/tools/task_tool.py,sha256=2qg3oRDYjF93pi7J0gwgBjPL4R9ByKH6yMwD-Cx8voo,7325
58
58
  langroid/agent/tools/tavily_search_tool.py,sha256=soI-j0HdgVQLf09wRQScaEK4b5RpAX9C4cwOivRFWWI,1903
59
59
  langroid/agent/tools/mcp/__init__.py,sha256=DJNM0VeFnFS3pJKCyFGggT8JVjVu0rBzrGzasT1HaSM,387
60
60
  langroid/agent/tools/mcp/decorators.py,sha256=h7dterhsmvWJ8q4mp_OopmuG2DF71ty8cZwOyzdDZuk,1127
@@ -73,7 +73,7 @@ langroid/embedding_models/protoc/embeddings_pb2.pyi,sha256=UkNy7BrNsmQm0vLb3NtGX
73
73
  langroid/embedding_models/protoc/embeddings_pb2_grpc.py,sha256=9dYQqkW3JPyBpSEjeGXTNpSqAkC-6FPtBHyteVob2Y8,2452
74
74
  langroid/language_models/__init__.py,sha256=3aD2qC1lz8v12HX4B-dilv27gNxYdGdeu1QvDlkqqHs,1095
75
75
  langroid/language_models/azure_openai.py,sha256=SW0Fp_y6HpERr9l6TtF6CYsKgKwjUf_hSL_2mhTV4wI,5034
76
- langroid/language_models/base.py,sha256=253xcwXZ0yxSQ1W4SR50tAPZKCDc35yyU1o35EqB9b8,28484
76
+ langroid/language_models/base.py,sha256=OlPgmhQS2o3Y5DLoO1IEBUp0kIOeQdYsZsd25sz7DY8,28485
77
77
  langroid/language_models/config.py,sha256=9Q8wk5a7RQr8LGMT_0WkpjY8S4ywK06SalVRjXlfCiI,378
78
78
  langroid/language_models/mock_lm.py,sha256=tA9JpURznsMZ59iRhFYMmaYQzAc0D0BT-PiJIV58sAk,4079
79
79
  langroid/language_models/model_info.py,sha256=0e011vJZMi7XU9OkKT6doxlybrNJfMlP54klLDDNgFg,14939
@@ -137,7 +137,7 @@ langroid/vector_store/pineconedb.py,sha256=otxXZNaBKb9f_H75HTaU3lMHiaR2NUp5MqwLZ
137
137
  langroid/vector_store/postgres.py,sha256=wHPtIi2qM4fhO4pMQr95pz1ZCe7dTb2hxl4VYspGZoA,16104
138
138
  langroid/vector_store/qdrantdb.py,sha256=O6dSBoDZ0jzfeVBd7LLvsXu083xs2fxXtPa9gGX3JX4,18443
139
139
  langroid/vector_store/weaviatedb.py,sha256=Yn8pg139gOy3zkaPfoTbMXEEBCiLiYa1MU5d_3UA1K4,11847
140
- langroid-0.56.7.dist-info/METADATA,sha256=czhMl375GcCjQoavgVE7vQUbMNkan6D4_S83glup2to,65744
141
- langroid-0.56.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
142
- langroid-0.56.7.dist-info/licenses/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
143
- langroid-0.56.7.dist-info/RECORD,,
140
+ langroid-0.56.9.dist-info/METADATA,sha256=uppdkleoPy1RXOJ6sGIRfFDrYhcSUKAZ_1rfjaYErA8,65744
141
+ langroid-0.56.9.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
142
+ langroid-0.56.9.dist-info/licenses/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
143
+ langroid-0.56.9.dist-info/RECORD,,