langroid 0.59.7__py3-none-any.whl → 0.59.9__py3-none-any.whl

This diff reflects the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
--- langroid/agent/special/relevance_extractor_agent.py
+++ langroid/agent/special/relevance_extractor_agent.py
@@ -14,7 +14,6 @@ from langroid.agent.chat_document import ChatDocument
 from langroid.agent.tools.segment_extract_tool import SegmentExtractTool
 from langroid.language_models.base import LLMConfig
 from langroid.language_models.openai_gpt import OpenAIGPTConfig
-from langroid.mytypes import Entity
 from langroid.parsing.utils import extract_numbered_segments, number_segments
 from langroid.utils.constants import DONE, NO_ANSWER
 
@@ -26,12 +25,19 @@ class RelevanceExtractorAgentConfig(ChatAgentConfig):
     llm: LLMConfig | None = OpenAIGPTConfig()
     segment_length: int = 1  # number of sentences per segment
     query: str = ""  # query for relevance extraction
+    handle_llm_no_tool: str = """
+    You FORGOT to use the `extract_segments` tool!
+    Remember that your response MUST be a JSON-formatted string
+    starting with `{"request": "extract_segments", ...}`
+    """
     system_message: str = """
     The user will give you a PASSAGE containing segments numbered as
     <#1#>, <#2#>, <#3#>, etc.,
     followed by a QUERY. Extract ONLY the segment-numbers from
     the PASSAGE that are RELEVANT to the QUERY.
     Present the extracted segment-numbers using the `extract_segments` tool/function.
+    Note that your response MUST be a JSON-formatted string
+    starting with `{"request": "extract_segments", ...}`
     """
 
 
@@ -63,13 +69,22 @@ class RelevanceExtractorAgent(ChatAgent):
         self.numbered_passage = number_segments(message_str, self.config.segment_length)
         # compose prompt
         prompt = f"""
+        <Instructions>
+        Given the PASSAGE below with NUMBERED segments, and the QUERY,
+        extract ONLY the segment-numbers that are RELEVANT to the QUERY,
+        and present them using the `extract_segments` tool/function,
+        i.e. your response MUST be a JSON-formatted string starting with
+        `{{"request": "extract_segments", ...}}`
+        </Instructions>
+
         PASSAGE:
         {self.numbered_passage}
 
         QUERY: {self.config.query}
         """
         # send to LLM
-        return super().llm_response(prompt)
+        response = super().llm_response(prompt)
+        return response
 
     @no_type_check
     async def llm_response_async(
@@ -99,7 +114,8 @@ class RelevanceExtractorAgent(ChatAgent):
         QUERY: {self.config.query}
         """
         # send to LLM
-        return await super().llm_response_async(prompt)
+        response = await super().llm_response_async(prompt)
+        return response
 
     def extract_segments(self, msg: SegmentExtractTool) -> str:
         """Method to handle a segmentExtractTool message from LLM"""
@@ -116,12 +132,3 @@ class RelevanceExtractorAgent(ChatAgent):
             return DONE + " " + NO_ANSWER
         # this response ends the task by saying DONE
         return DONE + " " + extracts
-
-    def handle_message_fallback(
-        self, msg: str | ChatDocument
-    ) -> str | ChatDocument | None:
-        """Handle case where LLM forgets to use SegmentExtractTool"""
-        if isinstance(msg, ChatDocument) and msg.metadata.sender == Entity.LLM:
-            return DONE + " " + NO_ANSWER
-        else:
-            return None
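A note on the change above: the removed handle_message_fallback method used to end the task with DONE + NO_ANSWER whenever the LLM replied without a tool call, whereas the new handle_llm_no_tool config string reminds the LLM to call `extract_segments` instead. The sketch below is assumed usage, not code from the package: the module path, class, field, and tool names come from this diff, while the constructor pattern, sample query, and sample passage are illustrative assumptions.

# Minimal sketch (assumed usage, not package code) of exercising the 0.59.9 agent,
# where the new `handle_llm_no_tool` reminder, rather than the removed
# `handle_message_fallback`, governs what happens when the LLM skips the tool.
from langroid.agent.special.relevance_extractor_agent import (
    RelevanceExtractorAgent,
    RelevanceExtractorAgentConfig,
)

config = RelevanceExtractorAgentConfig(
    query="What are the reported side effects?",  # hypothetical query
    segment_length=1,
)
agent = RelevanceExtractorAgent(config)

# The agent numbers the passage, prompts the LLM, and expects an
# `extract_segments` tool call; judging from the reminder text added in this
# release, a reply without that tool is now met with the `handle_llm_no_tool`
# message rather than immediately ending the task with DONE + NO_ANSWER.
passage = "Aspirin can cause stomach upset. It is also used to reduce fever."
response = agent.llm_response(passage)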
--- langroid/language_models/model_info.py
+++ langroid/language_models/model_info.py
@@ -324,8 +324,6 @@ MODEL_INFO: Dict[str, ModelInfo] = {
         input_cost_per_million=1.25,
         cached_cost_per_million=0.125,
         output_cost_per_million=10.00,
-        allows_streaming=False,
-        allows_system_message=False,
         has_structured_output=True,
         unsupported_params=["temperature"],
         rename_params={"max_tokens": "max_completion_tokens"},
@@ -340,8 +338,6 @@ MODEL_INFO: Dict[str, ModelInfo] = {
         input_cost_per_million=0.25,
         cached_cost_per_million=0.025,
         output_cost_per_million=2.00,
-        allows_streaming=False,
-        allows_system_message=False,
         has_structured_output=True,
         unsupported_params=["temperature"],
         rename_params={"max_tokens": "max_completion_tokens"},
@@ -356,8 +352,6 @@ MODEL_INFO: Dict[str, ModelInfo] = {
         input_cost_per_million=0.05,
         cached_cost_per_million=0.005,
         output_cost_per_million=0.40,
-        allows_streaming=False,
-        allows_system_message=False,
         has_structured_output=True,
         unsupported_params=["temperature"],
         rename_params={"max_tokens": "max_completion_tokens"},
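For the model_info.py hunks above, dropping the explicit allows_streaming=False and allows_system_message=False arguments means those three entries now inherit whatever defaults ModelInfo defines for these flags. A hypothetical way to inspect the effect is sketched below: MODEL_INFO and the field names appear in the hunks, but the "gpt-5" key is an assumption, since the diff does not show the dictionary keys.

# Hypothetical inspection snippet: MODEL_INFO and the ModelInfo field names are
# taken from the hunk headers above; the "gpt-5" key is an assumed example.
from langroid.language_models.model_info import MODEL_INFO

info = MODEL_INFO["gpt-5"]  # assumed key for one of the edited entries
print(info.allows_streaming, info.allows_system_message)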
--- langroid-0.59.7.dist-info/METADATA
+++ langroid-0.59.9.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: langroid
-Version: 0.59.7
+Version: 0.59.9
 Summary: Harness LLMs with Multi-Agent Programming
 Author-email: Prasad Chalasani <pchalasani@gmail.com>
 License: MIT
--- langroid-0.59.7.dist-info/RECORD
+++ langroid-0.59.9.dist-info/RECORD
@@ -19,7 +19,7 @@ langroid/agent/special/doc_chat_agent.py,sha256=tUr4qex3OjqF32zeyvTOnNgUP1wdpe5h
 langroid/agent/special/doc_chat_task.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/agent/special/lance_doc_chat_agent.py,sha256=6pIqi2DF-MvYYN3-blsdUgulYnOBTl7I21T7wPAt1zM,10413
 langroid/agent/special/lance_tools.py,sha256=3j7Hsyf3-H9ccTXjyNOcnMnpJ7r1lXnqDLSMQgFa7ZI,2114
-langroid/agent/special/relevance_extractor_agent.py,sha256=Wa65UReGaNIB5MkXugzc4X9ci3c21-PwDrN7zNX-iVQ,4801
+langroid/agent/special/relevance_extractor_agent.py,sha256=EgFDgxHZmnpzwh17vrzdncoRn2yPBsiRweN9wr85czc,5206
 langroid/agent/special/retriever_agent.py,sha256=o2UfqiCGME0t85SZ6qjK041_WZYqXSuV1SeH_3KtVuc,1931
 langroid/agent/special/table_chat_agent.py,sha256=T2YMFpOnW4YV-QXvB34MbaBGXBPiWeCiqO1bVKFykbg,10943
 langroid/agent/special/arangodb/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -77,7 +77,7 @@ langroid/language_models/base.py,sha256=uRi-XtQhsEBN702_lGepxlNQ0e8gaPgaXjULYn2r
 langroid/language_models/client_cache.py,sha256=c8tAUTKxbd-CVGRy9WPYhI1pLtarFWWwX6-qm4ZXaqk,9399
 langroid/language_models/config.py,sha256=rhEZ9Wwq42q1uW_-oRFbilJhIw1vYlTvYqIk_7pHiSY,397
 langroid/language_models/mock_lm.py,sha256=tA9JpURznsMZ59iRhFYMmaYQzAc0D0BT-PiJIV58sAk,4079
-langroid/language_models/model_info.py,sha256=qAx_BSU2Ix7gkCtFirRnKwrtMq20ORPv9fwmOOOsa9Q,19532
+langroid/language_models/model_info.py,sha256=BmyHgyf0qrcF4R9cUQAu-RckImLo4pKcRWSzg5q1AhI,19325
 langroid/language_models/openai_gpt.py,sha256=qz-KN8phkVglZ6GeqXDwW60Dr1--ZhhKdTEaXwftRZA,93543
 langroid/language_models/provider_params.py,sha256=upG4cBrX8fcvAo1g7fcsv-rBbsfypIqcDRRV9m1hohU,4846
 langroid/language_models/utils.py,sha256=n55Oe2_V_4VNGhytvPWLYC-0tFS07RTjN83KWl-p_MI,6032
@@ -139,7 +139,7 @@ langroid/vector_store/pineconedb.py,sha256=7V0Bkt4ZrOR3V90tdXvdFmyNGuww7SFdyPq7-
 langroid/vector_store/postgres.py,sha256=TY_VshimwFZglYgKYm7Qn1F-dCSL8GsXRTgmh7VTe9c,16110
 langroid/vector_store/qdrantdb.py,sha256=mqxMOrcLAQpl0opuL8vXhdIt6ppv2zYyAvddHZoEW0Y,19184
 langroid/vector_store/weaviatedb.py,sha256=BS95bxVKNYfQc9VPb85a1HlcgnXfAkgMzjydnjCgRHc,11853
-langroid-0.59.7.dist-info/METADATA,sha256=PEdzKonu-UCwiNQjBg0I-BqZGbY0-KYCB7UDs4_xbPI,66517
-langroid-0.59.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-langroid-0.59.7.dist-info/licenses/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
-langroid-0.59.7.dist-info/RECORD,,
+langroid-0.59.9.dist-info/METADATA,sha256=wJlApwHFS887eho2UVRilBDaM4UHzKdzh4xK5P3WvO4,66517
+langroid-0.59.9.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+langroid-0.59.9.dist-info/licenses/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
+langroid-0.59.9.dist-info/RECORD,,