langroid 0.1.227__py3-none-any.whl → 0.1.229__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -63,7 +63,6 @@ class LLMFunctionCall(BaseModel):
     """

     name: str  # name of function to call
-    to: str = ""  # intended recipient
     arguments: Optional[Dict[str, Any]] = None

     @staticmethod
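For orientation, a minimal sketch (not taken from the package source) of the fields LLMFunctionCall keeps in 0.1.229 once the `to` field is dropped; methods and other details are omitted:

```python
# Sketch only: the remaining LLMFunctionCall fields after this change.
from typing import Any, Dict, Optional
from pydantic import BaseModel

class LLMFunctionCall(BaseModel):
    name: str  # name of function to call
    # any intended recipient now has to travel inside `arguments`
    arguments: Optional[Dict[str, Any]] = None
```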
@@ -227,14 +226,9 @@ class LLMResponse(BaseModel):
         if self.function_call is not None:
             # in this case we ignore message, since all information is in function_call
             msg = ""
-            # recipient may either have been specified as a special field "to" in
-            # function_call, or as a parameter "recipient" in the arguments
-            # (the latter can happen when using a Tool that has a 'recipient' parameter)
-            recipient = self.function_call.to
-            if recipient == "":
-                args = self.function_call.arguments
-                if isinstance(args, dict):
-                    recipient = args.get("recipient", "")
+            args = self.function_call.arguments
+            if isinstance(args, dict):
+                recipient = args.get("recipient", "")
             return recipient, msg
         else:
             msg = self.message
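The hunk above simplifies recipient resolution: with `LLMFunctionCall.to` gone, a recipient can only come from a `"recipient"` key inside `function_call.arguments`. A hedged sketch of the resulting behavior (a standalone helper written for illustration; the real logic lives in the LLMResponse method shown in the diff):

```python
# Illustration of the 0.1.229 behavior: the recipient is read only from the
# function-call arguments; the old fallback to a dedicated `to` field is gone.
from typing import Any, Dict, Optional

def extract_recipient(arguments: Optional[Dict[str, Any]]) -> str:
    """Mirror of the simplified logic: the recipient, if any, must appear as a
    'recipient' key in the function-call arguments."""
    if isinstance(arguments, dict):
        return arguments.get("recipient", "")
    return ""

# Example: a function call whose arguments carry the recipient.
assert extract_recipient({"recipient": "DocAgent", "query": "LK-99"}) == "DocAgent"
assert extract_recipient(None) == ""
```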
@@ -69,7 +69,7 @@ class OpenAIChatModel(str, Enum):
     GPT3_5_TURBO = "gpt-3.5-turbo-1106"
     GPT4 = "gpt-4"
     GPT4_32K = "gpt-4-32k"
-    GPT4_TURBO = "gpt-4-turbo-preview"
+    GPT4_TURBO = "gpt-4-turbo"


 class OpenAICompletionModel(str, Enum):
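Because the GPT4_TURBO member now resolves to "gpt-4-turbo", code that selects the model through the enum picks up the new model name without changes. A small usage sketch, assuming langroid's usual OpenAIGPTConfig config class and its chat_model parameter:

```python
# Sketch: selecting the updated model alias via the enum changed above.
from langroid.language_models.openai_gpt import OpenAIChatModel, OpenAIGPTConfig

llm_config = OpenAIGPTConfig(chat_model=OpenAIChatModel.GPT4_TURBO)
print(OpenAIChatModel.GPT4_TURBO.value)  # "gpt-4-turbo" as of 0.1.229
```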
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langroid
-Version: 0.1.227
+Version: 0.1.229
 Summary: Harness LLMs with Multi-Agent Programming
 License: MIT
 Author: Prasad Chalasani
@@ -18,10 +18,8 @@ Provides-Extra: mkdocs
 Provides-Extra: mysql
 Provides-Extra: neo4j
 Provides-Extra: postgres
-Provides-Extra: sciphi
 Provides-Extra: transformers
 Provides-Extra: unstructured
-Requires-Dist: agent-search (>=0.0.7,<0.0.8) ; extra == "sciphi"
 Requires-Dist: aiohttp (>=3.9.1,<4.0.0)
 Requires-Dist: async-generator (>=1.10,<2.0)
 Requires-Dist: autopep8 (>=2.0.2,<3.0.0)
@@ -42,7 +42,6 @@ langroid/agent/tools/google_search_tool.py,sha256=cQxcNtb8XCNpOo_yCeYRwG_y-OATjP
 langroid/agent/tools/metaphor_search_tool.py,sha256=NKHss-AkI942_XhfMgUctAwHjIHpqp5NfYIebKV4UcE,2454
 langroid/agent/tools/recipient_tool.py,sha256=61vdKv06qgVdtnE3gxjzV8RvUEy8JhbC9eWa0J0BPdw,9171
 langroid/agent/tools/run_python_code.py,sha256=V3mHdHQYn0M0PAtyoHxjNvk6KvWWcQ4ugo0TOKc8HyI,1752
-langroid/agent/tools/sciphi_search_rag_tool.py,sha256=IAEgZY5-euQh8MndMzZnn1XVxaItvWYB2VF9-YHfunk,2496
 langroid/agent/tools/segment_extract_tool.py,sha256=W39poS7Av2EuJ34tGKhLhzgj3zEyZnBplpSt2goRAp4,1285
 langroid/agent_config.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/cachedb/__init__.py,sha256=ygx42MS7fvh2UwRMjukTk3dWBkzv_rACebTBRYa_MkU,148
@@ -60,10 +59,10 @@ langroid/embedding_models/protoc/embeddings_pb2_grpc.py,sha256=9dYQqkW3JPyBpSEje
 langroid/embedding_models/remote_embeds.py,sha256=6_kjXByVbqhY9cGwl9R83ZcYC2km-nGieNNAo1McHaY,5151
 langroid/language_models/__init__.py,sha256=5L9ndEEC8iLJHjDJmYFTnv6-2-3xsxWUMHcugR8IeDs,821
 langroid/language_models/azure_openai.py,sha256=ncRCbKooqLVOY-PWQUIo9C3yTuKEFbAwyngXT_M4P7k,5989
-langroid/language_models/base.py,sha256=4ybrbvOnoWzEVzVuZ3AStsl8ELoljiKtgtdykUzRSxg,21014
+langroid/language_models/base.py,sha256=Yy_6TP9Qj5CmNtDVQfbcyfytCsvGyow0e1OeqhWGY0A,20638
 langroid/language_models/config.py,sha256=5UF3DzO1a-Dfsc3vghE0XGq7g9t_xDsRCsuRiU4dgBg,366
 langroid/language_models/openai_assistants.py,sha256=9K-DEAL2aSWHeXj2hwCo2RAlK9_1oCPtqX2u1wISCj8,36
-langroid/language_models/openai_gpt.py,sha256=6CE6I_hmnHfRIMJMh4qhVeKijgKEm_LNcWNn3vLMLlM,49680
+langroid/language_models/openai_gpt.py,sha256=3W0gi7_Ja0c0vuT8SDv8ioOWXyUKs7zJORx8BV-QT2g,49672
 langroid/language_models/prompt_formatter/__init__.py,sha256=9JXFF22QNMmbQV1q4nrIeQVTtA3Tx8tEZABLtLBdFyc,352
 langroid/language_models/prompt_formatter/base.py,sha256=eDS1sgRNZVnoajwV_ZIha6cba5Dt8xjgzdRbPITwx3Q,1221
 langroid/language_models/prompt_formatter/hf_formatter.py,sha256=TFL6ppmeQWnzr6CKQzRZFYY810zE1mr8DZnhw6i85ok,5217
@@ -121,7 +120,7 @@ langroid/vector_store/meilisearch.py,sha256=d2huA9P-NoYRuAQ9ZeXJmMKr7ry8u90RUSR2
 langroid/vector_store/momento.py,sha256=9cui31TTrILid2KIzUpBkN2Ey3g_CZWOQVdaFsA4Ors,10045
 langroid/vector_store/qdrant_cloud.py,sha256=3im4Mip0QXLkR6wiqVsjV1QvhSElfxdFSuDKddBDQ-4,188
 langroid/vector_store/qdrantdb.py,sha256=foKRxRv0BBony6S4Vt0Vav9Rn9HMxZvcIh1cE7nosFE,13524
-langroid-0.1.227.dist-info/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
-langroid-0.1.227.dist-info/METADATA,sha256=pm5XedC6otRVliKik-XnZd7l0DUn9QpSXnA_tFTiDfI,47951
-langroid-0.1.227.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
-langroid-0.1.227.dist-info/RECORD,,
+langroid-0.1.229.dist-info/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
+langroid-0.1.229.dist-info/METADATA,sha256=2FXDkWjDrhYNvrCDCdshNanmBCAzW8N1t_4UUR9iNVI,47863
+langroid-0.1.229.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+langroid-0.1.229.dist-info/RECORD,,
@@ -1,79 +0,0 @@
-"""
-A tool which returns a Search RAG response from the SciPhi API.
-their titles, links, summaries. Since the tool is stateless (i.e. does not need
-access to agent state), it can be enabled for any agent, without having to define a
-special method inside the agent: `agent.enable_message(SciPhiSearchRAGTool)`
-
-Example return output appears as follows below:
-
-<-- Query -->
-```
-Find 3 results on the internet about the LK-99 superconducting material.
-``
-
-<-- Response (compressed for this example)-->
-```
-[ result1 ]
-
-[ result2 ]
-
-[ result3 ]
-
-```
-
-NOTE: Using this tool requires getting an API key from sciphi.ai.
-Setup is as simple as shown below
-# Get a free API key at https://www.sciphi.ai/account
-# export SCIPHI_API_KEY=$MY_SCIPHI_API_KEY before running the agent
-# OR add SCIPHI_API_KEY=$MY_SCIPHI_API_KEY to your .env file
-
-This tool requires installing langroid with the `sciphi` extra, e.g.
-`pip install langroid[sciphi]` or `poetry add langroid[sciphi]`
-(it installs the `agent-search` package from pypi).
-
-For more information, please refer to the official docs:
-https://agent-search.readthedocs.io/en/latest/
-"""
-
-from typing import List
-
-try:
-    from agent_search import SciPhi
-except ImportError:
-    raise ImportError(
-        "You are attempting to use the `agent-search` library;"
-        "To use it, please install langroid with the `sciphi` extra, e.g. "
-        "`pip install langroid[sciphi]` or `poetry add langroid[sciphi]` "
-        "(it installs the `agent-search` package from pypi)."
-    )
-
-from langroid.agent.tool_message import ToolMessage
-
-
-class SciPhiSearchRAGTool(ToolMessage):
-    request: str = "web_search_rag"
-    purpose: str = """
-        To search the web with provider <search_provider> and
-        return a response summary with llm model <llm_model> the given <query>.
-        """
-    query: str
-
-    def handle(self) -> str:
-        rag_response = SciPhi().get_search_rag_response(
-            query=self.query, search_provider="bing", llm_model="SciPhi/Sensei-7B-V1"
-        )
-        result = rag_response["response"]
-        result = (
-            f"### RAG Response:\n{result}\n\n"
-            + "### Related Queries:\n"
-            + "\n".join(rag_response["related_queries"])
-        )
-        return result  # type: ignore
-
-    @classmethod
-    def examples(cls) -> List["ToolMessage"]:
-        return [
-            cls(
-                query="When was the Llama2 Large Language Model (LLM) released?",
-            ),
-        ]