langroid 0.58.2__py3-none-any.whl → 0.59.0b1__py3-none-any.whl

This diff compares the contents of two package versions as published to a supported public registry, and is provided for informational purposes only.
Files changed (106)
  1. langroid/agent/base.py +39 -17
  2. langroid/agent/base.py-e +2216 -0
  3. langroid/agent/callbacks/chainlit.py +2 -1
  4. langroid/agent/chat_agent.py +73 -55
  5. langroid/agent/chat_agent.py-e +2086 -0
  6. langroid/agent/chat_document.py +7 -7
  7. langroid/agent/chat_document.py-e +513 -0
  8. langroid/agent/openai_assistant.py +9 -9
  9. langroid/agent/openai_assistant.py-e +882 -0
  10. langroid/agent/special/arangodb/arangodb_agent.py +10 -18
  11. langroid/agent/special/arangodb/arangodb_agent.py-e +648 -0
  12. langroid/agent/special/arangodb/tools.py +3 -3
  13. langroid/agent/special/doc_chat_agent.py +16 -14
  14. langroid/agent/special/lance_rag/critic_agent.py +2 -2
  15. langroid/agent/special/lance_rag/query_planner_agent.py +4 -4
  16. langroid/agent/special/lance_tools.py +6 -5
  17. langroid/agent/special/lance_tools.py-e +61 -0
  18. langroid/agent/special/neo4j/neo4j_chat_agent.py +3 -7
  19. langroid/agent/special/neo4j/neo4j_chat_agent.py-e +430 -0
  20. langroid/agent/special/relevance_extractor_agent.py +1 -1
  21. langroid/agent/special/sql/sql_chat_agent.py +11 -3
  22. langroid/agent/task.py +9 -87
  23. langroid/agent/task.py-e +2418 -0
  24. langroid/agent/tool_message.py +33 -17
  25. langroid/agent/tool_message.py-e +400 -0
  26. langroid/agent/tools/file_tools.py +4 -2
  27. langroid/agent/tools/file_tools.py-e +234 -0
  28. langroid/agent/tools/mcp/fastmcp_client.py +19 -6
  29. langroid/agent/tools/mcp/fastmcp_client.py-e +584 -0
  30. langroid/agent/tools/orchestration.py +22 -17
  31. langroid/agent/tools/orchestration.py-e +301 -0
  32. langroid/agent/tools/recipient_tool.py +3 -3
  33. langroid/agent/tools/task_tool.py +22 -16
  34. langroid/agent/tools/task_tool.py-e +249 -0
  35. langroid/agent/xml_tool_message.py +90 -35
  36. langroid/agent/xml_tool_message.py-e +392 -0
  37. langroid/cachedb/base.py +1 -1
  38. langroid/embedding_models/base.py +2 -2
  39. langroid/embedding_models/models.py +3 -7
  40. langroid/embedding_models/models.py-e +563 -0
  41. langroid/exceptions.py +4 -1
  42. langroid/language_models/azure_openai.py +2 -2
  43. langroid/language_models/azure_openai.py-e +134 -0
  44. langroid/language_models/base.py +6 -4
  45. langroid/language_models/base.py-e +812 -0
  46. langroid/language_models/client_cache.py +64 -0
  47. langroid/language_models/config.py +2 -4
  48. langroid/language_models/config.py-e +18 -0
  49. langroid/language_models/model_info.py +9 -1
  50. langroid/language_models/model_info.py-e +483 -0
  51. langroid/language_models/openai_gpt.py +119 -20
  52. langroid/language_models/openai_gpt.py-e +2280 -0
  53. langroid/language_models/provider_params.py +3 -22
  54. langroid/language_models/provider_params.py-e +153 -0
  55. langroid/mytypes.py +11 -4
  56. langroid/mytypes.py-e +132 -0
  57. langroid/parsing/code_parser.py +1 -1
  58. langroid/parsing/file_attachment.py +1 -1
  59. langroid/parsing/file_attachment.py-e +246 -0
  60. langroid/parsing/md_parser.py +14 -4
  61. langroid/parsing/md_parser.py-e +574 -0
  62. langroid/parsing/parser.py +22 -7
  63. langroid/parsing/parser.py-e +410 -0
  64. langroid/parsing/repo_loader.py +3 -1
  65. langroid/parsing/repo_loader.py-e +812 -0
  66. langroid/parsing/search.py +1 -1
  67. langroid/parsing/url_loader.py +17 -51
  68. langroid/parsing/url_loader.py-e +683 -0
  69. langroid/parsing/urls.py +5 -4
  70. langroid/parsing/urls.py-e +279 -0
  71. langroid/prompts/prompts_config.py +1 -1
  72. langroid/pydantic_v1/__init__.py +45 -6
  73. langroid/pydantic_v1/__init__.py-e +36 -0
  74. langroid/pydantic_v1/main.py +11 -4
  75. langroid/pydantic_v1/main.py-e +11 -0
  76. langroid/utils/configuration.py +13 -11
  77. langroid/utils/configuration.py-e +141 -0
  78. langroid/utils/constants.py +1 -1
  79. langroid/utils/constants.py-e +32 -0
  80. langroid/utils/globals.py +21 -5
  81. langroid/utils/globals.py-e +49 -0
  82. langroid/utils/html_logger.py +2 -1
  83. langroid/utils/html_logger.py-e +825 -0
  84. langroid/utils/object_registry.py +1 -1
  85. langroid/utils/object_registry.py-e +66 -0
  86. langroid/utils/pydantic_utils.py +55 -28
  87. langroid/utils/pydantic_utils.py-e +602 -0
  88. langroid/utils/types.py +2 -2
  89. langroid/utils/types.py-e +113 -0
  90. langroid/vector_store/base.py +3 -3
  91. langroid/vector_store/lancedb.py +5 -5
  92. langroid/vector_store/lancedb.py-e +404 -0
  93. langroid/vector_store/meilisearch.py +2 -2
  94. langroid/vector_store/pineconedb.py +4 -4
  95. langroid/vector_store/pineconedb.py-e +427 -0
  96. langroid/vector_store/postgres.py +1 -1
  97. langroid/vector_store/qdrantdb.py +3 -3
  98. langroid/vector_store/weaviatedb.py +1 -1
  99. {langroid-0.58.2.dist-info → langroid-0.59.0b1.dist-info}/METADATA +3 -2
  100. langroid-0.59.0b1.dist-info/RECORD +181 -0
  101. langroid/agent/special/doc_chat_task.py +0 -0
  102. langroid/mcp/__init__.py +0 -1
  103. langroid/mcp/server/__init__.py +0 -1
  104. langroid-0.58.2.dist-info/RECORD +0 -145
  105. {langroid-0.58.2.dist-info → langroid-0.59.0b1.dist-info}/WHEEL +0 -0
  106. {langroid-0.58.2.dist-info → langroid-0.59.0b1.dist-info}/licenses/LICENSE +0 -0
langroid/agent/callbacks/chainlit.py
@@ -16,8 +16,9 @@ from typing import (
     no_type_check,
 )
 
+from pydantic_settings import BaseSettings
+
 from langroid.exceptions import LangroidImportError
-from langroid.pydantic_v1 import BaseSettings
 
 try:
     import chainlit as cl
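The only change to langroid/agent/callbacks/chainlit.py is the import source: BaseSettings now comes from the pydantic-settings package rather than the langroid.pydantic_v1 compatibility shim. A minimal sketch of a settings class against the new import (the class and field names below are illustrative, not taken from langroid):

# Sketch, assuming only the pydantic-settings dependency; not langroid code.
from pydantic_settings import BaseSettings, SettingsConfigDict


class ChainlitDemoSettings(BaseSettings):
    # Values come from environment variables (e.g. DEMO_USER_NAME) or the defaults below.
    model_config = SettingsConfigDict(env_prefix="DEMO_")

    user_name: str = "user"
    show_steps: bool = True


settings = ChainlitDemoSettings()
print(settings.model_dump())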
langroid/agent/chat_agent.py
@@ -8,6 +8,8 @@ from inspect import isclass
 from typing import Any, Dict, List, Optional, Self, Set, Tuple, Type, Union, cast
 
 import openai
+from pydantic import BaseModel, ValidationError
+from pydantic.fields import ModelPrivateAttr
 from rich import print
 from rich.console import Console
 from rich.markup import escape
@@ -32,7 +34,6 @@ from langroid.language_models.base import (
 )
 from langroid.language_models.openai_gpt import OpenAIGPT
 from langroid.mytypes import Entity, NonToolAction
-from langroid.pydantic_v1 import BaseModel, ValidationError
 from langroid.utils.configuration import settings
 from langroid.utils.object_registry import ObjectRegistry
 from langroid.utils.output import status
@@ -730,7 +731,10 @@ class ChatAgent(Agent):
 
             if use:
                 tool_class = self.llm_tools_map[t]
-                if tool_class._allow_llm_use:
+                allow_llm_use = tool_class._allow_llm_use
+                if isinstance(allow_llm_use, ModelPrivateAttr):
+                    allow_llm_use = allow_llm_use.default
+                if allow_llm_use:
                     self.llm_tools_usable.add(t)
                     self.llm_functions_usable.add(t)
             else:
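Note on the new branch above: in Pydantic v2, reading a private attribute such as _allow_llm_use off the class (rather than an instance) may yield the ModelPrivateAttr wrapper instead of its value, hence the isinstance check and the fallback to .default. A standalone sketch of the same unwrap, with illustrative names that are not langroid code:

# Sketch: unwrap a possibly-wrapped private attribute, mirroring the diff above.
from pydantic import BaseModel, PrivateAttr
from pydantic.fields import ModelPrivateAttr


class DemoTool(BaseModel):
    _allow_llm_use: bool = PrivateAttr(default=True)


def private_default(cls: type[BaseModel], name: str) -> object:
    value = getattr(cls, name, None)
    if isinstance(value, ModelPrivateAttr):
        # Class-level access returned the descriptor; use its declared default.
        value = value.default
    return value


print(DemoTool()._allow_llm_use)                    # instance access: True
print(private_default(DemoTool, "_allow_llm_use"))  # class access, unwrapped if wrapped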
@@ -844,7 +848,7 @@ class ChatAgent(Agent):
                 use_functions_api,
                 use_tools,
             ) = self.saved_requests_and_tool_setings
-            self.config = self.config.copy()
+            self.config = self.config.model_copy()
             self.enabled_requests_for_inference = requests_for_inference
             self.config.use_functions_api = use_functions_api
             self.config.use_tools = use_tools
@@ -884,15 +888,13 @@ class ChatAgent(Agent):
             if use:
                 # We must copy `llm_tools_usable` so the base agent
                 # is unmodified
-                self.llm_tools_usable = copy.copy(self.llm_tools_usable)
-                self.llm_functions_usable = copy.copy(
-                    self.llm_functions_usable
-                )
+                self.llm_tools_usable = self.llm_tools_usable.copy()
+                self.llm_functions_usable = self.llm_functions_usable.copy()
                 if handle:
                     # If handling the tool, do the same for `llm_tools_handled`
-                    self.llm_tools_handled = copy.copy(self.llm_tools_handled)
-                    self.llm_functions_handled = copy.copy(
-                        self.llm_functions_handled
+                    self.llm_tools_handled = self.llm_tools_handled.copy()
+                    self.llm_functions_handled = (
+                        self.llm_functions_handled.copy()
                     )
             # Enable `output_type`
             self.enable_message(
@@ -941,7 +943,7 @@ class ChatAgent(Agent):
                 defaults=self.config.output_format_include_defaults,
             ).parameters
         else:
-            output_format_schema = output_type.schema()
+            output_format_schema = output_type.model_json_schema()
 
         format_schema_for_strict(output_format_schema)
 
@@ -960,7 +962,7 @@ class ChatAgent(Agent):
                 output_type.default_value("request")
             }
             if self.config.use_functions_api:
-                self.config = self.config.copy()
+                self.config = self.config.model_copy()
                 self.config.use_functions_api = False
                 self.config.use_tools = True
 
@@ -1010,7 +1012,7 @@ class ChatAgent(Agent):
         Args:
             message_class: The only ToolMessage class to allow
         """
-        request = message_class.__fields__["request"].default
+        request = message_class.model_fields["request"].default
         to_remove = [r for r in self.llm_tools_usable if r != request]
         for r in to_remove:
             self.llm_tools_usable.discard(r)
@@ -1054,7 +1056,7 @@ class ChatAgent(Agent):
 
         content = attempt.arguments
 
-        content_any = self.output_format.parse_obj(content)
+        content_any = self.output_format.model_validate(content)
 
         if issubclass(self.output_format, PydanticWrapper):
             message.content_any = content_any.value  # type: ignore
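The ChatAgent hunks above are mechanical Pydantic v1-to-v2 renames. A quick sketch of the mapping on a throwaway model (not langroid code):

# Sketch of the v1 -> v2 renames applied throughout this diff.
from pydantic import BaseModel


class Point(BaseModel):
    x: int = 0
    y: int = 0


p = Point(x=1, y=2)
p2 = p.model_copy()                                # was: p.copy()
schema = Point.model_json_schema()                 # was: Point.schema()
fields = Point.model_fields                        # was: Point.__fields__
q = Point.model_validate({"x": 3, "y": 4})         # was: Point.parse_obj(...)
r = Point.model_validate_json('{"x": 5, "y": 6}')  # was: Point.parse_raw(...)
print(p2, schema["title"], list(fields), q, r)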
@@ -1094,34 +1096,36 @@ class ChatAgent(Agent):
         try:
             tools = super().get_tool_messages(msg, all_tools)
         except ValidationError as ve:
-            tool_class = ve.model
-            if issubclass(tool_class, ToolMessage):
-                was_strict = (
-                    self.config.use_functions_api
-                    and self.config.use_tools_api
-                    and self._strict_mode_for_tool(tool_class)
-                )
-                # If the result of strict output for a tool using the
-                # OpenAI tools API fails to parse, we infer that the
-                # schema edits necessary for compatibility prevented
-                # adherence to the underlying `ToolMessage` schema and
-                # disable strict output for the tool
-                if was_strict:
-                    name = tool_class.default_value("request")
-                    self.disable_strict_tools_set.add(name)
-                    logging.warning(
-                        f"""
-                        Validation error occured with strict tool format.
-                        Disabling strict mode for the {name} tool.
-                        """
+            # Check if tool class was attached to the exception
+            if hasattr(ve, "tool_class") and ve.tool_class:
+                tool_class = ve.tool_class  # type: ignore
+                if issubclass(tool_class, ToolMessage):
+                    was_strict = (
+                        self.config.use_functions_api
+                        and self.config.use_tools_api
+                        and self._strict_mode_for_tool(tool_class)
                     )
-            else:
-                # We will trigger the strict recovery mechanism to force
-                # the LLM to correct its output, allowing us to parse
-                if isinstance(msg, ChatDocument):
-                    self.tool_error = msg.metadata.sender == Entity.LLM
+                    # If the result of strict output for a tool using the
+                    # OpenAI tools API fails to parse, we infer that the
+                    # schema edits necessary for compatibility prevented
+                    # adherence to the underlying `ToolMessage` schema and
+                    # disable strict output for the tool
+                    if was_strict:
+                        name = tool_class.default_value("request")
+                        self.disable_strict_tools_set.add(name)
+                        logging.warning(
+                            f"""
+                            Validation error occured with strict tool format.
+                            Disabling strict mode for the {name} tool.
+                            """
+                        )
             else:
-                self.tool_error = most_recent_sent_by_llm
+                # We will trigger the strict recovery mechanism to force
+                # the LLM to correct its output, allowing us to parse
+                if isinstance(msg, ChatDocument):
+                    self.tool_error = msg.metadata.sender == Entity.LLM
+                else:
+                    self.tool_error = most_recent_sent_by_llm
 
         if was_llm:
             raise ve
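Pydantic v2's ValidationError no longer exposes a .model attribute, so this handler now relies on the raising code having attached the failing ToolMessage subclass to the exception as tool_class. A hedged sketch of that hand-off with hypothetical names (in langroid the attachment would happen in the base Agent's tool parsing, which is not shown in this hunk):

# Sketch (hypothetical names): attach the failing tool class to the error so a
# downstream handler can recover it, since Pydantic v2 dropped `ve.model`.
from pydantic import BaseModel, ValidationError


class DemoTool(BaseModel):
    request: str = "demo_tool"
    count: int


def parse_tool(payload: dict) -> DemoTool:
    try:
        return DemoTool.model_validate(payload)
    except ValidationError as ve:
        ve.tool_class = DemoTool  # type: ignore[attr-defined]
        raise


try:
    parse_tool({"count": "not-an-int"})
except ValidationError as ve:
    if hasattr(ve, "tool_class") and ve.tool_class:
        print(f"validation failed for {ve.tool_class.__name__}")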
@@ -1168,7 +1172,9 @@ class ChatAgent(Agent):
         request = self.tool.request
         if request not in agent.llm_tools_map:
             return None
-        tool = agent.llm_tools_map[request].parse_raw(self.tool.to_json())
+        tool = agent.llm_tools_map[request].model_validate_json(
+            self.tool.to_json()
+        )
 
         return agent.handle_tool_message(tool)
 
@@ -1187,7 +1193,9 @@ class ChatAgent(Agent):
         request = self.tool.request
         if request not in agent.llm_tools_map:
             return None
-        tool = agent.llm_tools_map[request].parse_raw(self.tool.to_json())
+        tool = agent.llm_tools_map[request].model_validate_json(
+            self.tool.to_json()
+        )
 
         return await agent.handle_tool_message_async(tool)
 
@@ -1269,19 +1277,29 @@ class ChatAgent(Agent):
         """
         parent_message: ChatDocument | None = message.parent
         tools = [] if parent_message is None else parent_message.tool_messages
-        truncate_tools = [t for t in tools if t._max_retained_tokens is not None]
+        truncate_tools = []
+        for t in tools:
+            max_retained_tokens = t._max_retained_tokens
+            if isinstance(max_retained_tokens, ModelPrivateAttr):
+                max_retained_tokens = max_retained_tokens.default
+            if max_retained_tokens is not None:
+                truncate_tools.append(t)
         limiting_tool = truncate_tools[0] if len(truncate_tools) > 0 else None
-        if limiting_tool is not None and limiting_tool._max_retained_tokens is not None:
-            tool_name = limiting_tool.default_value("request")
-            max_tokens: int = limiting_tool._max_retained_tokens
-            truncation_warning = f"""
-                The result of the {tool_name} tool were too large,
-                and has been truncated to {max_tokens} tokens.
-                To obtain the full result, the tool needs to be re-used.
-                """
-            self.truncate_message(
-                message.metadata.msg_idx, max_tokens, truncation_warning
-            )
+        if limiting_tool is not None:
+            max_retained_tokens = limiting_tool._max_retained_tokens
+            if isinstance(max_retained_tokens, ModelPrivateAttr):
+                max_retained_tokens = max_retained_tokens.default
+            if max_retained_tokens is not None:
+                tool_name = limiting_tool.default_value("request")
+                max_tokens: int = max_retained_tokens
+                truncation_warning = f"""
+                    The result of the {tool_name} tool were too large,
+                    and has been truncated to {max_tokens} tokens.
+                    To obtain the full result, the tool needs to be re-used.
+                    """
+                self.truncate_message(
+                    message.metadata.msg_idx, max_tokens, truncation_warning
+                )
 
     def llm_response(
         self, message: Optional[str | ChatDocument] = None
@@ -1743,7 +1761,7 @@ class ChatAgent(Agent):
                 function=spec,
             )
         elif issubclass(self.output_format, BaseModel):
-            param_spec = self.output_format.schema()
+            param_spec = self.output_format.model_json_schema()
             format_schema_for_strict(param_spec)
 
             output_format = OpenAIJsonSchemaSpec(
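For the last hunk: model_json_schema() is the v2 replacement for schema(), and its output is what format_schema_for_strict then adjusts for strict structured output. A rough sketch of building an OpenAI-style strict json_schema response format from a model schema (illustrative payload only; format_schema_for_strict and OpenAIJsonSchemaSpec are langroid internals and not reproduced here):

# Sketch: derive a strict structured-output spec from a Pydantic v2 model schema.
from pydantic import BaseModel


class FinalAnswer(BaseModel):
    answer: str
    confidence: float


param_spec = FinalAnswer.model_json_schema()   # was: FinalAnswer.schema() in v1
param_spec["additionalProperties"] = False     # strict mode requires this on objects

response_format = {
    "type": "json_schema",
    "json_schema": {
        "name": "FinalAnswer",
        "schema": param_spec,
        "strict": True,
    },
}
print(response_format)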