langroid 0.58.2__py3-none-any.whl → 0.59.0b1__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package versions as they appear in their respective public registries.
- langroid/agent/base.py +39 -17
- langroid/agent/base.py-e +2216 -0
- langroid/agent/callbacks/chainlit.py +2 -1
- langroid/agent/chat_agent.py +73 -55
- langroid/agent/chat_agent.py-e +2086 -0
- langroid/agent/chat_document.py +7 -7
- langroid/agent/chat_document.py-e +513 -0
- langroid/agent/openai_assistant.py +9 -9
- langroid/agent/openai_assistant.py-e +882 -0
- langroid/agent/special/arangodb/arangodb_agent.py +10 -18
- langroid/agent/special/arangodb/arangodb_agent.py-e +648 -0
- langroid/agent/special/arangodb/tools.py +3 -3
- langroid/agent/special/doc_chat_agent.py +16 -14
- langroid/agent/special/lance_rag/critic_agent.py +2 -2
- langroid/agent/special/lance_rag/query_planner_agent.py +4 -4
- langroid/agent/special/lance_tools.py +6 -5
- langroid/agent/special/lance_tools.py-e +61 -0
- langroid/agent/special/neo4j/neo4j_chat_agent.py +3 -7
- langroid/agent/special/neo4j/neo4j_chat_agent.py-e +430 -0
- langroid/agent/special/relevance_extractor_agent.py +1 -1
- langroid/agent/special/sql/sql_chat_agent.py +11 -3
- langroid/agent/task.py +9 -87
- langroid/agent/task.py-e +2418 -0
- langroid/agent/tool_message.py +33 -17
- langroid/agent/tool_message.py-e +400 -0
- langroid/agent/tools/file_tools.py +4 -2
- langroid/agent/tools/file_tools.py-e +234 -0
- langroid/agent/tools/mcp/fastmcp_client.py +19 -6
- langroid/agent/tools/mcp/fastmcp_client.py-e +584 -0
- langroid/agent/tools/orchestration.py +22 -17
- langroid/agent/tools/orchestration.py-e +301 -0
- langroid/agent/tools/recipient_tool.py +3 -3
- langroid/agent/tools/task_tool.py +22 -16
- langroid/agent/tools/task_tool.py-e +249 -0
- langroid/agent/xml_tool_message.py +90 -35
- langroid/agent/xml_tool_message.py-e +392 -0
- langroid/cachedb/base.py +1 -1
- langroid/embedding_models/base.py +2 -2
- langroid/embedding_models/models.py +3 -7
- langroid/embedding_models/models.py-e +563 -0
- langroid/exceptions.py +4 -1
- langroid/language_models/azure_openai.py +2 -2
- langroid/language_models/azure_openai.py-e +134 -0
- langroid/language_models/base.py +6 -4
- langroid/language_models/base.py-e +812 -0
- langroid/language_models/client_cache.py +64 -0
- langroid/language_models/config.py +2 -4
- langroid/language_models/config.py-e +18 -0
- langroid/language_models/model_info.py +9 -1
- langroid/language_models/model_info.py-e +483 -0
- langroid/language_models/openai_gpt.py +119 -20
- langroid/language_models/openai_gpt.py-e +2280 -0
- langroid/language_models/provider_params.py +3 -22
- langroid/language_models/provider_params.py-e +153 -0
- langroid/mytypes.py +11 -4
- langroid/mytypes.py-e +132 -0
- langroid/parsing/code_parser.py +1 -1
- langroid/parsing/file_attachment.py +1 -1
- langroid/parsing/file_attachment.py-e +246 -0
- langroid/parsing/md_parser.py +14 -4
- langroid/parsing/md_parser.py-e +574 -0
- langroid/parsing/parser.py +22 -7
- langroid/parsing/parser.py-e +410 -0
- langroid/parsing/repo_loader.py +3 -1
- langroid/parsing/repo_loader.py-e +812 -0
- langroid/parsing/search.py +1 -1
- langroid/parsing/url_loader.py +17 -51
- langroid/parsing/url_loader.py-e +683 -0
- langroid/parsing/urls.py +5 -4
- langroid/parsing/urls.py-e +279 -0
- langroid/prompts/prompts_config.py +1 -1
- langroid/pydantic_v1/__init__.py +45 -6
- langroid/pydantic_v1/__init__.py-e +36 -0
- langroid/pydantic_v1/main.py +11 -4
- langroid/pydantic_v1/main.py-e +11 -0
- langroid/utils/configuration.py +13 -11
- langroid/utils/configuration.py-e +141 -0
- langroid/utils/constants.py +1 -1
- langroid/utils/constants.py-e +32 -0
- langroid/utils/globals.py +21 -5
- langroid/utils/globals.py-e +49 -0
- langroid/utils/html_logger.py +2 -1
- langroid/utils/html_logger.py-e +825 -0
- langroid/utils/object_registry.py +1 -1
- langroid/utils/object_registry.py-e +66 -0
- langroid/utils/pydantic_utils.py +55 -28
- langroid/utils/pydantic_utils.py-e +602 -0
- langroid/utils/types.py +2 -2
- langroid/utils/types.py-e +113 -0
- langroid/vector_store/base.py +3 -3
- langroid/vector_store/lancedb.py +5 -5
- langroid/vector_store/lancedb.py-e +404 -0
- langroid/vector_store/meilisearch.py +2 -2
- langroid/vector_store/pineconedb.py +4 -4
- langroid/vector_store/pineconedb.py-e +427 -0
- langroid/vector_store/postgres.py +1 -1
- langroid/vector_store/qdrantdb.py +3 -3
- langroid/vector_store/weaviatedb.py +1 -1
- {langroid-0.58.2.dist-info → langroid-0.59.0b1.dist-info}/METADATA +3 -2
- langroid-0.59.0b1.dist-info/RECORD +181 -0
- langroid/agent/special/doc_chat_task.py +0 -0
- langroid/mcp/__init__.py +0 -1
- langroid/mcp/server/__init__.py +0 -1
- langroid-0.58.2.dist-info/RECORD +0 -145
- {langroid-0.58.2.dist-info → langroid-0.59.0b1.dist-info}/WHEEL +0 -0
- {langroid-0.58.2.dist-info → langroid-0.59.0b1.dist-info}/licenses/LICENSE +0 -0
langroid/language_models/openai_gpt.py  (+119 -20)

@@ -12,6 +12,7 @@ from typing import (
     Callable,
     Dict,
     List,
+    Mapping,
     Optional,
     Tuple,
     Type,
@@ -24,6 +25,8 @@ from cerebras.cloud.sdk import AsyncCerebras, Cerebras
 from groq import AsyncGroq, Groq
 from httpx import Timeout
 from openai import AsyncOpenAI, OpenAI
+from pydantic import BaseModel
+from pydantic_settings import BaseSettings, SettingsConfigDict
 from rich import print
 from rich.markup import escape
 
@@ -78,7 +81,6 @@ from langroid.language_models.utils import (
     retry_with_exponential_backoff,
 )
 from langroid.parsing.parse_json import parse_imperfect_json
-from langroid.pydantic_v1 import BaseModel, BaseSettings
 from langroid.utils.configuration import settings
 from langroid.utils.constants import Colors
 from langroid.utils.system import friendly_error
@@ -220,7 +222,7 @@ class OpenAICallParams(BaseModel):
     extra_body: Dict[str, Any] | None = None  # additional params for API request body
 
     def to_dict_exclude_none(self) -> Dict[str, Any]:
-        return {k: v for k, v in self.dict().items() if v is not None}
+        return {k: v for k, v in self.model_dump().items() if v is not None}
 
 
 class LiteLLMProxyConfig(BaseSettings):
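This hunk, like many below, is part of the Pydantic v1 → v2 migration: v1's `.dict()` becomes v2's `.model_dump()`. A minimal standalone sketch of the equivalence, using a hypothetical model rather than langroid's own classes:

```python
from pydantic import BaseModel  # pydantic v2


class CallParams(BaseModel):  # hypothetical stand-in for OpenAICallParams
    temperature: float | None = None
    max_tokens: int | None = None


p = CallParams(temperature=0.2)
# v2 replacement for the removed v1 idiom `p.dict()`:
data = p.model_dump()  # {'temperature': 0.2, 'max_tokens': None}
# same filtering as to_dict_exclude_none() above:
clean = {k: v for k, v in data.items() if v is not None}  # {'temperature': 0.2}
```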
@@ -229,8 +231,7 @@ class LiteLLMProxyConfig(BaseSettings):
     api_key: str = ""  # read from env var LITELLM_API_KEY if set
     api_base: str = ""  # read from env var LITELLM_API_BASE if set
 
-    class Config:
-        env_prefix = "LITELLM_"
+    model_config = SettingsConfigDict(env_prefix="LITELLM_")
 
 
 class OpenAIGPTConfig(LLMConfig):
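The v1 inner `class Config` is replaced by pydantic-settings v2's `model_config = SettingsConfigDict(...)`; the behavior is unchanged: with `env_prefix="LITELLM_"`, fields are populated from `LITELLM_*` environment variables. A minimal sketch, assuming `pydantic-settings` v2 is installed:

```python
import os

from pydantic_settings import BaseSettings, SettingsConfigDict


class ProxySettings(BaseSettings):  # hypothetical stand-in for LiteLLMProxyConfig
    model_config = SettingsConfigDict(env_prefix="LITELLM_")

    api_key: str = ""
    api_base: str = ""


os.environ["LITELLM_API_KEY"] = "sk-test"
print(ProxySettings().api_key)  # -> "sk-test", read via the LITELLM_ prefix
```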
@@ -259,7 +260,7 @@ class OpenAIGPTConfig(LLMConfig):
     litellm_proxy: LiteLLMProxyConfig = LiteLLMProxyConfig()
     ollama: bool = False  # use ollama's OpenAI-compatible endpoint?
     min_output_tokens: int = 1
-    use_chat_for_completion = True  # do not change this, for OpenAI models!
+    use_chat_for_completion: bool = True  # do not change this, for OpenAI models!
     timeout: int = 20
     temperature: float = 0.2
     seed: int | None = 42
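The added `: bool` annotation is required by the v2 migration: pydantic v1 inferred a field from an unannotated class attribute, while pydantic v2 raises `PydanticUserError` ("A non-annotated attribute was detected") at class-definition time. A minimal reproduction sketch:

```python
from pydantic import BaseModel


# v2-style model: the annotation is required
class Ok(BaseModel):
    use_chat_for_completion: bool = True


# Under pydantic v2 this raises PydanticUserError at class definition:
#
# class Broken(BaseModel):
#     use_chat_for_completion = True
```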
@@ -287,6 +288,9 @@ class OpenAIGPTConfig(LLMConfig):
     langdb_params: LangDBParams = LangDBParams()
     portkey_params: PortkeyParams = PortkeyParams()
     headers: Dict[str, str] = {}
+    http_client_factory: Optional[Callable[[], Any]] = None  # Factory for httpx.Client
+    http_verify_ssl: bool = True  # Simple flag for SSL verification
+    http_client_config: Optional[Dict[str, Any]] = None  # Config dict for httpx.Client
 
     def __init__(self, **kwargs) -> None:  # type: ignore
         local_model = "api_base" in kwargs and kwargs["api_base"] is not None
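Three new config fields control how the underlying HTTP client is built; the priority order among them is spelled out in a later hunk. A hedged usage sketch, assuming langroid 0.59.0b1 and httpx installed (the model name is illustrative):

```python
import httpx

from langroid.language_models.openai_gpt import OpenAIGPTConfig

# Option 1: full control via a factory (most flexible, defeats client caching)
cfg_factory = OpenAIGPTConfig(
    chat_model="gpt-4o",
    http_client_factory=lambda: httpx.Client(verify=False),
)

# Option 2: a plain dict of httpx.Client kwargs (cacheable)
cfg_dict = OpenAIGPTConfig(
    chat_model="gpt-4o",
    http_client_config={"verify": False, "timeout": 30.0},
)

# Option 3: just disable SSL verification (cacheable; logs a warning)
cfg_no_ssl = OpenAIGPTConfig(chat_model="gpt-4o", http_verify_ssl=False)
```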
@@ -313,8 +317,43 @@ class OpenAIGPTConfig(LLMConfig):
 
         super().__init__(**kwargs)
 
-    class Config:
-        env_prefix = "OPENAI_"
+    model_config = SettingsConfigDict(env_prefix="OPENAI_")
+
+    def model_copy(
+        self, *, update: Mapping[str, Any] | None = None, deep: bool = False
+    ) -> "OpenAIGPTConfig":
+        """
+        Override model_copy to handle unpicklable fields properly.
+
+        This preserves fields like http_client_factory during normal copying
+        while still allowing exclusion for pickling operations.
+        """
+        # Save references to unpicklable fields
+        http_client_factory = self.http_client_factory
+        streamer = self.streamer
+        streamer_async = self.streamer_async
+
+        # Get the current model data, excluding problematic fields
+        data = self.model_dump(
+            exclude={"http_client_factory", "streamer", "streamer_async"}
+        )
+
+        # Apply any updates
+        if update:
+            data.update(update)
+
+        # Create a new instance with the copied data
+        new_instance = self.__class__(**data)
+
+        # Restore the unpicklable fields if they weren't overridden by update
+        if "http_client_factory" not in (update or {}):
+            new_instance.http_client_factory = http_client_factory
+        if "streamer" not in (update or {}):
+            new_instance.streamer = streamer
+        if "streamer_async" not in (update or {}):
+            new_instance.streamer_async = streamer_async
+
+        return new_instance
 
     def _validate_litellm(self) -> None:
         """
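Per its docstring, the `model_copy` override exists because unpicklable fields like `http_client_factory` cannot round-trip through `model_dump()`, so they are excluded from the dump and reattached by reference. A small sketch of the resulting behavior, assuming the field names above:

```python
import httpx

from langroid.language_models.openai_gpt import OpenAIGPTConfig

cfg = OpenAIGPTConfig(
    chat_model="gpt-4o",  # illustrative model name
    http_client_factory=lambda: httpx.Client(verify=False),
)

# The callable is carried over to the copy by reference...
copy1 = cfg.model_copy()
assert copy1.http_client_factory is cfg.http_client_factory

# ...unless explicitly overridden via `update`:
copy2 = cfg.model_copy(update={"http_client_factory": None})
assert copy2.http_client_factory is None
```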
@@ -327,12 +366,12 @@ class OpenAIGPTConfig(LLMConfig):
             import litellm
         except ImportError:
             raise LangroidImportError("litellm", "litellm")
+
         litellm.telemetry = False
         litellm.drop_params = True  # drop un-supported params without crashing
-        # modify params to fit the model expectations, and avoid crashing
-        # (e.g. anthropic doesn't like first msg to be system msg)
         litellm.modify_params = True
         self.seed = None  # some local mdls don't support seed
+
         if self.api_key == DUMMY_API_KEY:
             keys_dict = litellm.utils.validate_environment(self.chat_model)
             missing_keys = keys_dict.get("missing_keys", [])
@@ -362,8 +401,7 @@ class OpenAIGPTConfig(LLMConfig):
         class DynamicConfig(OpenAIGPTConfig):
             pass
 
-        DynamicConfig.Config.env_prefix = prefix.upper() + "_"
-
+        DynamicConfig.model_config = SettingsConfigDict(env_prefix=prefix.upper() + "_")
 
         return DynamicConfig
 
 
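This factory builds an `OpenAIGPTConfig` subclass with a caller-chosen env prefix; under pydantic v2 the prefix is attached via `model_config` instead of mutating the v1 inner `Config` class. A hedged usage sketch, assuming the hunk sits inside a classmethod like `OpenAIGPTConfig.create(prefix)`:

```python
import os

from langroid.language_models.openai_gpt import OpenAIGPTConfig

# `create` is assumed to be the classmethod wrapping the hunk above.
GroqConfig = OpenAIGPTConfig.create(prefix="groq")

os.environ["GROQ_API_BASE"] = "https://api.groq.com/openai/v1"
cfg = GroqConfig()  # fields now read from GROQ_-prefixed env vars
```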
@@ -404,7 +442,7 @@ class OpenAIGPT(LanguageModel):
         config: configuration for openai-gpt model
         """
         # copy the config to avoid modifying the original
-        config = config.copy()
+        config = config.model_copy()
         super().__init__(config)
         self.config: OpenAIGPTConfig = config
         # save original model name such as `provider/model` before
@@ -631,6 +669,32 @@ class OpenAIGPT(LanguageModel):
         # Add Portkey-specific headers
         self.config.headers.update(self.config.portkey_params.get_headers())
 
+        # Create http_client if needed - Priority order:
+        # 1. http_client_factory (most flexibility, not cacheable)
+        # 2. http_client_config (cacheable, moderate flexibility)
+        # 3. http_verify_ssl=False (cacheable, simple SSL bypass)
+        http_client = None
+        async_http_client = None
+        http_client_config_used = None
+
+        if self.config.http_client_factory is not None:
+            # Use the factory to create http_client (not cacheable)
+            http_client = self.config.http_client_factory()
+            # Don't set async_http_client from sync client - create separately
+            # This avoids type mismatch issues
+            async_http_client = None
+        elif self.config.http_client_config is not None:
+            # Use config dict (cacheable)
+            http_client_config_used = self.config.http_client_config
+        elif not self.config.http_verify_ssl:
+            # Simple SSL bypass (cacheable)
+            http_client_config_used = {"verify": False}
+            logging.warning(
+                "SSL verification has been disabled. This is insecure and "
+                "should only be used in trusted environments (e.g., "
+                "corporate networks with self-signed certificates)."
+            )
+
         if self.config.use_cached_client:
             self.client = get_openai_client(
                 api_key=self.api_key,
@@ -638,6 +702,8 @@ class OpenAIGPT(LanguageModel):
                 organization=self.config.organization,
                 timeout=Timeout(self.config.timeout),
                 default_headers=self.config.headers,
+                http_client=http_client,
+                http_client_config=http_client_config_used,
             )
             self.async_client = get_async_openai_client(
                 api_key=self.api_key,
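The new `http_client`/`http_client_config` parameters flow into the cached-client helpers (see `langroid/language_models/client_cache.py`, +64 lines, in the file list above). The design reason a config dict stays cacheable while a factory does not: a dict of primitives can be serialized into a stable cache key, whereas a fresh closure cannot. A hypothetical sketch of that keying idea, not the actual `client_cache.py` code:

```python
import json


def cache_key(api_key: str, http_client_config: dict | None) -> str:
    # A dict of primitives serializes deterministically -> reusable key
    return json.dumps(
        {"api_key": api_key, "http_client_config": http_client_config},
        sort_keys=True,
    )


k1 = cache_key("sk-test", {"verify": False})
k2 = cache_key("sk-test", {"verify": False})
assert k1 == k2  # same config -> same cached client

# A factory like `lambda: httpx.Client()` has no such stable identity,
# so factory-built clients are never cached.
```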
@@ -645,23 +711,56 @@ class OpenAIGPT(LanguageModel):
                 organization=self.config.organization,
                 timeout=Timeout(self.config.timeout),
                 default_headers=self.config.headers,
+                http_client=async_http_client,
+                http_client_config=http_client_config_used,
             )
         else:
             # Create new clients without caching
-            self.client = OpenAI(
+            client_kwargs: Dict[str, Any] = dict(
                 api_key=self.api_key,
                 base_url=self.api_base,
                 organization=self.config.organization,
                 timeout=Timeout(self.config.timeout),
                 default_headers=self.config.headers,
             )
-            self.async_client = AsyncOpenAI(
+            if http_client is not None:
+                client_kwargs["http_client"] = http_client
+            elif http_client_config_used is not None:
+                # Create http_client from config for non-cached scenario
+                try:
+                    from httpx import Client
+
+                    client_kwargs["http_client"] = Client(**http_client_config_used)
+                except ImportError:
+                    raise ValueError(
+                        "httpx is required to use http_client_config. "
+                        "Install it with: pip install httpx"
+                    )
+            self.client = OpenAI(**client_kwargs)
+
+            async_client_kwargs: Dict[str, Any] = dict(
                 api_key=self.api_key,
                 base_url=self.api_base,
                 organization=self.config.organization,
                 timeout=Timeout(self.config.timeout),
                 default_headers=self.config.headers,
             )
+            if async_http_client is not None:
+                async_client_kwargs["http_client"] = async_http_client
+            elif http_client_config_used is not None:
+                # Create async http_client from config for non-cached scenario
+                try:
+                    from httpx import AsyncClient
+
+                    async_client_kwargs["http_client"] = AsyncClient(
+                        **http_client_config_used
+                    )
+                except ImportError:
+                    raise ValueError(
+                        "httpx is required to use http_client_config. "
+                        "Install it with: pip install httpx"
+                    )
+            self.async_client = AsyncOpenAI(**async_client_kwargs)
 
         self.cache: CacheDB | None = None
         use_cache = self.config.cache_config is not None
@@ -1413,7 +1512,7 @@ class OpenAIGPT(LanguageModel):
 
         if has_function:
             function_call = LLMFunctionCall(name=function_name)
-            function_call_dict = function_call.dict()
+            function_call_dict = function_call.model_dump()
            if function_args == "":
                 function_call.arguments = None
             else:
@@ -1465,7 +1564,7 @@ class OpenAIGPT(LanguageModel):
                     ),
                 ),
             ),
-            openai_response.dict(),
+            openai_response.model_dump(),
         )
 
     def _cache_store(self, k: str, v: Any) -> None:
@@ -1616,7 +1715,7 @@ class OpenAIGPT(LanguageModel):
         cached, hashed_key, response = completions_with_backoff(**args)
         # assume response is an actual response rather than a streaming event
         if not isinstance(response, dict):
-            response = response.dict()
+            response = response.model_dump()
         if "message" in response["choices"][0]:
             msg = response["choices"][0]["message"]["content"].strip()
         else:
@@ -1694,7 +1793,7 @@ class OpenAIGPT(LanguageModel):
         )
         # assume response is an actual response rather than a streaming event
         if not isinstance(response, dict):
-            response = response.dict()
+            response = response.model_dump()
         if "message" in response["choices"][0]:
             msg = response["choices"][0]["message"]["content"].strip()
         else:
@@ -1992,7 +2091,7 @@ class OpenAIGPT(LanguageModel):
         if functions is not None:
             args.update(
                 dict(
-                    functions=[f.dict() for f in functions],
+                    functions=[f.model_dump() for f in functions],
                    function_call=function_call,
                )
            )
@@ -2010,7 +2109,7 @@ class OpenAIGPT(LanguageModel):
                 tools=[
                     dict(
                         type="function",
-                        function=t.function.dict()
+                        function=t.function.model_dump()
                         | ({"strict": t.strict} if t.strict is not None else {}),
                     )
                     for t in tools