langroid 0.50.2__py3-none-any.whl → 0.50.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langroid/agent/base.py +9 -5
- langroid/agent/chat_agent.py +2 -7
- langroid/agent/special/doc_chat_task.py +0 -0
- langroid/language_models/base.py +15 -1
- langroid/language_models/openai_gpt.py +109 -56
- {langroid-0.50.2.dist-info → langroid-0.50.4.dist-info}/METADATA +1 -1
- {langroid-0.50.2.dist-info → langroid-0.50.4.dist-info}/RECORD +9 -8
- {langroid-0.50.2.dist-info → langroid-0.50.4.dist-info}/WHEEL +0 -0
- {langroid-0.50.2.dist-info → langroid-0.50.4.dist-info}/licenses/LICENSE +0 -0
langroid/agent/base.py
CHANGED
@@ -1929,10 +1929,13 @@ class Agent(ABC):
         print_response_stats: bool = True,
     ) -> None:
         """
-        Updates `response.usage` obj (token usage and cost fields)
+        Updates `response.usage` obj (token usage and cost fields) if needed.
+        An update is needed only if:
+        - stream is True (i.e. streaming was enabled), and
+        - the response was NOT obtained from cache, and
+        - the API did NOT provide the usage/cost fields during streaming
+        (As of Sep 2024, the OpenAI API started providing these; for other APIs
+        this may not necessarily be the case).
 
         Args:
             response (LLMResponse): LLMResponse object
@@ -1945,10 +1948,11 @@ class Agent(ABC):
         if response is None or self.llm is None:
             return
 
+        no_usage_info = response.usage is None or response.usage.prompt_tokens == 0
         # Note: If response was not streamed, then
         # `response.usage` would already have been set by the API,
         # so we only need to update in the stream case.
-        if stream:
+        if stream and no_usage_info:
             # usage, cost = 0 when response is from cache
             prompt_tokens = 0
             completion_tokens = 0
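In effect, after this change token usage is recomputed only when streaming was enabled and neither the cache nor the API already supplied usage numbers. A minimal standalone sketch of the guard (simplified names, not the actual langroid Agent API):

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class Usage:
    prompt_tokens: int = 0
    completion_tokens: int = 0


def needs_usage_update(stream: bool, usage: Optional[Usage]) -> bool:
    # mirrors: no_usage_info = response.usage is None or prompt_tokens == 0
    no_usage_info = usage is None or usage.prompt_tokens == 0
    return stream and no_usage_info


assert needs_usage_update(True, None)              # streamed, API sent no usage
assert not needs_usage_update(False, None)         # non-streamed: API already set usage
assert not needs_usage_update(True, Usage(12, 3))  # streamed, but a usage chunk arrived
```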
langroid/agent/chat_agent.py
CHANGED
@@ -94,7 +94,7 @@ class ChatAgentConfig(AgentConfig):
     handle_llm_no_tool: Any = None
     use_tools: bool = False
     use_functions_api: bool = True
-    use_tools_api: bool = False
+    use_tools_api: bool = True
     strict_recovery: bool = True
     enable_orchestration_tool_handling: bool = True
     output_format: Optional[type] = None
@@ -308,12 +308,7 @@ class ChatAgent(Agent):
 
     def _fn_call_available(self) -> bool:
         """Does this agent's LLM support function calling?"""
-        return (
-            self.llm is not None
-            and isinstance(self.llm, OpenAIGPT)
-            and self.llm.is_openai_chat_model()
-            and self.llm.supports_functions_or_tools()
-        )
+        return self.llm is not None and self.llm.supports_functions_or_tools()
 
     def _strict_tools_available(self) -> bool:
         """Does this agent's LLM support strict tools?"""
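The check is now a plain capability test on whatever LLM is configured, rather than an isinstance test against OpenAIGPT chat models. A sketch of the new shape (the Protocol here is illustrative, not langroid's real class hierarchy):

```python
from typing import Optional, Protocol


class ToolCapableLLM(Protocol):
    def supports_functions_or_tools(self) -> bool: ...


def fn_call_available(llm: Optional[ToolCapableLLM]) -> bool:
    # any LLM reporting native tool support qualifies,
    # not only OpenAI chat models as before
    return llm is not None and llm.supports_functions_or_tools()
```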
langroid/agent/special/doc_chat_task.py
File without changes (new, empty file)
langroid/language_models/base.py
CHANGED
@@ -216,7 +216,7 @@ class LLMTokenUsage(BaseModel):
     prompt_tokens: int = 0
     completion_tokens: int = 0
     cost: float = 0.0
-    calls: int = 0  # how many API calls
+    calls: int = 0  # how many API calls - not used as of 2025-04-04
 
     def reset(self) -> None:
         self.prompt_tokens = 0
@@ -626,6 +626,20 @@ class LanguageModel(ABC):
         )
         return get_model_info(orig_model, model)
 
+    def supports_functions_or_tools(self) -> bool:
+        """
+        Does this Model's API support "native" tool-calling, i.e.
+        can we call the API with arguments that contain a list of available tools,
+        and their schemas?
+        Note that, given the plethora of LLM provider APIs, this determination is
+        imperfect at best, and leans towards returning True.
+        When an API call fails with an error indicating tools are not supported,
+        users are encouraged to use Langroid's prompt-based
+        ToolMessage mechanism, which works with ANY LLM. To enable this,
+        in your ChatAgentConfig, set `use_functions_api=False`, and `use_tools=True`.
+        """
+        return self.info().has_tools
+
     def chat_context_length(self) -> int:
         return self.config.chat_context_length or DEFAULT_CONTEXT_LENGTH
 
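The fallback this docstring recommends looks like this in practice (a sketch assuming a standard langroid ChatAgent setup):

```python
from langroid.agent.chat_agent import ChatAgent, ChatAgentConfig

# If the provider errors out on native tool-calling, switch to Langroid's
# prompt-based ToolMessage mechanism, which works with any LLM:
config = ChatAgentConfig(
    use_functions_api=False,  # don't pass native `functions`/`tools` to the API
    use_tools=True,           # use prompt-based ToolMessages instead
)
agent = ChatAgent(config)
```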
langroid/language_models/openai_gpt.py
CHANGED
@@ -682,9 +682,6 @@ class OpenAIGPT(LanguageModel):
         openai_chat_models = [e.value for e in OpenAIChatModel]
         return self.config.chat_model in openai_chat_models
 
-    def supports_functions_or_tools(self) -> bool:
-        return self.is_openai_chat_model() and self.info().has_tools
-
     def is_openai_completion_model(self) -> bool:
         openai_completion_models = [e.value for e in OpenAICompletionModel]
         return self.config.completion_model in openai_completion_models
@@ -780,22 +777,39 @@ class OpenAIGPT(LanguageModel):
         reasoning: str = "",
         function_args: str = "",
         function_name: str = "",
-    ) -> Tuple[bool, bool, str, str]:
+    ) -> Tuple[bool, bool, str, str, Dict[str, int]]:
         """Process state vars while processing a streaming API response.
         Returns a tuple consisting of:
         - is_break: whether to break out of the loop
         - has_function: whether the response contains a function_call
         - function_name: name of the function
         - function_args: args of the function
+        - completion: completion text
+        - reasoning: reasoning text
+        - usage: usage dict
         """
         # convert event obj (of type ChatCompletionChunk) to dict so rest of code,
         # which expects dicts, works as it did before switching to openai v1.x
         if not isinstance(event, dict):
             event = event.model_dump()
 
+        usage = event.get("usage", {}) or {}
         choices = event.get("choices", [{}])
-        if len(choices) == 0:
+        if choices is None or len(choices) == 0:
             choices = [{}]
+        if len(usage) > 0 and len(choices[0]) == 0:
+            # we have a "usage" chunk, and empty choices, so we're done
+            # ASSUMPTION: a usage chunk ONLY arrives AFTER all normal completion text!
+            # If any API does not follow this, we need to change this code.
+            return (
+                True,
+                has_function,
+                function_name,
+                function_args,
+                completion,
+                reasoning,
+                usage,
+            )
         event_args = ""
         event_fn_name = ""
         event_tool_deltas: Optional[List[Dict[str, Any]]] = None
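The new early-return treats a chunk with a non-empty usage field and empty choices as the end-of-stream marker. A standalone sketch of that classification, using plain dicts shaped like OpenAI streaming chunks (is_final_usage_chunk is a hypothetical helper, not langroid code):

```python
from typing import Any, Dict


def is_final_usage_chunk(event: Dict[str, Any]) -> bool:
    # same logic as above: non-empty "usage" plus empty "choices" => done
    usage = event.get("usage", {}) or {}
    choices = event.get("choices", [{}])
    if choices is None or len(choices) == 0:
        choices = [{}]
    return len(usage) > 0 and len(choices[0]) == 0


content_chunk = {"choices": [{"delta": {"content": "Hi"}}], "usage": None}
usage_chunk = {"choices": [], "usage": {"prompt_tokens": 10, "completion_tokens": 2}}
assert not is_final_usage_chunk(content_chunk)
assert is_final_usage_chunk(usage_chunk)
```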
@@ -876,23 +890,23 @@ class OpenAIGPT(LanguageModel):
                     self.config.streamer(tool_fn_args, StreamEventType.TOOL_ARGS)
 
         # show this delta in the stream
         is_break = finish_reason in [
             "stop",
             "function_call",
             "tool_calls",
         ]
         # for function_call, finish_reason does not necessarily
         # contain "function_call" as mentioned in the docs.
         # So we check for "stop" or "function_call" here.
         return (
             is_break,
             has_function,
             function_name,
             function_args,
             completion,
             reasoning,
+            usage,
         )
 
     @no_type_check
     async def _process_stream_event_async(
@@ -912,15 +926,30 @@ class OpenAIGPT(LanguageModel):
         - has_function: whether the response contains a function_call
         - function_name: name of the function
         - function_args: args of the function
+        - completion: completion text
+        - reasoning: reasoning text
+        - usage: usage dict
         """
         # convert event obj (of type ChatCompletionChunk) to dict so rest of code,
         # which expects dicts, works as it did before switching to openai v1.x
         if not isinstance(event, dict):
             event = event.model_dump()
 
+        usage = event.get("usage", {}) or {}
         choices = event.get("choices", [{}])
         if len(choices) == 0:
             choices = [{}]
+        if len(usage) > 0 and len(choices[0]) == 0:
+            # we got usage chunk, and empty choices, so we're done
+            return (
+                True,
+                has_function,
+                function_name,
+                function_args,
+                completion,
+                reasoning,
+                usage,
+            )
         event_args = ""
         event_fn_name = ""
         event_tool_deltas: Optional[List[Dict[str, Any]]] = None
@@ -996,23 +1025,23 @@ class OpenAIGPT(LanguageModel):
                     )
 
         # show this delta in the stream
         is_break = choices[0].get("finish_reason", "") in [
             "stop",
             "function_call",
             "tool_calls",
         ]
         # for function_call, finish_reason does not necessarily
         # contain "function_call" as mentioned in the docs.
         # So we check for "stop" or "function_call" here.
         return (
             is_break,
             has_function,
             function_name,
             function_args,
             completion,
             reasoning,
+            usage,
         )
 
     @retry_with_exponential_backoff
     def _stream_response(  # type: ignore
@@ -1038,6 +1067,8 @@ class OpenAIGPT(LanguageModel):
             sys.stdout.flush()
         has_function = False
         tool_deltas: List[Dict[str, Any]] = []
+        token_usage: Dict[str, int] = {}
+        done: bool = False
         try:
             for event in response:
                 (
@@ -1047,6 +1078,7 @@ class OpenAIGPT(LanguageModel):
                     function_args,
                     completion,
                     reasoning,
+                    usage,
                 ) = self._process_stream_event(
                     event,
                     chat=chat,
@@ -1057,8 +1089,17 @@ class OpenAIGPT(LanguageModel):
                     function_args=function_args,
                     function_name=function_name,
                 )
+                if len(usage) > 0:
+                    # capture the token usage when non-empty
+                    token_usage = usage
                 if is_break:
-                    break
+                    if not self.get_stream() or done:
+                        # if not streaming, then we don't wait for last "usage" chunk
+                        break
+                    else:
+                        # mark done, so we quit after the last "usage" chunk
+                        done = True
+
         except Exception as e:
             logging.warning("Error while processing stream response: %s", str(e))
|
             reasoning=reasoning,
             function_args=function_args,
             function_name=function_name,
+            usage=token_usage,
         )
 
     @async_retry_with_exponential_backoff
@@ -1100,6 +1142,8 @@ class OpenAIGPT(LanguageModel):
             sys.stdout.flush()
         has_function = False
         tool_deltas: List[Dict[str, Any]] = []
+        token_usage: Dict[str, int] = {}
+        done: bool = False
         try:
             async for event in response:
                 (
@@ -1109,6 +1153,7 @@ class OpenAIGPT(LanguageModel):
                     function_args,
                     completion,
                     reasoning,
+                    usage,
                 ) = await self._process_stream_event_async(
                     event,
                     chat=chat,
@@ -1119,8 +1164,17 @@ class OpenAIGPT(LanguageModel):
                     function_args=function_args,
                     function_name=function_name,
                 )
+                if len(usage) > 0:
+                    # capture the token usage when non-empty
+                    token_usage = usage
                 if is_break:
-                    break
+                    if not self.get_stream() or done:
+                        # if not streaming, then we don't wait for last "usage" chunk
+                        break
+                    else:
+                        # mark done, so we quit after the next "usage" chunk
+                        done = True
+
         except Exception as e:
             logging.warning("Error while processing stream response: %s", str(e))
@@ -1135,6 +1189,7 @@ class OpenAIGPT(LanguageModel):
             reasoning=reasoning,
             function_args=function_args,
             function_name=function_name,
+            usage=token_usage,
         )
 
     @staticmethod
@@ -1272,6 +1327,7 @@ class OpenAIGPT(LanguageModel):
         reasoning: str = "",
         function_args: str = "",
         function_name: str = "",
+        usage: Dict[str, int] = {},
     ) -> Tuple[LLMResponse, Dict[str, Any]]:
         """
         Create an LLMResponse object from the streaming API response.
@@ -1281,8 +1337,10 @@ class OpenAIGPT(LanguageModel):
             tool_deltas: list of tool deltas received from streaming API
             has_function: whether the response contains a function_call
             completion: completion text
+            reasoning: reasoning text
             function_args: string representing function args
             function_name: name of the function
+            usage: token usage dict
         Returns:
             Tuple consisting of:
                 LLMResponse object (with message, usage),
@@ -1347,6 +1405,14 @@ class OpenAIGPT(LanguageModel):
                 # don't allow empty list [] here
                 oai_tool_calls=tool_calls or None if len(tool_deltas) > 0 else None,
                 function_call=function_call if has_function else None,
+                usage=LLMTokenUsage(
+                    prompt_tokens=usage.get("prompt_tokens", 0),
+                    completion_tokens=usage.get("completion_tokens", 0),
+                    cost=self._cost_chat_model(
+                        usage.get("prompt_tokens", 0),
+                        usage.get("completion_tokens", 0),
+                    ),
+                ),
             ),
             openai_response.dict(),
         )
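The captured usage dict is folded into LLMTokenUsage, with cost derived from the token counts. A sketch of the cost arithmetic with illustrative rates (the real per-model rates come from langroid's model info via _cost_chat_model):

```python
from typing import Dict

# hypothetical rates, dollars per 1M tokens; real values come from model info
INPUT_COST_PER_M = 2.50
OUTPUT_COST_PER_M = 10.00


def cost_from_usage(usage: Dict[str, int]) -> float:
    prompt = usage.get("prompt_tokens", 0)
    completion = usage.get("completion_tokens", 0)
    return (prompt * INPUT_COST_PER_M + completion * OUTPUT_COST_PER_M) / 1_000_000


print(cost_from_usage({"prompt_tokens": 1000, "completion_tokens": 200}))
# 0.0045  (1000 * 2.5/1e6 + 200 * 10/1e6)
```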
@@ -1575,17 +1641,6 @@ class OpenAIGPT(LanguageModel):
     ) -> LLMResponse:
         self.run_on_first_use()
 
-        if [functions, tools] != [None, None] and not self.is_openai_chat_model():
-            raise ValueError(
-                f"""
-                `functions` and `tools` can only be specified for OpenAI chat LLMs,
-                or LLMs served via an OpenAI-compatible API.
-                {self.config.chat_model} does not support function-calling or tools.
-                Instead, please use Langroid's ToolMessages, which are equivalent.
-                In the ChatAgentConfig, set `use_functions_api=False`
-                and `use_tools=True`, this will enable ToolMessages.
-                """
-            )
         if self.config.use_completion_for_chat and not self.is_openai_chat_model():
             # only makes sense for non-OpenAI models
             if self.config.formatter is None or self.config.hf_formatter is None:
@@ -1630,16 +1685,6 @@ class OpenAIGPT(LanguageModel):
     ) -> LLMResponse:
         self.run_on_first_use()
 
-        if [functions, tools] != [None, None] and not self.is_openai_chat_model():
-            raise ValueError(
-                f"""
-                `functions` and `tools` can only be specified for OpenAI chat models;
-                {self.config.chat_model} does not support function-calling or tools.
-                Instead, please use Langroid's ToolMessages, which are equivalent.
-                In the ChatAgentConfig, set `use_functions_api=False`
-                and `use_tools=True`, this will enable ToolMessages.
-                """
-            )
         # turn off streaming for async calls
         if (
             self.config.use_completion_for_chat
@@ -1833,6 +1878,14 @@ class OpenAIGPT(LanguageModel):
             max_tokens=max_tokens,
             stream=self.get_stream(),
         )
+        if self.get_stream():
+            args.update(
+                dict(
+                    # get token-usage numbers in stream mode from OpenAI API,
+                    # and possibly other OpenAI-compatible APIs.
+                    stream_options=dict(include_usage=True),
+                )
+            )
         args.update(self._openai_api_call_params(args))
         # only include functions-related args if functions are provided
         # since the OpenAI API will throw an error if `functions` is None or []
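This mirrors the OpenAI streaming API option: with stream=True, stream_options={"include_usage": True} makes the server send one final chunk whose usage field carries token counts and whose choices list is empty. A minimal sketch against the official openai v1.x Python client (assumes OPENAI_API_KEY is set in the environment; other OpenAI-compatible servers may ignore the option):

```python
from openai import OpenAI

client = OpenAI()
stream = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Say hi"}],
    stream=True,
    stream_options={"include_usage": True},
)
for chunk in stream:
    # content chunks have usage=None; the final chunk carries the totals
    if chunk.usage:
        print(chunk.usage.prompt_tokens, chunk.usage.completion_tokens)
```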
{langroid-0.50.2.dist-info → langroid-0.50.4.dist-info}/RECORD
CHANGED
@@ -3,9 +3,9 @@ langroid/exceptions.py,sha256=OPjece_8cwg94DLPcOGA1ddzy5bGh65pxzcHMnssTz8,2995
 langroid/mytypes.py,sha256=HIcYAqGeA9OK0Hlscym2FI5Oax9QFljDZoVgRlomhRk,4014
 langroid/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/agent/__init__.py,sha256=ll0Cubd2DZ-fsCMl7e10hf9ZjFGKzphfBco396IKITY,786
-langroid/agent/base.py,sha256=
+langroid/agent/base.py,sha256=bs5OLCf534mhsdR7Rgf27GqVNuSV2bOVbD46Y86mGFA,79829
 langroid/agent/batch.py,sha256=vi1r5i1-vN80WfqHDSwjEym_KfGsqPGUtwktmiK1nuk,20635
-langroid/agent/chat_agent.py,sha256=
+langroid/agent/chat_agent.py,sha256=m1alf-KNJSU6PeFF6ocwTHSG0cmTE-iy1o7UYAvRGQE,85081
 langroid/agent/chat_document.py,sha256=xzMtrPbaW-Y-BnF7kuhr2dorsD-D5rMWzfOqJ8HAoo8,17885
 langroid/agent/openai_assistant.py,sha256=JkAcs02bIrgPNVvUWVR06VCthc5-ulla2QMBzux_q6o,34340
 langroid/agent/task.py,sha256=HB6N-Jn80HFqCf0ZYOC1v3Bn3oO7NLjShHQJJFwW0q4,90557
@@ -15,6 +15,7 @@ langroid/agent/callbacks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 langroid/agent/callbacks/chainlit.py,sha256=UHB6P_J40vsVnssosqkpkOVWRf9NK4TOY0_G2g_Arsg,20900
 langroid/agent/special/__init__.py,sha256=gik_Xtm_zV7U9s30Mn8UX3Gyuy4jTjQe9zjiE3HWmEo,1273
 langroid/agent/special/doc_chat_agent.py,sha256=dOL9Y0xAslkwepCdKU8Dc1m5Vk8qgk-gLbU4JzsmTII,65234
+langroid/agent/special/doc_chat_task.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/agent/special/lance_doc_chat_agent.py,sha256=s8xoRs0gGaFtDYFUSIRchsgDVbS5Q3C2b2mr3V1Fd-Q,10419
 langroid/agent/special/lance_tools.py,sha256=qS8x4wi8mrqfbYV2ztFzrcxyhHQ0ZWOc-zkYiH7awj0,2105
 langroid/agent/special/relevance_extractor_agent.py,sha256=zIx8GUdVo1aGW6ASla0NPQjYYIpmriK_TYMijqAx3F8,4796
@@ -68,11 +69,11 @@ langroid/embedding_models/protoc/embeddings_pb2.pyi,sha256=UkNy7BrNsmQm0vLb3NtGX
 langroid/embedding_models/protoc/embeddings_pb2_grpc.py,sha256=9dYQqkW3JPyBpSEjeGXTNpSqAkC-6FPtBHyteVob2Y8,2452
 langroid/language_models/__init__.py,sha256=3aD2qC1lz8v12HX4B-dilv27gNxYdGdeu1QvDlkqqHs,1095
 langroid/language_models/azure_openai.py,sha256=SW0Fp_y6HpERr9l6TtF6CYsKgKwjUf_hSL_2mhTV4wI,5034
-langroid/language_models/base.py,sha256=
+langroid/language_models/base.py,sha256=A9A83SJOGh_elRScKNIDAcVT4mw9vKCou7txsPMVSv4,27023
 langroid/language_models/config.py,sha256=9Q8wk5a7RQr8LGMT_0WkpjY8S4ywK06SalVRjXlfCiI,378
 langroid/language_models/mock_lm.py,sha256=5BgHKDVRWFbUwDT_PFgTZXz9-k8wJSA2e3PZmyDgQ1k,4022
 langroid/language_models/model_info.py,sha256=tfBBxL0iUf2mVN6CjcvqflzFUVg2oZqOJZexZ8jHTYA,12216
-langroid/language_models/openai_gpt.py,sha256=
+langroid/language_models/openai_gpt.py,sha256=t5oFJFxfm0ZwbD5kS73TY8Hal6aCFUWX7O7dBZc-7fw,84480
 langroid/language_models/utils.py,sha256=L4_CbihDMTGcsg0TOG1Yd5JFEto46--h7CX_14m89sQ,5016
 langroid/language_models/prompt_formatter/__init__.py,sha256=2-5cdE24XoFDhifOLl8yiscohil1ogbP1ECkYdBlBsk,372
 langroid/language_models/prompt_formatter/base.py,sha256=eDS1sgRNZVnoajwV_ZIha6cba5Dt8xjgzdRbPITwx3Q,1221
@@ -128,7 +129,7 @@ langroid/vector_store/pineconedb.py,sha256=otxXZNaBKb9f_H75HTaU3lMHiaR2NUp5MqwLZ
 langroid/vector_store/postgres.py,sha256=wHPtIi2qM4fhO4pMQr95pz1ZCe7dTb2hxl4VYspGZoA,16104
 langroid/vector_store/qdrantdb.py,sha256=O6dSBoDZ0jzfeVBd7LLvsXu083xs2fxXtPa9gGX3JX4,18443
 langroid/vector_store/weaviatedb.py,sha256=Yn8pg139gOy3zkaPfoTbMXEEBCiLiYa1MU5d_3UA1K4,11847
-langroid-0.50.2.dist-info/METADATA,sha256=
-langroid-0.50.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-langroid-0.50.2.dist-info/licenses/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
-langroid-0.50.2.dist-info/RECORD,,
+langroid-0.50.4.dist-info/METADATA,sha256=3HQH6x3pAFIDSUIt8TLI912CA-8EN_Jl_nKpQ09oli0,63641
+langroid-0.50.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+langroid-0.50.4.dist-info/licenses/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
+langroid-0.50.4.dist-info/RECORD,,
{langroid-0.50.2.dist-info → langroid-0.50.4.dist-info}/WHEEL
File without changes
{langroid-0.50.2.dist-info → langroid-0.50.4.dist-info}/licenses/LICENSE
File without changes