langroid 0.2.5__py3-none-any.whl → 0.2.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langroid/agent/openai_assistant.py +17 -4
- langroid/agent/task.py +31 -5
- langroid/agent/tools/note_tool.py +0 -0
- langroid/language_models/base.py +2 -1
- langroid/language_models/openai_gpt.py +10 -5
- langroid/mytypes.py +7 -1
- langroid/utils/configuration.py +1 -1
- langroid/utils/constants.py +7 -2
- {langroid-0.2.5.dist-info → langroid-0.2.7.dist-info}/METADATA +8 -8
- {langroid-0.2.5.dist-info → langroid-0.2.7.dist-info}/RECORD +13 -12
- pyproject.toml +1 -1
- {langroid-0.2.5.dist-info → langroid-0.2.7.dist-info}/LICENSE +0 -0
- {langroid-0.2.5.dist-info → langroid-0.2.7.dist-info}/WHEEL +0 -0
langroid/agent/openai_assistant.py
CHANGED

```diff
@@ -100,6 +100,10 @@ class OpenAIAssistant(ChatAgent):
         super().__init__(config)
         self.config: OpenAIAssistantConfig = config
         self.llm: OpenAIGPT = OpenAIGPT(self.config.llm)
+        assert (
+            self.llm.cache is not None
+        ), "OpenAIAssistant requires a cache to store Assistant and Thread ids"
+
         if not isinstance(self.llm.client, openai.OpenAI):
             raise ValueError("Client must be OpenAI")
         # handles for various entities and methods
@@ -235,19 +239,23 @@ class OpenAIAssistant(ChatAgent):
         """Try to retrieve cached thread_id associated with
         this user + machine + organization"""
         key = self._cache_thread_key()
+        if self.llm.cache is None:
+            return None
         return self.llm.cache.retrieve(key)

     @no_type_check
     def _cache_assistant_lookup(self) -> str | None:
         """Try to retrieve cached assistant_id associated with
         this user + machine + organization"""
+        if self.llm.cache is None:
+            return None
         key = self._cache_assistant_key()
         return self.llm.cache.retrieve(key)

     @no_type_check
     def _cache_messages_lookup(self) -> LLMResponse | None:
         """Try to retrieve cached response for the message-list-hash"""
-        if not settings.cache:
+        if not settings.cache or self.llm.cache is None:
             return None
         key = self._cache_messages_key()
         cached_dict = self.llm.cache.retrieve(key)
@@ -260,6 +268,8 @@ class OpenAIAssistant(ChatAgent):
         Cache the assistant_id, thread_id associated with
         this user + machine + organization
         """
+        if self.llm.cache is None:
+            return
         if self.thread is None or self.assistant is None:
             raise ValueError("Thread or Assistant is None")
         thread_key = self._cache_thread_key()
@@ -336,7 +346,8 @@ class OpenAIAssistant(ChatAgent):
                 Could not delete thread with id {cached}, ignoring.
                 """
             )
-            self.llm.cache
+            if self.llm.cache is not None:
+                self.llm.cache.delete_keys([self._cache_thread_key()])
         if self.thread is None:
             if self.assistant is None:
                 raise ValueError("Assistant is None")
@@ -392,7 +403,8 @@ class OpenAIAssistant(ChatAgent):
                 Could not delete assistant with id {cached}, ignoring.
                 """
             )
-            self.llm.cache
+            if self.llm.cache is not None:
+                self.llm.cache.delete_keys([self._cache_assistant_key()])
         if self.assistant is None:
             self.assistant = self.client.beta.assistants.create(
                 name=self.config.name,
@@ -614,7 +626,8 @@ class OpenAIAssistant(ChatAgent):
             usage=None,  # TODO
             cached=False,  # TODO - revisit when able to insert Assistant responses
         )
-        self.llm.cache
+        if self.llm.cache is not None:
+            self.llm.cache.store(key, result.dict())
         return result

     def _parse_run_required_action(self) -> List[AssistantToolCall]:
```
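`OpenAIAssistant` now asserts at construction time that the underlying LLM has a cache configured, since Assistant and Thread ids are persisted there. A minimal sketch of satisfying that requirement; the `OpenAIAssistantConfig(llm=...)` and `OpenAIGPTConfig(cache_config=...)` constructor keywords are assumptions based on the fields shown in this diff:

```python
from langroid.agent.openai_assistant import OpenAIAssistant, OpenAIAssistantConfig
from langroid.cachedb.redis_cachedb import RedisCacheConfig
from langroid.language_models.openai_gpt import OpenAIGPTConfig

# cache_config must not be None here: OpenAIAssistant asserts llm.cache is not None
# because it stores Assistant and Thread ids in the cache.
llm_config = OpenAIGPTConfig(cache_config=RedisCacheConfig())
assistant = OpenAIAssistant(OpenAIAssistantConfig(llm=llm_config))
```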
langroid/agent/task.py
CHANGED

```diff
@@ -41,7 +41,6 @@ from langroid.parsing.routing import parse_addressed_message
 from langroid.pydantic_v1 import BaseModel
 from langroid.utils.configuration import settings
 from langroid.utils.constants import (
-    AT,  # regex for start of an addressed recipient e.g. "@"
     DONE,
     NO_ANSWER,
     PASS,
@@ -74,6 +73,22 @@ class TaskConfig(BaseModel):
         inf_loop_wait_factor (int): wait this * cycle_len msgs before loop-check
         restart_subtask_run (bool): whether to restart *every* run of this task
             when run as a subtask.
+        addressing_prefix (str): "@"-like prefix an agent can use to address other
+            agents, or entities of the agent. E.g., if this is "@", the addressing
+            string would be "@Alice", or "@user", "@llm", "@agent", etc.
+            If this is an empty string, then addressing is disabled.
+            Default is empty string "".
+            CAUTION: this is a deprecated practice, since normal prompts
+            can accidentally contain such addressing prefixes, and will break
+            your runs. This could happen especially when your prompt/context
+            contains code, but of course could occur in normal text as well.
+            Instead, use the `RecipientTool` to have agents address other agents or
+            entities. If you do choose to use `addressing_prefix`, the recommended
+            setting is to use `langroid.utils.constants.AT`, which currently is "|@|".
+            Note that this setting does NOT affect the use of `constants.SEND_TO` --
+            this is always enabled since this is a critical way for responders to
+            indicate that the message should be sent to a specific entity/agent.
+            (Search for "SEND_TO" in the examples/ dir to see how this is used.)
     """

     inf_loop_cycle_len: int = 10
@@ -81,6 +96,7 @@ class TaskConfig(BaseModel):
     inf_loop_wait_factor: int = 5
     restart_as_subtask: bool = False
     logs_dir: str = "logs"
+    addressing_prefix: str = ""


 class Task:
@@ -211,7 +227,7 @@ class Task:
             set_parent_agent=noop_fn,
         )
         self.config = config
-        # how to behave as a sub-task; can be
+        # how to behave as a sub-task; can be overridden by `add_sub_task()`
        self.config_sub_task = copy.deepcopy(config)
         # counts of distinct pending messages in history,
         # to help detect (exact) infinite loops
@@ -1190,7 +1206,10 @@ class Task:
         if result is None:
             return None
         # if result content starts with @name, set recipient to name
-        is_pass, recipient, content = parse_routing(
+        is_pass, recipient, content = parse_routing(
+            result,
+            addressing_prefix=self.config.addressing_prefix,
+        )
         if is_pass is None:  # no routing, i.e. neither PASS nor SEND
             return result
         if is_pass:
@@ -1648,6 +1667,7 @@ class Task:

 def parse_routing(
     msg: ChatDocument | str,
+    addressing_prefix: str = "",
 ) -> Tuple[bool | None, str | None, str | None]:
     """
     Parse routing instruction if any, of the form:
@@ -1656,6 +1676,8 @@ def parse_routing(
     @<recipient> <content> (send content to recipient)
     Args:
         msg (ChatDocument|str|None): message to parse
+        addressing_prefix (str): prefix to address other agents or entities,
+            (e.g. "@". See documentation of `TaskConfig` for details).
     Returns:
         Tuple[bool|None, str|None, str|None]:
             bool: true=PASS, false=SEND, or None if neither
@@ -1682,8 +1704,12 @@ def parse_routing(
     else:
         return False, addressee, content_to_send
     if (
-
-        and
+        addressing_prefix != ""
+        and addressing_prefix in content
+        and (addressee_content := parse_addressed_message(content, addressing_prefix))[
+            0
+        ]
+        is not None
     ):
         (addressee, content_to_send) = addressee_content
         # if no content then treat same as PASS_TO
```
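A minimal sketch of the new `addressing_prefix` opt-in, assuming `TaskConfig` and the module-level `parse_routing` are importable from `langroid.agent.task` as in the hunks above, and that the sample message is one `parse_addressed_message` can resolve:

```python
from langroid.agent.task import TaskConfig, parse_routing
from langroid.utils.constants import AT

# Addressing is off by default (addressing_prefix == ""); the recommended opt-in
# value is the AT constant ("|@|"), which rarely appears in ordinary prompts or code.
config = TaskConfig(addressing_prefix=AT)

# parse_routing returns (is_pass, recipient, content):
# True = PASS routing, False = SEND to a recipient, None = no routing at all.
is_pass, recipient, content = parse_routing(
    f"{AT}Alice please review the draft",
    addressing_prefix=config.addressing_prefix,
)
# Expected under these assumptions: is_pass is False and recipient == "Alice".
```

As the docstring above cautions, `RecipientTool` remains the preferred way for agents to address each other; prefix addressing is an explicit, off-by-default opt-in.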
langroid/agent/tools/note_tool.py
File without changes
langroid/language_models/base.py
CHANGED

```diff
@@ -17,6 +17,7 @@ from typing import (
 )

 from langroid.cachedb.base import CacheDBConfig
+from langroid.cachedb.redis_cachedb import RedisCacheConfig
 from langroid.parsing.agent_chats import parse_message
 from langroid.parsing.parse_json import top_level_json_field
 from langroid.prompts.dialog import collate_chat_history
@@ -50,7 +51,7 @@ class LLMConfig(BaseSettings):
     # use chat model for completion? For OpenAI models, this MUST be set to True!
     use_chat_for_completion: bool = True
     stream: bool = True  # stream output from API?
-    cache_config: None | CacheDBConfig =
+    cache_config: None | CacheDBConfig = RedisCacheConfig()

     # Dict of model -> (input/prompt cost, output/completion cost)
     chat_cost_per_1k_tokens: Tuple[float, float] = (0.0, 0.0)
```
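`cache_config` now defaults to a concrete `RedisCacheConfig()` rather than being left unset, so disabling the LLM cache becomes an explicit choice. A minimal sketch, assuming the `OpenAIGPTConfig` subclass inherits the field unchanged:

```python
from langroid.language_models.openai_gpt import OpenAIGPTConfig

cached = OpenAIGPTConfig()                     # cache_config defaults to RedisCacheConfig()
uncached = OpenAIGPTConfig(cache_config=None)  # opt out of LLM-response caching
```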
langroid/language_models/openai_gpt.py
CHANGED

```diff
@@ -478,8 +478,9 @@ class OpenAIGPT(LanguageModel):
             timeout=Timeout(self.config.timeout),
         )

-        self.cache: CacheDB
-
+        self.cache: CacheDB | None = None
+        use_cache = self.config.cache_config is not None
+        if settings.cache_type == "momento" and use_cache:
             from langroid.cachedb.momento_cachedb import (
                 MomentoCache,
                 MomentoCacheConfig,
@@ -492,7 +493,7 @@ class OpenAIGPT(LanguageModel):
             # switch to fresh momento config if needed
             config.cache_config = MomentoCacheConfig()
             self.cache = MomentoCache(config.cache_config)
-        elif "redis" in settings.cache_type:
+        elif "redis" in settings.cache_type and use_cache:
             if config.cache_config is None or not isinstance(
                 config.cache_config,
                 RedisCacheConfig,
@@ -505,10 +506,10 @@ class OpenAIGPT(LanguageModel):
                 # force use of fake redis if global cache_type is "fakeredis"
                 config.cache_config.fake = True
             self.cache = RedisCache(config.cache_config)
-
+        elif settings.cache_type != "none" and use_cache:
             raise ValueError(
                 f"Invalid cache type {settings.cache_type}. "
-                "Valid types are momento, redis, fakeredis"
+                "Valid types are momento, redis, fakeredis, none"
             )

         self.config._validate_litellm()
@@ -818,6 +819,8 @@ class OpenAIGPT(LanguageModel):
         )

     def _cache_store(self, k: str, v: Any) -> None:
+        if self.cache is None:
+            return
         try:
             self.cache.store(k, v)
         except Exception as e:
@@ -825,6 +828,8 @@ class OpenAIGPT(LanguageModel):
             pass

     def _cache_lookup(self, fn_name: str, **kwargs: Dict[str, Any]) -> Tuple[str, Any]:
+        if self.cache is None:
+            return "", None  # no cache, return empty key and None result
         # Use the kwargs as the cache key
         sorted_kwargs_str = str(sorted(kwargs.items()))
         raw_key = f"{fn_name}:{sorted_kwargs_str}"
```
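The net effect is that a missing cache is now tolerated throughout `OpenAIGPT` rather than being a hard error. A minimal sketch, assuming an `OPENAI_API_KEY` is available in the environment:

```python
from langroid.language_models.openai_gpt import OpenAIGPT, OpenAIGPTConfig

# With cache_config=None (or settings.cache_type == "none"), no cache backend is built.
llm = OpenAIGPT(OpenAIGPTConfig(cache_config=None))

# llm.cache stays None, _cache_lookup() returns ("", None), and _cache_store() is a
# no-op, so responses are simply never cached instead of raising an error.
assert llm.cache is None
```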
langroid/mytypes.py
CHANGED

```diff
@@ -22,11 +22,17 @@ class Entity(str, Enum):
     SYSTEM = "System"

     def __eq__(self, other: object) -> bool:
-        """Allow case-insensitive comparison with strings."""
+        """Allow case-insensitive equality (==) comparison with strings."""
+        if other is None:
+            return False
         if isinstance(other, str):
             return self.value.lower() == other.lower()
         return super().__eq__(other)

+    def __ne__(self, other: object) -> bool:
+        """Allow case-insensitive non-equality (!=) comparison with strings."""
+        return not self.__eq__(other)
+
     def __hash__(self) -> int:
         """Override this to ensure hashability of the enum,
         so it can be used sets and dictionary keys.
```
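A quick sketch of the resulting comparison semantics (only the `SYSTEM` member appears in this hunk):

```python
from langroid.mytypes import Entity

assert Entity.SYSTEM == "system"         # equality with strings is case-insensitive
assert Entity.SYSTEM != "user"           # __ne__ now mirrors the case-insensitive __eq__
assert (Entity.SYSTEM == None) is False  # comparing against None returns False  # noqa: E711
```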
langroid/utils/configuration.py
CHANGED

```diff
@@ -16,7 +16,7 @@ class Settings(BaseSettings):
     progress: bool = False  # show progress spinners/bars?
     stream: bool = True  # stream output?
    cache: bool = True  # use cache?
-    cache_type: Literal["redis", "fakeredis", "momento"] = "redis"  # cache type
+    cache_type: Literal["redis", "fakeredis", "momento", "none"] = "redis"  # cache type
     interactive: bool = True  # interactive mode?
     gpt3_5: bool = True  # use GPT-3.5?
     chat_model: str = ""  # language model name, e.g. litellm/ollama/llama2
```
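A minimal sketch of selecting the new "none" cache type globally; the `set_global` helper is assumed to be available in `langroid.utils.configuration`:

```python
from langroid.utils.configuration import Settings, set_global

# "none" is now a valid cache_type; together with the OpenAIGPT changes above it
# leaves llm.cache as None instead of raising "Invalid cache type".
set_global(Settings(cache_type="none"))
```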
langroid/utils/constants.py
CHANGED

```diff
@@ -18,6 +18,11 @@ DONE = "DONE"
 USER_QUIT_STRINGS = ["q", "x", "quit", "exit", "bye", DONE]
 PASS = "__PASS__"
 PASS_TO = PASS + ":"
-SEND_TO = "
+SEND_TO = "__SEND__:"
 TOOL = "TOOL"
-
+# This is a recommended setting for TaskConfig.addressing_prefix if using it at all;
+# prefer to use `RecipientTool` to allow agents addressing others.
+# Caution the AT string should NOT contain any 'word' characters, i.e.
+# it no letters, digits or underscores.
+# See tests/main/test_msg_routing for example usage
+AT = "|@|"
```
{langroid-0.2.5.dist-info → langroid-0.2.7.dist-info}/METADATA
CHANGED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langroid
-Version: 0.2.
+Version: 0.2.7
 Summary: Harness LLMs with Multi-Agent Programming
 License: MIT
 Author: Prasad Chalasani
@@ -150,7 +150,7 @@ This Multi-Agent paradigm is inspired by the
 `Langroid` is a fresh take on LLM app-development, where considerable thought has gone
 into simplifying the developer experience; it does not use `Langchain`.

-Companies are using/adapting Langroid in production
+📢 Companies are using/adapting Langroid in **production**. Here is a quote:

 >[Nullify](https://www.nullify.ai) uses AI Agents for secure software development.
 > It finds, prioritizes and fixes vulnerabilities. We have internally adapted Langroid's multi-agent orchestration framework in production, after evaluating CrewAI, Autogen, LangChain, Langflow, etc. We found Langroid to be far superior to those frameworks in terms of ease of setup and flexibility. Langroid's Agent and Task abstractions are intuitive, well thought out, and provide a great developer experience. We wanted the quickest way to get something in production. With other frameworks it would have taken us weeks, but with Langroid we got to good results in minutes. Highly recommended! <br> -- Jacky Wong, Head of AI at Nullify.
@@ -461,17 +461,17 @@ such as [ollama](https://github.com/ollama), [oobabooga](https://github.com/ooba
 - **Observability, Logging, Lineage:** Langroid generates detailed logs of multi-agent interactions and
   maintains provenance/lineage of messages, so that you can trace back
   the origin of a message.
-- **[Tools/Plugins/Function-calling](https://langroid.github.io/langroid/quick-start/chat-agent-tool/)**:
-
-
-
-  calling and tools have the same developer-facing interface, implemented
+- **[Tools/Plugins/Function-calling](https://langroid.github.io/langroid/quick-start/chat-agent-tool/)**:
+  Langroid supports OpenAI's [function calling](https://platform.openai.com/docs/guides/gpt/function-calling), as
+  well as an equivalent `ToolMessage` mechanism which works with
+  any LLM, not just OpenAI's.
+  Function calling and tools have the same developer-facing interface, implemented
   using [Pydantic](https://docs.pydantic.dev/latest/),
   which makes it very easy to define tools/functions and enable agents
   to use them. Benefits of using Pydantic are that you never have to write
   complex JSON specs for function calling, and when the LLM
   hallucinates malformed JSON, the Pydantic error message is sent back to
-  the LLM so it can fix it
+  the LLM so it can fix it.

 ---

```
{langroid-0.2.5.dist-info → langroid-0.2.7.dist-info}/RECORD
CHANGED

```diff
@@ -8,7 +8,7 @@ langroid/agent/chat_agent.py,sha256=nO6Yx5WvFsul5RmTP-HCdzeQPhccmzU_mDcPNdkzQ-s,
 langroid/agent/chat_document.py,sha256=MwtNABK28tfSzqCeQlxoauT8uPn8oldU7dlnrX8aQ10,11232
 langroid/agent/helpers.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/agent/junk,sha256=LxfuuW7Cijsg0szAzT81OjWWv1PMNI-6w_-DspVIO2s,339
-langroid/agent/openai_assistant.py,sha256=
+langroid/agent/openai_assistant.py,sha256=3saI9PwF8IZNJcjqyUy-rj73TInAzdlk14LiOvT_Dkc,33548
 langroid/agent/special/__init__.py,sha256=gik_Xtm_zV7U9s30Mn8UX3Gyuy4jTjQe9zjiE3HWmEo,1273
 langroid/agent/special/doc_chat_agent.py,sha256=CXFLfDMEabaBZwZwFgNOaG3E3S86xcBM4txrsMD_70I,54014
 langroid/agent/special/lance_doc_chat_agent.py,sha256=USp0U3eTaJzwF_3bdqE7CedSLbaqAi2tm-VzygcyLaA,10175
@@ -32,7 +32,7 @@ langroid/agent/special/sql/utils/populate_metadata.py,sha256=1J22UsyEPKzwK0XlJZt
 langroid/agent/special/sql/utils/system_message.py,sha256=qKLHkvQWRQodTtPLPxr1GSLUYUFASZU8x-ybV67cB68,1885
 langroid/agent/special/sql/utils/tools.py,sha256=vFYysk6Vi7HJjII8B4RitA3pt_z3gkSglDNdhNVMiFc,1332
 langroid/agent/special/table_chat_agent.py,sha256=d9v2wsblaRx7oMnKhLV7uO_ujvk9gh59pSGvBXyeyNc,9659
-langroid/agent/task.py,sha256=
+langroid/agent/task.py,sha256=vKM2dmRYSH4i_VA0lf2axUtZcTGU44rVHz6EyxI4kG0,73990
 langroid/agent/tool_message.py,sha256=wIyZnUcZpxkiRPvM9O3MO3b5BBAdLEEan9kqPbvtApc,9743
 langroid/agent/tools/__init__.py,sha256=e-63cfwQNk_ftRKQwgDAJQK16QLbRVWDBILeXIc7wLk,402
 langroid/agent/tools/duckduckgo_search_tool.py,sha256=NhsCaGZkdv28nja7yveAhSK_w6l_Ftym8agbrdzqgfo,1935
@@ -40,6 +40,7 @@ langroid/agent/tools/extract_tool.py,sha256=u5lL9rKBzaLBOrRyLnTAZ97pQ1uxyLP39XsW
 langroid/agent/tools/generator_tool.py,sha256=y0fB0ZObjA0b3L0uSTtrqRCKHDUR95arBftqiUeKD2o,663
 langroid/agent/tools/google_search_tool.py,sha256=y7b-3FtgXf0lfF4AYxrZ3K5pH2dhidvibUOAGBE--WI,1456
 langroid/agent/tools/metaphor_search_tool.py,sha256=qj4gt453cLEX3EGW7nVzVu6X7LCdrwjSlcNY0qJW104,2489
+langroid/agent/tools/note_tool.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/agent/tools/recipient_tool.py,sha256=NrLxIeQT-kbMv7AeYX0uqvGeMK4Q3fIDvG15OVzlgk8,9624
 langroid/agent/tools/retrieval_tool.py,sha256=2q2pfoYbZNfbWQ0McxrtmfF0ekGglIgRl-6uF26pa-E,871
 langroid/agent/tools/rewind_tool.py,sha256=G4DiXuOt2nQ2fU7qvtJMdLyyf-rK7RZwLsFxsAUfk-Y,5606
@@ -63,16 +64,16 @@ langroid/embedding_models/remote_embeds.py,sha256=6_kjXByVbqhY9cGwl9R83ZcYC2km-n
 langroid/exceptions.py,sha256=w_Cr41nPAmsa6gW5nNFaO9yDcBCWdQqRspL1jYvZf5w,2209
 langroid/language_models/__init__.py,sha256=vrBtgR8Cq9UVfoI7nTms0IN7fd4y2JYpUP3GNV1DegY,898
 langroid/language_models/azure_openai.py,sha256=ncRCbKooqLVOY-PWQUIo9C3yTuKEFbAwyngXT_M4P7k,5989
-langroid/language_models/base.py,sha256=
+langroid/language_models/base.py,sha256=oAK2lXBqksMglqWqE2CjC03X3qPFXWgtjFWpH9hJ3C8,17500
 langroid/language_models/config.py,sha256=9Q8wk5a7RQr8LGMT_0WkpjY8S4ywK06SalVRjXlfCiI,378
 langroid/language_models/mock_lm.py,sha256=qdgj-wtbQBXlibo_0rIRfCt0hGTPRoxy1C4VjN6quI4,2707
-langroid/language_models/openai_gpt.py,sha256=
+langroid/language_models/openai_gpt.py,sha256=U1B9glcJyYJYt1SWGQ8JPnzh2ntzms-wFl9y1ovcK4c,51018
 langroid/language_models/prompt_formatter/__init__.py,sha256=2-5cdE24XoFDhifOLl8yiscohil1ogbP1ECkYdBlBsk,372
 langroid/language_models/prompt_formatter/base.py,sha256=eDS1sgRNZVnoajwV_ZIha6cba5Dt8xjgzdRbPITwx3Q,1221
 langroid/language_models/prompt_formatter/hf_formatter.py,sha256=TFL6ppmeQWnzr6CKQzRZFYY810zE1mr8DZnhw6i85ok,5217
 langroid/language_models/prompt_formatter/llama2_formatter.py,sha256=YdcO88qyBeuMENVIVvVqSYuEpvYSTndUe_jd6hVTko4,2899
 langroid/language_models/utils.py,sha256=o6Zo2cnnvKrfSgF26knVQ1xkSxEoE7yN85296gNdVOw,4858
-langroid/mytypes.py,sha256=
+langroid/mytypes.py,sha256=KRN_dBamplSl3SYekosT_Maj6ZA749LONypaqPVnmbI,2435
 langroid/parsing/__init__.py,sha256=ZgSAfgTC6VsTLFlRSWT-TwYco7SQeRMeZG-49MnKYGY,936
 langroid/parsing/agent_chats.py,sha256=sbZRV9ujdM5QXvvuHVjIi2ysYSYlap-uqfMMUKulrW0,1068
 langroid/parsing/code-parsing.md,sha256=--cyyNiSZSDlIwcjAV4-shKrSiRe2ytF3AdSoS_hD2g,3294
@@ -103,8 +104,8 @@ langroid/pydantic_v1/main.py,sha256=p_k7kDY9eDrsA5dxNNqXusKLgx7mS_icGnS7fu4goqY,
 langroid/utils/__init__.py,sha256=Sruos2tB4G7Tn0vlblvYlX9PEGR0plI2uE0PJ4d_EC4,353
 langroid/utils/algorithms/__init__.py,sha256=WylYoZymA0fnzpB4vrsH_0n7WsoLhmuZq8qxsOCjUpM,41
 langroid/utils/algorithms/graph.py,sha256=JbdpPnUOhw4-D6O7ou101JLA3xPCD0Lr3qaPoFCaRfo,2866
-langroid/utils/configuration.py,sha256=
-langroid/utils/constants.py,sha256=
+langroid/utils/configuration.py,sha256=LgjHGB0qgKKTwBaVt84APiqvJbz6pLwylUvHWYmzyP0,3303
+langroid/utils/constants.py,sha256=w3eBQ5Q2HjxMBN_y1UarK0keREqCwXSxQXizMafsG-M,911
 langroid/utils/docker.py,sha256=kJQOLTgM0x9j9pgIIqp0dZNZCTvoUDhp6i8tYBq1Jr0,1105
 langroid/utils/globals.py,sha256=Az9dOFqR6n9CoTYSqa2kLikQWS0oCQ9DFQIQAnG-2q8,1355
 langroid/utils/llms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -128,8 +129,8 @@ langroid/vector_store/meilisearch.py,sha256=6frB7GFWeWmeKzRfLZIvzRjllniZ1cYj3Hmh
 langroid/vector_store/momento.py,sha256=QaPzUnTwlswoawGB-paLtUPyLRvckFXLfLDfvbTzjNQ,10505
 langroid/vector_store/qdrant_cloud.py,sha256=3im4Mip0QXLkR6wiqVsjV1QvhSElfxdFSuDKddBDQ-4,188
 langroid/vector_store/qdrantdb.py,sha256=wYOuu5c2vIKn9ZgvTXcAiZXMpV8AOXEWFAzI8S8UP-0,16828
-pyproject.toml,sha256=
-langroid-0.2.
-langroid-0.2.
-langroid-0.2.
-langroid-0.2.
+pyproject.toml,sha256=sOrG9TDKiauO9bMWAotyGlQ8R-GQgLSt7ApBq1bhUKQ,6957
+langroid-0.2.7.dist-info/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
+langroid-0.2.7.dist-info/METADATA,sha256=QqBF9VQobbivbIwaStn_2eMyVViqEx70iUtI5J37hBI,53950
+langroid-0.2.7.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+langroid-0.2.7.dist-info/RECORD,,
```
pyproject.toml
CHANGED

{langroid-0.2.5.dist-info → langroid-0.2.7.dist-info}/LICENSE
File without changes

{langroid-0.2.5.dist-info → langroid-0.2.7.dist-info}/WHEEL
File without changes