langroid 0.1.26__py3-none-any.whl → 0.1.28__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
langroid/cachedb/momento_cachedb.py ADDED
@@ -0,0 +1,78 @@
+ import json
+ import logging
+ import os
+ from datetime import timedelta
+ from typing import Any, Dict, Optional
+
+ import momento
+ from dotenv import load_dotenv
+ from momento.responses import CacheGet
+ from pydantic import BaseModel
+
+ from langroid.cachedb.base import CacheDB
+
+ logger = logging.getLogger(__name__)
+
+
+ class MomentoCacheConfig(BaseModel):
+     """Configuration model for MomentoCache."""
+
+     ttl: int = 60 * 60 * 24 * 7  # 1 week
+     cachename: str = "langroid_momento_cache"
+
+
+ class MomentoCache(CacheDB):
+     """Momento implementation of the CacheDB."""
+
+     def __init__(self, config: MomentoCacheConfig):
+         """
+         Initialize a MomentoCache with the given config.
+
+         Args:
+             config (MomentoCacheConfig): The configuration to use.
+         """
+         self.config = config
+         load_dotenv()
+
+         momento_token = os.getenv("MOMENTO_AUTH_TOKEN")
+         if momento_token is None:
+             raise ValueError("MOMENTO_AUTH_TOKEN not set in .env file")
+         else:
+             self.client = momento.CacheClient(
+                 configuration=momento.Configurations.Laptop.v1(),
+                 credential_provider=momento.CredentialProvider.from_environment_variable(
+                     "MOMENTO_AUTH_TOKEN"
+                 ),
+                 default_ttl=timedelta(seconds=self.config.ttl),
+             )
+             self.client.create_cache(self.config.cachename)
+
+     def clear(self) -> None:
+         """Clear keys from current db."""
+         self.client.flush_cache(self.config.cachename)
+
+     def store(self, key: str, value: Any) -> None:
+         """
+         Store a value associated with a key.
+
+         Args:
+             key (str): The key under which to store the value.
+             value (Any): The value to store.
+         """
+         self.client.set(self.config.cachename, key, json.dumps(value))
+
+     def retrieve(self, key: str) -> Optional[Dict[str, Any]]:
+         """
+         Retrieve the value associated with a key.
+
+         Args:
+             key (str): The key to retrieve the value for.
+
+         Returns:
+             dict: The value associated with the key, or None if absent.
+         """
+         value = self.client.get(self.config.cachename, key)
+         if isinstance(value, CacheGet.Hit):
+             return json.loads(value.value_string)  # type: ignore
+         else:
+             return None
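
The new `MomentoCache` mirrors the `RedisCache` interface (`store`, `retrieve`, `clear`), serializing values as JSON. A minimal usage sketch, assuming `MOMENTO_AUTH_TOKEN` is set in the environment or `.env` file; the key and value are illustrative:

```python
from langroid.cachedb.momento_cachedb import MomentoCache, MomentoCacheConfig

cache = MomentoCache(MomentoCacheConfig(ttl=3600))  # 1-hour TTL instead of the 1-week default
cache.store("greeting", {"text": "hello"})          # value is JSON-serialized
print(cache.retrieve("greeting"))                   # {'text': 'hello'} on a hit, None on a miss
```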
langroid/language_models/base.py CHANGED
@@ -7,6 +7,7 @@ from typing import Any, Dict, List, Optional, Tuple, Type, Union
  import aiohttp
  from pydantic import BaseModel, BaseSettings
 
+ from langroid.cachedb.momento_cachedb import MomentoCacheConfig
  from langroid.cachedb.redis_cachedb import RedisCacheConfig
  from langroid.mytypes import Document
  from langroid.parsing.agent_chats import parse_message
@@ -32,10 +33,7 @@ class LLMConfig(BaseSettings):
      min_output_tokens: int = 64
      use_chat_for_completion: bool = True  # use chat model for completion?
      stream: bool = False  # stream output from API?
-     cache_config: RedisCacheConfig = RedisCacheConfig(
-         hostname="redis-11524.c251.east-us-mz.azure.cloud.redislabs.com",
-         port=11524,
-     )
+     cache_config: None | RedisCacheConfig | MomentoCacheConfig = None
 
 
  class LLMFunctionCall(BaseModel):
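
With this change, `LLMConfig.cache_config` no longer ships a hard-coded Redis Cloud endpoint as its default; it stays `None` until a backend is chosen, and accepts either backend's config. A minimal sketch of setting it explicitly (the hostname/port values are illustrative):

```python
from langroid.cachedb.momento_cachedb import MomentoCacheConfig
from langroid.cachedb.redis_cachedb import RedisCacheConfig
from langroid.language_models.base import LLMConfig

# Point the cache at a local Redis instance...
config = LLMConfig(cache_config=RedisCacheConfig(hostname="localhost", port=6379))
# ...or at Momento (MOMENTO_AUTH_TOKEN is read from the environment when used)
config = LLMConfig(cache_config=MomentoCacheConfig())
```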
langroid/language_models/openai_gpt.py CHANGED
@@ -11,7 +11,8 @@ from dotenv import load_dotenv
  from pydantic import BaseModel
  from rich import print
 
- from langroid.cachedb.redis_cachedb import RedisCache
+ from langroid.cachedb.momento_cachedb import MomentoCache, MomentoCacheConfig
+ from langroid.cachedb.redis_cachedb import RedisCache, RedisCacheConfig
  from langroid.language_models.base import (
      LanguageModel,
      LLMConfig,
@@ -92,7 +93,13 @@ class OpenAIGPT(LanguageModel):
                  OPENAI_API_KEY not set in .env file,
                  please set it to your OpenAI API key."""
              )
-         self.cache = RedisCache(config.cache_config)
+         self.cache: MomentoCache | RedisCache
+         if settings.cache_type == "momento":
+             config.cache_config = MomentoCacheConfig()
+             self.cache = MomentoCache(config.cache_config)
+         else:
+             config.cache_config = RedisCacheConfig()
+             self.cache = RedisCache(config.cache_config)
 
      def set_stream(self, stream: bool) -> bool:
          """Enable or disable streaming output from API.
langroid/prompts/transforms.py CHANGED
@@ -4,7 +4,7 @@ from typing import List, Tuple
  import aiohttp
 
  from langroid.language_models.base import LanguageModel
- from langroid.mytypes import DocMetaData, Document
+ from langroid.mytypes import Document
  from langroid.prompts.dialog import collate_chat_history
  from langroid.prompts.templates import EXTRACTION_PROMPT
 
@@ -60,158 +60,6 @@ def get_verbatim_extracts(
      return asyncio.run(_get_verbatim_extracts(question, passages, LLM))
 
 
- def generate_summarizer_prompt(question: str, texts: List[str], k: int = 1) -> str:
-     # Request for k demonstrations
-     demo_request = f"""
-         Please provide {k} demonstrations of synthesizing answers based on
-         relevant text fragments for different questions. Include the question,
-         relevant text fragments, and the final synthesized answer for each
-         demonstration.
-         """
-
-     # Placeholder for demonstrations
-     demo_placeholder = "\n".join(
-         [
-             f"Question: [Question {i}]\n-----------\n"
-             f"Content: [Relevant text {i}]\n-----------\nFinal Answer: [Answer {i}]\n"
-             for i in range(1, k + 1)
-         ]
-     )
-
-     # Format the actual question and texts
-     actual_question_str = f"Question: {question}\n-----------\n"
-     content_lines = "\n".join([f"Content: {text}" for text in texts])
-     actual_question_str += content_lines + "\n-----------\nFinal Answer:\n"
-
-     # Combine the request, demonstrations, and
-     # actual question to form the complete prompt
-     complete_prompt = demo_request + demo_placeholder + "\n" + actual_question_str
-     return complete_prompt
-
-
- def make_summarizer_demos(k: int) -> str:
-     # Define modified original question for LLM.generate
-     # templatized_prompt = f"""
-     # generate {k} few-shot demos of answering a question based on a list of
-     # text contents extracted from a long document, where some or all
-     # contents may be irrelevant to the question. When there is no relevant
-     # text, the answer should be "I don't know". Each demo should be structured as
-     # Question:, Content:, Content:, and so on, and Final Answer: Use 1-3
-     # sentences for each piece of content.
-     # """
-     idk_instruction = ""
-     if k > 1:
-         idk_instruction = (
-             "At least one of the demos should have an " "'I don't know' answer. "
-         )
-
-     meta_prompt = (
-         f"""
-         Generate a templatized prompt for answering questions based on document extracts.
-         The prompt should include clear instructions, {k} few-shot demos, and placeholders
-         for the input question and extracts.
-
-         The instructions should specify that the answer must be based solely on the
-         provided extracts. Making up an answer should be discouraged if the information
-         is not in the extracts. If none of the extracts are relevant to the question,
-         the response should be 'I don't know'.
-
-         Each demo should consist of:
-         - A sample question (Question:)
-         - A series of extracts from a document (Extract:, Extract:, ...),
-           with each extract being 1-5 sentences long.
-         - A sample answer (Answer:)
-
-         {idk_instruction}
-         The final prompt should include placeholders:
-         - A placeholder {{Question}} for the input question
-         - A placeholder {{Extracts}} for the input extracts
-
-         The final prompt should end with 'Answer:' to provide the response.
-         """
-     ).strip()
-     return meta_prompt
-
-
- def get_summary_answer(
-     question: str, passages: List[Document], LLM: LanguageModel, k: int = 1
- ) -> Document:
-     templatized_prompt = """
-         Use the provided extracts (with sources) to answer the question. If there's not
-         enough information, respond with "I don't know." Justify your answer by citing
-         your sources, as in these examples:
-
-         Extract: The tree species in the garden include oak, maple, and birch.
-         Source: https://en.wikipedia.org/wiki/Tree
-         Extract: The oak trees are known for their longevity and strength.
-         Source: https://en.wikipedia.org/wiki/Oak
-         Question: What types of trees are in the garden?
-         Answer: The types of trees in the garden include oak, maple, and birch.
-         SOURCE: https://en.wikipedia.org/wiki/Tree
-         TEXT: The tree species in the garden include oak, maple, and birch.
-
-         Extract: The experiment involved three groups: control, low dose, and high dose.
-         Source: https://en.wikipedia.org/wiki/Experiment
-         Extract: The high dose group showed significant improvement in symptoms.
-         Source: https://en.wikipedia.org/wiki/Experiment
-         Extract: The control group did not receive any treatment and served as a baseline.
-         Source: https://en.wikipedia.org/wiki/Experiment
-         Question: How many groups were involved, and which group showed significant improvement?
-         Answer: There were three groups and the high dose group showed significant
-         improvement in symptoms.
-         SOURCE: https://en.wikipedia.org/wiki/Experiment
-         TEXT: The experiment involved three groups: control, low dose, and high dose.
-         SOURCE: https://en.wikipedia.org/wiki/Experiment
-         TEXT: The high dose group showed significant improvement in symptoms.
-
-
-         Extract: The CEO announced several new initiatives during the company meeting.
-         Source: https://en.wikipedia.org/wiki/CEO
-         Extract: The financial performance of the company has been strong this year.
-         Source: https://en.wikipedia.org/wiki/CEO
-         Question: What new initiatives did the CEO announce?
-         Answer: I don't know.
-
-         {extracts}
-         {question}
-         Answer:
-         """.strip()
-
-     # templatized_prompt = LLM.generate(prompt=prompt, max_tokens=1024)
-     # Auxiliary function to transform the list of passages into a single string
-     def stringify_passages(passages: List[Document]) -> str:
-         return "\n".join(
-             [
-                 f"""
-                 Extract: {p.content}
-                 Source: {p.metadata.source}
-                 """
-                 for p in passages
-             ]
-         )
-
-     passages_str = stringify_passages(passages)
-     # Substitute the question and passages into the templatized prompt
-     final_prompt = templatized_prompt.format(
-         question=f"Question:{question}", extracts=passages_str
-     )
-
-     # Generate the final answer based on the final prompt
-     final_answer = LLM.generate(prompt=final_prompt, max_tokens=1024).message.strip()
-     parts = final_answer.split("SOURCE:", maxsplit=1)
-     if len(parts) > 1:
-         content = parts[0].strip()
-         sources = parts[1].strip()
-     else:
-         content = final_answer
-         sources = ""
-     return Document(
-         content=content,
-         metadata=DocMetaData(source="SOURCE: " + sources),
-     )
-
-
  def followup_to_standalone(
      LLM: LanguageModel, chat_history: List[Tuple[str, str]], question: str
  ) -> str:
langroid/utils/configuration.py CHANGED
@@ -8,6 +8,7 @@ class Settings(BaseSettings):
      progress: bool = False  # show progress spinners/bars?
      stream: bool = True  # stream output?
      cache: bool = True  # use cache?
+     cache_type: str = "redis"  # cache type: "redis" or "momento"
      interactive: bool = True  # interactive mode?
      gpt3_5: bool = True  # use GPT-3.5?
      nofunc: bool = False  # use model without function_call? (i.e. gpt-4)
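
The new `cache_type` flag is what `OpenAIGPT.__init__` above consults when choosing a cache backend. A minimal sketch of switching backends globally, assuming the module-level `settings` instance that the rest of the codebase imports from `langroid.utils.configuration`:

```python
from langroid.utils.configuration import settings

# Default is "redis"; switch to Momento before constructing any LLM clients.
settings.cache_type = "momento"
# OpenAIGPT instances created after this point will build a MomentoCache,
# reading MOMENTO_AUTH_TOKEN from the environment or .env file.
```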
langroid/utils/logging.py CHANGED
@@ -124,8 +124,6 @@ class RichFileLogger:
 
      @no_type_check
      def log(self, message: str) -> None:
-         self.file = open(self.log_file, "a")
-         self.console = Console(file=self.file, force_terminal=True, width=200)
-         self.console.print(message)
-         self.file.flush()
-         self.file.close()
+         with open(self.log_file, "a") as f:
+             console = Console(file=f, force_terminal=True, width=200)
+             console.print(message)
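
The rewritten `log()` still reopens the file on every call, but the `with` block guarantees the handle is flushed and closed even if `console.print` raises; the old version skipped `close()` on an exception and needlessly rebound `self.file` and `self.console` each time.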
langroid/vector_store/chromadb.py CHANGED
@@ -7,6 +7,7 @@ from langroid.embedding_models.base import (
      EmbeddingModel,
      EmbeddingModelsConfig,
  )
+ from langroid.embedding_models.models import OpenAIEmbeddingsConfig
  from langroid.mytypes import DocMetaData, Document
  from langroid.utils.configuration import settings
  from langroid.utils.output.printing import print_long_text
@@ -19,9 +20,7 @@ class ChromaDBConfig(VectorStoreConfig):
      type: str = "chroma"
      collection_name: str = "chroma-langroid"
      storage_path: str = ".chroma/data"
-     embedding: EmbeddingModelsConfig = EmbeddingModelsConfig(
-         model_type="openai",
-     )
+     embedding: EmbeddingModelsConfig = OpenAIEmbeddingsConfig()
      host: str = "127.0.0.1"
      port: int = 6333
 
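
The same substitution is made in `QdrantDBConfig` below: instead of instantiating the generic `EmbeddingModelsConfig` base with a `model_type` flag, the default embedding config is now the concrete `OpenAIEmbeddingsConfig` subclass, which carries the OpenAI-specific defaults.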
langroid/vector_store/qdrantdb.py CHANGED
@@ -19,6 +19,7 @@ from langroid.embedding_models.base import (
      EmbeddingModel,
      EmbeddingModelsConfig,
  )
+ from langroid.embedding_models.models import OpenAIEmbeddingsConfig
  from langroid.mytypes import Document
  from langroid.utils.configuration import settings
  from langroid.vector_store.base import VectorStore, VectorStoreConfig
@@ -32,9 +33,7 @@ class QdrantDBConfig(VectorStoreConfig):
 
      collection_name: str | None = None
      storage_path: str = ".qdrant/data"
-     embedding: EmbeddingModelsConfig = EmbeddingModelsConfig(
-         model_type="openai",
-     )
+     embedding: EmbeddingModelsConfig = OpenAIEmbeddingsConfig()
      distance: str = Distance.COSINE
 
 
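
A short sketch of how the new default behaves and how to override it, using only names that appear in this diff:

```python
from langroid.embedding_models.models import OpenAIEmbeddingsConfig
from langroid.vector_store.qdrantdb import QdrantDBConfig

# The embedding default is now the concrete OpenAI config:
config = QdrantDBConfig(collection_name="my-docs")
assert isinstance(config.embedding, OpenAIEmbeddingsConfig)

# An explicit override is still possible, since the field type
# remains the EmbeddingModelsConfig base:
config = QdrantDBConfig(collection_name="my-docs", embedding=OpenAIEmbeddingsConfig())
```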
langroid-0.1.28.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: langroid
- Version: 0.1.26
+ Version: 0.1.28
  Summary: Harness LLMs with Multi-Agent Programming
  License: MIT
  Author: Prasad Chalasani
@@ -30,6 +30,7 @@ Requires-Dist: mkdocs-literate-nav (>=0.6.0,<0.7.0)
  Requires-Dist: mkdocs-material (>=9.1.5,<10.0.0)
  Requires-Dist: mkdocs-section-index (>=0.3.5,<0.4.0)
  Requires-Dist: mkdocstrings[python] (>=0.21.2,<0.22.0)
+ Requires-Dist: momento (>=1.7.0,<2.0.0)
  Requires-Dist: mypy (>=1.2.0,<2.0.0)
  Requires-Dist: nltk (>=3.8.1,<4.0.0)
  Requires-Dist: openai (>=0.27.5,<0.28.0)
@@ -64,6 +65,7 @@ Description-Content-Type: text/markdown
  <div align="center">
 
  [![Pytest](https://github.com/langroid/langroid/actions/workflows/pytest.yml/badge.svg)](https://github.com/langroid/langroid/actions/workflows/pytest.yml)
+ [![codecov](https://codecov.io/gh/langroid/langroid/branch/main/graph/badge.svg?token=H94BX5F0TE)](https://codecov.io/gh/langroid/langroid)
  [![Lint](https://github.com/langroid/langroid/actions/workflows/validate.yml/badge.svg)](https://github.com/langroid/langroid/actions/workflows/validate.yml)
  [![Docs](https://github.com/langroid/langroid/actions/workflows/mkdocs-deploy.yml/badge.svg)](https://github.com/langroid/langroid/actions/workflows/mkdocs-deploy.yml)
  [![Static Badge](https://img.shields.io/badge/Documentation-blue?link=https%3A%2F%2Flangroid.github.io%2Flangroid%2F&link=https%3A%2F%2Flangroid.github.io%2Flangroid%2F)](https://langroid.github.io/langroid)
@@ -74,7 +76,7 @@ Description-Content-Type: text/markdown
  <h3 align="center">
    <a target="_blank"
    href="https://langroid.github.io/langroid/" rel="dofollow">
-     <strong>Explore the docs</strong></a>
+     <strong>Documentation</strong></a>
    &middot;
    <a target="_blank" href="https://github.com/langroid/langroid-examples" rel="dofollow">
    <strong>Examples Repo</strong></a>
@@ -96,17 +98,19 @@ collaboratively solve a problem by exchanging messages.
  This Multi-Agent paradigm is inspired by the
  [Actor Framework](https://en.wikipedia.org/wiki/Actor_model)
  (but you do not need to know anything about this!).
+ We welcome contributions -- see the [contributions](./CONTRIBUTING.md) document
+ for ideas on what to contribute.
 
 
  # :rocket: Demo
  Suppose you want to extract structured information about the key terms
  of a commercial lease document. You can easily do this with Langroid using a two-agent system,
  as we show in the [langroid-examples](https://github.com/langroid/langroid-examples/blob/main/examples/docqa/chat_multi_extract.py) repo.
- The demo showcases several features of Langroid:
+ The demo showcases just a few of the many features of Langroid, such as:
  - Multi-agent collaboration: `LeaseExtractor` is in charge of the task, and its LLM (GPT4) generates questions
    to be answered by the `DocAgent`.
- - Retrieval augmented question-answering: `DocAgent` LLM (GPT4) uses retrieval from a vector-store to
-   answer the `LeaseExtractor`'s questions.
+ - Retrieval-augmented question-answering with **source citation**: `DocAgent` LLM (GPT4) uses retrieval from a vector-store to
+   answer the `LeaseExtractor`'s questions, citing the specific excerpt supporting each answer.
  - Function-calling (also known as tool/plugin): When it has all the information it
    needs, the `LeaseExtractor` LLM presents the information in a structured
    format using a Function-call.
@@ -157,7 +161,8 @@ Here is what it looks like in action:
 
  # :gear: Installation and Setup
 
- ### Install `langroid`
+ ### Install `langroid`
+ Langroid requires Python 3.11+. We recommend using a virtual environment.
  Use `pip` to install `langroid` (from PyPi) to your virtual environment:
  ```bash
  pip install langroid
  ```
@@ -172,19 +177,30 @@ Note that this will install `torch` and `sentence-transformers` libraries.
 
  ### Set up environment variables (API keys, etc)
 
+ To get started, all you need is an OpenAI API Key.
+ If you don't have one, see [this OpenAI Page](https://help.openai.com/en/collections/3675940-getting-started-with-openai-api).
+ Currently only OpenAI models are supported. Others will be added later
+ (Pull Requests welcome!).
+
  In the root of the repo, copy the `.env-template` file to a new file `.env`:
  ```bash
  cp .env-template .env
  ```
- Then insert your OpenAI API Key. If you don't have one, see [this OpenAI Page](https://help.openai.com/en/collections/3675940-getting-started-with-openai-api).
+ Then insert your OpenAI API Key.
  Your `.env` file should look like this:
-
  ```bash
  OPENAI_API_KEY=your-key-here-without-quotes
  ```
 
- Currently only OpenAI models are supported. Others will be added later
- (Pull Requests welcome!).
+ Alternatively, you can set this as an environment variable in your shell
+ (you will need to do this every time you open a new shell):
+ ```bash
+ export OPENAI_API_KEY=your-key-here-without-quotes
+ ```
+
+
+ <details>
+ <summary><b>Optional Setup Instructions (click to expand)</b></summary>
 
  All of the below are optional and not strictly needed to run any of the examples.
@@ -198,6 +214,9 @@ All of the below are optional and not strictly needed to run any of the examples
  which is more than sufficient to try out Langroid and even beyond.
  If you don't set up these, Langroid will use a pure-python
  Redis in-memory cache via the [Fakeredis](https://fakeredis.readthedocs.io/en/latest/) library.
+ - **Momento** Serverless Caching of LLM API responses (as an alternative to Redis).
+   To use Momento instead of Redis, simply enter your Momento Token in the `.env` file,
+   as the value of `MOMENTO_AUTH_TOKEN` (see example file below).
  - **GitHub** Personal Access Token (required for apps that need to analyze git
    repos; token-based API calls are less rate-limited). See this
    [GitHub page](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens).
@@ -209,10 +228,11 @@ GITHUB_ACCESS_TOKEN=your-personal-access-token-no-quotes
  REDIS_PASSWORD=your-redis-password-no-quotes
  REDIS_HOST=your-redis-hostname-no-quotes
  REDIS_PORT=your-redis-port-no-quotes
+ MOMENTO_AUTH_TOKEN=your-momento-token-no-quotes # instead of the REDIS_* variables
  QDRANT_API_KEY=your-key
  QDRANT_API_URL=https://your.url.here:6333 # note port number must be included
  ```
-
+ </details>
 
  ---
 
@@ -533,9 +553,7 @@ folder of the `langroid-examples` repo.
 
  ---
 
- # :heart: Thank you to our supporters!
-
- [![Stargazers repo roster for @langroid/langroid](https://reporoster.com/stars/langroid/langroid)](https://github.com/langroid/langroid/stargazers)
+ # :heart: Thank you to our [supporters](https://github.com/langroid/langroid/stargazers)
 
  # Contributors
 
langroid-0.1.28.dist-info/RECORD CHANGED
@@ -14,14 +14,15 @@ langroid/agent/tool_message.py,sha256=7OdVcV7UyOZD2ihYgV1C_1fIwiWM-2pR8FFxoA1IgO
  langroid/agent_config.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  langroid/cachedb/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  langroid/cachedb/base.py,sha256=F-QSDlRXrC0tBRbxL397MX8hulfBMAnZNs1e9zH71OQ,790
+ langroid/cachedb/momento_cachedb.py,sha256=TG_6sYhKvgl2b7t1bAQHDs9pXvkS6iPCwo-9_pMHwhs,2339
  langroid/cachedb/redis_cachedb.py,sha256=xuQ96FAqcHTfK8PEt1tjrh1BkMWUjojFHIgjDfF3SnU,2369
  langroid/embedding_models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  langroid/embedding_models/base.py,sha256=176jDrjEAAhNzdFCG8pfossd8SAhvHR8Q5Y8pOOm0LI,983
  langroid/embedding_models/clustering.py,sha256=tZWElUqXl9Etqla0FAa7og96iDKgjqWjucZR_Egtp-A,6684
  langroid/embedding_models/models.py,sha256=1xcv9hqmCTsbUbS8v7XeZRsf25Tu79JUoSipIYpvNoo,2765
  langroid/language_models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- langroid/language_models/base.py,sha256=Cr42pTm8XwGgq0bKGiSvedbnXpCTW37BYPm4LRcfBVY,12678
- langroid/language_models/openai_gpt.py,sha256=tWRSnzswQlGurQbe1eGUs3HhgaJKe4xj_fshGEqWr78,17538
+ langroid/language_models/base.py,sha256=7FMiUyJ3Kf9Uiqvjh-lNGfwnCibY0SbnK4jdORCv3SM,12657
+ langroid/language_models/openai_gpt.py,sha256=UCufa4G4SS0uNbxa-PtIhor-nZjXypZB4cYZDtwyARU,17910
  langroid/language_models/utils.py,sha256=rmnSn-sJ3aKl_wBdeLPkck0Li4Ed6zkCxZYYl7n1V34,4668
  langroid/mytypes.py,sha256=YA42IJcooJnTxAwk-B4FmZ1hqzIIF1ZZKcpUKzBTGGo,1537
  langroid/parsing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -41,15 +42,15 @@ langroid/prompts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
  langroid/prompts/dialog.py,sha256=SpfiSyofSgy2pwD1YboHR_yHO3LEEMbv6j2sm874jKo,331
  langroid/prompts/prompts_config.py,sha256=EMK1Fm7EmS8y3CV4AkrVgn5K4NipiM4m7J8819W1KeM,98
  langroid/prompts/templates.py,sha256=8jmDiCN926mFMalMYRUUzYykFnVojVeSTazdDH9V1zA,5850
- langroid/prompts/transforms.py,sha256=JJ2_zqhgy87GrFMB0I0mF9AxnDpfMVWi5P8Cv1fnHLo,8935
+ langroid/prompts/transforms.py,sha256=GsQo1klGxUy0fACh6j0lTblk6XEl2erRnhRWlN2M4-c,2706
  langroid/scripts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  langroid/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- langroid/utils/configuration.py,sha256=h6aGfQzXk8UeZUo_dP7EhT1C4418I4KM2kYTlBs8_34,1498
+ langroid/utils/configuration.py,sha256=ApAAL8w3TRtAolEl-nHy5fegLwA4WffsytbEbbWl0Tg,1564
  langroid/utils/constants.py,sha256=_keF-e-GIwEFvBxKJbC1Ry0u3SVWQBk4HRX0NpV2Qq8,485
  langroid/utils/docker.py,sha256=kJQOLTgM0x9j9pgIIqp0dZNZCTvoUDhp6i8tYBq1Jr0,1105
  langroid/utils/llms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  langroid/utils/llms/strings.py,sha256=CSAX9Z6FQOLXOzbLMe_Opqtc3ruDAKTTk7cPqc6Blh0,263
- langroid/utils/logging.py,sha256=DLeErOyFE-ddrmzfvkbcxrQv_N4nKcNnQlVv9Cll2JE,3875
+ langroid/utils/logging.py,sha256=g9Sv-41pAmKu5kWeYHUBUMDq_HID_nrVkaRFTHTcrf4,3812
  langroid/utils/output/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  langroid/utils/output/printing.py,sha256=ilm__9nS3FoY1Y-6k11pIaFM5tID8Jqyfr8LxfX5QEo,1311
  langroid/utils/system.py,sha256=I20DaIP0LfRJKdV8R4s60h7c6IvEm0n98nvmzIBK4DA,1138
@@ -58,10 +59,10 @@ langroid/utils/web/login.py,sha256=1iz9eUAHa87vpKIkzwkmFa00avwFWivDSAr7QUhK7U0,2
  langroid/utils/web/selenium_login.py,sha256=mYI6EvVmne34N9RajlsxxRqJQJvV-WG4LGp6sEECHPw,1156
  langroid/vector_store/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  langroid/vector_store/base.py,sha256=QZx3NUNwf2I0r3A7iuoUHIRGbqt_pFGD0hq1R-Yg8iM,3740
- langroid/vector_store/chromadb.py,sha256=GPaXNPgPGIVUwYx2eO_-kreQoJ_33IFo13oigH5BP1c,5200
+ langroid/vector_store/chromadb.py,sha256=s5pQkKjaMP-Tt5A8M10EInFzttaALPbJAq7q4gf0TKg,5235
  langroid/vector_store/qdrant_cloud.py,sha256=3im4Mip0QXLkR6wiqVsjV1QvhSElfxdFSuDKddBDQ-4,188
- langroid/vector_store/qdrantdb.py,sha256=cnxCrp4yIdbJ7-pz7yRoepJhd29mOzwacV2C6O7G_AQ,8548
- langroid-0.1.26.dist-info/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
- langroid-0.1.26.dist-info/WHEEL,sha256=vVCvjcmxuUltf8cYhJ0sJMRDLr1XsPuxEId8YDzbyCY,88
- langroid-0.1.26.dist-info/METADATA,sha256=ozd74VngEPA1ZcQrREhERhw2Byb3h0wCuzAlaNJOsWo,21039
- langroid-0.1.26.dist-info/RECORD,,
+ langroid/vector_store/qdrantdb.py,sha256=Cnr1cLru9zTMexYRTW3lSTcmAPCtEYsjIxPXU5sO0dg,8583
+ langroid-0.1.28.dist-info/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
+ langroid-0.1.28.dist-info/WHEEL,sha256=vVCvjcmxuUltf8cYhJ0sJMRDLr1XsPuxEId8YDzbyCY,88
+ langroid-0.1.28.dist-info/METADATA,sha256=vFDi4ASj2m0PGFS9cak1fsIaNkcSgVDkXFQ7F2LZMlU,22064
+ langroid-0.1.28.dist-info/RECORD,,