langroid 0.1.128__py3-none-any.whl → 0.1.130__py3-none-any.whl

This diff shows the changes between publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.
langroid/agent/task.py CHANGED
@@ -61,7 +61,7 @@ class Task:
         single_round: bool = False,
         system_message: str = "",
         user_message: str | None = "",
-        restart: bool = False,
+        restart: bool = True,
         default_human_response: Optional[str] = None,
         interactive: bool = True,
         only_user_quits_root: bool = True,
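
The only change to `task.py` is the `restart` default flipping from `False` to `True`. A minimal sketch of what this means for callers, assuming (from the parameter name; the diff itself does not show the behavior) that `restart` controls whether the agent's prior conversation state is cleared when the task runs; the agent setup below is illustrative:

```python
# Hedged sketch: the new default is equivalent to passing restart=True.
# ChatAgent/ChatAgentConfig are langroid classes; the exact semantics of
# `restart` (resetting agent state between runs) are assumed, not shown here.
from langroid.agent.chat_agent import ChatAgent, ChatAgentConfig
from langroid.agent.task import Task

agent = ChatAgent(ChatAgentConfig(name="Assistant"))

# 0.1.130: same as Task(agent, restart=True) -- each run starts fresh.
task = Task(agent, system_message="You are a helpful assistant.")

# To keep the 0.1.128 behavior, pass restart=False explicitly.
task_keep_state = Task(agent, restart=False)
```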
langroid/language_models/base.py CHANGED
@@ -362,18 +362,18 @@ class LanguageModel(ABC):
         pass
 
     @abstractmethod
-    def generate(self, prompt: str, max_tokens: int) -> LLMResponse:
+    def generate(self, prompt: str, max_tokens: int = 200) -> LLMResponse:
         pass
 
     @abstractmethod
-    async def agenerate(self, prompt: str, max_tokens: int) -> LLMResponse:
+    async def agenerate(self, prompt: str, max_tokens: int = 200) -> LLMResponse:
         pass
 
     @abstractmethod
     def chat(
         self,
         messages: Union[str, List[LLMMessage]],
-        max_tokens: int,
+        max_tokens: int = 200,
         functions: Optional[List[LLMFunctionSpec]] = None,
         function_call: str | Dict[str, str] = "auto",
     ) -> LLMResponse:
@@ -383,7 +383,7 @@ class LanguageModel(ABC):
     async def achat(
         self,
         messages: Union[str, List[LLMMessage]],
-        max_tokens: int,
+        max_tokens: int = 200,
         functions: Optional[List[LLMFunctionSpec]] = None,
         function_call: str | Dict[str, str] = "auto",
     ) -> LLMResponse:
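
Since `max_tokens` now defaults to 200 in all four abstract methods, callers can omit it. A usage sketch against the concrete `OpenAIGPT` subclass (whose matching signature changes appear further down); the prompts are illustrative and an `OPENAI_API_KEY` is assumed to be set:

```python
# Hedged sketch: max_tokens may now be omitted and defaults to 200.
from langroid.language_models.openai_gpt import OpenAIGPT, OpenAIGPTConfig

llm = OpenAIGPT(OpenAIGPTConfig())

# 0.1.128 required max_tokens explicitly, e.g. llm.generate(prompt, 100);
# 0.1.130 falls back to 200 tokens when it is not given.
response = llm.generate(prompt="List three prime numbers.")
print(response.message)

# The default can still be overridden per call.
short = llm.chat(messages="Say OK.", max_tokens=16)
print(short.message)
```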
langroid/language_models/openai_gpt.py CHANGED
@@ -49,9 +49,7 @@ class OpenAICompletionModel(str, Enum):
     """Enum for OpenAI Completion models"""
 
     TEXT_DA_VINCI_003 = "text-davinci-003" # deprecated
-    TEXT_ADA_001 = "text-ada-001" # deprecated
-    GPT4 = "gpt-4" # only works on chat-completion endpoint
-    GPT4_TURBO = "gpt-4-1106-preview" # only works on chat-completion endpoint
+    GPT3_5_TURBO_INSTRUCT = "gpt-3.5-turbo-instruct"
 
 
 _context_length: Dict[str, int] = {
@@ -92,9 +90,10 @@ class OpenAIGPTConfig(LLMConfig):
     use_chat_for_completion = True # do not change this, for OpenAI models!
     timeout: int = 20
     temperature: float = 0.2
+    seed: int | None = 42
     # these can be any model name that is served at an OpenAI-compatible API end point
     chat_model: str = OpenAIChatModel.GPT4
-    completion_model: str = OpenAICompletionModel.GPT4
+    completion_model: str = OpenAICompletionModel.GPT3_5_TURBO_INSTRUCT
 
     # all of the vars above can be set via env vars,
     # by upper-casing the name and prefixing with OPENAI_, e.g.
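
A short sketch of using the new `seed` field (default 42). Per the config comment above, fields can also be set via `OPENAI_`-prefixed environment variables, so an `OPENAI_SEED` variable is assumed to work the same way; the values here are illustrative:

```python
# Hedged sketch: configuring the new `seed` field on OpenAIGPTConfig.
from langroid.language_models.openai_gpt import OpenAIGPTConfig

cfg = OpenAIGPTConfig(seed=1234)   # fixed seed for more reproducible sampling
cfg_default = OpenAIGPTConfig()    # uses the default seed of 42

# Disable the seed entirely, mirroring what the library itself does below for
# litellm-proxied and "local/" models that reject a seed parameter:
cfg_no_seed = OpenAIGPTConfig(seed=None)
```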
@@ -123,6 +122,7 @@ class OpenAIGPTConfig(LLMConfig):
            """
        )
        litellm.telemetry = False
+       self.seed = None # some local mdls don't support seed
        keys_dict = litellm.validate_environment(self.chat_model)
        missing_keys = keys_dict.get("missing_keys", [])
        if len(missing_keys) > 0:
@@ -201,6 +201,7 @@ class OpenAIGPT(LanguageModel):
            # so we can just use `openai.*` methods directly,
            # and don't need a adaptor library like litellm
            self.config.litellm = False
+           self.config.seed = None # some models raise an error when seed is set
            # Extract the api_base from the model name after the "local/" prefix
            self.api_base = "http://" + self.config.chat_model.split("/", 1)[1]
        else:
@@ -551,7 +552,7 @@ class OpenAIGPT(LanguageModel):
            prompt_tokens=prompt_tokens, completion_tokens=completion_tokens, cost=cost
        )
 
-    def generate(self, prompt: str, max_tokens: int) -> LLMResponse:
+    def generate(self, prompt: str, max_tokens: int = 200) -> LLMResponse:
        try:
            return self._generate(prompt, max_tokens)
        except Exception as e:
@@ -598,7 +599,7 @@ class OpenAIGPT(LanguageModel):
        msg = response["choices"][0]["text"].strip()
        return LLMResponse(message=msg, cached=cached)
 
-    async def agenerate(self, prompt: str, max_tokens: int) -> LLMResponse:
+    async def agenerate(self, prompt: str, max_tokens: int = 200) -> LLMResponse:
        try:
            return await self._agenerate(prompt, max_tokens)
        except Exception as e:
@@ -688,7 +689,7 @@ class OpenAIGPT(LanguageModel):
    def chat(
        self,
        messages: Union[str, List[LLMMessage]],
-        max_tokens: int,
+        max_tokens: int = 200,
        functions: Optional[List[LLMFunctionSpec]] = None,
        function_call: str | Dict[str, str] = "auto",
    ) -> LLMResponse:
@@ -730,7 +731,7 @@ class OpenAIGPT(LanguageModel):
    async def achat(
        self,
        messages: Union[str, List[LLMMessage]],
-        max_tokens: int,
+        max_tokens: int = 200,
        functions: Optional[List[LLMFunctionSpec]] = None,
        function_call: str | Dict[str, str] = "auto",
    ) -> LLMResponse:
@@ -851,6 +852,8 @@ class OpenAIGPT(LanguageModel):
            temperature=self.config.temperature,
            stream=self.get_stream(),
        )
+       if self.config.seed is not None:
+           args.update(dict(seed=self.config.seed))
        # only include functions-related args if functions are provided
        # since the OpenAI API will throw an error if `functions` is None or []
        if functions is not None:
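
The last hunk only attaches `seed` to the API call when it is not `None`. A simplified, self-contained sketch of that conditional-kwargs pattern; the function and argument names here are illustrative, not langroid internals:

```python
# Hedged sketch of the conditional-kwargs pattern shown above: optional
# parameters are only added when set, so backends that reject them
# (e.g. some local models) never see the key at all.
from typing import Any, Dict, List, Optional


def build_chat_args(
    model: str,
    messages: List[Dict[str, str]],
    temperature: float = 0.2,
    seed: Optional[int] = 42,
) -> Dict[str, Any]:
    args: Dict[str, Any] = dict(model=model, messages=messages, temperature=temperature)
    if seed is not None:  # include seed only when explicitly set
        args.update(dict(seed=seed))
    return args


print(build_chat_args("gpt-4", [{"role": "user", "content": "hi"}]))  # seed included
print(build_chat_args("local/localhost:8000", [], seed=None))         # seed omitted
```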
langroid-0.1.128.dist-info/METADATA → langroid-0.1.130.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langroid
-Version: 0.1.128
+Version: 0.1.130
 Summary: Harness LLMs with Multi-Agent Programming
 License: MIT
 Author: Prasad Chalasani
@@ -123,7 +123,7 @@ Description-Content-Type: text/markdown
 </h3>
 
 `Langroid` is an intuitive, lightweight, extensible and principled
-Python framework to easily build LLM-powered applications.
+Python framework to easily build LLM-powered applications, from ex-CMU and UW-Madison researchers.
 You set up Agents, equip them with optional components (LLM,
 vector-store and tools/functions), assign them tasks, and have them
 collaboratively solve a problem by exchanging messages.
@@ -137,6 +137,12 @@ into simplifying the developer experience; it does not use `Langchain`.
 We welcome contributions -- See the [contributions](./CONTRIBUTING.md) document
 for ideas on what to contribute.
 
+
+Building LLM Applications? Prasad Chalasani is available for consulting
+(advisory/development): pchalasani at gmail dot com.
+
+Sponsorship is also accepted via [GitHub Sponsors](https://github.com/sponsors/langroid)
+
 **Questions, Feedback, Ideas? Join us on [Discord](https://discord.gg/ZU36McDgDs)!**
 
 # Quick glimpse of coding with Langroid
@@ -902,14 +908,8 @@ If you like this project, please give it a star ⭐ and 📢 spread the word in
 [![Share on Hacker News](https://img.shields.io/badge/-Share%20on%20Hacker%20News-orange)](https://news.ycombinator.com/submitlink?u=https%3A%2F%2Fgithub.com%2Flangroid%2Flangroid&t=Harness%20LLMs%20with%20Multi-Agent%20Programming)
 [![Share on Reddit](https://img.shields.io/badge/-Share%20on%20Reddit-blue)](https://www.reddit.com/submit?url=https%3A%2F%2Fgithub.com%2Flangroid%2Flangroid&title=Harness%20LLMs%20with%20Multi-Agent%20Programming)
 
-
-
-
 Your support will help build Langroid's momentum and community.
 
-
-
-
 # Langroid Co-Founders
 
 - [Prasad Chalasani](https://www.linkedin.com/in/pchalasani/) (IIT BTech/CS, CMU PhD/ML; Independent ML Consultant)
langroid-0.1.128.dist-info/RECORD → langroid-0.1.130.dist-info/RECORD RENAMED
@@ -20,7 +20,7 @@ langroid/agent/special/sql/utils/populate_metadata.py,sha256=zRjw31a1ZXvpx9bcmbt
 langroid/agent/special/sql/utils/system_message.py,sha256=qKLHkvQWRQodTtPLPxr1GSLUYUFASZU8x-ybV67cB68,1885
 langroid/agent/special/sql/utils/tools.py,sha256=6uB2424SLtmapui9ggcEr0ZTiB6_dL1-JRGgN8RK9Js,1332
 langroid/agent/special/table_chat_agent.py,sha256=zejrvv6GaspImVJ1oXWUTVN-h-kDjadTdBDkTRqrYKo,7691
-langroid/agent/task.py,sha256=rBMMUbFibGs4kMUlMksrE-D1ChYEsBNbKcB5t9Tr3xM,36261
+langroid/agent/task.py,sha256=bllafGA9AbVaeL6qk1a-1Wa2sc6fsuzeNEyhKHav3K8,36260
 langroid/agent/tool_message.py,sha256=_QS9_JnBdMBmpQw-ocu3PdJz_UzkFCytVky0UdIcMe0,6320
 langroid/agent/tools/__init__.py,sha256=6le5y_iPEHwh7Tli_0MtwCGOjy3tPQfAdfDC7WBg2e0,172
 langroid/agent/tools/extract_tool.py,sha256=u5lL9rKBzaLBOrRyLnTAZ97pQ1uxyLP39XsWMnpaZpw,3789
@@ -39,10 +39,10 @@ langroid/embedding_models/clustering.py,sha256=tZWElUqXl9Etqla0FAa7og96iDKgjqWju
 langroid/embedding_models/models.py,sha256=yEG-dasyqifjmiWees7tKWBPnXnuXjEEINCR9Oo6h0Y,3403
 langroid/language_models/__init__.py,sha256=fQYzSFE_thHeoPy0UPwl5splMSuQm6ANVedvqCxRebg,445
 langroid/language_models/azure_openai.py,sha256=_OOEoZOziI3NDOH_8t3qmh8IDWoHESQe-K6UN5cGtME,3946
-langroid/language_models/base.py,sha256=CMT4-BvsYlnhBTkK6urEXGWefcWbMn6SLKaJ2sTql2E,20558
+langroid/language_models/base.py,sha256=_EOBLXU6Z3SK7wjWNS4G4rtw6aRJ5ycs9M-Sry-GX5k,20582
 langroid/language_models/config.py,sha256=PXcmEUq52GCDj2sekt8F9E1flWyyNjP2S0LTRs7T6Kg,269
 langroid/language_models/openai_assistants.py,sha256=9K-DEAL2aSWHeXj2hwCo2RAlK9_1oCPtqX2u1wISCj8,36
-langroid/language_models/openai_gpt.py,sha256=nXJ4i5OAlc_PpcVX3us7zlYlnxgTV_GWKwP0vLMVVPg,38728
+langroid/language_models/openai_gpt.py,sha256=xLoaEG6vNCbCOfxfnT8lr-xwntQ9_D2gOaum6kL8h08,38899
 langroid/language_models/prompt_formatter/__init__.py,sha256=wj2e6j7R9d3m63HCbSDY1vosjFuhHLQVlgBrq8iqF38,197
 langroid/language_models/prompt_formatter/base.py,sha256=2y_GcwhstvB5ih3haS7l5Fv79jVnFJ_vEw1jqWJzB9k,1247
 langroid/language_models/prompt_formatter/llama2_formatter.py,sha256=YdcO88qyBeuMENVIVvVqSYuEpvYSTndUe_jd6hVTko4,2899
@@ -96,7 +96,7 @@ langroid/vector_store/meilisearch.py,sha256=h4e1MZJ9J3EnFfcUhLshK1Duwy1dpHWH4Ajt
 langroid/vector_store/momento.py,sha256=otoUrpgwEduFOCUhbFFSZWKEzWF2di1d4-m3n5PIuHI,9964
 langroid/vector_store/qdrant_cloud.py,sha256=3im4Mip0QXLkR6wiqVsjV1QvhSElfxdFSuDKddBDQ-4,188
 langroid/vector_store/qdrantdb.py,sha256=ZEJnlNIJwWIySGhPz3jBc6spcLCPqOcUDBYBisLF90I,11379
-langroid-0.1.128.dist-info/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
-langroid-0.1.128.dist-info/METADATA,sha256=EoEz6D8mt6NIv28bCV1PU3bstmzFIGijLCpRyK804IE,42072
-langroid-0.1.128.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
-langroid-0.1.128.dist-info/RECORD,,
+langroid-0.1.130.dist-info/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
+langroid-0.1.130.dist-info/METADATA,sha256=RN5edXTTDbJHRjAcEFrUjC57TG5ysl_YnDe_8J7o32c,42323
+langroid-0.1.130.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+langroid-0.1.130.dist-info/RECORD,,