langroid 0.2.0__py3-none-any.whl → 0.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
langroid/agent/base.py CHANGED
@@ -344,7 +344,7 @@ class Agent(ABC):
  return results
  if not settings.quiet:
  console.print(f"[red]{self.indent}", end="")
- print(f"[red]Agent: {results}")
+ print(f"[red]Agent: {escape(results)}")
  maybe_json = len(extract_top_level_json(results)) > 0
  self.callbacks.show_agent_response(
  content=results,
@@ -409,14 +409,12 @@ class Agent(ABC):
  isinstance(msg, ChatDocument) and msg.metadata.recipient == Entity.USER
  )

- interactive = (
- self.interactive if self.interactive is not None else settings.interactive
- )
- if self.default_human_response is not None and not need_human_response:
- # useful for automated testing
- user_msg = self.default_human_response
- elif not interactive and not need_human_response:
+ interactive = self.interactive or settings.interactive
+
+ if not interactive and not need_human_response:
  return None
+ elif self.default_human_response is not None:
+ user_msg = self.default_human_response
  else:
  if self.callbacks.get_user_response is not None:
  # ask user with empty prompt: no need for prompt
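The user-response logic above now folds the global `settings.interactive` flag into a single check. As a rough illustration of how that global flag is typically set (a sketch using langroid's configuration helpers; not taken from this diff):

    from langroid.utils.configuration import Settings, set_global

    # Hypothetical: run non-interactively at the global level; an agent whose
    # own `interactive` flag is also False then skips the human-input prompt
    # whenever no human response is explicitly needed.
    set_global(Settings(interactive=False))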
langroid/agent/chat_agent.py CHANGED
@@ -857,7 +857,7 @@ class ChatAgent(Agent):
  # we won't have citations yet, so we're done
  return
  if response.metadata.has_citation and not settings.quiet:
- print("[grey37]SOURCES:\n" + response.metadata.source + "[/grey37]")
+ print("[grey37]SOURCES:\n" + escape(response.metadata.source) + "[/grey37]")
  self.callbacks.show_llm_response(
  content=str(response.metadata.source),
  is_tool=False,
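Both escape-related hunks wrap model-generated text in `escape()` before handing it to rich's markup-aware printing, so stray square brackets are shown literally rather than parsed as markup. A minimal sketch of the difference, assuming `escape` here is `rich.markup.escape` (which is the standard choice; the exact import is not shown in this diff):

    from rich.console import Console
    from rich.markup import escape

    console = Console()
    results = "a list[int] of answers, plus [red]raw markup[/red] from the LLM"

    # Unescaped, rich would try to interpret the bracketed spans as markup tags
    # (which can misrender or error out); escape() prints the brackets literally.
    console.print(f"[red]Agent: {escape(results)}")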
langroid/agent/chat_document.py CHANGED
@@ -36,7 +36,8 @@ class StatusCode(str, Enum):
  STALLED = "STALLED"
  INF_LOOP = "INF_LOOP"
  KILL = "KILL"
- MAX_TURNS = "MAX_TURNS"
+ FIXED_TURNS = "FIXED_TURNS" # reached intended number of turns
+ MAX_TURNS = "MAX_TURNS" # hit max-turns limit
  MAX_COST = "MAX_COST"
  MAX_TOKENS = "MAX_TOKENS"
  TIMEOUT = "TIMEOUT"
langroid/agent/task.py CHANGED
@@ -5,6 +5,7 @@ import copy
  import logging
  import threading
  from collections import Counter, deque
+ from pathlib import Path
  from types import SimpleNamespace
  from typing import (
  Any,
@@ -39,6 +40,7 @@ from langroid.parsing.routing import parse_addressed_message
  from langroid.pydantic_v1 import BaseModel
  from langroid.utils.configuration import settings
  from langroid.utils.constants import (
+ AT, # regex for start of an addressed recipient e.g. "@"
  DONE,
  NO_ANSWER,
  PASS,
@@ -77,6 +79,7 @@ class TaskConfig(BaseModel):
  inf_loop_dominance_factor: float = 1.5
  inf_loop_wait_factor: int = 5
  restart_as_subtask: bool = False
+ logs_dir: str = "logs"


  class Task:
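The new `logs_dir` field makes the log directory configurable instead of the previously hard-coded `logs/` (see the `RichFileLogger` / `setup_file_logger` hunks further down). A sketch of how it might be used, assuming `agent` is a ChatAgent constructed elsewhere:

    from langroid.agent.task import Task, TaskConfig

    # Hypothetical: write this task's .log and .tsv files under "run_logs/"
    # instead of the default "logs/" directory.
    task = Task(
        agent,  # assumed: a ChatAgent built elsewhere
        name="planner",
        interactive=False,
        config=TaskConfig(logs_dir="run_logs"),
    )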
@@ -124,13 +127,14 @@ class Task:
  restart: bool = True,
  default_human_response: Optional[str] = None,
  interactive: bool = True,
- only_user_quits_root: bool = False,
+ only_user_quits_root: bool = True,
  erase_substeps: bool = False,
- allow_null_result: bool = True,
+ allow_null_result: bool = False,
  max_stalled_steps: int = 5,
  done_if_no_response: List[Responder] = [],
  done_if_response: List[Responder] = [],
  config: TaskConfig = TaskConfig(),
+ **kwargs: Any, # catch-all for any legacy params, for backwards compatibility
  ):
  """
  A task to be performed by an agent.
@@ -139,23 +143,29 @@ class Task:
  agent (Agent): agent associated with the task
  name (str): name of the task
  llm_delegate (bool):
- [Deprecated, not used; use `done_if_response`, `done_if_no_response`
- instead]
- Whether to delegate control to LLM; conceptually,
+ Whether to delegate "control" to LLM; conceptually,
  the "controlling entity" is the one "seeking" responses to its queries,
- and has a goal it is aiming to achieve. The "controlling entity" is
- either the LLM or the USER. (Note within a Task there is just one
+ and has a goal it is aiming to achieve, and decides when a task is done.
+ The "controlling entity" is either the LLM or the USER.
+ (Note within a Task there is just one
  LLM, and all other entities are proxies of the "User" entity).
+ See also: `done_if_response`, `done_if_no_response` for more granular
+ control of task termination.
  single_round (bool):
- [Deprecated: Use `done_if_response`, `done_if_no_response` instead].
- If true, task runs until one message by controller,
- and subsequent response by non-controller. If false, runs for the
- specified number of turns in `run`, or until `done()` is true.
+ If true, task runs until one message by "controller"
+ (i.e. LLM if `llm_delegate` is true, otherwise USER)
+ and subsequent response by non-controller [When a tool is involved,
+ this will not give intended results. See `done_if_response`,
+ `done_if_no_response` below].
+ termination]. If false, runs for the specified number of turns in
+ `run`, or until `done()` is true.
  One run of step() is considered a "turn".
+ See also: `done_if_response`, `done_if_no_response` for more granular
+ control of task termination.
  system_message (str): if not empty, overrides agent's system_message
  user_message (str): if not empty, overrides agent's user_message
  restart (bool): if true, resets the agent's message history *at every run*.
- default_human_response (str): default response from user; useful for
+ default_human_response (str|None): default response from user; useful for
  testing, to avoid interactive input from user.
  [Instead of this, setting `interactive` usually suffices]
  interactive (bool): if true, wait for human input after each non-human
@@ -166,18 +176,24 @@ class Task:
  case the system will wait for a user response. In other words, use
  `interactive=False` when you want a "largely non-interactive"
  run, with the exception of explicit user addressing.
- only_user_quits_root (bool): if true, only user can quit the root task.
- [This param is ignored & deprecated; Keeping for backward compatibility.
- Instead of this, setting `interactive` suffices]
+ only_user_quits_root (bool): if true, when interactive=True, only user can
+ quit the root task (Ignored when interactive=False).
  erase_substeps (bool): if true, when task completes, erase intermediate
  conversation with subtasks from this agent's `message_history`, and also
  erase all subtask agents' `message_history`.
  Note: erasing can reduce prompt sizes, but results in repetitive
  sub-task delegation.
- allow_null_result (bool): [Deprecated, may be removed in future.]
- If true, allow null (empty or NO_ANSWER)
- as the result of a step or overall task result.
- Optional, default is True.
+ allow_null_result (bool):
+ If true, allow null (empty or NO_ANSWER) as the result of a step or
+ overall task result.
+ Optional, default is False.
+ *Note:* In non-interactive mode, when this is set to True,
+ you can have a situation where an LLM generates (non-tool) text,
+ and no other responders have valid responses, and a "Null result"
+ is inserted as a dummy response from the User entity, so the LLM
+ will now respond to this Null result, and this will continue
+ until the LLM emits a DONE signal (if instructed to do so),
+ otherwise it can result in an infinite loop.
  max_stalled_steps (int): task considered done after this many consecutive
  steps with no progress. Default is 3.
  done_if_no_response (List[Responder]): consider task done if NULL
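The revised docstring repeatedly points to `done_if_response` / `done_if_no_response` as the more granular termination controls; a small sketch of that style (assumes `agent` exists; `Entity` comes from `langroid.mytypes`):

    from langroid.agent.task import Task
    from langroid.mytypes import Entity

    # Hypothetical: end the task as soon as the LLM produces any response,
    # rather than relying on single_round semantics.
    task = Task(
        agent,  # assumed: an agent built elsewhere
        interactive=False,
        done_if_response=[Entity.LLM],
    )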
@@ -234,36 +250,32 @@ class Task:
  self.tsv_logger: None | logging.Logger = None
  self.color_log: bool = False if settings.notebook else True

- self.step_progress = False # progress in current step?
  self.n_stalled_steps = 0 # how many consecutive steps with no progress?
+ # how many 2-step-apart alternations of no_answer step-result have we had,
+ # i.e. x1, N/A, x2, N/A, x3, N/A ...
+ self.n_no_answer_alternations = 0
+ self._no_answer_step: int = -1
+ self._step_idx = -1 # current step index
  self.max_stalled_steps = max_stalled_steps
  self.done_if_response = [r.value for r in done_if_response]
  self.done_if_no_response = [r.value for r in done_if_no_response]
  self.is_done = False # is task done (based on response)?
  self.is_pass_thru = False # is current response a pass-thru?
- self.task_progress = False # progress in current task (since run or run_async)?
  if name:
  # task name overrides name in agent config
  agent.config.name = name
  self.name = name or agent.config.name
  self.value: str = self.name

- if default_human_response is not None and default_human_response == "":
- interactive = False
- self.interactive = interactive
- self.agent.interactive = interactive
- self.message_history_idx = -1
- if interactive:
- only_user_quits_root = True
- else:
- default_human_response = default_human_response or ""
- only_user_quits_root = False
+ self.default_human_response = default_human_response
  if default_human_response is not None:
+ # only override agent's default_human_response if it is explicitly set
  self.agent.default_human_response = default_human_response
- self.default_human_response = default_human_response
- if self.interactive:
- self.agent.default_human_response = None
+ self.interactive = interactive
+ self.agent.interactive = interactive
  self.only_user_quits_root = only_user_quits_root
+ self.message_history_idx = -1
+
  # set to True if we want to collapse multi-turn conversation with sub-tasks into
  # just the first outgoing message and last incoming message.
  # Note this also completely erases sub-task agents' message_history.
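The constructor now records `default_human_response` on the task, only overrides the agent's value when it is explicitly passed, and no longer flips `interactive` implicitly. A sketch of the kind of scripted, automated-test setup this is aimed at (names are illustrative):

    from langroid.agent.task import Task

    # Hypothetical test harness: fully scripted run, with the "user" always
    # replying with an empty string instead of prompting at the terminal.
    task = Task(
        agent,  # assumed: an agent built elsewhere
        interactive=False,
        default_human_response="",
    )
    result = task.run("What is 2 + 3?", turns=2)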
@@ -300,17 +312,16 @@ class Task:
  self.turns = -1 # no limit
  self.llm_delegate = llm_delegate
  if llm_delegate:
- self.controller = Entity.LLM
  if self.single_round:
  # 0: User instructs (delegating to LLM);
- # 1: LLM asks;
+ # 1: LLM (as the Controller) asks;
  # 2: user replies.
  self.turns = 2
  else:
- self.controller = Entity.USER
  if self.single_round:
- self.turns = 1 # 0: User asks, 1: LLM replies.
-
+ # 0: User (as Controller) asks,
+ # 1: LLM replies.
+ self.turns = 1
  # other sub_tasks this task can delegate to
  self.sub_tasks: List[Task] = []
  self.caller: Task | None = None # which task called this task's `run` method
@@ -521,12 +532,18 @@ class Task:
  if self.caller is not None and self.caller.logger is not None:
  self.logger = self.caller.logger
  else:
- self.logger = RichFileLogger(f"logs/{self.name}.log", color=self.color_log)
+ self.logger = RichFileLogger(
+ str(Path(self.config.logs_dir) / f"{self.name}.log"),
+ color=self.color_log,
+ )

  if self.caller is not None and self.caller.tsv_logger is not None:
  self.tsv_logger = self.caller.tsv_logger
  else:
- self.tsv_logger = setup_file_logger("tsv_logger", f"logs/{self.name}.tsv")
+ self.tsv_logger = setup_file_logger(
+ "tsv_logger",
+ str(Path(self.config.logs_dir) / f"{self.name}.tsv"),
+ )
  header = ChatDocLoggerFields().tsv_header()
  self.tsv_logger.info(f" \tTask\tResponder\t{header}")

@@ -559,8 +576,10 @@ class Task:
  # so reset own agent and recursively for all sub-tasks
  self.reset_all_sub_tasks()

- self.task_progress = False
  self.n_stalled_steps = 0
+ self._no_answer_step = -1 # last step where the best explicit response was N/A
+ # how many N/A alternations have we had so far? (for Inf loop detection)
+ self.n_no_answer_alternations = 0
  self.max_cost = max_cost
  self.max_tokens = max_tokens
  self.session_id = session_id
@@ -588,6 +607,7 @@ class Task:
  turns = self.turns if turns < 0 else turns
  i = 0
  while True:
+ self._step_idx = i # used in step() below
  self.step()
  done, status = self.done()
  if done:
@@ -601,7 +621,17 @@ class Task:
  else max(turns, settings.max_turns)
  )
  if max_turns > 0 and i >= max_turns:
- status = StatusCode.MAX_TURNS
+ # Important to distinguish between:
+ # (a) intentional run for a
+ # fixed number of turns, where we expect the pending message
+ # at that stage to be the desired result, and
+ # (b) hitting max_turns limit, which is not intentional, and is an
+ # exception, resulting in a None task result
+ status = (
+ StatusCode.MAX_TURNS
+ if i == settings.max_turns
+ else StatusCode.FIXED_TURNS
+ )
  break
  if (
  self.config.inf_loop_cycle_len > 0
@@ -617,9 +647,7 @@ class Task:
  """
  )

- final_result = self.result()
- if final_result is not None:
- final_result.metadata.status = status
+ final_result = self.result(status)
  self._post_run_loop()
  return final_result

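The run loop now distinguishes an intentional fixed-turn run from tripping the global `settings.max_turns` safety limit; only the latter is treated as exceptional and, per the `result()` change further down, yields a `None` result. Roughly, under the same assumptions as the sketches above:

    from langroid.utils.configuration import settings

    # Hypothetical: run exactly 4 turns on purpose -> status FIXED_TURNS;
    # the pending message at that point is returned as the result.
    result = task.run("start", turns=4)

    # Hypothetical: no explicit `turns`, but a global cap of 10 turns is set;
    # hitting it -> status MAX_TURNS, and result(...) returns None.
    settings.max_turns = 10
    result = task.run("start")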
@@ -673,8 +701,10 @@ class Task:
  # so reset own agent and recursively for all sub-tasks
  self.reset_all_sub_tasks()

- self.task_progress = False
  self.n_stalled_steps = 0
+ self._no_answer_step = -1 # last step where the best explicit response was N/A
+ # how many N/A alternations have we had so far? (for Inf loop detection)
+ self.n_no_answer_alternations = 0
  self.max_cost = max_cost
  self.max_tokens = max_tokens
  self.session_id = session_id
@@ -698,6 +728,7 @@ class Task:
  turns = self.turns if turns < 0 else turns
  i = 0
  while True:
+ self._step_idx = i # used in step() below
  await self.step_async()
  await asyncio.sleep(0.01) # temp yield to avoid blocking
  done, status = self.done()
@@ -712,7 +743,17 @@ class Task:
  else max(turns, settings.max_turns)
  )
  if max_turns > 0 and i >= max_turns:
- status = StatusCode.MAX_TURNS
+ # Important to distinguish between:
+ # (a) intentional run for a
+ # fixed number of turns, where we expect the pending message
+ # at that stage to be the desired result, and
+ # (b) hitting max_turns limit, which is not intentional, and is an
+ # exception, resulting in a None task result
+ status = (
+ StatusCode.MAX_TURNS
+ if i == settings.max_turns
+ else StatusCode.FIXED_TURNS
+ )
  break
  if (
  self.config.inf_loop_cycle_len > 0
@@ -728,9 +769,7 @@ class Task:
  """
  )

- final_result = self.result()
- if final_result is not None:
- final_result.metadata.status = status
+ final_result = self.result(status)
  self._post_run_loop()
  return final_result

@@ -744,9 +783,6 @@ class Task:
  self.init(msg)
  # sets indentation to be printed prior to any output from agent
  self.agent.indent = self._indent
- if self.default_human_response is not None:
- self.agent.default_human_response = self.default_human_response
-
  self.message_history_idx = -1
  if isinstance(self.agent, ChatAgent):
  # mark where we are in the message history, so we can reset to this when
@@ -820,7 +856,6 @@ class Task:
  `step_async()`. Consider refactoring to avoid duplication.
  """
  self.is_done = False
- self.step_progress = False
  parent = self.pending_message
  recipient = (
  ""
@@ -860,6 +895,8 @@ class Task:
  responders.insert(0, Entity.USER)

  found_response = False
+ # (responder, result) from a responder who explicitly said NO_ANSWER
+ no_answer_response: None | Tuple[Responder, ChatDocument] = None
  for r in responders:
  self.is_pass_thru = False
  if not self._can_respond(r):
@@ -879,6 +916,8 @@ class Task:
  continue
  self.human_tried = r == Entity.USER
  result = self.response(r, turns)
+ if result and NO_ANSWER in result.content:
+ no_answer_response = (r, result)
  self.is_done = self._is_done_response(result, r)
  self.is_pass_thru = PASS in result.content if result else False
  if self.valid(result, r):
@@ -891,8 +930,15 @@ class Task:
  if self.is_done:
  # skip trying other responders in this step
  break
- if not found_response:
- self._process_invalid_step_result(parent)
+ if not found_response: # did not find a Non-NO_ANSWER response
+ if no_answer_response:
+ # even though there was no valid response from anyone in this step,
+ # if there was at least one who EXPLICITLY said NO_ANSWER, then
+ # we process that as a valid response.
+ r, result = no_answer_response
+ self._process_valid_responder_result(r, parent, result)
+ else:
+ self._process_invalid_step_result(parent)
  self._show_pending_message_if_debug()
  return self.pending_message

@@ -918,7 +964,6 @@ class Task:
  different context.
  """
  self.is_done = False
- self.step_progress = False
  parent = self.pending_message
  recipient = (
  ""
@@ -956,6 +1001,8 @@ class Task:
  responders.insert(0, Entity.USER)

  found_response = False
+ # (responder, result) from a responder who explicitly said NO_ANSWER
+ no_answer_response: None | Tuple[Responder, ChatDocument] = None
  for r in responders:
  if not self._can_respond(r):
  # create dummy msg for logging
@@ -974,6 +1021,8 @@ class Task:
  continue
  self.human_tried = r == Entity.USER
  result = await self.response_async(r, turns)
+ if result and NO_ANSWER in result.content:
+ no_answer_response = (r, result)
  self.is_done = self._is_done_response(result, r)
  self.is_pass_thru = PASS in result.content if result else False
  if self.valid(result, r):
@@ -987,7 +1036,14 @@ class Task:
  # skip trying other responders in this step
  break
  if not found_response:
- self._process_invalid_step_result(parent)
+ if no_answer_response:
+ # even though there was no valid response from anyone in this step,
+ # if there was at least one who EXPLICITLY said NO_ANSWER, then
+ # we process that as a valid response.
+ r, result = no_answer_response
+ self._process_valid_responder_result(r, parent, result)
+ else:
+ self._process_invalid_step_result(parent)
  self._show_pending_message_if_debug()
  return self.pending_message

@@ -999,6 +1055,18 @@ class Task:
  ) -> None:
  """Processes valid result from a responder, during a step"""

+ # in case the valid response was a NO_ANSWER,
+ if NO_ANSWER in result.content:
+ if self._no_answer_step == self._step_idx - 2:
+ # N/A two steps ago
+ self.n_no_answer_alternations += 1
+ else:
+ # reset alternations counter
+ self.n_no_answer_alternations = 0
+
+ # record the last step where the best explicit response was N/A
+ self._no_answer_step = self._step_idx
+
  # pending_sender is of type Responder,
  # i.e. it is either one of the agent's entities
  # OR a sub-task, that has produced a valid response.
@@ -1026,8 +1094,6 @@ class Task:
  parent.metadata.child_id = result.id()

  self.log_message(self.pending_sender, result, mark=True)
- self.step_progress = True
- self.task_progress = True
  if self.is_pass_thru:
  self.n_stalled_steps += 1
  else:
@@ -1049,11 +1115,13 @@ class Task:
  parent (ChatDocument|None): parent message of the current message
  """
  self.n_stalled_steps += 1
- if (not self.task_progress or self.allow_null_result) and not self.is_pass_thru:
- # There has been no progress at all in this task, so we
- # update the pending_message to a dummy NO_ANSWER msg
+ if self.allow_null_result and not self.is_pass_thru:
+ # Null step-result is allowed, and we're not in a "pass-thru" situation,
+ # so we update the pending_message to a dummy NO_ANSWER msg
  # from the entity 'opposite' to the current pending_sender,
- # so we show "progress" and avoid getting stuck in an infinite loop.
+ # so that the task can continue.
+ # CAUTION: unless the LLM is instructed to signal DONE at an appropriate
+ # time, this can result in an infinite loop.
  responder = (
  Entity.LLM if self.pending_sender == Entity.USER else Entity.USER
  )
@@ -1092,7 +1160,9 @@ class Task:
  max_cost=self.max_cost,
  max_tokens=self.max_tokens,
  )
- result_str = str(ChatDocument.to_LLMMessage(result))
+ result_str = ( # only used by callback to display content and possible tool
+ "NONE" if result is None else str(ChatDocument.to_LLMMessage(result))
+ )
  maybe_tool = len(extract_top_level_json(result_str)) > 0
  self.callbacks.show_subtask_response(
  task=e,
@@ -1180,16 +1250,23 @@ class Task:
  result = await response_fn(self.pending_message)
  return self._process_result_routing(result)

- def result(self) -> ChatDocument:
+ def result(self, status: StatusCode | None = None) -> ChatDocument | None:
  """
  Get result of task. This is the default behavior.
  Derived classes can override this.

  Note the result of a task is returned as if it is from the User entity.

+ Args:
+ status (StatusCode): status of the task when it ended
  Returns:
  ChatDocument: result of task
  """
+ if status in [StatusCode.STALLED, StatusCode.MAX_TURNS, StatusCode.INF_LOOP]:
+ # In these case we don't know (and don't want to try to guess)
+ # what the task result should be, so we return None
+ return None
+
  result_msg = self.pending_message

  content = result_msg.content if result_msg else ""
@@ -1201,7 +1278,6 @@ class Task:
  block = result_msg.metadata.block if result_msg else None
  recipient = result_msg.metadata.recipient if result_msg else ""
  tool_ids = result_msg.metadata.tool_ids if result_msg else []
- status = result_msg.metadata.status if result_msg else None

  # regardless of which entity actually produced the result,
  # when we return the result, we set entity to USER
@@ -1214,7 +1290,7 @@ class Task:
  source=Entity.USER,
  sender=Entity.USER,
  block=block,
- status=status,
+ status=status or (result_msg.metadata.status if result_msg else None),
  sender_name=self.name,
  recipient=recipient,
  tool_ids=tool_ids,
@@ -1294,6 +1370,9 @@ class Task:
  If the set of last (W * m) messages are the same as the
  set of m dominant messages, then we are likely in a loop.
  """
+ if self.n_no_answer_alternations > self.config.inf_loop_wait_factor:
+ return True
+
  max_cycle_len = self.config.inf_loop_cycle_len
  if max_cycle_len <= 0:
  # no loop detection
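Loop detection now also trips when step results alternate with NO_ANSWER more than `inf_loop_wait_factor` times (the `x1, N/A, x2, N/A, ...` pattern tracked above). A hedged sketch of tuning that knob via `TaskConfig`:

    from langroid.agent.task import Task, TaskConfig

    # Hypothetical: trip the N/A-alternation check after ~3 alternations
    # instead of the default inf_loop_wait_factor = 5.
    task = Task(
        agent,  # assumed: an agent built elsewhere
        interactive=False,
        config=TaskConfig(inf_loop_wait_factor=3),
    )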
@@ -1362,8 +1441,8 @@ class Task:
  and result.content in USER_QUIT_STRINGS
  and result.metadata.sender == Entity.USER
  )
- if self._level == 0 and self.only_user_quits_root:
- # for top-level task, only user can quit out
+ if self._level == 0 and self.interactive and self.only_user_quits_root:
+ # for top-level task, in interactive mode, only user can quit out
  return (user_quit, StatusCode.USER_QUIT if user_quit else StatusCode.OK)

  if self.is_done:
@@ -1405,11 +1484,6 @@ class Task:
  and self.caller.name != ""
  and result.metadata.recipient == self.caller.name
  )
- # or (
- # # Task controller is "stuck", has nothing to say
- # NO_ANSWER in result.content
- # and result.metadata.sender == self.controller
- # )
  or user_quit
  )
  return (final, StatusCode.OK)
@@ -1591,7 +1665,6 @@ def parse_routing(
  return True, addressee, None
  else:
  return False, addressee, content_to_send
- AT = "@"
  if (
  AT in content
  and (addressee_content := parse_addressed_message(content, AT))[0] is not None
langroid/agent/tools/rewind_tool.py CHANGED
@@ -82,8 +82,9 @@ class RewindTool(ToolMessage):
  cls(n=1, content="What are the 3 major causes of heart disease?"),
  (
  """
- I want to change my 2nd message to Bob, to say
- 'who wrote the book Grime and Banishment?'
+ Based on the conversation so far, I realize I would get a better
+ response from Bob if rephrase my 2nd message to him to:
+ 'Who wrote the book Grime and Banishment?'
  """,
  cls(n=2, content="who wrote the book 'Grime and Banishment'?"),
  ),
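For context, the text above is one of the few-shot examples attached to `RewindTool`; based on the fields shown in this diff, the tool itself is used roughly like this (a sketch, not taken from the diff):

    from langroid.agent.tools.rewind_tool import RewindTool

    # Hypothetical: rewind to the LLM's 2nd message and replace it; messages
    # that depended on it are cleared out via the lineage tracking added in 0.2.0.
    rewind = RewindTool(n=2, content="who wrote the book 'Grime and Banishment'?")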
langroid/language_models/mock_lm.py CHANGED
@@ -1,12 +1,16 @@
  """Mock Language Model for testing"""

- from typing import Dict, List, Optional, Union
+ from typing import Callable, Dict, List, Optional, Union

  import langroid.language_models as lm
  from langroid.language_models import LLMResponse
  from langroid.language_models.base import LanguageModel, LLMConfig


+ def none_fn(x: str) -> None | str:
+ return None
+
+
  class MockLMConfig(LLMConfig):
  """
  Mock Language Model Configuration.
@@ -17,7 +21,9 @@ class MockLMConfig(LLMConfig):
  """

  response_dict: Dict[str, str] = {}
+ response_fn: Callable[[str], None | str] = none_fn
  default_response: str = "Mock response"
+
  type: str = "mock"


@@ -27,6 +33,19 @@ class MockLM(LanguageModel):
  super().__init__(config)
  self.config: MockLMConfig = config

+ def _response(self, msg: str) -> LLMResponse:
+ # response is based on this fallback order:
+ # - response_dict
+ # - response_fn
+ # - default_response
+ return lm.LLMResponse(
+ message=self.config.response_dict.get(
+ msg,
+ self.config.response_fn(msg) or self.config.default_response,
+ ),
+ cached=False,
+ )
+
  def chat(
  self,
  messages: Union[str, List[lm.LLMMessage]],
@@ -38,13 +57,7 @@
  Mock chat function for testing
  """
  last_msg = messages[-1].content if isinstance(messages, list) else messages
- return lm.LLMResponse(
- message=self.config.response_dict.get(
- last_msg,
- self.config.default_response,
- ),
- cached=False,
- )
+ return self._response(last_msg)

  async def achat(
  self,
@@ -57,37 +70,19 @@
  Mock chat function for testing
  """
  last_msg = messages[-1].content if isinstance(messages, list) else messages
- return lm.LLMResponse(
- message=self.config.response_dict.get(
- last_msg,
- self.config.default_response,
- ),
- cached=False,
- )
+ return self._response(last_msg)

  def generate(self, prompt: str, max_tokens: int = 200) -> lm.LLMResponse:
  """
  Mock generate function for testing
  """
- return lm.LLMResponse(
- message=self.config.response_dict.get(
- prompt,
- self.config.default_response,
- ),
- cached=False,
- )
+ return self._response(prompt)

  async def agenerate(self, prompt: str, max_tokens: int = 200) -> LLMResponse:
  """
  Mock generate function for testing
  """
- return lm.LLMResponse(
- message=self.config.response_dict.get(
- prompt,
- self.config.default_response,
- ),
- cached=False,
- )
+ return self._response(prompt)

  def get_stream(self) -> bool:
  return False
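The mock LLM now resolves a reply via `response_dict`, then `response_fn`, then `default_response`. A small usage sketch, assuming the module path shown in the RECORD section below:

    from langroid.language_models.mock_lm import MockLM, MockLMConfig

    # Hypothetical: canned answer for one exact prompt, function-generated
    # answers otherwise, and a fixed fallback if the function returns None.
    mock = MockLM(
        MockLMConfig(
            response_dict={"ping": "pong"},
            response_fn=lambda msg: f"echo: {msg}",
            default_response="Mock response",
        )
    )
    print(mock.chat("ping").message)   # "pong"        (response_dict hit)
    print(mock.chat("hello").message)  # "echo: hello" (falls back to response_fn)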
langroid/utils/constants.py CHANGED
@@ -13,10 +13,11 @@ class Colors(BaseModel):
  RESET: str = "\033[0m"


- USER_QUIT_STRINGS = ["q", "x", "quit", "exit", "bye"]
  NO_ANSWER = "DO-NOT-KNOW"
  DONE = "DONE"
+ USER_QUIT_STRINGS = ["q", "x", "quit", "exit", "bye", DONE]
  PASS = "__PASS__"
  PASS_TO = PASS + ":"
  SEND_TO = "SEND:"
  TOOL = "TOOL"
+ AT = "@"
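Two small behavioral notes fall out of this hunk: a user reply of `DONE` now quits like `q`/`x`, and the `@`-addressing prefix is centralized as `AT` (which is why the local `AT = "@"` in `parse_routing` above was removed). Illustratively:

    from langroid.utils.constants import AT, DONE, USER_QUIT_STRINGS

    assert DONE in USER_QUIT_STRINGS  # "DONE" from the user now quits the task
    assert AT == "@"                  # shared prefix for "@recipient" addressing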
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: langroid
- Version: 0.2.0
+ Version: 0.2.2
  Summary: Harness LLMs with Multi-Agent Programming
  License: MIT
  Author: Prasad Chalasani
@@ -36,7 +36,7 @@ Provides-Extra: vecdbs
  Requires-Dist: aiohttp (>=3.9.1,<4.0.0)
  Requires-Dist: async-generator (>=1.10,<2.0)
  Requires-Dist: bs4 (>=0.0.1,<0.0.2)
- Requires-Dist: chainlit (>=1.0.400,<2.0.0) ; extra == "all" or extra == "chainlit"
+ Requires-Dist: chainlit (==1.1.202) ; extra == "all" or extra == "chainlit"
  Requires-Dist: chromadb (>=0.4.21,<=0.4.23) ; extra == "vecdbs" or extra == "all" or extra == "chromadb"
  Requires-Dist: colorlog (>=6.7.0,<7.0.0)
  Requires-Dist: docstring-parser (>=0.15,<0.16)
@@ -226,6 +226,10 @@ teacher_task.run()
  <details>
  <summary> <b>Click to expand</b></summary>

+ - **Jun 2024:**
+ - **0.2.0:** Improved lineage tracking, granular sub-task configs, and a new tool, `RewindTool`,
+ that lets an agent "rewind and redo" a past message (and all dependent messages are cleared out
+ thanks to the lineage tracking). Read notes [here](https://github.com/langroid/langroid/releases/tag/0.2.0).
  - **May 2024:**
  - **Slimmer langroid**: All document-parsers (i.e. pdf, doc, docx) and most
  vector-databases (except qdrant)
@@ -1,11 +1,11 @@
  langroid/__init__.py,sha256=z_fCOLQJPOw3LLRPBlFB5-2HyCjpPgQa4m4iY5Fvb8Y,1800
  langroid/agent/__init__.py,sha256=ll0Cubd2DZ-fsCMl7e10hf9ZjFGKzphfBco396IKITY,786
- langroid/agent/base.py,sha256=rqkf5FN1jO7IGqa_bvnQc37d8LZRal1RHVJe0Dvtlsc,37680
+ langroid/agent/base.py,sha256=eeYZ-NYbrepOjUVQS9K0nDhE8x2gKUNjgxFTA24mook,37560
  langroid/agent/batch.py,sha256=feRA_yRG768ElOQjrKEefcRv6Aefd_yY7qktuYUQDwc,10040
  langroid/agent/callbacks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  langroid/agent/callbacks/chainlit.py,sha256=UKG2_v4ktfkEaGvdouVRHEqQejEYya2Rli8jrP65TmA,22055
- langroid/agent/chat_agent.py,sha256=eRTrTjGlu36gTbdBLbQ5M5EtBMIcdEc9bjgMLTa40oQ,41506
- langroid/agent/chat_document.py,sha256=8yH7o0aMVtUDHh3InpEErjhlY6t4Lr6KQzBrAKcYsEM,11141
+ langroid/agent/chat_agent.py,sha256=nO6Yx5WvFsul5RmTP-HCdzeQPhccmzU_mDcPNdkzQ-s,41514
+ langroid/agent/chat_document.py,sha256=MwtNABK28tfSzqCeQlxoauT8uPn8oldU7dlnrX8aQ10,11232
  langroid/agent/helpers.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  langroid/agent/junk,sha256=LxfuuW7Cijsg0szAzT81OjWWv1PMNI-6w_-DspVIO2s,339
  langroid/agent/openai_assistant.py,sha256=rmGJD5n0eE7_O1EkPyXgHFMNGc3vb2GKweZMhzmRWvI,33068
@@ -32,7 +32,7 @@ langroid/agent/special/sql/utils/populate_metadata.py,sha256=1J22UsyEPKzwK0XlJZt
  langroid/agent/special/sql/utils/system_message.py,sha256=qKLHkvQWRQodTtPLPxr1GSLUYUFASZU8x-ybV67cB68,1885
  langroid/agent/special/sql/utils/tools.py,sha256=vFYysk6Vi7HJjII8B4RitA3pt_z3gkSglDNdhNVMiFc,1332
  langroid/agent/special/table_chat_agent.py,sha256=d9v2wsblaRx7oMnKhLV7uO_ujvk9gh59pSGvBXyeyNc,9659
- langroid/agent/task.py,sha256=ZTnG8h214FF3Vm54Eyl37j5OzKIHc313QbiUkMlofwM,67091
+ langroid/agent/task.py,sha256=2v2-LEUxwJo65bbcLLALKOnMGVC02IZyBuyukOvP5Nw,71431
  langroid/agent/tool_message.py,sha256=wIyZnUcZpxkiRPvM9O3MO3b5BBAdLEEan9kqPbvtApc,9743
  langroid/agent/tools/__init__.py,sha256=e-63cfwQNk_ftRKQwgDAJQK16QLbRVWDBILeXIc7wLk,402
  langroid/agent/tools/duckduckgo_search_tool.py,sha256=NhsCaGZkdv28nja7yveAhSK_w6l_Ftym8agbrdzqgfo,1935
@@ -42,7 +42,7 @@ langroid/agent/tools/google_search_tool.py,sha256=y7b-3FtgXf0lfF4AYxrZ3K5pH2dhid
  langroid/agent/tools/metaphor_search_tool.py,sha256=qj4gt453cLEX3EGW7nVzVu6X7LCdrwjSlcNY0qJW104,2489
  langroid/agent/tools/recipient_tool.py,sha256=NrLxIeQT-kbMv7AeYX0uqvGeMK4Q3fIDvG15OVzlgk8,9624
  langroid/agent/tools/retrieval_tool.py,sha256=2q2pfoYbZNfbWQ0McxrtmfF0ekGglIgRl-6uF26pa-E,871
- langroid/agent/tools/rewind_tool.py,sha256=aeu35_OjmCDTCgWH6nn8noXIC7ACD7-Rh-qh36wnBOg,5516
+ langroid/agent/tools/rewind_tool.py,sha256=G4DiXuOt2nQ2fU7qvtJMdLyyf-rK7RZwLsFxsAUfk-Y,5606
  langroid/agent/tools/run_python_code.py,sha256=BvoxYzzHijU-p4703n2iVlt5BCieR1oMSy50w0tQZAg,1787
  langroid/agent/tools/segment_extract_tool.py,sha256=__srZ_VGYLVOdPrITUM8S0HpmX4q7r5FHWMDdHdEv8w,1440
  langroid/agent_config.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -65,7 +65,7 @@ langroid/language_models/__init__.py,sha256=vrBtgR8Cq9UVfoI7nTms0IN7fd4y2JYpUP3G
  langroid/language_models/azure_openai.py,sha256=ncRCbKooqLVOY-PWQUIo9C3yTuKEFbAwyngXT_M4P7k,5989
  langroid/language_models/base.py,sha256=aVptuo_LpymIQFpJh836lcFCUpJNOV3ukxvQAQMCqFc,17426
  langroid/language_models/config.py,sha256=9Q8wk5a7RQr8LGMT_0WkpjY8S4ywK06SalVRjXlfCiI,378
- langroid/language_models/mock_lm.py,sha256=L0YqrrxLCePs_5MrK7rJ5-SajNxDtuVU_VvZVRfs9q4,2834
+ langroid/language_models/mock_lm.py,sha256=qdgj-wtbQBXlibo_0rIRfCt0hGTPRoxy1C4VjN6quI4,2707
  langroid/language_models/openai_gpt.py,sha256=RXnLKULuCSeDeUPQvaZ4naqJgMKcMZogCtRDLycd4j8,50714
  langroid/language_models/prompt_formatter/__init__.py,sha256=2-5cdE24XoFDhifOLl8yiscohil1ogbP1ECkYdBlBsk,372
  langroid/language_models/prompt_formatter/base.py,sha256=eDS1sgRNZVnoajwV_ZIha6cba5Dt8xjgzdRbPITwx3Q,1221
@@ -104,7 +104,7 @@ langroid/utils/__init__.py,sha256=Sruos2tB4G7Tn0vlblvYlX9PEGR0plI2uE0PJ4d_EC4,35
  langroid/utils/algorithms/__init__.py,sha256=WylYoZymA0fnzpB4vrsH_0n7WsoLhmuZq8qxsOCjUpM,41
  langroid/utils/algorithms/graph.py,sha256=JbdpPnUOhw4-D6O7ou101JLA3xPCD0Lr3qaPoFCaRfo,2866
  langroid/utils/configuration.py,sha256=A70LdvdMuunlLSGI1gBmBL5j6Jhz-1syNP8R4AdjqDc,3295
- langroid/utils/constants.py,sha256=eTiXfx8Nq2kmq0WChVLqV9C58UWju0NCIuW28sMgd5g,575
+ langroid/utils/constants.py,sha256=5WgyXjhRegNWB_BSYZsEZW-7mm-F_5bJPM4nuZ9qvNo,590
  langroid/utils/docker.py,sha256=kJQOLTgM0x9j9pgIIqp0dZNZCTvoUDhp6i8tYBq1Jr0,1105
  langroid/utils/globals.py,sha256=Az9dOFqR6n9CoTYSqa2kLikQWS0oCQ9DFQIQAnG-2q8,1355
  langroid/utils/llms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -128,8 +128,8 @@ langroid/vector_store/meilisearch.py,sha256=6frB7GFWeWmeKzRfLZIvzRjllniZ1cYj3Hmh
  langroid/vector_store/momento.py,sha256=QaPzUnTwlswoawGB-paLtUPyLRvckFXLfLDfvbTzjNQ,10505
  langroid/vector_store/qdrant_cloud.py,sha256=3im4Mip0QXLkR6wiqVsjV1QvhSElfxdFSuDKddBDQ-4,188
  langroid/vector_store/qdrantdb.py,sha256=wYOuu5c2vIKn9ZgvTXcAiZXMpV8AOXEWFAzI8S8UP-0,16828
- pyproject.toml,sha256=dZYHxf2D_qc8g68xt4KgMoa7tuxbY_JpLn-Pd541kuw,6964
- langroid-0.2.0.dist-info/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
- langroid-0.2.0.dist-info/METADATA,sha256=vcctmhiBgiBI4LlzmbJFMfRG7nwjD4B1IZvnDu0XR_M,52823
- langroid-0.2.0.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
- langroid-0.2.0.dist-info/RECORD,,
+ pyproject.toml,sha256=dJzlQA4tW7aK-naR-m3P-BrNk0KOzQ8S2w8EtfYJx4s,6963
+ langroid-0.2.2.dist-info/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
+ langroid-0.2.2.dist-info/METADATA,sha256=grLp3cBevoVDKZ1N2Hnj33aQHhvxLioyNx1NiE-Ukyo,53146
+ langroid-0.2.2.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+ langroid-0.2.2.dist-info/RECORD,,
pyproject.toml CHANGED
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "langroid"
- version = "0.2.0"
+ version = "0.2.2"
  description = "Harness LLMs with Multi-Agent Programming"
  authors = ["Prasad Chalasani <pchalasani@gmail.com>"]
  readme = "README.md"
@@ -23,7 +23,7 @@ pymysql = {version = "^1.1.0", optional = true}
  meilisearch-python-sdk = {version="^2.2.3", optional=true}
  litellm = {version = "^1.30.1", optional = true}
  metaphor-python = {version = "^0.1.23", optional = true}
- chainlit = {version = "^1.0.400", optional = true}
+ chainlit = {version = "1.1.202", optional = true}
  python-socketio = {version="^5.11.0", optional=true}
  neo4j = {version = "^5.14.1", optional = true}
  huggingface-hub = {version="^0.21.2", optional=true}