langroid 0.1.139__py3-none-any.whl → 0.1.219__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (97)
  1. langroid/__init__.py +70 -0
  2. langroid/agent/__init__.py +22 -0
  3. langroid/agent/base.py +120 -33
  4. langroid/agent/batch.py +134 -35
  5. langroid/agent/callbacks/__init__.py +0 -0
  6. langroid/agent/callbacks/chainlit.py +608 -0
  7. langroid/agent/chat_agent.py +164 -100
  8. langroid/agent/chat_document.py +19 -2
  9. langroid/agent/openai_assistant.py +20 -10
  10. langroid/agent/special/__init__.py +33 -10
  11. langroid/agent/special/doc_chat_agent.py +521 -108
  12. langroid/agent/special/lance_doc_chat_agent.py +258 -0
  13. langroid/agent/special/lance_rag/__init__.py +9 -0
  14. langroid/agent/special/lance_rag/critic_agent.py +136 -0
  15. langroid/agent/special/lance_rag/lance_rag_task.py +80 -0
  16. langroid/agent/special/lance_rag/query_planner_agent.py +180 -0
  17. langroid/agent/special/lance_tools.py +44 -0
  18. langroid/agent/special/neo4j/__init__.py +0 -0
  19. langroid/agent/special/neo4j/csv_kg_chat.py +174 -0
  20. langroid/agent/special/neo4j/neo4j_chat_agent.py +370 -0
  21. langroid/agent/special/neo4j/utils/__init__.py +0 -0
  22. langroid/agent/special/neo4j/utils/system_message.py +46 -0
  23. langroid/agent/special/relevance_extractor_agent.py +23 -7
  24. langroid/agent/special/retriever_agent.py +29 -174
  25. langroid/agent/special/sql/__init__.py +7 -0
  26. langroid/agent/special/sql/sql_chat_agent.py +47 -23
  27. langroid/agent/special/sql/utils/__init__.py +11 -0
  28. langroid/agent/special/sql/utils/description_extractors.py +95 -46
  29. langroid/agent/special/sql/utils/populate_metadata.py +28 -21
  30. langroid/agent/special/table_chat_agent.py +43 -9
  31. langroid/agent/task.py +423 -114
  32. langroid/agent/tool_message.py +67 -10
  33. langroid/agent/tools/__init__.py +8 -0
  34. langroid/agent/tools/duckduckgo_search_tool.py +66 -0
  35. langroid/agent/tools/google_search_tool.py +11 -0
  36. langroid/agent/tools/metaphor_search_tool.py +67 -0
  37. langroid/agent/tools/recipient_tool.py +6 -24
  38. langroid/agent/tools/sciphi_search_rag_tool.py +79 -0
  39. langroid/cachedb/__init__.py +6 -0
  40. langroid/embedding_models/__init__.py +24 -0
  41. langroid/embedding_models/base.py +9 -1
  42. langroid/embedding_models/models.py +117 -17
  43. langroid/embedding_models/protoc/embeddings.proto +19 -0
  44. langroid/embedding_models/protoc/embeddings_pb2.py +33 -0
  45. langroid/embedding_models/protoc/embeddings_pb2.pyi +50 -0
  46. langroid/embedding_models/protoc/embeddings_pb2_grpc.py +79 -0
  47. langroid/embedding_models/remote_embeds.py +153 -0
  48. langroid/language_models/__init__.py +22 -0
  49. langroid/language_models/azure_openai.py +47 -4
  50. langroid/language_models/base.py +26 -10
  51. langroid/language_models/config.py +5 -0
  52. langroid/language_models/openai_gpt.py +407 -121
  53. langroid/language_models/prompt_formatter/__init__.py +9 -0
  54. langroid/language_models/prompt_formatter/base.py +4 -6
  55. langroid/language_models/prompt_formatter/hf_formatter.py +135 -0
  56. langroid/language_models/utils.py +10 -9
  57. langroid/mytypes.py +10 -4
  58. langroid/parsing/__init__.py +33 -1
  59. langroid/parsing/document_parser.py +259 -63
  60. langroid/parsing/image_text.py +32 -0
  61. langroid/parsing/parse_json.py +143 -0
  62. langroid/parsing/parser.py +20 -7
  63. langroid/parsing/repo_loader.py +108 -46
  64. langroid/parsing/search.py +8 -0
  65. langroid/parsing/table_loader.py +44 -0
  66. langroid/parsing/url_loader.py +59 -13
  67. langroid/parsing/urls.py +18 -9
  68. langroid/parsing/utils.py +130 -9
  69. langroid/parsing/web_search.py +73 -0
  70. langroid/prompts/__init__.py +7 -0
  71. langroid/prompts/chat-gpt4-system-prompt.md +68 -0
  72. langroid/prompts/prompts_config.py +1 -1
  73. langroid/utils/__init__.py +10 -0
  74. langroid/utils/algorithms/__init__.py +3 -0
  75. langroid/utils/configuration.py +0 -1
  76. langroid/utils/constants.py +4 -0
  77. langroid/utils/logging.py +2 -5
  78. langroid/utils/output/__init__.py +15 -2
  79. langroid/utils/output/status.py +33 -0
  80. langroid/utils/pandas_utils.py +30 -0
  81. langroid/utils/pydantic_utils.py +446 -4
  82. langroid/utils/system.py +36 -1
  83. langroid/vector_store/__init__.py +34 -2
  84. langroid/vector_store/base.py +33 -2
  85. langroid/vector_store/chromadb.py +42 -13
  86. langroid/vector_store/lancedb.py +226 -60
  87. langroid/vector_store/meilisearch.py +7 -6
  88. langroid/vector_store/momento.py +3 -2
  89. langroid/vector_store/qdrantdb.py +82 -11
  90. {langroid-0.1.139.dist-info → langroid-0.1.219.dist-info}/METADATA +190 -129
  91. langroid-0.1.219.dist-info/RECORD +127 -0
  92. langroid/agent/special/recipient_validator_agent.py +0 -157
  93. langroid/parsing/json.py +0 -64
  94. langroid/utils/web/selenium_login.py +0 -36
  95. langroid-0.1.139.dist-info/RECORD +0 -103
  96. {langroid-0.1.139.dist-info → langroid-0.1.219.dist-info}/LICENSE +0 -0
  97. {langroid-0.1.139.dist-info → langroid-0.1.219.dist-info}/WHEEL +0 -0
langroid/agent/callbacks/chainlit.py (new file)
@@ -0,0 +1,608 @@
+"""
+Callbacks for Chainlit integration.
+"""
+
+import json
+import logging
+import textwrap
+from typing import Any, Callable, Dict, List, Literal, Optional, no_type_check
+
+from pydantic import BaseSettings
+
+try:
+    import chainlit as cl
+except ImportError:
+    raise ImportError(
+        """
+        You are attempting to use `chainlit`, which is not installed
+        by default with `langroid`.
+        Please install langroid with the `chainlit` extra using:
+        `pip install langroid[chainlit]` or
+        `poetry install -E chainlit`
+        depending on your scenario
+        """
+    )
+
+from chainlit import run_sync
+from chainlit.config import config
+from chainlit.logger import logger
+
+import langroid as lr
+import langroid.language_models as lm
+from langroid.utils.configuration import settings
+from langroid.utils.constants import NO_ANSWER
+
+# Attempt to reconfigure the root logger to your desired settings
+log_level = logging.INFO if settings.debug else logging.WARNING
+logger.setLevel(log_level)
+logging.basicConfig(level=log_level)
+
+logging.getLogger().setLevel(log_level)
+
+USER_TIMEOUT = 60_000
+SYSTEM = "System 🖥️"
+LLM = "LLM 🧠"
+AGENT = "Agent <>"
+YOU = "You 😃"
+ERROR = "Error 🚫"
+
+
+@no_type_check
+async def ask_helper(func, **kwargs):
+    res = await func(**kwargs).send()
+    while not res:
+        res = await func(**kwargs).send()
+    return res
+
+
+@no_type_check
+async def setup_llm() -> None:
+    """From the session `llm_settings`, create new LLMConfig and LLM objects,
+    save them in session state."""
+    llm_settings = cl.user_session.get("llm_settings", {})
+    model = llm_settings.get("chat_model")
+    context_length = llm_settings.get("context_length", 16_000)
+    temperature = llm_settings.get("temperature", 0.2)
+    timeout = llm_settings.get("timeout", 90)
+    logger.info(f"Using model: {model}")
+    llm_config = lm.OpenAIGPTConfig(
+        chat_model=model or lm.OpenAIChatModel.GPT4_TURBO,
+        # or, other possibilities for example:
+        # "litellm/ollama_chat/mistral"
+        # "litellm/ollama_chat/mistral:7b-instruct-v0.2-q8_0"
+        # "litellm/ollama/llama2"
+        # "local/localhost:8000/v1"
+        # "local/localhost:8000"
+        chat_context_length=context_length,  # adjust based on model
+        temperature=temperature,
+        timeout=timeout,
+    )
+    llm = lm.OpenAIGPT(llm_config)
+    cl.user_session.set("llm_config", llm_config)
+    cl.user_session.set("llm", llm)
+
+
+@no_type_check
+async def update_llm(settings: Dict[str, Any]) -> None:
+    """Update LLMConfig and LLM from settings, and save in session state."""
+    cl.user_session.set("llm_settings", settings)
+    await inform_llm_settings()
+    await setup_llm()
+
+
+async def make_llm_settings_widgets(
+    config: lm.OpenAIGPTConfig | None = None,
+) -> None:
+    config = config or lm.OpenAIGPTConfig()
+    await cl.ChatSettings(
+        [
+            cl.input_widget.TextInput(
+                id="chat_model",
+                label="Model Name (Default GPT4-Turbo)",
+                initial="",
+                placeholder="E.g. ollama/mistral or " "local/localhost:8000/v1",
+            ),
+            cl.input_widget.NumberInput(
+                id="context_length",
+                label="Chat Context Length",
+                initial=config.chat_context_length,
+                placeholder="E.g. 16000",
+            ),
+            cl.input_widget.Slider(
+                id="temperature",
+                label="LLM temperature",
+                min=0.0,
+                max=1.0,
+                step=0.1,
+                initial=config.temperature,
+                tooltip="Adjust based on model",
+            ),
+            cl.input_widget.Slider(
+                id="timeout",
+                label="Timeout (seconds)",
+                min=10,
+                max=200,
+                step=10,
+                initial=config.timeout,
+                tooltip="Timeout for LLM response, in seconds.",
+            ),
+        ]
+    ).send()  # type: ignore
+
+
+@no_type_check
+async def inform_llm_settings() -> None:
+    llm_settings: Dict[str, Any] = cl.user_session.get("llm_settings", {})
+    settings_dict = dict(
+        model=llm_settings.get("chat_model"),
+        context_length=llm_settings.get("context_length"),
+        temperature=llm_settings.get("temperature"),
+        timeout=llm_settings.get("timeout"),
+    )
+    await cl.Message(
+        author=SYSTEM,
+        content="LLM settings updated",
+        elements=[
+            cl.Text(
+                name="settings",
+                display="side",
+                content=json.dumps(settings_dict, indent=4),
+                language="json",
+            )
+        ],
+    ).send()
+
+
+async def add_instructions(
+    title: str = "Instructions",
+    content: str = "Enter your question/response in the dialog box below.",
+    display: Literal["side", "inline", "page"] = "inline",
+) -> None:
+    await cl.Message(
+        author="",
+        content=title if display == "side" else "",
+        elements=[
+            cl.Text(
+                name=title,
+                content=content,
+                display=display,
+            )
+        ],
+    ).send()
+
+
+async def add_image(
+    path: str,
+    name: str,
+    display: Literal["side", "inline", "page"] = "inline",
+) -> None:
+    await cl.Message(
+        author="",
+        content=name if display == "side" else "",
+        elements=[
+            cl.Image(
+                name=name,
+                path=path,
+                display=display,
+            )
+        ],
+    ).send()
+
+
+async def get_text_files(
+    message: cl.Message,
+    extensions: List[str] = [".txt", ".pdf", ".doc", ".docx"],
+) -> Dict[str, str]:
+    """Get dict (file_name -> file_path) from files uploaded in chat msg"""
+
+    files = [file for file in message.elements if file.path.endswith(tuple(extensions))]
+    return {file.name: file.path for file in files}
+
+
+def wrap_text_preserving_structure(text: str, width: int = 90) -> str:
+    """Wrap text preserving paragraph breaks. Typically used to
+    format an agent_response output, which may have long lines
+    with no newlines or paragraph breaks."""
+
+    paragraphs = text.split("\n\n")  # Split the text into paragraphs
+    wrapped_text = []
+
+    for para in paragraphs:
+        if para.strip():  # If the paragraph is not just whitespace
+            # Wrap this paragraph and add it to the result
+            wrapped_paragraph = textwrap.fill(para, width=width)
+            wrapped_text.append(wrapped_paragraph)
+        else:
+            # Preserve paragraph breaks
+            wrapped_text.append("")
+
+    return "\n\n".join(wrapped_text)
+
+
+class ChainlitCallbackConfig(BaseSettings):
+    user_has_agent_name: bool = True  # show agent name in front of "YOU" ?
+
+
+class ChainlitAgentCallbacks:
+    """Inject Chainlit callbacks into a Langroid Agent"""
+
+    last_step: Optional[cl.Step] = None  # used to display sub-steps under this
+    curr_step: Optional[cl.Step] = None  # used to update an initiated step
+    stream: Optional[cl.Step] = None  # pushed into openai_gpt.py to stream tokens
+    parent_agent: Optional[lr.Agent] = None  # used to get parent id, for step nesting
+
+    def __init__(
+        self,
+        agent: lr.Agent,
+        msg: cl.Message = None,
+        config: ChainlitCallbackConfig = ChainlitCallbackConfig(),
+    ):
+        """Add callbacks to the agent, and save the initial message,
+        so we can alter the display of the first user message.
+        """
+        agent.callbacks.start_llm_stream = self.start_llm_stream
+        agent.callbacks.cancel_llm_stream = self.cancel_llm_stream
+        agent.callbacks.finish_llm_stream = self.finish_llm_stream
+        agent.callbacks.show_llm_response = self.show_llm_response
+        agent.callbacks.show_agent_response = self.show_agent_response
+        agent.callbacks.get_user_response = self.get_user_response
+        agent.callbacks.get_last_step = self.get_last_step
+        agent.callbacks.set_parent_agent = self.set_parent_agent
+        agent.callbacks.show_error_message = self.show_error_message
+        agent.callbacks.show_start_response = self.show_start_response
+        self.config = config
+
+        self.agent: lr.Agent = agent
+        if msg is not None:
+            self.show_first_user_message(msg)
+
+    def _get_parent_id(self) -> str | None:
+        """Get step id under which we need to nest the current step:
+        This should be the parent Agent's last_step.
+        """
+        if self.parent_agent is None:
+            logger.info(f"No parent agent found for {self.agent.config.name}")
+            return None
+        logger.info(
+            f"Parent agent found for {self.agent.config.name} = "
+            f"{self.parent_agent.config.name}"
+        )
+        last_step = self.parent_agent.callbacks.get_last_step()
+        if last_step is None:
+            logger.info(f"No last step found for {self.parent_agent.config.name}")
+            return None
+        logger.info(
+            f"Last step found for {self.parent_agent.config.name} = {last_step.id}"
+        )
+        return last_step.id  # type: ignore
+
+    def set_parent_agent(self, parent: lr.Agent) -> None:
+        self.parent_agent = parent
+
+    def get_last_step(self) -> Optional[cl.Step]:
+        return self.last_step
+
+    def start_llm_stream(self) -> Callable[[str], None]:
+        """Returns a streaming fn that can be passed to the LLM class"""
+        logger.info(
+            f"""
+            Starting LLM stream for {self.agent.config.name}
+            under parent {self._get_parent_id()}
+            """
+        )
+        self.stream = cl.Step(
+            id=self.curr_step.id if self.curr_step is not None else None,
+            name=self._entity_name("llm"),
+            type="llm",
+            parent_id=self._get_parent_id(),
+        )
+        self.last_step = self.stream
+        self.curr_step = None
+        run_sync(self.stream.send())  # type: ignore
+
+        def stream_token(t: str) -> None:
+            if self.stream is None:
+                raise ValueError("Stream not initialized")
+            run_sync(self.stream.stream_token(t))
+
+        return stream_token
+
+    def cancel_llm_stream(self) -> None:
+        """Called when cached response found."""
+        self.last_step = None
+        if self.stream is not None:
+            run_sync(self.stream.remove())  # type: ignore
+
+    def finish_llm_stream(self, content: str, is_tool: bool = False) -> None:
+        """Update the stream, and display entire response in the right language."""
+        if self.agent.llm is None or self.stream is None:
+            raise ValueError("LLM or stream not initialized")
+        if content == "":
+            run_sync(self.stream.remove())  # type: ignore
+        else:
+            run_sync(self.stream.update())  # type: ignore
+        stream_id = self.stream.id if content else None
+        step = cl.Step(
+            id=stream_id,
+            name=self._entity_name("llm", tool=is_tool),
+            type="llm",
+            parent_id=self._get_parent_id(),
+            language="json" if is_tool else None,
+        )
+        step.output = textwrap.dedent(content) or NO_ANSWER
+        run_sync(step.update())  # type: ignore
+
+    def show_llm_response(
+        self,
+        content: str,
+        is_tool: bool = False,
+        cached: bool = False,
+    ) -> None:
+        """Show non-streaming LLM response."""
+        step = cl.Step(
+            id=self.curr_step.id if self.curr_step is not None else None,
+            name=self._entity_name("llm", tool=is_tool, cached=cached),
+            type="llm",
+            parent_id=self._get_parent_id(),
+            language="json" if is_tool else None,
+        )
+        self.last_step = step
+        self.curr_step = None
+        step.output = textwrap.dedent(content) or NO_ANSWER
+        run_sync(step.send())  # type: ignore
+
+    def show_error_message(self, error: str) -> None:
+        """Show error message as a step."""
+        step = cl.Step(
+            name=self.agent.config.name + f"({ERROR})",
+            type="run",
+            parent_id=self._get_parent_id(),
+            language="text",
+        )
+        self.last_step = step
+        step.output = error
+        run_sync(step.send())
+
+    def show_agent_response(self, content: str, language="text") -> None:
+        """Show message from agent (typically tool handler).
+        Agent response can be considered as a "step"
+        between LLM response and user response
+        """
+        step = cl.Step(
+            id=self.curr_step.id if self.curr_step is not None else None,
+            name=self._entity_name("agent"),
+            type="tool",
+            parent_id=self._get_parent_id(),
+            language=language,
+        )
+        if language == "text":
+            content = wrap_text_preserving_structure(content, width=90)
+        self.last_step = step
+        self.curr_step = None
+        step.output = content
+        run_sync(step.send())  # type: ignore
+
+    def show_start_response(self, entity: str) -> None:
+        """When there's a potentially long-running process, start a step,
+        so that the UI displays a spinner while the process is running."""
+        if self.curr_step is not None:
+            run_sync(self.curr_step.remove())  # type: ignore
+        step = cl.Step(
+            name=self._entity_name(entity),
+            type="run",
+            parent_id=self._get_parent_id(),
+            language="text",
+        )
+        step.output = ""
+        self.last_step = step
+        self.curr_step = step
+        run_sync(step.send())  # type: ignore
+
+    def _entity_name(
+        self, entity: str, tool: bool = False, cached: bool = False
+    ) -> str:
+        """Construct name of entity to display as Author of a step"""
+        tool_indicator = " => 🛠️" if tool else ""
+        cached = "(cached)" if cached else ""
+        match entity:
+            case "llm":
+                model = self.agent.config.llm.chat_model
+                return (
+                    self.agent.config.name + f"({LLM} {model} {tool_indicator}){cached}"
+                )
+            case "agent":
+                return self.agent.config.name + f"({AGENT})"
+            case "user":
+                if self.config.user_has_agent_name:
+                    return self.agent.config.name + f"({YOU})"
+                else:
+                    return YOU
+            case _:
+                return self.agent.config.name + f"({entity})"
+
+    def _get_user_response_buttons(self, prompt: str) -> str:
+        """Not used. Save for future reference"""
+        res = run_sync(
+            ask_helper(
+                cl.AskActionMessage,
+                content="Continue, exit or say something?",
+                actions=[
+                    cl.Action(
+                        name="continue",
+                        value="continue",
+                        label="✅ Continue",
+                    ),
+                    cl.Action(
+                        name="feedback",
+                        value="feedback",
+                        label="💬 Say something",
+                    ),
+                    cl.Action(name="exit", value="exit", label="🔚 Exit Conversation"),
+                ],
+            )
+        )
+        if res.get("value") == "continue":
+            return ""
+        if res.get("value") == "exit":
+            return "x"
+        if res.get("value") == "feedback":
+            return self.get_user_response(prompt)
+        return ""  # process the "feedback" case here
+
+    def get_user_response(self, prompt: str) -> str:
+        """Ask for user response, wait for it, and return it,
+        as a cl.Step rather than as a cl.Message so we can nest it
+        under the parent step.
+        """
+        return run_sync(self.ask_user_step(prompt=prompt, suppress_values=["c"]))
+
+    def show_user_response(self, message: str) -> None:
+        """Show user response as a step."""
+        step = cl.Step(
+            id=cl.context.current_step.id,
+            name=self._entity_name("user"),
+            type="run",
+            parent_id=self._get_parent_id(),
+        )
+        step.output = message
+        run_sync(step.send())
+
+    def show_first_user_message(self, msg: cl.Message):
+        """Show first user message as a step."""
+        step = cl.Step(
+            id=msg.id,
+            name=self._entity_name("user"),
+            type="run",
+            parent_id=self._get_parent_id(),
+        )
+        self.last_step = step
+        step.output = msg.content
+        run_sync(step.update())
+
+    async def ask_user_step(
+        self,
+        prompt: str,
+        timeout: int = USER_TIMEOUT,
+        suppress_values: List[str] = ["c"],
+    ) -> str:
+        """
+        Ask user for input, as a step nested under parent_id.
+        Rather than rely entirely on AskUserMessage (which doesn't let us
+        nest the question + answer under a step), we instead create fake
+        steps for the question and answer, and only rely on AskUserMessage
+        with an empty prompt to await user response.
+
+        Args:
+            prompt (str): Prompt to display to user
+            timeout (int): Timeout in seconds
+            suppress_values (List[str]): List of values to suppress from display
+                (e.g. "c" for continue)
+
+        Returns:
+            str: User response
+        """
+
+        # save hide_cot status to restore later
+        # (We should probably use a ctx mgr for this)
+        hide_cot = config.ui.hide_cot
+
+        # force hide_cot to False so that the user question + response is visible
+        config.ui.hide_cot = False
+
+        if prompt != "":
+            # Create a question step to ask user
+            question_step = cl.Step(
+                name=f"{self.agent.config.name} (AskUser ❓)",
+                type="run",
+                parent_id=self._get_parent_id(),
+            )
+            question_step.output = prompt
+            await question_step.send()  # type: ignore
+
+        # Use AskUserMessage to await user response,
+        # but with an empty prompt so the question is not visible,
+        # but still pauses for user input in the input box.
+        res = await cl.AskUserMessage(
+            content="",
+            timeout=timeout,
+        ).send()
+
+        if res is None:
+            run_sync(
+                cl.Message(
+                    content=f"Timed out after {USER_TIMEOUT} seconds. Exiting."
+                ).send()
+            )
+            return "x"
+
+        # The above will try to display user response in res
+        # but we create fake step with same id as res and
+        # erase it using empty output so it's not displayed
+        step = cl.Step(
+            id=res["id"],
+            name="TempUserResponse",
+            type="run",
+            parent_id=self._get_parent_id(),
+        )
+        step.output = ""
+        await step.update()  # type: ignore
+
+        # Finally, reproduce the user response at right nesting level
+        if res["output"] in suppress_values:
+            config.ui.hide_cot = hide_cot  # restore original value
+            return ""
+
+        step = cl.Step(
+            name=self._entity_name(entity="user"),
+            type="run",
+            parent_id=self._get_parent_id(),
+        )
+        step.output = res["output"]
+        await step.send()  # type: ignore
+        config.ui.hide_cot = hide_cot  # restore original value
+        return res["output"]
+
+
+class ChainlitTaskCallbacks(ChainlitAgentCallbacks):
+    """
+    Recursively inject ChainlitAgentCallbacks into a Langroid Task's agent and
+    agents of sub-tasks.
+    """
+
+    def __init__(
+        self,
+        task: lr.Task,
+        msg: cl.Message = None,
+        config: ChainlitCallbackConfig = ChainlitCallbackConfig(),
+    ):
+        """Inject callbacks recursively, ensuring msg is passed to the
+        top-level agent"""
+
+        super().__init__(task.agent, msg, config)
+        ChainlitTaskCallbacks._inject_callbacks(task)
+        self.task = task
+        self.task.callbacks.show_subtask_response = self.show_subtask_response
+
+    @staticmethod
+    def _inject_callbacks(
+        task: lr.Task, config: ChainlitCallbackConfig = ChainlitCallbackConfig()
+    ) -> None:
+        # recursively apply ChainlitAgentCallbacks to agents of sub-tasks
+        for t in task.sub_tasks:
+            ChainlitTaskCallbacks(t, config=config)
+
+    def show_subtask_response(
+        self, task: lr.Task, content: str, is_tool: bool = False
+    ) -> None:
+        """Show sub-task response as a step, nested at the right level."""
+
+        # The step should nest under the calling agent's last step
+        step = cl.Step(
+            name=self.task.agent.config.name + f"( ⏎ From {task.agent.config.name})",
+            type="run",
+            parent_id=self._get_parent_id(),
+            language="json" if is_tool else None,
+        )
+        step.output = content or NO_ANSWER
+        self.last_step = step
+        run_sync(step.send())
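
For orientation, the sketch below shows one way these new callbacks are typically wired into a Chainlit app: ChainlitTaskCallbacks is attached to a lr.Task inside the message handler, so LLM and agent activity renders as nested Chainlit steps. This is a minimal illustration, not part of the diff; the agent name, system message, and the use of Task.run_async are assumptions made for the example.

# chainlit_app.py -- minimal illustrative sketch (not from the diff)
import chainlit as cl
import langroid as lr
from langroid.agent.callbacks.chainlit import ChainlitTaskCallbacks, add_instructions


@cl.on_chat_start
async def on_chat_start() -> None:
    # hypothetical agent + task setup; any ChatAgentConfig would work here
    agent = lr.ChatAgent(
        lr.ChatAgentConfig(
            name="Assistant",  # assumed name, shown as the step author
            system_message="You are a helpful assistant.",
        )
    )
    task = lr.Task(agent)
    cl.user_session.set("task", task)
    await add_instructions(content="Enter your question below.")


@cl.on_message
async def on_message(msg: cl.Message) -> None:
    task = cl.user_session.get("task")
    # inject Chainlit callbacks into the task's agent (and any sub-task agents),
    # passing the first user message so it is displayed as a step
    ChainlitTaskCallbacks(task, msg)
    await task.run_async(msg.content)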