langroid 0.1.181__tar.gz → 0.1.183__tar.gz

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
Files changed (120)
  1. {langroid-0.1.181 → langroid-0.1.183}/PKG-INFO +4 -2
  2. {langroid-0.1.181 → langroid-0.1.183}/langroid/__init__.py +32 -0
  3. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/base.py +28 -5
  4. langroid-0.1.183/langroid/agent/callbacks/chainlit.py +450 -0
  5. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/chat_agent.py +70 -12
  6. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/task.py +25 -8
  7. {langroid-0.1.181 → langroid-0.1.183}/langroid/language_models/base.py +17 -3
  8. {langroid-0.1.181 → langroid-0.1.183}/langroid/language_models/openai_gpt.py +4 -0
  9. {langroid-0.1.181 → langroid-0.1.183}/langroid/language_models/prompt_formatter/hf_formatter.py +6 -2
  10. langroid-0.1.183/langroid/prompts/chat-gpt4-system-prompt.md +68 -0
  11. langroid-0.1.183/langroid/utils/web/__init__.py +0 -0
  12. {langroid-0.1.181 → langroid-0.1.183}/pyproject.toml +7 -3
  13. {langroid-0.1.181 → langroid-0.1.183}/LICENSE +0 -0
  14. {langroid-0.1.181 → langroid-0.1.183}/README.md +0 -0
  15. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/__init__.py +0 -0
  16. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/batch.py +0 -0
  17. {langroid-0.1.181/langroid/agent/special/neo4j → langroid-0.1.183/langroid/agent/callbacks}/__init__.py +0 -0
  18. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/chat_document.py +0 -0
  19. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/helpers.py +0 -0
  20. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/junk +0 -0
  21. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/openai_assistant.py +0 -0
  22. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/special/__init__.py +0 -0
  23. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/special/doc_chat_agent.py +0 -0
  24. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/special/lance_doc_chat_agent.py +0 -0
  25. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/special/lance_rag/__init__.py +0 -0
  26. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/special/lance_rag/critic_agent.py +0 -0
  27. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/special/lance_rag/lance_rag_task.py +0 -0
  28. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/special/lance_rag/lance_tools.py +0 -0
  29. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/special/lance_rag/query_planner_agent.py +0 -0
  30. {langroid-0.1.181/langroid/agent/special/neo4j/utils → langroid-0.1.183/langroid/agent/special/neo4j}/__init__.py +0 -0
  31. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/special/neo4j/csv_kg_chat.py +0 -0
  32. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/special/neo4j/neo4j_chat_agent.py +0 -0
  33. {langroid-0.1.181/langroid/utils/llms → langroid-0.1.183/langroid/agent/special/neo4j/utils}/__init__.py +0 -0
  34. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/special/neo4j/utils/system_message.py +0 -0
  35. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/special/relevance_extractor_agent.py +0 -0
  36. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/special/retriever_agent.py +0 -0
  37. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/special/sql/__init__.py +0 -0
  38. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/special/sql/sql_chat_agent.py +0 -0
  39. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/special/sql/utils/__init__.py +0 -0
  40. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/special/sql/utils/description_extractors.py +0 -0
  41. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/special/sql/utils/populate_metadata.py +0 -0
  42. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/special/sql/utils/system_message.py +0 -0
  43. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/special/sql/utils/tools.py +0 -0
  44. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/special/table_chat_agent.py +0 -0
  45. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/tool_message.py +0 -0
  46. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/tools/__init__.py +0 -0
  47. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/tools/extract_tool.py +0 -0
  48. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/tools/generator_tool.py +0 -0
  49. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/tools/google_search_tool.py +0 -0
  50. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/tools/metaphor_search_tool.py +0 -0
  51. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/tools/recipient_tool.py +0 -0
  52. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/tools/run_python_code.py +0 -0
  53. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/tools/sciphi_search_rag_tool.py +0 -0
  54. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent/tools/segment_extract_tool.py +0 -0
  55. {langroid-0.1.181 → langroid-0.1.183}/langroid/agent_config.py +0 -0
  56. {langroid-0.1.181 → langroid-0.1.183}/langroid/cachedb/__init__.py +0 -0
  57. {langroid-0.1.181 → langroid-0.1.183}/langroid/cachedb/base.py +0 -0
  58. {langroid-0.1.181 → langroid-0.1.183}/langroid/cachedb/momento_cachedb.py +0 -0
  59. {langroid-0.1.181 → langroid-0.1.183}/langroid/cachedb/redis_cachedb.py +0 -0
  60. {langroid-0.1.181 → langroid-0.1.183}/langroid/embedding_models/__init__.py +0 -0
  61. {langroid-0.1.181 → langroid-0.1.183}/langroid/embedding_models/base.py +0 -0
  62. {langroid-0.1.181 → langroid-0.1.183}/langroid/embedding_models/clustering.py +0 -0
  63. {langroid-0.1.181 → langroid-0.1.183}/langroid/embedding_models/models.py +0 -0
  64. {langroid-0.1.181 → langroid-0.1.183}/langroid/language_models/__init__.py +0 -0
  65. {langroid-0.1.181 → langroid-0.1.183}/langroid/language_models/azure_openai.py +0 -0
  66. {langroid-0.1.181 → langroid-0.1.183}/langroid/language_models/config.py +0 -0
  67. {langroid-0.1.181 → langroid-0.1.183}/langroid/language_models/openai_assistants.py +0 -0
  68. {langroid-0.1.181 → langroid-0.1.183}/langroid/language_models/prompt_formatter/__init__.py +0 -0
  69. {langroid-0.1.181 → langroid-0.1.183}/langroid/language_models/prompt_formatter/base.py +0 -0
  70. {langroid-0.1.181 → langroid-0.1.183}/langroid/language_models/prompt_formatter/llama2_formatter.py +0 -0
  71. {langroid-0.1.181 → langroid-0.1.183}/langroid/language_models/utils.py +0 -0
  72. {langroid-0.1.181 → langroid-0.1.183}/langroid/mytypes.py +0 -0
  73. {langroid-0.1.181 → langroid-0.1.183}/langroid/parsing/__init__.py +0 -0
  74. {langroid-0.1.181 → langroid-0.1.183}/langroid/parsing/agent_chats.py +0 -0
  75. {langroid-0.1.181 → langroid-0.1.183}/langroid/parsing/code-parsing.md +0 -0
  76. {langroid-0.1.181 → langroid-0.1.183}/langroid/parsing/code_parser.py +0 -0
  77. {langroid-0.1.181 → langroid-0.1.183}/langroid/parsing/config.py +0 -0
  78. {langroid-0.1.181 → langroid-0.1.183}/langroid/parsing/document_parser.py +0 -0
  79. {langroid-0.1.181 → langroid-0.1.183}/langroid/parsing/json.py +0 -0
  80. {langroid-0.1.181 → langroid-0.1.183}/langroid/parsing/para_sentence_split.py +0 -0
  81. {langroid-0.1.181 → langroid-0.1.183}/langroid/parsing/parser.py +0 -0
  82. {langroid-0.1.181 → langroid-0.1.183}/langroid/parsing/repo_loader.py +0 -0
  83. {langroid-0.1.181 → langroid-0.1.183}/langroid/parsing/search.py +0 -0
  84. {langroid-0.1.181 → langroid-0.1.183}/langroid/parsing/spider.py +0 -0
  85. {langroid-0.1.181 → langroid-0.1.183}/langroid/parsing/table_loader.py +0 -0
  86. {langroid-0.1.181 → langroid-0.1.183}/langroid/parsing/url_loader.py +0 -0
  87. {langroid-0.1.181 → langroid-0.1.183}/langroid/parsing/url_loader_cookies.py +0 -0
  88. {langroid-0.1.181 → langroid-0.1.183}/langroid/parsing/urls.py +0 -0
  89. {langroid-0.1.181 → langroid-0.1.183}/langroid/parsing/utils.py +0 -0
  90. {langroid-0.1.181 → langroid-0.1.183}/langroid/parsing/web_search.py +0 -0
  91. {langroid-0.1.181 → langroid-0.1.183}/langroid/prompts/__init__.py +0 -0
  92. {langroid-0.1.181 → langroid-0.1.183}/langroid/prompts/dialog.py +0 -0
  93. {langroid-0.1.181 → langroid-0.1.183}/langroid/prompts/prompts_config.py +0 -0
  94. {langroid-0.1.181 → langroid-0.1.183}/langroid/prompts/templates.py +0 -0
  95. {langroid-0.1.181 → langroid-0.1.183}/langroid/prompts/transforms.py +0 -0
  96. {langroid-0.1.181 → langroid-0.1.183}/langroid/utils/__init__.py +0 -0
  97. {langroid-0.1.181 → langroid-0.1.183}/langroid/utils/algorithms/__init__.py +0 -0
  98. {langroid-0.1.181 → langroid-0.1.183}/langroid/utils/algorithms/graph.py +0 -0
  99. {langroid-0.1.181 → langroid-0.1.183}/langroid/utils/configuration.py +0 -0
  100. {langroid-0.1.181 → langroid-0.1.183}/langroid/utils/constants.py +0 -0
  101. {langroid-0.1.181 → langroid-0.1.183}/langroid/utils/docker.py +0 -0
  102. {langroid-0.1.181 → langroid-0.1.183}/langroid/utils/globals.py +0 -0
  103. {langroid-0.1.181/langroid/utils/web → langroid-0.1.183/langroid/utils/llms}/__init__.py +0 -0
  104. {langroid-0.1.181 → langroid-0.1.183}/langroid/utils/llms/strings.py +0 -0
  105. {langroid-0.1.181 → langroid-0.1.183}/langroid/utils/logging.py +0 -0
  106. {langroid-0.1.181 → langroid-0.1.183}/langroid/utils/output/__init__.py +0 -0
  107. {langroid-0.1.181 → langroid-0.1.183}/langroid/utils/output/printing.py +0 -0
  108. {langroid-0.1.181 → langroid-0.1.183}/langroid/utils/pandas_utils.py +0 -0
  109. {langroid-0.1.181 → langroid-0.1.183}/langroid/utils/pydantic_utils.py +0 -0
  110. {langroid-0.1.181 → langroid-0.1.183}/langroid/utils/system.py +0 -0
  111. {langroid-0.1.181 → langroid-0.1.183}/langroid/utils/web/login.py +0 -0
  112. {langroid-0.1.181 → langroid-0.1.183}/langroid/utils/web/selenium_login.py +0 -0
  113. {langroid-0.1.181 → langroid-0.1.183}/langroid/vector_store/__init__.py +0 -0
  114. {langroid-0.1.181 → langroid-0.1.183}/langroid/vector_store/base.py +0 -0
  115. {langroid-0.1.181 → langroid-0.1.183}/langroid/vector_store/chromadb.py +0 -0
  116. {langroid-0.1.181 → langroid-0.1.183}/langroid/vector_store/lancedb.py +0 -0
  117. {langroid-0.1.181 → langroid-0.1.183}/langroid/vector_store/meilisearch.py +0 -0
  118. {langroid-0.1.181 → langroid-0.1.183}/langroid/vector_store/momento.py +0 -0
  119. {langroid-0.1.181 → langroid-0.1.183}/langroid/vector_store/qdrant_cloud.py +0 -0
  120. {langroid-0.1.181 → langroid-0.1.183}/langroid/vector_store/qdrantdb.py +0 -0
{langroid-0.1.181 → langroid-0.1.183}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langroid
-Version: 0.1.181
+Version: 0.1.183
 Summary: Harness LLMs with Multi-Agent Programming
 License: MIT
 Author: Prasad Chalasani
@@ -10,6 +10,7 @@ Classifier: License :: OSI Approved :: MIT License
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
+Provides-Extra: chainlit
 Provides-Extra: hf-embeddings
 Provides-Extra: litellm
 Provides-Extra: metaphor
@@ -23,6 +24,7 @@ Requires-Dist: async-generator (>=1.10,<2.0)
 Requires-Dist: autopep8 (>=2.0.2,<3.0.0)
 Requires-Dist: black[jupyter] (>=23.3.0,<24.0.0)
 Requires-Dist: bs4 (>=0.0.1,<0.0.2)
+Requires-Dist: chainlit (>=1.0.200,<2.0.0) ; extra == "chainlit"
 Requires-Dist: chromadb (==0.3.21)
 Requires-Dist: colorlog (>=6.7.0,<7.0.0)
 Requires-Dist: docstring-parser (>=0.15,<0.16)
@@ -35,7 +37,7 @@ Requires-Dist: google-api-python-client (>=2.95.0,<3.0.0)
 Requires-Dist: halo (>=0.0.31,<0.0.32)
 Requires-Dist: jinja2 (>=3.1.2,<4.0.0)
 Requires-Dist: lancedb (>=0.4.1,<0.5.0)
-Requires-Dist: litellm (>=1.20.6,<2.0.0) ; extra == "litellm"
+Requires-Dist: litellm (>=1.22.3,<2.0.0) ; extra == "litellm"
 Requires-Dist: lxml (>=4.9.3,<5.0.0)
 Requires-Dist: meilisearch (>=0.28.3,<0.29.0)
 Requires-Dist: meilisearch-python-sdk (>=2.2.3,<3.0.0)
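
The metadata changes amount to a version bump, a new optional `chainlit` extra constrained to `chainlit (>=1.0.200,<2.0.0)`, and a raised `litellm` floor (1.20.6 → 1.22.3). A quick way to check what is actually installed locally; this snippet is an illustration, not part of the package:

# Illustrative check of installed package metadata (not part of this diff).
from importlib.metadata import PackageNotFoundError, version

print("langroid:", version("langroid"))  # expect 0.1.183 after upgrading

try:
    # chainlit is present only if langroid was installed with the new extra,
    # e.g. `pip install "langroid[chainlit]"`.
    print("chainlit:", version("chainlit"))
except PackageNotFoundError:
    print("chainlit extra not installed")
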
{langroid-0.1.181 → langroid-0.1.183}/langroid/__init__.py
@@ -20,6 +20,15 @@ from .agent.base import (
     AgentConfig,
 )
 
+from .agent.chat_document import (
+    ChatDocument,
+    ChatDocMetaData,
+)
+
+from .agent.tool_message import (
+    ToolMessage,
+)
+
 from .agent.chat_agent import (
     ChatAgent,
     ChatAgentConfig,
@@ -27,6 +36,19 @@ from .agent.chat_agent import (
 
 from .agent.task import Task
 
+try:
+    from .agent.callbacks.chainlit import (
+        ChainlitAgentCallbacks,
+        ChainlitTaskCallbacks,
+    )
+
+    chainlit_available = True
+    ChainlitAgentCallbacks
+    ChainlitTaskCallbacks
+except ImportError:
+    chainlit_available = False
+
+
 from .mytypes import (
     DocMetaData,
     Document,
@@ -47,8 +69,18 @@ __all__ = [
    "AgentConfig",
    "ChatAgent",
    "ChatAgentConfig",
+    "ChatDocument",
+    "ChatDocMetaData",
    "Task",
    "DocMetaData",
    "Document",
    "Entity",
+    "ToolMessage",
 ]
+if chainlit_available:
+    __all__.extend(
+        [
+            "ChainlitAgentCallbacks",
+            "ChainlitTaskCallbacks",
+        ]
+    )
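
With these re-exports, ChatDocument, ChatDocMetaData and ToolMessage are available directly from the top-level package, and the Chainlit callbacks appear there only when the optional dependency is installed. A minimal sketch of what that enables; the tool class and field values below are illustrative, not from this diff:

import langroid as lr

# New top-level re-exports in 0.1.183
meta = lr.ChatDocMetaData(sender=lr.Entity.LLM)
doc = lr.ChatDocument(content="example response", metadata=meta)


class PolygonAreaTool(lr.ToolMessage):
    # Illustrative ToolMessage subclass; request/purpose/fields are made up.
    request: str = "polygon_area"
    purpose: str = "Compute the area of a regular polygon"
    num_sides: int
    side_length: float


# The Chainlit callbacks are exported only when the `chainlit` extra is installed;
# the module-level flag set in the try/except above makes this easy to check.
if lr.chainlit_available:
    print(lr.ChainlitAgentCallbacks, lr.ChainlitTaskCallbacks)
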
{langroid-0.1.181 → langroid-0.1.183}/langroid/agent/base.py
@@ -4,6 +4,7 @@ import json
 import logging
 from abc import ABC
 from contextlib import ExitStack
+from types import SimpleNamespace
 from typing import (
     Any,
     Callable,
@@ -63,6 +64,10 @@ class AgentConfig(BaseSettings):
     show_stats: bool = True  # show token usage/cost stats?
 
 
+def noop_fn(*args: List[Any], **kwargs: Dict[str, Any]) -> None:
+    pass
+
+
 class Agent(ABC):
     """
     An Agent is an abstraction that encapsulates mainly two components:
@@ -91,6 +96,16 @@ class Agent(ABC):
         self.parser: Optional[Parser] = (
             Parser(config.parsing) if config.parsing else None
         )
+        self.callbacks = SimpleNamespace(
+            start_llm_stream=lambda: noop_fn,
+            cancel_llm_stream=noop_fn,
+            finish_llm_stream=noop_fn,
+            show_llm_response=noop_fn,
+            show_agent_response=noop_fn,
+            get_user_response=None,
+            get_last_step=noop_fn,
+            set_parent_agent=noop_fn,
+        )
 
     def entity_responders(
         self,
@@ -295,6 +310,7 @@ class Agent(ABC):
         if not settings.quiet:
             console.print(f"[red]{self.indent}", end="")
             print(f"[red]Agent: {results}")
+        self.callbacks.show_agent_response(content=results)
         sender_name = self.config.name
         if isinstance(msg, ChatDocument) and msg.function_call is not None:
             # if result was from handling an LLM `function_call`,
@@ -353,11 +369,18 @@ class Agent(ABC):
         elif not settings.interactive:
             user_msg = ""
         else:
-            user_msg = Prompt.ask(
-                f"[blue]{self.indent}Human "
-                "(respond or q, x to exit current level, "
-                f"or hit enter to continue)\n{self.indent}",
-            ).strip()
+            if self.callbacks.get_user_response is not None:
+                # ask user with empty prompt: no need for prompt
+                # since user has seen the conversation so far.
+                # But non-empty prompt can be useful when Agent
+                # uses a tool that requires user input, or in other scenarios.
+                user_msg = self.callbacks.get_user_response(prompt="")
+            else:
+                user_msg = Prompt.ask(
+                    f"[blue]{self.indent}Human "
+                    "(respond or q, x to exit current level, "
+                    f"or hit enter to continue)\n{self.indent}",
+                ).strip()
 
         tool_ids = []
         if msg is not None and isinstance(msg, ChatDocument):
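
The new `callbacks` namespace is the hook point the Chainlit integration (below) attaches to: every Agent starts with no-op callbacks, and a UI layer overwrites only the attributes it cares about. As a rough sketch, a non-Chainlit integration could do the same; the class below is hypothetical and not part of langroid:

import langroid as lr


class PlainTextCallbacks:
    """Hypothetical callbacks that route agent I/O through print()/input()."""

    def __init__(self, agent: lr.Agent) -> None:
        # Overwrite only the hooks we need; the others remain no-ops.
        agent.callbacks.show_llm_response = self.show_llm_response
        agent.callbacks.show_agent_response = self.show_agent_response
        # Once get_user_response is set, the user-response path calls it
        # instead of the Rich Prompt.ask() branch shown in the diff above.
        agent.callbacks.get_user_response = self.get_user_response

    def show_llm_response(self, content: str, is_tool: bool = False) -> None:
        print(f"[LLM] {content}")

    def show_agent_response(self, content: str) -> None:
        print(f"[Agent] {content}")

    def get_user_response(self, prompt: str) -> str:
        return input(prompt or "You: ").strip()


# Usage (assuming an agent with a configured LLM already exists):
#   PlainTextCallbacks(agent)
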
langroid-0.1.183/langroid/agent/callbacks/chainlit.py (new file)
@@ -0,0 +1,450 @@
+"""
+Callbacks for Chainlit integration.
+"""
+
+import json
+import logging
+import textwrap
+from typing import Any, Callable, Dict, List, Literal, Optional, no_type_check
+
+try:
+    import chainlit as cl
+except ImportError:
+    raise ImportError(
+        """
+        You are attempting to use `chainlit`, which is not installed
+        by default with `langroid`.
+        Please install langroid with the `chainlit` extra using:
+        `pip install langroid[chainlit]` or
+        `poetry install -E chainlit`
+        depending on your scenario
+        """
+    )
+
+from chainlit import run_sync
+from chainlit.config import config
+from chainlit.logger import logger
+
+import langroid as lr
+import langroid.language_models as lm
+from langroid.utils.configuration import settings
+from langroid.utils.constants import NO_ANSWER
+
+# Attempt to reconfigure the root logger to your desired settings
+log_level = logging.INFO if settings.debug else logging.WARNING
+logger.setLevel(log_level)
+
+USER_TIMEOUT = 60_000
+
+
+@no_type_check
+async def ask_helper(func, **kwargs):
+    res = await func(**kwargs).send()
+    while not res:
+        res = await func(**kwargs).send()
+    return res
+
+
+@no_type_check
+async def setup_llm() -> None:
+    llm_settings = cl.user_session.get("llm_settings", {})
+    model = llm_settings.get("chat_model")
+    context_length = llm_settings.get("context_length", 16_000)
+    temperature = llm_settings.get("temperature", 0.2)
+    timeout = llm_settings.get("timeout", 90)
+    print(f"Using model: {model}")
+    llm_config = lm.OpenAIGPTConfig(
+        chat_model=model or lm.OpenAIChatModel.GPT4_TURBO,
+        # or, other possibilities for example:
+        # "litellm/ollama_chat/mistral"
+        # "litellm/ollama_chat/mistral:7b-instruct-v0.2-q8_0"
+        # "litellm/ollama/llama2"
+        # "local/localhost:8000/v1"
+        # "local/localhost:8000"
+        chat_context_length=context_length,  # adjust based on model
+        temperature=temperature,
+        timeout=timeout,
+    )
+    llm = lm.OpenAIGPT(llm_config)
+    cl.user_session.set("llm_config", llm_config)
+    cl.user_session.set("llm", llm)
+
+
+@no_type_check
+async def update_agent(settings: Dict[str, Any], agent="agent") -> None:
+    cl.user_session.set("llm_settings", settings)
+    await inform_llm_settings()
+    await setup_llm()
+    agent = cl.user_session.get(agent)
+    if agent is None:
+        raise ValueError(f"Agent {agent} not found in user session")
+    agent.llm = cl.user_session.get("llm")
+    agent.config.llm = cl.user_session.get("llm_config")
+
+
+async def make_llm_settings_widgets() -> None:
+    await cl.ChatSettings(
+        [
+            cl.input_widget.TextInput(
+                id="chat_model",
+                label="Model Name (Default GPT4-Turbo)",
+                initial="",
+                placeholder="E.g. litellm/ollama_chat/mistral or "
+                "local/localhost:8000/v1",
+            ),
+            cl.input_widget.NumberInput(
+                id="context_length",
+                label="Chat Context Length",
+                initial=16_000,
+                placeholder="E.g. 16000",
+            ),
+            cl.input_widget.Slider(
+                id="temperature",
+                label="LLM temperature",
+                min=0.0,
+                max=1.0,
+                step=0.1,
+                initial=0.2,
+                tooltip="Adjust based on model",
+            ),
+            cl.input_widget.Slider(
+                id="timeout",
+                label="Timeout (seconds)",
+                min=10,
+                max=200,
+                step=10,
+                initial=90,
+                tooltip="Timeout for LLM response, in seconds.",
+            ),
+        ]
+    ).send()  # type: ignore
+
+
+@no_type_check
+async def inform_llm_settings() -> None:
+    llm_settings: Dict[str, Any] = cl.user_session.get("llm_settings", {})
+    settings_dict = dict(
+        model=llm_settings.get("chat_model"),
+        context_length=llm_settings.get("context_length"),
+        temperature=llm_settings.get("temperature"),
+        timeout=llm_settings.get("timeout"),
+    )
+    await cl.Message(
+        author="System",
+        content="LLM settings updated",
+        elements=[
+            cl.Text(
+                name="settings",
+                display="side",
+                content=json.dumps(settings_dict, indent=4),
+                language="json",
+            )
+        ],
+    ).send()
+
+
+async def add_instructions(
+    title: str = "Instructions",
+    content: str = "Enter your question/response in the dialog box below.",
+    display: Literal["side", "inline", "page"] = "inline",
+) -> None:
+    await cl.Message(
+        author="",
+        content=title if display == "side" else "",
+        elements=[
+            cl.Text(
+                name=title,
+                content=content,
+                display=display,
+            )
+        ],
+    ).send()
+
+
+async def ask_user_step(
+    name: str,
+    prompt: str,
+    parent_id: str | None = None,
+    timeout: int = USER_TIMEOUT,
+    suppress_values: List[str] = ["c"],
+) -> str:
+    """
+    Ask user for input, as a step nested under parent_id.
+    Rather than rely entirely on AskUserMessage (which doesn't let us
+    nest the question + answer under a step), we instead create fake
+    steps for the question and answer, and only rely on AskUserMessage
+    with an empty prompt to await user response.
+
+    Args:
+        name (str): Name of the agent
+        prompt (str): Prompt to display to user
+        parent_id (str): Id of the parent step under which this step should be nested
+            (If None, the step will be shown at root level)
+        timeout (int): Timeout in seconds
+        suppress_values (List[str]): List of values to suppress from display
+            (e.g. "c" for continue)
+
+    Returns:
+        str: User response
+    """
+
+    # save hide_cot status to restore later
+    # (We should probably use a ctx mgr for this)
+    hide_cot = config.ui.hide_cot
+
+    # force hide_cot to False so that the user question + response is visible
+    config.ui.hide_cot = False
+
+    if prompt != "":
+        # Create a question step to ask user
+        question_step = cl.Step(
+            name=f"{name} (AskUser ❓)",
+            type="run",
+            parent_id=parent_id,
+        )
+        question_step.output = prompt
+        await question_step.send()  # type: ignore
+
+    # Use AskUserMessage to await user response,
+    # but with an empty prompt so the question is not visible,
+    # but still pauses for user input in the input box.
+    res = await cl.AskUserMessage(
+        content="",
+        timeout=timeout,
+    ).send()
+
+    if res is None:
+        run_sync(
+            cl.Message(
+                content=f"Timed out after {USER_TIMEOUT} seconds. Exiting."
+            ).send()
+        )
+        return "x"
+
+    # The above will try to display user response in res
+    # but we create fake step with same id as res and
+    # erase it using empty output so it's not displayed
+    step = cl.Step(
+        id=res["id"], name="TempUserResponse", type="run", parent_id=parent_id
+    )
+    step.output = ""
+    await step.update()  # type: ignore
+
+    # Finally, reproduce the user response at right nesting level
+    if res["output"] in suppress_values:
+        config.ui.hide_cot = hide_cot  # restore original value
+        return ""
+
+    step = cl.Step(
+        name=f"{name}(You 😃)",
+        type="run",
+        parent_id=parent_id,
+    )
+    step.output = res["output"]
+    await step.send()  # type: ignore
+    config.ui.hide_cot = hide_cot  # restore original value
+    return res["output"]
+
+
+def wrap_text_preserving_structure(text: str, width: int = 90) -> str:
+    """Wrap text preserving paragraph breaks. Typically used to
+    format an agent_response output, which may have long lines
+    with no newlines or paragraph breaks."""
+
+    paragraphs = text.split("\n\n")  # Split the text into paragraphs
+    wrapped_text = []
+
+    for para in paragraphs:
+        if para.strip():  # If the paragraph is not just whitespace
+            # Wrap this paragraph and add it to the result
+            wrapped_paragraph = textwrap.fill(para, width=width)
+            wrapped_text.append(wrapped_paragraph)
+        else:
+            # Preserve paragraph breaks
+            wrapped_text.append("")
+
+    return "\n\n".join(wrapped_text)
+
+
+class ChainlitAgentCallbacks:
+    """Inject Chainlit callbacks into a Langroid Agent"""
+
+    last_step: Optional[cl.Step] = None  # used to display sub-steps under this
+    stream: Optional[cl.Step] = None  # pushed into openai_gpt.py to stream tokens
+    parent_agent: Optional[lr.Agent] = None  # used to get parent id, for step nesting
+
+    def __init__(self, agent: lr.Agent):
+        agent.callbacks.start_llm_stream = self.start_llm_stream
+        agent.callbacks.cancel_llm_stream = self.cancel_llm_stream
+        agent.callbacks.finish_llm_stream = self.finish_llm_stream
+        agent.callbacks.show_llm_response = self.show_llm_response
+        agent.callbacks.show_agent_response = self.show_agent_response
+        agent.callbacks.get_user_response = self.get_user_response
+        agent.callbacks.get_last_step = self.get_last_step
+        agent.callbacks.set_parent_agent = self.set_parent_agent
+        self.agent: lr.Agent = agent
+        self.name = agent.config.name
+
+    def _get_parent_id(self) -> str | None:
+        """Get step id under which we need to nest the current step:
+        This should be the parent Agent's last_step.
+        """
+        if self.parent_agent is None:
+            logger.info(f"No parent agent found for {self.name}")
+            return None
+        logger.info(
+            f"Parent agent found for {self.name} = {self.parent_agent.config.name}"
+        )
+        last_step = self.parent_agent.callbacks.get_last_step()
+        if last_step is None:
+            logger.info(f"No last step found for {self.parent_agent.config.name}")
+            return None
+        logger.info(
+            f"Last step found for {self.parent_agent.config.name} = {last_step.id}"
+        )
+        return last_step.id  # type: ignore
+
+    def set_parent_agent(self, parent: lr.Agent) -> None:
+        self.parent_agent = parent
+
+    def get_last_step(self) -> Optional[cl.Step]:
+        return self.last_step
+
+    def start_llm_stream(self) -> Callable[[str], None]:
+        """Returns a streaming fn that can be passed to the LLM class"""
+        logger.info(
+            f"""
+            Starting LLM stream for {self.agent.config.name}
+            under parent {self._get_parent_id()}
+            """
+        )
+        self.stream = cl.Step(
+            name=self.agent.config.name + "(LLM 🧠)",
+            type="llm",
+            parent_id=self._get_parent_id(),
+        )
+        self.last_step = self.stream
+        run_sync(self.stream.send())  # type: ignore
+
+        def stream_token(t: str) -> None:
+            if self.stream is None:
+                raise ValueError("Stream not initialized")
+            run_sync(self.stream.stream_token(t))
+
+        return stream_token
+
+    def cancel_llm_stream(self) -> None:
+        """Called when cached response found."""
+        self.last_step = None
+        if self.stream is not None:
+            run_sync(self.stream.remove())  # type: ignore
+
+    def finish_llm_stream(self, content: str, is_tool: bool = False) -> None:
+        """Update the stream, and display entire response in the right language."""
+        tool_indicator = " => 🛠️" if is_tool else ""
+        if self.agent.llm is None or self.stream is None:
+            raise ValueError("LLM or stream not initialized")
+        model = self.agent.llm.config.chat_model
+        if content == "":
+            run_sync(self.stream.remove())  # type: ignore
+        else:
+            run_sync(self.stream.update())  # type: ignore
+        stream_id = self.stream.id if content else None
+        step = cl.Step(
+            id=stream_id,
+            name=self.agent.config.name + f"(LLM {model} 🧠{tool_indicator})",
+            type="llm",
+            parent_id=self._get_parent_id(),
+            language="json" if is_tool else None,
+        )
+        step.output = content or NO_ANSWER
+        run_sync(step.update())  # type: ignore
+
+    def show_llm_response(self, content: str, is_tool: bool = False) -> None:
+        """Show non-streaming LLM response."""
+        model = self.agent.llm is not None and self.agent.llm.config.chat_model
+        tool_indicator = " => 🛠️" if is_tool else ""
+        step = cl.Step(
+            name=self.agent.config.name + f"(LLM {model} 🧠{tool_indicator})",
+            type="llm",
+            parent_id=self._get_parent_id(),
+            language="json" if is_tool else None,
+        )
+        self.last_step = step
+        step.output = content or NO_ANSWER
+        run_sync(step.send())  # type: ignore
+
+    def show_agent_response(self, content: str) -> None:
+        """Show message from agent (typically tool handler).
+        Agent response can be considered as a "step"
+        between LLM response and user response
+        """
+        step = cl.Step(
+            name=self.agent.config.name + "(Agent <>)",
+            type="tool",
+            parent_id=self._get_parent_id(),
+            language="text",
+        )
+        self.last_step = step
+        step.output = wrap_text_preserving_structure(content, width=90)
+        run_sync(step.send())  # type: ignore
+
+    def _get_user_response_buttons(self, prompt: str) -> str:
+        """Not used. Save for future reference"""
+        res = run_sync(
+            ask_helper(
+                cl.AskActionMessage,
+                content="Continue, exit or say something?",
+                actions=[
+                    cl.Action(
+                        name="continue",
+                        value="continue",
+                        label="✅ Continue",
+                    ),
+                    cl.Action(
+                        name="feedback",
+                        value="feedback",
+                        label="💬 Say something",
+                    ),
+                    cl.Action(name="exit", value="exit", label="🔚 Exit Conversation"),
+                ],
+            )
+        )
+        if res.get("value") == "continue":
+            return ""
+        if res.get("value") == "exit":
+            return "x"
+        if res.get("value") == "feedback":
+            return self.get_user_response(prompt)
+        return ""  # process the "feedback" case here
+
+    def get_user_response(self, prompt: str) -> str:
+        """Ask for user response, wait for it, and return it,
+        as a cl.Step rather than as a cl.Message so we can nest it
+        under the parent step.
+        """
+        return run_sync(
+            ask_user_step(
+                name=self.agent.config.name,
+                prompt=prompt,
+                parent_id=self._get_parent_id(),
+                suppress_values=["c"],
+            )
+        )


+class ChainlitTaskCallbacks:
+    """
+    Inject ChainlitCallbacks into a Langroid Task's agent and
+    agents of sub-tasks.
+    """
+
+    def __init__(self, task: lr.Task):
+        ChainlitTaskCallbacks._inject_callbacks(task)
+
+    @staticmethod
+    def _inject_callbacks(task: lr.Task) -> None:
+        # recursively apply ChainlitCallbacks to agents of sub-tasks
+        ChainlitAgentCallbacks(task.agent)
+        for t in task.sub_tasks:
+            ChainlitTaskCallbacks._inject_callbacks(t)
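
The intended wiring, then, is: build a Task as usual, hand it to ChainlitTaskCallbacks from inside a Chainlit app, and let the callbacks render LLM, agent and user turns as nested steps. A minimal sketch of such an app follows; the file name, agent config values, and use of `cl.make_async` are illustrative assumptions, not taken from this diff:

# app.py (hypothetical) -- run with: chainlit run app.py
import chainlit as cl

import langroid as lr


@cl.on_chat_start
async def on_chat_start() -> None:
    agent = lr.ChatAgent(
        lr.ChatAgentConfig(
            name="Assistant",
            system_message="You are a concise, helpful assistant.",
        )
    )
    task = lr.Task(agent)
    # Attach the Chainlit callbacks to this task's agent (and, recursively,
    # to the agents of any sub-tasks).
    lr.ChainlitTaskCallbacks(task)
    cl.user_session.set("task", task)


@cl.on_message
async def on_message(message: cl.Message) -> None:
    task = cl.user_session.get("task")
    # Task.run() is synchronous; run it off the event loop so the UI stays live.
    await cl.make_async(task.run)(message.content)
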