chainlit 0.2.111__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of chainlit might be problematic.

@@ -0,0 +1 @@
+ body{margin:0;padding:0;font-family:Inter,sans-serif;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}code{font-family:source-code-pro,Menlo,Monaco,Consolas,Courier New,monospace}.markdown-body *:first-child{margin-top:0}.markdown-body *:last-child{margin-bottom:0}.markdown-body p{white-space:break-spaces}.DraftEditor-editorContainer,.DraftEditor-root,.public-DraftEditor-content{height:inherit;text-align:initial}.public-DraftEditor-content[contenteditable=true]{-webkit-user-modify:read-write-plaintext-only}.DraftEditor-root{position:relative}.DraftEditor-editorContainer{background-color:#fff0;border-left:.1px solid transparent;position:relative;z-index:1}.public-DraftEditor-block{position:relative}.DraftEditor-alignLeft .public-DraftStyleDefault-block{text-align:left}.DraftEditor-alignLeft .public-DraftEditorPlaceholder-root{left:0;text-align:left}.DraftEditor-alignCenter .public-DraftStyleDefault-block{text-align:center}.DraftEditor-alignCenter .public-DraftEditorPlaceholder-root{margin:0 auto;text-align:center;width:100%}.DraftEditor-alignRight .public-DraftStyleDefault-block{text-align:right}.DraftEditor-alignRight .public-DraftEditorPlaceholder-root{right:0;text-align:right}.public-DraftEditorPlaceholder-root{color:#9197a3;position:absolute;width:100%;z-index:1}.public-DraftEditorPlaceholder-hasFocus{color:#bdc1c9}.DraftEditorPlaceholder-hidden{display:none}.public-DraftStyleDefault-block{position:relative;white-space:pre-wrap}.public-DraftStyleDefault-ltr{direction:ltr;text-align:left}.public-DraftStyleDefault-rtl{direction:rtl;text-align:right}.public-DraftStyleDefault-listLTR{direction:ltr}.public-DraftStyleDefault-listRTL{direction:rtl}.public-DraftStyleDefault-ol,.public-DraftStyleDefault-ul{margin:16px 0;padding:0}.public-DraftStyleDefault-depth0.public-DraftStyleDefault-listLTR{margin-left:1.5em}.public-DraftStyleDefault-depth0.public-DraftStyleDefault-listRTL{margin-right:1.5em}.public-DraftStyleDefault-depth1.public-DraftStyleDefault-listLTR{margin-left:3em}.public-DraftStyleDefault-depth1.public-DraftStyleDefault-listRTL{margin-right:3em}.public-DraftStyleDefault-depth2.public-DraftStyleDefault-listLTR{margin-left:4.5em}.public-DraftStyleDefault-depth2.public-DraftStyleDefault-listRTL{margin-right:4.5em}.public-DraftStyleDefault-depth3.public-DraftStyleDefault-listLTR{margin-left:6em}.public-DraftStyleDefault-depth3.public-DraftStyleDefault-listRTL{margin-right:6em}.public-DraftStyleDefault-depth4.public-DraftStyleDefault-listLTR{margin-left:7.5em}.public-DraftStyleDefault-depth4.public-DraftStyleDefault-listRTL{margin-right:7.5em}.public-DraftStyleDefault-unorderedListItem{list-style-type:square;position:relative}.public-DraftStyleDefault-unorderedListItem.public-DraftStyleDefault-depth0{list-style-type:disc}.public-DraftStyleDefault-unorderedListItem.public-DraftStyleDefault-depth1{list-style-type:circle}.public-DraftStyleDefault-orderedListItem{list-style-type:none;position:relative}.public-DraftStyleDefault-orderedListItem.public-DraftStyleDefault-listLTR:before{left:-36px;position:absolute;text-align:right;width:30px}.public-DraftStyleDefault-orderedListItem.public-DraftStyleDefault-listRTL:before{position:absolute;right:-36px;text-align:left;width:30px}.public-DraftStyleDefault-orderedListItem:before{content:counter(ol0) ". ";counter-increment:ol0}.public-DraftStyleDefault-orderedListItem.public-DraftStyleDefault-depth1:before{content:counter(ol1,lower-alpha) ". ";counter-increment:ol1}.public-DraftStyleDefault-orderedListItem.public-DraftStyleDefault-depth2:before{content:counter(ol2,lower-roman) ". ";counter-increment:ol2}.public-DraftStyleDefault-orderedListItem.public-DraftStyleDefault-depth3:before{content:counter(ol3) ". ";counter-increment:ol3}.public-DraftStyleDefault-orderedListItem.public-DraftStyleDefault-depth4:before{content:counter(ol4,lower-alpha) ". ";counter-increment:ol4}.public-DraftStyleDefault-depth0.public-DraftStyleDefault-reset{counter-reset:ol0}.public-DraftStyleDefault-depth1.public-DraftStyleDefault-reset{counter-reset:ol1}.public-DraftStyleDefault-depth2.public-DraftStyleDefault-reset{counter-reset:ol2}.public-DraftStyleDefault-depth3.public-DraftStyleDefault-reset{counter-reset:ol3}.public-DraftStyleDefault-depth4.public-DraftStyleDefault-reset{counter-reset:ol4}.react-resizable{position:relative}.react-resizable-handle{position:absolute;width:20px;height:20px;background-repeat:no-repeat;background-origin:content-box;box-sizing:border-box;background-image:url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjAgMCA2IDYiIHN0eWxlPSJiYWNrZ3JvdW5kLWNvbG9yOiNmZmZmZmYwMCIgeD0iMHB4IiB5PSIwcHgiIHdpZHRoPSI2cHgiIGhlaWdodD0iNnB4Ij48ZyBvcGFjaXR5PSIwLjMwMiI+PHBhdGggZD0iTSA2IDYgTCAwIDYgTCAwIDQuMiBMIDQgNC4yIEwgNC4yIDQuMiBMIDQuMiAwIEwgNiAwIEwgNiA2IEwgNiA2IFoiIGZpbGw9IiMwMDAwMDAiLz48L2c+PC9zdmc+);background-position:bottom right;padding:0 3px 3px 0}.react-resizable-handle-sw{bottom:0;left:0;cursor:sw-resize;transform:rotate(90deg)}.react-resizable-handle-se{bottom:0;right:0;cursor:se-resize}.react-resizable-handle-nw{top:0;left:0;cursor:nw-resize;transform:rotate(180deg)}.react-resizable-handle-ne{top:0;right:0;cursor:ne-resize;transform:rotate(270deg)}.react-resizable-handle-w,.react-resizable-handle-e{top:50%;margin-top:-10px;cursor:ew-resize}.react-resizable-handle-w{left:0;transform:rotate(135deg)}.react-resizable-handle-e{right:0;transform:rotate(315deg)}.react-resizable-handle-n,.react-resizable-handle-s{left:50%;margin-left:-10px;cursor:ns-resize}.react-resizable-handle-n{top:0;transform:rotate(225deg)}.react-resizable-handle-s{bottom:0;transform:rotate(45deg)}
@@ -4,7 +4,7 @@
  <meta charset="UTF-8" />
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
  <!-- TAG INJECTION PLACEHOLDER -->
- <link rel="icon" type="image/x-icon" href="favicon.svg" />
+ <link rel="icon" type="image/svg+xml" href="/favicon.svg" />
  <link rel="preconnect" href="https://fonts.googleapis.com" />
  <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin />
  <link
@@ -14,8 +14,8 @@
  <script>
  const global = globalThis;
  </script>
- <script type="module" crossorigin src="/assets/index-4d8f8873.js"></script>
- <link rel="stylesheet" href="/assets/index-bdffdaa0.css">
+ <script type="module" crossorigin src="/assets/index-0b7e367e.js"></script>
+ <link rel="stylesheet" href="/assets/index-0cc9e355.css">
  </head>
  <body>
  <div id="root"></div>
chainlit/hello.py CHANGED
@@ -4,9 +4,9 @@ from chainlit import AskUserMessage, Message, on_chat_start
 
 
  @on_chat_start
- def main():
-     res = AskUserMessage(content="What is your name?", timeout=30).send()
+ async def main():
+     res = await AskUserMessage(content="What is your name?", timeout=30).send()
      if res:
-         Message(
+         await Message(
              content=f"Your name is: {res['content']}.\nChainlit installation is working!\nYou can now start building your own chainlit apps!",
          ).send()
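
Note: hello.py illustrates the headline change in 0.3.0: the messaging API is now async, so handlers must be coroutines and every send() must be awaited. A minimal migration sketch for a 0.2.x-style handler, assuming the decorators keep the shapes shown in the diff above:

    import chainlit as cl

    @cl.on_chat_start
    async def main():
        # send() must now be awaited; it returns the user's reply,
        # or None if the 30-second timeout expires.
        res = await cl.AskUserMessage(content="What is your name?", timeout=30).send()
        if res:
            await cl.Message(content=f"Hello {res['content']}!").send()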
chainlit/lc/__init__.py CHANGED
@@ -0,0 +1,11 @@
+ try:
+     import langchain
+ 
+     if langchain.__version__ < "0.0.198":
+         raise ValueError(
+             "LangChain version is too old, expected >= 0.0.198. Run `pip install langchain --upgrade`"
+         )
+ 
+     LANGCHAIN_INSTALLED = True
+ except ImportError:
+     LANGCHAIN_INSTALLED = False
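
Note: this new module gates the LangChain integration behind an optional import and a minimum version. One caveat with the guard as written: comparing __version__ strings lexicographically can misorder releases ("0.0.198" < "0.0.21" is true as strings). A sketch of the same check using PEP 440 parsing, assuming the packaging library is available:

    try:
        import langchain
        from packaging.version import Version

        # Version("0.0.21") > Version("0.0.198") compares numerically,
        # unlike the raw string comparison in the module above.
        if Version(langchain.__version__) < Version("0.0.198"):
            raise ValueError(
                "LangChain version is too old, expected >= 0.0.198. "
                "Run `pip install langchain --upgrade`"
            )
        LANGCHAIN_INSTALLED = True
    except ImportError:
        LANGCHAIN_INSTALLED = False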
chainlit/lc/agent.py ADDED
@@ -0,0 +1,32 @@
+ from typing import Any
+ from chainlit.lc.callbacks import ChainlitCallbackHandler, AsyncChainlitCallbackHandler
+ from chainlit.sync import make_async
+ 
+ 
+ async def run_langchain_agent(agent: Any, input_str: str, use_async: bool):
+     if hasattr(agent, "input_keys"):
+         input_key = agent.input_keys[0]
+         if use_async:
+             raw_res = await agent.acall(
+                 {input_key: input_str}, callbacks=[AsyncChainlitCallbackHandler()]
+             )
+         else:
+             raw_res = await make_async(agent.__call__)(
+                 {input_key: input_str}, callbacks=[ChainlitCallbackHandler()]
+             )
+     else:
+         if use_async:
+             raw_res = await agent.acall(
+                 input_str, callbacks=[AsyncChainlitCallbackHandler()]
+             )
+         else:
+             raw_res = await make_async(agent.__call__)(
+                 input_str, callbacks=[ChainlitCallbackHandler()]
+             )
+ 
+     if hasattr(agent, "output_keys"):
+         output_key = agent.output_keys[0]
+     else:
+         output_key = None
+ 
+     return raw_res, output_key
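
Note: run_langchain_agent normalizes the two LangChain calling conventions: agents that support async are driven through acall() with the async handler, while sync agents are wrapped by make_async so their blocking __call__ runs off the event loop with the sync handler. make_async itself is not part of this diff; the following is only a plausible sketch of what such a helper does, not the actual chainlit.sync implementation:

    import asyncio
    import functools
    from typing import Any, Callable

    def make_async(fn: Callable[..., Any]) -> Callable[..., Any]:
        """Hypothetical stand-in: wrap a blocking callable so it can be awaited."""
        @functools.wraps(fn)
        async def wrapper(*args: Any, **kwargs: Any) -> Any:
            loop = asyncio.get_running_loop()
            # Offload the sync call to the default thread-pool executor so the
            # event loop keeps serving other traffic while the agent runs.
            return await loop.run_in_executor(
                None, functools.partial(fn, *args, **kwargs)
            )
        return wrapper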
chainlit/lc/callbacks.py ADDED
@@ -0,0 +1,411 @@
+ from typing import Any, Dict, List, Optional, Union
+ from langchain.callbacks.base import BaseCallbackHandler, AsyncCallbackHandler
+ from langchain.schema import (
+     AgentAction,
+     AgentFinish,
+     BaseMessage,
+     LLMResult,
+ )
+ from chainlit.emitter import get_emitter, ChainlitEmitter
+ from chainlit.message import Message, ErrorMessage
+ from chainlit.config import config
+ from chainlit.types import LLMSettings
+ from chainlit.sync import run_sync
+ 
+ IGNORE_LIST = ["AgentExecutor"]
+ 
+ 
+ def get_llm_settings(invocation_params: Union[Dict, None]):
+     if invocation_params is None:
+         return None
+     elif invocation_params["_type"] == "openai":
+         return LLMSettings(
+             model_name=invocation_params["model_name"],
+             stop=invocation_params["stop"],
+             temperature=invocation_params["temperature"],
+             max_tokens=invocation_params["max_tokens"],
+             top_p=invocation_params["top_p"],
+             frequency_penalty=invocation_params["frequency_penalty"],
+             presence_penalty=invocation_params["presence_penalty"],
+         )
+     elif invocation_params["_type"] == "openai-chat":
+         return LLMSettings(
+             model_name=invocation_params["model_name"],
+             stop=invocation_params["stop"],
+         )
+     else:
+         return None
+ 
+ 
+ class BaseChainlitCallbackHandler(BaseCallbackHandler):
+     emitter: ChainlitEmitter
+     # Keep track of the formatted prompts to display them in the prompt playground.
+     prompts: List[str]
+     # Keep track of the LLM settings for the last prompt
+     llm_settings: LLMSettings
+     # Keep track of the call sequence, like [AgentExecutor, LLMMathChain, Calculator, ...]
+     sequence: List[str]
+     # Keep track of the last prompt for each session
+     last_prompt: Union[str, None]
+     # Keep track of the currently streamed message for the session
+     stream: Union[Message, None]
+ 
+     raise_error = True
+ 
+     # We want the handler to be called on every message
+     always_verbose: bool = True
+ 
+     def __init__(self) -> None:
+         self.emitter = get_emitter()
+         self.prompts = []
+         self.llm_settings = None
+         self.sequence = []
+         self.last_prompt = None
+         self.stream = None
+ 
+     def end_stream(self):
+         self.stream = None
+ 
+     def add_in_sequence(self, name: str):
+         self.sequence.append(name)
+ 
+     def pop_sequence(self):
+         if self.sequence:
+             return self.sequence.pop()
+ 
+     def add_prompt(self, prompt: str, llm_settings: LLMSettings = None):
+         self.prompts.append(prompt)
+         self.llm_settings = llm_settings
+ 
+     def pop_prompt(self):
+         if self.prompts:
+             self.last_prompt = self.prompts.pop()
+ 
+     def consume_last_prompt(self):
+         last_prompt = self.last_prompt
+         self.last_prompt = None
+         return last_prompt
+ 
+     def get_message_params(self):
+         llm_settings = self.llm_settings
+ 
+         indent = len(self.sequence) if self.sequence else 0
+ 
+         if self.sequence:
+             author = self.sequence[-1]
+         else:
+             author = config.chatbot_name
+ 
+         return author, indent, llm_settings
+ 
+ 
+ class ChainlitCallbackHandler(BaseChainlitCallbackHandler, BaseCallbackHandler):
+     def start_stream(self):
+         author, indent, llm_settings = self.get_message_params()
+ 
+         if author in IGNORE_LIST:
+             return
+ 
+         if config.lc_rename:
+             author = run_sync(
+                 config.lc_rename(author, __chainlit_emitter__=self.emitter)
+             )
+ 
+         self.pop_prompt()
+ 
+         __chainlit_emitter__ = self.emitter
+ 
+         streamed_message = Message(
+             author=author,
+             indent=indent,
+             llm_settings=llm_settings,
+             prompt=self.consume_last_prompt(),
+             content="",
+         )
+         self.stream = streamed_message
+ 
+     def send_token(self, token: str):
+         if self.stream:
+             run_sync(self.stream.stream_token(token))
+ 
+     def add_message(self, message, prompt: str = None, error=False):
+         author, indent, llm_settings = self.get_message_params()
+ 
+         if author in IGNORE_LIST:
+             return
+ 
+         if config.lc_rename:
+             author = run_sync(
+                 config.lc_rename(author, __chainlit_emitter__=self.emitter)
+             )
+ 
+         __chainlit_emitter__ = self.emitter
+ 
+         if error:
+             run_sync(ErrorMessage(author=author, content=message).send())
+             self.end_stream()
+             return
+ 
+         if self.stream:
+             run_sync(self.stream.send())
+             self.end_stream()
+         else:
+             run_sync(
+                 Message(
+                     author=author,
+                     content=message,
+                     indent=indent,
+                     prompt=prompt,
+                     llm_settings=llm_settings,
+                 ).send()
+             )
+ 
+     # Callbacks for various events
+ 
+     def on_llm_start(
+         self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
+     ) -> None:
+         invocation_params = kwargs.get("invocation_params")
+         llm_settings = get_llm_settings(invocation_params)
+         self.add_prompt(prompts[0], llm_settings)
+ 
+     def on_chat_model_start(
+         self,
+         serialized: Dict[str, Any],
+         messages: List[List[BaseMessage]],
+         **kwargs: Any,
+     ) -> None:
+         invocation_params = kwargs.get("invocation_params")
+         llm_settings = get_llm_settings(invocation_params)
+         prompt = "\n".join([m.content for m in messages[0]])
+         self.add_prompt(prompt, llm_settings)
+ 
+     def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
+         if not self.stream:
+             self.start_stream()
+         self.send_token(token)
+ 
+     def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
+         self.pop_prompt()
+         if response.llm_output is not None:
+             if "token_usage" in response.llm_output:
+                 token_usage = response.llm_output["token_usage"]
+                 if "total_tokens" in token_usage:
+                     run_sync(
+                         self.emitter.update_token_count(token_usage["total_tokens"])
+                     )
+ 
+     def on_llm_error(
+         self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
+     ) -> None:
+         pass
+ 
+     def on_chain_start(
+         self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
+     ) -> None:
+         self.add_in_sequence(serialized["id"][-1])
+         # Useful to display details button in the UI
+         self.add_message("")
+ 
+     def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
+         output_key = list(outputs.keys())[0]
+         if output_key:
+             prompt = self.consume_last_prompt()
+             self.add_message(outputs[output_key], prompt)
+         self.pop_sequence()
+ 
+     def on_chain_error(
+         self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
+     ) -> None:
+         if isinstance(error, InterruptedError):
+             return
+         self.add_message(str(error), error=True)
+         self.pop_sequence()
+ 
+     def on_tool_start(
+         self, serialized: Dict[str, Any], inputs: Any, **kwargs: Any
+     ) -> None:
+         self.add_in_sequence(serialized["name"])
+         self.add_message("")
+ 
+     def on_tool_end(
+         self,
+         output: str,
+         observation_prefix: Optional[str] = None,
+         llm_prefix: Optional[str] = None,
+         **kwargs: Any,
+     ) -> None:
+         prompt = self.consume_last_prompt()
+         self.add_message(output, prompt)
+         self.pop_sequence()
+ 
+     def on_tool_error(
+         self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
+     ) -> None:
+         """Do nothing."""
+         if isinstance(error, InterruptedError):
+             return
+         self.add_message(str(error), error=True)
+         self.pop_sequence()
+ 
+     def on_text(self, text: str, **kwargs: Any) -> None:
+         pass
+ 
+     def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
+         pass
+ 
+     def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
+         """Run on agent end."""
+         pass
+ 
+ 
+ class AsyncChainlitCallbackHandler(BaseChainlitCallbackHandler, AsyncCallbackHandler):
+     async def start_stream(self):
+         author, indent, llm_settings = self.get_message_params()
+ 
+         if author in IGNORE_LIST:
+             return
+ 
+         if config.lc_rename:
+             author = await config.lc_rename(author, __chainlit_emitter__=self.emitter)
+ 
+         self.pop_prompt()
+ 
+         __chainlit_emitter__ = self.emitter
+ 
+         streamed_message = Message(
+             author=author,
+             indent=indent,
+             prompt=self.consume_last_prompt(),
+             llm_settings=llm_settings,
+             content="",
+         )
+         self.stream = streamed_message
+ 
+     async def send_token(self, token: str):
+         if self.stream:
+             await self.stream.stream_token(token)
+ 
+     async def add_message(self, message, prompt: str = None, error=False):
+         author, indent, llm_settings = self.get_message_params()
+ 
+         if author in IGNORE_LIST:
+             return
+ 
+         if config.lc_rename:
+             author = await config.lc_rename(author, __chainlit_emitter__=self.emitter)
+ 
+         __chainlit_emitter__ = self.emitter
+ 
+         if error:
+             await ErrorMessage(author=author, content=message).send()
+             self.end_stream()
+             return
+ 
+         if self.stream:
+             await self.stream.send()
+             self.end_stream()
+         else:
+             await Message(
+                 author=author,
+                 content=message,
+                 indent=indent,
+                 prompt=prompt,
+                 llm_settings=llm_settings,
+             ).send()
+ 
+     # Callbacks for various events
+ 
+     async def on_llm_start(
+         self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
+     ) -> None:
+         invocation_params = kwargs.get("invocation_params")
+         llm_settings = get_llm_settings(invocation_params)
+         self.add_prompt(prompts[0], llm_settings)
+ 
+     async def on_chat_model_start(
+         self,
+         serialized: Dict[str, Any],
+         messages: List[List[BaseMessage]],
+         **kwargs: Any,
+     ) -> None:
+         invocation_params = kwargs.get("invocation_params")
+         llm_settings = get_llm_settings(invocation_params)
+         prompt = "\n".join([m.content for m in messages[0]])
+         self.add_prompt(prompt, llm_settings)
+ 
+     async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
+         if not self.stream:
+             await self.start_stream()
+         await self.send_token(token)
+ 
+     async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
+         self.pop_prompt()
+         if response.llm_output is not None:
+             if "token_usage" in response.llm_output:
+                 token_usage = response.llm_output["token_usage"]
+                 if "total_tokens" in token_usage:
+                     await self.emitter.update_token_count(token_usage["total_tokens"])
+ 
+     async def on_llm_error(
+         self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
+     ) -> None:
+         pass
+ 
+     async def on_chain_start(
+         self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
+     ) -> None:
+         self.add_in_sequence(serialized["id"][-1])
+         # Useful to display details button in the UI
+         await self.add_message("")
+ 
+     async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
+         output_key = list(outputs.keys())[0]
+         if output_key:
+             prompt = self.consume_last_prompt()
+             await self.add_message(outputs[output_key], prompt)
+         self.pop_sequence()
+ 
+     async def on_chain_error(
+         self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
+     ) -> None:
+         if isinstance(error, InterruptedError):
+             return
+         await self.add_message(str(error), error=True)
+         self.pop_sequence()
+ 
+     async def on_tool_start(
+         self, serialized: Dict[str, Any], inputs: Any, **kwargs: Any
+     ) -> None:
+         self.add_in_sequence(serialized["name"])
+         await self.add_message("")
+ 
+     async def on_tool_end(
+         self,
+         output: str,
+         observation_prefix: Optional[str] = None,
+         llm_prefix: Optional[str] = None,
+         **kwargs: Any,
+     ) -> None:
+         prompt = self.consume_last_prompt()
+         await self.add_message(output, prompt)
+         self.pop_sequence()
+ 
+     async def on_tool_error(
+         self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
+     ) -> None:
+         """Do nothing."""
+         if isinstance(error, InterruptedError):
+             return
+         await self.add_message(str(error), error=True)
+         self.pop_sequence()
+ 
+     async def on_text(self, text: str, **kwargs: Any) -> None:
+         pass
+ 
+     async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
+         pass
+ 
+     async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
+         """Run on agent end."""
+         pass
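
Note: both handlers are attached per-call rather than per-agent, as the call sites in chainlit/lc/agent.py above show. A minimal usage sketch, where llm_math stands in for any hypothetical LangChain chain:

    from chainlit.lc.callbacks import AsyncChainlitCallbackHandler

    async def answer(llm_math, question: str):
        # The handler forwards every chain/tool/LLM event to the Chainlit UI:
        # nested messages per sequence depth, token streaming, and token counts.
        return await llm_math.acall(
            question, callbacks=[AsyncChainlitCallbackHandler()]
        )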