chainlit 0.2.110__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -1 +0,0 @@
-.markdown-body *:first-child{margin-top:0}.markdown-body *:last-child{margin-bottom:0}.markdown-body p{white-space:break-spaces}.DraftEditor-editorContainer,.DraftEditor-root,.public-DraftEditor-content{height:inherit;text-align:initial}.public-DraftEditor-content[contenteditable=true]{-webkit-user-modify:read-write-plaintext-only}.DraftEditor-root{position:relative}.DraftEditor-editorContainer{background-color:#fff0;border-left:.1px solid transparent;position:relative;z-index:1}.public-DraftEditor-block{position:relative}.DraftEditor-alignLeft .public-DraftStyleDefault-block{text-align:left}.DraftEditor-alignLeft .public-DraftEditorPlaceholder-root{left:0;text-align:left}.DraftEditor-alignCenter .public-DraftStyleDefault-block{text-align:center}.DraftEditor-alignCenter .public-DraftEditorPlaceholder-root{margin:0 auto;text-align:center;width:100%}.DraftEditor-alignRight .public-DraftStyleDefault-block{text-align:right}.DraftEditor-alignRight .public-DraftEditorPlaceholder-root{right:0;text-align:right}.public-DraftEditorPlaceholder-root{color:#9197a3;position:absolute;width:100%;z-index:1}.public-DraftEditorPlaceholder-hasFocus{color:#bdc1c9}.DraftEditorPlaceholder-hidden{display:none}.public-DraftStyleDefault-block{position:relative;white-space:pre-wrap}.public-DraftStyleDefault-ltr{direction:ltr;text-align:left}.public-DraftStyleDefault-rtl{direction:rtl;text-align:right}.public-DraftStyleDefault-listLTR{direction:ltr}.public-DraftStyleDefault-listRTL{direction:rtl}.public-DraftStyleDefault-ol,.public-DraftStyleDefault-ul{margin:16px 0;padding:0}.public-DraftStyleDefault-depth0.public-DraftStyleDefault-listLTR{margin-left:1.5em}.public-DraftStyleDefault-depth0.public-DraftStyleDefault-listRTL{margin-right:1.5em}.public-DraftStyleDefault-depth1.public-DraftStyleDefault-listLTR{margin-left:3em}.public-DraftStyleDefault-depth1.public-DraftStyleDefault-listRTL{margin-right:3em}.public-DraftStyleDefault-depth2.public-DraftStyleDefault-listLTR{margin-left:4.5em}.public-DraftStyleDefault-depth2.public-DraftStyleDefault-listRTL{margin-right:4.5em}.public-DraftStyleDefault-depth3.public-DraftStyleDefault-listLTR{margin-left:6em}.public-DraftStyleDefault-depth3.public-DraftStyleDefault-listRTL{margin-right:6em}.public-DraftStyleDefault-depth4.public-DraftStyleDefault-listLTR{margin-left:7.5em}.public-DraftStyleDefault-depth4.public-DraftStyleDefault-listRTL{margin-right:7.5em}.public-DraftStyleDefault-unorderedListItem{list-style-type:square;position:relative}.public-DraftStyleDefault-unorderedListItem.public-DraftStyleDefault-depth0{list-style-type:disc}.public-DraftStyleDefault-unorderedListItem.public-DraftStyleDefault-depth1{list-style-type:circle}.public-DraftStyleDefault-orderedListItem{list-style-type:none;position:relative}.public-DraftStyleDefault-orderedListItem.public-DraftStyleDefault-listLTR:before{left:-36px;position:absolute;text-align:right;width:30px}.public-DraftStyleDefault-orderedListItem.public-DraftStyleDefault-listRTL:before{position:absolute;right:-36px;text-align:left;width:30px}.public-DraftStyleDefault-orderedListItem:before{content:counter(ol0) ". ";counter-increment:ol0}.public-DraftStyleDefault-orderedListItem.public-DraftStyleDefault-depth1:before{content:counter(ol1,lower-alpha) ". ";counter-increment:ol1}.public-DraftStyleDefault-orderedListItem.public-DraftStyleDefault-depth2:before{content:counter(ol2,lower-roman) ". ";counter-increment:ol2}.public-DraftStyleDefault-orderedListItem.public-DraftStyleDefault-depth3:before{content:counter(ol3) ". ";counter-increment:ol3}.public-DraftStyleDefault-orderedListItem.public-DraftStyleDefault-depth4:before{content:counter(ol4,lower-alpha) ". ";counter-increment:ol4}.public-DraftStyleDefault-depth0.public-DraftStyleDefault-reset{counter-reset:ol0}.public-DraftStyleDefault-depth1.public-DraftStyleDefault-reset{counter-reset:ol1}.public-DraftStyleDefault-depth2.public-DraftStyleDefault-reset{counter-reset:ol2}.public-DraftStyleDefault-depth3.public-DraftStyleDefault-reset{counter-reset:ol3}.public-DraftStyleDefault-depth4.public-DraftStyleDefault-reset{counter-reset:ol4}body{margin:0;padding:0;font-family:Public Sans,sans-serif;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}code{font-family:source-code-pro,Menlo,Monaco,Consolas,Courier New,monospace}
chainlit/lc/chainlit_handler.py DELETED
@@ -1,271 +0,0 @@
-from typing import Any, Dict, List, Optional, Union
-from langchain.callbacks.base import BaseCallbackHandler
-from langchain.schema import AgentAction, AgentFinish, LLMResult
-from chainlit.sdk import get_sdk, Chainlit
-from chainlit.message import Message, ErrorMessage
-from chainlit.config import config
-from chainlit.types import LLMSettings
-
-IGNORE_LIST = ["AgentExecutor"]
-
-
-class ChainlitCallbackHandler(BaseCallbackHandler):
-    # Keep track of the formatted prompts to display them in the prompt playground.
-    prompts_per_session: Dict[str, List[str]]
-    # Keep track of the LLM settings for the last prompt
-    llm_settings_per_session: Dict[str, LLMSettings]
-    # Keep track of the call sequence, like [AgentExecutor, LLMMathChain, Calculator, ...]
-    sequence_per_session: Dict[str, List[str]]
-    # Keep track of the last prompt for each session
-    last_prompt_per_session: Dict[str, Union[str, None]]
-    # Keep track of the currently streamed message for the session
-    stream_per_session: Dict[str, Message]
-
-    # We want the handler to be called on every message
-    always_verbose: bool = True
-
-    def __init__(self) -> None:
-        # Initialize dictionaries to store session data
-        self.prompts_per_session = {}
-        self.llm_settings_per_session = {}
-        self.sequence_per_session = {}
-        self.last_prompt_per_session = {}
-        self.stream_per_session = {}
-
-    def get_streamed_message(self) -> Union[Message, None]:
-        sdk = get_sdk()
-        if not sdk:
-            return
-
-        session_id = sdk.session["id"]
-        return self.stream_per_session.get(session_id, None)
-
-    def start_stream(self):
-        sdk = get_sdk()
-        if not sdk:
-            return
-
-        session_id = sdk.session["id"]
-        author, indent, llm_settings = self.get_message_params(sdk)
-
-        if author in IGNORE_LIST:
-            return
-        streamed_message = Message(
-            author=author, indent=indent, llm_settings=llm_settings, content=""
-        )
-        self.stream_per_session[session_id] = streamed_message
-
-    def send_token(self, token: str):
-        streamed_message = self.get_streamed_message()
-        if streamed_message:
-            streamed_message.stream_token(token)
-
-    def end_stream(self):
-        sdk = get_sdk()
-        if not sdk:
-            return
-
-        session_id = sdk.session["id"]
-        del self.stream_per_session[session_id]
-
-    def add_in_sequence(self, name: str):
-        sdk = get_sdk()
-        if not sdk:
-            return
-
-        session_id = sdk.session["id"]
-
-        # Initialize session sequences if not already present
-        if session_id not in self.sequence_per_session:
-            self.sequence_per_session[session_id] = []
-
-        sequence = self.sequence_per_session[session_id]
-
-        sequence.append(name)
-
-    def pop_sequence(self):
-        sdk = get_sdk()
-        if not sdk:
-            return
-
-        session_id = sdk.session["id"]
-
-        # Remove the last element from the sequences
-        if (
-            session_id in self.sequence_per_session
-            and self.sequence_per_session[session_id]
-        ):
-            self.sequence_per_session[session_id].pop()
-
-    def add_prompt(self, prompt: str, llm_settings: LLMSettings = None):
-        sdk = get_sdk()
-        if not sdk:
-            return
-
-        session_id = sdk.session["id"]
-
-        # Initialize session prompts if not already present
-        if session_id not in self.prompts_per_session:
-            self.prompts_per_session[session_id] = []
-
-        self.prompts_per_session[session_id].append(prompt)
-
-        if llm_settings:
-            self.llm_settings_per_session[session_id] = llm_settings
-
-    def pop_prompt(self):
-        sdk = get_sdk()
-        if not sdk:
-            return
-
-        session_id = sdk.session["id"]
-
-        # Remove the last prompt from the session
-        if (
-            session_id in self.prompts_per_session
-            and self.prompts_per_session[session_id]
-        ):
-            self.last_prompt_per_session[session_id] = self.prompts_per_session[
-                session_id
-            ].pop()
-
-    def consume_last_prompt(self):
-        sdk = get_sdk()
-        if not sdk:
-            return
-
-        session_id = sdk.session["id"]
-
-        last_prompt = self.last_prompt_per_session.get(session_id)
-        self.last_prompt_per_session[session_id] = None
-        return last_prompt
-
-    def get_message_params(self, sdk: Chainlit):
-        llm_settings = self.llm_settings_per_session.get(sdk.session["id"])
-
-        sequence = self.sequence_per_session.get(sdk.session["id"])
-
-        indent = len(sequence) if sequence else 0
-
-        if sequence:
-            if config.lc_rename:
-                author = config.lc_rename(sequence[-1])
-            else:
-                author = sequence[-1]
-        else:
-            author = config.chatbot_name
-
-        return author, indent, llm_settings
-
-    def add_message(self, message, prompt: str = None, error=False):
-        sdk = get_sdk()
-        if not sdk:
-            return
-
-        author, indent, llm_settings = self.get_message_params(sdk)
-
-        if author in IGNORE_LIST:
-            return
-
-        if error:
-            ErrorMessage(author=author, content=message).send()
-            self.end_stream()
-            return
-
-        streamed_message = self.get_streamed_message()
-
-        if streamed_message:
-            streamed_message.prompt = prompt
-            streamed_message.llm_settings = llm_settings
-            streamed_message.send()
-            self.end_stream()
-        else:
-            Message(
-                author=author,
-                content=message,
-                indent=indent,
-                prompt=prompt,
-                llm_settings=llm_settings,
-            ).send()
-
-    # Callbacks for various events
-
-    def on_llm_start(
-        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
-    ) -> None:
-        self.add_prompt(prompts[0], kwargs.get("llm_settings"))
-
-    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
-        if not self.get_streamed_message():
-            self.start_stream()
-        self.send_token(token)
-
-    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
-        self.pop_prompt()
-        sdk = get_sdk()
-        if not sdk:
-            return
-        if response.llm_output is not None:
-            if "token_usage" in response.llm_output:
-                token_usage = response.llm_output["token_usage"]
-                if "total_tokens" in token_usage:
-                    sdk.update_token_count(token_usage["total_tokens"])
-
-    def on_llm_error(
-        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
-    ) -> None:
-        pass
-
-    def on_chain_start(
-        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
-    ) -> None:
-        self.add_in_sequence(serialized["name"])
-        # Useful to display details button in the UI
-        self.add_message("")
-
-    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
-        output_key = list(outputs.keys())[0]
-        if output_key:
-            prompt = self.consume_last_prompt()
-            self.add_message(outputs[output_key], prompt)
-        self.pop_sequence()
-
-    def on_chain_error(
-        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
-    ) -> None:
-        self.add_message(str(error), error=True)
-        self.pop_sequence()
-
-    def on_tool_start(
-        self, serialized: Dict[str, Any], inputs: Any, **kwargs: Any
-    ) -> None:
-        self.add_in_sequence(serialized["name"])
-        self.add_message("")
-
-    def on_tool_end(
-        self,
-        output: str,
-        observation_prefix: Optional[str] = None,
-        llm_prefix: Optional[str] = None,
-        **kwargs: Any,
-    ) -> None:
-        prompt = self.consume_last_prompt()
-        self.add_message(output, prompt)
-        self.pop_sequence()
-
-    def on_tool_error(
-        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
-    ) -> None:
-        """Do nothing."""
-        self.add_message(str(error), error=True)
-        self.pop_sequence()
-
-    def on_text(self, text: str, **kwargs: Any) -> None:
-        pass
-
-    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
-        pass
-
-    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
-        """Run on agent end."""
-        pass
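
Note: the deleted handler above keys every piece of state on the Chainlit session id so that concurrent chat sessions never share prompts, call sequences, or streamed messages. For reference only, here is a minimal sketch of that per-session pattern against LangChain's callback interface from the same era (assuming a release after the 0.0.153 callback rework, where BaseCallbackHandler methods are optional overrides); the session id plumbing and the final print are illustrative placeholders, not Chainlit APIs.

from typing import Any, Dict, List

from langchain.callbacks.base import BaseCallbackHandler


class SessionScopedHandler(BaseCallbackHandler):
    """Toy handler that keeps per-session state, mirroring the deleted class."""

    def __init__(self, session_id: str) -> None:
        # One dict entry per session, like prompts_per_session above.
        self.session_id = session_id
        self.tokens: Dict[str, List[str]] = {}

    def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> None:
        # Reset this session's buffer when a new LLM call starts.
        self.tokens[self.session_id] = []

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        # Accumulate streamed tokens instead of sending them to a UI.
        self.tokens.setdefault(self.session_id, []).append(token)

    def on_llm_end(self, response: Any, **kwargs: Any) -> None:
        # Flush the buffered completion for this session only.
        print("".join(self.tokens.pop(self.session_id, [])))
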
chainlit/lc/monkey.py DELETED
@@ -1,28 +0,0 @@
-import os
-from chainlit.config import config
-from chainlit.logger import logger
-
-
-# Check if LangChain is installed and set up cache and callback handler
-def patch():
-    try:
-        import langchain
-        from langchain.cache import SQLiteCache
-
-        if config.lc_cache_path:
-            langchain.llm_cache = SQLiteCache(database_path=config.lc_cache_path)
-            if not os.path.exists(config.lc_cache_path):
-                logger.info(f"LangChain cache enabled: {config.lc_cache_path}")
-
-        # New callback handler architecture
-        if langchain.__version__ > "0.0.153":
-            import chainlit.lc.new_monkey
-        # Old callback handler architecture
-        else:
-            import chainlit.lc.old_monkey
-
-        LANGCHAIN_INSTALLED = True
-    except ImportError:
-        LANGCHAIN_INSTALLED = False
-
-    return LANGCHAIN_INSTALLED
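
Note: patch() above selects between the two patch modules with a plain string comparison of langchain.__version__. That happens to hold across the 0.0.1xx releases it targets, but lexicographic comparison is not a general version check (for instance, "0.0.99" sorts after "0.0.153" as a string). A more defensive variant, shown here as an assumption rather than what this release shipped, parses the version first:

import langchain
from packaging.version import Version


def use_new_callback_architecture() -> bool:
    # Compare parsed versions instead of raw strings to avoid
    # lexicographic surprises such as "0.0.99" > "0.0.153".
    return Version(langchain.__version__) > Version("0.0.153")
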
chainlit/lc/new_monkey.py DELETED
@@ -1,167 +0,0 @@
-import inspect
-import langchain
-from typing import List, Optional
-from langchain.llms.base import BaseLLM, update_cache, get_prompts
-from langchain.chat_models.base import BaseChatModel
-from langchain.schema import (
-    LLMResult,
-)
-from langchain.callbacks.manager import (
-    CallbackManager,
-    Callbacks,
-)
-from langchain.schema import (
-    BaseMessage,
-    LLMResult,
-    get_buffer_string,
-)
-from chainlit.lc.utils import get_llm_settings
-from chainlit.lc.chainlit_handler import ChainlitCallbackHandler
-
-# Monkey patch LangChain > 0.0.153
-chainlit_handler = ChainlitCallbackHandler()
-
-orig_configure = CallbackManager.configure
-
-
-def patched_configure(
-    inheritable_callbacks: Callbacks = None,
-    local_callbacks: Callbacks = None,
-    verbose: bool = False,
-):
-    cbm = orig_configure(inheritable_callbacks, local_callbacks, verbose)
-    cbm.add_handler(chainlit_handler, False)
-    return cbm
-
-
-CallbackManager.configure = patched_configure
-
-
-def patched_generate(
-    self,
-    prompts: List[str],
-    stop: Optional[List[str]] = None,
-    callbacks: Callbacks = None,
-) -> LLMResult:
-    """Run the LLM on the given prompt and input."""
-    # If string is passed in directly no errors will be raised but outputs will
-    # not make sense.
-    if not isinstance(prompts, list):
-        raise ValueError(
-            "Argument 'prompts' is expected to be of type List[str], received"
-            f" argument of type {type(prompts)}."
-        )
-    # PATCH
-    llm_settings = get_llm_settings(self, stop)
-    disregard_cache = self.cache is not None and not self.cache
-    callback_manager = CallbackManager.configure(
-        callbacks, self.callbacks, self.verbose
-    )
-    new_arg_supported = inspect.signature(self._generate).parameters.get("run_manager")
-    if langchain.llm_cache is None or disregard_cache:
-        # This happens when langchain.cache is None, but self.cache is True
-        if self.cache is not None and self.cache:
-            raise ValueError("Asked to cache, but no cache found at `langchain.cache`.")
-        # PATCH
-        run_manager = callback_manager.on_llm_start(
-            {"name": self.__class__.__name__}, prompts, llm_settings=llm_settings
-        )
-        try:
-            output = (
-                self._generate(prompts, stop=stop, run_manager=run_manager)
-                if new_arg_supported
-                else self._generate(prompts, stop=stop)
-            )
-        except (KeyboardInterrupt, Exception) as e:
-            run_manager.on_llm_error(e)
-            raise e
-        run_manager.on_llm_end(output)
-        return output
-    params = self.dict()
-    params["stop"] = stop
-    (
-        existing_prompts,
-        llm_string,
-        missing_prompt_idxs,
-        missing_prompts,
-    ) = get_prompts(params, prompts)
-    if len(missing_prompts) > 0:
-        # PATCH
-        run_manager = callback_manager.on_llm_start(
-            {"name": self.__class__.__name__},
-            missing_prompts,
-            llm_settings=llm_settings,
-        )
-        try:
-            new_results = (
-                self._generate(missing_prompts, stop=stop, run_manager=run_manager)
-                if new_arg_supported
-                else self._generate(missing_prompts, stop=stop)
-            )
-        except (KeyboardInterrupt, Exception) as e:
-            run_manager.on_llm_error(e)
-            raise e
-        run_manager.on_llm_end(new_results)
-        llm_output = update_cache(
-            existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts
-        )
-    else:
-        # PATCH
-        run_manager = callback_manager.on_llm_start(
-            {"name": self.__class__.__name__},
-            prompts,
-            verbose=self.verbose,
-            llm_settings=llm_settings,
-        )
-        llm_output = {}
-        run_manager.on_llm_end(
-            LLMResult(generations=[], llm_output=llm_output), verbose=self.verbose
-        )
-
-    generations = [existing_prompts[i] for i in range(len(prompts))]
-    return LLMResult(generations=generations, llm_output=llm_output)
-
-
-BaseLLM.generate = patched_generate
-
-
-def patched_chat_generate(
-    self,
-    messages: List[List[BaseMessage]],
-    stop: Optional[List[str]] = None,
-    callbacks: Callbacks = None,
-) -> LLMResult:
-    """Top Level call"""
-
-    # PATCH
-    llm_settings = get_llm_settings(self, stop)
-
-    callback_manager = CallbackManager.configure(
-        callbacks, self.callbacks, self.verbose
-    )
-    message_strings = [get_buffer_string(m) for m in messages]
-
-    # PATCH
-    run_manager = callback_manager.on_llm_start(
-        {"name": self.__class__.__name__}, message_strings, llm_settings=llm_settings
-    )
-
-    new_arg_supported = inspect.signature(self._generate).parameters.get("run_manager")
-    try:
-        results = [
-            self._generate(m, stop=stop, run_manager=run_manager)
-            if new_arg_supported
-            else self._generate(m, stop=stop)
-            for m in messages
-        ]
-    except (KeyboardInterrupt, Exception) as e:
-        run_manager.on_llm_error(e)
-        raise e
-    llm_output = self._combine_llm_outputs([res.llm_output for res in results])
-    generations = [res.generations for res in results]
-    output = LLMResult(generations=generations, llm_output=llm_output)
-    run_manager.on_llm_end(output)
-    return output
-
-
-BaseChatModel.generate = patched_chat_generate
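
Note: patched_configure above is the standard wrap-and-delegate monkey patch: keep a reference to the original classmethod, call it, attach the extra handler to the result, and reassign the attribute. One fragility is that it pins a three-parameter signature, so a LangChain release adding a parameter to CallbackManager.configure would silently drop it. A forwarding sketch of the same idea, assuming the 0.2.x module layout shown in this diff:

import functools

from langchain.callbacks.manager import CallbackManager

from chainlit.lc.chainlit_handler import ChainlitCallbackHandler  # deleted module shown above

_orig_configure = CallbackManager.configure
_handler = ChainlitCallbackHandler()


@functools.wraps(_orig_configure)
def _configure_with_chainlit(*args, **kwargs):
    # Delegate to the original classmethod with whatever arguments it was
    # given, then attach our handler as a non-inheritable handler.
    manager = _orig_configure(*args, **kwargs)
    manager.add_handler(_handler, False)
    return manager


CallbackManager.configure = _configure_with_chainlit
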
chainlit/lc/old_monkey.py DELETED
@@ -1,119 +0,0 @@
-import langchain
-from typing import List, Optional
-from langchain.llms import base as llm_base
-from langchain.chat_models.base import BaseChatModel
-from langchain.schema import (
-    LLMResult,
-    PromptValue,
-)
-from langchain.callbacks import get_callback_manager
-from chainlit.lc.chainlit_handler import ChainlitCallbackHandler
-from chainlit.lc.utils import get_llm_settings
-
-# Monkey patch LangChain <= 0.0.153
-
-
-def patched_generate(
-    self: llm_base.BaseLLM, prompts: List[str], stop: Optional[List[str]] = None
-) -> LLMResult:
-    """Run the LLM on the given prompt and input."""
-    # If string is passed in directly no errors will be raised but outputs will
-    # not make sense.
-
-    if not isinstance(prompts, list):
-        raise ValueError(
-            "Argument 'prompts' is expected to be of type List[str], received"
-            f" argument of type {type(prompts)}."
-        )
-    # PATCH
-    llm_settings = get_llm_settings(self, stop)
-    disregard_cache = self.cache is not None and not self.cache
-    if langchain.llm_cache is None or disregard_cache:
-        # This happens when langchain.cache is None, but self.cache is True
-        if self.cache is not None and self.cache:
-            raise ValueError("Asked to cache, but no cache found at `langchain.cache`.")
-        self.callback_manager.on_llm_start(
-            {"name": self.__class__.__name__},
-            prompts,
-            verbose=self.verbose,
-            llm_settings=llm_settings,
-        )
-        try:
-            output = self._generate(prompts, stop=stop)
-        except (KeyboardInterrupt, Exception) as e:
-            self.callback_manager.on_llm_error(e, verbose=self.verbose)
-            raise e
-        self.callback_manager.on_llm_end(output, verbose=self.verbose)
-        return output
-    params = self.dict()
-    params["stop"] = stop
-    (
-        existing_prompts,
-        llm_string,
-        missing_prompt_idxs,
-        missing_prompts,
-    ) = llm_base.get_prompts(params, prompts)
-
-    if len(missing_prompts) > 0:
-        self.callback_manager.on_llm_start(
-            {"name": self.__class__.__name__},
-            missing_prompts,
-            verbose=self.verbose,
-            llm_settings=llm_settings,
-        )
-        try:
-            new_results = self._generate(missing_prompts, stop=stop)
-        except (KeyboardInterrupt, Exception) as e:
-            self.callback_manager.on_llm_error(e, verbose=self.verbose)
-            raise e
-        self.callback_manager.on_llm_end(new_results, verbose=self.verbose)
-        llm_output = llm_base.update_cache(
-            existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts
-        )
-    else:
-        # PATCH
-        self.callback_manager.on_llm_start(
-            {"name": self.__class__.__name__},
-            prompts,
-            verbose=self.verbose,
-            llm_settings=llm_settings,
-        )
-        llm_output = {}
-        self.callback_manager.on_llm_end(
-            LLMResult(generations=[], llm_output=llm_output), verbose=self.verbose
-        )
-    generations = [existing_prompts[i] for i in range(len(prompts))]
-    return LLMResult(generations=generations, llm_output=llm_output)
-
-
-llm_base.BaseLLM.generate = patched_generate
-
-
-def patched_generate_prompt(
-    self, prompts: List[PromptValue], stop: Optional[List[str]] = None
-) -> LLMResult:
-    prompt_messages = [p.to_messages() for p in prompts]
-    prompt_strings = [p.to_string() for p in prompts]
-
-    # PATCH
-    llm_settings = get_llm_settings(self, stop)
-    self.callback_manager.on_llm_start(
-        {"name": self.__class__.__name__},
-        prompt_strings,
-        verbose=self.verbose,
-        llm_settings=llm_settings,
-    )
-
-    try:
-        output = self.generate(prompt_messages, stop=stop)
-    except (KeyboardInterrupt, Exception) as e:
-        self.callback_manager.on_llm_error(e, verbose=self.verbose)
-        raise e
-    self.callback_manager.on_llm_end(output, verbose=self.verbose)
-    return output
-
-
-BaseChatModel.generate_prompt = patched_generate_prompt
-
-
-get_callback_manager()._callback_manager.add_handler(ChainlitCallbackHandler())
chainlit/lc/utils.py DELETED
@@ -1,38 +0,0 @@
-from typing import Any
-from chainlit.types import LLMSettings
-from typing import List, Optional
-
-
-def run_langchain_agent(agent: Any, input_str: str):
-    if hasattr(agent, "input_keys"):
-        input_key = agent.input_keys[0]
-        raw_res = agent({input_key: input_str})
-    else:
-        raw_res = agent(input_str)
-
-    if hasattr(agent, "output_keys"):
-        output_key = agent.output_keys[0]
-    else:
-        output_key = None
-
-    return raw_res, output_key
-
-
-def get_llm_settings(llm, stop: Optional[List[str]] = None):
-    if llm.__class__.__name__ == "OpenAI":
-        return LLMSettings(
-            model_name=llm.model_name,
-            stop=stop,
-            temperature=llm.temperature,
-            max_tokens=llm.max_tokens,
-            top_p=llm.top_p,
-            frequency_penalty=llm.frequency_penalty,
-            presence_penalty=llm.presence_penalty,
-        )
-    elif llm.__class__.__name__ == "ChatOpenAI":
-        return LLMSettings(
-            model_name=llm.model_name,
-            stop=stop,
-        )
-    else:
-        return None
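
Note: get_llm_settings above duck-types on the wrapper's class name, so it only snapshots settings for OpenAI and ChatOpenAI and returns None for every other provider. A hedged usage sketch against the 0.2.x layout being removed here, assuming a LangChain and openai install from that era; the API key is a dummy that only satisfies construction-time validation and no request is made:

from langchain.llms import OpenAI

from chainlit.lc.utils import get_llm_settings  # deleted module shown above

llm = OpenAI(temperature=0, openai_api_key="sk-dummy")

# For an OpenAI LLM this returns an LLMSettings snapshot of the sampling
# parameters (model_name, temperature, max_tokens, ...); for an unknown
# provider it returns None and the prompt playground gets no settings.
settings = get_llm_settings(llm, stop=["\nObservation:"])
print(settings)
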
chainlit/watch.py DELETED
@@ -1,54 +0,0 @@
-import os
-from watchdog_gevent.observers import GeventObserver
-from watchdog.events import FileSystemEventHandler
-from chainlit.config import config, load_module
-from chainlit.server import socketio
-from chainlit.logger import logger
-
-last_modified_time = 0
-is_watching = False
-
-
-class ChangeHandler(FileSystemEventHandler):
-    def on_modified(self, event):
-        global last_modified_time
-
-        # Get the modified time of the file
-        statbuf = os.stat(event.src_path)
-        current_modified_time = statbuf.st_mtime
-
-        file_ext = os.path.splitext(event.src_path)[1]
-
-        if not file_ext in [".py", ".md"]:
-            return
-
-        # Check if the file was modified more than 0.5 seconds ago
-        if (current_modified_time - last_modified_time) > 0.5:
-            logger.info(f"event type: {event.event_type} path : {event.src_path}")
-
-            # Load the module if the module name is specified in the config
-            if config.module_name:
-                load_module(config.module_name)
-
-            # Emit a "reload" event to the socket
-            socketio.emit("reload", {})
-
-            last_modified_time = current_modified_time
-
-
-def watch_directory():
-    global is_watching
-
-    # Return if already watching
-    if is_watching:
-        return
-
-    is_watching = True
-    event_handler = ChangeHandler()
-    observer = GeventObserver()
-
-    # Schedule the observer to watch the directory recursively
-    observer.schedule(event_handler, config.root, recursive=True)
-
-    # Start the observer
-    observer.start()
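
Note: watch_directory above is the usual watchdog auto-reload recipe: one observer watches the project root recursively and the handler debounces the burst of modification events an editor emits per save before reloading the module and telling connected clients to refresh. A standalone sketch of the same recipe using watchdog's default threaded Observer instead of the GeventObserver used above, with the Socket.IO emit replaced by a print:

import os
import time

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

WATCHED_EXTENSIONS = {".py", ".md"}
DEBOUNCE_SECONDS = 0.5


class ReloadHandler(FileSystemEventHandler):
    def __init__(self) -> None:
        self.last_trigger = 0.0

    def on_modified(self, event):
        # Ignore directories and files we do not care about.
        if event.is_directory:
            return
        if os.path.splitext(event.src_path)[1] not in WATCHED_EXTENSIONS:
            return
        # Debounce: editors often emit several events per save.
        now = time.time()
        if now - self.last_trigger > DEBOUNCE_SECONDS:
            self.last_trigger = now
            print(f"reload triggered by {event.src_path}")


if __name__ == "__main__":
    observer = Observer()
    observer.schedule(ReloadHandler(), path=".", recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    finally:
        observer.stop()
        observer.join()
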