chainlit 1.1.305__py3-none-any.whl → 1.1.400__py3-none-any.whl

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release: this version of chainlit might be problematic.

Files changed (48)
  1. chainlit/__init__.py +19 -5
  2. chainlit/chat_context.py +64 -0
  3. chainlit/config.py +6 -3
  4. chainlit/context.py +9 -0
  5. chainlit/copilot/dist/index.js +150 -147
  6. chainlit/data/__init__.py +8 -12
  7. chainlit/data/sql_alchemy.py +0 -4
  8. chainlit/discord/app.py +44 -24
  9. chainlit/emitter.py +9 -11
  10. chainlit/frontend/dist/assets/{DailyMotion-4f715d15.js → DailyMotion-e2c926d0.js} +1 -1
  11. chainlit/frontend/dist/assets/{Facebook-25f45c09.js → Facebook-88f80aba.js} +1 -1
  12. chainlit/frontend/dist/assets/{FilePlayer-04482650.js → FilePlayer-febd1b0d.js} +1 -1
  13. chainlit/frontend/dist/assets/{Kaltura-37152a96.js → Kaltura-606fe0bd.js} +1 -1
  14. chainlit/frontend/dist/assets/{Mixcloud-914b75ee.js → Mixcloud-b3db090a.js} +1 -1
  15. chainlit/frontend/dist/assets/{Mux-fb751398.js → Mux-ca847a7d.js} +1 -1
  16. chainlit/frontend/dist/assets/{Preview-85fbb8da.js → Preview-7a3747b5.js} +1 -1
  17. chainlit/frontend/dist/assets/{SoundCloud-8afad6c0.js → SoundCloud-0da01fb1.js} +1 -1
  18. chainlit/frontend/dist/assets/{Streamable-08844d93.js → Streamable-4a6ab048.js} +1 -1
  19. chainlit/frontend/dist/assets/{Twitch-1b95f5c8.js → Twitch-3e260619.js} +1 -1
  20. chainlit/frontend/dist/assets/{Vidyard-5028fa2f.js → Vidyard-73692980.js} +1 -1
  21. chainlit/frontend/dist/assets/{Vimeo-ca732959.js → Vimeo-6627c7a8.js} +1 -1
  22. chainlit/frontend/dist/assets/{Wistia-74e58d71.js → Wistia-27df9c66.js} +1 -1
  23. chainlit/frontend/dist/assets/{YouTube-bdf4ca10.js → YouTube-a11d419d.js} +1 -1
  24. chainlit/frontend/dist/assets/index-d66d1800.js +730 -0
  25. chainlit/frontend/dist/assets/{react-plotly-cf9b99cc.js → react-plotly-012ed517.js} +1 -1
  26. chainlit/frontend/dist/index.html +1 -1
  27. chainlit/haystack/callbacks.py +1 -3
  28. chainlit/langchain/callbacks.py +39 -20
  29. chainlit/llama_index/callbacks.py +8 -28
  30. chainlit/message.py +4 -17
  31. chainlit/mistralai/__init__.py +2 -5
  32. chainlit/openai/__init__.py +0 -2
  33. chainlit/server.py +1 -1
  34. chainlit/session.py +2 -12
  35. chainlit/slack/app.py +38 -31
  36. chainlit/socket.py +37 -1
  37. chainlit/step.py +37 -32
  38. chainlit/teams/app.py +23 -18
  39. chainlit/translations/zh-CN.json +229 -0
  40. chainlit/types.py +0 -1
  41. chainlit/user_session.py +0 -3
  42. chainlit/utils.py +1 -0
  43. {chainlit-1.1.305.dist-info → chainlit-1.1.400.dist-info}/METADATA +1 -1
  44. chainlit-1.1.400.dist-info/RECORD +82 -0
  45. chainlit/frontend/dist/assets/index-621140f9.js +0 -727
  46. chainlit-1.1.305.dist-info/RECORD +0 -80
  47. {chainlit-1.1.305.dist-info → chainlit-1.1.400.dist-info}/WHEEL +0 -0
  48. {chainlit-1.1.305.dist-info → chainlit-1.1.400.dist-info}/entry_points.txt +0 -0

chainlit/frontend/dist/index.html CHANGED
@@ -21,7 +21,7 @@
  <script>
  const global = globalThis;
  </script>
- <script type="module" crossorigin src="/assets/index-621140f9.js"></script>
+ <script type="module" crossorigin src="/assets/index-d66d1800.js"></script>
  <link rel="stylesheet" href="/assets/index-aaf974a9.css">
  </head>
  <body>

chainlit/haystack/callbacks.py CHANGED
@@ -68,9 +68,7 @@ class HaystackAgentCallbackHandler:
          self.last_tokens: List[str] = []
          self.answer_reached = False

-         root_message = context.session.root_message
-         parent_id = root_message.id if root_message else None
-         run_step = Step(name=self.agent_name, type="run", parent_id=parent_id)
+         run_step = Step(name=self.agent_name, type="run")
          run_step.start = utc_now()
          run_step.input = kwargs

chainlit/langchain/callbacks.py CHANGED
@@ -1,11 +1,8 @@
  import json
  import time
- from typing import Any, Dict, List, Optional, TypedDict, Union
+ from typing import Any, Dict, List, Optional, Tuple, TypedDict, Union
  from uuid import UUID

- from chainlit.context import context_var
- from chainlit.message import Message
- from chainlit.step import Step
  from langchain.callbacks.tracers.base import BaseTracer
  from langchain.callbacks.tracers.schemas import Run
  from langchain.schema import BaseMessage
@@ -15,6 +12,10 @@ from literalai import ChatGeneration, CompletionGeneration, GenerationMessage
  from literalai.helper import utc_now
  from literalai.step import TrueStepType

+ from chainlit.context import context_var
+ from chainlit.message import Message
+ from chainlit.step import Step
+
  DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]


@@ -229,7 +230,24 @@ class GenerationHelper:
          return provider, model, tools, settings


- DEFAULT_TO_IGNORE = ["Runnable", "<lambda>"]
+ def process_content(content: Any) -> Tuple[Dict, Optional[str]]:
+     if content is None:
+         return {}, None
+     if isinstance(content, dict):
+         return content, "json"
+     elif isinstance(content, str):
+         return {"content": content}, "text"
+     else:
+         return {"content": str(content)}, "text"
+
+
+ DEFAULT_TO_IGNORE = [
+     "RunnableSequence",
+     "RunnableParallel",
+     "RunnableAssign",
+     "RunnableLambda",
+     "<lambda>",
+ ]
  DEFAULT_TO_KEEP = ["retriever", "llm", "agent", "chain", "tool"]

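The new process_content helper normalizes arbitrary run inputs and outputs into a serializable dict plus a display language ("json" for dicts, "text" for everything else). A standalone sketch of its behavior, copied from the definition above:

    from typing import Any, Dict, Optional, Tuple

    def process_content(content: Any) -> Tuple[Dict, Optional[str]]:
        # Dicts render as JSON; any other value is stringified as text.
        if content is None:
            return {}, None
        if isinstance(content, dict):
            return content, "json"
        elif isinstance(content, str):
            return {"content": content}, "text"
        else:
            return {"content": str(content)}, "text"

    assert process_content(None) == ({}, None)
    assert process_content({"q": "hi"}) == ({"q": "hi"}, "json")
    assert process_content("hi") == ({"content": "hi"}, "text")
    assert process_content(42) == ({"content": "42"}, "text")
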
@@ -267,8 +285,6 @@ class LangchainTracer(BaseTracer, GenerationHelper, FinalStreamHelper):

          if self.context.current_step:
              self.root_parent_id = self.context.current_step.id
-         elif self.context.session.root_message:
-             self.root_parent_id = self.context.session.root_message.id
          else:
              self.root_parent_id = None

@@ -431,9 +447,6 @@ class LangchainTracer(BaseTracer, GenerationHelper, FinalStreamHelper):
              self.ignored_runs.add(str(run.id))
          return ignore, parent_id

-     def _is_annotable(self, run: Run):
-         return run.run_type in ["retriever", "llm"]
-
      def _start_trace(self, run: Run) -> None:
          super()._start_trace(run)
          context_var.set(self.context)
@@ -452,7 +465,8 @@ class LangchainTracer(BaseTracer, GenerationHelper, FinalStreamHelper):
          if run.run_type == "agent":
              step_type = "run"
          elif run.run_type == "chain":
-             pass
+             if not self.steps:
+                 step_type = "run"
          elif run.run_type == "llm":
              step_type = "llm"
          elif run.run_type == "retriever":
@@ -462,20 +476,18 @@ class LangchainTracer(BaseTracer, GenerationHelper, FinalStreamHelper):
          elif run.run_type == "embedding":
              step_type = "embedding"

-         if not self.steps and step_type != "llm":
-             step_type = "run"
-
-         disable_feedback = not self._is_annotable(run)
-
          step = Step(
              id=str(run.id),
              name=run.name,
              type=step_type,
              parent_id=parent_id,
-             disable_feedback=disable_feedback,
          )
          step.start = utc_now()
-         step.input = run.inputs
+         step.input, language = process_content(run.inputs)
+         if language is not None:
+             if step.metadata is None:
+                 step.metadata = {}
+             step.metadata["language"] = language

          self.steps[str(run.id)] = step

@@ -499,6 +511,9 @@ class LangchainTracer(BaseTracer, GenerationHelper, FinalStreamHelper):
          generations = (run.outputs or {}).get("generations", [])
          generation = generations[0][0]
          variables = self.generation_inputs.get(str(run.parent_run_id), {})
+         variables = {
+             k: process_content(v) for k, v in variables.items() if v is not None
+         }
          if message := generation.get("message"):
              chat_start = self.chat_generations[str(run.id)]
              duration = time.time() - chat_start["start"]
@@ -529,7 +544,11 @@ class LangchainTracer(BaseTracer, GenerationHelper, FinalStreamHelper):
                          "prompt_id"
                      ]
                  if custom_variables := m.additional_kwargs.get("variables"):
-                     current_step.generation.variables = custom_variables
+                     current_step.generation.variables = {
+                         k: process_content(v)
+                         for k, v in custom_variables.items()
+                         if v is not None
+                     }
                  break

          current_step.language = "json"
@@ -573,7 +592,7 @@ class LangchainTracer(BaseTracer, GenerationHelper, FinalStreamHelper):
          output = outputs.get(output_keys[0], outputs)

          if current_step:
-             current_step.output = output
+             current_step.output, _ = process_content(output)
              current_step.end = utc_now()
              self._run_sync(current_step.update())

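The widened DEFAULT_TO_IGNORE list hides the LCEL plumbing wrappers (RunnableSequence, RunnableParallel, and so on) from the step UI by default, while the run types in DEFAULT_TO_KEEP still surface. If the handler exposes these defaults as constructor arguments, as the constants suggest, overriding them would look roughly like this (a hedged sketch; the to_ignore/to_keep keyword names are inferred from the constants, not confirmed by this diff):

    import chainlit as cl

    # Hypothetical override mirroring DEFAULT_TO_IGNORE / DEFAULT_TO_KEEP above.
    handler = cl.LangchainCallbackHandler(
        to_ignore=["RunnableSequence", "<lambda>"],  # hide only these wrappers
        to_keep=["llm", "retriever", "tool"],        # run types always shown
    )
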
chainlit/llama_index/callbacks.py CHANGED
@@ -33,33 +33,17 @@ class LlamaIndexCallbackHandler(TokenCountingHandler):
              event_starts_to_ignore=event_starts_to_ignore,
              event_ends_to_ignore=event_ends_to_ignore,
          )
-         self.context = context_var.get()

          self.steps = {}

      def _get_parent_id(self, event_parent_id: Optional[str] = None) -> Optional[str]:
          if event_parent_id and event_parent_id in self.steps:
              return event_parent_id
-         elif self.context.current_step:
-             return self.context.current_step.id
-         elif self.context.session.root_message:
-             return self.context.session.root_message.id
+         elif context_var.get().current_step:
+             return context_var.get().current_step.id
          else:
              return None

-     def _restore_context(self) -> None:
-         """Restore Chainlit context in the current thread
-
-         Chainlit context is local to the main thread, and LlamaIndex
-         runs the callbacks in its own threads, so they don't have a
-         Chainlit context by default.
-
-         This method restores the context in which the callback handler
-         has been created (it's always created in the main thread), so
-         that we can actually send messages.
-         """
-         context_var.set(self.context)
-
      def on_event_start(
          self,
          event_type: CBEventType,
@@ -69,8 +53,6 @@ class LlamaIndexCallbackHandler(TokenCountingHandler):
          **kwargs: Any,
      ) -> str:
          """Run when an event starts and return id of event."""
-         self._restore_context()
-
          step_type: StepType = "undefined"
          if event_type == CBEventType.RETRIEVE:
              step_type = "tool"
@@ -86,12 +68,12 @@ class LlamaIndexCallbackHandler(TokenCountingHandler):
              type=step_type,
              parent_id=self._get_parent_id(parent_id),
              id=event_id,
-             disable_feedback=True,
          )
+
          self.steps[event_id] = step
          step.start = utc_now()
          step.input = payload or {}
-         self.context.loop.create_task(step.send())
+         context_var.get().loop.create_task(step.send())
          return event_id

      def on_event_end(
@@ -107,8 +89,6 @@ class LlamaIndexCallbackHandler(TokenCountingHandler):
          if payload is None or step is None:
              return

-         self._restore_context()
-
          step.end = utc_now()

          if event_type == CBEventType.QUERY:
@@ -127,7 +107,7 @@ class LlamaIndexCallbackHandler(TokenCountingHandler):
                  for idx, source in enumerate(source_nodes)
              ]
              step.output = f"Retrieved the following sources: {source_refs}"
-             self.context.loop.create_task(step.update())
+             context_var.get().loop.create_task(step.update())

          elif event_type == CBEventType.RETRIEVE:
              sources = payload.get(EventPayload.NODES)
@@ -144,7 +124,7 @@ class LlamaIndexCallbackHandler(TokenCountingHandler):
                  for idx, source in enumerate(sources)
              ]
              step.output = f"Retrieved the following sources: {source_refs}"
-             self.context.loop.create_task(step.update())
+             context_var.get().loop.create_task(step.update())

          elif event_type == CBEventType.LLM:
              formatted_messages = payload.get(
@@ -195,11 +175,11 @@ class LlamaIndexCallbackHandler(TokenCountingHandler):
                  token_count=token_count,
              )

-             self.context.loop.create_task(step.update())
+             context_var.get().loop.create_task(step.update())

          else:
              step.output = payload
-             self.context.loop.create_task(step.update())
+             context_var.get().loop.create_task(step.update())

          self.steps.pop(event_id, None)

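The removed _restore_context machinery pinned the Chainlit context captured when the handler was constructed; the handler now resolves the live context via context_var.get() on every event. Registration is unchanged; a minimal setup sketch in the style shown in Chainlit's docs (assumes the llama_index.core layout of llama-index >= 0.10; the handler body is illustrative):

    import chainlit as cl
    from llama_index.core import Settings
    from llama_index.core.callbacks import CallbackManager

    @cl.on_chat_start
    async def start():
        # Each callback now looks up the current Chainlit context itself,
        # so the handler can be created once per chat session.
        Settings.callback_manager = CallbackManager([cl.LlamaIndexCallbackHandler()])
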
chainlit/message.py CHANGED
@@ -6,6 +6,7 @@ from abc import ABC
  from typing import Dict, List, Optional, Union, cast

  from chainlit.action import Action
+ from chainlit.chat_context import chat_context
  from chainlit.config import config
  from chainlit.context import context, local_steps
  from chainlit.data import get_data_layer
@@ -31,7 +32,6 @@ class MessageBase(ABC):
      author: str
      content: str = ""
      type: MessageStepType = "assistant_message"
-     disable_feedback = False
      streaming = False
      created_at: Union[str, None] = None
      fail_on_persist_error: bool = False
@@ -66,7 +66,6 @@ class MessageBase(ABC):
              content=_dict["output"],
              author=_dict.get("name", config.ui.name),
              type=type,  # type: ignore
-             disable_feedback=_dict.get("disableFeedback", False),
              language=_dict.get("language"),
          )

@@ -86,7 +85,6 @@ class MessageBase(ABC):
              "createdAt": self.created_at,
              "language": self.language,
              "streaming": self.streaming,
-             "disableFeedback": self.disable_feedback,
              "isError": self.is_error,
              "waitForAnswer": self.wait_for_answer,
              "indent": self.indent,
@@ -108,6 +106,7 @@ class MessageBase(ABC):
          self.streaming = False

          step_dict = self.to_dict()
+         chat_context.add(self)

          data_layer = get_data_layer()
          if data_layer:
@@ -127,7 +126,7 @@ class MessageBase(ABC):
          Remove a message already sent to the UI.
          """
          trace_event("remove_message")
-
+         chat_context.remove(self)
          step_dict = self.to_dict()
          data_layer = get_data_layer()
          if data_layer:
@@ -169,6 +168,7 @@ class MessageBase(ABC):
          self.streaming = False

          step_dict = await self._create()
+         chat_context.add(self)
          await context.emitter.send_step(step_dict)

          return self
@@ -205,7 +205,6 @@ class Message(MessageBase):
          language (str, optional): Language of the code is the content is code. See https://react-code-blocks-rajinwonderland.vercel.app/?path=/story/codeblock--supported-languages for a list of supported languages.
          actions (List[Action], optional): A list of actions to send with the message.
          elements (List[ElementBased], optional): A list of elements to send with the message.
-         disable_feedback (bool, optional): Hide the feedback buttons for this specific message
      """

      def __init__(
@@ -215,7 +214,6 @@ class Message(MessageBase):
          language: Optional[str] = None,
          actions: Optional[List[Action]] = None,
          elements: Optional[List[ElementBased]] = None,
-         disable_feedback: bool = False,
          type: MessageStepType = "assistant_message",
          metadata: Optional[Dict] = None,
          tags: Optional[List[str]] = None,
@@ -254,7 +252,6 @@ class Message(MessageBase):
          self.type = type
          self.actions = actions if actions is not None else []
          self.elements = elements if elements is not None else []
-         self.disable_feedback = disable_feedback

          super().__post_init__()

@@ -266,8 +263,6 @@ class Message(MessageBase):
          trace_event("send_message")
          await super().send()

-         context.session.root_message = self
-
          # Create tasks for all actions and elements
          tasks = [action.send(for_id=self.id) for action in self.actions]
          tasks.extend(element.send(for_id=self.id) for element in self.elements)
@@ -352,7 +347,6 @@ class AskUserMessage(AskMessageBase):
      Args:
          content (str): The content of the prompt.
          author (str, optional): The author of the message, this will be used in the UI. Defaults to the assistant name (see config).
-         disable_feedback (bool, optional): Hide the feedback buttons for this specific message
          timeout (int, optional): The number of seconds to wait for an answer before raising a TimeoutError.
          raise_on_timeout (bool, optional): Whether to raise a socketio TimeoutError if the user does not answer in time.
      """
@@ -362,7 +356,6 @@ class AskUserMessage(AskMessageBase):
          content: str,
          author: str = config.ui.name,
          type: MessageStepType = "assistant_message",
-         disable_feedback: bool = True,
          timeout: int = 60,
          raise_on_timeout: bool = False,
      ):
@@ -370,7 +363,6 @@ class AskUserMessage(AskMessageBase):
          self.author = author
          self.timeout = timeout
          self.type = type
-         self.disable_feedback = disable_feedback
          self.raise_on_timeout = raise_on_timeout

          super().__post_init__()
@@ -417,7 +409,6 @@ class AskFileMessage(AskMessageBase):
          max_size_mb (int, optional): Maximum size per file in MB. Maximum value is 100.
          max_files (int, optional): Maximum number of files to upload. Maximum value is 10.
          author (str, optional): The author of the message, this will be used in the UI. Defaults to the assistant name (see config).
-         disable_feedback (bool, optional): Hide the feedback buttons for this specific message
          timeout (int, optional): The number of seconds to wait for an answer before raising a TimeoutError.
          raise_on_timeout (bool, optional): Whether to raise a socketio TimeoutError if the user does not answer in time.
      """
@@ -430,7 +421,6 @@ class AskFileMessage(AskMessageBase):
          max_files=1,
          author=config.ui.name,
          type: MessageStepType = "assistant_message",
-         disable_feedback: bool = True,
          timeout=90,
          raise_on_timeout=False,
      ):
@@ -442,7 +432,6 @@ class AskFileMessage(AskMessageBase):
          self.author = author
          self.timeout = timeout
          self.raise_on_timeout = raise_on_timeout
-         self.disable_feedback = disable_feedback

          super().__post_init__()

@@ -506,14 +495,12 @@ class AskActionMessage(AskMessageBase):
          content: str,
          actions: List[Action],
          author=config.ui.name,
-         disable_feedback=True,
          timeout=90,
          raise_on_timeout=False,
      ):
          self.content = content
          self.actions = actions
          self.author = author
-         self.disable_feedback = disable_feedback
          self.timeout = timeout
          self.raise_on_timeout = raise_on_timeout

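Taken together, the message.py changes are breaking for callers: the disable_feedback keyword is removed from Message, AskUserMessage, AskFileMessage, and AskActionMessage, and sent messages are now tracked in the new chat_context module rather than via session.root_message. A minimal migration sketch (handler name and content are illustrative):

    import chainlit as cl

    @cl.on_message
    async def handle(message: cl.Message):
        # 1.1.305: cl.Message(content="Done", disable_feedback=True)
        # 1.1.400: the keyword is gone; passing it raises a TypeError.
        reply = cl.Message(content="Done")
        await reply.send()    # send() also records the message via chat_context.add()
        await reply.remove()  # remove() unregisters it via chat_context.remove()
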
chainlit/mistralai/__init__.py CHANGED
@@ -1,12 +1,11 @@
  import asyncio
  from typing import Union

- from literalai import ChatGeneration, CompletionGeneration
- from literalai.helper import timestamp_utc
-
  from chainlit.context import get_context
  from chainlit.step import Step
  from chainlit.utils import check_module_version
+ from literalai import ChatGeneration, CompletionGeneration
+ from literalai.helper import timestamp_utc


  def instrument_mistralai():
@@ -20,8 +19,6 @@ def instrument_mistralai():
          parent_id = None
          if context.current_step:
              parent_id = context.current_step.id
-         elif context.session.root_message:
-             parent_id = context.session.root_message.id

          step = Step(
              name=generation.model if generation.model else generation.provider,

chainlit/openai/__init__.py CHANGED
@@ -24,8 +24,6 @@ def instrument_openai():
          parent_id = None
          if context.current_step:
              parent_id = context.current_step.id
-         elif context.session.root_message:
-             parent_id = context.session.root_message.id

          step = Step(
              name=generation.model if generation.model else generation.provider,

chainlit/server.py CHANGED
@@ -266,7 +266,7 @@ def get_html_template():
      )
      url = config.ui.github or default_url
      meta_image_url = config.ui.custom_meta_image_url or default_meta_image_url
-     favicon_path = ROOT_PATH + "/favicon" if ROOT_PATH else "/favicon"
+     favicon_path = "/favicon"

      tags = f"""<title>{config.ui.name}</title>
      <link rel="icon" href="{favicon_path}" />

chainlit/session.py CHANGED
@@ -35,17 +35,16 @@ class JSONEncoderIgnoreNonSerializable(json.JSONEncoder):
          return None


-
  def clean_metadata(metadata: Dict, max_size: int = 1048576):
      cleaned_metadata = json.loads(
          json.dumps(metadata, cls=JSONEncoderIgnoreNonSerializable, ensure_ascii=False)
      )

-     metadata_size = len(json.dumps(cleaned_metadata).encode('utf-8'))
+     metadata_size = len(json.dumps(cleaned_metadata).encode("utf-8"))
      if metadata_size > max_size:
          # Redact the metadata if it exceeds the maximum size
          cleaned_metadata = {
-             'message': f'Metadata size exceeds the limit of {max_size} bytes. Redacted.'
+             "message": f"Metadata size exceeds the limit of {max_size} bytes. Redacted."
          }

      return cleaned_metadata
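
clean_metadata (only its quoting changed here) first round-trips the metadata through JSONEncoderIgnoreNonSerializable, which, as its name and the visible return None suggest, replaces values json cannot encode with None, and then redacts the whole payload if it still exceeds max_size (1 MiB by default). An illustrative sketch of that behavior:

    meta = {"query": "hi", "socket": object()}
    print(clean_metadata(meta))
    # {'query': 'hi', 'socket': None}

    big = {"blob": "x" * 2_000_000}
    print(clean_metadata(big))
    # {'message': 'Metadata size exceeds the limit of 1048576 bytes. Redacted.'}
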
@@ -71,8 +70,6 @@ class BaseSession:
          token: Optional[str],
          # User specific environment variables. Empty if no user environment variables are required.
          user_env: Optional[Dict[str, str]],
-         # Last message at the root of the chat
-         root_message: Optional["Message"] = None,
          # Chat profile selected before the session was created
          chat_profile: Optional[str] = None,
          # Origin of the request
@@ -84,7 +81,6 @@ class BaseSession:
          self.user = user
          self.client_type = client_type
          self.token = token
-         self.root_message = root_message
          self.has_first_interaction = False
          self.user_env = user_env or {}
          self.chat_profile = chat_profile
@@ -178,8 +174,6 @@ class HTTPSession(BaseSession):
          # Logged-in user token
          token: Optional[str] = None,
          user_env: Optional[Dict[str, str]] = None,
-         # Last message at the root of the chat
-         root_message: Optional["Message"] = None,
          # Origin of the request
          http_referer: Optional[str] = None,
      ):
@@ -190,7 +184,6 @@ class HTTPSession(BaseSession):
              token=token,
              client_type=client_type,
              user_env=user_env,
-             root_message=root_message,
              http_referer=http_referer,
          )

@@ -233,8 +226,6 @@ class WebsocketSession(BaseSession):
          user: Optional[Union["User", "PersistedUser"]] = None,
          # Logged-in user token
          token: Optional[str] = None,
-         # Last message at the root of the chat
-         root_message: Optional["Message"] = None,
          # Chat profile selected before the session was created
          chat_profile: Optional[str] = None,
          # Languages of the user's browser
@@ -249,7 +240,6 @@ class WebsocketSession(BaseSession):
              token=token,
              user_env=user_env,
              client_type=client_type,
-             root_message=root_message,
              chat_profile=chat_profile,
              http_referer=http_referer,
          )
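
root_message is gone from every session constructor, so code that read context.session.root_message to parent new steps must fall back on the current step, as the openai, mistralai, langchain, and llama_index integrations above now do. A hedged sketch of the replacement pattern (resolve_parent_id is a hypothetical helper, not part of chainlit):

    from chainlit.context import context

    def resolve_parent_id():
        # 1.1.305 fell back to context.session.root_message.id;
        # 1.1.400 only parents under an active step.
        if context.current_step:
            return context.current_step.id
        return None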