chainlit 1.0.400__py3-none-any.whl → 2.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of chainlit might be problematic.
- chainlit/__init__.py +98 -279
- chainlit/_utils.py +8 -0
- chainlit/action.py +12 -10
- chainlit/{auth.py → auth/__init__.py} +28 -36
- chainlit/auth/cookie.py +122 -0
- chainlit/auth/jwt.py +39 -0
- chainlit/cache.py +4 -6
- chainlit/callbacks.py +362 -0
- chainlit/chat_context.py +64 -0
- chainlit/chat_settings.py +3 -1
- chainlit/cli/__init__.py +77 -8
- chainlit/config.py +181 -101
- chainlit/context.py +42 -13
- chainlit/copilot/dist/index.js +8750 -903
- chainlit/data/__init__.py +101 -416
- chainlit/data/acl.py +6 -2
- chainlit/data/base.py +107 -0
- chainlit/data/chainlit_data_layer.py +608 -0
- chainlit/data/dynamodb.py +590 -0
- chainlit/data/literalai.py +500 -0
- chainlit/data/sql_alchemy.py +721 -0
- chainlit/data/storage_clients/__init__.py +0 -0
- chainlit/data/storage_clients/azure.py +81 -0
- chainlit/data/storage_clients/azure_blob.py +89 -0
- chainlit/data/storage_clients/base.py +26 -0
- chainlit/data/storage_clients/gcs.py +88 -0
- chainlit/data/storage_clients/s3.py +75 -0
- chainlit/data/utils.py +29 -0
- chainlit/discord/__init__.py +6 -0
- chainlit/discord/app.py +354 -0
- chainlit/element.py +91 -33
- chainlit/emitter.py +80 -29
- chainlit/frontend/dist/assets/DailyMotion-C_XC7xJI.js +1 -0
- chainlit/frontend/dist/assets/Dataframe-Cs4l4hA1.js +22 -0
- chainlit/frontend/dist/assets/Facebook-CUeCH7hk.js +1 -0
- chainlit/frontend/dist/assets/FilePlayer-CB-fYkx8.js +1 -0
- chainlit/frontend/dist/assets/Kaltura-YX6qaq72.js +1 -0
- chainlit/frontend/dist/assets/Mixcloud-DGV0ldjP.js +1 -0
- chainlit/frontend/dist/assets/Mux-CmRss5oc.js +1 -0
- chainlit/frontend/dist/assets/Preview-DBVJn7-H.js +1 -0
- chainlit/frontend/dist/assets/SoundCloud-qLUb18oY.js +1 -0
- chainlit/frontend/dist/assets/Streamable-BvYP7bFp.js +1 -0
- chainlit/frontend/dist/assets/Twitch-CTHt-sGZ.js +1 -0
- chainlit/frontend/dist/assets/Vidyard-B-0mCJbm.js +1 -0
- chainlit/frontend/dist/assets/Vimeo-Dnp7ri8q.js +1 -0
- chainlit/frontend/dist/assets/Wistia-DW0x_UBn.js +1 -0
- chainlit/frontend/dist/assets/YouTube--98FipvA.js +1 -0
- chainlit/frontend/dist/assets/index-D71nZ46o.js +8665 -0
- chainlit/frontend/dist/assets/index-g8LTJwwr.css +1 -0
- chainlit/frontend/dist/assets/react-plotly-Cn_BQTQw.js +3484 -0
- chainlit/frontend/dist/index.html +2 -4
- chainlit/haystack/callbacks.py +4 -7
- chainlit/input_widget.py +8 -4
- chainlit/langchain/callbacks.py +107 -72
- chainlit/langflow/__init__.py +1 -0
- chainlit/llama_index/__init__.py +2 -2
- chainlit/llama_index/callbacks.py +67 -42
- chainlit/markdown.py +22 -6
- chainlit/message.py +54 -56
- chainlit/mistralai/__init__.py +50 -0
- chainlit/oauth_providers.py +266 -8
- chainlit/openai/__init__.py +10 -18
- chainlit/secret.py +1 -1
- chainlit/server.py +789 -228
- chainlit/session.py +108 -90
- chainlit/slack/__init__.py +6 -0
- chainlit/slack/app.py +397 -0
- chainlit/socket.py +199 -116
- chainlit/step.py +141 -89
- chainlit/sync.py +2 -1
- chainlit/teams/__init__.py +6 -0
- chainlit/teams/app.py +338 -0
- chainlit/translations/bn.json +235 -0
- chainlit/translations/en-US.json +83 -4
- chainlit/translations/gu.json +235 -0
- chainlit/translations/he-IL.json +235 -0
- chainlit/translations/hi.json +235 -0
- chainlit/translations/kn.json +235 -0
- chainlit/translations/ml.json +235 -0
- chainlit/translations/mr.json +235 -0
- chainlit/translations/nl-NL.json +233 -0
- chainlit/translations/ta.json +235 -0
- chainlit/translations/te.json +235 -0
- chainlit/translations/zh-CN.json +233 -0
- chainlit/translations.py +60 -0
- chainlit/types.py +133 -28
- chainlit/user.py +14 -3
- chainlit/user_session.py +6 -3
- chainlit/utils.py +52 -5
- chainlit/version.py +3 -2
- {chainlit-1.0.400.dist-info → chainlit-2.0.3.dist-info}/METADATA +48 -50
- chainlit-2.0.3.dist-info/RECORD +106 -0
- chainlit/cli/utils.py +0 -24
- chainlit/frontend/dist/assets/index-9711593e.js +0 -723
- chainlit/frontend/dist/assets/index-d088547c.css +0 -1
- chainlit/frontend/dist/assets/react-plotly-d8762cc2.js +0 -3602
- chainlit/playground/__init__.py +0 -2
- chainlit/playground/config.py +0 -40
- chainlit/playground/provider.py +0 -108
- chainlit/playground/providers/__init__.py +0 -13
- chainlit/playground/providers/anthropic.py +0 -118
- chainlit/playground/providers/huggingface.py +0 -75
- chainlit/playground/providers/langchain.py +0 -89
- chainlit/playground/providers/openai.py +0 -408
- chainlit/playground/providers/vertexai.py +0 -171
- chainlit/translations/pt-BR.json +0 -155
- chainlit-1.0.400.dist-info/RECORD +0 -66
- /chainlit/copilot/dist/assets/{logo_dark-2a3cf740.svg → logo_dark-IkGJ_IwC.svg} +0 -0
- /chainlit/copilot/dist/assets/{logo_light-b078e7bc.svg → logo_light-Bb_IPh6r.svg} +0 -0
- /chainlit/frontend/dist/assets/{logo_dark-2a3cf740.svg → logo_dark-IkGJ_IwC.svg} +0 -0
- /chainlit/frontend/dist/assets/{logo_light-b078e7bc.svg → logo_light-Bb_IPh6r.svg} +0 -0
- {chainlit-1.0.400.dist-info → chainlit-2.0.3.dist-info}/WHEEL +0 -0
- {chainlit-1.0.400.dist-info → chainlit-2.0.3.dist-info}/entry_points.txt +0 -0
chainlit/frontend/dist/index.html
CHANGED

```diff
@@ -4,7 +4,6 @@
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
     <!-- TAG INJECTION PLACEHOLDER -->
-    <link rel="icon" href="/favicon" />
     <link rel="preconnect" href="https://fonts.googleapis.com" />
     <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin />
     <!-- FONT START -->
@@ -22,11 +21,10 @@
     <script>
      const global = globalThis;
     </script>
-    <script type="module" crossorigin src="/assets/index-9711593e.js"></script>
-    <link rel="stylesheet" href="/assets/index-d088547c.css">
+    <script type="module" crossorigin src="/assets/index-D71nZ46o.js"></script>
+    <link rel="stylesheet" crossorigin href="/assets/index-g8LTJwwr.css">
   </head>
   <body>
     <div id="root"></div>
-
   </body>
 </html>
```
chainlit/haystack/callbacks.py
CHANGED
```diff
@@ -1,14 +1,13 @@
 import re
 from typing import Any, Generic, List, Optional, TypeVar
 
-from chainlit.context import context
-from chainlit.step import Step
-from chainlit.sync import run_sync
 from haystack.agents import Agent, Tool
 from haystack.agents.agent_step import AgentStep
 from literalai.helper import utc_now
 
 from chainlit import Message
+from chainlit.step import Step
+from chainlit.sync import run_sync
 
 T = TypeVar("T")
 
@@ -68,9 +67,7 @@ class HaystackAgentCallbackHandler:
         self.last_tokens: List[str] = []
         self.answer_reached = False
 
-
-        parent_id = root_message.id if root_message else None
-        run_step = Step(name=self.agent_name, type="run", parent_id=parent_id)
+        run_step = Step(name=self.agent_name, type="run")
         run_step.start = utc_now()
         run_step.input = kwargs
 
@@ -135,7 +132,7 @@ class HaystackAgentCallbackHandler:
         tool_result: str,
         tool_name: Optional[str] = None,
         tool_input: Optional[str] = None,
-        **kwargs: Any
+        **kwargs: Any,
     ) -> None:
         # Tool finished, send step with tool_result
         tool_step = self.stack.pop()
```
chainlit/input_widget.py
CHANGED
```diff
@@ -2,8 +2,10 @@ from abc import abstractmethod
 from collections import defaultdict
 from typing import Any, Dict, List, Optional
 
+from pydantic import Field
+from pydantic.dataclasses import dataclass
+
 from chainlit.types import InputWidgetType
-from pydantic.dataclasses import Field, dataclass
 
 
 @dataclass
@@ -75,7 +77,7 @@ class Select(InputWidget):
     initial: Optional[str] = None
     initial_index: Optional[int] = None
     initial_value: Optional[str] = None
-    values: List[str] = Field(default_factory=
+    values: List[str] = Field(default_factory=list)
     items: Dict[str, str] = Field(default_factory=lambda: defaultdict(dict))
 
     def __post_init__(
@@ -127,6 +129,7 @@ class TextInput(InputWidget):
     type: InputWidgetType = "textinput"
     initial: Optional[str] = None
     placeholder: Optional[str] = None
+    multiline: bool = False
 
     def to_dict(self) -> Dict[str, Any]:
         return {
@@ -137,6 +140,7 @@ class TextInput(InputWidget):
             "placeholder": self.placeholder,
             "tooltip": self.tooltip,
             "description": self.description,
+            "multiline": self.multiline,
         }
 
 
@@ -165,8 +169,8 @@ class Tags(InputWidget):
     """Useful to create an input for an array of strings."""
 
     type: InputWidgetType = "tags"
-    initial: List[str] = Field(default_factory=
-    values: List[str] = Field(default_factory=
+    initial: List[str] = Field(default_factory=list)
+    values: List[str] = Field(default_factory=list)
 
     def to_dict(self) -> Dict[str, Any]:
         return {
```
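The widget changes are additive and backward compatible: `TextInput` gains a `multiline` flag (now serialized by `to_dict`), and the `Field`/`dataclass` imports are split between `pydantic` and `pydantic.dataclasses`. A minimal sketch of the touched widgets inside a `ChatSettings` panel; the ids and labels are illustrative:

```python
import chainlit as cl
from chainlit.input_widget import Select, Tags, TextInput

@cl.on_chat_start
async def start():
    await cl.ChatSettings(
        [
            Select(id="model", label="Model", values=["gpt-4o", "gpt-4o-mini"]),
            Tags(id="stop", label="Stop sequences", initial=["###"]),
            # New in 2.x: render the text input as a multi-line textarea.
            TextInput(id="system", label="System prompt", multiline=True),
        ]
    ).send()

@cl.on_settings_update
async def on_update(settings: dict):
    # Values arrive keyed by widget id, e.g. settings["system"].
    print(settings)
```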
chainlit/langchain/callbacks.py
CHANGED
```diff
@@ -1,19 +1,20 @@
 import json
 import time
-from typing import Any, Dict, List, Optional, TypedDict, Union
+from typing import Any, Dict, List, Optional, Sequence, Tuple, TypedDict, Union
 from uuid import UUID
 
-
-from chainlit.message import Message
-from chainlit.step import Step
-from langchain.callbacks.tracers.base import BaseTracer
+import pydantic
 from langchain.callbacks.tracers.schemas import Run
 from langchain.schema import BaseMessage
-from langchain.schema.output import ChatGenerationChunk, GenerationChunk
 from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
+from langchain_core.tracers.base import AsyncBaseTracer
 from literalai import ChatGeneration, CompletionGeneration, GenerationMessage
 from literalai.helper import utc_now
-from literalai.step import TrueStepType
+from literalai.observability.step import TrueStepType
+
+from chainlit.context import context_var
+from chainlit.message import Message
+from chainlit.step import Step
 
 DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]
 
@@ -123,6 +124,14 @@ class GenerationHelper:
                 key: self.ensure_values_serializable(value)
                 for key, value in data.items()
             }
+        elif isinstance(data, pydantic.BaseModel):
+            # Fallback to support pydantic v1
+            # https://docs.pydantic.dev/latest/migration/#changes-to-pydanticbasemodel
+            if pydantic.VERSION.startswith("1"):
+                return data.dict()
+
+            # pydantic v2
+            return data.model_dump()  # pyright: ignore reportAttributeAccessIssue
         elif isinstance(data, list):
             return [self.ensure_values_serializable(item) for item in data]
         elif isinstance(data, (str, int, float, bool, type(None))):
@@ -229,11 +238,28 @@ class GenerationHelper:
         return provider, model, tools, settings
 
 
-
+def process_content(content: Any) -> Tuple[Dict, Optional[str]]:
+    if content is None:
+        return {}, None
+    if isinstance(content, dict):
+        return content, "json"
+    elif isinstance(content, str):
+        return {"content": content}, "text"
+    else:
+        return {"content": str(content)}, "text"
+
+
+DEFAULT_TO_IGNORE = [
+    "RunnableSequence",
+    "RunnableParallel",
+    "RunnableAssign",
+    "RunnableLambda",
+    "<lambda>",
+]
 DEFAULT_TO_KEEP = ["retriever", "llm", "agent", "chain", "tool"]
 
 
-class LangchainTracer(BaseTracer, GenerationHelper, FinalStreamHelper):
+class LangchainTracer(AsyncBaseTracer, GenerationHelper, FinalStreamHelper):
     steps: Dict[str, Step]
     parent_id_map: Dict[str, str]
     ignored_runs: set
@@ -252,7 +278,7 @@ class LangchainTracer(BaseTracer, GenerationHelper, FinalStreamHelper):
         to_keep: Optional[List[str]] = None,
         **kwargs: Any,
     ) -> None:
-        BaseTracer.__init__(self, **kwargs)
+        AsyncBaseTracer.__init__(self, **kwargs)
         GenerationHelper.__init__(self)
         FinalStreamHelper.__init__(
             self,
@@ -267,8 +293,6 @@ class LangchainTracer(BaseTracer, GenerationHelper, FinalStreamHelper):
 
         if self.context.current_step:
             self.root_parent_id = self.context.current_step.id
-        elif self.context.session.root_message:
-            self.root_parent_id = self.context.session.root_message.id
         else:
             self.root_parent_id = None
 
@@ -282,7 +306,7 @@ class LangchainTracer(BaseTracer, GenerationHelper, FinalStreamHelper):
         else:
             self.to_keep = to_keep
 
-    def on_chat_model_start(
+    async def on_chat_model_start(
         self,
         serialized: Dict[str, Any],
         messages: List[List[BaseMessage]],
@@ -291,8 +315,9 @@ class LangchainTracer(BaseTracer, GenerationHelper, FinalStreamHelper):
         parent_run_id: Optional["UUID"] = None,
         tags: Optional[List[str]] = None,
         metadata: Optional[Dict[str, Any]] = None,
+        name: Optional[str] = None,
         **kwargs: Any,
-    ) ->
+    ) -> Run:
         lc_messages = messages[0]
         self.chat_generations[str(run_id)] = {
             "input_messages": lc_messages,
@@ -301,46 +326,48 @@ class LangchainTracer(BaseTracer, GenerationHelper, FinalStreamHelper):
             "tt_first_token": None,
         }
 
-        return super().on_chat_model_start(
+        return await super().on_chat_model_start(
             serialized,
             messages,
             run_id=run_id,
             parent_run_id=parent_run_id,
             tags=tags,
             metadata=metadata,
+            name=name,
             **kwargs,
         )
 
-    def on_llm_start(
+    async def on_llm_start(
         self,
         serialized: Dict[str, Any],
         prompts: List[str],
         *,
         run_id: "UUID",
+        parent_run_id: Optional[UUID] = None,
         tags: Optional[List[str]] = None,
-        parent_run_id: Optional["UUID"] = None,
         metadata: Optional[Dict[str, Any]] = None,
-        name: Optional[str] = None,
         **kwargs: Any,
-    ) ->
-        self.completion_generations[str(run_id)] = {
-            "prompt": prompts[0],
-            "start": time.time(),
-            "token_count": 0,
-            "tt_first_token": None,
-        }
-        return super().on_llm_start(
+    ) -> None:
+        await super().on_llm_start(
             serialized,
             prompts,
             run_id=run_id,
             parent_run_id=parent_run_id,
             tags=tags,
             metadata=metadata,
-            name=name,
             **kwargs,
         )
 
-    def on_llm_new_token(
+        self.completion_generations[str(run_id)] = {
+            "prompt": prompts[0],
+            "start": time.time(),
+            "token_count": 0,
+            "tt_first_token": None,
+        }
+
+        return None
+
+    async def on_llm_new_token(
         self,
         token: str,
         *,
@@ -348,7 +375,14 @@ class LangchainTracer(BaseTracer, GenerationHelper, FinalStreamHelper):
         run_id: "UUID",
         parent_run_id: Optional["UUID"] = None,
         **kwargs: Any,
-    ) ->
+    ) -> None:
+        await super().on_llm_new_token(
+            token=token,
+            chunk=chunk,
+            run_id=run_id,
+            parent_run_id=parent_run_id,
+            **kwargs,
+        )
         if isinstance(chunk, ChatGenerationChunk):
             start = self.chat_generations[str(run_id)]
         else:
@@ -363,24 +397,13 @@ class LangchainTracer(BaseTracer, GenerationHelper, FinalStreamHelper):
         if self.answer_reached:
             if not self.final_stream:
                 self.final_stream = Message(content="")
-                self._run_sync(self.final_stream.send())
-            self._run_sync(self.final_stream.stream_token(token))
+                await self.final_stream.send()
+            await self.final_stream.stream_token(token)
             self.has_streamed_final_answer = True
         else:
             self.answer_reached = self._check_if_answer_reached()
 
-        return super().on_llm_new_token(
-            token,
-            chunk=chunk,
-            run_id=run_id,
-            parent_run_id=parent_run_id,
-        )
-
-    def _run_sync(self, co):  # TODO: WHAT TO DO WITH THIS?
-        context_var.set(self.context)
-        self.context.loop.create_task(co)
-
-    def _persist_run(self, run: Run) -> None:
+    async def _persist_run(self, run: Run) -> None:
         pass
 
     def _get_run_parent_id(self, run: Run):
@@ -431,11 +454,8 @@ class LangchainTracer(BaseTracer, GenerationHelper, FinalStreamHelper):
         self.ignored_runs.add(str(run.id))
         return ignore, parent_id
 
-    def _is_annotable(self, run: Run):
-
-
-    def _start_trace(self, run: Run) -> None:
-        super()._start_trace(run)
+    async def _start_trace(self, run: Run) -> None:
+        await super()._start_trace(run)
         context_var.set(self.context)
 
         ignore, parent_id = self._should_ignore_run(run)
@@ -448,40 +468,39 @@ class LangchainTracer(BaseTracer, GenerationHelper, FinalStreamHelper):
         if ignore:
             return
 
-        step_type:
+        step_type: TrueStepType = "undefined"
         if run.run_type == "agent":
             step_type = "run"
         elif run.run_type == "chain":
-
+            if not self.steps:
+                step_type = "run"
         elif run.run_type == "llm":
             step_type = "llm"
         elif run.run_type == "retriever":
-            step_type = "
+            step_type = "tool"
         elif run.run_type == "tool":
             step_type = "tool"
         elif run.run_type == "embedding":
             step_type = "embedding"
 
-        if not self.steps:
-            step_type = "run"
-
-        disable_feedback = not self._is_annotable(run)
-
         step = Step(
             id=str(run.id),
             name=run.name,
             type=step_type,
             parent_id=parent_id,
-            disable_feedback=disable_feedback,
         )
         step.start = utc_now()
-        step.input = run.inputs
+        step.input, language = process_content(run.inputs)
+        if language is not None:
+            if step.metadata is None:
+                step.metadata = {}
+            step.metadata["language"] = language
 
         self.steps[str(run.id)] = step
 
-        self._run_sync(step.send())
+        await step.send()
 
-    def _on_run_update(self, run: Run) -> None:
+    async def _on_run_update(self, run: Run) -> None:
         """Process a run upon update."""
         context_var.set(self.context)
 
@@ -499,6 +518,9 @@ class LangchainTracer(BaseTracer, GenerationHelper, FinalStreamHelper):
         generations = (run.outputs or {}).get("generations", [])
         generation = generations[0][0]
         variables = self.generation_inputs.get(str(run.parent_run_id), {})
+        variables = {
+            k: process_content(v) for k, v in variables.items() if v is not None
+        }
         if message := generation.get("message"):
             chat_start = self.chat_generations[str(run.id)]
             duration = time.time() - chat_start["start"]
@@ -525,13 +547,21 @@ class LangchainTracer(BaseTracer, GenerationHelper, FinalStreamHelper):
             # find first message with prompt_id
             for m in chat_start["input_messages"]:
                 if m.additional_kwargs.get("prompt_id"):
-                    current_step.generation.prompt_id = m.additional_kwargs[
+                    current_step.generation.prompt_id = m.additional_kwargs[
+                        "prompt_id"
+                    ]
                     if custom_variables := m.additional_kwargs.get("variables"):
-                        current_step.generation.variables =
+                        current_step.generation.variables = {
+                            k: process_content(v)
+                            for k, v in custom_variables.items()
+                            if v is not None
+                        }
                     break
-
+
             current_step.language = "json"
-            current_step.output = json.dumps(
+            current_step.output = json.dumps(
+                message_completion, indent=4, ensure_ascii=False
+            )
         else:
             completion_start = self.completion_generations[str(run.id)]
             completion = generation.get("text", "")
@@ -555,34 +585,39 @@ class LangchainTracer(BaseTracer, GenerationHelper, FinalStreamHelper):
 
         if current_step:
             current_step.end = utc_now()
-            self._run_sync(current_step.update())
+            await current_step.update()
 
         if self.final_stream and self.has_streamed_final_answer:
-
-            self.final_stream.content = completion
-            self._run_sync(self.final_stream.update())
+            await self.final_stream.update()
 
             return
 
        outputs = run.outputs or {}
        output_keys = list(outputs.keys())
        output = outputs
+
        if output_keys:
            output = outputs.get(output_keys[0], outputs)
 
        if current_step:
-            current_step.output =
+            current_step.output = (
+                output[0]
+                if isinstance(output, Sequence)
+                and not isinstance(output, str)
+                and len(output)
+                else output
+            )
            current_step.end = utc_now()
-            self._run_sync(current_step.update())
+            await current_step.update()
 
-    def _on_error(self, error: BaseException, *, run_id: UUID, **kwargs: Any):
+    async def _on_error(self, error: BaseException, *, run_id: UUID, **kwargs: Any):
         context_var.set(self.context)
 
         if current_step := self.steps.get(str(run_id), None):
             current_step.is_error = True
             current_step.output = str(error)
             current_step.end = utc_now()
-            self._run_sync(current_step.update())
+            await current_step.update()
 
     on_llm_error = _on_error
     on_chain_error = _on_error
```
chainlit/langflow/__init__.py
CHANGED
chainlit/llama_index/__init__.py
CHANGED
```diff
@@ -1,6 +1,6 @@
 from chainlit.utils import check_module_version
 
-if not check_module_version("llama_index", "0.
+if not check_module_version("llama_index.core", "0.10.15"):
     raise ValueError(
-        "Expected LlamaIndex version >= 0.
+        "Expected LlamaIndex version >= 0.10.15. Run `pip install llama_index --upgrade`"
     )
```