chainlit 2.7.0__py3-none-any.whl → 2.7.1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of chainlit might be problematic.

Files changed (85)
  1. {chainlit-2.7.0.dist-info → chainlit-2.7.1.dist-info}/METADATA +1 -1
  2. chainlit-2.7.1.dist-info/RECORD +4 -0
  3. chainlit/__init__.py +0 -207
  4. chainlit/__main__.py +0 -4
  5. chainlit/_utils.py +0 -8
  6. chainlit/action.py +0 -33
  7. chainlit/auth/__init__.py +0 -95
  8. chainlit/auth/cookie.py +0 -197
  9. chainlit/auth/jwt.py +0 -42
  10. chainlit/cache.py +0 -45
  11. chainlit/callbacks.py +0 -433
  12. chainlit/chat_context.py +0 -64
  13. chainlit/chat_settings.py +0 -34
  14. chainlit/cli/__init__.py +0 -235
  15. chainlit/config.py +0 -621
  16. chainlit/context.py +0 -112
  17. chainlit/data/__init__.py +0 -111
  18. chainlit/data/acl.py +0 -19
  19. chainlit/data/base.py +0 -107
  20. chainlit/data/chainlit_data_layer.py +0 -687
  21. chainlit/data/dynamodb.py +0 -616
  22. chainlit/data/literalai.py +0 -501
  23. chainlit/data/sql_alchemy.py +0 -741
  24. chainlit/data/storage_clients/__init__.py +0 -0
  25. chainlit/data/storage_clients/azure.py +0 -84
  26. chainlit/data/storage_clients/azure_blob.py +0 -94
  27. chainlit/data/storage_clients/base.py +0 -28
  28. chainlit/data/storage_clients/gcs.py +0 -101
  29. chainlit/data/storage_clients/s3.py +0 -88
  30. chainlit/data/utils.py +0 -29
  31. chainlit/discord/__init__.py +0 -6
  32. chainlit/discord/app.py +0 -364
  33. chainlit/element.py +0 -454
  34. chainlit/emitter.py +0 -450
  35. chainlit/hello.py +0 -12
  36. chainlit/input_widget.py +0 -182
  37. chainlit/langchain/__init__.py +0 -6
  38. chainlit/langchain/callbacks.py +0 -682
  39. chainlit/langflow/__init__.py +0 -25
  40. chainlit/llama_index/__init__.py +0 -6
  41. chainlit/llama_index/callbacks.py +0 -206
  42. chainlit/logger.py +0 -16
  43. chainlit/markdown.py +0 -57
  44. chainlit/mcp.py +0 -99
  45. chainlit/message.py +0 -619
  46. chainlit/mistralai/__init__.py +0 -50
  47. chainlit/oauth_providers.py +0 -835
  48. chainlit/openai/__init__.py +0 -53
  49. chainlit/py.typed +0 -0
  50. chainlit/secret.py +0 -9
  51. chainlit/semantic_kernel/__init__.py +0 -111
  52. chainlit/server.py +0 -1616
  53. chainlit/session.py +0 -304
  54. chainlit/sidebar.py +0 -55
  55. chainlit/slack/__init__.py +0 -6
  56. chainlit/slack/app.py +0 -427
  57. chainlit/socket.py +0 -381
  58. chainlit/step.py +0 -490
  59. chainlit/sync.py +0 -43
  60. chainlit/teams/__init__.py +0 -6
  61. chainlit/teams/app.py +0 -348
  62. chainlit/translations/bn.json +0 -214
  63. chainlit/translations/el-GR.json +0 -214
  64. chainlit/translations/en-US.json +0 -214
  65. chainlit/translations/fr-FR.json +0 -214
  66. chainlit/translations/gu.json +0 -214
  67. chainlit/translations/he-IL.json +0 -214
  68. chainlit/translations/hi.json +0 -214
  69. chainlit/translations/ja.json +0 -214
  70. chainlit/translations/kn.json +0 -214
  71. chainlit/translations/ml.json +0 -214
  72. chainlit/translations/mr.json +0 -214
  73. chainlit/translations/nl.json +0 -214
  74. chainlit/translations/ta.json +0 -214
  75. chainlit/translations/te.json +0 -214
  76. chainlit/translations/zh-CN.json +0 -214
  77. chainlit/translations.py +0 -60
  78. chainlit/types.py +0 -334
  79. chainlit/user.py +0 -43
  80. chainlit/user_session.py +0 -153
  81. chainlit/utils.py +0 -173
  82. chainlit/version.py +0 -8
  83. chainlit-2.7.0.dist-info/RECORD +0 -84
  84. {chainlit-2.7.0.dist-info → chainlit-2.7.1.dist-info}/WHEEL +0 -0
  85. {chainlit-2.7.0.dist-info → chainlit-2.7.1.dist-info}/entry_points.txt +0 -0
@@ -1,682 +0,0 @@
-import time
-from typing import Any, Dict, List, Optional, Tuple, TypedDict, Union
-from uuid import UUID
-
-import pydantic
-from langchain.callbacks.tracers.schemas import Run
-from langchain.load.dump import dumps
-from langchain.schema import BaseMessage
-from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
-from langchain_core.tracers.base import AsyncBaseTracer
-from literalai import ChatGeneration, CompletionGeneration, GenerationMessage
-from literalai.observability.step import TrueStepType
-
-from chainlit.context import context_var
-from chainlit.message import Message
-from chainlit.step import Step
-from chainlit.utils import utc_now
-
-DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]
-
-
-class FinalStreamHelper:
-    # The stream we can use to stream the final answer from a chain
-    final_stream: Union[Message, None]
-    # Should we stream the final answer?
-    stream_final_answer: bool = False
-    # Token sequence that prefixes the answer
-    answer_prefix_tokens: List[str]
-    # Ignore white spaces and new lines when comparing answer_prefix_tokens to last tokens? (to determine if answer has been reached)
-    strip_tokens: bool
-
-    answer_reached: bool
-
-    def __init__(
-        self,
-        answer_prefix_tokens: Optional[List[str]] = None,
-        stream_final_answer: bool = False,
-        force_stream_final_answer: bool = False,
-        strip_tokens: bool = True,
-    ) -> None:
-        # Langchain final answer streaming logic
-        if answer_prefix_tokens is None:
-            self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
-        else:
-            self.answer_prefix_tokens = answer_prefix_tokens
-        if strip_tokens:
-            self.answer_prefix_tokens_stripped = [
-                token.strip() for token in self.answer_prefix_tokens
-            ]
-        else:
-            self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
-
-        self.last_tokens = [""] * len(self.answer_prefix_tokens)
-        self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens)
-        self.strip_tokens = strip_tokens
-        self.answer_reached = force_stream_final_answer
-
-        # Our own final answer streaming logic
-        self.stream_final_answer = stream_final_answer
-        self.final_stream = None
-        self.has_streamed_final_answer = False
-
-    def _check_if_answer_reached(self) -> bool:
-        if self.strip_tokens:
-            return self._compare_last_tokens(self.last_tokens_stripped)
-        else:
-            return self._compare_last_tokens(self.last_tokens)
-
-    def _compare_last_tokens(self, last_tokens: List[str]):
-        if last_tokens == self.answer_prefix_tokens_stripped:
-            # If tokens match perfectly we are done
-            return True
-        else:
-            # Some LLMs will consider all the tokens of the final answer as one token
-            # so we check if any last token contains all answer tokens
-            return any(
-                [
-                    all(
-                        answer_token in last_token
-                        for answer_token in self.answer_prefix_tokens_stripped
-                    )
-                    for last_token in last_tokens
-                ]
-            )
-
-    def _append_to_last_tokens(self, token: str) -> None:
-        self.last_tokens.append(token)
-        self.last_tokens_stripped.append(token.strip())
-        if len(self.last_tokens) > len(self.answer_prefix_tokens):
-            self.last_tokens.pop(0)
-            self.last_tokens_stripped.pop(0)
-
-
-class ChatGenerationStart(TypedDict):
-    input_messages: List[BaseMessage]
-    start: float
-    token_count: int
-    tt_first_token: Optional[float]
-
-
-class CompletionGenerationStart(TypedDict):
-    prompt: str
-    start: float
-    token_count: int
-    tt_first_token: Optional[float]
-
-
-class GenerationHelper:
-    chat_generations: Dict[str, ChatGenerationStart]
-    completion_generations: Dict[str, CompletionGenerationStart]
-    generation_inputs: Dict[str, Dict]
-
-    def __init__(self) -> None:
-        self.chat_generations = {}
-        self.completion_generations = {}
-        self.generation_inputs = {}
-
-    def ensure_values_serializable(self, data):
-        """
-        Recursively ensures that all values in the input (dict or list) are JSON serializable.
-        """
-        if isinstance(data, dict):
-            return {
-                key: self.ensure_values_serializable(value)
-                for key, value in data.items()
-            }
-        elif isinstance(data, pydantic.BaseModel):
-            # Fallback to support pydantic v1
-            # https://docs.pydantic.dev/latest/migration/#changes-to-pydanticbasemodel
-            if pydantic.VERSION.startswith("1"):
-                return data.dict()
-
-            # pydantic v2
-            return data.model_dump()  # pyright: ignore reportAttributeAccessIssue
-        elif isinstance(data, list):
-            return [self.ensure_values_serializable(item) for item in data]
-        elif isinstance(data, (str, int, float, bool, type(None))):
-            return data
-        elif isinstance(data, (tuple, set)):
-            return list(data)  # Convert tuples and sets to lists
-        else:
-            return str(data)  # Fallback: convert other types to string
-
-    def _convert_message_role(self, role: str):
-        if "human" in role.lower():
-            return "user"
-        elif "system" in role.lower():
-            return "system"
-        elif "function" in role.lower():
-            return "function"
-        elif "tool" in role.lower():
-            return "tool"
-        else:
-            return "assistant"
-
-    def _convert_message_dict(
-        self,
-        message: Dict,
-    ):
-        class_name = message["id"][-1]
-        kwargs = message.get("kwargs", {})
-        function_call = kwargs.get("additional_kwargs", {}).get("function_call")
-
-        msg = GenerationMessage(
-            role=self._convert_message_role(class_name),
-            content="",
-        )
-        if name := kwargs.get("name"):
-            msg["name"] = name
-        if function_call:
-            msg["function_call"] = function_call
-        else:
-            content = kwargs.get("content")
-            if isinstance(content, list):
-                tool_calls = []
-                content_parts = []
-                for item in content:
-                    if item.get("type") == "tool_use":
-                        tool_calls.append(
-                            {
-                                "id": item.get("id"),
-                                "type": "function",
-                                "function": {
-                                    "name": item.get("name"),
-                                    "arguments": item.get("input"),
-                                },
-                            }
-                        )
-                    elif item.get("type") == "text":
-                        content_parts.append({"type": "text", "text": item.get("text")})
-
-                if tool_calls:
-                    msg["tool_calls"] = tool_calls
-                if content_parts:
-                    msg["content"] = content_parts  # type: ignore
-            else:
-                msg["content"] = content  # type: ignore
-
-        return msg
-
-    def _convert_message(
-        self,
-        message: Union[Dict, BaseMessage],
-    ):
-        if isinstance(message, dict):
-            return self._convert_message_dict(
-                message,
-            )
-
-        function_call = message.additional_kwargs.get("function_call")
-
-        msg = GenerationMessage(
-            role=self._convert_message_role(message.type),
-            content="",
-        )
-
-        if literal_uuid := message.additional_kwargs.get("uuid"):
-            msg["uuid"] = literal_uuid
-            msg["templated"] = True
-
-        if name := getattr(message, "name", None):
-            msg["name"] = name
-
-        if function_call:
-            msg["function_call"] = function_call
-        else:
-            if isinstance(message.content, list):
-                tool_calls = []
-                content_parts = []
-                for item in message.content:
-                    if isinstance(item, str):
-                        continue
-                    if item.get("type") == "tool_use":
-                        tool_calls.append(
-                            {
-                                "id": item.get("id"),
-                                "type": "function",
-                                "function": {
-                                    "name": item.get("name"),
-                                    "arguments": item.get("input"),
-                                },
-                            }
-                        )
-                    elif item.get("type") == "text":
-                        content_parts.append({"type": "text", "text": item.get("text")})
-
-                if tool_calls:
-                    msg["tool_calls"] = tool_calls
-                if content_parts:
-                    msg["content"] = content_parts  # type: ignore
-            else:
-                msg["content"] = message.content  # type: ignore
-
-        return msg
-
-    def _build_llm_settings(
-        self,
-        serialized: Dict,
-        invocation_params: Optional[Dict] = None,
-    ):
-        # invocation_params = run.extra.get("invocation_params")
-        if invocation_params is None:
-            return None, None
-
-        provider = invocation_params.pop("_type", "")  # type: str
-
-        model_kwargs = invocation_params.pop("model_kwargs", {})
-
-        if model_kwargs is None:
-            model_kwargs = {}
-
-        merged = {
-            **invocation_params,
-            **model_kwargs,
-            **serialized.get("kwargs", {}),
-        }
-
-        # make sure there is no api key specification
-        settings = {k: v for k, v in merged.items() if not k.endswith("_api_key")}
-
-        model_keys = ["azure_deployment", "deployment_name", "model", "model_name"]
-        model = next((settings[k] for k in model_keys if k in settings), None)
-        if isinstance(model, str):
-            model = model.replace("models/", "")
-        tools = None
-        if "functions" in settings:
-            tools = [{"type": "function", "function": f} for f in settings["functions"]]
-        if "tools" in settings:
-            tools = [
-                {"type": "function", "function": t}
-                if t.get("type") != "function"
-                else t
-                for t in settings["tools"]
-            ]
-        return provider, model, tools, settings
-
-
-def process_content(content: Any) -> Tuple[Dict | str, Optional[str]]:
-    if content is None:
-        return {}, None
-    if isinstance(content, str):
-        return {"content": content}, "text"
-    else:
-        return dumps(content), "json"
-
-
-DEFAULT_TO_IGNORE = [
-    "RunnableSequence",
-    "RunnableParallel",
-    "RunnableAssign",
-    "RunnableLambda",
-    "<lambda>",
-]
-DEFAULT_TO_KEEP = ["retriever", "llm", "agent", "chain", "tool"]
-
-
-class LangchainTracer(AsyncBaseTracer, GenerationHelper, FinalStreamHelper):
-    steps: Dict[str, Step]
-    parent_id_map: Dict[str, str]
-    ignored_runs: set
-
-    def __init__(
-        self,
-        # Token sequence that prefixes the answer
-        answer_prefix_tokens: Optional[List[str]] = None,
-        # Should we stream the final answer?
-        stream_final_answer: bool = False,
-        # Should force stream the first response?
-        force_stream_final_answer: bool = False,
-        # Runs to ignore to enhance readability
-        to_ignore: Optional[List[str]] = None,
-        # Runs to keep within ignored runs
-        to_keep: Optional[List[str]] = None,
-        **kwargs: Any,
-    ) -> None:
-        AsyncBaseTracer.__init__(self, **kwargs)
-        GenerationHelper.__init__(self)
-        FinalStreamHelper.__init__(
-            self,
-            answer_prefix_tokens=answer_prefix_tokens,
-            stream_final_answer=stream_final_answer,
-            force_stream_final_answer=force_stream_final_answer,
-        )
-        self.context = context_var.get()
-        self.steps = {}
-        self.parent_id_map = {}
-        self.ignored_runs = set()
-
-        if self.context.current_step:
-            self.root_parent_id = self.context.current_step.id
-        else:
-            self.root_parent_id = None
-
-        if to_ignore is None:
-            self.to_ignore = DEFAULT_TO_IGNORE
-        else:
-            self.to_ignore = to_ignore
-
-        if to_keep is None:
-            self.to_keep = DEFAULT_TO_KEEP
-        else:
-            self.to_keep = to_keep
-
-    async def on_chat_model_start(
-        self,
-        serialized: Dict[str, Any],
-        messages: List[List[BaseMessage]],
-        *,
-        run_id: "UUID",
-        parent_run_id: Optional["UUID"] = None,
-        tags: Optional[List[str]] = None,
-        metadata: Optional[Dict[str, Any]] = None,
-        name: Optional[str] = None,
-        **kwargs: Any,
-    ) -> Run:
-        lc_messages = messages[0]
-        self.chat_generations[str(run_id)] = {
-            "input_messages": lc_messages,
-            "start": time.time(),
-            "token_count": 0,
-            "tt_first_token": None,
-        }
-
-        return await super().on_chat_model_start(
-            serialized,
-            messages,
-            run_id=run_id,
-            parent_run_id=parent_run_id,
-            tags=tags,
-            metadata=metadata,
-            name=name,
-            **kwargs,
-        )
-
-    async def on_llm_start(
-        self,
-        serialized: Dict[str, Any],
-        prompts: List[str],
-        *,
-        run_id: "UUID",
-        parent_run_id: Optional[UUID] = None,
-        tags: Optional[List[str]] = None,
-        metadata: Optional[Dict[str, Any]] = None,
-        **kwargs: Any,
-    ) -> None:
-        await super().on_llm_start(
-            serialized,
-            prompts,
-            run_id=run_id,
-            parent_run_id=parent_run_id,
-            tags=tags,
-            metadata=metadata,
-            **kwargs,
-        )
-
-        self.completion_generations[str(run_id)] = {
-            "prompt": prompts[0],
-            "start": time.time(),
-            "token_count": 0,
-            "tt_first_token": None,
-        }
-
-        return None
-
-    async def on_llm_new_token(
-        self,
-        token: str,
-        *,
-        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
-        run_id: "UUID",
-        parent_run_id: Optional["UUID"] = None,
-        **kwargs: Any,
-    ) -> None:
-        await super().on_llm_new_token(
-            token=token,
-            chunk=chunk,
-            run_id=run_id,
-            parent_run_id=parent_run_id,
-            **kwargs,
-        )
-        if isinstance(chunk, ChatGenerationChunk):
-            start = self.chat_generations[str(run_id)]
-        else:
-            start = self.completion_generations[str(run_id)]  # type: ignore
-        start["token_count"] += 1
-        if start["tt_first_token"] is None:
-            start["tt_first_token"] = (time.time() - start["start"]) * 1000
-
-        # Process token to ensure it's a string, as strip() will be called on it.
-        processed_token: str
-        # Handle case where token is a list (can occur with some model outputs).
-        # Join all elements into a single string to maintain compatibility with downstream processing.
-        if isinstance(token, list):
-            # If token is a list, join its elements (converted to strings) into a single string.
-            processed_token = "".join(map(str, token))
-        elif not isinstance(token, str):
-            # If token is neither a list nor a string, convert it to a string.
-            processed_token = str(token)
-        else:
-            # If token is already a string, use it as is.
-            processed_token = token
-
-        if self.stream_final_answer:
-            self._append_to_last_tokens(processed_token)
-
-            if self.answer_reached:
-                if not self.final_stream:
-                    self.final_stream = Message(content="")
-                    await self.final_stream.send()
-                await self.final_stream.stream_token(processed_token)
-                self.has_streamed_final_answer = True
-            else:
-                self.answer_reached = self._check_if_answer_reached()
-
-    async def _persist_run(self, run: Run) -> None:
-        pass
-
-    def _get_run_parent_id(self, run: Run):
-        parent_id = str(run.parent_run_id) if run.parent_run_id else self.root_parent_id
-
-        return parent_id
-
-    def _get_non_ignored_parent_id(self, current_parent_id: Optional[str] = None):
-        if not current_parent_id:
-            return self.root_parent_id
-
-        if current_parent_id not in self.parent_id_map:
-            return None
-
-        while current_parent_id in self.parent_id_map:
-            # If the parent id is in the ignored runs, we need to get the parent id of the ignored run
-            if current_parent_id in self.ignored_runs:
-                current_parent_id = self.parent_id_map[current_parent_id]
-            else:
-                return current_parent_id
-
-        return self.root_parent_id
-
-    def _should_ignore_run(self, run: Run):
-        parent_id = self._get_run_parent_id(run)
-
-        if parent_id:
-            # Add the parent id of the ignored run in the mapping
-            # so we can re-attach a kept child to the right parent id
-            self.parent_id_map[str(run.id)] = parent_id
-
-        ignore_by_name = False
-        ignore_by_parent = parent_id in self.ignored_runs
-
-        for filter in self.to_ignore:
-            if filter in run.name:
-                ignore_by_name = True
-                break
-
-        ignore = ignore_by_name or ignore_by_parent
-
-        # If the ignore cause is the parent being ignored, check if we should nonetheless keep the child
-        if ignore_by_parent and not ignore_by_name and run.run_type in self.to_keep:
-            return False, self._get_non_ignored_parent_id(parent_id)
-        else:
-            if ignore:
-                # Tag the run as ignored
-                self.ignored_runs.add(str(run.id))
-            return ignore, parent_id
-
-    async def _start_trace(self, run: Run) -> None:
-        await super()._start_trace(run)
-        context_var.set(self.context)
-
-        ignore, parent_id = self._should_ignore_run(run)
-
-        if run.run_type in ["chain", "prompt"]:
-            self.generation_inputs[str(run.id)] = self.ensure_values_serializable(
-                run.inputs
-            )
-
-        if ignore:
-            return
-
-        step_type: TrueStepType = "undefined"
-        if run.run_type == "agent":
-            step_type = "run"
-        elif run.run_type == "chain":
-            if not self.steps:
-                step_type = "run"
-        elif run.run_type == "llm":
-            step_type = "llm"
-        elif run.run_type == "retriever":
-            step_type = "tool"
-        elif run.run_type == "tool":
-            step_type = "tool"
-        elif run.run_type == "embedding":
-            step_type = "embedding"
-
-        step = Step(
-            id=str(run.id),
-            name=run.name,
-            type=step_type,
-            parent_id=parent_id,
-        )
-        step.start = utc_now()
-        if step_type != "llm":
-            step.input, language = process_content(run.inputs)
-            step.show_input = language or False
-
-        step.tags = run.tags
-        self.steps[str(run.id)] = step
-
-        await step.send()
-
-    async def _on_run_update(self, run: Run) -> None:
-        """Process a run upon update."""
-        context_var.set(self.context)
-
-        ignore, parent_id = self._should_ignore_run(run)
-
-        if ignore:
-            return
-
-        current_step = self.steps.get(str(run.id), None)
-
-        if run.run_type == "llm" and current_step:
-            provider, model, tools, llm_settings = self._build_llm_settings(
-                (run.serialized or {}), (run.extra or {}).get("invocation_params")
-            )
-            generations = (run.outputs or {}).get("generations", [])
-            generation = generations[0][0]
-            variables = self.generation_inputs.get(str(run.parent_run_id), {})
-            variables = {k: str(v) for k, v in variables.items() if v is not None}
-            if message := generation.get("message"):
-                chat_start = self.chat_generations[str(run.id)]
-                duration = time.time() - chat_start["start"]
-                if duration and chat_start["token_count"]:
-                    throughput = chat_start["token_count"] / duration
-                else:
-                    throughput = None
-                message_completion = self._convert_message(message)
-                current_step.generation = ChatGeneration(
-                    provider=provider,
-                    model=model,
-                    tools=tools,
-                    variables=variables,
-                    settings=llm_settings,
-                    duration=duration,
-                    token_throughput_in_s=throughput,
-                    tt_first_token=chat_start.get("tt_first_token"),
-                    messages=[
-                        self._convert_message(m) for m in chat_start["input_messages"]
-                    ],
-                    message_completion=message_completion,
-                )
-
-                # find first message with prompt_id
-                for m in chat_start["input_messages"]:
-                    if m.additional_kwargs.get("prompt_id"):
-                        current_step.generation.prompt_id = m.additional_kwargs[
-                            "prompt_id"
-                        ]
-                        if custom_variables := m.additional_kwargs.get("variables"):
-                            current_step.generation.variables = {
-                                k: str(v)
-                                for k, v in custom_variables.items()
-                                if v is not None
-                            }
-                    break
-
-                current_step.language = "json"
-            else:
-                completion_start = self.completion_generations[str(run.id)]
-                completion = generation.get("text", "")
-                duration = time.time() - completion_start["start"]
-                if duration and completion_start["token_count"]:
-                    throughput = completion_start["token_count"] / duration
-                else:
-                    throughput = None
-                current_step.generation = CompletionGeneration(
-                    provider=provider,
-                    model=model,
-                    settings=llm_settings,
-                    variables=variables,
-                    duration=duration,
-                    token_throughput_in_s=throughput,
-                    tt_first_token=completion_start.get("tt_first_token"),
-                    prompt=completion_start["prompt"],
-                    completion=completion,
-                )
-                current_step.output = completion
-
-            if current_step:
-                current_step.end = utc_now()
-                await current_step.update()
-
-            if self.final_stream and self.has_streamed_final_answer:
-                await self.final_stream.update()
-
-            return
-
-        if current_step:
-            if current_step.type != "llm":
-                current_step.output, current_step.language = process_content(
-                    run.outputs
-                )
-            current_step.end = utc_now()
-            await current_step.update()
-
-    async def _on_error(self, error: BaseException, *, run_id: UUID, **kwargs: Any):
-        context_var.set(self.context)
-
-        if current_step := self.steps.get(str(run_id), None):
-            current_step.is_error = True
-            current_step.output = str(error)
-            current_step.end = utc_now()
-            await current_step.update()
-
-    on_llm_error = _on_error
-    on_chain_error = _on_error
-    on_tool_error = _on_error
-    on_retriever_error = _on_error
-
-
-LangchainCallbackHandler = LangchainTracer
-AsyncLangchainCallbackHandler = LangchainTracer
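
The hunk above covers chainlit/langchain/callbacks.py. For orientation only, here is a minimal sketch, not taken from the diff, of how this tracer is typically attached in a Chainlit app; the ChatOpenAI model and the prompt template are illustrative assumptions, not something this release implies.

import chainlit as cl
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

from chainlit.langchain.callbacks import AsyncLangchainCallbackHandler


@cl.on_message
async def on_message(message: cl.Message):
    # Any LangChain runnable works; a trivial prompt | model chain is used here for illustration.
    chain = ChatPromptTemplate.from_template("{question}") | ChatOpenAI()
    # Passing the tracer as a callback surfaces intermediate runs as Chainlit steps
    # and, with stream_final_answer=True, streams the final answer to the UI.
    result = await chain.ainvoke(
        {"question": message.content},
        config={"callbacks": [AsyncLangchainCallbackHandler(stream_final_answer=True)]},
    )
    await cl.Message(content=result.content).send()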
@@ -1,25 +0,0 @@
-from chainlit.utils import check_module_version
-
-if not check_module_version("langflow", "0.1.4"):
-    raise ValueError(
-        "Expected Langflow version >= 0.1.4. Run `pip install langflow --upgrade`"
-    )
-
-from typing import Dict, Optional, Union
-
-import httpx
-
-
-async def load_flow(schema: Union[Dict, str], tweaks: Optional[Dict] = None):
-    from langflow import load_flow_from_json
-
-    if isinstance(schema, str):
-        async with httpx.AsyncClient() as client:
-            response = await client.get(schema)
-            if response.status_code != 200:
-                raise ValueError(f"Error: {response.text}")
-            schema = response.json()
-
-    flow = load_flow_from_json(flow=schema, tweaks=tweaks)
-
-    return flow
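
The hunk above covers chainlit/langflow/__init__.py. A hedged usage sketch of the load_flow helper it defines follows; the flow URL and tweak keys below are placeholders, not values from the package.

import chainlit as cl

from chainlit.langflow import load_flow

# Placeholder schema location and tweaks; point these at a real exported Langflow JSON.
FLOW_URL = "https://example.com/my_flow.json"
TWEAKS = {"ChatOpenAI-abc123": {"temperature": 0}}


@cl.on_chat_start
async def start():
    # load_flow accepts either a dict or a URL string, as in the module shown above.
    flow = await load_flow(schema=FLOW_URL, tweaks=TWEAKS)
    cl.user_session.set("flow", flow)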