chainlit 0.7.604rc2__py3-none-any.whl → 1.0.0rc0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported registry, and is provided for informational purposes only.

Files changed (40)
  1. chainlit/__init__.py +32 -23
  2. chainlit/auth.py +9 -10
  3. chainlit/cache.py +3 -3
  4. chainlit/cli/__init__.py +12 -2
  5. chainlit/config.py +22 -13
  6. chainlit/context.py +7 -3
  7. chainlit/data/__init__.py +375 -9
  8. chainlit/data/acl.py +6 -5
  9. chainlit/element.py +86 -123
  10. chainlit/emitter.py +117 -50
  11. chainlit/frontend/dist/assets/index-6aee009a.js +697 -0
  12. chainlit/frontend/dist/assets/{react-plotly-16f7de12.js → react-plotly-2f07c02a.js} +1 -1
  13. chainlit/frontend/dist/index.html +1 -1
  14. chainlit/haystack/callbacks.py +45 -43
  15. chainlit/hello.py +1 -1
  16. chainlit/langchain/callbacks.py +135 -120
  17. chainlit/llama_index/callbacks.py +68 -48
  18. chainlit/message.py +179 -207
  19. chainlit/oauth_providers.py +39 -34
  20. chainlit/playground/provider.py +44 -30
  21. chainlit/playground/providers/anthropic.py +4 -4
  22. chainlit/playground/providers/huggingface.py +2 -2
  23. chainlit/playground/providers/langchain.py +8 -10
  24. chainlit/playground/providers/openai.py +19 -13
  25. chainlit/server.py +155 -99
  26. chainlit/session.py +109 -40
  27. chainlit/socket.py +54 -38
  28. chainlit/step.py +393 -0
  29. chainlit/types.py +78 -21
  30. chainlit/user.py +32 -0
  31. chainlit/user_session.py +1 -5
  32. {chainlit-0.7.604rc2.dist-info → chainlit-1.0.0rc0.dist-info}/METADATA +12 -31
  33. chainlit-1.0.0rc0.dist-info/RECORD +60 -0
  34. chainlit/client/base.py +0 -169
  35. chainlit/client/cloud.py +0 -500
  36. chainlit/frontend/dist/assets/index-c58dbd4b.js +0 -871
  37. chainlit/prompt.py +0 -40
  38. chainlit-0.7.604rc2.dist-info/RECORD +0 -61
  39. {chainlit-0.7.604rc2.dist-info → chainlit-1.0.0rc0.dist-info}/WHEEL +0 -0
  40. {chainlit-0.7.604rc2.dist-info → chainlit-1.0.0rc0.dist-info}/entry_points.txt +0 -0
chainlit/oauth_providers.py

@@ -4,7 +4,7 @@ import urllib.parse
 from typing import Dict, List, Optional, Tuple
 
 import httpx
-from chainlit.client.base import AppUser
+from chainlit.user import User
 from fastapi import HTTPException
 
 
@@ -22,7 +22,7 @@ class OAuthProvider:
     async def get_token(self, code: str, url: str) -> str:
         raise NotImplementedError()
 
-    async def get_user_info(self, token: str) -> Tuple[Dict[str, str], AppUser]:
+    async def get_user_info(self, token: str) -> Tuple[Dict[str, str], User]:
        raise NotImplementedError()
 
 
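The base-class contract changes here: `get_user_info` now returns the raw provider payload alongside a `chainlit.user.User` instead of the removed `AppUser`. A minimal sketch of a provider following the new convention — the endpoint URL, JSON field names, and provider id are placeholders, not a real provider:

```python
from typing import Dict, Tuple

import httpx
from chainlit.oauth_providers import OAuthProvider
from chainlit.user import User


class ExampleOAuthProvider(OAuthProvider):
    id = "example"  # hypothetical provider id

    async def get_user_info(self, token: str) -> Tuple[Dict[str, str], User]:
        async with httpx.AsyncClient() as client:
            response = await client.get(
                "https://auth.example.com/userinfo",  # placeholder endpoint
                headers={"Authorization": f"Bearer {token}"},
            )
            response.raise_for_status()
            raw_user = response.json()

        # 1.0 convention: identity goes in `identifier`, everything else in `metadata`.
        user = User(
            identifier=raw_user["email"],
            metadata={"image": raw_user.get("picture", ""), "provider": "example"},
        )
        return (raw_user, user)
```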
@@ -65,7 +65,7 @@ class GithubOAuthProvider(OAuthProvider):
                 headers={"Authorization": f"token {token}"},
             )
             user_response.raise_for_status()
-            user = user_response.json()
+            github_user = user_response.json()
 
             emails_response = await client.get(
                 "https://api.github.com/user/emails",
@@ -74,14 +74,12 @@ class GithubOAuthProvider(OAuthProvider):
             emails_response.raise_for_status()
             emails = emails_response.json()
 
-            user.update({"emails": emails})
-
-            app_user = AppUser(
-                username=user["login"],
-                image=user["avatar_url"],
-                provider="github",
+            github_user.update({"emails": emails})
+            user = User(
+                identifier=github_user["login"],
+                metadata={"image": github_user["avatar_url"], "provider": "github"},
             )
-            return (user, app_user)
+            return (github_user, user)
 
 
 class GoogleOAuthProvider(OAuthProvider):
@@ -129,12 +127,12 @@ class GoogleOAuthProvider(OAuthProvider):
                 headers={"Authorization": f"Bearer {token}"},
             )
             response.raise_for_status()
-            user = response.json()
-
-            app_user = AppUser(
-                username=user["name"], image=user["picture"], provider="google"
+            google_user = response.json()
+            user = User(
+                identifier=google_user["email"],
+                metadata={"image": google_user["picture"], "provider": "google"},
             )
-            return (user, app_user)
+            return (google_user, user)
 
 
 class AzureADOAuthProvider(OAuthProvider):
@@ -196,7 +194,7 @@ class AzureADOAuthProvider(OAuthProvider):
             )
             response.raise_for_status()
 
-            user = response.json()
+            azure_user = response.json()
 
             try:
                 photo_response = await client.get(
@@ -205,19 +203,18 @@ class AzureADOAuthProvider(OAuthProvider):
                 )
                 photo_data = await photo_response.aread()
                 base64_image = base64.b64encode(photo_data)
-                user[
+                azure_user[
                     "image"
                 ] = f"data:{photo_response.headers['Content-Type']};base64,{base64_image.decode('utf-8')}"
             except Exception as e:
                 # Ignore errors getting the photo
                 pass
 
-            app_user = AppUser(
-                username=user["userPrincipalName"],
-                image=user.get("image", ""),
-                provider="azure-ad",
+            user = User(
+                identifier=azure_user["userPrincipalName"],
+                metadata={"image": azure_user.get("image"), "provider": "azure-ad"},
             )
-            return (user, app_user)
+            return (azure_user, user)
 
 
 class OktaOAuthProvider(OAuthProvider):
@@ -284,10 +281,13 @@ class OktaOAuthProvider(OAuthProvider):
                 headers={"Authorization": f"Bearer {token}"},
             )
             response.raise_for_status()
-            user = response.json()
+            okta_user = response.json()
 
-            app_user = AppUser(username=user.get("email"), image="", provider="okta")
-            return (user, app_user)
+            user = User(
+                identifier=okta_user.get("email"),
+                metadata={"image": "", "provider": "okta"},
+            )
+            return (okta_user, user)
 
 
 class Auth0OAuthProvider(OAuthProvider):
@@ -342,13 +342,15 @@ class Auth0OAuthProvider(OAuthProvider):
                 headers={"Authorization": f"Bearer {token}"},
             )
             response.raise_for_status()
-            user = response.json()
-            app_user = AppUser(
-                username=user.get("email"),
-                image=user.get("picture", ""),
-                provider="auth0",
+            auth0_user = response.json()
+            user = User(
+                identifier=auth0_user.get("email"),
+                metadata={
+                    "image": auth0_user.get("picture", ""),
+                    "provider": "auth0",
+                },
             )
-            return (user, app_user)
+            return (auth0_user, user)
 
 
 class DescopeOAuthProvider(OAuthProvider):
@@ -398,10 +400,13 @@ class DescopeOAuthProvider(OAuthProvider):
                 f"{self.domain}/userinfo", headers={"Authorization": f"Bearer {token}"}
             )
             response.raise_for_status()  # This will raise an exception for 4xx/5xx responses
-            user = response.json()
+            descope_user = response.json()
 
-            app_user = AppUser(username=user.get("email"), image="", provider="descope")
-            return (user, app_user)
+            user = User(
+                identifier=descope_user.get("email"),
+                metadata={"image": "", "provider": "descope"},
+            )
+            return (descope_user, user)
 
 
 providers = [
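Downstream, the returned tuple surfaces in the application's OAuth hook: the raw payload arrives as `raw_user_data` and the provider-built `User` as `default_user`. A sketch assuming the 1.0 `@cl.oauth_callback` signature:

```python
from typing import Dict, Optional

import chainlit as cl
from chainlit.user import User


@cl.oauth_callback
def oauth_callback(
    provider_id: str,
    token: str,
    raw_user_data: Dict[str, str],
    default_user: User,
) -> Optional[User]:
    # Accept the login by returning a User, or reject it by returning None.
    if provider_id == "github":
        return default_user
    return None
```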
chainlit/playground/provider.py

@@ -1,10 +1,10 @@
 import os
-from typing import Any, Dict, List, Union
+from typing import Any, Dict, List, Optional, Union
 
 from chainlit.config import config
-from chainlit.prompt import Prompt, PromptMessage
 from chainlit.telemetry import trace_event
-from chainlit.types import CompletionRequest
+from chainlit.types import GenerationRequest
+from chainlit_client import BaseGeneration, ChatGeneration, GenerationMessage
 from fastapi import HTTPException
 from pydantic.dataclasses import dataclass
 
@@ -20,67 +20,81 @@ class BaseProvider:
     is_chat: bool
 
     # Format the message based on the template provided
-    def format_message(self, message: PromptMessage, prompt: Prompt):
+    def format_message(self, message: GenerationMessage, inputs: Optional[Dict]):
         if message.template:
-            message.formatted = self._format_template(message.template, prompt)
+            message.formatted = self._format_template(
+                message.template, inputs, message.template_format
+            )
         return message
 
     # Convert the message to string format
-    def message_to_string(self, message: PromptMessage):
+    def message_to_string(self, message: GenerationMessage):
         return message.formatted
 
     # Concatenate multiple messages with a joiner
-    def concatenate_messages(self, messages: List[PromptMessage], joiner="\n\n"):
+    def concatenate_messages(self, messages: List[GenerationMessage], joiner="\n\n"):
         return joiner.join([self.message_to_string(m) for m in messages])
 
     # Format the template based on the prompt inputs
-    def _format_template(self, template: str, prompt: Prompt):
-        if prompt.template_format == "f-string":
-            return template.format(**(prompt.inputs or {}))
-        raise HTTPException(
-            status_code=422, detail=f"Unsupported format {prompt.template_format}"
-        )
+    def _format_template(
+        self, template: str, inputs: Optional[Dict], format: str = "f-string"
+    ):
+        if format == "f-string":
+            return template.format(**(inputs or {}))
+        raise HTTPException(status_code=422, detail=f"Unsupported format {format}")
 
     # Create a prompt based on the request
-    def create_prompt(self, request: CompletionRequest):
-        prompt = request.prompt
-        if prompt.messages:
-            messages = [self.format_message(m, prompt=prompt) for m in prompt.messages]
+    def create_generation(self, request: GenerationRequest):
+        if request.chatGeneration and request.chatGeneration.messages:
+            messages = [
+                self.format_message(m, request.chatGeneration.inputs)
+                for m in request.chatGeneration.messages
+            ]
         else:
             messages = None
 
         if self.is_chat:
             if messages:
                 return messages
-            elif prompt.template or prompt.formatted:
+            elif request.completionGeneration and (
+                request.completionGeneration.template
+                or request.completionGeneration.formatted
+            ):
                 return [
                     self.format_message(
-                        PromptMessage(
-                            template=prompt.template,
-                            formatted=prompt.formatted,
+                        GenerationMessage(
+                            template=request.completionGeneration.template,
+                            formatted=request.completionGeneration.formatted,
                             role="user",
                         ),
-                        prompt=prompt,
+                        inputs=request.completionGeneration.inputs,
                     )
                 ]
             else:
-                raise HTTPException(status_code=422, detail="Could not create prompt")
+                raise HTTPException(
+                    status_code=422, detail="Could not create generation"
+                )
         else:
-            if prompt.template:
-                return self._format_template(prompt.template, prompt=prompt)
+            if request.completionGeneration:
+                if request.completionGeneration.template:
+                    return self._format_template(
+                        request.completionGeneration.template,
+                        request.completionGeneration.inputs,
+                        request.completionGeneration.template_format,
+                    )
+                elif request.completionGeneration.formatted:
+                    return request.completionGeneration.formatted
             elif messages:
                 return self.concatenate_messages(messages)
-            elif prompt.formatted:
-                return prompt.formatted
             else:
                 raise HTTPException(status_code=422, detail="Could not create prompt")
 
     # Create a completion event
-    async def create_completion(self, request: CompletionRequest):
+    async def create_completion(self, request: GenerationRequest):
         trace_event("completion")
 
     # Get the environment variable based on the request
-    def get_var(self, request: CompletionRequest, var: str) -> Union[str, None]:
+    def get_var(self, request: GenerationRequest, var: str) -> Union[str, None]:
         user_env = config.project.user_env or []
 
         if var in user_env:
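`_format_template` now receives the inputs and template format explicitly instead of pulling them off a `Prompt` object, and `create_generation` routes chat requests through `chatGeneration.messages` and completion requests through `completionGeneration`. Only f-string templates are accepted; a worked example of that path:

```python
# What the f-string branch of BaseProvider._format_template boils down to:
template = "Translate {text} into {language}."
inputs = {"text": "bonjour le monde", "language": "English"}

formatted = template.format(**(inputs or {}))
print(formatted)  # Translate bonjour le monde into English.
```

Any other `template_format` raises a 422, per the hunk above.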
@@ -101,7 +115,7 @@ class BaseProvider:
         return True
 
     # Validate the environment variables in the request
-    def validate_env(self, request: CompletionRequest):
+    def validate_env(self, request: GenerationRequest):
         return {k: self.get_var(request, v) for k, v in self.env_vars.items()}
 
     # Check if the required settings are present
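`validate_env` resolves each entry of the provider's `env_vars` mapping through `get_var`, which consults `config.project.user_env` first. A simplified, self-contained stand-in for that resolution — the fallback to the process environment and the variable name are assumptions, since the hunk only shows the `user_env` check:

```python
import os
from typing import Dict, Optional


# Stand-in for BaseProvider.get_var / validate_env, assuming a fallback to
# the process environment when the user did not supply a value.
def resolve_env(
    env_vars: Dict[str, str], user_env: Dict[str, str]
) -> Dict[str, Optional[str]]:
    def get_var(var: str) -> Optional[str]:
        if var in user_env:
            return user_env[var]
        return os.environ.get(var)

    # Maps each provider setting ("api_key") to its resolved variable.
    return {setting: get_var(var) for setting, var in env_vars.items()}


# "EXAMPLE_API_KEY" is an illustrative variable name.
print(resolve_env({"api_key": "EXAMPLE_API_KEY"}, user_env={}))
```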
chainlit/playground/providers/anthropic.py

@@ -1,12 +1,12 @@
 from chainlit.input_widget import Select, Slider, Tags
 from chainlit.playground.provider import BaseProvider
-from chainlit.prompt import PromptMessage
+from chainlit_client import GenerationMessage
 from fastapi import HTTPException
 from fastapi.responses import StreamingResponse
 
 
 class AnthropicProvider(BaseProvider):
-    def message_to_string(self, message: PromptMessage) -> str:
+    def message_to_string(self, message: GenerationMessage) -> str:
         import anthropic
 
         if message.role == "user":
@@ -29,10 +29,10 @@ class AnthropicProvider(BaseProvider):
 
         env_settings = self.validate_env(request=request)
 
-        llm_settings = request.prompt.settings
+        llm_settings = request.generation.settings
         self.require_settings(llm_settings)
 
-        prompt = self.concatenate_messages(self.create_prompt(request), joiner="")
+        prompt = self.concatenate_messages(self.create_generation(request), joiner="")
 
         if not prompt.endswith(anthropic.AI_PROMPT):
             prompt += anthropic.AI_PROMPT
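The Anthropic provider still targets the legacy completions API: messages are flattened into one string with role markers, and the final prompt must end with `anthropic.AI_PROMPT`. A sketch of that invariant, assuming the pre-Messages-API constants from the `anthropic` SDK:

```python
import anthropic

# In the legacy SDK, HUMAN_PROMPT == "\n\nHuman:" and AI_PROMPT == "\n\nAssistant:".
prompt = f"{anthropic.HUMAN_PROMPT} Summarize Chainlit in one sentence."

# Mirror of the provider's guard: a Claude completion prompt has to end with
# the assistant marker so the model knows it should respond next.
if not prompt.endswith(anthropic.AI_PROMPT):
    prompt += anthropic.AI_PROMPT

print(repr(prompt))
```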
chainlit/playground/providers/huggingface.py

@@ -18,7 +18,7 @@ class BaseHuggingFaceProvider(BaseProvider):
         from huggingface_hub.inference_api import InferenceApi
 
         env_settings = self.validate_env(request=request)
-        llm_settings = request.prompt.settings
+        llm_settings = request.generation.settings
         self.require_settings(llm_settings)
 
         client = InferenceApi(
@@ -27,7 +27,7 @@ class BaseHuggingFaceProvider(BaseProvider):
             task=self.task,
         )
 
-        prompt = self.create_prompt(request)
+        prompt = self.create_generation(request)
 
         response = await make_async(client)(inputs=prompt, params=llm_settings)
 
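`make_async` (from `chainlit.sync`) wraps the synchronous `InferenceApi` client so the async provider can await it without blocking the event loop. A minimal, self-contained sketch of the pattern with a stand-in blocking function:

```python
import asyncio

from chainlit.sync import make_async


def blocking_inference(inputs: str, params: dict) -> str:
    # Stand-in for a synchronous client such as huggingface_hub's InferenceApi.
    return f"completed: {inputs!r} with {params}"


async def main() -> None:
    # make_async turns the sync callable into an awaitable, matching the
    # provider's `await make_async(client)(inputs=prompt, params=llm_settings)`.
    result = await make_async(blocking_inference)(
        inputs="Hello", params={"max_new_tokens": 16}
    )
    print(result)


asyncio.run(main())
```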
chainlit/playground/providers/langchain.py

@@ -1,17 +1,15 @@
 from typing import Union
 
-from fastapi.responses import StreamingResponse
-
 from chainlit.playground.provider import BaseProvider
-from chainlit.prompt import PromptMessage
 from chainlit.sync import make_async
-
-from chainlit import input_widget
+from chainlit_client import GenerationMessage
+from fastapi.responses import StreamingResponse
 
 
 class LangchainGenericProvider(BaseProvider):
     from langchain.chat_models.base import BaseChatModel
     from langchain.llms.base import LLM
+    from langchain.schema import BaseMessage
 
     llm: Union[LLM, BaseChatModel]
 
@@ -31,7 +29,7 @@ class LangchainGenericProvider(BaseProvider):
         )
         self.llm = llm
 
-    def prompt_message_to_langchain_message(self, message: PromptMessage):
+    def prompt_message_to_langchain_message(self, message: GenerationMessage):
         from langchain.schema.messages import (
             AIMessage,
             FunctionMessage,
@@ -46,7 +44,7 @@ class LangchainGenericProvider(BaseProvider):
             return AIMessage(content=content)
         elif message.role == "system":
             return SystemMessage(content=content)
-        elif message.role == "function":
+        elif message.role == "tool":
             return FunctionMessage(
                 content=content, name=message.name if message.name else "function"
             )
@@ -57,15 +55,15 @@ class LangchainGenericProvider(BaseProvider):
         message = super().format_message(message, prompt)
         return self.prompt_message_to_langchain_message(message)
 
-    def message_to_string(self, message: PromptMessage) -> str:
-        return message.to_string()
+    def message_to_string(self, message: BaseMessage) -> str:  # type: ignore[override]
+        return message.content
 
     async def create_completion(self, request):
         from langchain.schema.messages import BaseMessageChunk
 
         await super().create_completion(request)
 
-        messages = self.create_prompt(request)
+        messages = self.create_generation(request)
 
         stream = make_async(self.llm.stream)
 
chainlit/playground/providers/openai.py

@@ -127,11 +127,11 @@ class ChatOpenAIProvider(BaseProvider):
 
         client = AsyncClient(api_key=env_settings["api_key"])
 
-        llm_settings = request.prompt.settings
+        llm_settings = request.generation.settings
 
         self.require_settings(llm_settings)
 
-        messages = self.create_prompt(request)
+        messages = self.create_generation(request)
 
         if "stop" in llm_settings:
             stop = llm_settings["stop"]
@@ -142,8 +142,8 @@ class ChatOpenAIProvider(BaseProvider):
 
         llm_settings["stop"] = stop
 
-        if request.prompt.functions:
-            llm_settings["functions"] = request.prompt.functions
+        if request.generation.functions:
+            llm_settings["functions"] = request.generation.functions
             llm_settings["stream"] = False
         else:
             llm_settings["stream"] = True
@@ -188,11 +188,11 @@ class OpenAIProvider(BaseProvider):
 
         client = AsyncClient(api_key=env_settings["api_key"])
 
-        llm_settings = request.prompt.settings
+        llm_settings = request.generation.settings
 
         self.require_settings(llm_settings)
 
-        prompt = self.create_prompt(request)
+        prompt = self.create_generation(request)
 
         if "stop" in llm_settings:
             stop = llm_settings["stop"]
@@ -240,11 +240,11 @@ class AzureOpenAIProvider(BaseProvider):
             azure_ad_token_provider=self.get_var(request, "AZURE_AD_TOKEN_PROVIDER"),
             azure_deployment=self.get_var(request, "AZURE_DEPLOYMENT"),
         )
-        llm_settings = request.prompt.settings
+        llm_settings = request.generation.settings
 
         self.require_settings(llm_settings)
 
-        prompt = self.create_prompt(request)
+        prompt = self.create_generation(request)
 
         if "stop" in llm_settings:
             stop = llm_settings["stop"]
@@ -294,11 +294,11 @@ class AzureChatOpenAIProvider(BaseProvider):
             azure_deployment=self.get_var(request, "AZURE_DEPLOYMENT"),
         )
 
-        llm_settings = request.prompt.settings
+        llm_settings = request.generation.settings
 
         self.require_settings(llm_settings)
 
-        messages = self.create_prompt(request)
+        messages = self.create_generation(request)
 
         if "stop" in llm_settings:
             stop = llm_settings["stop"]
@@ -311,8 +311,8 @@ class AzureChatOpenAIProvider(BaseProvider):
 
         llm_settings["model"] = env_settings["deployment_name"]
 
-        if request.prompt.functions:
-            llm_settings["functions"] = request.prompt.functions
+        if request.generation.functions:
+            llm_settings["functions"] = request.generation.functions
             llm_settings["stream"] = False
         else:
             llm_settings["stream"] = True
@@ -362,7 +362,13 @@ ChatOpenAI = ChatOpenAIProvider(
         Select(
             id="model",
             label="Model",
-            values=["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4", "gpt-4-32k"],
+            values=[
+                "gpt-3.5-turbo",
+                "gpt-3.5-turbo-16k",
+                "gpt-4",
+                "gpt-4-32k",
+                "gpt-4-1106-preview",
+            ],
             initial_value="gpt-3.5-turbo",
         ),
         *openai_common_inputs,
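The last hunk adds `gpt-4-1106-preview` to the playground's model picker. For reference, the `Select` input widget the provider declares looks like this on its own:

```python
from chainlit.input_widget import Select

model_select = Select(
    id="model",
    label="Model",
    values=[
        "gpt-3.5-turbo",
        "gpt-3.5-turbo-16k",
        "gpt-4",
        "gpt-4-32k",
        "gpt-4-1106-preview",
    ],
    initial_value="gpt-3.5-turbo",
)
```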