chainlit 1.0.401__py3-none-any.whl → 2.0.4__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release: this version of chainlit might be problematic.

Files changed (113)
  1. chainlit/__init__.py +98 -279
  2. chainlit/_utils.py +8 -0
  3. chainlit/action.py +12 -10
  4. chainlit/{auth.py → auth/__init__.py} +28 -36
  5. chainlit/auth/cookie.py +123 -0
  6. chainlit/auth/jwt.py +39 -0
  7. chainlit/cache.py +4 -6
  8. chainlit/callbacks.py +362 -0
  9. chainlit/chat_context.py +64 -0
  10. chainlit/chat_settings.py +3 -1
  11. chainlit/cli/__init__.py +77 -8
  12. chainlit/config.py +191 -102
  13. chainlit/context.py +42 -13
  14. chainlit/copilot/dist/index.js +8750 -903
  15. chainlit/data/__init__.py +101 -416
  16. chainlit/data/acl.py +6 -2
  17. chainlit/data/base.py +107 -0
  18. chainlit/data/chainlit_data_layer.py +614 -0
  19. chainlit/data/dynamodb.py +590 -0
  20. chainlit/data/literalai.py +500 -0
  21. chainlit/data/sql_alchemy.py +721 -0
  22. chainlit/data/storage_clients/__init__.py +0 -0
  23. chainlit/data/storage_clients/azure.py +81 -0
  24. chainlit/data/storage_clients/azure_blob.py +89 -0
  25. chainlit/data/storage_clients/base.py +26 -0
  26. chainlit/data/storage_clients/gcs.py +88 -0
  27. chainlit/data/storage_clients/s3.py +75 -0
  28. chainlit/data/utils.py +29 -0
  29. chainlit/discord/__init__.py +6 -0
  30. chainlit/discord/app.py +354 -0
  31. chainlit/element.py +91 -33
  32. chainlit/emitter.py +81 -29
  33. chainlit/frontend/dist/assets/DailyMotion-Ce9dQoqZ.js +1 -0
  34. chainlit/frontend/dist/assets/Dataframe-C1XonMcV.js +22 -0
  35. chainlit/frontend/dist/assets/Facebook-DVVt6lrr.js +1 -0
  36. chainlit/frontend/dist/assets/FilePlayer-c7stW4vz.js +1 -0
  37. chainlit/frontend/dist/assets/Kaltura-BmMmgorA.js +1 -0
  38. chainlit/frontend/dist/assets/Mixcloud-Cw8hDmiO.js +1 -0
  39. chainlit/frontend/dist/assets/Mux-DiRZfeUf.js +1 -0
  40. chainlit/frontend/dist/assets/Preview-6Jt2mRHx.js +1 -0
  41. chainlit/frontend/dist/assets/SoundCloud-DKwcT58_.js +1 -0
  42. chainlit/frontend/dist/assets/Streamable-BVdxrEeX.js +1 -0
  43. chainlit/frontend/dist/assets/Twitch-DFqZR7Gu.js +1 -0
  44. chainlit/frontend/dist/assets/Vidyard-0BQAAtVk.js +1 -0
  45. chainlit/frontend/dist/assets/Vimeo-CRFSH0Vu.js +1 -0
  46. chainlit/frontend/dist/assets/Wistia-CKrmdQaG.js +1 -0
  47. chainlit/frontend/dist/assets/YouTube-CQpL-rvU.js +1 -0
  48. chainlit/frontend/dist/assets/index-DQmLRKyv.css +1 -0
  49. chainlit/frontend/dist/assets/index-QdmxtIMQ.js +8665 -0
  50. chainlit/frontend/dist/assets/react-plotly-B9hvVpUG.js +3484 -0
  51. chainlit/frontend/dist/index.html +2 -4
  52. chainlit/haystack/callbacks.py +4 -7
  53. chainlit/input_widget.py +8 -4
  54. chainlit/langchain/callbacks.py +103 -68
  55. chainlit/langflow/__init__.py +1 -0
  56. chainlit/llama_index/callbacks.py +65 -40
  57. chainlit/markdown.py +22 -6
  58. chainlit/message.py +54 -56
  59. chainlit/mistralai/__init__.py +50 -0
  60. chainlit/oauth_providers.py +266 -8
  61. chainlit/openai/__init__.py +10 -18
  62. chainlit/secret.py +1 -1
  63. chainlit/server.py +789 -228
  64. chainlit/session.py +108 -90
  65. chainlit/slack/__init__.py +6 -0
  66. chainlit/slack/app.py +397 -0
  67. chainlit/socket.py +199 -116
  68. chainlit/step.py +141 -89
  69. chainlit/sync.py +2 -1
  70. chainlit/teams/__init__.py +6 -0
  71. chainlit/teams/app.py +338 -0
  72. chainlit/translations/bn.json +244 -0
  73. chainlit/translations/en-US.json +122 -8
  74. chainlit/translations/gu.json +244 -0
  75. chainlit/translations/he-IL.json +244 -0
  76. chainlit/translations/hi.json +244 -0
  77. chainlit/translations/ja.json +242 -0
  78. chainlit/translations/kn.json +244 -0
  79. chainlit/translations/ml.json +244 -0
  80. chainlit/translations/mr.json +244 -0
  81. chainlit/translations/nl-NL.json +242 -0
  82. chainlit/translations/ta.json +244 -0
  83. chainlit/translations/te.json +244 -0
  84. chainlit/translations/zh-CN.json +243 -0
  85. chainlit/translations.py +60 -0
  86. chainlit/types.py +133 -28
  87. chainlit/user.py +14 -3
  88. chainlit/user_session.py +6 -3
  89. chainlit/utils.py +52 -5
  90. chainlit/version.py +3 -2
  91. {chainlit-1.0.401.dist-info → chainlit-2.0.4.dist-info}/METADATA +48 -50
  92. chainlit-2.0.4.dist-info/RECORD +107 -0
  93. chainlit/cli/utils.py +0 -24
  94. chainlit/frontend/dist/assets/index-9711593e.js +0 -723
  95. chainlit/frontend/dist/assets/index-d088547c.css +0 -1
  96. chainlit/frontend/dist/assets/react-plotly-d8762cc2.js +0 -3602
  97. chainlit/playground/__init__.py +0 -2
  98. chainlit/playground/config.py +0 -40
  99. chainlit/playground/provider.py +0 -108
  100. chainlit/playground/providers/__init__.py +0 -13
  101. chainlit/playground/providers/anthropic.py +0 -118
  102. chainlit/playground/providers/huggingface.py +0 -75
  103. chainlit/playground/providers/langchain.py +0 -89
  104. chainlit/playground/providers/openai.py +0 -408
  105. chainlit/playground/providers/vertexai.py +0 -171
  106. chainlit/translations/pt-BR.json +0 -155
  107. chainlit-1.0.401.dist-info/RECORD +0 -66
  108. /chainlit/copilot/dist/assets/{logo_dark-2a3cf740.svg → logo_dark-IkGJ_IwC.svg} +0 -0
  109. /chainlit/copilot/dist/assets/{logo_light-b078e7bc.svg → logo_light-Bb_IPh6r.svg} +0 -0
  110. /chainlit/frontend/dist/assets/{logo_dark-2a3cf740.svg → logo_dark-IkGJ_IwC.svg} +0 -0
  111. /chainlit/frontend/dist/assets/{logo_light-b078e7bc.svg → logo_light-Bb_IPh6r.svg} +0 -0
  112. {chainlit-1.0.401.dist-info → chainlit-2.0.4.dist-info}/WHEEL +0 -0
  113. {chainlit-1.0.401.dist-info → chainlit-2.0.4.dist-info}/entry_points.txt +0 -0
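The list shows two large structural moves: the 1.x prompt playground (entries 97 to 105) is removed outright, and most of the decorator API leaves chainlit/__init__.py (+98 -279) for the new chainlit/callbacks.py (+362 -0). A minimal sketch of that decorator style in 2.0.4, assuming only the long-standing public @cl.on_chat_start and @cl.on_message hooks:

    import chainlit as cl


    @cl.on_chat_start
    async def start():
        # Runs once when a new chat session opens.
        await cl.Message(content="Hi! Ask me anything.").send()


    @cl.on_message
    async def handle(message: cl.Message):
        # Placeholder logic; a real app would call an LLM here.
        await cl.Message(content=f"You said: {message.content}").send()

The two deleted playground modules below, chainlit/playground/providers/openai.py and chainlit/playground/providers/vertexai.py, account for 579 of the removed lines.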
--- a/chainlit/playground/providers/openai.py
+++ /dev/null
@@ -1,408 +0,0 @@
-import json
-from contextlib import contextmanager
-
-from chainlit.input_widget import Select, Slider, Tags
-from chainlit.playground.provider import BaseProvider
-from fastapi import HTTPException
-from fastapi.responses import StreamingResponse
-
-
-def stringify_function_call(function_call):
-    if isinstance(function_call, dict):
-        _function_call = function_call.copy()
-    else:
-        _function_call = {
-            "arguments": function_call.arguments,
-            "name": function_call.name,
-        }
-
-    if "arguments" in _function_call and isinstance(_function_call["arguments"], str):
-        _function_call["arguments"] = json.loads(_function_call["arguments"])
-    return json.dumps(_function_call, indent=4, ensure_ascii=False)
-
-
-openai_common_inputs = [
-    Slider(
-        id="temperature",
-        label="Temperature",
-        min=0.0,
-        max=2.0,
-        step=0.01,
-        initial=0.9,
-        tooltip="Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.",
-    ),
-    Slider(
-        id="max_tokens",
-        label="Max Tokens",
-        min=0.0,
-        max=8000,
-        step=1,
-        initial=256,
-        tooltip="The maximum number of tokens to generate in the chat completion.",
-    ),
-    Slider(
-        id="top_p",
-        label="Top P",
-        min=0.0,
-        max=1.0,
-        step=0.01,
-        initial=1.0,
-        tooltip="An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
-    ),
-    Slider(
-        id="frequency_penalty",
-        label="Frequency Penalty",
-        min=-2.0,
-        max=2.0,
-        step=0.01,
-        initial=0.0,
-        tooltip="Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
-    ),
-    Slider(
-        id="presence_penalty",
-        label="Presence Penalty",
-        min=-2.0,
-        max=2.0,
-        step=0.01,
-        initial=0.0,
-        tooltip="Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
-    ),
-    Tags(
-        id="stop",
-        label="Stop Sequences",
-        initial=[],
-        tooltip="Up to 4 sequences where the API will stop generating further tokens.",
-    ),
-]
-
-
-@contextmanager
-def handle_openai_error():
-    import openai
-
-    try:
-        yield
-    except openai.APITimeoutError as e:
-        raise HTTPException(
-            status_code=408,
-            detail=f"OpenAI API request timed out: {e}",
-        )
-    except openai.APIError as e:
-        raise HTTPException(
-            status_code=500,
-            detail=f"OpenAI API returned an API Error: {e}",
-        )
-    except openai.APIConnectionError as e:
-        raise HTTPException(
-            status_code=503,
-            detail=f"OpenAI API request failed to connect: {e}",
-        )
-    except openai.AuthenticationError as e:
-        raise HTTPException(
-            status_code=403,
-            detail=f"OpenAI API request was not authorized: {e}",
-        )
-    except openai.PermissionDeniedError as e:
-        raise HTTPException(
-            status_code=403,
-            detail=f"OpenAI API request was not permitted: {e}",
-        )
-    except openai.RateLimitError as e:
-        raise HTTPException(
-            status_code=429,
-            detail=f"OpenAI API request exceeded rate limit: {e}",
-        )
-
-
-class ChatOpenAIProvider(BaseProvider):
-    def format_message(self, message, prompt):
-        message = super().format_message(message, prompt)
-        return message.to_openai()
-
-    async def create_completion(self, request):
-        await super().create_completion(request)
-        from openai import AsyncClient
-
-        env_settings = self.validate_env(request=request)
-
-        client = AsyncClient(api_key=env_settings["api_key"])
-
-        llm_settings = request.generation.settings
-
-        self.require_settings(llm_settings)
-
-        messages = self.create_generation(request)
-
-        if "stop" in llm_settings:
-            stop = llm_settings["stop"]
-
-            # OpenAI doesn't support an empty stop array, clear it
-            if isinstance(stop, list) and len(stop) == 0:
-                stop = None
-
-            llm_settings["stop"] = stop
-
-        if request.generation.tools:
-            llm_settings["tools"] = request.generation.tools
-            llm_settings["stream"] = False
-        else:
-            llm_settings["stream"] = True
-
-        with handle_openai_error():
-            response = await client.chat.completions.create(
-                messages=messages,
-                **llm_settings,
-            )
-
-        if llm_settings["stream"]:
-
-            async def create_event_stream():
-                async for part in response:
-                    if part.choices and part.choices[0].delta.content:
-                        token = part.choices[0].delta.content
-                        yield token
-                    else:
-                        continue
-
-        else:
-
-            async def create_event_stream():
-                message = response.choices[0].message
-                if function_call := message.function_call:
-                    yield stringify_function_call(function_call)
-                else:
-                    yield message.content or ""
-
-        return StreamingResponse(create_event_stream())
-
-
-class OpenAIProvider(BaseProvider):
-    def message_to_string(self, message):
-        return message.to_string()
-
-    async def create_completion(self, request):
-        await super().create_completion(request)
-        from openai import AsyncClient
-
-        env_settings = self.validate_env(request=request)
-
-        client = AsyncClient(api_key=env_settings["api_key"])
-
-        llm_settings = request.generation.settings
-
-        self.require_settings(llm_settings)
-
-        prompt = self.create_generation(request)
-
-        if "stop" in llm_settings:
-            stop = llm_settings["stop"]
-
-            # OpenAI doesn't support an empty stop array, clear it
-            if isinstance(stop, list) and len(stop) == 0:
-                stop = None
-
-            llm_settings["stop"] = stop
-
-        llm_settings["stream"] = True
-
-        with handle_openai_error():
-            response = await client.completions.create(
-                prompt=prompt,
-                **llm_settings,
-            )
-
-        async def create_event_stream():
-            async for part in response:
-                if part.choices and part.choices[0].text:
-                    token = part.choices[0].text
-                    yield token
-                else:
-                    continue
-
-        return StreamingResponse(create_event_stream())
-
-
-class AzureOpenAIProvider(BaseProvider):
-    def message_to_string(self, message):
-        return message.to_string()
-
-    async def create_completion(self, request):
-        await super().create_completion(request)
-        from openai import AsyncAzureOpenAI
-
-        env_settings = self.validate_env(request=request)
-
-        client = AsyncAzureOpenAI(
-            api_key=env_settings["api_key"],
-            api_version=env_settings["api_version"],
-            azure_endpoint=env_settings["azure_endpoint"],
-            azure_ad_token=self.get_var(request, "AZURE_AD_TOKEN"),
-            azure_deployment=self.get_var(request, "AZURE_DEPLOYMENT"),
-        )
-        llm_settings = request.generation.settings
-
-        self.require_settings(llm_settings)
-
-        prompt = self.create_generation(request)
-
-        if "stop" in llm_settings:
-            stop = llm_settings["stop"]
-
-            # OpenAI doesn't support an empty stop array, clear it
-            if isinstance(stop, list) and len(stop) == 0:
-                stop = None
-
-            llm_settings["stop"] = stop
-
-        llm_settings["stream"] = True
-
-        with handle_openai_error():
-            response = await client.completions.create(
-                prompt=prompt,
-                **llm_settings,
-            )
-
-        async def create_event_stream():
-            async for part in response:
-                if part.choices and part.choices[0].text:
-                    token = part.choices[0].text
-                    yield token
-                else:
-                    continue
-
-        return StreamingResponse(create_event_stream())
-
-
-class AzureChatOpenAIProvider(BaseProvider):
-    def format_message(self, message, prompt):
-        message = super().format_message(message, prompt)
-        return message.to_openai()
-
-    async def create_completion(self, request):
-        await super().create_completion(request)
-        from openai import AsyncAzureOpenAI
-
-        env_settings = self.validate_env(request=request)
-
-        client = AsyncAzureOpenAI(
-            api_key=env_settings["api_key"],
-            api_version=env_settings["api_version"],
-            azure_endpoint=env_settings["azure_endpoint"],
-            azure_ad_token=self.get_var(request, "AZURE_AD_TOKEN"),
-            azure_deployment=self.get_var(request, "AZURE_DEPLOYMENT"),
-        )
-
-        llm_settings = request.generation.settings
-
-        self.require_settings(llm_settings)
-
-        messages = self.create_generation(request)
-
-        if "stop" in llm_settings:
-            stop = llm_settings["stop"]
-
-            # OpenAI doesn't support an empty stop array, clear it
-            if isinstance(stop, list) and len(stop) == 0:
-                stop = None
-
-            llm_settings["stop"] = stop
-
-        llm_settings["model"] = env_settings["deployment_name"]
-
-        if request.generation.tools:
-            llm_settings["tools"] = request.generation.tools
-            llm_settings["stream"] = False
-        else:
-            llm_settings["stream"] = True
-
-        with handle_openai_error():
-            response = await client.chat.completions.create(
-                messages=messages,
-                **llm_settings,
-            )
-
-        if llm_settings["stream"]:
-
-            async def create_event_stream():
-                async for part in response:
-                    if part.choices and part.choices[0].delta.content:
-                        token = part.choices[0].delta.content
-                        yield token
-                    else:
-                        continue
-
-        else:
-
-            async def create_event_stream():
-                message = response.choices[0].message
-                if function_call := message.function_call:
-                    yield stringify_function_call(function_call)
-                else:
-                    yield message.content or ""
-
-        return StreamingResponse(create_event_stream())
-
-
-openai_env_vars = {"api_key": "OPENAI_API_KEY"}
-
-azure_openai_env_vars = {
-    "api_key": "AZURE_OPENAI_API_KEY",
-    "api_version": "AZURE_OPENAI_API_VERSION",
-    "azure_endpoint": "AZURE_OPENAI_ENDPOINT",
-    "deployment_name": "AZURE_OPENAI_DEPLOYMENT_NAME",
-}
-
-ChatOpenAI = ChatOpenAIProvider(
-    id="openai-chat",
-    env_vars=openai_env_vars,
-    name="ChatOpenAI",
-    inputs=[
-        Select(
-            id="model",
-            label="Model",
-            values=[
-                "gpt-3.5-turbo",
-                "gpt-3.5-turbo-16k",
-                "gpt-4",
-                "gpt-4-32k",
-                "gpt-4-1106-preview",
-            ],
-            initial_value="gpt-3.5-turbo",
-        ),
-        *openai_common_inputs,
-    ],
-    is_chat=True,
-)
-
-OpenAI = OpenAIProvider(
-    id="openai",
-    name="OpenAI",
-    env_vars=openai_env_vars,
-    inputs=[
-        Select(
-            id="model",
-            label="Model",
-            values=["text-davinci-003", "text-davinci-002"],
-            initial_value="text-davinci-003",
-        ),
-        *openai_common_inputs,
-    ],
-    is_chat=False,
-)
-
-
-AzureChatOpenAI = AzureChatOpenAIProvider(
-    id="azure-openai-chat",
-    env_vars=azure_openai_env_vars,
-    name="AzureChatOpenAI",
-    inputs=openai_common_inputs,
-    is_chat=True,
-)
-
-AzureOpenAI = AzureOpenAIProvider(
-    id="azure",
-    name="AzureOpenAI",
-    env_vars=azure_openai_env_vars,
-    inputs=openai_common_inputs,
-    is_chat=False,
-)
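The removed stringify_function_call helper above parses a JSON-encoded arguments string and re-serializes the whole call with indentation; a small usage sketch with hypothetical values:

    # Hypothetical tool call shaped like the OpenAI SDK's dict form.
    call = {"name": "get_weather", "arguments": '{"city": "Paris"}'}
    print(stringify_function_call(call))
    # {
    #     "name": "get_weather",
    #     "arguments": {
    #         "city": "Paris"
    #     }
    # }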
--- a/chainlit/playground/providers/vertexai.py
+++ /dev/null
@@ -1,171 +0,0 @@
-import chainlit as cl
-from fastapi import HTTPException
-
-from fastapi.responses import StreamingResponse
-
-from chainlit.input_widget import Select, Slider, Tags
-from chainlit.playground.provider import BaseProvider
-
-vertexai_common_inputs = [
-    Slider(
-        id="temperature",
-        label="Temperature",
-        min=0.0,
-        max=0.99,
-        step=0.01,
-        initial=0.2,
-    ),
-    Slider(
-        id="max_output_tokens",
-        label="Max Output Tokens",
-        min=0.0,
-        max=1024,
-        step=1,
-        initial=256,
-    ),
-]
-
-
-class ChatVertexAIProvider(BaseProvider):
-    async def create_completion(self, request):
-        await super().create_completion(request)
-        from vertexai.language_models import ChatModel, CodeChatModel
-
-        self.validate_env(request=request)
-
-        llm_settings = request.generation.settings
-        self.require_settings(llm_settings)
-
-        messages = self.create_generation(request)
-        model_name = llm_settings["model"]
-        if model_name.startswith("chat-"):
-            model = ChatModel.from_pretrained(model_name)
-        elif model_name.startswith("codechat-"):
-            model = CodeChatModel.from_pretrained(model_name)
-        else:
-            raise HTTPException(
-                status_code=400,
-                detail=f"This model{model_name} is not implemented.",
-            )
-        del llm_settings["model"]
-        chat = model.start_chat()
-
-        async def create_event_stream():
-            for response in await cl.make_async(chat.send_message_streaming)(
-                messages, **llm_settings
-            ):
-                yield response.text
-
-        return StreamingResponse(create_event_stream())
-
-
-class GenerationVertexAIProvider(BaseProvider):
-    async def create_completion(self, request):
-        await super().create_completion(request)
-        from vertexai.language_models import TextGenerationModel, CodeGenerationModel
-
-        self.validate_env(request=request)
-
-        llm_settings = request.generation.settings
-        self.require_settings(llm_settings)
-
-        messages = self.create_generation(request)
-        model_name = llm_settings["model"]
-        if model_name.startswith("text-"):
-            model = TextGenerationModel.from_pretrained(model_name)
-        elif model_name.startswith("code-"):
-            model = CodeGenerationModel.from_pretrained(model_name)
-        else:
-            raise HTTPException(
-                status_code=400,
-                detail=f"This model{model_name} is not implemented.",
-            )
-        del llm_settings["model"]
-
-        async def create_event_stream():
-            for response in await cl.make_async(model.predict_streaming)(
-                messages, **llm_settings
-            ):
-                yield response.text
-
-        return StreamingResponse(create_event_stream())
-
-
-class GeminiProvider(BaseProvider):
-    async def create_completion(self, request):
-        await super().create_completion(request)
-        from vertexai.preview.generative_models import GenerativeModel
-        from google.cloud import aiplatform
-        import os
-
-        self.validate_env(request=request)
-
-        llm_settings = request.generation.settings
-        self.require_settings(llm_settings)
-
-        messages = self.create_generation(request)
-        aiplatform.init(  # TODO: remove this when Gemini is released in all the regions
-            project=os.environ["GCP_PROJECT_ID"],
-            location='us-central1',
-        )
-        model = GenerativeModel(llm_settings["model"])
-        del llm_settings["model"]
-
-        async def create_event_stream():
-            for response in await cl.make_async(model.generate_content)(
-                messages, stream=True, generation_config=llm_settings
-            ):
-                yield response.candidates[0].content.parts[0].text
-
-        return StreamingResponse(create_event_stream())
-
-
-gcp_env_vars = {"google_application_credentials": "GOOGLE_APPLICATION_CREDENTIALS"}
-
-ChatVertexAI = ChatVertexAIProvider(
-    id="chat-vertexai",
-    env_vars=gcp_env_vars,
-    name="ChatVertexAI",
-    inputs=[
-        Select(
-            id="model",
-            label="Model",
-            values=["chat-bison", "codechat-bison"],
-            initial_value="chat-bison",
-        ),
-        *vertexai_common_inputs,
-    ],
-    is_chat=True,
-)
-
-GenerationVertexAI = GenerationVertexAIProvider(
-    id="generation-vertexai",
-    env_vars=gcp_env_vars,
-    name="GenerationVertexAI",
-    inputs=[
-        Select(
-            id="model",
-            label="Model",
-            values=["text-bison", "code-bison"],
-            initial_value="text-bison",
-        ),
-        *vertexai_common_inputs,
-    ],
-    is_chat=False,
-)
-
-Gemini = GeminiProvider(
-    id="gemini",
-    env_vars=gcp_env_vars,
-    name="Gemini",
-    inputs=[
-        Select(
-            id="model",
-            label="Model",
-            values=["gemini-pro", "gemini-pro-vision"],
-            initial_value="gemini-pro",
-        ),
-        *vertexai_common_inputs,
-    ],
-    is_chat=False,
-)
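The VertexAI providers above wrapped every blocking SDK call in cl.make_async, which runs a sync function in a worker thread so it can be awaited; that utility is not part of the removal (chainlit/sync.py is only touched, +2 -1). A minimal sketch, with slow_sync_call standing in for a hypothetical blocking SDK method:

    import time

    import chainlit as cl


    def slow_sync_call(x: int) -> int:
        # Hypothetical blocking call; sleeps to simulate slow I/O.
        time.sleep(1)
        return x * 2


    @cl.on_message
    async def handle(message: cl.Message):
        # make_async returns an awaitable wrapper around the sync
        # function, so the event loop stays free while it runs.
        result = await cl.make_async(slow_sync_call)(21)
        await cl.Message(content=str(result)).send()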