h2ogpte 1.6.42__py3-none-any.whl → 1.6.43rc2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- h2ogpte/__init__.py +1 -1
- h2ogpte/cli/__init__.py +0 -0
- h2ogpte/cli/commands/__init__.py +0 -0
- h2ogpte/cli/commands/command_handlers/__init__.py +0 -0
- h2ogpte/cli/commands/command_handlers/agent.py +41 -0
- h2ogpte/cli/commands/command_handlers/chat.py +37 -0
- h2ogpte/cli/commands/command_handlers/clear.py +8 -0
- h2ogpte/cli/commands/command_handlers/collection.py +67 -0
- h2ogpte/cli/commands/command_handlers/config.py +113 -0
- h2ogpte/cli/commands/command_handlers/disconnect.py +36 -0
- h2ogpte/cli/commands/command_handlers/exit.py +37 -0
- h2ogpte/cli/commands/command_handlers/help.py +8 -0
- h2ogpte/cli/commands/command_handlers/history.py +29 -0
- h2ogpte/cli/commands/command_handlers/rag.py +146 -0
- h2ogpte/cli/commands/command_handlers/research_agent.py +45 -0
- h2ogpte/cli/commands/command_handlers/session.py +77 -0
- h2ogpte/cli/commands/command_handlers/status.py +33 -0
- h2ogpte/cli/commands/dispatcher.py +79 -0
- h2ogpte/cli/core/__init__.py +0 -0
- h2ogpte/cli/core/app.py +105 -0
- h2ogpte/cli/core/config.py +199 -0
- h2ogpte/cli/core/encryption.py +104 -0
- h2ogpte/cli/core/session.py +171 -0
- h2ogpte/cli/integrations/__init__.py +0 -0
- h2ogpte/cli/integrations/agent.py +338 -0
- h2ogpte/cli/integrations/rag.py +442 -0
- h2ogpte/cli/main.py +90 -0
- h2ogpte/cli/ui/__init__.py +0 -0
- h2ogpte/cli/ui/hbot_prompt.py +435 -0
- h2ogpte/cli/ui/prompts.py +129 -0
- h2ogpte/cli/ui/status_bar.py +133 -0
- h2ogpte/cli/utils/__init__.py +0 -0
- h2ogpte/cli/utils/file_manager.py +411 -0
- h2ogpte/h2ogpte.py +471 -67
- h2ogpte/h2ogpte_async.py +482 -68
- h2ogpte/h2ogpte_sync_base.py +8 -1
- h2ogpte/rest_async/__init__.py +6 -3
- h2ogpte/rest_async/api/chat_api.py +29 -0
- h2ogpte/rest_async/api/collections_api.py +293 -0
- h2ogpte/rest_async/api/extractors_api.py +2874 -70
- h2ogpte/rest_async/api/prompt_templates_api.py +32 -32
- h2ogpte/rest_async/api_client.py +1 -1
- h2ogpte/rest_async/configuration.py +1 -1
- h2ogpte/rest_async/models/__init__.py +5 -2
- h2ogpte/rest_async/models/chat_completion.py +4 -2
- h2ogpte/rest_async/models/chat_completion_delta.py +5 -3
- h2ogpte/rest_async/models/chat_completion_request.py +1 -1
- h2ogpte/rest_async/models/chat_session.py +4 -2
- h2ogpte/rest_async/models/chat_settings.py +1 -1
- h2ogpte/rest_async/models/collection.py +4 -2
- h2ogpte/rest_async/models/collection_create_request.py +4 -2
- h2ogpte/rest_async/models/create_chat_session_request.py +87 -0
- h2ogpte/rest_async/models/extraction_request.py +1 -1
- h2ogpte/rest_async/models/extractor.py +4 -2
- h2ogpte/rest_async/models/guardrails_settings.py +8 -4
- h2ogpte/rest_async/models/guardrails_settings_create_request.py +1 -1
- h2ogpte/rest_async/models/process_document_job_request.py +1 -1
- h2ogpte/rest_async/models/question_request.py +1 -1
- h2ogpte/rest_async/models/{reset_and_share_prompt_template_request.py → reset_and_share_request.py} +6 -6
- h2ogpte/{rest_sync/models/reset_and_share_prompt_template_with_groups_request.py → rest_async/models/reset_and_share_with_groups_request.py} +6 -6
- h2ogpte/rest_async/models/summarize_request.py +1 -1
- h2ogpte/rest_async/models/update_collection_workspace_request.py +87 -0
- h2ogpte/rest_async/models/update_extractor_privacy_request.py +87 -0
- h2ogpte/rest_sync/__init__.py +6 -3
- h2ogpte/rest_sync/api/chat_api.py +29 -0
- h2ogpte/rest_sync/api/collections_api.py +293 -0
- h2ogpte/rest_sync/api/extractors_api.py +2874 -70
- h2ogpte/rest_sync/api/prompt_templates_api.py +32 -32
- h2ogpte/rest_sync/api_client.py +1 -1
- h2ogpte/rest_sync/configuration.py +1 -1
- h2ogpte/rest_sync/models/__init__.py +5 -2
- h2ogpte/rest_sync/models/chat_completion.py +4 -2
- h2ogpte/rest_sync/models/chat_completion_delta.py +5 -3
- h2ogpte/rest_sync/models/chat_completion_request.py +1 -1
- h2ogpte/rest_sync/models/chat_session.py +4 -2
- h2ogpte/rest_sync/models/chat_settings.py +1 -1
- h2ogpte/rest_sync/models/collection.py +4 -2
- h2ogpte/rest_sync/models/collection_create_request.py +4 -2
- h2ogpte/rest_sync/models/create_chat_session_request.py +87 -0
- h2ogpte/rest_sync/models/extraction_request.py +1 -1
- h2ogpte/rest_sync/models/extractor.py +4 -2
- h2ogpte/rest_sync/models/guardrails_settings.py +8 -4
- h2ogpte/rest_sync/models/guardrails_settings_create_request.py +1 -1
- h2ogpte/rest_sync/models/process_document_job_request.py +1 -1
- h2ogpte/rest_sync/models/question_request.py +1 -1
- h2ogpte/rest_sync/models/{reset_and_share_prompt_template_request.py → reset_and_share_request.py} +6 -6
- h2ogpte/{rest_async/models/reset_and_share_prompt_template_with_groups_request.py → rest_sync/models/reset_and_share_with_groups_request.py} +6 -6
- h2ogpte/rest_sync/models/summarize_request.py +1 -1
- h2ogpte/rest_sync/models/update_collection_workspace_request.py +87 -0
- h2ogpte/rest_sync/models/update_extractor_privacy_request.py +87 -0
- h2ogpte/session.py +3 -2
- h2ogpte/session_async.py +22 -6
- h2ogpte/types.py +6 -0
- {h2ogpte-1.6.42.dist-info → h2ogpte-1.6.43rc2.dist-info}/METADATA +5 -1
- {h2ogpte-1.6.42.dist-info → h2ogpte-1.6.43rc2.dist-info}/RECORD +98 -59
- h2ogpte-1.6.43rc2.dist-info/entry_points.txt +2 -0
- {h2ogpte-1.6.42.dist-info → h2ogpte-1.6.43rc2.dist-info}/WHEEL +0 -0
- {h2ogpte-1.6.42.dist-info → h2ogpte-1.6.43rc2.dist-info}/top_level.txt +0 -0
h2ogpte/rest_sync/api/prompt_templates_api.py
CHANGED

@@ -24,8 +24,8 @@ from h2ogpte.rest_sync.models.group_share_permission import GroupSharePermission
 from h2ogpte.rest_sync.models.prompt_template import PromptTemplate
 from h2ogpte.rest_sync.models.prompt_template_base import PromptTemplateBase
 from h2ogpte.rest_sync.models.prompt_template_create_request import PromptTemplateCreateRequest
-from h2ogpte.rest_sync.models.
-from h2ogpte.rest_sync.models.
+from h2ogpte.rest_sync.models.reset_and_share_request import ResetAndShareRequest
+from h2ogpte.rest_sync.models.reset_and_share_with_groups_request import ResetAndShareWithGroupsRequest
 from h2ogpte.rest_sync.models.share_permission import SharePermission
 from h2ogpte.rest_sync.models.update_default_prompt_template_visibility_request import UpdateDefaultPromptTemplateVisibilityRequest
 from h2ogpte.rest_sync.models.update_prompt_template_privacy_request import UpdatePromptTemplatePrivacyRequest

@@ -2589,7 +2589,7 @@ class PromptTemplatesApi:
     def reset_and_share_prompt_template(
         self,
         prompt_template_id: Annotated[StrictStr, Field(description="Id of a prompt template.")],
-
+        reset_and_share_request: Annotated[ResetAndShareRequest, Field(description="List of usernames the prompt template should be shared with.")],
         _request_timeout: Union[
             None,
             Annotated[StrictFloat, Field(gt=0)],

@@ -2609,8 +2609,8 @@ class PromptTemplatesApi:

         :param prompt_template_id: Id of a prompt template. (required)
         :type prompt_template_id: str
-        :param
-        :type
+        :param reset_and_share_request: List of usernames the prompt template should be shared with. (required)
+        :type reset_and_share_request: ResetAndShareRequest
         :param _request_timeout: timeout setting for this request. If one
                                  number provided, it will be total request
                                  timeout. It can also be a pair (tuple) of

@@ -2635,7 +2635,7 @@ class PromptTemplatesApi:

         _param = self._reset_and_share_prompt_template_serialize(
             prompt_template_id=prompt_template_id,
-
+            reset_and_share_request=reset_and_share_request,
             _request_auth=_request_auth,
             _content_type=_content_type,
             _headers=_headers,

@@ -2661,7 +2661,7 @@ class PromptTemplatesApi:
     def reset_and_share_prompt_template_with_http_info(
         self,
         prompt_template_id: Annotated[StrictStr, Field(description="Id of a prompt template.")],
-
+        reset_and_share_request: Annotated[ResetAndShareRequest, Field(description="List of usernames the prompt template should be shared with.")],
         _request_timeout: Union[
             None,
             Annotated[StrictFloat, Field(gt=0)],

@@ -2681,8 +2681,8 @@ class PromptTemplatesApi:

         :param prompt_template_id: Id of a prompt template. (required)
         :type prompt_template_id: str
-        :param
-        :type
+        :param reset_and_share_request: List of usernames the prompt template should be shared with. (required)
+        :type reset_and_share_request: ResetAndShareRequest
         :param _request_timeout: timeout setting for this request. If one
                                  number provided, it will be total request
                                  timeout. It can also be a pair (tuple) of

@@ -2707,7 +2707,7 @@ class PromptTemplatesApi:

         _param = self._reset_and_share_prompt_template_serialize(
             prompt_template_id=prompt_template_id,
-
+            reset_and_share_request=reset_and_share_request,
             _request_auth=_request_auth,
             _content_type=_content_type,
             _headers=_headers,

@@ -2733,7 +2733,7 @@ class PromptTemplatesApi:
     def reset_and_share_prompt_template_without_preload_content(
         self,
         prompt_template_id: Annotated[StrictStr, Field(description="Id of a prompt template.")],
-
+        reset_and_share_request: Annotated[ResetAndShareRequest, Field(description="List of usernames the prompt template should be shared with.")],
         _request_timeout: Union[
             None,
             Annotated[StrictFloat, Field(gt=0)],

@@ -2753,8 +2753,8 @@ class PromptTemplatesApi:

         :param prompt_template_id: Id of a prompt template. (required)
         :type prompt_template_id: str
-        :param
-        :type
+        :param reset_and_share_request: List of usernames the prompt template should be shared with. (required)
+        :type reset_and_share_request: ResetAndShareRequest
         :param _request_timeout: timeout setting for this request. If one
                                  number provided, it will be total request
                                  timeout. It can also be a pair (tuple) of

@@ -2779,7 +2779,7 @@ class PromptTemplatesApi:

         _param = self._reset_and_share_prompt_template_serialize(
             prompt_template_id=prompt_template_id,
-
+            reset_and_share_request=reset_and_share_request,
             _request_auth=_request_auth,
             _content_type=_content_type,
             _headers=_headers,

@@ -2800,7 +2800,7 @@ class PromptTemplatesApi:
     def _reset_and_share_prompt_template_serialize(
         self,
         prompt_template_id,
-
+        reset_and_share_request,
         _request_auth,
         _content_type,
         _headers,

@@ -2828,8 +2828,8 @@ class PromptTemplatesApi:
         # process the header parameters
         # process the form parameters
         # process the body parameter
-        if
-            _body_params =
+        if reset_and_share_request is not None:
+            _body_params = reset_and_share_request


         # set the HTTP header `Accept`

@@ -2881,7 +2881,7 @@ class PromptTemplatesApi:
     def reset_and_share_prompt_template_with_groups(
         self,
         prompt_template_id: Annotated[StrictStr, Field(description="Id of a prompt template.")],
-
+        reset_and_share_with_groups_request: Annotated[ResetAndShareWithGroupsRequest, Field(description="List of group ids the prompt template should be shared with.")],
         _request_timeout: Union[
             None,
             Annotated[StrictFloat, Field(gt=0)],

@@ -2901,8 +2901,8 @@ class PromptTemplatesApi:

         :param prompt_template_id: Id of a prompt template. (required)
         :type prompt_template_id: str
-        :param
-        :type
+        :param reset_and_share_with_groups_request: List of group ids the prompt template should be shared with. (required)
+        :type reset_and_share_with_groups_request: ResetAndShareWithGroupsRequest
         :param _request_timeout: timeout setting for this request. If one
                                  number provided, it will be total request
                                  timeout. It can also be a pair (tuple) of

@@ -2927,7 +2927,7 @@ class PromptTemplatesApi:

         _param = self._reset_and_share_prompt_template_with_groups_serialize(
             prompt_template_id=prompt_template_id,
-
+            reset_and_share_with_groups_request=reset_and_share_with_groups_request,
             _request_auth=_request_auth,
             _content_type=_content_type,
             _headers=_headers,

@@ -2953,7 +2953,7 @@ class PromptTemplatesApi:
     def reset_and_share_prompt_template_with_groups_with_http_info(
         self,
         prompt_template_id: Annotated[StrictStr, Field(description="Id of a prompt template.")],
-
+        reset_and_share_with_groups_request: Annotated[ResetAndShareWithGroupsRequest, Field(description="List of group ids the prompt template should be shared with.")],
         _request_timeout: Union[
             None,
             Annotated[StrictFloat, Field(gt=0)],

@@ -2973,8 +2973,8 @@ class PromptTemplatesApi:

         :param prompt_template_id: Id of a prompt template. (required)
         :type prompt_template_id: str
-        :param
-        :type
+        :param reset_and_share_with_groups_request: List of group ids the prompt template should be shared with. (required)
+        :type reset_and_share_with_groups_request: ResetAndShareWithGroupsRequest
         :param _request_timeout: timeout setting for this request. If one
                                  number provided, it will be total request
                                  timeout. It can also be a pair (tuple) of

@@ -2999,7 +2999,7 @@ class PromptTemplatesApi:

         _param = self._reset_and_share_prompt_template_with_groups_serialize(
             prompt_template_id=prompt_template_id,
-
+            reset_and_share_with_groups_request=reset_and_share_with_groups_request,
             _request_auth=_request_auth,
             _content_type=_content_type,
             _headers=_headers,

@@ -3025,7 +3025,7 @@ class PromptTemplatesApi:
     def reset_and_share_prompt_template_with_groups_without_preload_content(
         self,
         prompt_template_id: Annotated[StrictStr, Field(description="Id of a prompt template.")],
-
+        reset_and_share_with_groups_request: Annotated[ResetAndShareWithGroupsRequest, Field(description="List of group ids the prompt template should be shared with.")],
         _request_timeout: Union[
             None,
             Annotated[StrictFloat, Field(gt=0)],

@@ -3045,8 +3045,8 @@ class PromptTemplatesApi:

         :param prompt_template_id: Id of a prompt template. (required)
         :type prompt_template_id: str
-        :param
-        :type
+        :param reset_and_share_with_groups_request: List of group ids the prompt template should be shared with. (required)
+        :type reset_and_share_with_groups_request: ResetAndShareWithGroupsRequest
         :param _request_timeout: timeout setting for this request. If one
                                  number provided, it will be total request
                                  timeout. It can also be a pair (tuple) of

@@ -3071,7 +3071,7 @@ class PromptTemplatesApi:

         _param = self._reset_and_share_prompt_template_with_groups_serialize(
             prompt_template_id=prompt_template_id,
-
+            reset_and_share_with_groups_request=reset_and_share_with_groups_request,
             _request_auth=_request_auth,
             _content_type=_content_type,
             _headers=_headers,

@@ -3092,7 +3092,7 @@ class PromptTemplatesApi:
     def _reset_and_share_prompt_template_with_groups_serialize(
         self,
         prompt_template_id,
-
+        reset_and_share_with_groups_request,
         _request_auth,
         _content_type,
         _headers,

@@ -3120,8 +3120,8 @@ class PromptTemplatesApi:
         # process the header parameters
         # process the form parameters
        # process the body parameter
-        if
-            _body_params =
+        if reset_and_share_with_groups_request is not None:
+            _body_params = reset_and_share_with_groups_request


        # set the HTTP header `Accept`
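For orientation, a minimal sketch of calling the regenerated sync client with the renamed body model. Only the module, class, method, and parameter names below appear in this diff; the `usernames` field on ResetAndShareRequest and the way the ApiClient is obtained are assumptions to verify against the generated code.

```python
# Sketch only: `usernames` is an assumed field name, not taken from this diff.
from h2ogpte.rest_sync.api.prompt_templates_api import PromptTemplatesApi
from h2ogpte.rest_sync.models.reset_and_share_request import ResetAndShareRequest


def share_prompt_template(api_client, prompt_template_id, users):
    """Reset sharing of a prompt template to the given list of usernames."""
    api = PromptTemplatesApi(api_client)
    body = ResetAndShareRequest(usernames=users)  # assumed field name
    api.reset_and_share_prompt_template(
        prompt_template_id=prompt_template_id,
        reset_and_share_request=body,
    )
```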
h2ogpte/rest_sync/api_client.py
CHANGED
@@ -90,7 +90,7 @@ class ApiClient:
         self.default_headers[header_name] = header_value
         self.cookie = cookie
         # Set default User-Agent.
-        self.user_agent = 'OpenAPI-Generator/1.6.
+        self.user_agent = 'OpenAPI-Generator/1.6.43-dev2/python'
         self.client_side_validation = configuration.client_side_validation

     def __enter__(self):
h2ogpte/rest_sync/configuration.py
CHANGED

@@ -503,7 +503,7 @@ class Configuration:
                "OS: {env}\n"\
                "Python Version: {pyversion}\n"\
                "Version of the API: v1.0.0\n"\
-               "SDK Package Version: 1.6.
+               "SDK Package Version: 1.6.43-dev2".\
                format(env=sys.platform, pyversion=sys.version)

     def get_host_settings(self) -> List[HostSetting]:
h2ogpte/rest_sync/models/__init__.py
CHANGED

@@ -51,6 +51,7 @@ from h2ogpte.rest_sync.models.count_with_queue_details import CountWithQueueDetails
 from h2ogpte.rest_sync.models.create_agent_key_request import CreateAgentKeyRequest
 from h2ogpte.rest_sync.models.create_agent_tool_key_associations_request import CreateAgentToolKeyAssociationsRequest
 from h2ogpte.rest_sync.models.create_agent_tool_request import CreateAgentToolRequest
+from h2ogpte.rest_sync.models.create_chat_session_request import CreateChatSessionRequest
 from h2ogpte.rest_sync.models.create_import_collection_to_collection_job_request import CreateImportCollectionToCollectionJobRequest
 from h2ogpte.rest_sync.models.create_insert_document_to_collection_job_request import CreateInsertDocumentToCollectionJobRequest
 from h2ogpte.rest_sync.models.create_secret201_response import CreateSecret201Response

@@ -106,8 +107,8 @@ from h2ogpte.rest_sync.models.prompt_template_create_request import PromptTemplateCreateRequest
 from h2ogpte.rest_sync.models.qa_feedback import QAFeedback
 from h2ogpte.rest_sync.models.question_request import QuestionRequest
 from h2ogpte.rest_sync.models.queue_details import QueueDetails
-from h2ogpte.rest_sync.models.
-from h2ogpte.rest_sync.models.
+from h2ogpte.rest_sync.models.reset_and_share_request import ResetAndShareRequest
+from h2ogpte.rest_sync.models.reset_and_share_with_groups_request import ResetAndShareWithGroupsRequest
 from h2ogpte.rest_sync.models.role_create_request import RoleCreateRequest
 from h2ogpte.rest_sync.models.role_info import RoleInfo
 from h2ogpte.rest_sync.models.roles_reset_request import RolesResetRequest

@@ -130,9 +131,11 @@ from h2ogpte.rest_sync.models.update_agent_tool_preference_request import UpdateAgentToolPreferenceRequest
 from h2ogpte.rest_sync.models.update_collection_expiry_date_request import UpdateCollectionExpiryDateRequest
 from h2ogpte.rest_sync.models.update_collection_inactivity_interval_request import UpdateCollectionInactivityIntervalRequest
 from h2ogpte.rest_sync.models.update_collection_privacy_request import UpdateCollectionPrivacyRequest
+from h2ogpte.rest_sync.models.update_collection_workspace_request import UpdateCollectionWorkspaceRequest
 from h2ogpte.rest_sync.models.update_custom_agent_tool200_response import UpdateCustomAgentTool200Response
 from h2ogpte.rest_sync.models.update_custom_agent_tool_request import UpdateCustomAgentToolRequest
 from h2ogpte.rest_sync.models.update_default_prompt_template_visibility_request import UpdateDefaultPromptTemplateVisibilityRequest
+from h2ogpte.rest_sync.models.update_extractor_privacy_request import UpdateExtractorPrivacyRequest
 from h2ogpte.rest_sync.models.update_prompt_template_privacy_request import UpdatePromptTemplatePrivacyRequest
 from h2ogpte.rest_sync.models.update_qa_feedback_request import UpdateQAFeedbackRequest
 from h2ogpte.rest_sync.models.update_secret_request import UpdateSecretRequest
h2ogpte/rest_sync/models/chat_completion.py
CHANGED

@@ -17,7 +17,7 @@ import pprint
 import re  # noqa: F401
 import json

-from pydantic import BaseModel, ConfigDict, StrictStr
+from pydantic import BaseModel, ConfigDict, Field, StrictStr
 from typing import Any, ClassVar, Dict, List
 from typing import Optional, Set
 from typing_extensions import Self

@@ -26,8 +26,9 @@ class ChatCompletion(BaseModel):
     """
     ChatCompletion
     """ # noqa: E501
+    message_id: StrictStr = Field(description="Id of the chat completion message")
     body: StrictStr
-    __properties: ClassVar[List[str]] = ["body"]
+    __properties: ClassVar[List[str]] = ["message_id", "body"]

     model_config = ConfigDict(
         populate_by_name=True,

@@ -80,6 +81,7 @@ class ChatCompletion(BaseModel):
             return cls.model_validate(obj)

         _obj = cls.model_validate({
+            "message_id": obj.get("message_id"),
             "body": obj.get("body")
         })
         return _obj
h2ogpte/rest_sync/models/chat_completion_delta.py
CHANGED

@@ -17,8 +17,8 @@ import pprint
 import re  # noqa: F401
 import json

-from pydantic import BaseModel, ConfigDict, StrictBool, StrictStr
-from typing import Any, ClassVar, Dict, List
+from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr
+from typing import Any, ClassVar, Dict, List, Optional
 from typing import Optional, Set
 from typing_extensions import Self

@@ -26,9 +26,10 @@ class ChatCompletionDelta(BaseModel):
     """
     ChatCompletionDelta
     """ # noqa: E501
+    message_id: Optional[StrictStr] = Field(default=None, description="Id of the chat completion message. The attribute is populated only in the last message (`finished==True`).")
     body: StrictStr
     finished: StrictBool
-    __properties: ClassVar[List[str]] = ["body", "finished"]
+    __properties: ClassVar[List[str]] = ["message_id", "body", "finished"]

     model_config = ConfigDict(
         populate_by_name=True,

@@ -81,6 +82,7 @@ class ChatCompletionDelta(BaseModel):
             return cls.model_validate(obj)

         _obj = cls.model_validate({
+            "message_id": obj.get("message_id"),
             "body": obj.get("body"),
             "finished": obj.get("finished")
         })
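The new `message_id` attribute lets streaming consumers recover the id of the stored chat message once the stream ends. A minimal sketch, assuming `deltas` is an iterable of ChatCompletionDelta objects yielded by the chat endpoint and that each `body` is an incremental chunk:

```python
from typing import Iterable, List, Optional, Tuple

from h2ogpte.rest_sync.models.chat_completion_delta import ChatCompletionDelta


def collect_completion(deltas: Iterable[ChatCompletionDelta]) -> Tuple[str, Optional[str]]:
    """Join streamed chunks and read message_id from the final delta."""
    parts: List[str] = []
    message_id: Optional[str] = None
    for delta in deltas:
        parts.append(delta.body)
        if delta.finished:
            # Per this diff, message_id is populated only when finished is True.
            message_id = delta.message_id
    return "".join(parts), message_id
```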
h2ogpte/rest_sync/models/chat_completion_request.py
CHANGED

@@ -33,7 +33,7 @@ class ChatCompletionRequest(BaseModel):
     image_batch_final_prompt: Optional[StrictStr] = Field(default=None, description="A prompt for each image batch for vision models.")
     image_batch_image_prompt: Optional[StrictStr] = Field(default=None, description="A prompt to reduce all answers each image batch for vision models")
     llm: Optional[StrictStr] = Field(default=None, description="LLM name to send the query. Use \"auto\" for automatic model routing, set cost_controls of llm_args for detailed control over automatic routing.")
-    llm_args: Optional[Dict[str, Any]] = Field(default=None, description="A map of arguments sent to LLM with query. * `temperature` **(type=double, default=0.0)** […] * `min_chars_per_yield` **(type=integer, default=1)** - Minimum characters to yield in streaming response. * `cost_controls` **(type=map)** A map with cost controls settings: […] * `agent_stream_files` **(type=boolean, default=True)** - Whether to stream files from agent. ")
+    llm_args: Optional[Dict[str, Any]] = Field(default=None, description="A map of arguments sent to LLM with query. * `temperature` **(type=double, default=0.0)** […] * `min_chars_per_yield` **(type=integer, default=1)** - Minimum characters to yield in streaming response. * `reasoning_effort` **(type=integer, default=0)** - Level of reasoning effort for the model (higher values = deeper reasoning, e.g., 10000-65000). Use for models that support chain-of-thought reasoning. 0 means no additional reasoning effort. * `cost_controls` **(type=map)** A map with cost controls settings: […] * `agent_stream_files` **(type=boolean, default=True)** - Whether to stream files from agent. ")
     self_reflection_config: Optional[Dict[str, Any]] = Field(default=None, description="A map with self reflection settings: […]")
     rag_config: Optional[Dict[str, Any]] = Field(default=None, description="A map with arguments to control RAG (retrieval-augmented-generation) types.: […]")
     include_chat_history: Optional[StrictStr] = Field(default=None, description="Whether to include chat history. Includes previous questions and answers for the current chat session for each new chat request. Disable if require deterministic answers for a given question.")
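The only change to the `llm_args` documentation above is the new `reasoning_effort` entry. A sketch of such a map; the surrounding values are illustrative, and whether the setting has any effect depends on the chosen model supporting chain-of-thought reasoning:

```python
# Illustrative llm_args map; only the reasoning_effort key is new in this release.
llm_args = {
    "temperature": 0.0,      # deterministic decoding
    "max_new_tokens": 1024,
    # 0 means no additional reasoning effort; larger values (e.g. 10000-65000)
    # request deeper chain-of-thought reasoning from models that support it.
    "reasoning_effort": 20000,
}
# The map is passed as the llm_args field of ChatCompletionRequest (or ChatSettings).
```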
h2ogpte/rest_sync/models/chat_session.py
CHANGED

@@ -34,7 +34,8 @@ class ChatSession(BaseModel):
     prompt_template_id: Optional[StrictStr] = None
     latest_message_content: Optional[StrictStr] = None
     updated_at: datetime
-
+    workspace: Optional[StrictStr] = None
+    __properties: ClassVar[List[str]] = ["id", "name", "collection_id", "collection_name", "prompt_template_id", "latest_message_content", "updated_at", "workspace"]

     model_config = ConfigDict(
         populate_by_name=True,

@@ -93,7 +94,8 @@ class ChatSession(BaseModel):
             "collection_name": obj.get("collection_name"),
             "prompt_template_id": obj.get("prompt_template_id"),
             "latest_message_content": obj.get("latest_message_content"),
-            "updated_at": obj.get("updated_at")
+            "updated_at": obj.get("updated_at"),
+            "workspace": obj.get("workspace")
         })
         return _obj
@@ -27,7 +27,7 @@ class ChatSettings(BaseModel):
|
|
|
27
27
|
ChatSettings
|
|
28
28
|
""" # noqa: E501
|
|
29
29
|
llm: Optional[StrictStr] = Field(default=None, description="LLM name to send the query. Use \"auto\" for automatic model routing, set cost_controls of llm_args for detailed control over automatic routing.")
|
|
30
|
-
llm_args: Optional[Dict[str, Any]] = Field(default=None, description="A map of arguments sent to LLM with query. * `temperature` **(type=double, default=0.0)** - A value used to modulate the next token probabilities. 0 is the most deterministic and 1 is most creative. * `top_k` **(type=integer, default=1)** - A number of highest probability vocabulary tokens to keep for top-k-filtering. * `top_p` **(type=double, default=0.0)** - If set to a value < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation. * `seed` **(type=integer, default=0)** - A seed for the random number generator when sampling during generation (if temp>0 or top_k>1 or top_p<1), seed=0 picks a random seed. * `repetition_penalty` **(type=double, default=1.07)** - A parameter for repetition penalty. 1.0 means no penalty. * `max_new_tokens` **(type=double, default=1024)** - A maximum number of new tokens to generate. This limit applies to each (map+reduce) step during summarization and each (map) step during extraction. * `min_max_new_tokens` **(type=integer, default=512)** - A minimum value for max_new_tokens when auto-adjusting for content of prompt, docs, etc. * `response_format` **(type=enum[text, json_object, json_code], default=text)** - An output type of LLM * `guided_json` **(type=map)** - If specified, the output will follow the JSON schema. * `guided_regex` **(type=string)** - If specified, the output will follow the regex pattern. Only for models that support guided generation. * `guided_choice` **(type=array[string])** - If specified, the output will be exactly one of the choices. Only for models that support guided generation. * `guided_grammar` **(type=string)** - If specified, the output will follow the context free grammar. Only for models that support guided generation. * `guided_whitespace_pattern` **(type=string)** - If specified, will override the default whitespace pattern for guided json decoding. Only for models that support guided generation. * `enable_vision` **(type=enum[on, off, auto], default=auto)** - Controls vision mode, send images to the LLM in addition to text chunks. * `visible_vision_models` **(type=array[string], default=[auto])** - Controls which vision model to use when processing images. Must provide exactly one model. [auto] for automatic. * `images_num_max` **(type=integer, default=None)** - Maximum number of images to process. * `json_preserve_system_prompt` **(type=boolean, default=None)** - Whether to preserve system prompt in JSON response. * `client_metadata` **(type=string, default=None)** - Additional metadata to send with the request. * `min_chars_per_yield` **(type=integer, default=1)** - Minimum characters to yield in streaming response. * `cost_controls` **(type=map)** A map with cost controls settings: * `max_cost` **(type=double)** - Sets the maximum allowed cost in USD per LLM call when doing Automatic model routing. If the estimated cost based on input and output token counts is higher than this limit, the request will fail as early as possible. * `max_cost_per_million_tokens` **(type=double)** - Only consider models that cost less than this value in USD per million tokens when doing automatic routing. Using the max of input and output cost. * `model` **(type=array[string])** - Optional subset of models to consider when doing automatic routing. If not specified, all models are considered. 
* `willingness_to_pay` **(type=double)** - Controls the willingness to pay extra for a more accurate model for every LLM call when doing automatic routing, in units of USD per +10% increase in accuracy. We start with the least accurate model. For each more accurate model, we accept it if the increase in estimated cost divided by the increase in estimated accuracy is no more than this value divided by 10%, up to the upper limit specified above. Lower values will try to keep the cost as low as possible, higher values will approach the cost limit to increase accuracy. 0 means unlimited. * `willingness_to_wait` **(type=double)** - Controls the willingness to wait longer for a more accurate model for every LLM call when doing automatic routing, in units of seconds per +10% increase in accuracy. We start with the least accurate model. For each more accurate model, we accept it if the increase in estimated time divided by the increase in estimated accuracy is no more than this value divided by 10%. Lower values will try to keep the time as low as possible, higher values will take longer to increase accuracy. 0 means unlimited. * `use_agent` **(type=boolean, default=False)** - If True, use the AI agent (with access to tools) to generate the response. * `agent_accuracy` **(type=string, default=\"standard\")** - Effort level by the agent. Only if use_agent=True. One of [\"quick\", \"basic\", \"standard\", \"maximum\"]. * `agent_max_turns` **(type=union[string, integer], default=\"auto\")** - Optional max. number of back-and-forth turns with the agent. Only if use_agent=True. Either \"auto\" or an integer. * `agent_tools` **(type=union[string, array[string]], default=\"auto\")** - Either \"auto\", \"all\", \"any\" to enable all available tools, or a specific list of tools to use. Only if use_agent=True. * `agent_type` **(type=string, default=\"auto\")** - Type of agent to use for task processing. * `agent_original_files` **(type=array[string], default=None)** - List of file paths for agent to process. * `agent_timeout` **(type=integer, default=None)** - Timeout in seconds for each agent turn. * `agent_total_timeout` **(type=integer, default=3600)** - Total timeout in seconds for all agent processing. * `agent_code_writer_system_message` **(type=string, default=None)** - System message for agent code writer. * `agent_num_executable_code_blocks_limit` **(type=integer, default=1)** - Maximum number of executable code blocks. * `agent_system_site_packages` **(type=boolean, default=True)** - Whether agent has access to system site packages. * `agent_main_model` **(type=string, default=None)** - Main model to use for agent. * `agent_max_stream_length` **(type=integer, default=None)** - Maximum stream length for agent response. * `agent_max_memory_usage` **(type=integer, default=16*1024**3)** - Maximum memory usage for agent in bytes (16GB default). * `agent_main_reasoning_effort` **(type=integer, default=None)** - Effort level for main reasoning. * `agent_advanced_reasoning_effort` **(type=integer, default=None)** - Effort level for advanced reasoning. * `agent_max_confidence_level` **(type=integer, default=None)** - Maximum confidence level for agent responses. * `agent_planning_forced_mode` **(type=boolean, default=None)** - Whether to force planning mode for agent. * `agent_too_soon_forced_mode` **(type=boolean, default=None)** - Whether to force \"too soon\" mode for agent. * `agent_critique_forced_mode` **(type=integer, default=None)** - Whether to force critique mode for agent. 
* `agent_stream_files` **(type=boolean, default=True)** - Whether to stream files from agent. ")
|
|
30
|
+
llm_args: Optional[Dict[str, Any]] = Field(default=None, description="A map of arguments sent to LLM with query. * `temperature` **(type=double, default=0.0)** - A value used to modulate the next token probabilities. 0 is the most deterministic and 1 is most creative. * `top_k` **(type=integer, default=1)** - A number of highest probability vocabulary tokens to keep for top-k-filtering. * `top_p` **(type=double, default=0.0)** - If set to a value < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation. * `seed` **(type=integer, default=0)** - A seed for the random number generator when sampling during generation (if temp>0 or top_k>1 or top_p<1), seed=0 picks a random seed. * `repetition_penalty` **(type=double, default=1.07)** - A parameter for repetition penalty. 1.0 means no penalty. * `max_new_tokens` **(type=double, default=1024)** - A maximum number of new tokens to generate. This limit applies to each (map+reduce) step during summarization and each (map) step during extraction. * `min_max_new_tokens` **(type=integer, default=512)** - A minimum value for max_new_tokens when auto-adjusting for content of prompt, docs, etc. * `response_format` **(type=enum[text, json_object, json_code], default=text)** - An output type of LLM * `guided_json` **(type=map)** - If specified, the output will follow the JSON schema. * `guided_regex` **(type=string)** - If specified, the output will follow the regex pattern. Only for models that support guided generation. * `guided_choice` **(type=array[string])** - If specified, the output will be exactly one of the choices. Only for models that support guided generation. * `guided_grammar` **(type=string)** - If specified, the output will follow the context free grammar. Only for models that support guided generation. * `guided_whitespace_pattern` **(type=string)** - If specified, will override the default whitespace pattern for guided json decoding. Only for models that support guided generation. * `enable_vision` **(type=enum[on, off, auto], default=auto)** - Controls vision mode, send images to the LLM in addition to text chunks. * `visible_vision_models` **(type=array[string], default=[auto])** - Controls which vision model to use when processing images. Must provide exactly one model. [auto] for automatic. * `images_num_max` **(type=integer, default=None)** - Maximum number of images to process. * `json_preserve_system_prompt` **(type=boolean, default=None)** - Whether to preserve system prompt in JSON response. * `client_metadata` **(type=string, default=None)** - Additional metadata to send with the request. * `min_chars_per_yield` **(type=integer, default=1)** - Minimum characters to yield in streaming response. * `reasoning_effort` **(type=integer, default=0)** - Level of reasoning effort for the model (higher values = deeper reasoning, e.g., 10000-65000). Use for models that support chain-of-thought reasoning. 0 means no additional reasoning effort. * `cost_controls` **(type=map)** A map with cost controls settings: * `max_cost` **(type=double)** - Sets the maximum allowed cost in USD per LLM call when doing Automatic model routing. If the estimated cost based on input and output token counts is higher than this limit, the request will fail as early as possible. * `max_cost_per_million_tokens` **(type=double)** - Only consider models that cost less than this value in USD per million tokens when doing automatic routing. Using the max of input and output cost. 
* `model` **(type=array[string])** - Optional subset of models to consider when doing automatic routing. If not specified, all models are considered. * `willingness_to_pay` **(type=double)** - Controls the willingness to pay extra for a more accurate model for every LLM call when doing automatic routing, in units of USD per +10% increase in accuracy. We start with the least accurate model. For each more accurate model, we accept it if the increase in estimated cost divided by the increase in estimated accuracy is no more than this value divided by 10%, up to the upper limit specified above. Lower values will try to keep the cost as low as possible, higher values will approach the cost limit to increase accuracy. 0 means unlimited. * `willingness_to_wait` **(type=double)** - Controls the willingness to wait longer for a more accurate model for every LLM call when doing automatic routing, in units of seconds per +10% increase in accuracy. We start with the least accurate model. For each more accurate model, we accept it if the increase in estimated time divided by the increase in estimated accuracy is no more than this value divided by 10%. Lower values will try to keep the time as low as possible, higher values will take longer to increase accuracy. 0 means unlimited. * `use_agent` **(type=boolean, default=False)** - If True, use the AI agent (with access to tools) to generate the response. * `agent_accuracy` **(type=string, default=\"standard\")** - Effort level by the agent. Only if use_agent=True. One of [\"quick\", \"basic\", \"standard\", \"maximum\"]. * `agent_max_turns` **(type=union[string, integer], default=\"auto\")** - Optional max. number of back-and-forth turns with the agent. Only if use_agent=True. Either \"auto\" or an integer. * `agent_tools` **(type=union[string, array[string]], default=\"auto\")** - Either \"auto\", \"all\", \"any\" to enable all available tools, or a specific list of tools to use. Only if use_agent=True. * `agent_type` **(type=string, default=\"auto\")** - Type of agent to use for task processing. * `agent_original_files` **(type=array[string], default=None)** - List of file paths for agent to process. * `agent_timeout` **(type=integer, default=None)** - Timeout in seconds for each agent turn. * `agent_total_timeout` **(type=integer, default=3600)** - Total timeout in seconds for all agent processing. * `agent_code_writer_system_message` **(type=string, default=None)** - System message for agent code writer. * `agent_num_executable_code_blocks_limit` **(type=integer, default=1)** - Maximum number of executable code blocks. * `agent_system_site_packages` **(type=boolean, default=True)** - Whether agent has access to system site packages. * `agent_main_model` **(type=string, default=None)** - Main model to use for agent. * `agent_max_stream_length` **(type=integer, default=None)** - Maximum stream length for agent response. * `agent_max_memory_usage` **(type=integer, default=16*1024**3)** - Maximum memory usage for agent in bytes (16GB default). * `agent_main_reasoning_effort` **(type=integer, default=None)** - Effort level for main reasoning. * `agent_advanced_reasoning_effort` **(type=integer, default=None)** - Effort level for advanced reasoning. * `agent_max_confidence_level` **(type=integer, default=None)** - Maximum confidence level for agent responses. * `agent_planning_forced_mode` **(type=boolean, default=None)** - Whether to force planning mode for agent. 
* `agent_too_soon_forced_mode` **(type=boolean, default=None)** - Whether to force \"too soon\" mode for agent. * `agent_critique_forced_mode` **(type=integer, default=None)** - Whether to force critique mode for agent. * `agent_stream_files` **(type=boolean, default=True)** - Whether to stream files from agent. ")
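For orientation, the sketch below shows what an `llm_args` map built from the parameters documented above could look like. It is illustrative only: every key is taken from the description, but the values (and the placeholder model names under `cost_controls.model`) are examples, not recommended defaults, and the call that would carry this map is not shown.

```python
# Illustrative llm_args map; keys come from the description above, values are examples.
llm_args = {
    "temperature": 0.0,                # most deterministic sampling
    "top_k": 1,
    "seed": 0,                         # 0 picks a random seed
    "repetition_penalty": 1.07,
    "max_new_tokens": 1024,
    "min_max_new_tokens": 512,
    "response_format": "json_object",
    "guided_json": {                   # honored only by models that support guided generation
        "type": "object",
        "properties": {"answer": {"type": "string"}},
    },
    "enable_vision": "auto",
    "visible_vision_models": ["auto"],
    "reasoning_effort": 0,             # no extra chain-of-thought effort
    "cost_controls": {
        "max_cost": 0.05,                    # USD ceiling per LLM call during automatic routing
        "max_cost_per_million_tokens": 15.0,
        "model": ["model-a", "model-b"],     # placeholder subset of routable models
        "willingness_to_pay": 0.2,           # USD per +10% accuracy
        "willingness_to_wait": 2.0,          # seconds per +10% accuracy
    },
    "use_agent": False,
}
```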
self_reflection_config: Optional[Dict[str, Any]] = Field(default=None, description="A map with self reflection settings: * `llm_reflection` **(type=string, example=gpt-4-0613)** * `prompt_reflection` **(type=string, example=\\\"\\\"\\\"Prompt:\\\\\\\\n%s\\\\\\\\n\\\"\\\"\\\"\\\\\\\\n\\\\\\\\n\\\"\\\"\\\")** * `system_prompt_reflection` **(type=string)** * `llm_args_reflection` **(type=string, example={})** ")
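A similarly hedged sketch of `self_reflection_config`, using only the four keys listed above; the prompt and system-prompt values are placeholders modeled loosely on the examples embedded in the description.

```python
# Illustrative self-reflection settings; keys come from the description above.
self_reflection_config = {
    "llm_reflection": "gpt-4-0613",                                 # example model from the description
    "prompt_reflection": "Prompt:\n%s\n",                           # placeholder template
    "system_prompt_reflection": "Grade the answer for accuracy.",   # placeholder
    "llm_args_reflection": "{}",                                    # JSON-encoded llm_args, per the example
}
```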
rag_config: Optional[Dict[str, Any]] = Field(default=None, description="A map with arguments to control RAG (retrieval-augmented-generation) types.: * `rag_type` **(type=enum[auto, llm_only, rag, hyde1, hyde2, rag+, all_data])** RAG type options: * `auto` - Automatically select the best rag_type. * `llm_only` LLM Only - Answer the query without any supporting document contexts. Requires 1 LLM call. * `rag` RAG (Retrieval Augmented Generation) - Use supporting document contexts to answer the query. Requires 1 LLM call. * `hyde1` LLM Only + RAG composite - HyDE RAG (Hypothetical Document Embedding). Use 'LLM Only' response to find relevant contexts from a collection for generating a response. Requires 2 LLM calls. * `hyde2` HyDE + RAG composite - Use the 'HyDE RAG' response to find relevant contexts from a collection for generating a response. Requires 3 LLM calls. * `rag+` Summary RAG - Like RAG, but uses more context and recursive summarization to overcome LLM context limits. Keeps all retrieved chunks, puts them in order, adds neighboring chunks, then uses the summary API to get the answer. Can require several LLM calls. * `all_data` All Data RAG - Like Summary RAG, but includes all document chunks. Uses recursive summarization to overcome LLM context limits. Can require several LLM calls. * `hyde_no_rag_llm_prompt_extension` **(type=string, example=\\\\\\\\nKeep the answer brief, and list the 5 most relevant key words at the end.)** - Add this prompt to every user's prompt, when generating answers to be used for subsequent retrieval during HyDE. Only used when rag_type is `hyde1` or `hyde2`. * `num_neighbor_chunks_to_include` **(type=integer, default=1)** - A number of neighboring chunks to include for every retrieved relevant chunk. It helps to keep surrounding context together. Only enabled for rag_type `rag+`. * `meta_data_to_include` **(type=map)** - A map with flags that indicate whether each piece of document metadata is to be included as part of the context for a chat with a collection. * `name` **(type: boolean, default=True)** * `text` **(type: boolean, default=True)** * `page` **(type: boolean, default=True)** * `captions` **(type: boolean, default=True)** * `uri` **(type: boolean, default=False)** * `connector` **(type: boolean, default=False)** * `original_mtime` **(type: boolean, default=False)** * `age` **(type: boolean, default=False)** * `score` **(type: boolean, default=False)** * `rag_max_chunks` **(type=integer, default=-1)** - Maximum number of document chunks to retrieve for RAG. Actual number depends on rag_type and admin configuration. Set to >0 values to enable. Can be combined with rag_min_chunk_score. * `rag_min_chunk_score` **(type=double, default=0.0)** - Minimum score of document chunks to retrieve for RAG. Set to >0 values to enable. Can be combined with rag_max_chunks. ")
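And a hedged sketch of a `rag_config` map assembled only from the options documented above; the values are illustrative.

```python
# Illustrative rag_config; keys and allowed values come from the description above.
rag_config = {
    "rag_type": "rag+",                    # Summary RAG: more context via recursive summarization
    "num_neighbor_chunks_to_include": 1,   # only applies to rag+
    "meta_data_to_include": {
        "name": True,
        "text": True,
        "page": True,
        "captions": True,
        "uri": False,
        "connector": False,
        "original_mtime": False,
        "age": False,
        "score": False,
    },
    "rag_max_chunks": 20,                  # >0 enables the chunk-count cap
    "rag_min_chunk_score": 0.5,            # >0 enables the score filter
}
```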
include_chat_history: Optional[StrictStr] = Field(default=None, description="Whether to include chat history. Includes previous questions and answers for the current chat session for each new chat request. Disable if you require deterministic answers for a given question.")
@@ -47,7 +47,8 @@ class Collection(BaseModel):
inactivity_interval: Optional[StrictInt] = Field(default=None, description="The inactivity interval as an integer number of days.")
rag_type: Optional[StrictStr] = Field(default=None, description="RAG type options: * `auto` - Automatically select the best rag_type. * `llm_only` LLM Only - Answer the query without any supporting document contexts. Requires 1 LLM call. * `rag` RAG (Retrieval Augmented Generation) - Use supporting document contexts to answer the query. Requires 1 LLM call. * `hyde1` LLM Only + RAG composite - HyDE RAG (Hypothetical Document Embedding). Use 'LLM Only' response to find relevant contexts from a collection for generating a response. Requires 2 LLM calls. * `hyde2` HyDE + RAG composite - Use the 'HyDE RAG' response to find relevant contexts from a collection for generating a response. Requires 3 LLM calls. * `rag+` Summary RAG - Like RAG, but uses more context and recursive summarization to overcome LLM context limits. Keeps all retrieved chunks, puts them in order, adds neighboring chunks, then uses the summary API to get the answer. Can require several LLM calls. * `all_data` All Data RAG - Like Summary RAG, but includes all document chunks. Uses recursive summarization to overcome LLM context limits. Can require several LLM calls. ")
metadata_dict: Optional[Dict[str, Any]] = None
-
+ workspace: Optional[StrictStr] = Field(default=None, description="The workspace associated with the collection.")
+ __properties: ClassVar[List[str]] = ["id", "name", "description", "embedding_model", "document_count", "document_size", "created_at", "updated_at", "user_count", "is_public", "username", "sessions_count", "status", "prompt_template_id", "thumbnail", "size_limit", "expiry_date", "inactivity_interval", "rag_type", "metadata_dict", "workspace"]
@field_validator('rag_type')
def rag_type_validate_enum(cls, value):
@@ -129,7 +130,8 @@ class Collection(BaseModel):
"expiry_date": obj.get("expiry_date"),
"inactivity_interval": obj.get("inactivity_interval"),
"rag_type": obj.get("rag_type"),
- "metadata_dict": obj.get("metadata_dict")
+ "metadata_dict": obj.get("metadata_dict"),
+ "workspace": obj.get("workspace")
})
return _obj
@@ -35,7 +35,8 @@ class CollectionCreateRequest(BaseModel):
collection_settings: Optional[CollectionSettings] = None
chat_settings: Optional[ChatSettings] = None
expiry_date: Optional[datetime] = Field(default=None, description="Optional expiration date for the collection")
-
+ workspace: Optional[StrictStr] = None
+ __properties: ClassVar[List[str]] = ["name", "description", "embedding_model", "collection_settings", "chat_settings", "expiry_date", "workspace"]
model_config = ConfigDict(
populate_by_name=True,
@@ -99,7 +100,8 @@ class CollectionCreateRequest(BaseModel):
"embedding_model": obj.get("embedding_model"),
"collection_settings": CollectionSettings.from_dict(obj["collection_settings"]) if obj.get("collection_settings") is not None else None,
"chat_settings": ChatSettings.from_dict(obj["chat_settings"]) if obj.get("chat_settings") is not None else None,
- "expiry_date": obj.get("expiry_date")
+ "expiry_date": obj.get("expiry_date"),
+ "workspace": obj.get("workspace")
})
return _obj
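The net effect of the `CollectionCreateRequest` changes above is an optional `workspace` field on the collection-creation body. Below is a minimal sketch of such a body, assuming the field names from the `__properties` list; the endpoint, authentication, and required-field rules are not restated here and the values are placeholders.

```python
import json

# Collection-creation payload targeting a specific workspace; "workspace" is the
# newly added optional key, the other keys come from the __properties list above.
payload = {
    "name": "Quarterly reports",
    "description": "Finance PDFs for Q3",
    "embedding_model": "BAAI/bge-large-en-v1.5",
    "workspace": "finance-team",   # optional; placeholder value
}
print(json.dumps(payload, indent=2))
```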
@@ -0,0 +1,87 @@
# coding: utf-8

"""
h2oGPTe REST API
# Overview Users can easily interact with the h2oGPTe API through its REST API, allowing HTTP requests from any programming language. ## Authorization: Getting an API key Sign up/in at Enterprise h2oGPTe and generate one of the following two types of API keys: - **Global API key**: If a Collection is not specified when creating a new API Key, that key is considered to be a global API Key. Use global API Keys to grant full user impersonation and system-wide access to all of your work. Anyone with access to one of your global API Keys can create, delete, or interact with any of your past, current, and future Collections, Documents, Chats, and settings. - **Collection-specific API key**: Use Collection-specific API Keys to grant external access to only Chat with a specified Collection and make related API calls to it. Collection-specific API keys do not allow other API calls, such as creation, deletion, or access to other Collections or Chats. Access Enterprise h2oGPTe through your [H2O Generative AI](https://genai.h2o.ai/appstore) app store account, available with a freemium tier. ## Authorization: Using an API key All h2oGPTe REST API requests must include an API Key in the \"Authorization\" HTTP header, formatted as follows: ``` Authorization: Bearer sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ``` ```sh curl -X 'POST' \\ 'https://h2ogpte.genai.h2o.ai/api/v1/collections' \\ -H 'accept: application/json' \\ -H 'Content-Type: application/json' \\ -H 'Authorization: Bearer sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' \\ -d '{ \"name\": \"The name of my Collection\", \"description\": \"The description of my Collection\", \"embedding_model\": \"BAAI/bge-large-en-v1.5\" }' ``` ## Interactive h2oGPTe API testing This page only showcases the h2oGPTe REST API; you can test it directly in the [Swagger UI](https://h2ogpte.genai.h2o.ai/swagger-ui/). Ensure that you are logged into your Enterprise h2oGPTe account.

The version of the OpenAPI document: v1.0.0
Generated by OpenAPI Generator (https://openapi-generator.tech)

Do not edit the class manually.
""" # noqa: E501
from __future__ import annotations
import pprint
import re # noqa: F401
import json

from pydantic import BaseModel, ConfigDict, Field, StrictStr
from typing import Any, ClassVar, Dict, List, Optional
from typing import Optional, Set
from typing_extensions import Self

class CreateChatSessionRequest(BaseModel):
"""
CreateChatSessionRequest
""" # noqa: E501
workspace: Optional[StrictStr] = Field(default=None, description="Workspace to be associated with the chat session. If not provided, the collection's workspace or default workspace will be used.")
__properties: ClassVar[List[str]] = ["workspace"]

model_config = ConfigDict(
populate_by_name=True,
validate_assignment=True,
protected_namespaces=(),
)

def to_str(self) -> str:
"""Returns the string representation of the model using alias"""
return pprint.pformat(self.model_dump(by_alias=True))

def to_json(self) -> str:
"""Returns the JSON representation of the model using alias"""
# TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
return json.dumps(self.to_dict())

@classmethod
def from_json(cls, json_str: str) -> Optional[Self]:
"""Create an instance of CreateChatSessionRequest from a JSON string"""
return cls.from_dict(json.loads(json_str))

def to_dict(self) -> Dict[str, Any]:
"""Return the dictionary representation of the model using alias.

This has the following differences from calling pydantic's
`self.model_dump(by_alias=True)`:

* `None` is only added to the output dict for nullable fields that
were set at model initialization. Other fields with value `None`
are ignored.
"""
excluded_fields: Set[str] = set([
])

_dict = self.model_dump(
by_alias=True,
exclude=excluded_fields,
exclude_none=True,
)
return _dict

@classmethod
def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
"""Create an instance of CreateChatSessionRequest from a dict"""
if obj is None:
return None

if not isinstance(obj, dict):
return cls.model_validate(obj)

_obj = cls.model_validate({
"workspace": obj.get("workspace")
})
return _obj
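Since `create_chat_session_request.py` is new in this release, here is a short usage sketch based solely on the generated class above; only the workspace value is made up.

```python
from h2ogpte.rest_async.models.create_chat_session_request import CreateChatSessionRequest

# Build the body for a chat session pinned to a workspace (placeholder name).
req = CreateChatSessionRequest(workspace="finance-team")

# Round-trip through the generated helpers shown above.
body = req.to_json()                                  # '{"workspace": "finance-team"}'
assert CreateChatSessionRequest.from_json(body).workspace == "finance-team"

# Leaving workspace unset falls back to the collection's or default workspace
# (per the field description); exclude_none keeps it out of the serialized dict.
assert CreateChatSessionRequest().to_dict() == {}
```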