h2ogpte 1.6.41rc5__py3-none-any.whl → 1.6.43__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package versions exactly as they appear in their public registries.
Files changed (107)
  1. h2ogpte/__init__.py +1 -1
  2. h2ogpte/cli/__init__.py +0 -0
  3. h2ogpte/cli/commands/__init__.py +0 -0
  4. h2ogpte/cli/commands/command_handlers/__init__.py +0 -0
  5. h2ogpte/cli/commands/command_handlers/agent.py +41 -0
  6. h2ogpte/cli/commands/command_handlers/chat.py +37 -0
  7. h2ogpte/cli/commands/command_handlers/clear.py +8 -0
  8. h2ogpte/cli/commands/command_handlers/collection.py +67 -0
  9. h2ogpte/cli/commands/command_handlers/config.py +113 -0
  10. h2ogpte/cli/commands/command_handlers/disconnect.py +36 -0
  11. h2ogpte/cli/commands/command_handlers/exit.py +37 -0
  12. h2ogpte/cli/commands/command_handlers/help.py +8 -0
  13. h2ogpte/cli/commands/command_handlers/history.py +29 -0
  14. h2ogpte/cli/commands/command_handlers/rag.py +146 -0
  15. h2ogpte/cli/commands/command_handlers/research_agent.py +45 -0
  16. h2ogpte/cli/commands/command_handlers/session.py +77 -0
  17. h2ogpte/cli/commands/command_handlers/status.py +33 -0
  18. h2ogpte/cli/commands/dispatcher.py +79 -0
  19. h2ogpte/cli/core/__init__.py +0 -0
  20. h2ogpte/cli/core/app.py +105 -0
  21. h2ogpte/cli/core/config.py +199 -0
  22. h2ogpte/cli/core/encryption.py +104 -0
  23. h2ogpte/cli/core/session.py +171 -0
  24. h2ogpte/cli/integrations/__init__.py +0 -0
  25. h2ogpte/cli/integrations/agent.py +338 -0
  26. h2ogpte/cli/integrations/rag.py +442 -0
  27. h2ogpte/cli/main.py +90 -0
  28. h2ogpte/cli/ui/__init__.py +0 -0
  29. h2ogpte/cli/ui/hbot_prompt.py +435 -0
  30. h2ogpte/cli/ui/prompts.py +129 -0
  31. h2ogpte/cli/ui/status_bar.py +133 -0
  32. h2ogpte/cli/utils/__init__.py +0 -0
  33. h2ogpte/cli/utils/file_manager.py +411 -0
  34. h2ogpte/connectors.py +11 -0
  35. h2ogpte/h2ogpte.py +619 -69
  36. h2ogpte/h2ogpte_async.py +631 -70
  37. h2ogpte/h2ogpte_sync_base.py +8 -1
  38. h2ogpte/rest_async/__init__.py +8 -3
  39. h2ogpte/rest_async/api/chat_api.py +29 -0
  40. h2ogpte/rest_async/api/collections_api.py +293 -0
  41. h2ogpte/rest_async/api/document_ingestion_api.py +1365 -436
  42. h2ogpte/rest_async/api/extractors_api.py +2874 -70
  43. h2ogpte/rest_async/api/prompt_templates_api.py +32 -32
  44. h2ogpte/rest_async/api_client.py +1 -1
  45. h2ogpte/rest_async/configuration.py +1 -1
  46. h2ogpte/rest_async/models/__init__.py +7 -2
  47. h2ogpte/rest_async/models/chat_completion.py +4 -2
  48. h2ogpte/rest_async/models/chat_completion_delta.py +5 -3
  49. h2ogpte/rest_async/models/chat_completion_request.py +1 -1
  50. h2ogpte/rest_async/models/chat_session.py +4 -2
  51. h2ogpte/rest_async/models/chat_settings.py +1 -1
  52. h2ogpte/rest_async/models/collection.py +4 -2
  53. h2ogpte/rest_async/models/collection_create_request.py +4 -2
  54. h2ogpte/rest_async/models/confluence_credentials.py +89 -0
  55. h2ogpte/rest_async/models/create_chat_session_request.py +87 -0
  56. h2ogpte/rest_async/models/extraction_request.py +1 -1
  57. h2ogpte/rest_async/models/extractor.py +4 -2
  58. h2ogpte/rest_async/models/guardrails_settings.py +8 -4
  59. h2ogpte/rest_async/models/guardrails_settings_create_request.py +1 -1
  60. h2ogpte/rest_async/models/ingest_from_confluence_body.py +97 -0
  61. h2ogpte/rest_async/models/process_document_job_request.py +1 -1
  62. h2ogpte/rest_async/models/question_request.py +1 -1
  63. h2ogpte/rest_async/models/{reset_and_share_prompt_template_request.py → reset_and_share_request.py} +6 -6
  64. h2ogpte/{rest_sync/models/reset_and_share_prompt_template_with_groups_request.py → rest_async/models/reset_and_share_with_groups_request.py} +6 -6
  65. h2ogpte/rest_async/models/summarize_request.py +1 -1
  66. h2ogpte/rest_async/models/update_collection_privacy_request.py +6 -4
  67. h2ogpte/rest_async/models/update_collection_workspace_request.py +87 -0
  68. h2ogpte/rest_async/models/update_extractor_privacy_request.py +87 -0
  69. h2ogpte/rest_sync/__init__.py +8 -3
  70. h2ogpte/rest_sync/api/chat_api.py +29 -0
  71. h2ogpte/rest_sync/api/collections_api.py +293 -0
  72. h2ogpte/rest_sync/api/document_ingestion_api.py +1365 -436
  73. h2ogpte/rest_sync/api/extractors_api.py +2874 -70
  74. h2ogpte/rest_sync/api/prompt_templates_api.py +32 -32
  75. h2ogpte/rest_sync/api_client.py +1 -1
  76. h2ogpte/rest_sync/configuration.py +1 -1
  77. h2ogpte/rest_sync/models/__init__.py +7 -2
  78. h2ogpte/rest_sync/models/chat_completion.py +4 -2
  79. h2ogpte/rest_sync/models/chat_completion_delta.py +5 -3
  80. h2ogpte/rest_sync/models/chat_completion_request.py +1 -1
  81. h2ogpte/rest_sync/models/chat_session.py +4 -2
  82. h2ogpte/rest_sync/models/chat_settings.py +1 -1
  83. h2ogpte/rest_sync/models/collection.py +4 -2
  84. h2ogpte/rest_sync/models/collection_create_request.py +4 -2
  85. h2ogpte/rest_sync/models/confluence_credentials.py +89 -0
  86. h2ogpte/rest_sync/models/create_chat_session_request.py +87 -0
  87. h2ogpte/rest_sync/models/extraction_request.py +1 -1
  88. h2ogpte/rest_sync/models/extractor.py +4 -2
  89. h2ogpte/rest_sync/models/guardrails_settings.py +8 -4
  90. h2ogpte/rest_sync/models/guardrails_settings_create_request.py +1 -1
  91. h2ogpte/rest_sync/models/ingest_from_confluence_body.py +97 -0
  92. h2ogpte/rest_sync/models/process_document_job_request.py +1 -1
  93. h2ogpte/rest_sync/models/question_request.py +1 -1
  94. h2ogpte/rest_sync/models/{reset_and_share_prompt_template_request.py → reset_and_share_request.py} +6 -6
  95. h2ogpte/{rest_async/models/reset_and_share_prompt_template_with_groups_request.py → rest_sync/models/reset_and_share_with_groups_request.py} +6 -6
  96. h2ogpte/rest_sync/models/summarize_request.py +1 -1
  97. h2ogpte/rest_sync/models/update_collection_privacy_request.py +6 -4
  98. h2ogpte/rest_sync/models/update_collection_workspace_request.py +87 -0
  99. h2ogpte/rest_sync/models/update_extractor_privacy_request.py +87 -0
  100. h2ogpte/session.py +14 -2
  101. h2ogpte/session_async.py +33 -6
  102. h2ogpte/types.py +9 -1
  103. {h2ogpte-1.6.41rc5.dist-info → h2ogpte-1.6.43.dist-info}/METADATA +5 -1
  104. {h2ogpte-1.6.41rc5.dist-info → h2ogpte-1.6.43.dist-info}/RECORD +107 -64
  105. h2ogpte-1.6.43.dist-info/entry_points.txt +2 -0
  106. {h2ogpte-1.6.41rc5.dist-info → h2ogpte-1.6.43.dist-info}/WHEEL +0 -0
  107. {h2ogpte-1.6.41rc5.dist-info → h2ogpte-1.6.43.dist-info}/top_level.txt +0 -0
@@ -24,8 +24,8 @@ from h2ogpte.rest_sync.models.group_share_permission import GroupSharePermission
  from h2ogpte.rest_sync.models.prompt_template import PromptTemplate
  from h2ogpte.rest_sync.models.prompt_template_base import PromptTemplateBase
  from h2ogpte.rest_sync.models.prompt_template_create_request import PromptTemplateCreateRequest
- from h2ogpte.rest_sync.models.reset_and_share_prompt_template_request import ResetAndSharePromptTemplateRequest
- from h2ogpte.rest_sync.models.reset_and_share_prompt_template_with_groups_request import ResetAndSharePromptTemplateWithGroupsRequest
+ from h2ogpte.rest_sync.models.reset_and_share_request import ResetAndShareRequest
+ from h2ogpte.rest_sync.models.reset_and_share_with_groups_request import ResetAndShareWithGroupsRequest
  from h2ogpte.rest_sync.models.share_permission import SharePermission
  from h2ogpte.rest_sync.models.update_default_prompt_template_visibility_request import UpdateDefaultPromptTemplateVisibilityRequest
  from h2ogpte.rest_sync.models.update_prompt_template_privacy_request import UpdatePromptTemplatePrivacyRequest
@@ -2589,7 +2589,7 @@ class PromptTemplatesApi:
  def reset_and_share_prompt_template(
  self,
  prompt_template_id: Annotated[StrictStr, Field(description="Id of a prompt template.")],
- reset_and_share_prompt_template_request: Annotated[ResetAndSharePromptTemplateRequest, Field(description="List of usernames the prompt template should be shared with.")],
+ reset_and_share_request: Annotated[ResetAndShareRequest, Field(description="List of usernames the prompt template should be shared with.")],
  _request_timeout: Union[
  None,
  Annotated[StrictFloat, Field(gt=0)],
@@ -2609,8 +2609,8 @@ class PromptTemplatesApi:

  :param prompt_template_id: Id of a prompt template. (required)
  :type prompt_template_id: str
- :param reset_and_share_prompt_template_request: List of usernames the prompt template should be shared with. (required)
- :type reset_and_share_prompt_template_request: ResetAndSharePromptTemplateRequest
+ :param reset_and_share_request: List of usernames the prompt template should be shared with. (required)
+ :type reset_and_share_request: ResetAndShareRequest
  :param _request_timeout: timeout setting for this request. If one
  number provided, it will be total request
  timeout. It can also be a pair (tuple) of
@@ -2635,7 +2635,7 @@ class PromptTemplatesApi:

  _param = self._reset_and_share_prompt_template_serialize(
  prompt_template_id=prompt_template_id,
- reset_and_share_prompt_template_request=reset_and_share_prompt_template_request,
+ reset_and_share_request=reset_and_share_request,
  _request_auth=_request_auth,
  _content_type=_content_type,
  _headers=_headers,
@@ -2661,7 +2661,7 @@ class PromptTemplatesApi:
  def reset_and_share_prompt_template_with_http_info(
  self,
  prompt_template_id: Annotated[StrictStr, Field(description="Id of a prompt template.")],
- reset_and_share_prompt_template_request: Annotated[ResetAndSharePromptTemplateRequest, Field(description="List of usernames the prompt template should be shared with.")],
+ reset_and_share_request: Annotated[ResetAndShareRequest, Field(description="List of usernames the prompt template should be shared with.")],
  _request_timeout: Union[
  None,
  Annotated[StrictFloat, Field(gt=0)],
@@ -2681,8 +2681,8 @@ class PromptTemplatesApi:

  :param prompt_template_id: Id of a prompt template. (required)
  :type prompt_template_id: str
- :param reset_and_share_prompt_template_request: List of usernames the prompt template should be shared with. (required)
- :type reset_and_share_prompt_template_request: ResetAndSharePromptTemplateRequest
+ :param reset_and_share_request: List of usernames the prompt template should be shared with. (required)
+ :type reset_and_share_request: ResetAndShareRequest
  :param _request_timeout: timeout setting for this request. If one
  number provided, it will be total request
  timeout. It can also be a pair (tuple) of
@@ -2707,7 +2707,7 @@ class PromptTemplatesApi:

  _param = self._reset_and_share_prompt_template_serialize(
  prompt_template_id=prompt_template_id,
- reset_and_share_prompt_template_request=reset_and_share_prompt_template_request,
+ reset_and_share_request=reset_and_share_request,
  _request_auth=_request_auth,
  _content_type=_content_type,
  _headers=_headers,
@@ -2733,7 +2733,7 @@ class PromptTemplatesApi:
  def reset_and_share_prompt_template_without_preload_content(
  self,
  prompt_template_id: Annotated[StrictStr, Field(description="Id of a prompt template.")],
- reset_and_share_prompt_template_request: Annotated[ResetAndSharePromptTemplateRequest, Field(description="List of usernames the prompt template should be shared with.")],
+ reset_and_share_request: Annotated[ResetAndShareRequest, Field(description="List of usernames the prompt template should be shared with.")],
  _request_timeout: Union[
  None,
  Annotated[StrictFloat, Field(gt=0)],
@@ -2753,8 +2753,8 @@ class PromptTemplatesApi:

  :param prompt_template_id: Id of a prompt template. (required)
  :type prompt_template_id: str
- :param reset_and_share_prompt_template_request: List of usernames the prompt template should be shared with. (required)
- :type reset_and_share_prompt_template_request: ResetAndSharePromptTemplateRequest
+ :param reset_and_share_request: List of usernames the prompt template should be shared with. (required)
+ :type reset_and_share_request: ResetAndShareRequest
  :param _request_timeout: timeout setting for this request. If one
  number provided, it will be total request
  timeout. It can also be a pair (tuple) of
@@ -2779,7 +2779,7 @@ class PromptTemplatesApi:

  _param = self._reset_and_share_prompt_template_serialize(
  prompt_template_id=prompt_template_id,
- reset_and_share_prompt_template_request=reset_and_share_prompt_template_request,
+ reset_and_share_request=reset_and_share_request,
  _request_auth=_request_auth,
  _content_type=_content_type,
  _headers=_headers,
@@ -2800,7 +2800,7 @@ class PromptTemplatesApi:
  def _reset_and_share_prompt_template_serialize(
  self,
  prompt_template_id,
- reset_and_share_prompt_template_request,
+ reset_and_share_request,
  _request_auth,
  _content_type,
  _headers,
@@ -2828,8 +2828,8 @@ class PromptTemplatesApi:
  # process the header parameters
  # process the form parameters
  # process the body parameter
- if reset_and_share_prompt_template_request is not None:
- _body_params = reset_and_share_prompt_template_request
+ if reset_and_share_request is not None:
+ _body_params = reset_and_share_request


  # set the HTTP header `Accept`
@@ -2881,7 +2881,7 @@ class PromptTemplatesApi:
  def reset_and_share_prompt_template_with_groups(
  self,
  prompt_template_id: Annotated[StrictStr, Field(description="Id of a prompt template.")],
- reset_and_share_prompt_template_with_groups_request: Annotated[ResetAndSharePromptTemplateWithGroupsRequest, Field(description="List of group ids the prompt template should be shared with.")],
+ reset_and_share_with_groups_request: Annotated[ResetAndShareWithGroupsRequest, Field(description="List of group ids the prompt template should be shared with.")],
  _request_timeout: Union[
  None,
  Annotated[StrictFloat, Field(gt=0)],
@@ -2901,8 +2901,8 @@ class PromptTemplatesApi:

  :param prompt_template_id: Id of a prompt template. (required)
  :type prompt_template_id: str
- :param reset_and_share_prompt_template_with_groups_request: List of group ids the prompt template should be shared with. (required)
- :type reset_and_share_prompt_template_with_groups_request: ResetAndSharePromptTemplateWithGroupsRequest
+ :param reset_and_share_with_groups_request: List of group ids the prompt template should be shared with. (required)
+ :type reset_and_share_with_groups_request: ResetAndShareWithGroupsRequest
  :param _request_timeout: timeout setting for this request. If one
  number provided, it will be total request
  timeout. It can also be a pair (tuple) of
@@ -2927,7 +2927,7 @@ class PromptTemplatesApi:

  _param = self._reset_and_share_prompt_template_with_groups_serialize(
  prompt_template_id=prompt_template_id,
- reset_and_share_prompt_template_with_groups_request=reset_and_share_prompt_template_with_groups_request,
+ reset_and_share_with_groups_request=reset_and_share_with_groups_request,
  _request_auth=_request_auth,
  _content_type=_content_type,
  _headers=_headers,
@@ -2953,7 +2953,7 @@ class PromptTemplatesApi:
  def reset_and_share_prompt_template_with_groups_with_http_info(
  self,
  prompt_template_id: Annotated[StrictStr, Field(description="Id of a prompt template.")],
- reset_and_share_prompt_template_with_groups_request: Annotated[ResetAndSharePromptTemplateWithGroupsRequest, Field(description="List of group ids the prompt template should be shared with.")],
+ reset_and_share_with_groups_request: Annotated[ResetAndShareWithGroupsRequest, Field(description="List of group ids the prompt template should be shared with.")],
  _request_timeout: Union[
  None,
  Annotated[StrictFloat, Field(gt=0)],
@@ -2973,8 +2973,8 @@ class PromptTemplatesApi:

  :param prompt_template_id: Id of a prompt template. (required)
  :type prompt_template_id: str
- :param reset_and_share_prompt_template_with_groups_request: List of group ids the prompt template should be shared with. (required)
- :type reset_and_share_prompt_template_with_groups_request: ResetAndSharePromptTemplateWithGroupsRequest
+ :param reset_and_share_with_groups_request: List of group ids the prompt template should be shared with. (required)
+ :type reset_and_share_with_groups_request: ResetAndShareWithGroupsRequest
  :param _request_timeout: timeout setting for this request. If one
  number provided, it will be total request
  timeout. It can also be a pair (tuple) of
@@ -2999,7 +2999,7 @@ class PromptTemplatesApi:

  _param = self._reset_and_share_prompt_template_with_groups_serialize(
  prompt_template_id=prompt_template_id,
- reset_and_share_prompt_template_with_groups_request=reset_and_share_prompt_template_with_groups_request,
+ reset_and_share_with_groups_request=reset_and_share_with_groups_request,
  _request_auth=_request_auth,
  _content_type=_content_type,
  _headers=_headers,
@@ -3025,7 +3025,7 @@ class PromptTemplatesApi:
  def reset_and_share_prompt_template_with_groups_without_preload_content(
  self,
  prompt_template_id: Annotated[StrictStr, Field(description="Id of a prompt template.")],
- reset_and_share_prompt_template_with_groups_request: Annotated[ResetAndSharePromptTemplateWithGroupsRequest, Field(description="List of group ids the prompt template should be shared with.")],
+ reset_and_share_with_groups_request: Annotated[ResetAndShareWithGroupsRequest, Field(description="List of group ids the prompt template should be shared with.")],
  _request_timeout: Union[
  None,
  Annotated[StrictFloat, Field(gt=0)],
@@ -3045,8 +3045,8 @@ class PromptTemplatesApi:

  :param prompt_template_id: Id of a prompt template. (required)
  :type prompt_template_id: str
- :param reset_and_share_prompt_template_with_groups_request: List of group ids the prompt template should be shared with. (required)
- :type reset_and_share_prompt_template_with_groups_request: ResetAndSharePromptTemplateWithGroupsRequest
+ :param reset_and_share_with_groups_request: List of group ids the prompt template should be shared with. (required)
+ :type reset_and_share_with_groups_request: ResetAndShareWithGroupsRequest
  :param _request_timeout: timeout setting for this request. If one
  number provided, it will be total request
  timeout. It can also be a pair (tuple) of
@@ -3071,7 +3071,7 @@ class PromptTemplatesApi:

  _param = self._reset_and_share_prompt_template_with_groups_serialize(
  prompt_template_id=prompt_template_id,
- reset_and_share_prompt_template_with_groups_request=reset_and_share_prompt_template_with_groups_request,
+ reset_and_share_with_groups_request=reset_and_share_with_groups_request,
  _request_auth=_request_auth,
  _content_type=_content_type,
  _headers=_headers,
@@ -3092,7 +3092,7 @@ class PromptTemplatesApi:
  def _reset_and_share_prompt_template_with_groups_serialize(
  self,
  prompt_template_id,
- reset_and_share_prompt_template_with_groups_request,
+ reset_and_share_with_groups_request,
  _request_auth,
  _content_type,
  _headers,
@@ -3120,8 +3120,8 @@ class PromptTemplatesApi:
  # process the header parameters
  # process the form parameters
  # process the body parameter
- if reset_and_share_prompt_template_with_groups_request is not None:
- _body_params = reset_and_share_prompt_template_with_groups_request
+ if reset_and_share_with_groups_request is not None:
+ _body_params = reset_and_share_with_groups_request


  # set the HTTP header `Accept`
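In practical terms, the rename means callers of the generated `PromptTemplatesApi` sharing methods must switch to the new keyword and model names. A minimal sketch of an updated call site, assuming the request model exposes a plain list of usernames as the parameter description above suggests (the exact field name is not shown in this diff):

```python
from h2ogpte.rest_sync.api.prompt_templates_api import PromptTemplatesApi
from h2ogpte.rest_sync.models.reset_and_share_request import ResetAndShareRequest


def share_template(api: PromptTemplatesApi, template_id: str) -> None:
    # 1.6.41rc5: reset_and_share_prompt_template_request=ResetAndSharePromptTemplateRequest(...)
    # 1.6.43: the keyword and model are shortened as shown in the hunks above.
    api.reset_and_share_prompt_template(
        prompt_template_id=template_id,
        reset_and_share_request=ResetAndShareRequest(usernames=["alice", "bob"]),  # field name assumed
    )
```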
@@ -90,7 +90,7 @@ class ApiClient:
  self.default_headers[header_name] = header_value
  self.cookie = cookie
  # Set default User-Agent.
- self.user_agent = 'OpenAPI-Generator/1.6.41-dev5/python'
+ self.user_agent = 'OpenAPI-Generator/1.6.43/python'
  self.client_side_validation = configuration.client_side_validation

  def __enter__(self):
@@ -503,7 +503,7 @@ class Configuration:
  "OS: {env}\n"\
  "Python Version: {pyversion}\n"\
  "Version of the API: v1.0.0\n"\
- "SDK Package Version: 1.6.41-dev5".\
+ "SDK Package Version: 1.6.43".\
  format(env=sys.platform, pyversion=sys.version)

  def get_host_settings(self) -> List[HostSetting]:
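The bumped version strings surface in the generated client's default User-Agent and debug report, so the installed build is easy to confirm. A quick check, sketched under the assumption that the standard generated `Configuration`/`ApiClient` helpers (including `to_debug_report`) are present as in typical OpenAPI-generated Python clients:

```python
from h2ogpte.rest_sync.api_client import ApiClient
from h2ogpte.rest_sync.configuration import Configuration

config = Configuration()
with ApiClient(config) as client:
    print(client.user_agent)       # expected: 'OpenAPI-Generator/1.6.43/python'
print(config.to_debug_report())    # expected to include "SDK Package Version: 1.6.43"
```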
@@ -46,11 +46,13 @@ from h2ogpte.rest_sync.models.collection_create_request import CollectionCreateR
  from h2ogpte.rest_sync.models.collection_settings import CollectionSettings
  from h2ogpte.rest_sync.models.collection_update_request import CollectionUpdateRequest
  from h2ogpte.rest_sync.models.confirm_user_deletion_request import ConfirmUserDeletionRequest
+ from h2ogpte.rest_sync.models.confluence_credentials import ConfluenceCredentials
  from h2ogpte.rest_sync.models.count import Count
  from h2ogpte.rest_sync.models.count_with_queue_details import CountWithQueueDetails
  from h2ogpte.rest_sync.models.create_agent_key_request import CreateAgentKeyRequest
  from h2ogpte.rest_sync.models.create_agent_tool_key_associations_request import CreateAgentToolKeyAssociationsRequest
  from h2ogpte.rest_sync.models.create_agent_tool_request import CreateAgentToolRequest
+ from h2ogpte.rest_sync.models.create_chat_session_request import CreateChatSessionRequest
  from h2ogpte.rest_sync.models.create_import_collection_to_collection_job_request import CreateImportCollectionToCollectionJobRequest
  from h2ogpte.rest_sync.models.create_insert_document_to_collection_job_request import CreateInsertDocumentToCollectionJobRequest
  from h2ogpte.rest_sync.models.create_secret201_response import CreateSecret201Response
@@ -79,6 +81,7 @@ from h2ogpte.rest_sync.models.guardrails_settings_create_request import Guardrai
  from h2ogpte.rest_sync.models.h2_ogptgpu_info import H2OGPTGPUInfo
  from h2ogpte.rest_sync.models.h2_ogpt_system_info import H2OGPTSystemInfo
  from h2ogpte.rest_sync.models.ingest_from_azure_blob_storage_body import IngestFromAzureBlobStorageBody
+ from h2ogpte.rest_sync.models.ingest_from_confluence_body import IngestFromConfluenceBody
  from h2ogpte.rest_sync.models.ingest_from_file_system_body import IngestFromFileSystemBody
  from h2ogpte.rest_sync.models.ingest_from_gcs_body import IngestFromGcsBody
  from h2ogpte.rest_sync.models.ingest_from_s3_body import IngestFromS3Body
@@ -106,8 +109,8 @@ from h2ogpte.rest_sync.models.prompt_template_create_request import PromptTempla
  from h2ogpte.rest_sync.models.qa_feedback import QAFeedback
  from h2ogpte.rest_sync.models.question_request import QuestionRequest
  from h2ogpte.rest_sync.models.queue_details import QueueDetails
- from h2ogpte.rest_sync.models.reset_and_share_prompt_template_request import ResetAndSharePromptTemplateRequest
- from h2ogpte.rest_sync.models.reset_and_share_prompt_template_with_groups_request import ResetAndSharePromptTemplateWithGroupsRequest
+ from h2ogpte.rest_sync.models.reset_and_share_request import ResetAndShareRequest
+ from h2ogpte.rest_sync.models.reset_and_share_with_groups_request import ResetAndShareWithGroupsRequest
  from h2ogpte.rest_sync.models.role_create_request import RoleCreateRequest
  from h2ogpte.rest_sync.models.role_info import RoleInfo
  from h2ogpte.rest_sync.models.roles_reset_request import RolesResetRequest
@@ -130,9 +133,11 @@ from h2ogpte.rest_sync.models.update_agent_tool_preference_request import Update
  from h2ogpte.rest_sync.models.update_collection_expiry_date_request import UpdateCollectionExpiryDateRequest
  from h2ogpte.rest_sync.models.update_collection_inactivity_interval_request import UpdateCollectionInactivityIntervalRequest
  from h2ogpte.rest_sync.models.update_collection_privacy_request import UpdateCollectionPrivacyRequest
+ from h2ogpte.rest_sync.models.update_collection_workspace_request import UpdateCollectionWorkspaceRequest
  from h2ogpte.rest_sync.models.update_custom_agent_tool200_response import UpdateCustomAgentTool200Response
  from h2ogpte.rest_sync.models.update_custom_agent_tool_request import UpdateCustomAgentToolRequest
  from h2ogpte.rest_sync.models.update_default_prompt_template_visibility_request import UpdateDefaultPromptTemplateVisibilityRequest
+ from h2ogpte.rest_sync.models.update_extractor_privacy_request import UpdateExtractorPrivacyRequest
  from h2ogpte.rest_sync.models.update_prompt_template_privacy_request import UpdatePromptTemplatePrivacyRequest
  from h2ogpte.rest_sync.models.update_qa_feedback_request import UpdateQAFeedbackRequest
  from h2ogpte.rest_sync.models.update_secret_request import UpdateSecretRequest
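Because the package `__init__` re-exports them, the new models become importable directly from `h2ogpte.rest_sync.models` (and, per the mirrored file list entries, from `h2ogpte.rest_async.models`). A quick smoke test of the names added in this release:

```python
# All of these names are new re-exports in 1.6.43.
from h2ogpte.rest_sync.models import (
    ConfluenceCredentials,
    CreateChatSessionRequest,
    IngestFromConfluenceBody,
    UpdateCollectionWorkspaceRequest,
    UpdateExtractorPrivacyRequest,
)

for model in (ConfluenceCredentials, CreateChatSessionRequest, IngestFromConfluenceBody,
              UpdateCollectionWorkspaceRequest, UpdateExtractorPrivacyRequest):
    print(model.__name__)
```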
@@ -17,7 +17,7 @@ import pprint
  import re # noqa: F401
  import json

- from pydantic import BaseModel, ConfigDict, StrictStr
+ from pydantic import BaseModel, ConfigDict, Field, StrictStr
  from typing import Any, ClassVar, Dict, List
  from typing import Optional, Set
  from typing_extensions import Self
@@ -26,8 +26,9 @@ class ChatCompletion(BaseModel):
  """
  ChatCompletion
  """ # noqa: E501
+ message_id: StrictStr = Field(description="Id of the chat completion message")
  body: StrictStr
- __properties: ClassVar[List[str]] = ["body"]
+ __properties: ClassVar[List[str]] = ["message_id", "body"]

  model_config = ConfigDict(
  populate_by_name=True,
@@ -80,6 +81,7 @@ class ChatCompletion(BaseModel):
  return cls.model_validate(obj)

  _obj = cls.model_validate({
+ "message_id": obj.get("message_id"),
  "body": obj.get("body")
  })
  return _obj
@@ -17,8 +17,8 @@ import pprint
  import re # noqa: F401
  import json

- from pydantic import BaseModel, ConfigDict, StrictBool, StrictStr
- from typing import Any, ClassVar, Dict, List
+ from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr
+ from typing import Any, ClassVar, Dict, List, Optional
  from typing import Optional, Set
  from typing_extensions import Self

@@ -26,9 +26,10 @@ class ChatCompletionDelta(BaseModel):
  """
  ChatCompletionDelta
  """ # noqa: E501
+ message_id: Optional[StrictStr] = Field(default=None, description="Id of the chat completion message. The attribute is populated only in the last message (`finished==True`).")
  body: StrictStr
  finished: StrictBool
- __properties: ClassVar[List[str]] = ["body", "finished"]
+ __properties: ClassVar[List[str]] = ["message_id", "body", "finished"]

  model_config = ConfigDict(
  populate_by_name=True,
@@ -81,6 +82,7 @@ class ChatCompletionDelta(BaseModel):
  return cls.model_validate(obj)

  _obj = cls.model_validate({
+ "message_id": obj.get("message_id"),
  "body": obj.get("body"),
  "finished": obj.get("finished")
  })
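The new `message_id` attribute behaves differently on the two models: it is always present on `ChatCompletion`, while on `ChatCompletionDelta` it is only populated on the final delta (`finished == True`). A sketch of a streaming consumer that respects that contract; how the deltas are produced (websocket session, SSE, or the higher-level client) is outside the scope of this diff:

```python
from typing import Iterable, Optional, Tuple

from h2ogpte.rest_sync.models.chat_completion_delta import ChatCompletionDelta


def collect_stream(deltas: Iterable[ChatCompletionDelta]) -> Tuple[str, Optional[str]]:
    """Concatenate streamed bodies and return (full_text, message_id).

    message_id is expected only on the delta with finished=True (new in 1.6.43).
    """
    parts = []
    message_id: Optional[str] = None
    for delta in deltas:
        parts.append(delta.body)
        if delta.finished:
            message_id = delta.message_id
    return "".join(parts), message_id
```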
@@ -33,7 +33,7 @@ class ChatCompletionRequest(BaseModel):
  image_batch_final_prompt: Optional[StrictStr] = Field(default=None, description="A prompt for each image batch for vision models.")
  image_batch_image_prompt: Optional[StrictStr] = Field(default=None, description="A prompt to reduce all answers each image batch for vision models")
  llm: Optional[StrictStr] = Field(default=None, description="LLM name to send the query. Use \"auto\" for automatic model routing, set cost_controls of llm_args for detailed control over automatic routing.")
- llm_args: Optional[Dict[str, Any]] = Field(default=None, description="A map of arguments sent to LLM with query. * `temperature` **(type=double, default=0.0)** - A value used to modulate the next token probabilities. 0 is the most deterministic and 1 is most creative. * `top_k` **(type=integer, default=1)** - A number of highest probability vocabulary tokens to keep for top-k-filtering. * `top_p` **(type=double, default=0.0)** - If set to a value < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation. * `seed` **(type=integer, default=0)** - A seed for the random number generator when sampling during generation (if temp>0 or top_k>1 or top_p<1), seed=0 picks a random seed. * `repetition_penalty` **(type=double, default=1.07)** - A parameter for repetition penalty. 1.0 means no penalty. * `max_new_tokens` **(type=double, default=1024)** - A maximum number of new tokens to generate. This limit applies to each (map+reduce) step during summarization and each (map) step during extraction. * `min_max_new_tokens` **(type=integer, default=512)** - A minimum value for max_new_tokens when auto-adjusting for content of prompt, docs, etc. * `response_format` **(type=enum[text, json_object, json_code], default=text)** - An output type of LLM * `guided_json` **(type=map)** - If specified, the output will follow the JSON schema. * `guided_regex` **(type=string)** - If specified, the output will follow the regex pattern. Only for models that support guided generation. * `guided_choice` **(type=array[string])** - If specified, the output will be exactly one of the choices. Only for models that support guided generation. * `guided_grammar` **(type=string)** - If specified, the output will follow the context free grammar. Only for models that support guided generation. * `guided_whitespace_pattern` **(type=string)** - If specified, will override the default whitespace pattern for guided json decoding. Only for models that support guided generation. * `enable_vision` **(type=enum[on, off, auto], default=auto)** - Controls vision mode, send images to the LLM in addition to text chunks. * `visible_vision_models` **(type=array[string], default=[auto])** - Controls which vision model to use when processing images. Must provide exactly one model. [auto] for automatic. * `images_num_max` **(type=integer, default=None)** - Maximum number of images to process. * `json_preserve_system_prompt` **(type=boolean, default=None)** - Whether to preserve system prompt in JSON response. * `client_metadata` **(type=string, default=None)** - Additional metadata to send with the request. * `min_chars_per_yield` **(type=integer, default=1)** - Minimum characters to yield in streaming response. * `cost_controls` **(type=map)** A map with cost controls settings: * `max_cost` **(type=double)** - Sets the maximum allowed cost in USD per LLM call when doing Automatic model routing. If the estimated cost based on input and output token counts is higher than this limit, the request will fail as early as possible. * `max_cost_per_million_tokens` **(type=double)** - Only consider models that cost less than this value in USD per million tokens when doing automatic routing. Using the max of input and output cost. * `model` **(type=array[string])** - Optional subset of models to consider when doing automatic routing. If not specified, all models are considered. 
* `willingness_to_pay` **(type=double)** - Controls the willingness to pay extra for a more accurate model for every LLM call when doing automatic routing, in units of USD per +10% increase in accuracy. We start with the least accurate model. For each more accurate model, we accept it if the increase in estimated cost divided by the increase in estimated accuracy is no more than this value divided by 10%, up to the upper limit specified above. Lower values will try to keep the cost as low as possible, higher values will approach the cost limit to increase accuracy. 0 means unlimited. * `willingness_to_wait` **(type=double)** - Controls the willingness to wait longer for a more accurate model for every LLM call when doing automatic routing, in units of seconds per +10% increase in accuracy. We start with the least accurate model. For each more accurate model, we accept it if the increase in estimated time divided by the increase in estimated accuracy is no more than this value divided by 10%. Lower values will try to keep the time as low as possible, higher values will take longer to increase accuracy. 0 means unlimited. * `use_agent` **(type=boolean, default=False)** - If True, use the AI agent (with access to tools) to generate the response. * `agent_accuracy` **(type=string, default=\"standard\")** - Effort level by the agent. Only if use_agent=True. One of [\"quick\", \"basic\", \"standard\", \"maximum\"]. * `agent_max_turns` **(type=union[string, integer], default=\"auto\")** - Optional max. number of back-and-forth turns with the agent. Only if use_agent=True. Either \"auto\" or an integer. * `agent_tools` **(type=union[string, array[string]], default=\"auto\")** - Either \"auto\", \"all\", \"any\" to enable all available tools, or a specific list of tools to use. Only if use_agent=True. * `agent_type` **(type=string, default=\"auto\")** - Type of agent to use for task processing. * `agent_original_files` **(type=array[string], default=None)** - List of file paths for agent to process. * `agent_timeout` **(type=integer, default=None)** - Timeout in seconds for each agent turn. * `agent_total_timeout` **(type=integer, default=3600)** - Total timeout in seconds for all agent processing. * `agent_code_writer_system_message` **(type=string, default=None)** - System message for agent code writer. * `agent_num_executable_code_blocks_limit` **(type=integer, default=1)** - Maximum number of executable code blocks. * `agent_system_site_packages` **(type=boolean, default=True)** - Whether agent has access to system site packages. * `agent_main_model` **(type=string, default=None)** - Main model to use for agent. * `agent_max_stream_length` **(type=integer, default=None)** - Maximum stream length for agent response. * `agent_max_memory_usage` **(type=integer, default=16*1024**3)** - Maximum memory usage for agent in bytes (16GB default). * `agent_main_reasoning_effort` **(type=integer, default=None)** - Effort level for main reasoning. * `agent_advanced_reasoning_effort` **(type=integer, default=None)** - Effort level for advanced reasoning. * `agent_max_confidence_level` **(type=integer, default=None)** - Maximum confidence level for agent responses. * `agent_planning_forced_mode` **(type=boolean, default=None)** - Whether to force planning mode for agent. * `agent_too_soon_forced_mode` **(type=boolean, default=None)** - Whether to force \"too soon\" mode for agent. * `agent_critique_forced_mode` **(type=integer, default=None)** - Whether to force critique mode for agent. 
* `agent_stream_files` **(type=boolean, default=True)** - Whether to stream files from agent. ")
+ llm_args: Optional[Dict[str, Any]] = Field(default=None, description="A map of arguments sent to LLM with query. * `temperature` **(type=double, default=0.0)** - A value used to modulate the next token probabilities. 0 is the most deterministic and 1 is most creative. * `top_k` **(type=integer, default=1)** - A number of highest probability vocabulary tokens to keep for top-k-filtering. * `top_p` **(type=double, default=0.0)** - If set to a value < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation. * `seed` **(type=integer, default=0)** - A seed for the random number generator when sampling during generation (if temp>0 or top_k>1 or top_p<1), seed=0 picks a random seed. * `repetition_penalty` **(type=double, default=1.07)** - A parameter for repetition penalty. 1.0 means no penalty. * `max_new_tokens` **(type=double, default=1024)** - A maximum number of new tokens to generate. This limit applies to each (map+reduce) step during summarization and each (map) step during extraction. * `min_max_new_tokens` **(type=integer, default=512)** - A minimum value for max_new_tokens when auto-adjusting for content of prompt, docs, etc. * `response_format` **(type=enum[text, json_object, json_code], default=text)** - An output type of LLM * `guided_json` **(type=map)** - If specified, the output will follow the JSON schema. * `guided_regex` **(type=string)** - If specified, the output will follow the regex pattern. Only for models that support guided generation. * `guided_choice` **(type=array[string])** - If specified, the output will be exactly one of the choices. Only for models that support guided generation. * `guided_grammar` **(type=string)** - If specified, the output will follow the context free grammar. Only for models that support guided generation. * `guided_whitespace_pattern` **(type=string)** - If specified, will override the default whitespace pattern for guided json decoding. Only for models that support guided generation. * `enable_vision` **(type=enum[on, off, auto], default=auto)** - Controls vision mode, send images to the LLM in addition to text chunks. * `visible_vision_models` **(type=array[string], default=[auto])** - Controls which vision model to use when processing images. Must provide exactly one model. [auto] for automatic. * `images_num_max` **(type=integer, default=None)** - Maximum number of images to process. * `json_preserve_system_prompt` **(type=boolean, default=None)** - Whether to preserve system prompt in JSON response. * `client_metadata` **(type=string, default=None)** - Additional metadata to send with the request. * `min_chars_per_yield` **(type=integer, default=1)** - Minimum characters to yield in streaming response. * `reasoning_effort` **(type=integer, default=0)** - Level of reasoning effort for the model (higher values = deeper reasoning, e.g., 10000-65000). Use for models that support chain-of-thought reasoning. 0 means no additional reasoning effort. * `cost_controls` **(type=map)** A map with cost controls settings: * `max_cost` **(type=double)** - Sets the maximum allowed cost in USD per LLM call when doing Automatic model routing. If the estimated cost based on input and output token counts is higher than this limit, the request will fail as early as possible. * `max_cost_per_million_tokens` **(type=double)** - Only consider models that cost less than this value in USD per million tokens when doing automatic routing. Using the max of input and output cost. 
* `model` **(type=array[string])** - Optional subset of models to consider when doing automatic routing. If not specified, all models are considered. * `willingness_to_pay` **(type=double)** - Controls the willingness to pay extra for a more accurate model for every LLM call when doing automatic routing, in units of USD per +10% increase in accuracy. We start with the least accurate model. For each more accurate model, we accept it if the increase in estimated cost divided by the increase in estimated accuracy is no more than this value divided by 10%, up to the upper limit specified above. Lower values will try to keep the cost as low as possible, higher values will approach the cost limit to increase accuracy. 0 means unlimited. * `willingness_to_wait` **(type=double)** - Controls the willingness to wait longer for a more accurate model for every LLM call when doing automatic routing, in units of seconds per +10% increase in accuracy. We start with the least accurate model. For each more accurate model, we accept it if the increase in estimated time divided by the increase in estimated accuracy is no more than this value divided by 10%. Lower values will try to keep the time as low as possible, higher values will take longer to increase accuracy. 0 means unlimited. * `use_agent` **(type=boolean, default=False)** - If True, use the AI agent (with access to tools) to generate the response. * `agent_accuracy` **(type=string, default=\"standard\")** - Effort level by the agent. Only if use_agent=True. One of [\"quick\", \"basic\", \"standard\", \"maximum\"]. * `agent_max_turns` **(type=union[string, integer], default=\"auto\")** - Optional max. number of back-and-forth turns with the agent. Only if use_agent=True. Either \"auto\" or an integer. * `agent_tools` **(type=union[string, array[string]], default=\"auto\")** - Either \"auto\", \"all\", \"any\" to enable all available tools, or a specific list of tools to use. Only if use_agent=True. * `agent_type` **(type=string, default=\"auto\")** - Type of agent to use for task processing. * `agent_original_files` **(type=array[string], default=None)** - List of file paths for agent to process. * `agent_timeout` **(type=integer, default=None)** - Timeout in seconds for each agent turn. * `agent_total_timeout` **(type=integer, default=3600)** - Total timeout in seconds for all agent processing. * `agent_code_writer_system_message` **(type=string, default=None)** - System message for agent code writer. * `agent_num_executable_code_blocks_limit` **(type=integer, default=1)** - Maximum number of executable code blocks. * `agent_system_site_packages` **(type=boolean, default=True)** - Whether agent has access to system site packages. * `agent_main_model` **(type=string, default=None)** - Main model to use for agent. * `agent_max_stream_length` **(type=integer, default=None)** - Maximum stream length for agent response. * `agent_max_memory_usage` **(type=integer, default=16*1024**3)** - Maximum memory usage for agent in bytes (16GB default). * `agent_main_reasoning_effort` **(type=integer, default=None)** - Effort level for main reasoning. * `agent_advanced_reasoning_effort` **(type=integer, default=None)** - Effort level for advanced reasoning. * `agent_max_confidence_level` **(type=integer, default=None)** - Maximum confidence level for agent responses. * `agent_planning_forced_mode` **(type=boolean, default=None)** - Whether to force planning mode for agent. 
* `agent_too_soon_forced_mode` **(type=boolean, default=None)** - Whether to force \"too soon\" mode for agent. * `agent_critique_forced_mode` **(type=integer, default=None)** - Whether to force critique mode for agent. * `agent_query_understanding_parallel_calls` **(type=integer, default=None)** - Number of parallel calls for query understanding. * `tool_building_mode` **(type=string, default=None)** - Mode for tool building configuration. * `agent_stream_files` **(type=boolean, default=True)** - Whether to stream files from agent. ")
  self_reflection_config: Optional[Dict[str, Any]] = Field(default=None, description="A map with self reflection settings: * `llm_reflection` **(type=string, example=gpt-4-0613)** * `prompt_reflection` **(type=string, example=\\\"\\\"\\\"Prompt:\\\\\\\\n%s\\\\\\\\n\\\"\\\"\\\"\\\\\\\\n\\\\\\\\n\\\"\\\"\\\")** * `system_prompt_reflection` **(type=string)** * `llm_args_reflection` **(type=string, example={})** ")
  rag_config: Optional[Dict[str, Any]] = Field(default=None, description="A map with arguments to control RAG (retrieval-augmented-generation) types.: * `rag_type` **(type=enum[auto, llm_only, rag, hyde1, hyde2, rag+, all_data])** RAG type options: * `auto` - Automatically select the best rag_type. * `llm_only` LLM Only - Answer the query without any supporting document contexts. Requires 1 LLM call. * `rag` RAG (Retrieval Augmented Generation) - Use supporting document contexts to answer the query. Requires 1 LLM call. * `hyde1` LLM Only + RAG composite - HyDE RAG (Hypothetical Document Embedding). Use 'LLM Only' response to find relevant contexts from a collection for generating a response. Requires 2 LLM calls. * `hyde2` HyDE + RAG composite - Use the 'HyDE RAG' response to find relevant contexts from a collection for generating a response. Requires 3 LLM calls. * `rag+` Summary RAG - Like RAG, but uses more context and recursive summarization to overcome LLM context limits. Keeps all retrieved chunks, puts them in order, adds neighboring chunks, then uses the summary API to get the answer. Can require several LLM calls. * `all_data` All Data RAG - Like Summary RAG, but includes all document chunks. Uses recursive summarization to overcome LLM context limits. Can require several LLM calls. * `hyde_no_rag_llm_prompt_extension` **(type=string, example=\\\\\\\\nKeep the answer brief, and list the 5 most relevant key words at the end.)** - Add this prompt to every user's prompt, when generating answers to be used for subsequent retrieval during HyDE. Only used when rag_type is `hyde1` or `hyde2`. * `num_neighbor_chunks_to_include` **(type=integer, default=1)** - A number of neighboring chunks to include for every retrieved relevant chunk. It helps to keep surrounding context together. Only enabled for rag_type `rag+`. * `meta_data_to_include` **(type=map)** - A map with flags that indicate whether each piece of document metadata is to be included as part of the context for a chat with a collection. * `name` **(type: boolean, default=True)** * `text` **(type: boolean, default=True)** * `page` **(type: boolean, default=True)** * `captions` **(type: boolean, default=True)** * `uri` **(type: boolean, default=False)** * `connector` **(type: boolean, default=False)** * `original_mtime` **(type: boolean, default=False)** * `age` **(type: boolean, default=False)** * `score` **(type: boolean, default=False)** * `rag_max_chunks` **(type=integer, default=-1)** - Maximum number of document chunks to retrieve for RAG. Actual number depends on rag_type and admin configuration. Set to >0 values to enable. Can be combined with rag_min_chunk_score. * `rag_min_chunk_score` **(type=double, default=0.0)** - Minimum score of document chunks to retrieve for RAG. Set to >0 values to enable. Can be combined with rag_max_chunks. ")
  include_chat_history: Optional[StrictStr] = Field(default=None, description="Whether to include chat history. Includes previous questions and answers for the current chat session for each new chat request. Disable if require deterministic answers for a given question.")
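Among the documentation changes to `llm_args`, the notable additions are `reasoning_effort` plus the agent options `agent_query_understanding_parallel_calls` and `tool_building_mode`. A hedged sketch of an `llm_args` map using the new reasoning knob; the concrete values are illustrative and model-dependent, and whether a given deployment honors them is not something this diff confirms:

```python
# Values documented in the 1.6.43 ChatCompletionRequest description; tune per model.
llm_args = {
    "temperature": 0.0,
    "max_new_tokens": 1024,
    # New in 1.6.43: extra chain-of-thought budget for reasoning-capable models;
    # 0 (the default) disables it, documented examples range around 10000-65000.
    "reasoning_effort": 20000,
    "cost_controls": {
        "max_cost_per_million_tokens": 10.0,  # only route to models cheaper than this
    },
}
```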
@@ -34,7 +34,8 @@ class ChatSession(BaseModel):
  prompt_template_id: Optional[StrictStr] = None
  latest_message_content: Optional[StrictStr] = None
  updated_at: datetime
- __properties: ClassVar[List[str]] = ["id", "name", "collection_id", "collection_name", "prompt_template_id", "latest_message_content", "updated_at"]
+ workspace: Optional[StrictStr] = None
+ __properties: ClassVar[List[str]] = ["id", "name", "collection_id", "collection_name", "prompt_template_id", "latest_message_content", "updated_at", "workspace"]

  model_config = ConfigDict(
  populate_by_name=True,
@@ -93,7 +94,8 @@ class ChatSession(BaseModel):
  "collection_name": obj.get("collection_name"),
  "prompt_template_id": obj.get("prompt_template_id"),
  "latest_message_content": obj.get("latest_message_content"),
- "updated_at": obj.get("updated_at")
+ "updated_at": obj.get("updated_at"),
+ "workspace": obj.get("workspace")
  })
  return _obj

@@ -27,7 +27,7 @@ class ChatSettings(BaseModel):
  ChatSettings
  """ # noqa: E501
  llm: Optional[StrictStr] = Field(default=None, description="LLM name to send the query. Use \"auto\" for automatic model routing, set cost_controls of llm_args for detailed control over automatic routing.")
- llm_args: Optional[Dict[str, Any]] = Field(default=None, description="A map of arguments sent to LLM with query. * `temperature` **(type=double, default=0.0)** - A value used to modulate the next token probabilities. 0 is the most deterministic and 1 is most creative. * `top_k` **(type=integer, default=1)** - A number of highest probability vocabulary tokens to keep for top-k-filtering. * `top_p` **(type=double, default=0.0)** - If set to a value < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation. * `seed` **(type=integer, default=0)** - A seed for the random number generator when sampling during generation (if temp>0 or top_k>1 or top_p<1), seed=0 picks a random seed. * `repetition_penalty` **(type=double, default=1.07)** - A parameter for repetition penalty. 1.0 means no penalty. * `max_new_tokens` **(type=double, default=1024)** - A maximum number of new tokens to generate. This limit applies to each (map+reduce) step during summarization and each (map) step during extraction. * `min_max_new_tokens` **(type=integer, default=512)** - A minimum value for max_new_tokens when auto-adjusting for content of prompt, docs, etc. * `response_format` **(type=enum[text, json_object, json_code], default=text)** - An output type of LLM * `guided_json` **(type=map)** - If specified, the output will follow the JSON schema. * `guided_regex` **(type=string)** - If specified, the output will follow the regex pattern. Only for models that support guided generation. * `guided_choice` **(type=array[string])** - If specified, the output will be exactly one of the choices. Only for models that support guided generation. * `guided_grammar` **(type=string)** - If specified, the output will follow the context free grammar. Only for models that support guided generation. * `guided_whitespace_pattern` **(type=string)** - If specified, will override the default whitespace pattern for guided json decoding. Only for models that support guided generation. * `enable_vision` **(type=enum[on, off, auto], default=auto)** - Controls vision mode, send images to the LLM in addition to text chunks. * `visible_vision_models` **(type=array[string], default=[auto])** - Controls which vision model to use when processing images. Must provide exactly one model. [auto] for automatic. * `images_num_max` **(type=integer, default=None)** - Maximum number of images to process. * `json_preserve_system_prompt` **(type=boolean, default=None)** - Whether to preserve system prompt in JSON response. * `client_metadata` **(type=string, default=None)** - Additional metadata to send with the request. * `min_chars_per_yield` **(type=integer, default=1)** - Minimum characters to yield in streaming response. * `cost_controls` **(type=map)** A map with cost controls settings: * `max_cost` **(type=double)** - Sets the maximum allowed cost in USD per LLM call when doing Automatic model routing. If the estimated cost based on input and output token counts is higher than this limit, the request will fail as early as possible. * `max_cost_per_million_tokens` **(type=double)** - Only consider models that cost less than this value in USD per million tokens when doing automatic routing. Using the max of input and output cost. * `model` **(type=array[string])** - Optional subset of models to consider when doing automatic routing. If not specified, all models are considered. 
* `willingness_to_pay` **(type=double)** - Controls the willingness to pay extra for a more accurate model for every LLM call when doing automatic routing, in units of USD per +10% increase in accuracy. We start with the least accurate model. For each more accurate model, we accept it if the increase in estimated cost divided by the increase in estimated accuracy is no more than this value divided by 10%, up to the upper limit specified above. Lower values will try to keep the cost as low as possible, higher values will approach the cost limit to increase accuracy. 0 means unlimited. * `willingness_to_wait` **(type=double)** - Controls the willingness to wait longer for a more accurate model for every LLM call when doing automatic routing, in units of seconds per +10% increase in accuracy. We start with the least accurate model. For each more accurate model, we accept it if the increase in estimated time divided by the increase in estimated accuracy is no more than this value divided by 10%. Lower values will try to keep the time as low as possible, higher values will take longer to increase accuracy. 0 means unlimited. * `use_agent` **(type=boolean, default=False)** - If True, use the AI agent (with access to tools) to generate the response. * `agent_accuracy` **(type=string, default=\"standard\")** - Effort level by the agent. Only if use_agent=True. One of [\"quick\", \"basic\", \"standard\", \"maximum\"]. * `agent_max_turns` **(type=union[string, integer], default=\"auto\")** - Optional max. number of back-and-forth turns with the agent. Only if use_agent=True. Either \"auto\" or an integer. * `agent_tools` **(type=union[string, array[string]], default=\"auto\")** - Either \"auto\", \"all\", \"any\" to enable all available tools, or a specific list of tools to use. Only if use_agent=True. * `agent_type` **(type=string, default=\"auto\")** - Type of agent to use for task processing. * `agent_original_files` **(type=array[string], default=None)** - List of file paths for agent to process. * `agent_timeout` **(type=integer, default=None)** - Timeout in seconds for each agent turn. * `agent_total_timeout` **(type=integer, default=3600)** - Total timeout in seconds for all agent processing. * `agent_code_writer_system_message` **(type=string, default=None)** - System message for agent code writer. * `agent_num_executable_code_blocks_limit` **(type=integer, default=1)** - Maximum number of executable code blocks. * `agent_system_site_packages` **(type=boolean, default=True)** - Whether agent has access to system site packages. * `agent_main_model` **(type=string, default=None)** - Main model to use for agent. * `agent_max_stream_length` **(type=integer, default=None)** - Maximum stream length for agent response. * `agent_max_memory_usage` **(type=integer, default=16*1024**3)** - Maximum memory usage for agent in bytes (16GB default). * `agent_main_reasoning_effort` **(type=integer, default=None)** - Effort level for main reasoning. * `agent_advanced_reasoning_effort` **(type=integer, default=None)** - Effort level for advanced reasoning. * `agent_max_confidence_level` **(type=integer, default=None)** - Maximum confidence level for agent responses. * `agent_planning_forced_mode` **(type=boolean, default=None)** - Whether to force planning mode for agent. * `agent_too_soon_forced_mode` **(type=boolean, default=None)** - Whether to force \"too soon\" mode for agent. * `agent_critique_forced_mode` **(type=integer, default=None)** - Whether to force critique mode for agent. 
* `agent_stream_files` **(type=boolean, default=True)** - Whether to stream files from agent. ")
+ llm_args: Optional[Dict[str, Any]] = Field(default=None, description="A map of arguments sent to LLM with query. * `temperature` **(type=double, default=0.0)** - A value used to modulate the next token probabilities. 0 is the most deterministic and 1 is most creative. * `top_k` **(type=integer, default=1)** - A number of highest probability vocabulary tokens to keep for top-k-filtering. * `top_p` **(type=double, default=0.0)** - If set to a value < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation. * `seed` **(type=integer, default=0)** - A seed for the random number generator when sampling during generation (if temp>0 or top_k>1 or top_p<1), seed=0 picks a random seed. * `repetition_penalty` **(type=double, default=1.07)** - A parameter for repetition penalty. 1.0 means no penalty. * `max_new_tokens` **(type=double, default=1024)** - A maximum number of new tokens to generate. This limit applies to each (map+reduce) step during summarization and each (map) step during extraction. * `min_max_new_tokens` **(type=integer, default=512)** - A minimum value for max_new_tokens when auto-adjusting for content of prompt, docs, etc. * `response_format` **(type=enum[text, json_object, json_code], default=text)** - An output type of LLM * `guided_json` **(type=map)** - If specified, the output will follow the JSON schema. * `guided_regex` **(type=string)** - If specified, the output will follow the regex pattern. Only for models that support guided generation. * `guided_choice` **(type=array[string])** - If specified, the output will be exactly one of the choices. Only for models that support guided generation. * `guided_grammar` **(type=string)** - If specified, the output will follow the context free grammar. Only for models that support guided generation. * `guided_whitespace_pattern` **(type=string)** - If specified, will override the default whitespace pattern for guided json decoding. Only for models that support guided generation. * `enable_vision` **(type=enum[on, off, auto], default=auto)** - Controls vision mode, send images to the LLM in addition to text chunks. * `visible_vision_models` **(type=array[string], default=[auto])** - Controls which vision model to use when processing images. Must provide exactly one model. [auto] for automatic. * `images_num_max` **(type=integer, default=None)** - Maximum number of images to process. * `json_preserve_system_prompt` **(type=boolean, default=None)** - Whether to preserve system prompt in JSON response. * `client_metadata` **(type=string, default=None)** - Additional metadata to send with the request. * `min_chars_per_yield` **(type=integer, default=1)** - Minimum characters to yield in streaming response. * `reasoning_effort` **(type=integer, default=0)** - Level of reasoning effort for the model (higher values = deeper reasoning, e.g., 10000-65000). Use for models that support chain-of-thought reasoning. 0 means no additional reasoning effort. * `cost_controls` **(type=map)** A map with cost controls settings: * `max_cost` **(type=double)** - Sets the maximum allowed cost in USD per LLM call when doing Automatic model routing. If the estimated cost based on input and output token counts is higher than this limit, the request will fail as early as possible. * `max_cost_per_million_tokens` **(type=double)** - Only consider models that cost less than this value in USD per million tokens when doing automatic routing. Using the max of input and output cost. 
* `model` **(type=array[string])** - Optional subset of models to consider when doing automatic routing. If not specified, all models are considered. * `willingness_to_pay` **(type=double)** - Controls the willingness to pay extra for a more accurate model for every LLM call when doing automatic routing, in units of USD per +10% increase in accuracy. We start with the least accurate model. For each more accurate model, we accept it if the increase in estimated cost divided by the increase in estimated accuracy is no more than this value divided by 10%, up to the upper limit specified above. Lower values will try to keep the cost as low as possible, higher values will approach the cost limit to increase accuracy. 0 means unlimited. * `willingness_to_wait` **(type=double)** - Controls the willingness to wait longer for a more accurate model for every LLM call when doing automatic routing, in units of seconds per +10% increase in accuracy. We start with the least accurate model. For each more accurate model, we accept it if the increase in estimated time divided by the increase in estimated accuracy is no more than this value divided by 10%. Lower values will try to keep the time as low as possible, higher values will take longer to increase accuracy. 0 means unlimited. * `use_agent` **(type=boolean, default=False)** - If True, use the AI agent (with access to tools) to generate the response. * `agent_accuracy` **(type=string, default=\"standard\")** - Effort level by the agent. Only if use_agent=True. One of [\"quick\", \"basic\", \"standard\", \"maximum\"]. * `agent_max_turns` **(type=union[string, integer], default=\"auto\")** - Optional max. number of back-and-forth turns with the agent. Only if use_agent=True. Either \"auto\" or an integer. * `agent_tools` **(type=union[string, array[string]], default=\"auto\")** - Either \"auto\", \"all\", \"any\" to enable all available tools, or a specific list of tools to use. Only if use_agent=True. * `agent_type` **(type=string, default=\"auto\")** - Type of agent to use for task processing. * `agent_original_files` **(type=array[string], default=None)** - List of file paths for agent to process. * `agent_timeout` **(type=integer, default=None)** - Timeout in seconds for each agent turn. * `agent_total_timeout` **(type=integer, default=3600)** - Total timeout in seconds for all agent processing. * `agent_code_writer_system_message` **(type=string, default=None)** - System message for agent code writer. * `agent_num_executable_code_blocks_limit` **(type=integer, default=1)** - Maximum number of executable code blocks. * `agent_system_site_packages` **(type=boolean, default=True)** - Whether agent has access to system site packages. * `agent_main_model` **(type=string, default=None)** - Main model to use for agent. * `agent_max_stream_length` **(type=integer, default=None)** - Maximum stream length for agent response. * `agent_max_memory_usage` **(type=integer, default=16*1024**3)** - Maximum memory usage for agent in bytes (16GB default). * `agent_main_reasoning_effort` **(type=integer, default=None)** - Effort level for main reasoning. * `agent_advanced_reasoning_effort` **(type=integer, default=None)** - Effort level for advanced reasoning. * `agent_max_confidence_level` **(type=integer, default=None)** - Maximum confidence level for agent responses. * `agent_planning_forced_mode` **(type=boolean, default=None)** - Whether to force planning mode for agent. 
* `agent_too_soon_forced_mode` **(type=boolean, default=None)** - Whether to force \"too soon\" mode for agent. * `agent_critique_forced_mode` **(type=integer, default=None)** - Whether to force critique mode for agent. * `agent_query_understanding_parallel_calls` **(type=integer, default=None)** - Number of parallel calls for query understanding. * `tool_building_mode` **(type=string, default=None)** - Mode for tool building configuration. * `agent_stream_files` **(type=boolean, default=True)** - Whether to stream files from agent. ")
  self_reflection_config: Optional[Dict[str, Any]] = Field(default=None, description="A map with self reflection settings: * `llm_reflection` **(type=string, example=gpt-4-0613)** * `prompt_reflection` **(type=string, example=\\\"\\\"\\\"Prompt:\\\\\\\\n%s\\\\\\\\n\\\"\\\"\\\"\\\\\\\\n\\\\\\\\n\\\"\\\"\\\")** * `system_prompt_reflection` **(type=string)** * `llm_args_reflection` **(type=string, example={})** ")
  rag_config: Optional[Dict[str, Any]] = Field(default=None, description="A map with arguments to control RAG (retrieval-augmented-generation) types.: * `rag_type` **(type=enum[auto, llm_only, rag, hyde1, hyde2, rag+, all_data])** RAG type options: * `auto` - Automatically select the best rag_type. * `llm_only` LLM Only - Answer the query without any supporting document contexts. Requires 1 LLM call. * `rag` RAG (Retrieval Augmented Generation) - Use supporting document contexts to answer the query. Requires 1 LLM call. * `hyde1` LLM Only + RAG composite - HyDE RAG (Hypothetical Document Embedding). Use 'LLM Only' response to find relevant contexts from a collection for generating a response. Requires 2 LLM calls. * `hyde2` HyDE + RAG composite - Use the 'HyDE RAG' response to find relevant contexts from a collection for generating a response. Requires 3 LLM calls. * `rag+` Summary RAG - Like RAG, but uses more context and recursive summarization to overcome LLM context limits. Keeps all retrieved chunks, puts them in order, adds neighboring chunks, then uses the summary API to get the answer. Can require several LLM calls. * `all_data` All Data RAG - Like Summary RAG, but includes all document chunks. Uses recursive summarization to overcome LLM context limits. Can require several LLM calls. * `hyde_no_rag_llm_prompt_extension` **(type=string, example=\\\\\\\\nKeep the answer brief, and list the 5 most relevant key words at the end.)** - Add this prompt to every user's prompt, when generating answers to be used for subsequent retrieval during HyDE. Only used when rag_type is `hyde1` or `hyde2`. * `num_neighbor_chunks_to_include` **(type=integer, default=1)** - A number of neighboring chunks to include for every retrieved relevant chunk. It helps to keep surrounding context together. Only enabled for rag_type `rag+`. * `meta_data_to_include` **(type=map)** - A map with flags that indicate whether each piece of document metadata is to be included as part of the context for a chat with a collection. * `name` **(type: boolean, default=True)** * `text` **(type: boolean, default=True)** * `page` **(type: boolean, default=True)** * `captions` **(type: boolean, default=True)** * `uri` **(type: boolean, default=False)** * `connector` **(type: boolean, default=False)** * `original_mtime` **(type: boolean, default=False)** * `age` **(type: boolean, default=False)** * `score` **(type: boolean, default=False)** * `rag_max_chunks` **(type=integer, default=-1)** - Maximum number of document chunks to retrieve for RAG. Actual number depends on rag_type and admin configuration. Set to >0 values to enable. Can be combined with rag_min_chunk_score. * `rag_min_chunk_score` **(type=double, default=0.0)** - Minimum score of document chunks to retrieve for RAG. Set to >0 values to enable. Can be combined with rag_max_chunks. ")
  include_chat_history: Optional[StrictStr] = Field(default=None, description="Whether to include chat history. Includes previous questions and answers for the current chat session for each new chat request. Disable if you require deterministic answers for a given question.")
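The `llm_args` and `rag_config` maps documented above are sent as plain JSON objects with the chat request. The sketch below shows how a caller might assemble them; the key names and defaults come from the field descriptions above, while the endpoint path and the exact placement of the fields in the request body are assumptions, not confirmed by this diff.

```python
# Minimal sketch (assumed endpoint path): assembling llm_args and rag_config
# for a chat request. Key names mirror the field descriptions above.
import requests

llm_args = {
    "temperature": 0.0,        # deterministic decoding
    "max_new_tokens": 1024,
    "reasoning_effort": 0,     # 0 = no extra chain-of-thought effort
    "cost_controls": {
        "max_cost_per_million_tokens": 10.0,  # cap model cost during auto-routing
    },
}

rag_config = {
    "rag_type": "rag+",                   # Summary RAG
    "num_neighbor_chunks_to_include": 1,
    "rag_max_chunks": 20,                 # >0 enables the chunk limit
    "rag_min_chunk_score": 0.5,
}

# The completions route below is illustrative only; consult the Swagger UI for
# the exact path exposed by your deployment.
resp = requests.post(
    "https://h2ogpte.genai.h2o.ai/api/v1/chat_sessions/<session-id>/completions",
    headers={
        "Authorization": "Bearer sk-XXXXXXXX",
        "Content-Type": "application/json",
    },
    json={
        "message": "Summarize the key risks in the uploaded contracts.",
        "llm_args": llm_args,
        "rag_config": rag_config,
    },
)
resp.raise_for_status()
print(resp.json())
```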
@@ -47,7 +47,8 @@ class Collection(BaseModel):
  inactivity_interval: Optional[StrictInt] = Field(default=None, description="The inactivity interval as an integer number of days.")
  rag_type: Optional[StrictStr] = Field(default=None, description="RAG type options: * `auto` - Automatically select the best rag_type. * `llm_only` LLM Only - Answer the query without any supporting document contexts. Requires 1 LLM call. * `rag` RAG (Retrieval Augmented Generation) - Use supporting document contexts to answer the query. Requires 1 LLM call. * `hyde1` LLM Only + RAG composite - HyDE RAG (Hypothetical Document Embedding). Use 'LLM Only' response to find relevant contexts from a collection for generating a response. Requires 2 LLM calls. * `hyde2` HyDE + RAG composite - Use the 'HyDE RAG' response to find relevant contexts from a collection for generating a response. Requires 3 LLM calls. * `rag+` Summary RAG - Like RAG, but uses more context and recursive summarization to overcome LLM context limits. Keeps all retrieved chunks, puts them in order, adds neighboring chunks, then uses the summary API to get the answer. Can require several LLM calls. * `all_data` All Data RAG - Like Summary RAG, but includes all document chunks. Uses recursive summarization to overcome LLM context limits. Can require several LLM calls. ")
  metadata_dict: Optional[Dict[str, Any]] = None
- __properties: ClassVar[List[str]] = ["id", "name", "description", "embedding_model", "document_count", "document_size", "created_at", "updated_at", "user_count", "is_public", "username", "sessions_count", "status", "prompt_template_id", "thumbnail", "size_limit", "expiry_date", "inactivity_interval", "rag_type", "metadata_dict"]
+ workspace: Optional[StrictStr] = Field(default=None, description="The workspace associated with the collection.")
+ __properties: ClassVar[List[str]] = ["id", "name", "description", "embedding_model", "document_count", "document_size", "created_at", "updated_at", "user_count", "is_public", "username", "sessions_count", "status", "prompt_template_id", "thumbnail", "size_limit", "expiry_date", "inactivity_interval", "rag_type", "metadata_dict", "workspace"]
 
  @field_validator('rag_type')
  def rag_type_validate_enum(cls, value):
@@ -129,7 +130,8 @@ class Collection(BaseModel):
  "expiry_date": obj.get("expiry_date"),
  "inactivity_interval": obj.get("inactivity_interval"),
  "rag_type": obj.get("rag_type"),
- "metadata_dict": obj.get("metadata_dict")
+ "metadata_dict": obj.get("metadata_dict"),
+ "workspace": obj.get("workspace")
  })
  return _obj
 
@@ -35,7 +35,8 @@ class CollectionCreateRequest(BaseModel):
  collection_settings: Optional[CollectionSettings] = None
  chat_settings: Optional[ChatSettings] = None
  expiry_date: Optional[datetime] = Field(default=None, description="Optional expiration date for the collection")
- __properties: ClassVar[List[str]] = ["name", "description", "embedding_model", "collection_settings", "chat_settings", "expiry_date"]
+ workspace: Optional[StrictStr] = None
+ __properties: ClassVar[List[str]] = ["name", "description", "embedding_model", "collection_settings", "chat_settings", "expiry_date", "workspace"]
 
  model_config = ConfigDict(
  populate_by_name=True,
@@ -99,7 +100,8 @@ class CollectionCreateRequest(BaseModel):
  "embedding_model": obj.get("embedding_model"),
  "collection_settings": CollectionSettings.from_dict(obj["collection_settings"]) if obj.get("collection_settings") is not None else None,
  "chat_settings": ChatSettings.from_dict(obj["chat_settings"]) if obj.get("chat_settings") is not None else None,
- "expiry_date": obj.get("expiry_date")
+ "expiry_date": obj.get("expiry_date"),
+ "workspace": obj.get("workspace")
  })
  return _obj
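`CollectionCreateRequest` now accepts an optional `workspace` value alongside the existing fields. A minimal sketch of a create call that sets it is shown below; the diff does not say whether a workspace name or an identifier is expected, so the value here is only a placeholder.

```python
# Minimal sketch: creating a collection with the new optional "workspace"
# field. The workspace value is a placeholder; check your deployment for the
# expected identifier format.
import requests

resp = requests.post(
    "https://h2ogpte.genai.h2o.ai/api/v1/collections",
    headers={
        "Authorization": "Bearer sk-XXXXXXXX",
        "Content-Type": "application/json",
    },
    json={
        "name": "Quarterly reports",
        "description": "Finance PDFs for Q3",
        "embedding_model": "BAAI/bge-large-en-v1.5",
        "workspace": "my-workspace",   # new in this release
    },
)
resp.raise_for_status()
print(resp.json())
```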
 
@@ -0,0 +1,89 @@
+ # coding: utf-8
+
+ """
+ h2oGPTe REST API
+
+ # Overview Users can easily interact with the h2oGPTe API through its REST API, allowing HTTP requests from any programming language. ## Authorization: Getting an API key Sign up/in at Enterprise h2oGPTe and generate one of the following two types of API keys: - **Global API key**: If a Collection is not specified when creating a new API Key, that key is considered to be a global API Key. Use global API Keys to grant full user impersonation and system-wide access to all of your work. Anyone with access to one of your global API Keys can create, delete, or interact with any of your past, current, and future Collections, Documents, Chats, and settings. - **Collection-specific API key**: Use Collection-specific API Keys to grant external access to only Chat with a specified Collection and make related API calls to it. Collection-specific API keys do not allow other API calls, such as creation, deletion, or access to other Collections or Chats. Access Enterprise h2oGPTe through your [H2O Generative AI](https://genai.h2o.ai/appstore) app store account, available with a freemium tier. ## Authorization: Using an API key All h2oGPTe REST API requests must include an API Key in the \"Authorization\" HTTP header, formatted as follows: ``` Authorization: Bearer sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ``` ```sh curl -X 'POST' \\ 'https://h2ogpte.genai.h2o.ai/api/v1/collections' \\ -H 'accept: application/json' \\ -H 'Content-Type: application/json' \\ -H 'Authorization: Bearer sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' \\ -d '{ \"name\": \"The name of my Collection\", \"description\": \"The description of my Collection\", \"embedding_model\": \"BAAI/bge-large-en-v1.5\" }' ``` ## Interactive h2oGPTe API testing This page only showcases the h2oGPTe REST API; you can test it directly in the [Swagger UI](https://h2ogpte.genai.h2o.ai/swagger-ui/). Ensure that you are logged into your Enterprise h2oGPTe account.
+
+ The version of the OpenAPI document: v1.0.0
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+ """ # noqa: E501
+
+
+ from __future__ import annotations
+ import pprint
+ import re # noqa: F401
+ import json
+
+ from pydantic import BaseModel, ConfigDict, Field, StrictStr
+ from typing import Any, ClassVar, Dict, List
+ from typing import Optional, Set
+ from typing_extensions import Self
+
+ class ConfluenceCredentials(BaseModel):
+ """
+ The object with Confluence credentials.
+ """ # noqa: E501
+ username: StrictStr = Field(description="Name or email of the user.")
+ password: StrictStr = Field(description="Password or API token.")
+ __properties: ClassVar[List[str]] = ["username", "password"]
+
+ model_config = ConfigDict(
+ populate_by_name=True,
+ validate_assignment=True,
+ protected_namespaces=(),
+ )
+
+
+ def to_str(self) -> str:
+ """Returns the string representation of the model using alias"""
+ return pprint.pformat(self.model_dump(by_alias=True))
+
+ def to_json(self) -> str:
+ """Returns the JSON representation of the model using alias"""
+ # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+ return json.dumps(self.to_dict())
+
+ @classmethod
+ def from_json(cls, json_str: str) -> Optional[Self]:
+ """Create an instance of ConfluenceCredentials from a JSON string"""
+ return cls.from_dict(json.loads(json_str))
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Return the dictionary representation of the model using alias.
+
+ This has the following differences from calling pydantic's
+ `self.model_dump(by_alias=True)`:
+
+ * `None` is only added to the output dict for nullable fields that
+ were set at model initialization. Other fields with value `None`
+ are ignored.
+ """
+ excluded_fields: Set[str] = set([
+ ])
+
+ _dict = self.model_dump(
+ by_alias=True,
+ exclude=excluded_fields,
+ exclude_none=True,
+ )
+ return _dict
+
+ @classmethod
+ def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+ """Create an instance of ConfluenceCredentials from a dict"""
+ if obj is None:
+ return None
+
+ if not isinstance(obj, dict):
+ return cls.model_validate(obj)
+
+ _obj = cls.model_validate({
+ "username": obj.get("username"),
+ "password": obj.get("password")
+ })
+ return _obj
+
+
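`ConfluenceCredentials` is a plain two-field model, presumably consumed by the Confluence ingestion support added elsewhere in this release. A minimal usage sketch, exercising only the helpers defined above (values are placeholders):

```python
# Minimal sketch: constructing and round-tripping ConfluenceCredentials.
# The import path matches the new module added in this diff.
from h2ogpte.rest_async.models.confluence_credentials import ConfluenceCredentials

creds = ConfluenceCredentials(
    username="jane.doe@example.com",   # name or email of the Confluence user
    password="atlassian-api-token",    # password or API token
)

as_json = creds.to_json()                           # serialize via to_dict()/json.dumps
restored = ConfluenceCredentials.from_json(as_json) # parse back into the model
assert restored is not None and restored.username == creds.username
print(creds.to_dict())                              # {'username': ..., 'password': ...}
```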