h2ogpte 1.6.53rc1__py3-none-any.whl → 1.6.54__py3-none-any.whl

This diff shows the changes between two publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
h2ogpte/__init__.py CHANGED
@@ -3,7 +3,7 @@ from h2ogpte.h2ogpte import H2OGPTE
3
3
  from h2ogpte.h2ogpte_async import H2OGPTEAsync
4
4
  from h2ogpte.session_async import SessionAsync
5
5
 
6
- __version__ = "1.6.53rc1"
6
+ __version__ = "1.6.54"
7
7
 
8
8
  __all__ = [
9
9
  "H2OGPTE",
h2ogpte/h2ogpte.py CHANGED
@@ -2496,7 +2496,6 @@ class H2OGPTE(H2OGPTESyncBase):
2496
2496
  base_url: str,
2497
2497
  page_id: Union[str, List[str]],
2498
2498
  credentials: ConfluenceCredential,
2499
- include_attachments: Union[bool, None] = None,
2500
2499
  gen_doc_summaries: Union[bool, None] = None,
2501
2500
  gen_doc_questions: Union[bool, None] = None,
2502
2501
  audio_input_language: Union[str, None] = None,
@@ -2520,8 +2519,6 @@ class H2OGPTE(H2OGPTESyncBase):
2520
2519
  The page id or ids of pages to be ingested.
2521
2520
  credentials:
2522
2521
  The object with Confluence credentials.
2523
- include_attachments:
2524
- A flag indicating whether to also ingest attachments with the page.
2525
2522
  gen_doc_summaries:
2526
2523
  Whether to auto-generate document summaries (uses LLM)
2527
2524
  gen_doc_questions:
@@ -2563,7 +2560,6 @@ class H2OGPTE(H2OGPTESyncBase):
2563
2560
  page_ids=[page_id] if isinstance(page_id, str) else page_id,
2564
2561
  credentials=rest.ConfluenceCredentials(**credentials.__dict__),
2565
2562
  metadata=metadata,
2566
- include_attachments=include_attachments,
2567
2563
  ),
2568
2564
  gen_doc_summaries=gen_doc_summaries,
2569
2565
  gen_doc_questions=gen_doc_questions,
@@ -4518,8 +4514,6 @@ class H2OGPTE(H2OGPTESyncBase):
4518
4514
  Requires 1 LLM or Agent call.
4519
4515
  :code:`"agent_only"` Agent Only - Answer the query with only original files passed to agent.
4520
4516
  Requires 1 Agent call.
4521
- :code:`"agentic_rag"` Agentic RAG - Agent with RAG tool that retrieves and answers from collection.
4522
- Requires 1 Agent call with RAG tool execution.
4523
4517
  :code:`"rag"` RAG (Retrieval Augmented Generation) - Use supporting document contexts
4524
4518
  to answer the query. Requires 1 LLM or Agent call.
4525
4519
  :code:`"hyde1"` LLM Only + RAG composite - HyDE RAG (Hypothetical Document Embedding).
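For users of the high-level sync client, the practical effect of these `h2ogpte.py` changes is that Confluence ingestion no longer accepts `include_attachments`, and `"agentic_rag"` is no longer listed among the documented `rag_type` values. Below is a minimal, hedged sketch of the call under the new signature; the method name `ingest_from_confluence`, the target-collection parameter, and the `ConfluenceCredential` fields are assumptions not shown in this diff.

```python
# Hypothetical usage sketch for 1.6.54 (names outside the hunk above are assumptions):
# Confluence pages are ingested without the include_attachments flag.
from h2ogpte import H2OGPTE
from h2ogpte.connectors import ConfluenceCredential  # assumed import location

client = H2OGPTE(address="https://h2ogpte.genai.h2o.ai", api_key="sk-XXXX")
client.ingest_from_confluence(                       # assumed method name
    collection_id="<collection-id>",                 # assumed parameter
    base_url="https://example.atlassian.net/wiki",
    page_id=["12345", "67890"],                      # a single id or a list of ids
    credentials=ConfluenceCredential(
        username="user@example.com",                 # credential fields are assumed
        password="<api-token>",
    ),
    gen_doc_summaries=False,
    gen_doc_questions=False,
    # include_attachments=True                       # no longer accepted in 1.6.54
)
```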
h2ogpte/h2ogpte_async.py CHANGED
@@ -2698,7 +2698,6 @@ class H2OGPTEAsync:
2698
2698
  base_url: str,
2699
2699
  page_id: Union[str, List[str]],
2700
2700
  credentials: ConfluenceCredential,
2701
- include_attachments: Union[bool, None] = None,
2702
2701
  gen_doc_summaries: Union[bool, None] = None,
2703
2702
  gen_doc_questions: Union[bool, None] = None,
2704
2703
  audio_input_language: Union[str, None] = None,
@@ -2722,8 +2721,6 @@ class H2OGPTEAsync:
2722
2721
  The page id or ids of pages to be ingested.
2723
2722
  credentials:
2724
2723
  The object with Confluence credentials.
2725
- include_attachments:
2726
- A flag indicating whether to also ingest attachments with the page.
2727
2724
  gen_doc_summaries:
2728
2725
  Whether to auto-generate document summaries (uses LLM)
2729
2726
  gen_doc_questions:
@@ -2765,7 +2762,6 @@ class H2OGPTEAsync:
2765
2762
  page_ids=[page_id] if isinstance(page_id, str) else page_id,
2766
2763
  credentials=rest.ConfluenceCredentials(**credentials.__dict__),
2767
2764
  metadata=metadata,
2768
- include_attachments=include_attachments,
2769
2765
  ),
2770
2766
  gen_doc_summaries=gen_doc_summaries,
2771
2767
  gen_doc_questions=gen_doc_questions,
@@ -4726,8 +4722,6 @@ class H2OGPTEAsync:
4726
4722
  Requires 1 LLM or Agent call.
4727
4723
  :code:`"agent_only"` Agent Only - Answer the query with only original files passed to agent.
4728
4724
  Requires 1 Agent call.
4729
- :code:`"agentic_rag"` Agentic RAG - Agent with RAG tool that retrieves and answers from collection.
4730
- Requires 1 Agent call with RAG tool execution.
4731
4725
  :code:`"rag"` RAG (Retrieval Augmented Generation) - Use supporting document contexts
4732
4726
  to answer the query. Requires 1 LLM or Agent call.
4733
4727
  :code:`"hyde1"` LLM Only + RAG composite - HyDE RAG (Hypothetical Document Embedding).
h2ogpte/rest_async/__init__.py CHANGED
@@ -14,7 +14,7 @@
14
14
  """ # noqa: E501
15
15
 
16
16
 
17
- __version__ = "1.6.53-dev1"
17
+ __version__ = "1.6.54"
18
18
 
19
19
  # import apis into sdk package
20
20
  from h2ogpte.rest_async.api.api_keys_api import APIKeysApi
@@ -69,7 +69,6 @@ from h2ogpte.rest_async.models.chat_message_reference import ChatMessageReferenc
69
69
  from h2ogpte.rest_async.models.chat_session import ChatSession
70
70
  from h2ogpte.rest_async.models.chat_session_update_request import ChatSessionUpdateRequest
71
71
  from h2ogpte.rest_async.models.chat_settings import ChatSettings
72
- from h2ogpte.rest_async.models.chat_settings_tags import ChatSettingsTags
73
72
  from h2ogpte.rest_async.models.chunk import Chunk
74
73
  from h2ogpte.rest_async.models.chunk_search_result import ChunkSearchResult
75
74
  from h2ogpte.rest_async.models.collection import Collection
@@ -159,7 +158,6 @@ from h2ogpte.rest_async.models.suggested_question import SuggestedQuestion
159
158
  from h2ogpte.rest_async.models.summarize_request import SummarizeRequest
160
159
  from h2ogpte.rest_async.models.tag import Tag
161
160
  from h2ogpte.rest_async.models.tag_create_request import TagCreateRequest
162
- from h2ogpte.rest_async.models.tag_filter import TagFilter
163
161
  from h2ogpte.rest_async.models.tag_update_request import TagUpdateRequest
164
162
  from h2ogpte.rest_async.models.update_agent_key_request import UpdateAgentKeyRequest
165
163
  from h2ogpte.rest_async.models.update_agent_tool_preference_request import UpdateAgentToolPreferenceRequest
h2ogpte/rest_async/api_client.py CHANGED
@@ -90,7 +90,7 @@ class ApiClient:
90
90
  self.default_headers[header_name] = header_value
91
91
  self.cookie = cookie
92
92
  # Set default User-Agent.
93
- self.user_agent = 'OpenAPI-Generator/1.6.53-dev1/python'
93
+ self.user_agent = 'OpenAPI-Generator/1.6.54/python'
94
94
  self.client_side_validation = configuration.client_side_validation
95
95
 
96
96
  async def __aenter__(self):
h2ogpte/rest_async/configuration.py CHANGED
@@ -499,7 +499,7 @@ class Configuration:
499
499
  "OS: {env}\n"\
500
500
  "Python Version: {pyversion}\n"\
501
501
  "Version of the API: v1.0.0\n"\
502
- "SDK Package Version: 1.6.53-dev1".\
502
+ "SDK Package Version: 1.6.54".\
503
503
  format(env=sys.platform, pyversion=sys.version)
504
504
 
505
505
  def get_host_settings(self) -> List[HostSetting]:
h2ogpte/rest_async/models/__init__.py CHANGED
@@ -38,7 +38,6 @@ from h2ogpte.rest_async.models.chat_message_reference import ChatMessageReferenc
38
38
  from h2ogpte.rest_async.models.chat_session import ChatSession
39
39
  from h2ogpte.rest_async.models.chat_session_update_request import ChatSessionUpdateRequest
40
40
  from h2ogpte.rest_async.models.chat_settings import ChatSettings
41
- from h2ogpte.rest_async.models.chat_settings_tags import ChatSettingsTags
42
41
  from h2ogpte.rest_async.models.chunk import Chunk
43
42
  from h2ogpte.rest_async.models.chunk_search_result import ChunkSearchResult
44
43
  from h2ogpte.rest_async.models.collection import Collection
@@ -128,7 +127,6 @@ from h2ogpte.rest_async.models.suggested_question import SuggestedQuestion
128
127
  from h2ogpte.rest_async.models.summarize_request import SummarizeRequest
129
128
  from h2ogpte.rest_async.models.tag import Tag
130
129
  from h2ogpte.rest_async.models.tag_create_request import TagCreateRequest
131
- from h2ogpte.rest_async.models.tag_filter import TagFilter
132
130
  from h2ogpte.rest_async.models.tag_update_request import TagUpdateRequest
133
131
  from h2ogpte.rest_async.models.update_agent_key_request import UpdateAgentKeyRequest
134
132
  from h2ogpte.rest_async.models.update_agent_tool_preference_request import UpdateAgentToolPreferenceRequest
h2ogpte/rest_async/models/chat_completion_request.py CHANGED
@@ -19,7 +19,6 @@ import json
19
19
 
20
20
  from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr, field_validator
21
21
  from typing import Any, ClassVar, Dict, List, Optional
22
- from h2ogpte.rest_async.models.chat_settings_tags import ChatSettingsTags
23
22
  from typing import Optional, Set
24
23
  from typing_extensions import Self
25
24
 
@@ -38,7 +37,7 @@ class ChatCompletionRequest(BaseModel):
38
37
  self_reflection_config: Optional[Dict[str, Any]] = Field(default=None, description="A map with self reflection settings: * `llm_reflection` **(type=string, example=gpt-4-0613)** * `prompt_reflection` **(type=string, example=\\\"\\\"\\\"Prompt:\\\\\\\\n%s\\\\\\\\n\\\"\\\"\\\"\\\\\\\\n\\\\\\\\n\\\"\\\"\\\")** * `system_prompt_reflection` **(type=string)** * `llm_args_reflection` **(type=string, example={})** ")
39
38
  rag_config: Optional[Dict[str, Any]] = Field(default=None, description="A map with arguments to control RAG (retrieval-augmented-generation) types.: * `rag_type` **(type=enum[auto, llm_only, rag, hyde1, hyde2, rag+, all_data])** RAG type options: * `auto` - Automatically select the best rag_type. * `llm_only` LLM Only - Answer the query without any supporting document contexts. Requires 1 LLM call. * `rag` RAG (Retrieval Augmented Generation) - Use supporting document contexts to answer the query. Requires 1 LLM call. * `hyde1` LLM Only + RAG composite - HyDE RAG (Hypothetical Document Embedding). Use 'LLM Only' response to find relevant contexts from a collection for generating a response. Requires 2 LLM calls. * `hyde2` HyDE + RAG composite - Use the 'HyDE RAG' response to find relevant contexts from a collection for generating a response. Requires 3 LLM calls. * `rag+` Summary RAG - Like RAG, but uses more context and recursive summarization to overcome LLM context limits. Keeps all retrieved chunks, puts them in order, adds neighboring chunks, then uses the summary API to get the answer. Can require several LLM calls. * `all_data` All Data RAG - Like Summary RAG, but includes all document chunks. Uses recursive summarization to overcome LLM context limits. Can require several LLM calls. * `hyde_no_rag_llm_prompt_extension` **(type=string, example=\\\\\\\\nKeep the answer brief, and list the 5 most relevant key words at the end.)** - Add this prompt to every user's prompt, when generating answers to be used for subsequent retrieval during HyDE. Only used when rag_type is `hyde1` or `hyde2`. * `num_neighbor_chunks_to_include` **(type=integer, default=1)** - A number of neighboring chunks to include for every retrieved relevant chunk. It helps to keep surrounding context together. Only enabled for rag_type `rag+`. * `meta_data_to_include` **(type=map)** - A map with flags that indicate whether each piece of document metadata is to be included as part of the context for a chat with a collection. * `name` **(type: boolean, default=True)** * `text` **(type: boolean, default=True)** * `page` **(type: boolean, default=True)** * `captions` **(type: boolean, default=True)** * `uri` **(type: boolean, default=False)** * `connector` **(type: boolean, default=False)** * `original_mtime` **(type: boolean, default=False)** * `age` **(type: boolean, default=False)** * `score` **(type: boolean, default=False)** * `rag_max_chunks` **(type=integer, default=-1)** - Maximum number of document chunks to retrieve for RAG. Actual number depends on rag_type and admin configuration. Set to >0 values to enable. Can be combined with rag_min_chunk_score. * `rag_min_chunk_score` **(type=double, default=0.0)** - Minimum score of document chunks to retrieve for RAG. Set to >0 values to enable. Can be combined with rag_max_chunks. ")
40
39
  include_chat_history: Optional[StrictStr] = Field(default=None, description="Whether to include chat history. Includes previous questions and answers for the current chat session for each new chat request. Disable if require deterministic answers for a given question.")
41
- tags: Optional[ChatSettingsTags] = None
40
+ tags: Optional[List[StrictStr]] = Field(default=None, description="A list of tags from which to pull the context for RAG.")
42
41
  __properties: ClassVar[List[str]] = ["message", "system_prompt", "pre_prompt_query", "prompt_query", "image_batch_final_prompt", "image_batch_image_prompt", "llm", "llm_args", "self_reflection_config", "rag_config", "include_chat_history", "tags"]
43
42
 
44
43
  @field_validator('include_chat_history')
@@ -90,9 +89,6 @@ class ChatCompletionRequest(BaseModel):
90
89
  exclude=excluded_fields,
91
90
  exclude_none=True,
92
91
  )
93
- # override the default output from pydantic by calling `to_dict()` of tags
94
- if self.tags:
95
- _dict['tags'] = self.tags.to_dict()
96
92
  return _dict
97
93
 
98
94
  @classmethod
@@ -116,7 +112,7 @@ class ChatCompletionRequest(BaseModel):
116
112
  "self_reflection_config": obj.get("self_reflection_config"),
117
113
  "rag_config": obj.get("rag_config"),
118
114
  "include_chat_history": obj.get("include_chat_history"),
119
- "tags": ChatSettingsTags.from_dict(obj["tags"]) if obj.get("tags") is not None else None,
115
+ "tags": obj.get("tags"),
120
116
  })
121
117
  return _obj
122
118
 
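The change above replaces the dedicated `ChatSettingsTags` model with a plain list of strings on `ChatCompletionRequest.tags`, so the field now serializes directly as a JSON array. A small sketch of the updated model, assuming `message` is the only required field:

```python
# Sketch only: construct the generated request model with tags as a list of strings.
from h2ogpte.rest_async.models.chat_completion_request import ChatCompletionRequest

req = ChatCompletionRequest(
    message="Summarize the tagged documents.",
    tags=["finance", "2024"],   # previously ChatSettingsTags(...); now a plain list
)
print(req.to_dict())            # tags is emitted as ["finance", "2024"]
```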
h2ogpte/rest_async/models/chat_settings.py CHANGED
@@ -19,7 +19,6 @@ import json
19
19
 
20
20
  from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator
21
21
  from typing import Any, ClassVar, Dict, List, Optional
22
- from h2ogpte.rest_async.models.chat_settings_tags import ChatSettingsTags
23
22
  from typing import Optional, Set
24
23
  from typing_extensions import Self
25
24
 
@@ -32,7 +31,7 @@ class ChatSettings(BaseModel):
32
31
  self_reflection_config: Optional[Dict[str, Any]] = Field(default=None, description="A map with self reflection settings: * `llm_reflection` **(type=string, example=gpt-4-0613)** * `prompt_reflection` **(type=string, example=\\\"\\\"\\\"Prompt:\\\\\\\\n%s\\\\\\\\n\\\"\\\"\\\"\\\\\\\\n\\\\\\\\n\\\"\\\"\\\")** * `system_prompt_reflection` **(type=string)** * `llm_args_reflection` **(type=string, example={})** ")
33
32
  rag_config: Optional[Dict[str, Any]] = Field(default=None, description="A map with arguments to control RAG (retrieval-augmented-generation) types.: * `rag_type` **(type=enum[auto, llm_only, rag, hyde1, hyde2, rag+, all_data])** RAG type options: * `auto` - Automatically select the best rag_type. * `llm_only` LLM Only - Answer the query without any supporting document contexts. Requires 1 LLM call. * `rag` RAG (Retrieval Augmented Generation) - Use supporting document contexts to answer the query. Requires 1 LLM call. * `hyde1` LLM Only + RAG composite - HyDE RAG (Hypothetical Document Embedding). Use 'LLM Only' response to find relevant contexts from a collection for generating a response. Requires 2 LLM calls. * `hyde2` HyDE + RAG composite - Use the 'HyDE RAG' response to find relevant contexts from a collection for generating a response. Requires 3 LLM calls. * `rag+` Summary RAG - Like RAG, but uses more context and recursive summarization to overcome LLM context limits. Keeps all retrieved chunks, puts them in order, adds neighboring chunks, then uses the summary API to get the answer. Can require several LLM calls. * `all_data` All Data RAG - Like Summary RAG, but includes all document chunks. Uses recursive summarization to overcome LLM context limits. Can require several LLM calls. * `hyde_no_rag_llm_prompt_extension` **(type=string, example=\\\\\\\\nKeep the answer brief, and list the 5 most relevant key words at the end.)** - Add this prompt to every user's prompt, when generating answers to be used for subsequent retrieval during HyDE. Only used when rag_type is `hyde1` or `hyde2`. * `num_neighbor_chunks_to_include` **(type=integer, default=1)** - A number of neighboring chunks to include for every retrieved relevant chunk. It helps to keep surrounding context together. Only enabled for rag_type `rag+`. * `meta_data_to_include` **(type=map)** - A map with flags that indicate whether each piece of document metadata is to be included as part of the context for a chat with a collection. * `name` **(type: boolean, default=True)** * `text` **(type: boolean, default=True)** * `page` **(type: boolean, default=True)** * `captions` **(type: boolean, default=True)** * `uri` **(type: boolean, default=False)** * `connector` **(type: boolean, default=False)** * `original_mtime` **(type: boolean, default=False)** * `age` **(type: boolean, default=False)** * `score` **(type: boolean, default=False)** * `rag_max_chunks` **(type=integer, default=-1)** - Maximum number of document chunks to retrieve for RAG. Actual number depends on rag_type and admin configuration. Set to >0 values to enable. Can be combined with rag_min_chunk_score. * `rag_min_chunk_score` **(type=double, default=0.0)** - Minimum score of document chunks to retrieve for RAG. Set to >0 values to enable. Can be combined with rag_max_chunks. ")
34
33
  include_chat_history: Optional[StrictStr] = Field(default=None, description="Whether to include chat history. Includes previous questions and answers for the current chat session for each new chat request. Disable if require deterministic answers for a given question.")
35
- tags: Optional[ChatSettingsTags] = None
34
+ tags: Optional[List[StrictStr]] = Field(default=None, description="A list of tags from which to pull the context for RAG.")
36
35
  __properties: ClassVar[List[str]] = ["llm", "llm_args", "self_reflection_config", "rag_config", "include_chat_history", "tags"]
37
36
 
38
37
  @field_validator('include_chat_history')
@@ -41,8 +40,8 @@ class ChatSettings(BaseModel):
41
40
  if value is None:
42
41
  return value
43
42
 
44
- if value not in set(['true', 'false', 'auto']):
45
- raise ValueError("must be one of enum values ('true', 'false', 'auto')")
43
+ if value not in set(['on', 'off', 'auto']):
44
+ raise ValueError("must be one of enum values ('on', 'off', 'auto')")
46
45
  return value
47
46
 
48
47
  model_config = ConfigDict(
@@ -84,9 +83,6 @@ class ChatSettings(BaseModel):
84
83
  exclude=excluded_fields,
85
84
  exclude_none=True,
86
85
  )
87
- # override the default output from pydantic by calling `to_dict()` of tags
88
- if self.tags:
89
- _dict['tags'] = self.tags.to_dict()
90
86
  return _dict
91
87
 
92
88
  @classmethod
@@ -104,7 +100,7 @@ class ChatSettings(BaseModel):
104
100
  "self_reflection_config": obj.get("self_reflection_config"),
105
101
  "rag_config": obj.get("rag_config"),
106
102
  "include_chat_history": obj.get("include_chat_history"),
107
- "tags": ChatSettingsTags.from_dict(obj["tags"]) if obj.get("tags") is not None else None
103
+ "tags": obj.get("tags")
108
104
  })
109
105
  return _obj
110
106
 
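Two things change in `ChatSettings`: `tags` becomes a plain list of strings, and the accepted values for `include_chat_history` move from `'true'/'false'/'auto'` to `'on'/'off'/'auto'`. A short, assumed usage sketch of the generated model:

```python
# Sketch only: the validator now accepts "on"/"off"/"auto"; the old string values fail.
from pydantic import ValidationError
from h2ogpte.rest_async.models.chat_settings import ChatSettings

settings = ChatSettings(include_chat_history="on", tags=["finance"])
print(settings.to_dict())

try:
    ChatSettings(include_chat_history="true")    # accepted before 1.6.54
except ValidationError as err:
    print(err)                                   # must be one of enum values ('on', 'off', 'auto')
```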
h2ogpte/rest_async/models/ingest_from_confluence_body.py CHANGED
@@ -17,7 +17,7 @@ import pprint
17
17
  import re # noqa: F401
18
18
  import json
19
19
 
20
- from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr
20
+ from pydantic import BaseModel, ConfigDict, Field, StrictStr
21
21
  from typing import Any, ClassVar, Dict, List, Optional
22
22
  from h2ogpte.rest_async.models.confluence_credentials import ConfluenceCredentials
23
23
  from typing import Optional, Set
@@ -29,10 +29,9 @@ class IngestFromConfluenceBody(BaseModel):
29
29
  """ # noqa: E501
30
30
  base_url: StrictStr = Field(description="Base url of the confluence instance.")
31
31
  page_ids: List[StrictStr] = Field(description="Ids of pages to be ingested.")
32
- include_attachments: Optional[StrictBool] = Field(default=False, description="A flag indicating whether to also ingest attachments with the page.")
33
32
  credentials: ConfluenceCredentials
34
33
  metadata: Optional[Dict[str, Any]] = Field(default=None, description="Metadata for the documents.")
35
- __properties: ClassVar[List[str]] = ["base_url", "page_ids", "include_attachments", "credentials", "metadata"]
34
+ __properties: ClassVar[List[str]] = ["base_url", "page_ids", "credentials", "metadata"]
36
35
 
37
36
  model_config = ConfigDict(
38
37
  populate_by_name=True,
@@ -90,7 +89,6 @@ class IngestFromConfluenceBody(BaseModel):
90
89
  _obj = cls.model_validate({
91
90
  "base_url": obj.get("base_url"),
92
91
  "page_ids": obj.get("page_ids"),
93
- "include_attachments": obj.get("include_attachments") if obj.get("include_attachments") is not None else False,
94
92
  "credentials": ConfluenceCredentials.from_dict(obj["credentials"]) if obj.get("credentials") is not None else None,
95
93
  "metadata": obj.get("metadata")
96
94
  })
h2ogpte/rest_sync/__init__.py CHANGED
@@ -14,7 +14,7 @@
14
14
  """ # noqa: E501
15
15
 
16
16
 
17
- __version__ = "1.6.53-dev1"
17
+ __version__ = "1.6.54"
18
18
 
19
19
  # import apis into sdk package
20
20
  from h2ogpte.rest_sync.api.api_keys_api import APIKeysApi
@@ -69,7 +69,6 @@ from h2ogpte.rest_sync.models.chat_message_reference import ChatMessageReference
69
69
  from h2ogpte.rest_sync.models.chat_session import ChatSession
70
70
  from h2ogpte.rest_sync.models.chat_session_update_request import ChatSessionUpdateRequest
71
71
  from h2ogpte.rest_sync.models.chat_settings import ChatSettings
72
- from h2ogpte.rest_sync.models.chat_settings_tags import ChatSettingsTags
73
72
  from h2ogpte.rest_sync.models.chunk import Chunk
74
73
  from h2ogpte.rest_sync.models.chunk_search_result import ChunkSearchResult
75
74
  from h2ogpte.rest_sync.models.collection import Collection
@@ -159,7 +158,6 @@ from h2ogpte.rest_sync.models.suggested_question import SuggestedQuestion
159
158
  from h2ogpte.rest_sync.models.summarize_request import SummarizeRequest
160
159
  from h2ogpte.rest_sync.models.tag import Tag
161
160
  from h2ogpte.rest_sync.models.tag_create_request import TagCreateRequest
162
- from h2ogpte.rest_sync.models.tag_filter import TagFilter
163
161
  from h2ogpte.rest_sync.models.tag_update_request import TagUpdateRequest
164
162
  from h2ogpte.rest_sync.models.update_agent_key_request import UpdateAgentKeyRequest
165
163
  from h2ogpte.rest_sync.models.update_agent_tool_preference_request import UpdateAgentToolPreferenceRequest
h2ogpte/rest_sync/api_client.py CHANGED
@@ -90,7 +90,7 @@ class ApiClient:
90
90
  self.default_headers[header_name] = header_value
91
91
  self.cookie = cookie
92
92
  # Set default User-Agent.
93
- self.user_agent = 'OpenAPI-Generator/1.6.53-dev1/python'
93
+ self.user_agent = 'OpenAPI-Generator/1.6.54/python'
94
94
  self.client_side_validation = configuration.client_side_validation
95
95
 
96
96
  def __enter__(self):
h2ogpte/rest_sync/configuration.py CHANGED
@@ -503,7 +503,7 @@ class Configuration:
503
503
  "OS: {env}\n"\
504
504
  "Python Version: {pyversion}\n"\
505
505
  "Version of the API: v1.0.0\n"\
506
- "SDK Package Version: 1.6.53-dev1".\
506
+ "SDK Package Version: 1.6.54".\
507
507
  format(env=sys.platform, pyversion=sys.version)
508
508
 
509
509
  def get_host_settings(self) -> List[HostSetting]:
h2ogpte/rest_sync/models/__init__.py CHANGED
@@ -38,7 +38,6 @@ from h2ogpte.rest_sync.models.chat_message_reference import ChatMessageReference
38
38
  from h2ogpte.rest_sync.models.chat_session import ChatSession
39
39
  from h2ogpte.rest_sync.models.chat_session_update_request import ChatSessionUpdateRequest
40
40
  from h2ogpte.rest_sync.models.chat_settings import ChatSettings
41
- from h2ogpte.rest_sync.models.chat_settings_tags import ChatSettingsTags
42
41
  from h2ogpte.rest_sync.models.chunk import Chunk
43
42
  from h2ogpte.rest_sync.models.chunk_search_result import ChunkSearchResult
44
43
  from h2ogpte.rest_sync.models.collection import Collection
@@ -128,7 +127,6 @@ from h2ogpte.rest_sync.models.suggested_question import SuggestedQuestion
128
127
  from h2ogpte.rest_sync.models.summarize_request import SummarizeRequest
129
128
  from h2ogpte.rest_sync.models.tag import Tag
130
129
  from h2ogpte.rest_sync.models.tag_create_request import TagCreateRequest
131
- from h2ogpte.rest_sync.models.tag_filter import TagFilter
132
130
  from h2ogpte.rest_sync.models.tag_update_request import TagUpdateRequest
133
131
  from h2ogpte.rest_sync.models.update_agent_key_request import UpdateAgentKeyRequest
134
132
  from h2ogpte.rest_sync.models.update_agent_tool_preference_request import UpdateAgentToolPreferenceRequest
h2ogpte/rest_sync/models/chat_completion_request.py CHANGED
@@ -19,7 +19,6 @@ import json
19
19
 
20
20
  from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr, field_validator
21
21
  from typing import Any, ClassVar, Dict, List, Optional
22
- from h2ogpte.rest_sync.models.chat_settings_tags import ChatSettingsTags
23
22
  from typing import Optional, Set
24
23
  from typing_extensions import Self
25
24
 
@@ -38,7 +37,7 @@ class ChatCompletionRequest(BaseModel):
38
37
  self_reflection_config: Optional[Dict[str, Any]] = Field(default=None, description="A map with self reflection settings: * `llm_reflection` **(type=string, example=gpt-4-0613)** * `prompt_reflection` **(type=string, example=\\\"\\\"\\\"Prompt:\\\\\\\\n%s\\\\\\\\n\\\"\\\"\\\"\\\\\\\\n\\\\\\\\n\\\"\\\"\\\")** * `system_prompt_reflection` **(type=string)** * `llm_args_reflection` **(type=string, example={})** ")
39
38
  rag_config: Optional[Dict[str, Any]] = Field(default=None, description="A map with arguments to control RAG (retrieval-augmented-generation) types.: * `rag_type` **(type=enum[auto, llm_only, rag, hyde1, hyde2, rag+, all_data])** RAG type options: * `auto` - Automatically select the best rag_type. * `llm_only` LLM Only - Answer the query without any supporting document contexts. Requires 1 LLM call. * `rag` RAG (Retrieval Augmented Generation) - Use supporting document contexts to answer the query. Requires 1 LLM call. * `hyde1` LLM Only + RAG composite - HyDE RAG (Hypothetical Document Embedding). Use 'LLM Only' response to find relevant contexts from a collection for generating a response. Requires 2 LLM calls. * `hyde2` HyDE + RAG composite - Use the 'HyDE RAG' response to find relevant contexts from a collection for generating a response. Requires 3 LLM calls. * `rag+` Summary RAG - Like RAG, but uses more context and recursive summarization to overcome LLM context limits. Keeps all retrieved chunks, puts them in order, adds neighboring chunks, then uses the summary API to get the answer. Can require several LLM calls. * `all_data` All Data RAG - Like Summary RAG, but includes all document chunks. Uses recursive summarization to overcome LLM context limits. Can require several LLM calls. * `hyde_no_rag_llm_prompt_extension` **(type=string, example=\\\\\\\\nKeep the answer brief, and list the 5 most relevant key words at the end.)** - Add this prompt to every user's prompt, when generating answers to be used for subsequent retrieval during HyDE. Only used when rag_type is `hyde1` or `hyde2`. * `num_neighbor_chunks_to_include` **(type=integer, default=1)** - A number of neighboring chunks to include for every retrieved relevant chunk. It helps to keep surrounding context together. Only enabled for rag_type `rag+`. * `meta_data_to_include` **(type=map)** - A map with flags that indicate whether each piece of document metadata is to be included as part of the context for a chat with a collection. * `name` **(type: boolean, default=True)** * `text` **(type: boolean, default=True)** * `page` **(type: boolean, default=True)** * `captions` **(type: boolean, default=True)** * `uri` **(type: boolean, default=False)** * `connector` **(type: boolean, default=False)** * `original_mtime` **(type: boolean, default=False)** * `age` **(type: boolean, default=False)** * `score` **(type: boolean, default=False)** * `rag_max_chunks` **(type=integer, default=-1)** - Maximum number of document chunks to retrieve for RAG. Actual number depends on rag_type and admin configuration. Set to >0 values to enable. Can be combined with rag_min_chunk_score. * `rag_min_chunk_score` **(type=double, default=0.0)** - Minimum score of document chunks to retrieve for RAG. Set to >0 values to enable. Can be combined with rag_max_chunks. ")
40
39
  include_chat_history: Optional[StrictStr] = Field(default=None, description="Whether to include chat history. Includes previous questions and answers for the current chat session for each new chat request. Disable if require deterministic answers for a given question.")
41
- tags: Optional[ChatSettingsTags] = None
40
+ tags: Optional[List[StrictStr]] = Field(default=None, description="A list of tags from which to pull the context for RAG.")
42
41
  __properties: ClassVar[List[str]] = ["message", "system_prompt", "pre_prompt_query", "prompt_query", "image_batch_final_prompt", "image_batch_image_prompt", "llm", "llm_args", "self_reflection_config", "rag_config", "include_chat_history", "tags"]
43
42
 
44
43
  @field_validator('include_chat_history')
@@ -90,9 +89,6 @@ class ChatCompletionRequest(BaseModel):
90
89
  exclude=excluded_fields,
91
90
  exclude_none=True,
92
91
  )
93
- # override the default output from pydantic by calling `to_dict()` of tags
94
- if self.tags:
95
- _dict['tags'] = self.tags.to_dict()
96
92
  return _dict
97
93
 
98
94
  @classmethod
@@ -116,7 +112,7 @@ class ChatCompletionRequest(BaseModel):
116
112
  "self_reflection_config": obj.get("self_reflection_config"),
117
113
  "rag_config": obj.get("rag_config"),
118
114
  "include_chat_history": obj.get("include_chat_history"),
119
- "tags": ChatSettingsTags.from_dict(obj["tags"]) if obj.get("tags") is not None else None,
115
+ "tags": obj.get("tags"),
120
116
  })
121
117
  return _obj
122
118
 
h2ogpte/rest_sync/models/chat_settings.py CHANGED
@@ -19,7 +19,6 @@ import json
19
19
 
20
20
  from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator
21
21
  from typing import Any, ClassVar, Dict, List, Optional
22
- from h2ogpte.rest_sync.models.chat_settings_tags import ChatSettingsTags
23
22
  from typing import Optional, Set
24
23
  from typing_extensions import Self
25
24
 
@@ -32,7 +31,7 @@ class ChatSettings(BaseModel):
32
31
  self_reflection_config: Optional[Dict[str, Any]] = Field(default=None, description="A map with self reflection settings: * `llm_reflection` **(type=string, example=gpt-4-0613)** * `prompt_reflection` **(type=string, example=\\\"\\\"\\\"Prompt:\\\\\\\\n%s\\\\\\\\n\\\"\\\"\\\"\\\\\\\\n\\\\\\\\n\\\"\\\"\\\")** * `system_prompt_reflection` **(type=string)** * `llm_args_reflection` **(type=string, example={})** ")
33
32
  rag_config: Optional[Dict[str, Any]] = Field(default=None, description="A map with arguments to control RAG (retrieval-augmented-generation) types.: * `rag_type` **(type=enum[auto, llm_only, rag, hyde1, hyde2, rag+, all_data])** RAG type options: * `auto` - Automatically select the best rag_type. * `llm_only` LLM Only - Answer the query without any supporting document contexts. Requires 1 LLM call. * `rag` RAG (Retrieval Augmented Generation) - Use supporting document contexts to answer the query. Requires 1 LLM call. * `hyde1` LLM Only + RAG composite - HyDE RAG (Hypothetical Document Embedding). Use 'LLM Only' response to find relevant contexts from a collection for generating a response. Requires 2 LLM calls. * `hyde2` HyDE + RAG composite - Use the 'HyDE RAG' response to find relevant contexts from a collection for generating a response. Requires 3 LLM calls. * `rag+` Summary RAG - Like RAG, but uses more context and recursive summarization to overcome LLM context limits. Keeps all retrieved chunks, puts them in order, adds neighboring chunks, then uses the summary API to get the answer. Can require several LLM calls. * `all_data` All Data RAG - Like Summary RAG, but includes all document chunks. Uses recursive summarization to overcome LLM context limits. Can require several LLM calls. * `hyde_no_rag_llm_prompt_extension` **(type=string, example=\\\\\\\\nKeep the answer brief, and list the 5 most relevant key words at the end.)** - Add this prompt to every user's prompt, when generating answers to be used for subsequent retrieval during HyDE. Only used when rag_type is `hyde1` or `hyde2`. * `num_neighbor_chunks_to_include` **(type=integer, default=1)** - A number of neighboring chunks to include for every retrieved relevant chunk. It helps to keep surrounding context together. Only enabled for rag_type `rag+`. * `meta_data_to_include` **(type=map)** - A map with flags that indicate whether each piece of document metadata is to be included as part of the context for a chat with a collection. * `name` **(type: boolean, default=True)** * `text` **(type: boolean, default=True)** * `page` **(type: boolean, default=True)** * `captions` **(type: boolean, default=True)** * `uri` **(type: boolean, default=False)** * `connector` **(type: boolean, default=False)** * `original_mtime` **(type: boolean, default=False)** * `age` **(type: boolean, default=False)** * `score` **(type: boolean, default=False)** * `rag_max_chunks` **(type=integer, default=-1)** - Maximum number of document chunks to retrieve for RAG. Actual number depends on rag_type and admin configuration. Set to >0 values to enable. Can be combined with rag_min_chunk_score. * `rag_min_chunk_score` **(type=double, default=0.0)** - Minimum score of document chunks to retrieve for RAG. Set to >0 values to enable. Can be combined with rag_max_chunks. ")
34
33
  include_chat_history: Optional[StrictStr] = Field(default=None, description="Whether to include chat history. Includes previous questions and answers for the current chat session for each new chat request. Disable if require deterministic answers for a given question.")
35
- tags: Optional[ChatSettingsTags] = None
34
+ tags: Optional[List[StrictStr]] = Field(default=None, description="A list of tags from which to pull the context for RAG.")
36
35
  __properties: ClassVar[List[str]] = ["llm", "llm_args", "self_reflection_config", "rag_config", "include_chat_history", "tags"]
37
36
 
38
37
  @field_validator('include_chat_history')
@@ -41,8 +40,8 @@ class ChatSettings(BaseModel):
41
40
  if value is None:
42
41
  return value
43
42
 
44
- if value not in set(['true', 'false', 'auto']):
45
- raise ValueError("must be one of enum values ('true', 'false', 'auto')")
43
+ if value not in set(['on', 'off', 'auto']):
44
+ raise ValueError("must be one of enum values ('on', 'off', 'auto')")
46
45
  return value
47
46
 
48
47
  model_config = ConfigDict(
@@ -84,9 +83,6 @@ class ChatSettings(BaseModel):
84
83
  exclude=excluded_fields,
85
84
  exclude_none=True,
86
85
  )
87
- # override the default output from pydantic by calling `to_dict()` of tags
88
- if self.tags:
89
- _dict['tags'] = self.tags.to_dict()
90
86
  return _dict
91
87
 
92
88
  @classmethod
@@ -104,7 +100,7 @@ class ChatSettings(BaseModel):
104
100
  "self_reflection_config": obj.get("self_reflection_config"),
105
101
  "rag_config": obj.get("rag_config"),
106
102
  "include_chat_history": obj.get("include_chat_history"),
107
- "tags": ChatSettingsTags.from_dict(obj["tags"]) if obj.get("tags") is not None else None
103
+ "tags": obj.get("tags")
108
104
  })
109
105
  return _obj
110
106
 
h2ogpte/rest_sync/models/ingest_from_confluence_body.py CHANGED
@@ -17,7 +17,7 @@ import pprint
17
17
  import re # noqa: F401
18
18
  import json
19
19
 
20
- from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr
20
+ from pydantic import BaseModel, ConfigDict, Field, StrictStr
21
21
  from typing import Any, ClassVar, Dict, List, Optional
22
22
  from h2ogpte.rest_sync.models.confluence_credentials import ConfluenceCredentials
23
23
  from typing import Optional, Set
@@ -29,10 +29,9 @@ class IngestFromConfluenceBody(BaseModel):
29
29
  """ # noqa: E501
30
30
  base_url: StrictStr = Field(description="Base url of the confluence instance.")
31
31
  page_ids: List[StrictStr] = Field(description="Ids of pages to be ingested.")
32
- include_attachments: Optional[StrictBool] = Field(default=False, description="A flag indicating whether to also ingest attachments with the page.")
33
32
  credentials: ConfluenceCredentials
34
33
  metadata: Optional[Dict[str, Any]] = Field(default=None, description="Metadata for the documents.")
35
- __properties: ClassVar[List[str]] = ["base_url", "page_ids", "include_attachments", "credentials", "metadata"]
34
+ __properties: ClassVar[List[str]] = ["base_url", "page_ids", "credentials", "metadata"]
36
35
 
37
36
  model_config = ConfigDict(
38
37
  populate_by_name=True,
@@ -90,7 +89,6 @@ class IngestFromConfluenceBody(BaseModel):
90
89
  _obj = cls.model_validate({
91
90
  "base_url": obj.get("base_url"),
92
91
  "page_ids": obj.get("page_ids"),
93
- "include_attachments": obj.get("include_attachments") if obj.get("include_attachments") is not None else False,
94
92
  "credentials": ConfluenceCredentials.from_dict(obj["credentials"]) if obj.get("credentials") is not None else None,
95
93
  "metadata": obj.get("metadata")
96
94
  })
h2ogpte/session.py CHANGED
@@ -29,7 +29,6 @@ from h2ogpte.types import (
29
29
  ChatResponse,
30
30
  PartialChatMessage,
31
31
  SessionError,
32
- TagFilter,
33
32
  )
34
33
  from h2ogpte.errors import (
35
34
  UnauthorizedError,
@@ -92,9 +91,7 @@ class Session:
92
91
  url = urlparse(address)
93
92
  scheme = "wss" if url.scheme == "https" else "ws"
94
93
  # TODO handle base URLs
95
- self._address = (
96
- f"{scheme}://{url.netloc}/ws?currentSessionID={chat_session_id}&source=py"
97
- )
94
+ self._address = f"{scheme}://{url.netloc}/ws?currentSessionID={chat_session_id}"
98
95
  self._client = client
99
96
  self._chat_session_id = chat_session_id
100
97
  self._connection: Optional[ClientConnection] = None
@@ -195,7 +192,7 @@ class Session:
195
192
  self_reflection_config: Optional[Dict[str, Any]] = None,
196
193
  rag_config: Optional[Dict[str, Any]] = None,
197
194
  include_chat_history: Optional[Union[bool, str]] = "auto",
198
- tags: Optional[Union[List[str], TagFilter]] = None,
195
+ tags: Optional[List[str]] = None,
199
196
  metadata_filter: Optional[Dict[str, Any]] = None,
200
197
  timeout: Optional[float] = None,
201
198
  retries: int = 3,
@@ -329,8 +326,6 @@ class Session:
329
326
  Requires 1 LLM or Agent call.
330
327
  :code:`"agent_only"` Agent Only - Answer the query with only original files passed to agent.
331
328
  Requires 1 Agent call.
332
- :code:`"agentic_rag"` Agentic RAG - Agent with RAG tool that retrieves and answers from collection.
333
- Requires 1 Agent call with RAG tool execution.
334
329
  :code:`"rag"` RAG (Retrieval Augmented Generation) - Use supporting document contexts
335
330
  to answer the query. Requires 1 LLM or Agent call.
336
331
  :code:`"hyde1"` LLM Only + RAG composite - HyDE RAG (Hypothetical Document Embedding).
@@ -345,6 +340,9 @@ class Session:
345
340
  :code:`"all_data"` All Data RAG - Like Summary RAG, but includes all document
346
341
  chunks. Uses recursive summarization to overcome LLM context limits.
347
342
  Can require several LLM calls.
343
+ :code:`"all_data"` All Data RAG - Like Summary RAG, but includes all document
344
+ chunks. Uses recursive summarization to overcome LLM context limits.
345
+ Can require several LLM calls.
348
346
  hyde_no_rag_llm_prompt_extension: str
349
347
  Add this prompt to every user's prompt, when generating answers to be used
350
348
  for subsequent retrieval during HyDE. Only used when rag_type is "hyde1" or "hyde2".
@@ -382,9 +380,7 @@ class Session:
382
380
  answers for a given question.
383
381
  Choices are: ["on","off","auto",True,False]
384
382
  tags:
385
- Filter documents by tags for RAG. Can be:
386
- - List format: ["red", "blue"] includes documents with these tags
387
- - TagFilter object: TagFilter(include=["red"], exclude=["blue"])
383
+ A list of tags from which to pull the context for RAG.
388
384
  metadata_filter:
389
385
  A dictionary to filter documents by metadata, from which to pull the context for RAG.
390
386
  timeout:
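`Session.query` (and the async variant below) now types `tags` as a plain `List[str]`; the `TagFilter` object form, and with it tag exclusion through this argument, is gone. A hedged end-to-end sketch; obtaining the session via `client.connect` and the `create_chat_session` helper follow standard h2ogpte client usage and are assumed here, not shown in this diff:

```python
# Sketch only: pass tags as a flat list of strings when chatting over a session.
from h2ogpte import H2OGPTE

client = H2OGPTE(address="https://h2ogpte.genai.h2o.ai", api_key="sk-XXXX")
chat_session_id = client.create_chat_session("<collection-id>")   # assumed helper

with client.connect(chat_session_id) as session:
    reply = session.query(
        "What do the tagged documents say about pricing?",
        tags=["finance", "2024"],   # TagFilter(include=..., exclude=...) is no longer supported
        include_chat_history="auto",
        timeout=120,
    )
    print(reply.content)
```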
h2ogpte/session_async.py CHANGED
@@ -30,7 +30,6 @@ from h2ogpte.types import (
30
30
  ChatResponse,
31
31
  SessionError,
32
32
  PartialChatMessage,
33
- TagFilter,
34
33
  )
35
34
  from h2ogpte.errors import (
36
35
  UnauthorizedError,
@@ -109,7 +108,7 @@ class SessionAsync:
109
108
  self_reflection_config: Optional[Dict[str, Any]] = None,
110
109
  rag_config: Optional[Dict[str, Any]] = None,
111
110
  include_chat_history: Optional[Union[bool, str]] = "auto",
112
- tags: Optional[Union[List[str], TagFilter]] = None,
111
+ tags: Optional[List[str]] = None,
113
112
  metadata_filter: Optional[Dict[str, Any]] = None,
114
113
  timeout: Optional[float] = None,
115
114
  retries: int = 3,
@@ -240,8 +239,6 @@ class SessionAsync:
240
239
  Requires 1 LLM or Agent call.
241
240
  :code:`"agent_only"` Agent Only - Answer the query with only original files passed to agent.
242
241
  Requires 1 Agent call.
243
- :code:`"agentic_rag"` Agentic RAG - Agent with RAG tool that retrieves and answers from collection.
244
- Requires 1 Agent call with RAG tool execution.
245
242
  :code:`"rag"` RAG (Retrieval Augmented Generation) - Use supporting document contexts
246
243
  to answer the query. Requires 1 LLM or Agent call.
247
244
  :code:`"hyde1"` LLM Only + RAG composite - HyDE RAG (Hypothetical Document Embedding).
@@ -293,9 +290,7 @@ class SessionAsync:
293
290
  answers for a given question.
294
291
  Choices are: ["on","off","auto",True,False]
295
292
  tags:
296
- Filter documents by tags for RAG. Can be:
297
- - List format: ["red", "blue"] includes documents with these tags
298
- - TagFilter object: TagFilter(include=["red"], exclude=["blue"])
293
+ A list of tags from which to pull the context for RAG.
299
294
  metadata_filter:
300
295
  A dictionary to filter documents by metadata, from which to pull the context for RAG.
301
296
  timeout:
@@ -478,7 +473,7 @@ class SessionAsync:
478
473
  while retries < self._max_connect_retries:
479
474
  try:
480
475
  self._websocket = await ws_old_connect(
481
- uri=f"{scheme}://{url.netloc}/ws?currentSessionID={self._chat_session_id}&source=py",
476
+ uri=f"{scheme}://{url.netloc}/ws?currentSessionID={self._chat_session_id}",
482
477
  extra_headers=headers,
483
478
  open_timeout=self._open_timeout,
484
479
  close_timeout=self._close_timeout,
h2ogpte/types.py CHANGED
@@ -592,21 +592,6 @@ class APIKey(BaseModel):
592
592
  is_global_key: bool
593
593
 
594
594
 
595
- @dataclass
596
- class TagFilter:
597
- """
598
- Filter for document tags supporting inclusion and exclusion.
599
-
600
- Examples:
601
- TagFilter(include=['red', 'blue'])
602
- TagFilter(exclude=['red', 'blue'])
603
- TagFilter(include=['color'], exclude=['red', 'blue'])
604
- """
605
-
606
- include: Optional[List[str]] = None
607
- exclude: Optional[List[str]] = None
608
-
609
-
610
595
  @dataclass
611
596
  class ChatRequest:
612
597
  t: str # cq
@@ -624,7 +609,7 @@ class ChatRequest:
624
609
  self_reflection_config: Optional[str]
625
610
  rag_config: Optional[str]
626
611
  include_chat_history: Optional[Union[bool, str]] = False
627
- tags: Optional[Union[List[str], "TagFilter"]] = None
612
+ tags: Optional[List[str]] = None
628
613
  metadata_filter: Optional[str] = None
629
614
  image_batch_image_prompt: Optional[str] = None
630
615
  image_batch_final_prompt: Optional[str] = None
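Because the `TagFilter` dataclass is deleted from `h2ogpte.types`, any user code importing it will break on upgrade. A small, assumed compatibility check:

```python
# Sketch only: detect whether the installed client still exposes TagFilter.
try:
    from h2ogpte.types import TagFilter  # removed in 1.6.54
    HAS_TAG_FILTER = True
except ImportError:
    HAS_TAG_FILTER = False

print("TagFilter available:", HAS_TAG_FILTER)   # False on 1.6.54
```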
h2ogpte-1.6.53rc1.dist-info/METADATA → h2ogpte-1.6.54.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: h2ogpte
3
- Version: 1.6.53rc1
3
+ Version: 1.6.54
4
4
  Summary: Client library for Enterprise h2oGPTe
5
5
  Author-email: "H2O.ai, Inc." <support@h2o.ai>
6
6
  Project-URL: Source, https://github.com/h2oai/h2ogpte
h2ogpte-1.6.53rc1.dist-info/RECORD → h2ogpte-1.6.54.dist-info/RECORD CHANGED
@@ -1,13 +1,13 @@
1
- h2ogpte/__init__.py,sha256=pEqhFD2G22hR2KXwsAOt70pbrZDJBfO2cnCxWWXlrG4,1524
1
+ h2ogpte/__init__.py,sha256=fuy1nl0Mg1Ia2Z3e-Q9L0vPNuO5b8Jw8g-Thlsvseow,1521
2
2
  h2ogpte/connectors.py,sha256=CRAEpkn9GotcCjWANfJjZ5Hq1cjGWJ4H_IO4eJgVWiI,8466
3
3
  h2ogpte/errors.py,sha256=XgLdfJO1fZ9Bf9rhUKpnvRzzvkNyan3Oc6WzGS6hCUA,1248
4
- h2ogpte/h2ogpte.py,sha256=X0MIQ6JOgHJkCeBhlTwvqrSuom5KFZ3H7DZJN1EWn1w,310278
5
- h2ogpte/h2ogpte_async.py,sha256=x2KlYVQN0CrNe1N67Of-WDpVDBzw0sIlQm5kQByAGJ4,330175
4
+ h2ogpte/h2ogpte.py,sha256=QdGX0SKMv0kdU8C33un1DFvyRrqvSQnnERqm9gF4IN4,309850
5
+ h2ogpte/h2ogpte_async.py,sha256=yuR3OdgSMxUIvuzUmgE1BYP-Ki9bYmpaHI8fTI3K30o,329747
6
6
  h2ogpte/h2ogpte_sync_base.py,sha256=ftsVzpMqEsyi0UACMI-7H_EIYEx9JEdEUImbyjWy_Hc,15285
7
- h2ogpte/session.py,sha256=uyU0QJhTpN9vMfie3hj3S8pvMOLkcJdsnnXrSgCgxqE,32770
8
- h2ogpte/session_async.py,sha256=F5wg8bIRhdXZNvc_6WLtT1tQUAPVPaKq4bYJDoMCEOA,31738
7
+ h2ogpte/session.py,sha256=BqJg3mWVeyz_ZLUJC_olzZzeLnRSaJwCH1NkXXfhg54,32608
8
+ h2ogpte/session_async.py,sha256=UzNijTn3kZjAUYl_Jn5Oji4QrPTOpdX9KKByTmhLlK8,31354
9
9
  h2ogpte/shared_client.py,sha256=Zh24myL--5JDdrKoJPW4aeprHX6a_oB9o461Ho3hnU8,14691
10
- h2ogpte/types.py,sha256=IwgtLX4GNC6GNdbaLkQhy5Wn8h-p5AM9_Yi326tdwFk,15728
10
+ h2ogpte/types.py,sha256=es2xD57bnsnZFq4GcVKcd1pA6nGSiITGAhe24U-QOD8,15353
11
11
  h2ogpte/utils.py,sha256=Z9n57xxPu0KtsCzkJ9V_VgTW--oG_aXTLBgmXDWSdnM,3201
12
12
  h2ogpte/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
13
13
  h2ogpte/cli/main.py,sha256=Upf3t_5m1RqLh1jKGB6Gbyp3n9sujVny7sY-qxh2PYo,2722
@@ -41,10 +41,10 @@ h2ogpte/cli/ui/prompts.py,sha256=bJvRe_32KppQTK5bqnsrPh0RS4JaY9KkiV7y-3v8PMQ,538
41
41
  h2ogpte/cli/ui/status_bar.py,sha256=hs2MLvkg-y3Aiu3gWRtgMXf3jv3DGe7Y47ucgoBAP7Y,3852
42
42
  h2ogpte/cli/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
43
43
  h2ogpte/cli/utils/file_manager.py,sha256=ghNDX6G3Dr0vFvBYjbqx5o7qxq-pN8Vo2Rp1vyITfLo,13988
44
- h2ogpte/rest_async/__init__.py,sha256=k_WohFmzdiWzmABab4-dP6cJm8Sb9pjxLA8QEwvB3Vw,15336
45
- h2ogpte/rest_async/api_client.py,sha256=fd2ZBAkGB8UwbOTrIAsD65F47qtn5I2132UzsRPEcOc,29510
44
+ h2ogpte/rest_async/__init__.py,sha256=cQ-xpJby2swPZ4xjvjizuHl7LgsXgvnzpLBbd00j0ss,15198
45
+ h2ogpte/rest_async/api_client.py,sha256=jyc5rBiB0-RQq4U2_JRb71ZJ2gAGcKmwMQ7vqRmYLdY,29505
46
46
  h2ogpte/rest_async/api_response.py,sha256=eMxw1mpmJcoGZ3gs9z6jM4oYoZ10Gjk333s9sKxGv7s,652
47
- h2ogpte/rest_async/configuration.py,sha256=FZAei-oFif6kJxYXZtTwQdnXU6YZJicK25Uo1wrftqM,19567
47
+ h2ogpte/rest_async/configuration.py,sha256=Cr3o-V6rth5F54Qi77hv1ftywcgB756mDs_MnoEJ3D8,19562
48
48
  h2ogpte/rest_async/exceptions.py,sha256=aSDc-0lURtyQjf5HGa7_Ta0nATxKxfHW3huDA2Zdj6o,8370
49
49
  h2ogpte/rest_async/rest.py,sha256=mdjDwzJ1kiaYtONUfDRqKsRPw5-tG6eyZV2P1yBuwRo,9147
50
50
  h2ogpte/rest_async/api/__init__.py,sha256=R_x57GGyaSgxZyrJOyOt551TodbRSQf3T7VrraQc-84,973
@@ -63,7 +63,7 @@ h2ogpte/rest_async/api/prompt_templates_api.py,sha256=RJnYC3jfhvx2L_vpTlU6kCqujs
63
63
  h2ogpte/rest_async/api/secrets_api.py,sha256=MTtmpYO2IOXuCklK-BxVyF9aBNZebgWuQenada-uM7o,68122
64
64
  h2ogpte/rest_async/api/system_api.py,sha256=wXxO1lFEnrPHO0JRCgg13j6CpRKb3nou81dk8nA31v0,12532
65
65
  h2ogpte/rest_async/api/tags_api.py,sha256=VwamxhJKsuBu3UeslsZ0vflxbnV1FmUV2pbWvIBwvFk,56168
66
- h2ogpte/rest_async/models/__init__.py,sha256=ntZCsoyBUoYEO9f3dbmk8x6FNyjMyJx4lDoZ4mlGqDM,13825
66
+ h2ogpte/rest_async/models/__init__.py,sha256=UKQwRSTiPYtkg58_uJiCXnCKmPdYp0qtIv6Vb1F1h4M,13692
67
67
  h2ogpte/rest_async/models/add_custom_agent_tool201_response_inner.py,sha256=0pxOC4ETqAnl2Amyt9d47oZDCH7Gjz0kexbpPsXurlg,4619
68
68
  h2ogpte/rest_async/models/agent_key.py,sha256=u-48HJqvAd3fpY8SZnl6_iDnv_2_V_wGrGu9w54V7s8,5226
69
69
  h2ogpte/rest_async/models/agent_server_directory_file_stats.py,sha256=Y25fTkk8kbY_p2AXFNTM4sUlPwEGGSMLxmC_csmTn1w,6335
@@ -80,15 +80,14 @@ h2ogpte/rest_async/models/api_key_update_expiry_request.py,sha256=GTMkaqLOUqUpjx
80
80
  h2ogpte/rest_async/models/azure_credentials.py,sha256=hy6hv5Uf5CIGgO5S-2jVbO5N25QvEkiUxXnvItESoBA,4620
81
81
  h2ogpte/rest_async/models/chat_completion.py,sha256=iVTiDzWJ7v5p_j37PO5aRdLrKhY98J_cl7eXTsymudU,4524
82
82
  h2ogpte/rest_async/models/chat_completion_delta.py,sha256=TGEeMoSgBIph1YzTJYN2lYekboFo4btRRGtDbd5HHtw,4745
83
- h2ogpte/rest_async/models/chat_completion_request.py,sha256=7a_x6kQtF5Lap73XpvY3DgCCznykygLEEGD-SVwwlEo,19251
83
+ h2ogpte/rest_async/models/chat_completion_request.py,sha256=PlVLv-ySy3ukMwtNzgrxTDwDYj1yXwfd6-wGFoFhPbk,19043
84
84
  h2ogpte/rest_async/models/chat_error.py,sha256=Ob1UB0nhrKdEGA5Z63VD_TdxokV-8CyA5m-NDgnwqt4,4355
85
85
  h2ogpte/rest_async/models/chat_message.py,sha256=D46MmPf86LPKkcTJKcPyH-EFyMMkPRNOCC1jfQu0xYE,5768
86
86
  h2ogpte/rest_async/models/chat_message_meta.py,sha256=dgM0NIDSdB6_MN7lEiR4frDFCVZa7C58UATW0SiJB2s,4484
87
87
  h2ogpte/rest_async/models/chat_message_reference.py,sha256=P5_jxbgfNcwdzC7OgND27EbVemPKiZay0jsCYn8qqTs,5248
88
88
  h2ogpte/rest_async/models/chat_session.py,sha256=RVvL2IvMzIQPJ2W6lheUJyN3i6kaffQ80ox66sivq_M,5199
89
89
  h2ogpte/rest_async/models/chat_session_update_request.py,sha256=yiH14-IrQfbZ0qINIAyGgtrmhgDr-E-cmd9_5OVVHKU,4411
90
- h2ogpte/rest_async/models/chat_settings.py,sha256=95VV_za51NcVzgn5EADwRjPmP8ek4iHWRkOQCSQOlfA,17149
91
- h2ogpte/rest_async/models/chat_settings_tags.py,sha256=W8q1R6hMIXGNOcyc5k-hAOSOUCV7744IOcTsT7SKOU4,7424
90
+ h2ogpte/rest_async/models/chat_settings.py,sha256=V5k0f7dMpwoWu6TBY1IGkma5jIlX4O62YsMABvBWRBk,16933
92
91
  h2ogpte/rest_async/models/chunk.py,sha256=4t2oms4W29WEYKi7KvzCArsLOaCOLYyyQRrJttlDUAU,4759
93
92
  h2ogpte/rest_async/models/chunk_search_result.py,sha256=keifMKId0YhLFGzh5nv3jNCtQt7YciiwUd6-DsNckAs,4985
94
93
  h2ogpte/rest_async/models/collection.py,sha256=NR9Ze5D8PNTDbSKWD3J5y9OiF_KdHEJnJmZKQJCkg00,9181
@@ -132,7 +131,7 @@ h2ogpte/rest_async/models/guardrails_settings_create_request.py,sha256=6DMke_u-1
132
131
  h2ogpte/rest_async/models/h2_ogpt_system_info.py,sha256=6pBoTwU-QOh3oSk48drmuFhOcv9zEEzsWXvn-P4LIHk,8652
133
132
  h2ogpte/rest_async/models/h2_ogptgpu_info.py,sha256=gUdC0izDgwpyRBJa9_bua6BYnJo8K0H9nG_E4kO_pNE,5124
134
133
  h2ogpte/rest_async/models/ingest_from_azure_blob_storage_body.py,sha256=ouEUrdMYJU8kcjTOD8FfzPiaZYwU6RJFP6DYfY9oNyk,5470
135
- h2ogpte/rest_async/models/ingest_from_confluence_body.py,sha256=MWCCDW9lnKQuBPNCNNqVwoijOT-EW8JlQsD88KRjYmo,5561
134
+ h2ogpte/rest_async/models/ingest_from_confluence_body.py,sha256=7nViW05VVPkyBCVmy3cVn8oLdjfJaT6oE5hbjdwsLD0,5250
136
135
  h2ogpte/rest_async/models/ingest_from_file_system_body.py,sha256=JnbjY-PxMxaLZXvHRjKdfNTZDtJj9CfPpRPG1QVyBjU,4655
137
136
  h2ogpte/rest_async/models/ingest_from_gcs_body.py,sha256=ygQsntThO7SHxzHlwsftFvPvZQsGj6qHMCDp5HOdipg,5079
138
137
  h2ogpte/rest_async/models/ingest_from_s3_body.py,sha256=n7nuAHbMBQpFPerWspgxy5Pua-Bvkc3axcYgFEg33mU,5311
@@ -178,7 +177,6 @@ h2ogpte/rest_async/models/suggested_question.py,sha256=RcXlzaTsj-GFtT5gGuiHkNHtN
178
177
  h2ogpte/rest_async/models/summarize_request.py,sha256=LpiWC-XTgxaXvezCoJdCCvl_cM7vy6f7ocEZZUsgaYU,14882
179
178
  h2ogpte/rest_async/models/tag.py,sha256=rnE0UXIzF3tqM9EWXRZ1oY3OU1Piq5MOU9t2svwgk3w,4594
180
179
  h2ogpte/rest_async/models/tag_create_request.py,sha256=jETninpugqtUUkwHmcUZj3hj1qbSqcb7xLxnHkB1CCE,4379
181
- h2ogpte/rest_async/models/tag_filter.py,sha256=Qnis6iEOQOPi5bpRA5YrmxxjOcg0hNwrf7UeZ332AtU,5217
182
180
  h2ogpte/rest_async/models/tag_update_request.py,sha256=QD9iUZIqaUsuobauQF_f6OkyRE2bTG3O6f1N2pqBnBM,4524
183
181
  h2ogpte/rest_async/models/update_agent_key_request.py,sha256=7EqlI-kZw0U2fyTnJumnUUlXslYZTBWvcTszsVkB310,5030
184
182
  h2ogpte/rest_async/models/update_agent_tool_preference_request.py,sha256=GguSv4qEmF7OJZRm8vMZJ-9Md2Ce_hgModJ4PE4OruU,4493
@@ -203,10 +201,10 @@ h2ogpte/rest_async/models/user_deletion_request.py,sha256=z7gD8XKOGwwg782TRzXJii
203
201
  h2ogpte/rest_async/models/user_info.py,sha256=ef59Eh9k42JUY3X2RnCrwYR7sc_8lXT1vRLGoNz3uTU,4489
204
202
  h2ogpte/rest_async/models/user_job_details.py,sha256=kzu8fLxVsRMgnyt6dLr0VWjlIoE3i1VRpGR9nDxFyk4,4985
205
203
  h2ogpte/rest_async/models/user_permission.py,sha256=1k74E7s2kD2waSZ79KPlgTupVYEacTKWMqcKxv2972A,4856
206
- h2ogpte/rest_sync/__init__.py,sha256=JRT0iDDzvcy-A7NGkn7zsjlCnIV-YekoM8RZfA6H7Ns,15173
207
- h2ogpte/rest_sync/api_client.py,sha256=xkdmqVase4zywcw6kLGV0rbXvN_FPyET8txFnDSPCkQ,29397
204
+ h2ogpte/rest_sync/__init__.py,sha256=aKJGA98wa_WLmuwV2M3m1mR1Sa933qf4501ij25Twxo,15037
205
+ h2ogpte/rest_sync/api_client.py,sha256=HyRBxQopzJrNO_TA0A_RbB1Q4qK2ToUfYgtYFyIEyG0,29392
208
206
  h2ogpte/rest_sync/api_response.py,sha256=eMxw1mpmJcoGZ3gs9z6jM4oYoZ10Gjk333s9sKxGv7s,652
209
- h2ogpte/rest_sync/configuration.py,sha256=p8FDbRC4zG7pIpLmzyA5xUk1McYCqJygnNZnwustIog,19850
207
+ h2ogpte/rest_sync/configuration.py,sha256=E1siqzb-04zlR4QzzdGl64QDKC3Y8gBkiAgMYsehGhk,19845
210
208
  h2ogpte/rest_sync/exceptions.py,sha256=aSDc-0lURtyQjf5HGa7_Ta0nATxKxfHW3huDA2Zdj6o,8370
211
209
  h2ogpte/rest_sync/rest.py,sha256=evRzviTYC_fsrpTtFlGvruXmquH9C0jDn-oQrGrE5A0,11314
212
210
  h2ogpte/rest_sync/api/__init__.py,sha256=ZuLQQtyiXnP5UOwTlIOYLGLQq1BG_0PEkzC9s698vjM,958
@@ -225,7 +223,7 @@ h2ogpte/rest_sync/api/prompt_templates_api.py,sha256=157y9lzY7Ky_ALu8TEemi0rfYzX
225
223
  h2ogpte/rest_sync/api/secrets_api.py,sha256=5rAikvrX7n3Cj9M0ME-cPjISLpqrEFh2LmW23mvGk4g,67828
226
224
  h2ogpte/rest_sync/api/system_api.py,sha256=knhP97lzeZt-YFTpcNJm9NdnqjoSg_Oh0yMGowiV1IM,12480
227
225
  h2ogpte/rest_sync/api/tags_api.py,sha256=oCBsrFFLk0su8mz4wnCGSR_NxpCQgwEx18IwJKsOKrA,55921
228
- h2ogpte/rest_sync/models/__init__.py,sha256=IENU9hChLstPUghLDSznNBoJ2Tdz2yD8SPOwRlagXec,13686
226
+ h2ogpte/rest_sync/models/__init__.py,sha256=jRJSG4_PvnmArljk4J3ioPvD0v_iFIqB3YEpYpdLa54,13555
229
227
  h2ogpte/rest_sync/models/add_custom_agent_tool201_response_inner.py,sha256=0pxOC4ETqAnl2Amyt9d47oZDCH7Gjz0kexbpPsXurlg,4619
230
228
  h2ogpte/rest_sync/models/agent_key.py,sha256=u-48HJqvAd3fpY8SZnl6_iDnv_2_V_wGrGu9w54V7s8,5226
231
229
  h2ogpte/rest_sync/models/agent_server_directory_file_stats.py,sha256=Y25fTkk8kbY_p2AXFNTM4sUlPwEGGSMLxmC_csmTn1w,6335
@@ -242,15 +240,14 @@ h2ogpte/rest_sync/models/api_key_update_expiry_request.py,sha256=GTMkaqLOUqUpjxl
242
240
  h2ogpte/rest_sync/models/azure_credentials.py,sha256=hy6hv5Uf5CIGgO5S-2jVbO5N25QvEkiUxXnvItESoBA,4620
243
241
  h2ogpte/rest_sync/models/chat_completion.py,sha256=iVTiDzWJ7v5p_j37PO5aRdLrKhY98J_cl7eXTsymudU,4524
244
242
  h2ogpte/rest_sync/models/chat_completion_delta.py,sha256=TGEeMoSgBIph1YzTJYN2lYekboFo4btRRGtDbd5HHtw,4745
245
- h2ogpte/rest_sync/models/chat_completion_request.py,sha256=9LG4N3Dh2YoY3dx6aRNXcdqoWPfDqnOxFOAa9NDGYZQ,19250
243
+ h2ogpte/rest_sync/models/chat_completion_request.py,sha256=PlVLv-ySy3ukMwtNzgrxTDwDYj1yXwfd6-wGFoFhPbk,19043
246
244
  h2ogpte/rest_sync/models/chat_error.py,sha256=Ob1UB0nhrKdEGA5Z63VD_TdxokV-8CyA5m-NDgnwqt4,4355
247
245
  h2ogpte/rest_sync/models/chat_message.py,sha256=OLBO6sF7Wn8NC2Qf2anxGZYJ7YpWQTf8oI7ENcOSmQ8,5767
248
246
  h2ogpte/rest_sync/models/chat_message_meta.py,sha256=dgM0NIDSdB6_MN7lEiR4frDFCVZa7C58UATW0SiJB2s,4484
249
247
  h2ogpte/rest_sync/models/chat_message_reference.py,sha256=P5_jxbgfNcwdzC7OgND27EbVemPKiZay0jsCYn8qqTs,5248
250
248
  h2ogpte/rest_sync/models/chat_session.py,sha256=RVvL2IvMzIQPJ2W6lheUJyN3i6kaffQ80ox66sivq_M,5199
251
249
  h2ogpte/rest_sync/models/chat_session_update_request.py,sha256=yiH14-IrQfbZ0qINIAyGgtrmhgDr-E-cmd9_5OVVHKU,4411
252
- h2ogpte/rest_sync/models/chat_settings.py,sha256=Qrkq4iAfK83Ts8oo50UYiA1vX_QHXpzJvF_7LEWFQq0,17148
253
- h2ogpte/rest_sync/models/chat_settings_tags.py,sha256=fZoLR7g19bvVz4ChhttflYp36PkUsiEFwwh4A5VFEHk,7423
250
+ h2ogpte/rest_sync/models/chat_settings.py,sha256=V5k0f7dMpwoWu6TBY1IGkma5jIlX4O62YsMABvBWRBk,16933
254
251
  h2ogpte/rest_sync/models/chunk.py,sha256=4t2oms4W29WEYKi7KvzCArsLOaCOLYyyQRrJttlDUAU,4759
255
252
  h2ogpte/rest_sync/models/chunk_search_result.py,sha256=keifMKId0YhLFGzh5nv3jNCtQt7YciiwUd6-DsNckAs,4985
256
253
  h2ogpte/rest_sync/models/collection.py,sha256=NR9Ze5D8PNTDbSKWD3J5y9OiF_KdHEJnJmZKQJCkg00,9181
@@ -294,7 +291,7 @@ h2ogpte/rest_sync/models/guardrails_settings_create_request.py,sha256=W3-vZsU0Cu
294
291
  h2ogpte/rest_sync/models/h2_ogpt_system_info.py,sha256=eaFSINplInnPIW-dRO9K25AbQouNYngBI_JXX-AuY_w,8651
295
292
  h2ogpte/rest_sync/models/h2_ogptgpu_info.py,sha256=gUdC0izDgwpyRBJa9_bua6BYnJo8K0H9nG_E4kO_pNE,5124
296
293
  h2ogpte/rest_sync/models/ingest_from_azure_blob_storage_body.py,sha256=G_0SInDzFcpWWwnOEByjDir3QkMBiMxU4D-rGKeBSUU,5469
297
- h2ogpte/rest_sync/models/ingest_from_confluence_body.py,sha256=eFr4bmTQOBfjMywxoNIDWiG4y_untC7Ws1JV0m4rfIQ,5560
294
+ h2ogpte/rest_sync/models/ingest_from_confluence_body.py,sha256=e9wndvmRRHs161DoYDwnNPiuJmynU1f2r5NgldbJwKk,5249
298
295
  h2ogpte/rest_sync/models/ingest_from_file_system_body.py,sha256=JnbjY-PxMxaLZXvHRjKdfNTZDtJj9CfPpRPG1QVyBjU,4655
299
296
  h2ogpte/rest_sync/models/ingest_from_gcs_body.py,sha256=XLRQMzcYLHWUWaRD_hnhSwIRz8TYGM3emDgpvWw_Gak,5078
300
297
  h2ogpte/rest_sync/models/ingest_from_s3_body.py,sha256=OTZ01MO7hn-LRlATgsrv1DUX6oz04jv4Qk94fsGSfnE,5310
@@ -340,7 +337,6 @@ h2ogpte/rest_sync/models/suggested_question.py,sha256=RcXlzaTsj-GFtT5gGuiHkNHtNX
340
337
  h2ogpte/rest_sync/models/summarize_request.py,sha256=L58eJZiqu-1Ssc2sat3Hp75k1mTixI_ibUiqYFTYptM,14881
341
338
  h2ogpte/rest_sync/models/tag.py,sha256=rnE0UXIzF3tqM9EWXRZ1oY3OU1Piq5MOU9t2svwgk3w,4594
342
339
  h2ogpte/rest_sync/models/tag_create_request.py,sha256=jETninpugqtUUkwHmcUZj3hj1qbSqcb7xLxnHkB1CCE,4379
343
- h2ogpte/rest_sync/models/tag_filter.py,sha256=Qnis6iEOQOPi5bpRA5YrmxxjOcg0hNwrf7UeZ332AtU,5217
344
340
  h2ogpte/rest_sync/models/tag_update_request.py,sha256=QD9iUZIqaUsuobauQF_f6OkyRE2bTG3O6f1N2pqBnBM,4524
345
341
  h2ogpte/rest_sync/models/update_agent_key_request.py,sha256=7EqlI-kZw0U2fyTnJumnUUlXslYZTBWvcTszsVkB310,5030
346
342
  h2ogpte/rest_sync/models/update_agent_tool_preference_request.py,sha256=GguSv4qEmF7OJZRm8vMZJ-9Md2Ce_hgModJ4PE4OruU,4493
@@ -365,8 +361,8 @@ h2ogpte/rest_sync/models/user_deletion_request.py,sha256=z7gD8XKOGwwg782TRzXJiiP
365
361
  h2ogpte/rest_sync/models/user_info.py,sha256=ef59Eh9k42JUY3X2RnCrwYR7sc_8lXT1vRLGoNz3uTU,4489
366
362
  h2ogpte/rest_sync/models/user_job_details.py,sha256=9cbhpgLMDpar-aTOaY5Ygud-8Kbi23cLNldTGab0Sd8,4984
367
363
  h2ogpte/rest_sync/models/user_permission.py,sha256=1k74E7s2kD2waSZ79KPlgTupVYEacTKWMqcKxv2972A,4856
368
- h2ogpte-1.6.53rc1.dist-info/METADATA,sha256=GXYMRNmJ4n9nMpTcDUb6YQ6Z1LnMwylBtZa-pDzoXVQ,8615
369
- h2ogpte-1.6.53rc1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
370
- h2ogpte-1.6.53rc1.dist-info/entry_points.txt,sha256=BlaqX2SXJanrOGqNYwnzvCxHGNadM7RBI4pW4rVo5z4,54
371
- h2ogpte-1.6.53rc1.dist-info/top_level.txt,sha256=vXV4JnNwFWFAqTWyHrH-cGIQqbCcEDG9-BbyNn58JpM,8
372
- h2ogpte-1.6.53rc1.dist-info/RECORD,,
364
+ h2ogpte-1.6.54.dist-info/METADATA,sha256=DTbsDllR7TUzIliR8tpfnFwmLtK050HLrmxNQ_bo3IE,8612
365
+ h2ogpte-1.6.54.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
366
+ h2ogpte-1.6.54.dist-info/entry_points.txt,sha256=BlaqX2SXJanrOGqNYwnzvCxHGNadM7RBI4pW4rVo5z4,54
367
+ h2ogpte-1.6.54.dist-info/top_level.txt,sha256=vXV4JnNwFWFAqTWyHrH-cGIQqbCcEDG9-BbyNn58JpM,8
368
+ h2ogpte-1.6.54.dist-info/RECORD,,
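Net effect of this RECORD hunk: the chat_completion_request, chat_settings, and ingest_from_confluence_body model files shrink, chat_settings_tags.py and tag_filter.py drop out of the wheel entirely, and the dist-info entries move from 1.6.53rc1 to 1.6.54. A minimal, hypothetical compatibility probe for code that has to run against both wheels (module paths are taken from the RECORD entries above; the flag name is our own):

```python
# Hypothetical compatibility probe (not part of the wheel): detect whether the
# installed h2ogpte build still ships the tag-filter models that this release drops.
try:
    from h2ogpte.rest_sync.models.chat_settings_tags import ChatSettingsTags  # gone in 1.6.54
    from h2ogpte.rest_sync.models.tag_filter import TagFilter  # gone in 1.6.54

    HAS_TAG_FILTER_MODELS = True
except ImportError:
    HAS_TAG_FILTER_MODELS = False
```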
h2ogpte/rest_async/models/chat_settings_tags.py DELETED
@@ -1,140 +0,0 @@
1
- # coding: utf-8
2
-
3
- """
4
- h2oGPTe REST API
5
-
6
- # Overview Users can easily interact with the h2oGPTe API through its REST API, allowing HTTP requests from any programming language. ## Authorization: Getting an API key Sign up/in at Enterprise h2oGPTe and generate one of the following two types of API keys: - **Global API key**: If a Collection is not specified when creating a new API Key, that key is considered to be a global API Key. Use global API Keys to grant full user impersonation and system-wide access to all of your work. Anyone with access to one of your global API Keys can create, delete, or interact with any of your past, current, and future Collections, Documents, Chats, and settings. - **Collection-specific API key**: Use Collection-specific API Keys to grant external access to only Chat with a specified Collection and make related API calls to it. Collection-specific API keys do not allow other API calls, such as creation, deletion, or access to other Collections or Chats. Access Enterprise h2oGPTe through your [H2O Generative AI](https://genai.h2o.ai/appstore) app store account, available with a freemium tier. ## Authorization: Using an API key All h2oGPTe REST API requests must include an API Key in the \"Authorization\" HTTP header, formatted as follows: ``` Authorization: Bearer sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ``` ```sh curl -X 'POST' \\ 'https://h2ogpte.genai.h2o.ai/api/v1/collections' \\ -H 'accept: application/json' \\ -H 'Content-Type: application/json' \\ -H 'Authorization: Bearer sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' \\ -d '{ \"name\": \"The name of my Collection\", \"description\": \"The description of my Collection\", \"embedding_model\": \"BAAI/bge-large-en-v1.5\" }' ``` ## Interactive h2oGPTe API testing This page only showcases the h2oGPTe REST API; you can test it directly in the [Swagger UI](https://h2ogpte.genai.h2o.ai/swagger-ui/). Ensure that you are logged into your Enterprise h2oGPTe account.
7
-
8
- The version of the OpenAPI document: v1.0.0
9
- Generated by OpenAPI Generator (https://openapi-generator.tech)
10
-
11
- Do not edit the class manually.
12
- """ # noqa: E501
13
-
14
-
15
- from __future__ import annotations
16
- import json
17
- import pprint
18
- from pydantic import BaseModel, ConfigDict, Field, StrictStr, ValidationError, field_validator
19
- from typing import Any, List, Optional
20
- from h2ogpte.rest_async.models.tag_filter import TagFilter
21
- from pydantic import StrictStr, Field
22
- from typing import Union, List, Set, Optional, Dict
23
- from typing_extensions import Literal, Self
24
-
25
- CHATSETTINGSTAGS_ONE_OF_SCHEMAS = ["List[str]", "TagFilter"]
26
-
27
- class ChatSettingsTags(BaseModel):
28
- """
29
- Filter documents by tags for RAG context. Supports two formats: - Array format (backward compatible): [\"red\", \"blue\"] includes documents with 'red' OR 'blue' tags - Object format (with exclusions): {\"include\": [\"color\"], \"exclude\": [\"red\", \"blue\"]}
30
- """
31
- # data type: List[str]
32
- oneof_schema_1_validator: Optional[List[StrictStr]] = None
33
- # data type: TagFilter
34
- oneof_schema_2_validator: Optional[TagFilter] = None
35
- actual_instance: Optional[Union[List[str], TagFilter]] = None
36
- one_of_schemas: Set[str] = { "List[str]", "TagFilter" }
37
-
38
- model_config = ConfigDict(
39
- validate_assignment=True,
40
- protected_namespaces=(),
41
- )
42
-
43
-
44
- def __init__(self, *args, **kwargs) -> None:
45
- if args:
46
- if len(args) > 1:
47
- raise ValueError("If a position argument is used, only 1 is allowed to set `actual_instance`")
48
- if kwargs:
49
- raise ValueError("If a position argument is used, keyword arguments cannot be used.")
50
- super().__init__(actual_instance=args[0])
51
- else:
52
- super().__init__(**kwargs)
53
-
54
- @field_validator('actual_instance')
55
- def actual_instance_must_validate_oneof(cls, v):
56
- instance = ChatSettingsTags.model_construct()
57
- error_messages = []
58
- match = 0
59
- # validate data type: List[str]
60
- try:
61
- instance.oneof_schema_1_validator = v
62
- match += 1
63
- except (ValidationError, ValueError) as e:
64
- error_messages.append(str(e))
65
- # validate data type: TagFilter
66
- if not isinstance(v, TagFilter):
67
- error_messages.append(f"Error! Input type `{type(v)}` is not `TagFilter`")
68
- else:
69
- match += 1
70
- if match > 1:
71
- # more than 1 match
72
- raise ValueError("Multiple matches found when setting `actual_instance` in ChatSettingsTags with oneOf schemas: List[str], TagFilter. Details: " + ", ".join(error_messages))
73
- elif match == 0:
74
- # no match
75
- raise ValueError("No match found when setting `actual_instance` in ChatSettingsTags with oneOf schemas: List[str], TagFilter. Details: " + ", ".join(error_messages))
76
- else:
77
- return v
78
-
79
- @classmethod
80
- def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self:
81
- return cls.from_json(json.dumps(obj))
82
-
83
- @classmethod
84
- def from_json(cls, json_str: str) -> Self:
85
- """Returns the object represented by the json string"""
86
- instance = cls.model_construct()
87
- error_messages = []
88
- match = 0
89
-
90
- # deserialize data into List[str]
91
- try:
92
- # validation
93
- instance.oneof_schema_1_validator = json.loads(json_str)
94
- # assign value to actual_instance
95
- instance.actual_instance = instance.oneof_schema_1_validator
96
- match += 1
97
- except (ValidationError, ValueError) as e:
98
- error_messages.append(str(e))
99
- # deserialize data into TagFilter
100
- try:
101
- instance.actual_instance = TagFilter.from_json(json_str)
102
- match += 1
103
- except (ValidationError, ValueError) as e:
104
- error_messages.append(str(e))
105
-
106
- if match > 1:
107
- # more than 1 match
108
- raise ValueError("Multiple matches found when deserializing the JSON string into ChatSettingsTags with oneOf schemas: List[str], TagFilter. Details: " + ", ".join(error_messages))
109
- elif match == 0:
110
- # no match
111
- raise ValueError("No match found when deserializing the JSON string into ChatSettingsTags with oneOf schemas: List[str], TagFilter. Details: " + ", ".join(error_messages))
112
- else:
113
- return instance
114
-
115
- def to_json(self) -> str:
116
- """Returns the JSON representation of the actual instance"""
117
- if self.actual_instance is None:
118
- return "null"
119
-
120
- if hasattr(self.actual_instance, "to_json") and callable(self.actual_instance.to_json):
121
- return self.actual_instance.to_json()
122
- else:
123
- return json.dumps(self.actual_instance)
124
-
125
- def to_dict(self) -> Optional[Union[Dict[str, Any], List[str], TagFilter]]:
126
- """Returns the dict representation of the actual instance"""
127
- if self.actual_instance is None:
128
- return None
129
-
130
- if hasattr(self.actual_instance, "to_dict") and callable(self.actual_instance.to_dict):
131
- return self.actual_instance.to_dict()
132
- else:
133
- # primitive type
134
- return self.actual_instance
135
-
136
- def to_str(self) -> str:
137
- """Returns the string representation of the actual instance"""
138
- return pprint.pformat(self.model_dump())
139
-
140
-
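The deleted module above is the generated rest_async ChatSettingsTags wrapper: a pydantic oneOf that accepts either a bare list of tag strings or a TagFilter object, as its validator shows. A minimal sketch of the backward-compatible array form, runnable only against a wheel that still contains the model (e.g. 1.6.53rc1):

```python
# Sketch only: exercises the removed rest_async ChatSettingsTags model as generated
# in 1.6.53rc1; this import no longer resolves against the 1.6.54 wheel.
from h2ogpte.rest_async.models.chat_settings_tags import ChatSettingsTags

# Array format (backward compatible): include documents tagged 'red' OR 'blue'.
tags = ChatSettingsTags(["red", "blue"])

print(tags.actual_instance)  # ['red', 'blue']
print(tags.to_json())        # ["red", "blue"]
```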
h2ogpte/rest_async/models/tag_filter.py DELETED
@@ -1,89 +0,0 @@
1
- # coding: utf-8
2
-
3
- """
4
- h2oGPTe REST API
5
-
6
- # Overview Users can easily interact with the h2oGPTe API through its REST API, allowing HTTP requests from any programming language. ## Authorization: Getting an API key Sign up/in at Enterprise h2oGPTe and generate one of the following two types of API keys: - **Global API key**: If a Collection is not specified when creating a new API Key, that key is considered to be a global API Key. Use global API Keys to grant full user impersonation and system-wide access to all of your work. Anyone with access to one of your global API Keys can create, delete, or interact with any of your past, current, and future Collections, Documents, Chats, and settings. - **Collection-specific API key**: Use Collection-specific API Keys to grant external access to only Chat with a specified Collection and make related API calls to it. Collection-specific API keys do not allow other API calls, such as creation, deletion, or access to other Collections or Chats. Access Enterprise h2oGPTe through your [H2O Generative AI](https://genai.h2o.ai/appstore) app store account, available with a freemium tier. ## Authorization: Using an API key All h2oGPTe REST API requests must include an API Key in the \"Authorization\" HTTP header, formatted as follows: ``` Authorization: Bearer sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ``` ```sh curl -X 'POST' \\ 'https://h2ogpte.genai.h2o.ai/api/v1/collections' \\ -H 'accept: application/json' \\ -H 'Content-Type: application/json' \\ -H 'Authorization: Bearer sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' \\ -d '{ \"name\": \"The name of my Collection\", \"description\": \"The description of my Collection\", \"embedding_model\": \"BAAI/bge-large-en-v1.5\" }' ``` ## Interactive h2oGPTe API testing This page only showcases the h2oGPTe REST API; you can test it directly in the [Swagger UI](https://h2ogpte.genai.h2o.ai/swagger-ui/). Ensure that you are logged into your Enterprise h2oGPTe account.
7
-
8
- The version of the OpenAPI document: v1.0.0
9
- Generated by OpenAPI Generator (https://openapi-generator.tech)
10
-
11
- Do not edit the class manually.
12
- """ # noqa: E501
13
-
14
-
15
- from __future__ import annotations
16
- import pprint
17
- import re # noqa: F401
18
- import json
19
-
20
- from pydantic import BaseModel, ConfigDict, Field, StrictStr
21
- from typing import Any, ClassVar, Dict, List, Optional
22
- from typing import Optional, Set
23
- from typing_extensions import Self
24
-
25
- class TagFilter(BaseModel):
26
- """
27
- Filter for document tags supporting inclusion and exclusion. Note: The exclude list takes priority over the include list. If a document has a tag that appears in both lists, the document will be excluded. Examples: - Include only documents with 'red' OR 'blue' tags: {\"include\": [\"red\", \"blue\"]} - Exclude documents with 'red' OR 'blue' tags: {\"exclude\": [\"red\", \"blue\"]} - Include documents with 'color' tag BUT exclude 'red' and 'blue': {\"include\": [\"color\"], \"exclude\": [\"red\", \"blue\"]}
28
- """ # noqa: E501
29
- include: Optional[List[StrictStr]] = Field(default=None, description="Include documents with ANY of these tags (OR operation).")
30
- exclude: Optional[List[StrictStr]] = Field(default=None, description="Exclude documents with ANY of these tags (OR operation). Takes priority over include.")
31
- __properties: ClassVar[List[str]] = ["include", "exclude"]
32
-
33
- model_config = ConfigDict(
34
- populate_by_name=True,
35
- validate_assignment=True,
36
- protected_namespaces=(),
37
- )
38
-
39
-
40
- def to_str(self) -> str:
41
- """Returns the string representation of the model using alias"""
42
- return pprint.pformat(self.model_dump(by_alias=True))
43
-
44
- def to_json(self) -> str:
45
- """Returns the JSON representation of the model using alias"""
46
- # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
47
- return json.dumps(self.to_dict())
48
-
49
- @classmethod
50
- def from_json(cls, json_str: str) -> Optional[Self]:
51
- """Create an instance of TagFilter from a JSON string"""
52
- return cls.from_dict(json.loads(json_str))
53
-
54
- def to_dict(self) -> Dict[str, Any]:
55
- """Return the dictionary representation of the model using alias.
56
-
57
- This has the following differences from calling pydantic's
58
- `self.model_dump(by_alias=True)`:
59
-
60
- * `None` is only added to the output dict for nullable fields that
61
- were set at model initialization. Other fields with value `None`
62
- are ignored.
63
- """
64
- excluded_fields: Set[str] = set([
65
- ])
66
-
67
- _dict = self.model_dump(
68
- by_alias=True,
69
- exclude=excluded_fields,
70
- exclude_none=True,
71
- )
72
- return _dict
73
-
74
- @classmethod
75
- def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
76
- """Create an instance of TagFilter from a dict"""
77
- if obj is None:
78
- return None
79
-
80
- if not isinstance(obj, dict):
81
- return cls.model_validate(obj)
82
-
83
- _obj = cls.model_validate({
84
- "include": obj.get("include"),
85
- "exclude": obj.get("exclude")
86
- })
87
- return _obj
88
-
89
-
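The TagFilter model deleted above (presumably the rest_async copy, matching the import in the chat_settings_tags module) carries the include/exclude semantics: both lists are OR filters, and exclude wins when a tag appears in both. Continuing the sketch, the object form wraps a TagFilter in ChatSettingsTags, again only against a pre-1.6.54 wheel:

```python
# Sketch only: the object form of the removed tag filtering, per the TagFilter docstring.
from h2ogpte.rest_async.models.chat_settings_tags import ChatSettingsTags
from h2ogpte.rest_async.models.tag_filter import TagFilter

# Documents tagged 'color', minus anything tagged 'red' or 'blue' (exclude overrides include).
flt = TagFilter(include=["color"], exclude=["red", "blue"])
tags = ChatSettingsTags(flt)

print(tags.to_json())  # {"include": ["color"], "exclude": ["red", "blue"]}
```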
h2ogpte/rest_sync/models/chat_settings_tags.py DELETED
@@ -1,140 +0,0 @@
1
- # coding: utf-8
2
-
3
- """
4
- h2oGPTe REST API
5
-
6
- # Overview Users can easily interact with the h2oGPTe API through its REST API, allowing HTTP requests from any programming language. ## Authorization: Getting an API key Sign up/in at Enterprise h2oGPTe and generate one of the following two types of API keys: - **Global API key**: If a Collection is not specified when creating a new API Key, that key is considered to be a global API Key. Use global API Keys to grant full user impersonation and system-wide access to all of your work. Anyone with access to one of your global API Keys can create, delete, or interact with any of your past, current, and future Collections, Documents, Chats, and settings. - **Collection-specific API key**: Use Collection-specific API Keys to grant external access to only Chat with a specified Collection and make related API calls to it. Collection-specific API keys do not allow other API calls, such as creation, deletion, or access to other Collections or Chats. Access Enterprise h2oGPTe through your [H2O Generative AI](https://genai.h2o.ai/appstore) app store account, available with a freemium tier. ## Authorization: Using an API key All h2oGPTe REST API requests must include an API Key in the \"Authorization\" HTTP header, formatted as follows: ``` Authorization: Bearer sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ``` ```sh curl -X 'POST' \\ 'https://h2ogpte.genai.h2o.ai/api/v1/collections' \\ -H 'accept: application/json' \\ -H 'Content-Type: application/json' \\ -H 'Authorization: Bearer sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' \\ -d '{ \"name\": \"The name of my Collection\", \"description\": \"The description of my Collection\", \"embedding_model\": \"BAAI/bge-large-en-v1.5\" }' ``` ## Interactive h2oGPTe API testing This page only showcases the h2oGPTe REST API; you can test it directly in the [Swagger UI](https://h2ogpte.genai.h2o.ai/swagger-ui/). Ensure that you are logged into your Enterprise h2oGPTe account.
7
-
8
- The version of the OpenAPI document: v1.0.0
9
- Generated by OpenAPI Generator (https://openapi-generator.tech)
10
-
11
- Do not edit the class manually.
12
- """ # noqa: E501
13
-
14
-
15
- from __future__ import annotations
16
- import json
17
- import pprint
18
- from pydantic import BaseModel, ConfigDict, Field, StrictStr, ValidationError, field_validator
19
- from typing import Any, List, Optional
20
- from h2ogpte.rest_sync.models.tag_filter import TagFilter
21
- from pydantic import StrictStr, Field
22
- from typing import Union, List, Set, Optional, Dict
23
- from typing_extensions import Literal, Self
24
-
25
- CHATSETTINGSTAGS_ONE_OF_SCHEMAS = ["List[str]", "TagFilter"]
26
-
27
- class ChatSettingsTags(BaseModel):
28
- """
29
- Filter documents by tags for RAG context. Supports two formats: - Array format (backward compatible): [\"red\", \"blue\"] includes documents with 'red' OR 'blue' tags - Object format (with exclusions): {\"include\": [\"color\"], \"exclude\": [\"red\", \"blue\"]}
30
- """
31
- # data type: List[str]
32
- oneof_schema_1_validator: Optional[List[StrictStr]] = None
33
- # data type: TagFilter
34
- oneof_schema_2_validator: Optional[TagFilter] = None
35
- actual_instance: Optional[Union[List[str], TagFilter]] = None
36
- one_of_schemas: Set[str] = { "List[str]", "TagFilter" }
37
-
38
- model_config = ConfigDict(
39
- validate_assignment=True,
40
- protected_namespaces=(),
41
- )
42
-
43
-
44
- def __init__(self, *args, **kwargs) -> None:
45
- if args:
46
- if len(args) > 1:
47
- raise ValueError("If a position argument is used, only 1 is allowed to set `actual_instance`")
48
- if kwargs:
49
- raise ValueError("If a position argument is used, keyword arguments cannot be used.")
50
- super().__init__(actual_instance=args[0])
51
- else:
52
- super().__init__(**kwargs)
53
-
54
- @field_validator('actual_instance')
55
- def actual_instance_must_validate_oneof(cls, v):
56
- instance = ChatSettingsTags.model_construct()
57
- error_messages = []
58
- match = 0
59
- # validate data type: List[str]
60
- try:
61
- instance.oneof_schema_1_validator = v
62
- match += 1
63
- except (ValidationError, ValueError) as e:
64
- error_messages.append(str(e))
65
- # validate data type: TagFilter
66
- if not isinstance(v, TagFilter):
67
- error_messages.append(f"Error! Input type `{type(v)}` is not `TagFilter`")
68
- else:
69
- match += 1
70
- if match > 1:
71
- # more than 1 match
72
- raise ValueError("Multiple matches found when setting `actual_instance` in ChatSettingsTags with oneOf schemas: List[str], TagFilter. Details: " + ", ".join(error_messages))
73
- elif match == 0:
74
- # no match
75
- raise ValueError("No match found when setting `actual_instance` in ChatSettingsTags with oneOf schemas: List[str], TagFilter. Details: " + ", ".join(error_messages))
76
- else:
77
- return v
78
-
79
- @classmethod
80
- def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self:
81
- return cls.from_json(json.dumps(obj))
82
-
83
- @classmethod
84
- def from_json(cls, json_str: str) -> Self:
85
- """Returns the object represented by the json string"""
86
- instance = cls.model_construct()
87
- error_messages = []
88
- match = 0
89
-
90
- # deserialize data into List[str]
91
- try:
92
- # validation
93
- instance.oneof_schema_1_validator = json.loads(json_str)
94
- # assign value to actual_instance
95
- instance.actual_instance = instance.oneof_schema_1_validator
96
- match += 1
97
- except (ValidationError, ValueError) as e:
98
- error_messages.append(str(e))
99
- # deserialize data into TagFilter
100
- try:
101
- instance.actual_instance = TagFilter.from_json(json_str)
102
- match += 1
103
- except (ValidationError, ValueError) as e:
104
- error_messages.append(str(e))
105
-
106
- if match > 1:
107
- # more than 1 match
108
- raise ValueError("Multiple matches found when deserializing the JSON string into ChatSettingsTags with oneOf schemas: List[str], TagFilter. Details: " + ", ".join(error_messages))
109
- elif match == 0:
110
- # no match
111
- raise ValueError("No match found when deserializing the JSON string into ChatSettingsTags with oneOf schemas: List[str], TagFilter. Details: " + ", ".join(error_messages))
112
- else:
113
- return instance
114
-
115
- def to_json(self) -> str:
116
- """Returns the JSON representation of the actual instance"""
117
- if self.actual_instance is None:
118
- return "null"
119
-
120
- if hasattr(self.actual_instance, "to_json") and callable(self.actual_instance.to_json):
121
- return self.actual_instance.to_json()
122
- else:
123
- return json.dumps(self.actual_instance)
124
-
125
- def to_dict(self) -> Optional[Union[Dict[str, Any], List[str], TagFilter]]:
126
- """Returns the dict representation of the actual instance"""
127
- if self.actual_instance is None:
128
- return None
129
-
130
- if hasattr(self.actual_instance, "to_dict") and callable(self.actual_instance.to_dict):
131
- return self.actual_instance.to_dict()
132
- else:
133
- # primitive type
134
- return self.actual_instance
135
-
136
- def to_str(self) -> str:
137
- """Returns the string representation of the actual instance"""
138
- return pprint.pformat(self.model_dump())
139
-
140
-
h2ogpte/rest_sync/models/tag_filter.py DELETED
@@ -1,89 +0,0 @@
1
- # coding: utf-8
2
-
3
- """
4
- h2oGPTe REST API
5
-
6
- # Overview Users can easily interact with the h2oGPTe API through its REST API, allowing HTTP requests from any programming language. ## Authorization: Getting an API key Sign up/in at Enterprise h2oGPTe and generate one of the following two types of API keys: - **Global API key**: If a Collection is not specified when creating a new API Key, that key is considered to be a global API Key. Use global API Keys to grant full user impersonation and system-wide access to all of your work. Anyone with access to one of your global API Keys can create, delete, or interact with any of your past, current, and future Collections, Documents, Chats, and settings. - **Collection-specific API key**: Use Collection-specific API Keys to grant external access to only Chat with a specified Collection and make related API calls to it. Collection-specific API keys do not allow other API calls, such as creation, deletion, or access to other Collections or Chats. Access Enterprise h2oGPTe through your [H2O Generative AI](https://genai.h2o.ai/appstore) app store account, available with a freemium tier. ## Authorization: Using an API key All h2oGPTe REST API requests must include an API Key in the \"Authorization\" HTTP header, formatted as follows: ``` Authorization: Bearer sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ``` ```sh curl -X 'POST' \\ 'https://h2ogpte.genai.h2o.ai/api/v1/collections' \\ -H 'accept: application/json' \\ -H 'Content-Type: application/json' \\ -H 'Authorization: Bearer sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' \\ -d '{ \"name\": \"The name of my Collection\", \"description\": \"The description of my Collection\", \"embedding_model\": \"BAAI/bge-large-en-v1.5\" }' ``` ## Interactive h2oGPTe API testing This page only showcases the h2oGPTe REST API; you can test it directly in the [Swagger UI](https://h2ogpte.genai.h2o.ai/swagger-ui/). Ensure that you are logged into your Enterprise h2oGPTe account.
7
-
8
- The version of the OpenAPI document: v1.0.0
9
- Generated by OpenAPI Generator (https://openapi-generator.tech)
10
-
11
- Do not edit the class manually.
12
- """ # noqa: E501
13
-
14
-
15
- from __future__ import annotations
16
- import pprint
17
- import re # noqa: F401
18
- import json
19
-
20
- from pydantic import BaseModel, ConfigDict, Field, StrictStr
21
- from typing import Any, ClassVar, Dict, List, Optional
22
- from typing import Optional, Set
23
- from typing_extensions import Self
24
-
25
- class TagFilter(BaseModel):
26
- """
27
- Filter for document tags supporting inclusion and exclusion. Note: The exclude list takes priority over the include list. If a document has a tag that appears in both lists, the document will be excluded. Examples: - Include only documents with 'red' OR 'blue' tags: {\"include\": [\"red\", \"blue\"]} - Exclude documents with 'red' OR 'blue' tags: {\"exclude\": [\"red\", \"blue\"]} - Include documents with 'color' tag BUT exclude 'red' and 'blue': {\"include\": [\"color\"], \"exclude\": [\"red\", \"blue\"]}
28
- """ # noqa: E501
29
- include: Optional[List[StrictStr]] = Field(default=None, description="Include documents with ANY of these tags (OR operation).")
30
- exclude: Optional[List[StrictStr]] = Field(default=None, description="Exclude documents with ANY of these tags (OR operation). Takes priority over include.")
31
- __properties: ClassVar[List[str]] = ["include", "exclude"]
32
-
33
- model_config = ConfigDict(
34
- populate_by_name=True,
35
- validate_assignment=True,
36
- protected_namespaces=(),
37
- )
38
-
39
-
40
- def to_str(self) -> str:
41
- """Returns the string representation of the model using alias"""
42
- return pprint.pformat(self.model_dump(by_alias=True))
43
-
44
- def to_json(self) -> str:
45
- """Returns the JSON representation of the model using alias"""
46
- # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
47
- return json.dumps(self.to_dict())
48
-
49
- @classmethod
50
- def from_json(cls, json_str: str) -> Optional[Self]:
51
- """Create an instance of TagFilter from a JSON string"""
52
- return cls.from_dict(json.loads(json_str))
53
-
54
- def to_dict(self) -> Dict[str, Any]:
55
- """Return the dictionary representation of the model using alias.
56
-
57
- This has the following differences from calling pydantic's
58
- `self.model_dump(by_alias=True)`:
59
-
60
- * `None` is only added to the output dict for nullable fields that
61
- were set at model initialization. Other fields with value `None`
62
- are ignored.
63
- """
64
- excluded_fields: Set[str] = set([
65
- ])
66
-
67
- _dict = self.model_dump(
68
- by_alias=True,
69
- exclude=excluded_fields,
70
- exclude_none=True,
71
- )
72
- return _dict
73
-
74
- @classmethod
75
- def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
76
- """Create an instance of TagFilter from a dict"""
77
- if obj is None:
78
- return None
79
-
80
- if not isinstance(obj, dict):
81
- return cls.model_validate(obj)
82
-
83
- _obj = cls.model_validate({
84
- "include": obj.get("include"),
85
- "exclude": obj.get("exclude")
86
- })
87
- return _obj
88
-
89
-
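The last two deleted modules are the rest_sync copies of the same pair, identical apart from the tag_filter import path. In payload terms, removing the ChatSettingsTags oneOf takes away the choice between a bare array and an include/exclude object for tag filtering; the shapes below restate the docstring examples (wrapping them under a "tags" chat-settings key is our assumption from the model name, not something this diff shows):

```python
import json

# The two payload shapes the removed oneOf accepted, per the ChatSettingsTags docstring.
array_form = ["red", "blue"]                                      # 'red' OR 'blue'
object_form = {"include": ["color"], "exclude": ["red", "blue"]}  # exclude overrides include

# Hypothetical embedding under a "tags" chat-settings key.
print(json.dumps({"tags": array_form}))
print(json.dumps({"tags": object_form}))
```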