h2ogpte 1.6.52__py3-none-any.whl → 1.6.53rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- h2ogpte/__init__.py +1 -1
- h2ogpte/h2ogpte.py +6 -0
- h2ogpte/h2ogpte_async.py +6 -0
- h2ogpte/rest_async/__init__.py +3 -1
- h2ogpte/rest_async/api_client.py +1 -1
- h2ogpte/rest_async/configuration.py +1 -1
- h2ogpte/rest_async/models/__init__.py +2 -0
- h2ogpte/rest_async/models/chat_completion_request.py +6 -2
- h2ogpte/rest_async/models/chat_settings.py +6 -2
- h2ogpte/rest_async/models/chat_settings_tags.py +140 -0
- h2ogpte/rest_async/models/ingest_from_confluence_body.py +4 -2
- h2ogpte/rest_async/models/tag_filter.py +89 -0
- h2ogpte/rest_sync/__init__.py +3 -1
- h2ogpte/rest_sync/api_client.py +1 -1
- h2ogpte/rest_sync/configuration.py +1 -1
- h2ogpte/rest_sync/models/__init__.py +2 -0
- h2ogpte/rest_sync/models/chat_completion_request.py +6 -2
- h2ogpte/rest_sync/models/chat_settings.py +6 -2
- h2ogpte/rest_sync/models/chat_settings_tags.py +140 -0
- h2ogpte/rest_sync/models/ingest_from_confluence_body.py +4 -2
- h2ogpte/rest_sync/models/tag_filter.py +89 -0
- h2ogpte/session.py +10 -6
- h2ogpte/session_async.py +8 -3
- h2ogpte/types.py +16 -1
- {h2ogpte-1.6.52.dist-info → h2ogpte-1.6.53rc1.dist-info}/METADATA +1 -1
- {h2ogpte-1.6.52.dist-info → h2ogpte-1.6.53rc1.dist-info}/RECORD +29 -25
- {h2ogpte-1.6.52.dist-info → h2ogpte-1.6.53rc1.dist-info}/WHEEL +0 -0
- {h2ogpte-1.6.52.dist-info → h2ogpte-1.6.53rc1.dist-info}/entry_points.txt +0 -0
- {h2ogpte-1.6.52.dist-info → h2ogpte-1.6.53rc1.dist-info}/top_level.txt +0 -0
h2ogpte/__init__.py
CHANGED
h2ogpte/h2ogpte.py
CHANGED
@@ -2496,6 +2496,7 @@ class H2OGPTE(H2OGPTESyncBase):
         base_url: str,
         page_id: Union[str, List[str]],
         credentials: ConfluenceCredential,
+        include_attachments: Union[bool, None] = None,
         gen_doc_summaries: Union[bool, None] = None,
         gen_doc_questions: Union[bool, None] = None,
         audio_input_language: Union[str, None] = None,

@@ -2519,6 +2520,8 @@ class H2OGPTE(H2OGPTESyncBase):
                 The page id or ids of pages to be ingested.
             credentials:
                 The object with Confluence credentials.
+            include_attachments:
+                A flag indicating whether to also ingest attachments with the page.
             gen_doc_summaries:
                 Whether to auto-generate document summaries (uses LLM)
             gen_doc_questions:

@@ -2560,6 +2563,7 @@ class H2OGPTE(H2OGPTESyncBase):
                 page_ids=[page_id] if isinstance(page_id, str) else page_id,
                 credentials=rest.ConfluenceCredentials(**credentials.__dict__),
                 metadata=metadata,
+                include_attachments=include_attachments,
             ),
             gen_doc_summaries=gen_doc_summaries,
             gen_doc_questions=gen_doc_questions,

@@ -4514,6 +4518,8 @@ class H2OGPTE(H2OGPTESyncBase):
                 Requires 1 LLM or Agent call.
             :code:`"agent_only"` Agent Only - Answer the query with only original files passed to agent.
                 Requires 1 Agent call.
+            :code:`"agentic_rag"` Agentic RAG - Agent with RAG tool that retrieves and answers from collection.
+                Requires 1 Agent call with RAG tool execution.
             :code:`"rag"` RAG (Retrieval Augmented Generation) - Use supporting document contexts
                 to answer the query. Requires 1 LLM or Agent call.
             :code:`"hyde1"` LLM Only + RAG composite - HyDE RAG (Hypothetical Document Embedding).
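For reference, a minimal sketch of the new flag in use with the synchronous client. This is illustrative only: it assumes the method keeps its existing ingest_from_confluence name and leading arguments from 1.6.52, and the collection id and credentials object are placeholders built as in previous releases.

    from h2ogpte import H2OGPTE

    client = H2OGPTE(address="https://h2ogpte.genai.h2o.ai", api_key="sk-XXXX")

    # credentials: a ConfluenceCredential object, constructed exactly as before this release
    client.ingest_from_confluence(
        collection_id=collection_id,                      # assumed: same leading arguments as in 1.6.52
        base_url="https://example.atlassian.net/wiki",
        page_id=["123456", "654321"],
        credentials=credentials,
        include_attachments=True,                         # new: also ingest page attachments (SDK default None, REST default False)
    )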
h2ogpte/h2ogpte_async.py
CHANGED
@@ -2698,6 +2698,7 @@ class H2OGPTEAsync:
         base_url: str,
         page_id: Union[str, List[str]],
         credentials: ConfluenceCredential,
+        include_attachments: Union[bool, None] = None,
         gen_doc_summaries: Union[bool, None] = None,
         gen_doc_questions: Union[bool, None] = None,
         audio_input_language: Union[str, None] = None,

@@ -2721,6 +2722,8 @@ class H2OGPTEAsync:
                 The page id or ids of pages to be ingested.
             credentials:
                 The object with Confluence credentials.
+            include_attachments:
+                A flag indicating whether to also ingest attachments with the page.
             gen_doc_summaries:
                 Whether to auto-generate document summaries (uses LLM)
             gen_doc_questions:

@@ -2762,6 +2765,7 @@ class H2OGPTEAsync:
                 page_ids=[page_id] if isinstance(page_id, str) else page_id,
                 credentials=rest.ConfluenceCredentials(**credentials.__dict__),
                 metadata=metadata,
+                include_attachments=include_attachments,
             ),
             gen_doc_summaries=gen_doc_summaries,
             gen_doc_questions=gen_doc_questions,

@@ -4722,6 +4726,8 @@ class H2OGPTEAsync:
                 Requires 1 LLM or Agent call.
             :code:`"agent_only"` Agent Only - Answer the query with only original files passed to agent.
                 Requires 1 Agent call.
+            :code:`"agentic_rag"` Agentic RAG - Agent with RAG tool that retrieves and answers from collection.
+                Requires 1 Agent call with RAG tool execution.
             :code:`"rag"` RAG (Retrieval Augmented Generation) - Use supporting document contexts
                 to answer the query. Requires 1 LLM or Agent call.
             :code:`"hyde1"` LLM Only + RAG composite - HyDE RAG (Hypothetical Document Embedding).
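The new :code:`"agentic_rag"` value is only documented in these docstrings; the diff does not show the call site. A rough sketch of where it would be supplied, assuming the usual pattern of passing rag_config={"rag_type": ...} to a chat session query is unchanged in this release:

    import asyncio
    from h2ogpte import H2OGPTEAsync

    async def ask(chat_session_id: str) -> None:
        client = H2OGPTEAsync(address="https://h2ogpte.genai.h2o.ai", api_key="sk-XXXX")
        async with client.connect(chat_session_id) as session:
            reply = await session.query(
                "What do the ingested Confluence pages say about release planning?",
                rag_config={"rag_type": "agentic_rag"},  # new option: one Agent call that drives a RAG tool over the collection
            )
            print(reply.content)

    asyncio.run(ask("my-chat-session-id"))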
h2ogpte/rest_async/__init__.py
CHANGED
@@ -14,7 +14,7 @@
 """ # noqa: E501


-__version__ = "1.6.
+__version__ = "1.6.53-dev1"

 # import apis into sdk package
 from h2ogpte.rest_async.api.api_keys_api import APIKeysApi

@@ -69,6 +69,7 @@ from h2ogpte.rest_async.models.chat_message_reference import ChatMessageReference
 from h2ogpte.rest_async.models.chat_session import ChatSession
 from h2ogpte.rest_async.models.chat_session_update_request import ChatSessionUpdateRequest
 from h2ogpte.rest_async.models.chat_settings import ChatSettings
+from h2ogpte.rest_async.models.chat_settings_tags import ChatSettingsTags
 from h2ogpte.rest_async.models.chunk import Chunk
 from h2ogpte.rest_async.models.chunk_search_result import ChunkSearchResult
 from h2ogpte.rest_async.models.collection import Collection

@@ -158,6 +159,7 @@ from h2ogpte.rest_async.models.suggested_question import SuggestedQuestion
 from h2ogpte.rest_async.models.summarize_request import SummarizeRequest
 from h2ogpte.rest_async.models.tag import Tag
 from h2ogpte.rest_async.models.tag_create_request import TagCreateRequest
+from h2ogpte.rest_async.models.tag_filter import TagFilter
 from h2ogpte.rest_async.models.tag_update_request import TagUpdateRequest
 from h2ogpte.rest_async.models.update_agent_key_request import UpdateAgentKeyRequest
 from h2ogpte.rest_async.models.update_agent_tool_preference_request import UpdateAgentToolPreferenceRequest
h2ogpte/rest_async/api_client.py
CHANGED
@@ -90,7 +90,7 @@ class ApiClient:
         self.default_headers[header_name] = header_value
         self.cookie = cookie
         # Set default User-Agent.
-        self.user_agent = 'OpenAPI-Generator/1.6.
+        self.user_agent = 'OpenAPI-Generator/1.6.53-dev1/python'
         self.client_side_validation = configuration.client_side_validation

     async def __aenter__(self):

h2ogpte/rest_async/configuration.py
CHANGED

@@ -499,7 +499,7 @@ class Configuration:
                "OS: {env}\n"\
                "Python Version: {pyversion}\n"\
                "Version of the API: v1.0.0\n"\
-               "SDK Package Version: 1.6.
+               "SDK Package Version: 1.6.53-dev1".\
                format(env=sys.platform, pyversion=sys.version)

     def get_host_settings(self) -> List[HostSetting]:

h2ogpte/rest_async/models/__init__.py
CHANGED

@@ -38,6 +38,7 @@ from h2ogpte.rest_async.models.chat_message_reference import ChatMessageReference
 from h2ogpte.rest_async.models.chat_session import ChatSession
 from h2ogpte.rest_async.models.chat_session_update_request import ChatSessionUpdateRequest
 from h2ogpte.rest_async.models.chat_settings import ChatSettings
+from h2ogpte.rest_async.models.chat_settings_tags import ChatSettingsTags
 from h2ogpte.rest_async.models.chunk import Chunk
 from h2ogpte.rest_async.models.chunk_search_result import ChunkSearchResult
 from h2ogpte.rest_async.models.collection import Collection

@@ -127,6 +128,7 @@ from h2ogpte.rest_async.models.suggested_question import SuggestedQuestion
 from h2ogpte.rest_async.models.summarize_request import SummarizeRequest
 from h2ogpte.rest_async.models.tag import Tag
 from h2ogpte.rest_async.models.tag_create_request import TagCreateRequest
+from h2ogpte.rest_async.models.tag_filter import TagFilter
 from h2ogpte.rest_async.models.tag_update_request import TagUpdateRequest
 from h2ogpte.rest_async.models.update_agent_key_request import UpdateAgentKeyRequest
 from h2ogpte.rest_async.models.update_agent_tool_preference_request import UpdateAgentToolPreferenceRequest

h2ogpte/rest_async/models/chat_completion_request.py
CHANGED

@@ -19,6 +19,7 @@ import json

 from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr, field_validator
 from typing import Any, ClassVar, Dict, List, Optional
+from h2ogpte.rest_async.models.chat_settings_tags import ChatSettingsTags
 from typing import Optional, Set
 from typing_extensions import Self

@@ -37,7 +38,7 @@ class ChatCompletionRequest(BaseModel):
     self_reflection_config: Optional[Dict[str, Any]] = Field(default=None, description="A map with self reflection settings: * `llm_reflection` **(type=string, example=gpt-4-0613)** * `prompt_reflection` **(type=string, example=\\\"\\\"\\\"Prompt:\\\\\\\\n%s\\\\\\\\n\\\"\\\"\\\"\\\\\\\\n\\\\\\\\n\\\"\\\"\\\")** * `system_prompt_reflection` **(type=string)** * `llm_args_reflection` **(type=string, example={})** ")
     rag_config: Optional[Dict[str, Any]] = Field(default=None, description="A map with arguments to control RAG (retrieval-augmented-generation) types.: * `rag_type` **(type=enum[auto, llm_only, rag, hyde1, hyde2, rag+, all_data])** RAG type options: * `auto` - Automatically select the best rag_type. * `llm_only` LLM Only - Answer the query without any supporting document contexts. Requires 1 LLM call. * `rag` RAG (Retrieval Augmented Generation) - Use supporting document contexts to answer the query. Requires 1 LLM call. * `hyde1` LLM Only + RAG composite - HyDE RAG (Hypothetical Document Embedding). Use 'LLM Only' response to find relevant contexts from a collection for generating a response. Requires 2 LLM calls. * `hyde2` HyDE + RAG composite - Use the 'HyDE RAG' response to find relevant contexts from a collection for generating a response. Requires 3 LLM calls. * `rag+` Summary RAG - Like RAG, but uses more context and recursive summarization to overcome LLM context limits. Keeps all retrieved chunks, puts them in order, adds neighboring chunks, then uses the summary API to get the answer. Can require several LLM calls. * `all_data` All Data RAG - Like Summary RAG, but includes all document chunks. Uses recursive summarization to overcome LLM context limits. Can require several LLM calls. * `hyde_no_rag_llm_prompt_extension` **(type=string, example=\\\\\\\\nKeep the answer brief, and list the 5 most relevant key words at the end.)** - Add this prompt to every user's prompt, when generating answers to be used for subsequent retrieval during HyDE. Only used when rag_type is `hyde1` or `hyde2`. * `num_neighbor_chunks_to_include` **(type=integer, default=1)** - A number of neighboring chunks to include for every retrieved relevant chunk. It helps to keep surrounding context together. Only enabled for rag_type `rag+`. * `meta_data_to_include` **(type=map)** - A map with flags that indicate whether each piece of document metadata is to be included as part of the context for a chat with a collection. * `name` **(type: boolean, default=True)** * `text` **(type: boolean, default=True)** * `page` **(type: boolean, default=True)** * `captions` **(type: boolean, default=True)** * `uri` **(type: boolean, default=False)** * `connector` **(type: boolean, default=False)** * `original_mtime` **(type: boolean, default=False)** * `age` **(type: boolean, default=False)** * `score` **(type: boolean, default=False)** * `rag_max_chunks` **(type=integer, default=-1)** - Maximum number of document chunks to retrieve for RAG. Actual number depends on rag_type and admin configuration. Set to >0 values to enable. Can be combined with rag_min_chunk_score. * `rag_min_chunk_score` **(type=double, default=0.0)** - Minimum score of document chunks to retrieve for RAG. Set to >0 values to enable. Can be combined with rag_max_chunks. ")
     include_chat_history: Optional[StrictStr] = Field(default=None, description="Whether to include chat history. Includes previous questions and answers for the current chat session for each new chat request. Disable if require deterministic answers for a given question.")
-    tags: Optional[
+    tags: Optional[ChatSettingsTags] = None
     __properties: ClassVar[List[str]] = ["message", "system_prompt", "pre_prompt_query", "prompt_query", "image_batch_final_prompt", "image_batch_image_prompt", "llm", "llm_args", "self_reflection_config", "rag_config", "include_chat_history", "tags"]

     @field_validator('include_chat_history')

@@ -89,6 +90,9 @@ class ChatCompletionRequest(BaseModel):
             exclude=excluded_fields,
             exclude_none=True,
         )
+        # override the default output from pydantic by calling `to_dict()` of tags
+        if self.tags:
+            _dict['tags'] = self.tags.to_dict()
         return _dict

     @classmethod

@@ -112,7 +116,7 @@ class ChatCompletionRequest(BaseModel):
             "self_reflection_config": obj.get("self_reflection_config"),
             "rag_config": obj.get("rag_config"),
             "include_chat_history": obj.get("include_chat_history"),
-            "tags": obj.get("tags"),
+            "tags": ChatSettingsTags.from_dict(obj["tags"]) if obj.get("tags") is not None else None,
         })
         return _obj

h2ogpte/rest_async/models/chat_settings.py
CHANGED

@@ -19,6 +19,7 @@ import json

 from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator
 from typing import Any, ClassVar, Dict, List, Optional
+from h2ogpte.rest_async.models.chat_settings_tags import ChatSettingsTags
 from typing import Optional, Set
 from typing_extensions import Self

@@ -31,7 +32,7 @@ class ChatSettings(BaseModel):
     self_reflection_config: Optional[Dict[str, Any]] = Field(default=None, description="A map with self reflection settings: * `llm_reflection` **(type=string, example=gpt-4-0613)** * `prompt_reflection` **(type=string, example=\\\"\\\"\\\"Prompt:\\\\\\\\n%s\\\\\\\\n\\\"\\\"\\\"\\\\\\\\n\\\\\\\\n\\\"\\\"\\\")** * `system_prompt_reflection` **(type=string)** * `llm_args_reflection` **(type=string, example={})** ")
     rag_config: Optional[Dict[str, Any]] = Field(default=None, description="A map with arguments to control RAG (retrieval-augmented-generation) types.: * `rag_type` **(type=enum[auto, llm_only, rag, hyde1, hyde2, rag+, all_data])** RAG type options: * `auto` - Automatically select the best rag_type. * `llm_only` LLM Only - Answer the query without any supporting document contexts. Requires 1 LLM call. * `rag` RAG (Retrieval Augmented Generation) - Use supporting document contexts to answer the query. Requires 1 LLM call. * `hyde1` LLM Only + RAG composite - HyDE RAG (Hypothetical Document Embedding). Use 'LLM Only' response to find relevant contexts from a collection for generating a response. Requires 2 LLM calls. * `hyde2` HyDE + RAG composite - Use the 'HyDE RAG' response to find relevant contexts from a collection for generating a response. Requires 3 LLM calls. * `rag+` Summary RAG - Like RAG, but uses more context and recursive summarization to overcome LLM context limits. Keeps all retrieved chunks, puts them in order, adds neighboring chunks, then uses the summary API to get the answer. Can require several LLM calls. * `all_data` All Data RAG - Like Summary RAG, but includes all document chunks. Uses recursive summarization to overcome LLM context limits. Can require several LLM calls. * `hyde_no_rag_llm_prompt_extension` **(type=string, example=\\\\\\\\nKeep the answer brief, and list the 5 most relevant key words at the end.)** - Add this prompt to every user's prompt, when generating answers to be used for subsequent retrieval during HyDE. Only used when rag_type is `hyde1` or `hyde2`. * `num_neighbor_chunks_to_include` **(type=integer, default=1)** - A number of neighboring chunks to include for every retrieved relevant chunk. It helps to keep surrounding context together. Only enabled for rag_type `rag+`. * `meta_data_to_include` **(type=map)** - A map with flags that indicate whether each piece of document metadata is to be included as part of the context for a chat with a collection. * `name` **(type: boolean, default=True)** * `text` **(type: boolean, default=True)** * `page` **(type: boolean, default=True)** * `captions` **(type: boolean, default=True)** * `uri` **(type: boolean, default=False)** * `connector` **(type: boolean, default=False)** * `original_mtime` **(type: boolean, default=False)** * `age` **(type: boolean, default=False)** * `score` **(type: boolean, default=False)** * `rag_max_chunks` **(type=integer, default=-1)** - Maximum number of document chunks to retrieve for RAG. Actual number depends on rag_type and admin configuration. Set to >0 values to enable. Can be combined with rag_min_chunk_score. * `rag_min_chunk_score` **(type=double, default=0.0)** - Minimum score of document chunks to retrieve for RAG. Set to >0 values to enable. Can be combined with rag_max_chunks. ")
     include_chat_history: Optional[StrictStr] = Field(default=None, description="Whether to include chat history. Includes previous questions and answers for the current chat session for each new chat request. Disable if require deterministic answers for a given question.")
-    tags: Optional[
+    tags: Optional[ChatSettingsTags] = None
     __properties: ClassVar[List[str]] = ["llm", "llm_args", "self_reflection_config", "rag_config", "include_chat_history", "tags"]

     @field_validator('include_chat_history')

@@ -83,6 +84,9 @@ class ChatSettings(BaseModel):
             exclude=excluded_fields,
             exclude_none=True,
         )
+        # override the default output from pydantic by calling `to_dict()` of tags
+        if self.tags:
+            _dict['tags'] = self.tags.to_dict()
         return _dict

     @classmethod

@@ -100,7 +104,7 @@ class ChatSettings(BaseModel):
             "self_reflection_config": obj.get("self_reflection_config"),
             "rag_config": obj.get("rag_config"),
             "include_chat_history": obj.get("include_chat_history"),
-            "tags": obj.get("tags")
+            "tags": ChatSettingsTags.from_dict(obj["tags"]) if obj.get("tags") is not None else None
         })
         return _obj
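Because from_dict() now routes the tags key through ChatSettingsTags, both payload shapes deserialize cleanly. A rough round-trip sketch; the minimal payload below assumes message is the only required field and everything else may be left at its default:

    from h2ogpte.rest_async.models.chat_completion_request import ChatCompletionRequest

    req = ChatCompletionRequest.from_dict({
        "message": "What changed in the latest release?",
        "tags": {"include": ["release-notes"], "exclude": ["draft"]},  # or simply ["release-notes"]
    })
    print(req.tags.to_dict())  # {'include': ['release-notes'], 'exclude': ['draft']}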
h2ogpte/rest_async/models/chat_settings_tags.py
ADDED

@@ -0,0 +1,140 @@
+# coding: utf-8
+
+"""
+    h2oGPTe REST API
+
+    # Overview Users can easily interact with the h2oGPTe API through its REST API, allowing HTTP requests from any programming language. ## Authorization: Getting an API key Sign up/in at Enterprise h2oGPTe and generate one of the following two types of API keys: - **Global API key**: If a Collection is not specified when creating a new API Key, that key is considered to be a global API Key. Use global API Keys to grant full user impersonation and system-wide access to all of your work. Anyone with access to one of your global API Keys can create, delete, or interact with any of your past, current, and future Collections, Documents, Chats, and settings. - **Collection-specific API key**: Use Collection-specific API Keys to grant external access to only Chat with a specified Collection and make related API calls to it. Collection-specific API keys do not allow other API calls, such as creation, deletion, or access to other Collections or Chats. Access Enterprise h2oGPTe through your [H2O Generative AI](https://genai.h2o.ai/appstore) app store account, available with a freemium tier. ## Authorization: Using an API key All h2oGPTe REST API requests must include an API Key in the \"Authorization\" HTTP header, formatted as follows: ``` Authorization: Bearer sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ``` ```sh curl -X 'POST' \\ 'https://h2ogpte.genai.h2o.ai/api/v1/collections' \\ -H 'accept: application/json' \\ -H 'Content-Type: application/json' \\ -H 'Authorization: Bearer sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' \\ -d '{ \"name\": \"The name of my Collection\", \"description\": \"The description of my Collection\", \"embedding_model\": \"BAAI/bge-large-en-v1.5\" }' ``` ## Interactive h2oGPTe API testing This page only showcases the h2oGPTe REST API; you can test it directly in the [Swagger UI](https://h2ogpte.genai.h2o.ai/swagger-ui/). Ensure that you are logged into your Enterprise h2oGPTe account.
+
+    The version of the OpenAPI document: v1.0.0
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import json
+import pprint
+from pydantic import BaseModel, ConfigDict, Field, StrictStr, ValidationError, field_validator
+from typing import Any, List, Optional
+from h2ogpte.rest_async.models.tag_filter import TagFilter
+from pydantic import StrictStr, Field
+from typing import Union, List, Set, Optional, Dict
+from typing_extensions import Literal, Self
+
+CHATSETTINGSTAGS_ONE_OF_SCHEMAS = ["List[str]", "TagFilter"]
+
+class ChatSettingsTags(BaseModel):
+    """
+    Filter documents by tags for RAG context. Supports two formats: - Array format (backward compatible): [\"red\", \"blue\"] includes documents with 'red' OR 'blue' tags - Object format (with exclusions): {\"include\": [\"color\"], \"exclude\": [\"red\", \"blue\"]}
+    """
+    # data type: List[str]
+    oneof_schema_1_validator: Optional[List[StrictStr]] = None
+    # data type: TagFilter
+    oneof_schema_2_validator: Optional[TagFilter] = None
+    actual_instance: Optional[Union[List[str], TagFilter]] = None
+    one_of_schemas: Set[str] = { "List[str]", "TagFilter" }
+
+    model_config = ConfigDict(
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def __init__(self, *args, **kwargs) -> None:
+        if args:
+            if len(args) > 1:
+                raise ValueError("If a position argument is used, only 1 is allowed to set `actual_instance`")
+            if kwargs:
+                raise ValueError("If a position argument is used, keyword arguments cannot be used.")
+            super().__init__(actual_instance=args[0])
+        else:
+            super().__init__(**kwargs)
+
+    @field_validator('actual_instance')
+    def actual_instance_must_validate_oneof(cls, v):
+        instance = ChatSettingsTags.model_construct()
+        error_messages = []
+        match = 0
+        # validate data type: List[str]
+        try:
+            instance.oneof_schema_1_validator = v
+            match += 1
+        except (ValidationError, ValueError) as e:
+            error_messages.append(str(e))
+        # validate data type: TagFilter
+        if not isinstance(v, TagFilter):
+            error_messages.append(f"Error! Input type `{type(v)}` is not `TagFilter`")
+        else:
+            match += 1
+        if match > 1:
+            # more than 1 match
+            raise ValueError("Multiple matches found when setting `actual_instance` in ChatSettingsTags with oneOf schemas: List[str], TagFilter. Details: " + ", ".join(error_messages))
+        elif match == 0:
+            # no match
+            raise ValueError("No match found when setting `actual_instance` in ChatSettingsTags with oneOf schemas: List[str], TagFilter. Details: " + ", ".join(error_messages))
+        else:
+            return v
+
+    @classmethod
+    def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self:
+        return cls.from_json(json.dumps(obj))
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Self:
+        """Returns the object represented by the json string"""
+        instance = cls.model_construct()
+        error_messages = []
+        match = 0
+
+        # deserialize data into List[str]
+        try:
+            # validation
+            instance.oneof_schema_1_validator = json.loads(json_str)
+            # assign value to actual_instance
+            instance.actual_instance = instance.oneof_schema_1_validator
+            match += 1
+        except (ValidationError, ValueError) as e:
+            error_messages.append(str(e))
+        # deserialize data into TagFilter
+        try:
+            instance.actual_instance = TagFilter.from_json(json_str)
+            match += 1
+        except (ValidationError, ValueError) as e:
+            error_messages.append(str(e))
+
+        if match > 1:
+            # more than 1 match
+            raise ValueError("Multiple matches found when deserializing the JSON string into ChatSettingsTags with oneOf schemas: List[str], TagFilter. Details: " + ", ".join(error_messages))
+        elif match == 0:
+            # no match
+            raise ValueError("No match found when deserializing the JSON string into ChatSettingsTags with oneOf schemas: List[str], TagFilter. Details: " + ", ".join(error_messages))
+        else:
+            return instance
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the actual instance"""
+        if self.actual_instance is None:
+            return "null"
+
+        if hasattr(self.actual_instance, "to_json") and callable(self.actual_instance.to_json):
+            return self.actual_instance.to_json()
+        else:
+            return json.dumps(self.actual_instance)
+
+    def to_dict(self) -> Optional[Union[Dict[str, Any], List[str], TagFilter]]:
+        """Returns the dict representation of the actual instance"""
+        if self.actual_instance is None:
+            return None
+
+        if hasattr(self.actual_instance, "to_dict") and callable(self.actual_instance.to_dict):
+            return self.actual_instance.to_dict()
+        else:
+            # primitive type
+            return self.actual_instance
+
+    def to_str(self) -> str:
+        """Returns the string representation of the actual instance"""
+        return pprint.pformat(self.model_dump())
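ChatSettingsTags is a oneOf wrapper, so the same field accepts either the backward-compatible list form or the new TagFilter object. A small construction-only sketch using the generated models; wiring the value into a real request is unchanged from how tags was passed before:

    from h2ogpte.rest_async.models import ChatSettingsTags, TagFilter

    # Array form (backward compatible): documents tagged 'red' OR 'blue'
    tags_as_list = ChatSettingsTags(["red", "blue"])

    # Object form: include 'color'-tagged documents, but drop anything tagged 'red' or 'blue'
    tags_with_exclusions = ChatSettingsTags(TagFilter(include=["color"], exclude=["red", "blue"]))

    # Both serialize to the shape the REST API expects
    print(tags_as_list.to_dict())            # ['red', 'blue']
    print(tags_with_exclusions.to_dict())    # {'include': ['color'], 'exclude': ['red', 'blue']}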
h2ogpte/rest_async/models/ingest_from_confluence_body.py
CHANGED

@@ -17,7 +17,7 @@ import pprint
 import re # noqa: F401
 import json

-from pydantic import BaseModel, ConfigDict, Field, StrictStr
+from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr
 from typing import Any, ClassVar, Dict, List, Optional
 from h2ogpte.rest_async.models.confluence_credentials import ConfluenceCredentials
 from typing import Optional, Set

@@ -29,9 +29,10 @@ class IngestFromConfluenceBody(BaseModel):
     """ # noqa: E501
     base_url: StrictStr = Field(description="Base url of the confluence instance.")
     page_ids: List[StrictStr] = Field(description="Ids of pages to be ingested.")
+    include_attachments: Optional[StrictBool] = Field(default=False, description="A flag indicating whether to also ingest attachments with the page.")
     credentials: ConfluenceCredentials
     metadata: Optional[Dict[str, Any]] = Field(default=None, description="Metadata for the documents.")
-    __properties: ClassVar[List[str]] = ["base_url", "page_ids", "credentials", "metadata"]
+    __properties: ClassVar[List[str]] = ["base_url", "page_ids", "include_attachments", "credentials", "metadata"]

     model_config = ConfigDict(
         populate_by_name=True,

@@ -89,6 +90,7 @@ class IngestFromConfluenceBody(BaseModel):
         _obj = cls.model_validate({
             "base_url": obj.get("base_url"),
             "page_ids": obj.get("page_ids"),
+            "include_attachments": obj.get("include_attachments") if obj.get("include_attachments") is not None else False,
             "credentials": ConfluenceCredentials.from_dict(obj["credentials"]) if obj.get("credentials") is not None else None,
             "metadata": obj.get("metadata")
         })
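At the REST layer the new flag is simply part of the request body. A brief sketch of the body model; saved_credentials is a placeholder dict whose keys are defined by ConfluenceCredentials and are unchanged by this release:

    from h2ogpte.rest_async.models.confluence_credentials import ConfluenceCredentials
    from h2ogpte.rest_async.models.ingest_from_confluence_body import IngestFromConfluenceBody

    credentials = ConfluenceCredentials.from_dict(saved_credentials)  # placeholder, built as in 1.6.52

    body = IngestFromConfluenceBody(
        base_url="https://example.atlassian.net/wiki",
        page_ids=["123456", "654321"],
        include_attachments=True,   # optional; omitting it keeps the old behaviour (False)
        credentials=credentials,
        metadata={"source": "confluence"},
    )
    print(body.to_json())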
h2ogpte/rest_async/models/tag_filter.py
ADDED

@@ -0,0 +1,89 @@
+# coding: utf-8
+
+"""
+    h2oGPTe REST API
+
+    # Overview Users can easily interact with the h2oGPTe API through its REST API, allowing HTTP requests from any programming language. ## Authorization: Getting an API key Sign up/in at Enterprise h2oGPTe and generate one of the following two types of API keys: - **Global API key**: If a Collection is not specified when creating a new API Key, that key is considered to be a global API Key. Use global API Keys to grant full user impersonation and system-wide access to all of your work. Anyone with access to one of your global API Keys can create, delete, or interact with any of your past, current, and future Collections, Documents, Chats, and settings. - **Collection-specific API key**: Use Collection-specific API Keys to grant external access to only Chat with a specified Collection and make related API calls to it. Collection-specific API keys do not allow other API calls, such as creation, deletion, or access to other Collections or Chats. Access Enterprise h2oGPTe through your [H2O Generative AI](https://genai.h2o.ai/appstore) app store account, available with a freemium tier. ## Authorization: Using an API key All h2oGPTe REST API requests must include an API Key in the \"Authorization\" HTTP header, formatted as follows: ``` Authorization: Bearer sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ``` ```sh curl -X 'POST' \\ 'https://h2ogpte.genai.h2o.ai/api/v1/collections' \\ -H 'accept: application/json' \\ -H 'Content-Type: application/json' \\ -H 'Authorization: Bearer sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' \\ -d '{ \"name\": \"The name of my Collection\", \"description\": \"The description of my Collection\", \"embedding_model\": \"BAAI/bge-large-en-v1.5\" }' ``` ## Interactive h2oGPTe API testing This page only showcases the h2oGPTe REST API; you can test it directly in the [Swagger UI](https://h2ogpte.genai.h2o.ai/swagger-ui/). Ensure that you are logged into your Enterprise h2oGPTe account.
+
+    The version of the OpenAPI document: v1.0.0
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field, StrictStr
+from typing import Any, ClassVar, Dict, List, Optional
+from typing import Optional, Set
+from typing_extensions import Self
+
+class TagFilter(BaseModel):
+    """
+    Filter for document tags supporting inclusion and exclusion. Note: The exclude list takes priority over the include list. If a document has a tag that appears in both lists, the document will be excluded. Examples: - Include only documents with 'red' OR 'blue' tags: {\"include\": [\"red\", \"blue\"]} - Exclude documents with 'red' OR 'blue' tags: {\"exclude\": [\"red\", \"blue\"]} - Include documents with 'color' tag BUT exclude 'red' and 'blue': {\"include\": [\"color\"], \"exclude\": [\"red\", \"blue\"]}
+    """ # noqa: E501
+    include: Optional[List[StrictStr]] = Field(default=None, description="Include documents with ANY of these tags (OR operation).")
+    exclude: Optional[List[StrictStr]] = Field(default=None, description="Exclude documents with ANY of these tags (OR operation). Takes priority over include.")
+    __properties: ClassVar[List[str]] = ["include", "exclude"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of TagFilter from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of TagFilter from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj)
+
+        _obj = cls.model_validate({
+            "include": obj.get("include"),
+            "exclude": obj.get("exclude")
+        })
+        return _obj
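TagFilter's docstring spells out that the exclude list wins over the include list. A short illustrative sketch of the three documented shapes, using the generated model:

    from h2ogpte.rest_async.models.tag_filter import TagFilter

    # Only the include side: documents tagged 'red' OR 'blue'
    only_include = TagFilter(include=["red", "blue"])

    # Only the exclude side: everything except documents tagged 'red' or 'blue'
    only_exclude = TagFilter(exclude=["red", "blue"])

    # Both sides: 'color'-tagged documents minus those also tagged 'red' or 'blue';
    # a document tagged both 'color' and 'red' is excluded, because exclude takes priority
    both = TagFilter(include=["color"], exclude=["red", "blue"])

    print(both.to_dict())  # {'include': ['color'], 'exclude': ['red', 'blue']}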
h2ogpte/rest_sync/__init__.py
CHANGED
@@ -14,7 +14,7 @@
 """ # noqa: E501


-__version__ = "1.6.
+__version__ = "1.6.53-dev1"

 # import apis into sdk package
 from h2ogpte.rest_sync.api.api_keys_api import APIKeysApi

@@ -69,6 +69,7 @@ from h2ogpte.rest_sync.models.chat_message_reference import ChatMessageReference
 from h2ogpte.rest_sync.models.chat_session import ChatSession
 from h2ogpte.rest_sync.models.chat_session_update_request import ChatSessionUpdateRequest
 from h2ogpte.rest_sync.models.chat_settings import ChatSettings
+from h2ogpte.rest_sync.models.chat_settings_tags import ChatSettingsTags
 from h2ogpte.rest_sync.models.chunk import Chunk
 from h2ogpte.rest_sync.models.chunk_search_result import ChunkSearchResult
 from h2ogpte.rest_sync.models.collection import Collection

@@ -158,6 +159,7 @@ from h2ogpte.rest_sync.models.suggested_question import SuggestedQuestion
 from h2ogpte.rest_sync.models.summarize_request import SummarizeRequest
 from h2ogpte.rest_sync.models.tag import Tag
 from h2ogpte.rest_sync.models.tag_create_request import TagCreateRequest
+from h2ogpte.rest_sync.models.tag_filter import TagFilter
 from h2ogpte.rest_sync.models.tag_update_request import TagUpdateRequest
 from h2ogpte.rest_sync.models.update_agent_key_request import UpdateAgentKeyRequest
 from h2ogpte.rest_sync.models.update_agent_tool_preference_request import UpdateAgentToolPreferenceRequest
h2ogpte/rest_sync/api_client.py
CHANGED
@@ -90,7 +90,7 @@ class ApiClient:
         self.default_headers[header_name] = header_value
         self.cookie = cookie
         # Set default User-Agent.
-        self.user_agent = 'OpenAPI-Generator/1.6.
+        self.user_agent = 'OpenAPI-Generator/1.6.53-dev1/python'
         self.client_side_validation = configuration.client_side_validation

     def __enter__(self):

h2ogpte/rest_sync/configuration.py
CHANGED

@@ -503,7 +503,7 @@ class Configuration:
                "OS: {env}\n"\
                "Python Version: {pyversion}\n"\
                "Version of the API: v1.0.0\n"\
-               "SDK Package Version: 1.6.
+               "SDK Package Version: 1.6.53-dev1".\
                format(env=sys.platform, pyversion=sys.version)

     def get_host_settings(self) -> List[HostSetting]:

h2ogpte/rest_sync/models/__init__.py
CHANGED

Same two import additions as in the rest_async package above: ChatSettingsTags is imported after ChatSettings, and TagFilter after TagCreateRequest, in both cases from the corresponding h2ogpte.rest_sync.models modules.

h2ogpte/rest_sync/models/chat_completion_request.py
CHANGED

h2ogpte/rest_sync/models/chat_settings.py
CHANGED

Both models receive the same four hunks as their rest_async counterparts above: ChatSettingsTags is imported (from h2ogpte.rest_sync.models.chat_settings_tags), the tags field becomes tags: Optional[ChatSettingsTags] = None, to_dict() serializes it via self.tags.to_dict(), and from_dict() rebuilds it via ChatSettingsTags.from_dict(obj["tags"]).
@@ -0,0 +1,140 @@
|
|
|
1
|
+
# coding: utf-8
|
|
2
|
+
|
|
3
|
+
"""
|
|
4
|
+
h2oGPTe REST API
|
|
5
|
+
|
|
6
|
+
# Overview Users can easily interact with the h2oGPTe API through its REST API, allowing HTTP requests from any programming language. ## Authorization: Getting an API key Sign up/in at Enterprise h2oGPTe and generate one of the following two types of API keys: - **Global API key**: If a Collection is not specified when creating a new API Key, that key is considered to be a global API Key. Use global API Keys to grant full user impersonation and system-wide access to all of your work. Anyone with access to one of your global API Keys can create, delete, or interact with any of your past, current, and future Collections, Documents, Chats, and settings. - **Collection-specific API key**: Use Collection-specific API Keys to grant external access to only Chat with a specified Collection and make related API calls to it. Collection-specific API keys do not allow other API calls, such as creation, deletion, or access to other Collections or Chats. Access Enterprise h2oGPTe through your [H2O Generative AI](https://genai.h2o.ai/appstore) app store account, available with a freemium tier. ## Authorization: Using an API key All h2oGPTe REST API requests must include an API Key in the \"Authorization\" HTTP header, formatted as follows: ``` Authorization: Bearer sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ``` ```sh curl -X 'POST' \\ 'https://h2ogpte.genai.h2o.ai/api/v1/collections' \\ -H 'accept: application/json' \\ -H 'Content-Type: application/json' \\ -H 'Authorization: Bearer sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' \\ -d '{ \"name\": \"The name of my Collection\", \"description\": \"The description of my Collection\", \"embedding_model\": \"BAAI/bge-large-en-v1.5\" }' ``` ## Interactive h2oGPTe API testing This page only showcases the h2oGPTe REST API; you can test it directly in the [Swagger UI](https://h2ogpte.genai.h2o.ai/swagger-ui/). Ensure that you are logged into your Enterprise h2oGPTe account.
|
|
7
|
+
|
|
8
|
+
The version of the OpenAPI document: v1.0.0
|
|
9
|
+
Generated by OpenAPI Generator (https://openapi-generator.tech)
|
|
10
|
+
|
|
11
|
+
Do not edit the class manually.
|
|
12
|
+
""" # noqa: E501
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
from __future__ import annotations
|
|
16
|
+
import json
|
|
17
|
+
import pprint
|
|
18
|
+
from pydantic import BaseModel, ConfigDict, Field, StrictStr, ValidationError, field_validator
|
|
19
|
+
from typing import Any, List, Optional
|
|
20
|
+
from h2ogpte.rest_sync.models.tag_filter import TagFilter
|
|
21
|
+
from pydantic import StrictStr, Field
|
|
22
|
+
from typing import Union, List, Set, Optional, Dict
|
|
23
|
+
from typing_extensions import Literal, Self
|
|
24
|
+
|
|
25
|
+
CHATSETTINGSTAGS_ONE_OF_SCHEMAS = ["List[str]", "TagFilter"]
|
|
26
|
+
|
|
27
|
+
class ChatSettingsTags(BaseModel):
|
|
28
|
+
"""
|
|
29
|
+
Filter documents by tags for RAG context. Supports two formats: - Array format (backward compatible): [\"red\", \"blue\"] includes documents with 'red' OR 'blue' tags - Object format (with exclusions): {\"include\": [\"color\"], \"exclude\": [\"red\", \"blue\"]}
|
|
30
|
+
"""
|
|
31
|
+
# data type: List[str]
|
|
32
|
+
oneof_schema_1_validator: Optional[List[StrictStr]] = None
|
|
33
|
+
# data type: TagFilter
|
|
34
|
+
oneof_schema_2_validator: Optional[TagFilter] = None
|
|
35
|
+
actual_instance: Optional[Union[List[str], TagFilter]] = None
|
|
36
|
+
one_of_schemas: Set[str] = { "List[str]", "TagFilter" }
|
|
37
|
+
|
|
38
|
+
model_config = ConfigDict(
|
|
39
|
+
validate_assignment=True,
|
|
40
|
+
protected_namespaces=(),
|
|
41
|
+
)
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def __init__(self, *args, **kwargs) -> None:
|
|
45
|
+
if args:
|
|
46
|
+
if len(args) > 1:
|
|
47
|
+
raise ValueError("If a position argument is used, only 1 is allowed to set `actual_instance`")
|
|
48
|
+
if kwargs:
|
|
49
|
+
raise ValueError("If a position argument is used, keyword arguments cannot be used.")
|
|
50
|
+
super().__init__(actual_instance=args[0])
|
|
51
|
+
else:
|
|
52
|
+
super().__init__(**kwargs)
|
|
53
|
+
|
|
54
|
+
@field_validator('actual_instance')
|
|
55
|
+
def actual_instance_must_validate_oneof(cls, v):
|
|
56
|
+
instance = ChatSettingsTags.model_construct()
|
|
57
|
+
error_messages = []
|
|
58
|
+
match = 0
|
|
59
|
+
# validate data type: List[str]
|
|
60
|
+
try:
|
|
61
|
+
instance.oneof_schema_1_validator = v
|
|
62
|
+
match += 1
|
|
63
|
+
except (ValidationError, ValueError) as e:
|
|
64
|
+
error_messages.append(str(e))
|
|
65
|
+
# validate data type: TagFilter
|
|
66
|
+
if not isinstance(v, TagFilter):
|
|
67
|
+
error_messages.append(f"Error! Input type `{type(v)}` is not `TagFilter`")
|
|
68
|
+
else:
|
|
69
|
+
match += 1
|
|
70
|
+
if match > 1:
|
|
71
|
+
# more than 1 match
|
|
72
|
+
raise ValueError("Multiple matches found when setting `actual_instance` in ChatSettingsTags with oneOf schemas: List[str], TagFilter. Details: " + ", ".join(error_messages))
|
|
73
|
+
elif match == 0:
|
|
74
|
+
# no match
|
|
75
|
+
raise ValueError("No match found when setting `actual_instance` in ChatSettingsTags with oneOf schemas: List[str], TagFilter. Details: " + ", ".join(error_messages))
|
|
76
|
+
else:
|
|
77
|
+
return v
|
|
78
|
+
|
|
79
|
+
@classmethod
|
|
80
|
+
def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self:
|
|
81
|
+
return cls.from_json(json.dumps(obj))
|
|
82
|
+
|
|
83
|
+
@classmethod
|
|
84
|
+
def from_json(cls, json_str: str) -> Self:
|
|
85
|
+
"""Returns the object represented by the json string"""
|
|
86
|
+
instance = cls.model_construct()
|
|
87
|
+
error_messages = []
|
|
88
|
+
match = 0
|
|
89
|
+
|
|
90
|
+
# deserialize data into List[str]
|
|
91
|
+
try:
|
|
92
|
+
# validation
|
|
93
|
+
instance.oneof_schema_1_validator = json.loads(json_str)
|
|
94
|
+
# assign value to actual_instance
|
|
95
|
+
instance.actual_instance = instance.oneof_schema_1_validator
|
|
96
|
+
match += 1
|
|
97
|
+
except (ValidationError, ValueError) as e:
|
|
98
|
+
error_messages.append(str(e))
|
|
99
|
+
# deserialize data into TagFilter
|
|
100
|
+
try:
|
|
101
|
+
instance.actual_instance = TagFilter.from_json(json_str)
|
|
102
|
+
match += 1
|
|
103
|
+
except (ValidationError, ValueError) as e:
|
|
104
|
+
error_messages.append(str(e))
|
|
105
|
+
|
|
106
|
+
if match > 1:
|
|
107
|
+
# more than 1 match
|
|
108
|
+
raise ValueError("Multiple matches found when deserializing the JSON string into ChatSettingsTags with oneOf schemas: List[str], TagFilter. Details: " + ", ".join(error_messages))
|
|
109
|
+
elif match == 0:
|
|
110
|
+
# no match
|
|
111
|
+
raise ValueError("No match found when deserializing the JSON string into ChatSettingsTags with oneOf schemas: List[str], TagFilter. Details: " + ", ".join(error_messages))
|
|
112
|
+
else:
|
|
113
|
+
return instance
|
|
114
|
+
|
|
115
|
+
def to_json(self) -> str:
|
|
116
|
+
"""Returns the JSON representation of the actual instance"""
|
|
117
|
+
if self.actual_instance is None:
|
|
118
|
+
return "null"
|
|
119
|
+
|
|
120
|
+
if hasattr(self.actual_instance, "to_json") and callable(self.actual_instance.to_json):
|
|
121
|
+
return self.actual_instance.to_json()
|
|
122
|
+
else:
|
|
123
|
+
return json.dumps(self.actual_instance)
|
|
124
|
+
|
|
125
|
+
def to_dict(self) -> Optional[Union[Dict[str, Any], List[str], TagFilter]]:
|
|
126
|
+
"""Returns the dict representation of the actual instance"""
|
|
127
|
+
if self.actual_instance is None:
|
|
128
|
+
return None
|
|
129
|
+
|
|
130
|
+
if hasattr(self.actual_instance, "to_dict") and callable(self.actual_instance.to_dict):
|
|
131
|
+
return self.actual_instance.to_dict()
|
|
132
|
+
else:
|
|
133
|
+
# primitive type
|
|
134
|
+
return self.actual_instance
|
|
135
|
+
|
|
136
|
+
def to_str(self) -> str:
|
|
137
|
+
"""Returns the string representation of the actual instance"""
|
|
138
|
+
return pprint.pformat(self.model_dump())
|
|
139
|
+
|
|
140
|
+
|
|
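The generated `ChatSettingsTags` wrapper above (the rest_sync variant, per the surrounding hunks) is a oneOf container: `actual_instance` holds either a plain list of tag strings or a `TagFilter`, the field validator decides which schema matched, and `from_json`/`to_json` round-trip whichever form was given. A minimal sketch of that behaviour, assuming the generated modules are importable under the paths listed in this wheel's RECORD:

```python
# Sketch only: exercises the generated oneOf wrapper shown in the hunk above.
from h2ogpte.rest_sync.models.chat_settings_tags import ChatSettingsTags
from h2ogpte.rest_sync.models.tag_filter import TagFilter

# Array format (backward compatible): include-only tag list.
as_list = ChatSettingsTags(actual_instance=["red", "blue"])
print(as_list.to_json())          # ["red", "blue"]

# Object format: include/exclude via the new TagFilter model.
as_filter = ChatSettingsTags(
    actual_instance=TagFilter(include=["color"], exclude=["red", "blue"])
)
print(as_filter.to_json())        # {"include": ["color"], "exclude": ["red", "blue"]}

# Deserialization tries each oneOf schema and keeps the single match.
parsed = ChatSettingsTags.from_json('{"include": ["color"]}')
print(type(parsed.actual_instance).__name__)   # TagFilter
```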
@@ -17,7 +17,7 @@ import pprint
|
|
|
17
17
|
import re # noqa: F401
|
|
18
18
|
import json
|
|
19
19
|
|
|
20
|
-
from pydantic import BaseModel, ConfigDict, Field, StrictStr
|
|
20
|
+
from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr
|
|
21
21
|
from typing import Any, ClassVar, Dict, List, Optional
|
|
22
22
|
from h2ogpte.rest_sync.models.confluence_credentials import ConfluenceCredentials
|
|
23
23
|
from typing import Optional, Set
|
|
@@ -29,9 +29,10 @@ class IngestFromConfluenceBody(BaseModel):
|
|
|
29
29
|
""" # noqa: E501
|
|
30
30
|
base_url: StrictStr = Field(description="Base url of the confluence instance.")
|
|
31
31
|
page_ids: List[StrictStr] = Field(description="Ids of pages to be ingested.")
|
|
32
|
+
include_attachments: Optional[StrictBool] = Field(default=False, description="A flag indicating whether to also ingest attachments with the page.")
|
|
32
33
|
credentials: ConfluenceCredentials
|
|
33
34
|
metadata: Optional[Dict[str, Any]] = Field(default=None, description="Metadata for the documents.")
|
|
34
|
-
__properties: ClassVar[List[str]] = ["base_url", "page_ids", "credentials", "metadata"]
|
|
35
|
+
__properties: ClassVar[List[str]] = ["base_url", "page_ids", "include_attachments", "credentials", "metadata"]
|
|
35
36
|
|
|
36
37
|
model_config = ConfigDict(
|
|
37
38
|
populate_by_name=True,
|
|
@@ -89,6 +90,7 @@ class IngestFromConfluenceBody(BaseModel):
|
|
|
89
90
|
_obj = cls.model_validate({
|
|
90
91
|
"base_url": obj.get("base_url"),
|
|
91
92
|
"page_ids": obj.get("page_ids"),
|
|
93
|
+
"include_attachments": obj.get("include_attachments") if obj.get("include_attachments") is not None else False,
|
|
92
94
|
"credentials": ConfluenceCredentials.from_dict(obj["credentials"]) if obj.get("credentials") is not None else None,
|
|
93
95
|
"metadata": obj.get("metadata")
|
|
94
96
|
})
|
|
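The hunk above adds an `include_attachments` flag (default `False`) to the generated Confluence ingest body and threads it through `from_dict`. A hedged sketch of the resulting payload shape follows; the URL, page ids, and metadata are placeholders, and the contents of `credentials` are not shown in this diff, so only an empty stub is used here:

```python
# Sketch only: the JSON body shape for Confluence ingestion after this change,
# built as a plain dict so it runs without real credentials.
import json

payload = {
    "base_url": "https://example.atlassian.net/wiki",  # placeholder Confluence base URL
    "page_ids": ["12345", "67890"],                    # placeholder page ids
    "include_attachments": True,                       # new flag; treated as False when omitted
    "credentials": {},                                 # fill per ConfluenceCredentials (fields not in this diff)
    "metadata": {"source": "confluence"},              # optional document metadata
}
print(json.dumps(payload, indent=2))
```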
@@ -0,0 +1,89 @@
|
|
|
1
|
+
# coding: utf-8
|
|
2
|
+
|
|
3
|
+
"""
|
|
4
|
+
h2oGPTe REST API
|
|
5
|
+
|
|
6
|
+
# Overview Users can easily interact with the h2oGPTe API through its REST API, allowing HTTP requests from any programming language. ## Authorization: Getting an API key Sign up/in at Enterprise h2oGPTe and generate one of the following two types of API keys: - **Global API key**: If a Collection is not specified when creating a new API Key, that key is considered to be a global API Key. Use global API Keys to grant full user impersonation and system-wide access to all of your work. Anyone with access to one of your global API Keys can create, delete, or interact with any of your past, current, and future Collections, Documents, Chats, and settings. - **Collection-specific API key**: Use Collection-specific API Keys to grant external access to only Chat with a specified Collection and make related API calls to it. Collection-specific API keys do not allow other API calls, such as creation, deletion, or access to other Collections or Chats. Access Enterprise h2oGPTe through your [H2O Generative AI](https://genai.h2o.ai/appstore) app store account, available with a freemium tier. ## Authorization: Using an API key All h2oGPTe REST API requests must include an API Key in the \"Authorization\" HTTP header, formatted as follows: ``` Authorization: Bearer sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ``` ```sh curl -X 'POST' \\ 'https://h2ogpte.genai.h2o.ai/api/v1/collections' \\ -H 'accept: application/json' \\ -H 'Content-Type: application/json' \\ -H 'Authorization: Bearer sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' \\ -d '{ \"name\": \"The name of my Collection\", \"description\": \"The description of my Collection\", \"embedding_model\": \"BAAI/bge-large-en-v1.5\" }' ``` ## Interactive h2oGPTe API testing This page only showcases the h2oGPTe REST API; you can test it directly in the [Swagger UI](https://h2ogpte.genai.h2o.ai/swagger-ui/). Ensure that you are logged into your Enterprise h2oGPTe account.
|
|
7
|
+
|
|
8
|
+
The version of the OpenAPI document: v1.0.0
|
|
9
|
+
Generated by OpenAPI Generator (https://openapi-generator.tech)
|
|
10
|
+
|
|
11
|
+
Do not edit the class manually.
|
|
12
|
+
""" # noqa: E501
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
from __future__ import annotations
|
|
16
|
+
import pprint
|
|
17
|
+
import re # noqa: F401
|
|
18
|
+
import json
|
|
19
|
+
|
|
20
|
+
from pydantic import BaseModel, ConfigDict, Field, StrictStr
|
|
21
|
+
from typing import Any, ClassVar, Dict, List, Optional
|
|
22
|
+
from typing import Optional, Set
|
|
23
|
+
from typing_extensions import Self
|
|
24
|
+
|
|
25
|
+
class TagFilter(BaseModel):
|
|
26
|
+
"""
|
|
27
|
+
Filter for document tags supporting inclusion and exclusion. Note: The exclude list takes priority over the include list. If a document has a tag that appears in both lists, the document will be excluded. Examples: - Include only documents with 'red' OR 'blue' tags: {\"include\": [\"red\", \"blue\"]} - Exclude documents with 'red' OR 'blue' tags: {\"exclude\": [\"red\", \"blue\"]} - Include documents with 'color' tag BUT exclude 'red' and 'blue': {\"include\": [\"color\"], \"exclude\": [\"red\", \"blue\"]}
|
|
28
|
+
""" # noqa: E501
|
|
29
|
+
include: Optional[List[StrictStr]] = Field(default=None, description="Include documents with ANY of these tags (OR operation).")
|
|
30
|
+
exclude: Optional[List[StrictStr]] = Field(default=None, description="Exclude documents with ANY of these tags (OR operation). Takes priority over include.")
|
|
31
|
+
__properties: ClassVar[List[str]] = ["include", "exclude"]
|
|
32
|
+
|
|
33
|
+
model_config = ConfigDict(
|
|
34
|
+
populate_by_name=True,
|
|
35
|
+
validate_assignment=True,
|
|
36
|
+
protected_namespaces=(),
|
|
37
|
+
)
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def to_str(self) -> str:
|
|
41
|
+
"""Returns the string representation of the model using alias"""
|
|
42
|
+
return pprint.pformat(self.model_dump(by_alias=True))
|
|
43
|
+
|
|
44
|
+
def to_json(self) -> str:
|
|
45
|
+
"""Returns the JSON representation of the model using alias"""
|
|
46
|
+
# TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
|
|
47
|
+
return json.dumps(self.to_dict())
|
|
48
|
+
|
|
49
|
+
@classmethod
|
|
50
|
+
def from_json(cls, json_str: str) -> Optional[Self]:
|
|
51
|
+
"""Create an instance of TagFilter from a JSON string"""
|
|
52
|
+
return cls.from_dict(json.loads(json_str))
|
|
53
|
+
|
|
54
|
+
def to_dict(self) -> Dict[str, Any]:
|
|
55
|
+
"""Return the dictionary representation of the model using alias.
|
|
56
|
+
|
|
57
|
+
This has the following differences from calling pydantic's
|
|
58
|
+
`self.model_dump(by_alias=True)`:
|
|
59
|
+
|
|
60
|
+
* `None` is only added to the output dict for nullable fields that
|
|
61
|
+
were set at model initialization. Other fields with value `None`
|
|
62
|
+
are ignored.
|
|
63
|
+
"""
|
|
64
|
+
excluded_fields: Set[str] = set([
|
|
65
|
+
])
|
|
66
|
+
|
|
67
|
+
_dict = self.model_dump(
|
|
68
|
+
by_alias=True,
|
|
69
|
+
exclude=excluded_fields,
|
|
70
|
+
exclude_none=True,
|
|
71
|
+
)
|
|
72
|
+
return _dict
|
|
73
|
+
|
|
74
|
+
@classmethod
|
|
75
|
+
def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
|
|
76
|
+
"""Create an instance of TagFilter from a dict"""
|
|
77
|
+
if obj is None:
|
|
78
|
+
return None
|
|
79
|
+
|
|
80
|
+
if not isinstance(obj, dict):
|
|
81
|
+
return cls.model_validate(obj)
|
|
82
|
+
|
|
83
|
+
_obj = cls.model_validate({
|
|
84
|
+
"include": obj.get("include"),
|
|
85
|
+
"exclude": obj.get("exclude")
|
|
86
|
+
})
|
|
87
|
+
return _obj
|
|
88
|
+
|
|
89
|
+
|
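The new generated `TagFilter` model above carries the include and exclude lists, with exclude taking priority when a tag appears in both. A minimal sketch of its serialization behaviour, assuming the module is importable as listed in the RECORD further down:

```python
# Sketch only: serialization behaviour of the generated TagFilter model.
from h2ogpte.rest_sync.models.tag_filter import TagFilter

flt = TagFilter(include=["color"], exclude=["red", "blue"])
print(flt.to_json())                     # {"include": ["color"], "exclude": ["red", "blue"]}

# to_dict() drops unset fields (exclude_none=True), so include-only and
# exclude-only filters serialize to just the list that was provided.
print(TagFilter(include=["red"]).to_dict())                   # {'include': ['red']}
print(TagFilter.from_dict({"exclude": ["draft"]}).to_dict())  # {'exclude': ['draft']}
```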
h2ogpte/session.py
CHANGED
|
@@ -29,6 +29,7 @@ from h2ogpte.types import (
|
|
|
29
29
|
ChatResponse,
|
|
30
30
|
PartialChatMessage,
|
|
31
31
|
SessionError,
|
|
32
|
+
TagFilter,
|
|
32
33
|
)
|
|
33
34
|
from h2ogpte.errors import (
|
|
34
35
|
UnauthorizedError,
|
|
@@ -91,7 +92,9 @@ class Session:
|
|
|
91
92
|
url = urlparse(address)
|
|
92
93
|
scheme = "wss" if url.scheme == "https" else "ws"
|
|
93
94
|
# TODO handle base URLs
|
|
94
|
-
self._address =
|
|
95
|
+
self._address = (
|
|
96
|
+
f"{scheme}://{url.netloc}/ws?currentSessionID={chat_session_id}&source=py"
|
|
97
|
+
)
|
|
95
98
|
self._client = client
|
|
96
99
|
self._chat_session_id = chat_session_id
|
|
97
100
|
self._connection: Optional[ClientConnection] = None
|
|
@@ -192,7 +195,7 @@ class Session:
|
|
|
192
195
|
self_reflection_config: Optional[Dict[str, Any]] = None,
|
|
193
196
|
rag_config: Optional[Dict[str, Any]] = None,
|
|
194
197
|
include_chat_history: Optional[Union[bool, str]] = "auto",
|
|
195
|
-
tags: Optional[List[str]] = None,
|
|
198
|
+
tags: Optional[Union[List[str], TagFilter]] = None,
|
|
196
199
|
metadata_filter: Optional[Dict[str, Any]] = None,
|
|
197
200
|
timeout: Optional[float] = None,
|
|
198
201
|
retries: int = 3,
|
|
@@ -326,6 +329,8 @@ class Session:
|
|
|
326
329
|
Requires 1 LLM or Agent call.
|
|
327
330
|
:code:`"agent_only"` Agent Only - Answer the query with only original files passed to agent.
|
|
328
331
|
Requires 1 Agent call.
|
|
332
|
+
:code:`"agentic_rag"` Agentic RAG - Agent with RAG tool that retrieves and answers from collection.
|
|
333
|
+
Requires 1 Agent call with RAG tool execution.
|
|
329
334
|
:code:`"rag"` RAG (Retrieval Augmented Generation) - Use supporting document contexts
|
|
330
335
|
to answer the query. Requires 1 LLM or Agent call.
|
|
331
336
|
:code:`"hyde1"` LLM Only + RAG composite - HyDE RAG (Hypothetical Document Embedding).
|
|
@@ -340,9 +345,6 @@ class Session:
|
|
|
340
345
|
:code:`"all_data"` All Data RAG - Like Summary RAG, but includes all document
|
|
341
346
|
chunks. Uses recursive summarization to overcome LLM context limits.
|
|
342
347
|
Can require several LLM calls.
|
|
343
|
-
:code:`"all_data"` All Data RAG - Like Summary RAG, but includes all document
|
|
344
|
-
chunks. Uses recursive summarization to overcome LLM context limits.
|
|
345
|
-
Can require several LLM calls.
|
|
346
348
|
hyde_no_rag_llm_prompt_extension: str
|
|
347
349
|
Add this prompt to every user's prompt, when generating answers to be used
|
|
348
350
|
for subsequent retrieval during HyDE. Only used when rag_type is "hyde1" or "hyde2".
|
|
@@ -380,7 +382,9 @@ class Session:
|
|
|
380
382
|
answers for a given question.
|
|
381
383
|
Choices are: ["on","off","auto",True,False]
|
|
382
384
|
tags:
|
|
383
|
-
|
|
385
|
+
Filter documents by tags for RAG. Can be:
|
|
386
|
+
- List format: ["red", "blue"] includes documents with these tags
|
|
387
|
+
- TagFilter object: TagFilter(include=["red"], exclude=["blue"])
|
|
384
388
|
metadata_filter:
|
|
385
389
|
A dictionary to filter documents by metadata, from which to pull the context for RAG.
|
|
386
390
|
timeout:
|
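On the client side, `session.py` now accepts either form for `tags` and imports the new `TagFilter` from `h2ogpte.types`. A hedged sketch of how that might be used; it assumes the changed keyword belongs to `Session.query` (the neighbouring parameters in the hunk match that method) and that the session is obtained through the client's usual `connect()` flow — the address, API key, and session id below are placeholders:

```python
# Sketch only, under the assumptions stated above.
from h2ogpte import H2OGPTE
from h2ogpte.types import TagFilter

client = H2OGPTE(
    address="https://h2ogpte.genai.h2o.ai",
    api_key="sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",  # placeholder key
)
chat_session_id = "00000000-0000-0000-0000-000000000000"  # placeholder; create one via the client first

with client.connect(chat_session_id) as session:
    # Backward-compatible list form: include documents tagged 'red' OR 'blue'.
    reply = session.query("Summarize the tagged findings", tags=["red", "blue"])

    # New TagFilter form: include 'color' documents, but never 'red' or 'blue'.
    reply = session.query(
        "Summarize everything except red and blue",
        tags=TagFilter(include=["color"], exclude=["red", "blue"]),
    )

    # The docstring hunk above also adds an "agentic_rag" rag_type; it is
    # requested through rag_config in the usual way.
    reply = session.query(
        "Investigate the color documents",
        rag_config={"rag_type": "agentic_rag"},
        tags=TagFilter(include=["color"]),
    )
    print(reply.content)
```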
h2ogpte/session_async.py
CHANGED
|
@@ -30,6 +30,7 @@ from h2ogpte.types import (
|
|
|
30
30
|
ChatResponse,
|
|
31
31
|
SessionError,
|
|
32
32
|
PartialChatMessage,
|
|
33
|
+
TagFilter,
|
|
33
34
|
)
|
|
34
35
|
from h2ogpte.errors import (
|
|
35
36
|
UnauthorizedError,
|
|
@@ -108,7 +109,7 @@ class SessionAsync:
|
|
|
108
109
|
self_reflection_config: Optional[Dict[str, Any]] = None,
|
|
109
110
|
rag_config: Optional[Dict[str, Any]] = None,
|
|
110
111
|
include_chat_history: Optional[Union[bool, str]] = "auto",
|
|
111
|
-
tags: Optional[List[str]] = None,
|
|
112
|
+
tags: Optional[Union[List[str], TagFilter]] = None,
|
|
112
113
|
metadata_filter: Optional[Dict[str, Any]] = None,
|
|
113
114
|
timeout: Optional[float] = None,
|
|
114
115
|
retries: int = 3,
|
|
@@ -239,6 +240,8 @@ class SessionAsync:
|
|
|
239
240
|
Requires 1 LLM or Agent call.
|
|
240
241
|
:code:`"agent_only"` Agent Only - Answer the query with only original files passed to agent.
|
|
241
242
|
Requires 1 Agent call.
|
|
243
|
+
:code:`"agentic_rag"` Agentic RAG - Agent with RAG tool that retrieves and answers from collection.
|
|
244
|
+
Requires 1 Agent call with RAG tool execution.
|
|
242
245
|
:code:`"rag"` RAG (Retrieval Augmented Generation) - Use supporting document contexts
|
|
243
246
|
to answer the query. Requires 1 LLM or Agent call.
|
|
244
247
|
:code:`"hyde1"` LLM Only + RAG composite - HyDE RAG (Hypothetical Document Embedding).
|
|
@@ -290,7 +293,9 @@ class SessionAsync:
|
|
|
290
293
|
answers for a given question.
|
|
291
294
|
Choices are: ["on","off","auto",True,False]
|
|
292
295
|
tags:
|
|
293
|
-
|
|
296
|
+
Filter documents by tags for RAG. Can be:
|
|
297
|
+
- List format: ["red", "blue"] includes documents with these tags
|
|
298
|
+
- TagFilter object: TagFilter(include=["red"], exclude=["blue"])
|
|
294
299
|
metadata_filter:
|
|
295
300
|
A dictionary to filter documents by metadata, from which to pull the context for RAG.
|
|
296
301
|
timeout:
|
|
@@ -473,7 +478,7 @@ class SessionAsync:
|
|
|
473
478
|
while retries < self._max_connect_retries:
|
|
474
479
|
try:
|
|
475
480
|
self._websocket = await ws_old_connect(
|
|
476
|
-
uri=f"{scheme}://{url.netloc}/ws?currentSessionID={self._chat_session_id}",
|
|
481
|
+
uri=f"{scheme}://{url.netloc}/ws?currentSessionID={self._chat_session_id}&source=py",
|
|
477
482
|
extra_headers=headers,
|
|
478
483
|
open_timeout=self._open_timeout,
|
|
479
484
|
close_timeout=self._close_timeout,
|
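`session_async.py` mirrors the same widening of `tags`, plus the `&source=py` addition to the websocket URI. A hedged async sketch, assuming the async client exposes the same `connect()`/`query()` pattern as the sync one (that part is not shown in this diff); the address, key, and session id are placeholders:

```python
# Sketch only, under the assumptions stated above.
import asyncio

from h2ogpte import H2OGPTEAsync
from h2ogpte.types import TagFilter

async def main() -> None:
    client = H2OGPTEAsync(
        address="https://h2ogpte.genai.h2o.ai",
        api_key="sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",  # placeholder key
    )
    chat_session_id = "00000000-0000-0000-0000-000000000000"  # placeholder

    async with client.connect(chat_session_id) as session:
        reply = await session.query(
            "What do the color-tagged documents say?",
            tags=TagFilter(include=["color"], exclude=["draft"]),
        )
        print(reply.content)

asyncio.run(main())
```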
h2ogpte/types.py
CHANGED
|
@@ -592,6 +592,21 @@ class APIKey(BaseModel):
|
|
|
592
592
|
is_global_key: bool
|
|
593
593
|
|
|
594
594
|
|
|
595
|
+
@dataclass
|
|
596
|
+
class TagFilter:
|
|
597
|
+
"""
|
|
598
|
+
Filter for document tags supporting inclusion and exclusion.
|
|
599
|
+
|
|
600
|
+
Examples:
|
|
601
|
+
TagFilter(include=['red', 'blue'])
|
|
602
|
+
TagFilter(exclude=['red', 'blue'])
|
|
603
|
+
TagFilter(include=['color'], exclude=['red', 'blue'])
|
|
604
|
+
"""
|
|
605
|
+
|
|
606
|
+
include: Optional[List[str]] = None
|
|
607
|
+
exclude: Optional[List[str]] = None
|
|
608
|
+
|
|
609
|
+
|
|
595
610
|
@dataclass
|
|
596
611
|
class ChatRequest:
|
|
597
612
|
t: str # cq
|
|
@@ -609,7 +624,7 @@ class ChatRequest:
|
|
|
609
624
|
self_reflection_config: Optional[str]
|
|
610
625
|
rag_config: Optional[str]
|
|
611
626
|
include_chat_history: Optional[Union[bool, str]] = False
|
|
612
|
-
tags: Optional[List[str]] = None
|
|
627
|
+
tags: Optional[Union[List[str], "TagFilter"]] = None
|
|
613
628
|
metadata_filter: Optional[str] = None
|
|
614
629
|
image_batch_image_prompt: Optional[str] = None
|
|
615
630
|
image_batch_final_prompt: Optional[str] = None
|
|
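`types.py` gains a plain `TagFilter` dataclass so callers can express the include/exclude filter without touching the generated REST models. A minimal sketch of its shape; `dataclasses.asdict` is used only to show the dictionary the filter reduces to:

```python
# Sketch only: the new client-side TagFilter dataclass.
from dataclasses import asdict
from h2ogpte.types import TagFilter

flt = TagFilter(include=["color"], exclude=["red", "blue"])
print(asdict(flt))                           # {'include': ['color'], 'exclude': ['red', 'blue']}

# Either list may be omitted; unset lists stay None on the dataclass.
print(asdict(TagFilter(exclude=["draft"])))  # {'include': None, 'exclude': ['draft']}
```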
@@ -1,13 +1,13 @@
|
|
|
1
|
-
h2ogpte/__init__.py,sha256=
|
|
1
|
+
h2ogpte/__init__.py,sha256=pEqhFD2G22hR2KXwsAOt70pbrZDJBfO2cnCxWWXlrG4,1524
|
|
2
2
|
h2ogpte/connectors.py,sha256=CRAEpkn9GotcCjWANfJjZ5Hq1cjGWJ4H_IO4eJgVWiI,8466
|
|
3
3
|
h2ogpte/errors.py,sha256=XgLdfJO1fZ9Bf9rhUKpnvRzzvkNyan3Oc6WzGS6hCUA,1248
|
|
4
|
-
h2ogpte/h2ogpte.py,sha256=
|
|
5
|
-
h2ogpte/h2ogpte_async.py,sha256=
|
|
4
|
+
h2ogpte/h2ogpte.py,sha256=X0MIQ6JOgHJkCeBhlTwvqrSuom5KFZ3H7DZJN1EWn1w,310278
|
|
5
|
+
h2ogpte/h2ogpte_async.py,sha256=x2KlYVQN0CrNe1N67Of-WDpVDBzw0sIlQm5kQByAGJ4,330175
|
|
6
6
|
h2ogpte/h2ogpte_sync_base.py,sha256=ftsVzpMqEsyi0UACMI-7H_EIYEx9JEdEUImbyjWy_Hc,15285
|
|
7
|
-
h2ogpte/session.py,sha256=
|
|
8
|
-
h2ogpte/session_async.py,sha256=
|
|
7
|
+
h2ogpte/session.py,sha256=uyU0QJhTpN9vMfie3hj3S8pvMOLkcJdsnnXrSgCgxqE,32770
|
|
8
|
+
h2ogpte/session_async.py,sha256=F5wg8bIRhdXZNvc_6WLtT1tQUAPVPaKq4bYJDoMCEOA,31738
|
|
9
9
|
h2ogpte/shared_client.py,sha256=Zh24myL--5JDdrKoJPW4aeprHX6a_oB9o461Ho3hnU8,14691
|
|
10
|
-
h2ogpte/types.py,sha256=
|
|
10
|
+
h2ogpte/types.py,sha256=IwgtLX4GNC6GNdbaLkQhy5Wn8h-p5AM9_Yi326tdwFk,15728
|
|
11
11
|
h2ogpte/utils.py,sha256=Z9n57xxPu0KtsCzkJ9V_VgTW--oG_aXTLBgmXDWSdnM,3201
|
|
12
12
|
h2ogpte/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
13
13
|
h2ogpte/cli/main.py,sha256=Upf3t_5m1RqLh1jKGB6Gbyp3n9sujVny7sY-qxh2PYo,2722
|
|
@@ -41,10 +41,10 @@ h2ogpte/cli/ui/prompts.py,sha256=bJvRe_32KppQTK5bqnsrPh0RS4JaY9KkiV7y-3v8PMQ,538
|
|
|
41
41
|
h2ogpte/cli/ui/status_bar.py,sha256=hs2MLvkg-y3Aiu3gWRtgMXf3jv3DGe7Y47ucgoBAP7Y,3852
|
|
42
42
|
h2ogpte/cli/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
43
43
|
h2ogpte/cli/utils/file_manager.py,sha256=ghNDX6G3Dr0vFvBYjbqx5o7qxq-pN8Vo2Rp1vyITfLo,13988
|
|
44
|
-
h2ogpte/rest_async/__init__.py,sha256=
|
|
45
|
-
h2ogpte/rest_async/api_client.py,sha256=
|
|
44
|
+
h2ogpte/rest_async/__init__.py,sha256=k_WohFmzdiWzmABab4-dP6cJm8Sb9pjxLA8QEwvB3Vw,15336
|
|
45
|
+
h2ogpte/rest_async/api_client.py,sha256=fd2ZBAkGB8UwbOTrIAsD65F47qtn5I2132UzsRPEcOc,29510
|
|
46
46
|
h2ogpte/rest_async/api_response.py,sha256=eMxw1mpmJcoGZ3gs9z6jM4oYoZ10Gjk333s9sKxGv7s,652
|
|
47
|
-
h2ogpte/rest_async/configuration.py,sha256=
|
|
47
|
+
h2ogpte/rest_async/configuration.py,sha256=FZAei-oFif6kJxYXZtTwQdnXU6YZJicK25Uo1wrftqM,19567
|
|
48
48
|
h2ogpte/rest_async/exceptions.py,sha256=aSDc-0lURtyQjf5HGa7_Ta0nATxKxfHW3huDA2Zdj6o,8370
|
|
49
49
|
h2ogpte/rest_async/rest.py,sha256=mdjDwzJ1kiaYtONUfDRqKsRPw5-tG6eyZV2P1yBuwRo,9147
|
|
50
50
|
h2ogpte/rest_async/api/__init__.py,sha256=R_x57GGyaSgxZyrJOyOt551TodbRSQf3T7VrraQc-84,973
|
|
@@ -63,7 +63,7 @@ h2ogpte/rest_async/api/prompt_templates_api.py,sha256=RJnYC3jfhvx2L_vpTlU6kCqujs
|
|
|
63
63
|
h2ogpte/rest_async/api/secrets_api.py,sha256=MTtmpYO2IOXuCklK-BxVyF9aBNZebgWuQenada-uM7o,68122
|
|
64
64
|
h2ogpte/rest_async/api/system_api.py,sha256=wXxO1lFEnrPHO0JRCgg13j6CpRKb3nou81dk8nA31v0,12532
|
|
65
65
|
h2ogpte/rest_async/api/tags_api.py,sha256=VwamxhJKsuBu3UeslsZ0vflxbnV1FmUV2pbWvIBwvFk,56168
|
|
66
|
-
h2ogpte/rest_async/models/__init__.py,sha256=
|
|
66
|
+
h2ogpte/rest_async/models/__init__.py,sha256=ntZCsoyBUoYEO9f3dbmk8x6FNyjMyJx4lDoZ4mlGqDM,13825
|
|
67
67
|
h2ogpte/rest_async/models/add_custom_agent_tool201_response_inner.py,sha256=0pxOC4ETqAnl2Amyt9d47oZDCH7Gjz0kexbpPsXurlg,4619
|
|
68
68
|
h2ogpte/rest_async/models/agent_key.py,sha256=u-48HJqvAd3fpY8SZnl6_iDnv_2_V_wGrGu9w54V7s8,5226
|
|
69
69
|
h2ogpte/rest_async/models/agent_server_directory_file_stats.py,sha256=Y25fTkk8kbY_p2AXFNTM4sUlPwEGGSMLxmC_csmTn1w,6335
|
|
@@ -80,14 +80,15 @@ h2ogpte/rest_async/models/api_key_update_expiry_request.py,sha256=GTMkaqLOUqUpjx
|
|
|
80
80
|
h2ogpte/rest_async/models/azure_credentials.py,sha256=hy6hv5Uf5CIGgO5S-2jVbO5N25QvEkiUxXnvItESoBA,4620
|
|
81
81
|
h2ogpte/rest_async/models/chat_completion.py,sha256=iVTiDzWJ7v5p_j37PO5aRdLrKhY98J_cl7eXTsymudU,4524
|
|
82
82
|
h2ogpte/rest_async/models/chat_completion_delta.py,sha256=TGEeMoSgBIph1YzTJYN2lYekboFo4btRRGtDbd5HHtw,4745
|
|
83
|
-
h2ogpte/rest_async/models/chat_completion_request.py,sha256=
|
|
83
|
+
h2ogpte/rest_async/models/chat_completion_request.py,sha256=7a_x6kQtF5Lap73XpvY3DgCCznykygLEEGD-SVwwlEo,19251
|
|
84
84
|
h2ogpte/rest_async/models/chat_error.py,sha256=Ob1UB0nhrKdEGA5Z63VD_TdxokV-8CyA5m-NDgnwqt4,4355
|
|
85
85
|
h2ogpte/rest_async/models/chat_message.py,sha256=D46MmPf86LPKkcTJKcPyH-EFyMMkPRNOCC1jfQu0xYE,5768
|
|
86
86
|
h2ogpte/rest_async/models/chat_message_meta.py,sha256=dgM0NIDSdB6_MN7lEiR4frDFCVZa7C58UATW0SiJB2s,4484
|
|
87
87
|
h2ogpte/rest_async/models/chat_message_reference.py,sha256=P5_jxbgfNcwdzC7OgND27EbVemPKiZay0jsCYn8qqTs,5248
|
|
88
88
|
h2ogpte/rest_async/models/chat_session.py,sha256=RVvL2IvMzIQPJ2W6lheUJyN3i6kaffQ80ox66sivq_M,5199
|
|
89
89
|
h2ogpte/rest_async/models/chat_session_update_request.py,sha256=yiH14-IrQfbZ0qINIAyGgtrmhgDr-E-cmd9_5OVVHKU,4411
|
|
90
|
-
h2ogpte/rest_async/models/chat_settings.py,sha256=
|
|
90
|
+
h2ogpte/rest_async/models/chat_settings.py,sha256=95VV_za51NcVzgn5EADwRjPmP8ek4iHWRkOQCSQOlfA,17149
|
|
91
|
+
h2ogpte/rest_async/models/chat_settings_tags.py,sha256=W8q1R6hMIXGNOcyc5k-hAOSOUCV7744IOcTsT7SKOU4,7424
|
|
91
92
|
h2ogpte/rest_async/models/chunk.py,sha256=4t2oms4W29WEYKi7KvzCArsLOaCOLYyyQRrJttlDUAU,4759
|
|
92
93
|
h2ogpte/rest_async/models/chunk_search_result.py,sha256=keifMKId0YhLFGzh5nv3jNCtQt7YciiwUd6-DsNckAs,4985
|
|
93
94
|
h2ogpte/rest_async/models/collection.py,sha256=NR9Ze5D8PNTDbSKWD3J5y9OiF_KdHEJnJmZKQJCkg00,9181
|
|
@@ -131,7 +132,7 @@ h2ogpte/rest_async/models/guardrails_settings_create_request.py,sha256=6DMke_u-1
|
|
|
131
132
|
h2ogpte/rest_async/models/h2_ogpt_system_info.py,sha256=6pBoTwU-QOh3oSk48drmuFhOcv9zEEzsWXvn-P4LIHk,8652
|
|
132
133
|
h2ogpte/rest_async/models/h2_ogptgpu_info.py,sha256=gUdC0izDgwpyRBJa9_bua6BYnJo8K0H9nG_E4kO_pNE,5124
|
|
133
134
|
h2ogpte/rest_async/models/ingest_from_azure_blob_storage_body.py,sha256=ouEUrdMYJU8kcjTOD8FfzPiaZYwU6RJFP6DYfY9oNyk,5470
|
|
134
|
-
h2ogpte/rest_async/models/ingest_from_confluence_body.py,sha256=
|
|
135
|
+
h2ogpte/rest_async/models/ingest_from_confluence_body.py,sha256=MWCCDW9lnKQuBPNCNNqVwoijOT-EW8JlQsD88KRjYmo,5561
|
|
135
136
|
h2ogpte/rest_async/models/ingest_from_file_system_body.py,sha256=JnbjY-PxMxaLZXvHRjKdfNTZDtJj9CfPpRPG1QVyBjU,4655
|
|
136
137
|
h2ogpte/rest_async/models/ingest_from_gcs_body.py,sha256=ygQsntThO7SHxzHlwsftFvPvZQsGj6qHMCDp5HOdipg,5079
|
|
137
138
|
h2ogpte/rest_async/models/ingest_from_s3_body.py,sha256=n7nuAHbMBQpFPerWspgxy5Pua-Bvkc3axcYgFEg33mU,5311
|
|
@@ -177,6 +178,7 @@ h2ogpte/rest_async/models/suggested_question.py,sha256=RcXlzaTsj-GFtT5gGuiHkNHtN
|
|
|
177
178
|
h2ogpte/rest_async/models/summarize_request.py,sha256=LpiWC-XTgxaXvezCoJdCCvl_cM7vy6f7ocEZZUsgaYU,14882
|
|
178
179
|
h2ogpte/rest_async/models/tag.py,sha256=rnE0UXIzF3tqM9EWXRZ1oY3OU1Piq5MOU9t2svwgk3w,4594
|
|
179
180
|
h2ogpte/rest_async/models/tag_create_request.py,sha256=jETninpugqtUUkwHmcUZj3hj1qbSqcb7xLxnHkB1CCE,4379
|
|
181
|
+
h2ogpte/rest_async/models/tag_filter.py,sha256=Qnis6iEOQOPi5bpRA5YrmxxjOcg0hNwrf7UeZ332AtU,5217
|
|
180
182
|
h2ogpte/rest_async/models/tag_update_request.py,sha256=QD9iUZIqaUsuobauQF_f6OkyRE2bTG3O6f1N2pqBnBM,4524
|
|
181
183
|
h2ogpte/rest_async/models/update_agent_key_request.py,sha256=7EqlI-kZw0U2fyTnJumnUUlXslYZTBWvcTszsVkB310,5030
|
|
182
184
|
h2ogpte/rest_async/models/update_agent_tool_preference_request.py,sha256=GguSv4qEmF7OJZRm8vMZJ-9Md2Ce_hgModJ4PE4OruU,4493
|
|
@@ -201,10 +203,10 @@ h2ogpte/rest_async/models/user_deletion_request.py,sha256=z7gD8XKOGwwg782TRzXJii
|
|
|
201
203
|
h2ogpte/rest_async/models/user_info.py,sha256=ef59Eh9k42JUY3X2RnCrwYR7sc_8lXT1vRLGoNz3uTU,4489
|
|
202
204
|
h2ogpte/rest_async/models/user_job_details.py,sha256=kzu8fLxVsRMgnyt6dLr0VWjlIoE3i1VRpGR9nDxFyk4,4985
|
|
203
205
|
h2ogpte/rest_async/models/user_permission.py,sha256=1k74E7s2kD2waSZ79KPlgTupVYEacTKWMqcKxv2972A,4856
|
|
204
|
-
h2ogpte/rest_sync/__init__.py,sha256=
|
|
205
|
-
h2ogpte/rest_sync/api_client.py,sha256=
|
|
206
|
+
h2ogpte/rest_sync/__init__.py,sha256=JRT0iDDzvcy-A7NGkn7zsjlCnIV-YekoM8RZfA6H7Ns,15173
|
|
207
|
+
h2ogpte/rest_sync/api_client.py,sha256=xkdmqVase4zywcw6kLGV0rbXvN_FPyET8txFnDSPCkQ,29397
|
|
206
208
|
h2ogpte/rest_sync/api_response.py,sha256=eMxw1mpmJcoGZ3gs9z6jM4oYoZ10Gjk333s9sKxGv7s,652
|
|
207
|
-
h2ogpte/rest_sync/configuration.py,sha256=
|
|
209
|
+
h2ogpte/rest_sync/configuration.py,sha256=p8FDbRC4zG7pIpLmzyA5xUk1McYCqJygnNZnwustIog,19850
|
|
208
210
|
h2ogpte/rest_sync/exceptions.py,sha256=aSDc-0lURtyQjf5HGa7_Ta0nATxKxfHW3huDA2Zdj6o,8370
|
|
209
211
|
h2ogpte/rest_sync/rest.py,sha256=evRzviTYC_fsrpTtFlGvruXmquH9C0jDn-oQrGrE5A0,11314
|
|
210
212
|
h2ogpte/rest_sync/api/__init__.py,sha256=ZuLQQtyiXnP5UOwTlIOYLGLQq1BG_0PEkzC9s698vjM,958
|
|
@@ -223,7 +225,7 @@ h2ogpte/rest_sync/api/prompt_templates_api.py,sha256=157y9lzY7Ky_ALu8TEemi0rfYzX
|
|
|
223
225
|
h2ogpte/rest_sync/api/secrets_api.py,sha256=5rAikvrX7n3Cj9M0ME-cPjISLpqrEFh2LmW23mvGk4g,67828
|
|
224
226
|
h2ogpte/rest_sync/api/system_api.py,sha256=knhP97lzeZt-YFTpcNJm9NdnqjoSg_Oh0yMGowiV1IM,12480
|
|
225
227
|
h2ogpte/rest_sync/api/tags_api.py,sha256=oCBsrFFLk0su8mz4wnCGSR_NxpCQgwEx18IwJKsOKrA,55921
|
|
226
|
-
h2ogpte/rest_sync/models/__init__.py,sha256=
|
|
228
|
+
h2ogpte/rest_sync/models/__init__.py,sha256=IENU9hChLstPUghLDSznNBoJ2Tdz2yD8SPOwRlagXec,13686
|
|
227
229
|
h2ogpte/rest_sync/models/add_custom_agent_tool201_response_inner.py,sha256=0pxOC4ETqAnl2Amyt9d47oZDCH7Gjz0kexbpPsXurlg,4619
|
|
228
230
|
h2ogpte/rest_sync/models/agent_key.py,sha256=u-48HJqvAd3fpY8SZnl6_iDnv_2_V_wGrGu9w54V7s8,5226
|
|
229
231
|
h2ogpte/rest_sync/models/agent_server_directory_file_stats.py,sha256=Y25fTkk8kbY_p2AXFNTM4sUlPwEGGSMLxmC_csmTn1w,6335
|
|
@@ -240,14 +242,15 @@ h2ogpte/rest_sync/models/api_key_update_expiry_request.py,sha256=GTMkaqLOUqUpjxl
|
|
|
240
242
|
h2ogpte/rest_sync/models/azure_credentials.py,sha256=hy6hv5Uf5CIGgO5S-2jVbO5N25QvEkiUxXnvItESoBA,4620
|
|
241
243
|
h2ogpte/rest_sync/models/chat_completion.py,sha256=iVTiDzWJ7v5p_j37PO5aRdLrKhY98J_cl7eXTsymudU,4524
|
|
242
244
|
h2ogpte/rest_sync/models/chat_completion_delta.py,sha256=TGEeMoSgBIph1YzTJYN2lYekboFo4btRRGtDbd5HHtw,4745
|
|
243
|
-
h2ogpte/rest_sync/models/chat_completion_request.py,sha256=
|
|
245
|
+
h2ogpte/rest_sync/models/chat_completion_request.py,sha256=9LG4N3Dh2YoY3dx6aRNXcdqoWPfDqnOxFOAa9NDGYZQ,19250
|
|
244
246
|
h2ogpte/rest_sync/models/chat_error.py,sha256=Ob1UB0nhrKdEGA5Z63VD_TdxokV-8CyA5m-NDgnwqt4,4355
|
|
245
247
|
h2ogpte/rest_sync/models/chat_message.py,sha256=OLBO6sF7Wn8NC2Qf2anxGZYJ7YpWQTf8oI7ENcOSmQ8,5767
|
|
246
248
|
h2ogpte/rest_sync/models/chat_message_meta.py,sha256=dgM0NIDSdB6_MN7lEiR4frDFCVZa7C58UATW0SiJB2s,4484
|
|
247
249
|
h2ogpte/rest_sync/models/chat_message_reference.py,sha256=P5_jxbgfNcwdzC7OgND27EbVemPKiZay0jsCYn8qqTs,5248
|
|
248
250
|
h2ogpte/rest_sync/models/chat_session.py,sha256=RVvL2IvMzIQPJ2W6lheUJyN3i6kaffQ80ox66sivq_M,5199
|
|
249
251
|
h2ogpte/rest_sync/models/chat_session_update_request.py,sha256=yiH14-IrQfbZ0qINIAyGgtrmhgDr-E-cmd9_5OVVHKU,4411
|
|
250
|
-
h2ogpte/rest_sync/models/chat_settings.py,sha256=
|
|
252
|
+
h2ogpte/rest_sync/models/chat_settings.py,sha256=Qrkq4iAfK83Ts8oo50UYiA1vX_QHXpzJvF_7LEWFQq0,17148
|
|
253
|
+
h2ogpte/rest_sync/models/chat_settings_tags.py,sha256=fZoLR7g19bvVz4ChhttflYp36PkUsiEFwwh4A5VFEHk,7423
|
|
251
254
|
h2ogpte/rest_sync/models/chunk.py,sha256=4t2oms4W29WEYKi7KvzCArsLOaCOLYyyQRrJttlDUAU,4759
|
|
252
255
|
h2ogpte/rest_sync/models/chunk_search_result.py,sha256=keifMKId0YhLFGzh5nv3jNCtQt7YciiwUd6-DsNckAs,4985
|
|
253
256
|
h2ogpte/rest_sync/models/collection.py,sha256=NR9Ze5D8PNTDbSKWD3J5y9OiF_KdHEJnJmZKQJCkg00,9181
|
|
@@ -291,7 +294,7 @@ h2ogpte/rest_sync/models/guardrails_settings_create_request.py,sha256=W3-vZsU0Cu
|
|
|
291
294
|
h2ogpte/rest_sync/models/h2_ogpt_system_info.py,sha256=eaFSINplInnPIW-dRO9K25AbQouNYngBI_JXX-AuY_w,8651
|
|
292
295
|
h2ogpte/rest_sync/models/h2_ogptgpu_info.py,sha256=gUdC0izDgwpyRBJa9_bua6BYnJo8K0H9nG_E4kO_pNE,5124
|
|
293
296
|
h2ogpte/rest_sync/models/ingest_from_azure_blob_storage_body.py,sha256=G_0SInDzFcpWWwnOEByjDir3QkMBiMxU4D-rGKeBSUU,5469
|
|
294
|
-
h2ogpte/rest_sync/models/ingest_from_confluence_body.py,sha256=
|
|
297
|
+
h2ogpte/rest_sync/models/ingest_from_confluence_body.py,sha256=eFr4bmTQOBfjMywxoNIDWiG4y_untC7Ws1JV0m4rfIQ,5560
|
|
295
298
|
h2ogpte/rest_sync/models/ingest_from_file_system_body.py,sha256=JnbjY-PxMxaLZXvHRjKdfNTZDtJj9CfPpRPG1QVyBjU,4655
|
|
296
299
|
h2ogpte/rest_sync/models/ingest_from_gcs_body.py,sha256=XLRQMzcYLHWUWaRD_hnhSwIRz8TYGM3emDgpvWw_Gak,5078
|
|
297
300
|
h2ogpte/rest_sync/models/ingest_from_s3_body.py,sha256=OTZ01MO7hn-LRlATgsrv1DUX6oz04jv4Qk94fsGSfnE,5310
|
|
@@ -337,6 +340,7 @@ h2ogpte/rest_sync/models/suggested_question.py,sha256=RcXlzaTsj-GFtT5gGuiHkNHtNX
|
|
|
337
340
|
h2ogpte/rest_sync/models/summarize_request.py,sha256=L58eJZiqu-1Ssc2sat3Hp75k1mTixI_ibUiqYFTYptM,14881
|
|
338
341
|
h2ogpte/rest_sync/models/tag.py,sha256=rnE0UXIzF3tqM9EWXRZ1oY3OU1Piq5MOU9t2svwgk3w,4594
|
|
339
342
|
h2ogpte/rest_sync/models/tag_create_request.py,sha256=jETninpugqtUUkwHmcUZj3hj1qbSqcb7xLxnHkB1CCE,4379
|
|
343
|
+
h2ogpte/rest_sync/models/tag_filter.py,sha256=Qnis6iEOQOPi5bpRA5YrmxxjOcg0hNwrf7UeZ332AtU,5217
|
|
340
344
|
h2ogpte/rest_sync/models/tag_update_request.py,sha256=QD9iUZIqaUsuobauQF_f6OkyRE2bTG3O6f1N2pqBnBM,4524
|
|
341
345
|
h2ogpte/rest_sync/models/update_agent_key_request.py,sha256=7EqlI-kZw0U2fyTnJumnUUlXslYZTBWvcTszsVkB310,5030
|
|
342
346
|
h2ogpte/rest_sync/models/update_agent_tool_preference_request.py,sha256=GguSv4qEmF7OJZRm8vMZJ-9Md2Ce_hgModJ4PE4OruU,4493
|
|
@@ -361,8 +365,8 @@ h2ogpte/rest_sync/models/user_deletion_request.py,sha256=z7gD8XKOGwwg782TRzXJiiP
|
|
|
361
365
|
h2ogpte/rest_sync/models/user_info.py,sha256=ef59Eh9k42JUY3X2RnCrwYR7sc_8lXT1vRLGoNz3uTU,4489
|
|
362
366
|
h2ogpte/rest_sync/models/user_job_details.py,sha256=9cbhpgLMDpar-aTOaY5Ygud-8Kbi23cLNldTGab0Sd8,4984
|
|
363
367
|
h2ogpte/rest_sync/models/user_permission.py,sha256=1k74E7s2kD2waSZ79KPlgTupVYEacTKWMqcKxv2972A,4856
|
|
364
|
-
h2ogpte-1.6.
|
|
365
|
-
h2ogpte-1.6.
|
|
366
|
-
h2ogpte-1.6.
|
|
367
|
-
h2ogpte-1.6.
|
|
368
|
-
h2ogpte-1.6.
|
|
368
|
+
h2ogpte-1.6.53rc1.dist-info/METADATA,sha256=GXYMRNmJ4n9nMpTcDUb6YQ6Z1LnMwylBtZa-pDzoXVQ,8615
|
|
369
|
+
h2ogpte-1.6.53rc1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
|
370
|
+
h2ogpte-1.6.53rc1.dist-info/entry_points.txt,sha256=BlaqX2SXJanrOGqNYwnzvCxHGNadM7RBI4pW4rVo5z4,54
|
|
371
|
+
h2ogpte-1.6.53rc1.dist-info/top_level.txt,sha256=vXV4JnNwFWFAqTWyHrH-cGIQqbCcEDG9-BbyNn58JpM,8
|
|
372
|
+
h2ogpte-1.6.53rc1.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|
|
File without changes
|