h2ogpte 1.6.55rc1__py3-none-any.whl → 1.7.0rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. h2ogpte/__init__.py +1 -1
  2. h2ogpte/h2ogpte.py +213 -20
  3. h2ogpte/h2ogpte_async.py +213 -20
  4. h2ogpte/rest_async/__init__.py +3 -2
  5. h2ogpte/rest_async/api/agents_api.py +25 -25
  6. h2ogpte/rest_async/api/chat_api.py +1077 -21
  7. h2ogpte/rest_async/api/collections_api.py +281 -0
  8. h2ogpte/rest_async/api/models_api.py +35 -67
  9. h2ogpte/rest_async/api_client.py +1 -1
  10. h2ogpte/rest_async/configuration.py +1 -1
  11. h2ogpte/rest_async/models/__init__.py +2 -1
  12. h2ogpte/rest_async/models/chat_completion_request.py +6 -2
  13. h2ogpte/rest_async/models/chat_settings.py +6 -2
  14. h2ogpte/rest_async/models/chat_settings_tags.py +140 -0
  15. h2ogpte/rest_async/models/extractor.py +26 -2
  16. h2ogpte/rest_async/models/extractor_create_request.py +29 -5
  17. h2ogpte/rest_async/models/ingest_from_confluence_body.py +4 -2
  18. h2ogpte/rest_async/models/{create_topic_model_job_request.py → tag_filter.py} +11 -9
  19. h2ogpte/rest_sync/__init__.py +3 -2
  20. h2ogpte/rest_sync/api/agents_api.py +25 -25
  21. h2ogpte/rest_sync/api/chat_api.py +1077 -21
  22. h2ogpte/rest_sync/api/collections_api.py +281 -0
  23. h2ogpte/rest_sync/api/models_api.py +35 -67
  24. h2ogpte/rest_sync/api_client.py +1 -1
  25. h2ogpte/rest_sync/configuration.py +1 -1
  26. h2ogpte/rest_sync/models/__init__.py +2 -1
  27. h2ogpte/rest_sync/models/chat_completion_request.py +6 -2
  28. h2ogpte/rest_sync/models/chat_settings.py +6 -2
  29. h2ogpte/rest_sync/models/chat_settings_tags.py +140 -0
  30. h2ogpte/rest_sync/models/extractor.py +26 -2
  31. h2ogpte/rest_sync/models/extractor_create_request.py +29 -5
  32. h2ogpte/rest_sync/models/ingest_from_confluence_body.py +4 -2
  33. h2ogpte/rest_sync/models/{create_topic_model_job_request.py → tag_filter.py} +11 -9
  34. h2ogpte/session.py +10 -5
  35. h2ogpte/session_async.py +10 -2
  36. h2ogpte/types.py +28 -1
  37. {h2ogpte-1.6.55rc1.dist-info → h2ogpte-1.7.0rc2.dist-info}/METADATA +1 -1
  38. {h2ogpte-1.6.55rc1.dist-info → h2ogpte-1.7.0rc2.dist-info}/RECORD +41 -39
  39. {h2ogpte-1.6.55rc1.dist-info → h2ogpte-1.7.0rc2.dist-info}/WHEEL +0 -0
  40. {h2ogpte-1.6.55rc1.dist-info → h2ogpte-1.7.0rc2.dist-info}/entry_points.txt +0 -0
  41. {h2ogpte-1.6.55rc1.dist-info → h2ogpte-1.7.0rc2.dist-info}/top_level.txt +0 -0
@@ -19,6 +19,7 @@ import json
19
19
 
20
20
  from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr, field_validator
21
21
  from typing import Any, ClassVar, Dict, List, Optional
22
+ from h2ogpte.rest_async.models.chat_settings_tags import ChatSettingsTags
22
23
  from typing import Optional, Set
23
24
  from typing_extensions import Self
24
25
 
@@ -37,7 +38,7 @@ class ChatCompletionRequest(BaseModel):
37
38
  self_reflection_config: Optional[Dict[str, Any]] = Field(default=None, description="A map with self reflection settings: * `llm_reflection` **(type=string, example=gpt-4-0613)** * `prompt_reflection` **(type=string, example=\\\"\\\"\\\"Prompt:\\\\\\\\n%s\\\\\\\\n\\\"\\\"\\\"\\\\\\\\n\\\\\\\\n\\\"\\\"\\\")** * `system_prompt_reflection` **(type=string)** * `llm_args_reflection` **(type=string, example={})** ")
38
39
  rag_config: Optional[Dict[str, Any]] = Field(default=None, description="A map with arguments to control RAG (retrieval-augmented-generation) types.: * `rag_type` **(type=enum[auto, llm_only, rag, hyde1, hyde2, rag+, all_data])** RAG type options: * `auto` - Automatically select the best rag_type. * `llm_only` LLM Only - Answer the query without any supporting document contexts. Requires 1 LLM call. * `rag` RAG (Retrieval Augmented Generation) - Use supporting document contexts to answer the query. Requires 1 LLM call. * `hyde1` LLM Only + RAG composite - HyDE RAG (Hypothetical Document Embedding). Use 'LLM Only' response to find relevant contexts from a collection for generating a response. Requires 2 LLM calls. * `hyde2` HyDE + RAG composite - Use the 'HyDE RAG' response to find relevant contexts from a collection for generating a response. Requires 3 LLM calls. * `rag+` Summary RAG - Like RAG, but uses more context and recursive summarization to overcome LLM context limits. Keeps all retrieved chunks, puts them in order, adds neighboring chunks, then uses the summary API to get the answer. Can require several LLM calls. * `all_data` All Data RAG - Like Summary RAG, but includes all document chunks. Uses recursive summarization to overcome LLM context limits. Can require several LLM calls. * `hyde_no_rag_llm_prompt_extension` **(type=string, example=\\\\\\\\nKeep the answer brief, and list the 5 most relevant key words at the end.)** - Add this prompt to every user's prompt, when generating answers to be used for subsequent retrieval during HyDE. Only used when rag_type is `hyde1` or `hyde2`. * `num_neighbor_chunks_to_include` **(type=integer, default=1)** - A number of neighboring chunks to include for every retrieved relevant chunk. It helps to keep surrounding context together. Only enabled for rag_type `rag+`. 
* `meta_data_to_include` **(type=map)** - A map with flags that indicate whether each piece of document metadata is to be included as part of the context for a chat with a collection. * `name` **(type: boolean, default=True)** * `text` **(type: boolean, default=True)** * `page` **(type: boolean, default=True)** * `captions` **(type: boolean, default=True)** * `uri` **(type: boolean, default=False)** * `connector` **(type: boolean, default=False)** * `original_mtime` **(type: boolean, default=False)** * `age` **(type: boolean, default=False)** * `score` **(type: boolean, default=False)** * `rag_max_chunks` **(type=integer, default=-1)** - Maximum number of document chunks to retrieve for RAG. Actual number depends on rag_type and admin configuration. Set to >0 values to enable. Can be combined with rag_min_chunk_score. * `rag_min_chunk_score` **(type=double, default=0.0)** - Minimum score of document chunks to retrieve for RAG. Set to >0 values to enable. Can be combined with rag_max_chunks. ")
39
40
  include_chat_history: Optional[StrictStr] = Field(default=None, description="Whether to include chat history. Includes previous questions and answers for the current chat session for each new chat request. Disable if require deterministic answers for a given question.")
40
- tags: Optional[List[StrictStr]] = Field(default=None, description="A list of tags from which to pull the context for RAG.")
41
+ tags: Optional[ChatSettingsTags] = None
41
42
  __properties: ClassVar[List[str]] = ["message", "system_prompt", "pre_prompt_query", "prompt_query", "image_batch_final_prompt", "image_batch_image_prompt", "llm", "llm_args", "self_reflection_config", "rag_config", "include_chat_history", "tags"]
42
43
 
43
44
  @field_validator('include_chat_history')
@@ -89,6 +90,9 @@ class ChatCompletionRequest(BaseModel):
89
90
  exclude=excluded_fields,
90
91
  exclude_none=True,
91
92
  )
93
+ # override the default output from pydantic by calling `to_dict()` of tags
94
+ if self.tags:
95
+ _dict['tags'] = self.tags.to_dict()
92
96
  return _dict
93
97
 
94
98
  @classmethod
@@ -112,7 +116,7 @@ class ChatCompletionRequest(BaseModel):
112
116
  "self_reflection_config": obj.get("self_reflection_config"),
113
117
  "rag_config": obj.get("rag_config"),
114
118
  "include_chat_history": obj.get("include_chat_history"),
115
- "tags": obj.get("tags"),
119
+ "tags": ChatSettingsTags.from_dict(obj["tags"]) if obj.get("tags") is not None else None,
116
120
  })
117
121
  return _obj
118
122
 
@@ -19,6 +19,7 @@ import json
19
19
 
20
20
  from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator
21
21
  from typing import Any, ClassVar, Dict, List, Optional
22
+ from h2ogpte.rest_async.models.chat_settings_tags import ChatSettingsTags
22
23
  from typing import Optional, Set
23
24
  from typing_extensions import Self
24
25
 
@@ -31,7 +32,7 @@ class ChatSettings(BaseModel):
31
32
  self_reflection_config: Optional[Dict[str, Any]] = Field(default=None, description="A map with self reflection settings: * `llm_reflection` **(type=string, example=gpt-4-0613)** * `prompt_reflection` **(type=string, example=\\\"\\\"\\\"Prompt:\\\\\\\\n%s\\\\\\\\n\\\"\\\"\\\"\\\\\\\\n\\\\\\\\n\\\"\\\"\\\")** * `system_prompt_reflection` **(type=string)** * `llm_args_reflection` **(type=string, example={})** ")
32
33
  rag_config: Optional[Dict[str, Any]] = Field(default=None, description="A map with arguments to control RAG (retrieval-augmented-generation) types.: * `rag_type` **(type=enum[auto, llm_only, rag, hyde1, hyde2, rag+, all_data])** RAG type options: * `auto` - Automatically select the best rag_type. * `llm_only` LLM Only - Answer the query without any supporting document contexts. Requires 1 LLM call. * `rag` RAG (Retrieval Augmented Generation) - Use supporting document contexts to answer the query. Requires 1 LLM call. * `hyde1` LLM Only + RAG composite - HyDE RAG (Hypothetical Document Embedding). Use 'LLM Only' response to find relevant contexts from a collection for generating a response. Requires 2 LLM calls. * `hyde2` HyDE + RAG composite - Use the 'HyDE RAG' response to find relevant contexts from a collection for generating a response. Requires 3 LLM calls. * `rag+` Summary RAG - Like RAG, but uses more context and recursive summarization to overcome LLM context limits. Keeps all retrieved chunks, puts them in order, adds neighboring chunks, then uses the summary API to get the answer. Can require several LLM calls. * `all_data` All Data RAG - Like Summary RAG, but includes all document chunks. Uses recursive summarization to overcome LLM context limits. Can require several LLM calls. * `hyde_no_rag_llm_prompt_extension` **(type=string, example=\\\\\\\\nKeep the answer brief, and list the 5 most relevant key words at the end.)** - Add this prompt to every user's prompt, when generating answers to be used for subsequent retrieval during HyDE. Only used when rag_type is `hyde1` or `hyde2`. * `num_neighbor_chunks_to_include` **(type=integer, default=1)** - A number of neighboring chunks to include for every retrieved relevant chunk. It helps to keep surrounding context together. Only enabled for rag_type `rag+`. 
* `meta_data_to_include` **(type=map)** - A map with flags that indicate whether each piece of document metadata is to be included as part of the context for a chat with a collection. * `name` **(type: boolean, default=True)** * `text` **(type: boolean, default=True)** * `page` **(type: boolean, default=True)** * `captions` **(type: boolean, default=True)** * `uri` **(type: boolean, default=False)** * `connector` **(type: boolean, default=False)** * `original_mtime` **(type: boolean, default=False)** * `age` **(type: boolean, default=False)** * `score` **(type: boolean, default=False)** * `rag_max_chunks` **(type=integer, default=-1)** - Maximum number of document chunks to retrieve for RAG. Actual number depends on rag_type and admin configuration. Set to >0 values to enable. Can be combined with rag_min_chunk_score. * `rag_min_chunk_score` **(type=double, default=0.0)** - Minimum score of document chunks to retrieve for RAG. Set to >0 values to enable. Can be combined with rag_max_chunks. ")
33
34
  include_chat_history: Optional[StrictStr] = Field(default=None, description="Whether to include chat history. Includes previous questions and answers for the current chat session for each new chat request. Disable if require deterministic answers for a given question.")
34
- tags: Optional[List[StrictStr]] = Field(default=None, description="A list of tags from which to pull the context for RAG.")
35
+ tags: Optional[ChatSettingsTags] = None
35
36
  __properties: ClassVar[List[str]] = ["llm", "llm_args", "self_reflection_config", "rag_config", "include_chat_history", "tags"]
36
37
 
37
38
  @field_validator('include_chat_history')
@@ -83,6 +84,9 @@ class ChatSettings(BaseModel):
83
84
  exclude=excluded_fields,
84
85
  exclude_none=True,
85
86
  )
87
+ # override the default output from pydantic by calling `to_dict()` of tags
88
+ if self.tags:
89
+ _dict['tags'] = self.tags.to_dict()
86
90
  return _dict
87
91
 
88
92
  @classmethod
@@ -100,7 +104,7 @@ class ChatSettings(BaseModel):
100
104
  "self_reflection_config": obj.get("self_reflection_config"),
101
105
  "rag_config": obj.get("rag_config"),
102
106
  "include_chat_history": obj.get("include_chat_history"),
103
- "tags": obj.get("tags")
107
+ "tags": ChatSettingsTags.from_dict(obj["tags"]) if obj.get("tags") is not None else None
104
108
  })
105
109
  return _obj
106
110
 
@@ -0,0 +1,140 @@
1
+ # coding: utf-8
2
+
3
+ """
4
+ h2oGPTe REST API
5
+
6
+ # Overview Users can easily interact with the h2oGPTe API through its REST API, allowing HTTP requests from any programming language. ## Authorization: Getting an API key Sign up/in at Enterprise h2oGPTe and generate one of the following two types of API keys: - **Global API key**: If a Collection is not specified when creating a new API Key, that key is considered to be a global API Key. Use global API Keys to grant full user impersonation and system-wide access to all of your work. Anyone with access to one of your global API Keys can create, delete, or interact with any of your past, current, and future Collections, Documents, Chats, and settings. - **Collection-specific API key**: Use Collection-specific API Keys to grant external access to only Chat with a specified Collection and make related API calls to it. Collection-specific API keys do not allow other API calls, such as creation, deletion, or access to other Collections or Chats. Access Enterprise h2oGPTe through your [H2O Generative AI](https://genai.h2o.ai/appstore) app store account, available with a freemium tier. ## Authorization: Using an API key All h2oGPTe REST API requests must include an API Key in the \"Authorization\" HTTP header, formatted as follows: ``` Authorization: Bearer sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ``` ```sh curl -X 'POST' \\ 'https://h2ogpte.genai.h2o.ai/api/v1/collections' \\ -H 'accept: application/json' \\ -H 'Content-Type: application/json' \\ -H 'Authorization: Bearer sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' \\ -d '{ \"name\": \"The name of my Collection\", \"description\": \"The description of my Collection\", \"embedding_model\": \"BAAI/bge-large-en-v1.5\" }' ``` ## Interactive h2oGPTe API testing This page only showcases the h2oGPTe REST API; you can test it directly in the [Swagger UI](https://h2ogpte.genai.h2o.ai/swagger-ui/). Ensure that you are logged into your Enterprise h2oGPTe account.
7
+
8
+ The version of the OpenAPI document: v1.0.0
9
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
10
+
11
+ Do not edit the class manually.
12
+ """ # noqa: E501
13
+
14
+
15
+ from __future__ import annotations
16
+ import json
17
+ import pprint
18
+ from pydantic import BaseModel, ConfigDict, Field, StrictStr, ValidationError, field_validator
19
+ from typing import Any, List, Optional
20
+ from h2ogpte.rest_async.models.tag_filter import TagFilter
21
+ from pydantic import StrictStr, Field
22
+ from typing import Union, List, Set, Optional, Dict
23
+ from typing_extensions import Literal, Self
24
+
25
+ CHATSETTINGSTAGS_ONE_OF_SCHEMAS = ["List[str]", "TagFilter"]
26
+
27
class ChatSettingsTags(BaseModel):
    """
    Filter documents by tags for RAG context. Supports two formats: - Array format (backward compatible): [\"red\", \"blue\"] includes documents with 'red' OR 'blue' tags - Object format (with exclusions): {\"include\": [\"color\"], \"exclude\": [\"red\", \"blue\"]}
    """
    # oneOf wrapper generated by OpenAPI Generator: exactly one of the two
    # schemas below may be held in `actual_instance` at a time.
    # data type: List[str]
    oneof_schema_1_validator: Optional[List[StrictStr]] = None
    # data type: TagFilter
    oneof_schema_2_validator: Optional[TagFilter] = None
    # The concrete wrapped value: either a plain list of tag names
    # (backward-compatible form) or a structured TagFilter object.
    actual_instance: Optional[Union[List[str], TagFilter]] = None
    # Names of the candidate oneOf schemas (informational).
    one_of_schemas: Set[str] = { "List[str]", "TagFilter" }

    # validate_assignment=True is load-bearing: the validators below rely on
    # plain attribute assignment re-triggering pydantic validation.
    model_config = ConfigDict(
        validate_assignment=True,
        protected_namespaces=(),
    )


    def __init__(self, *args, **kwargs) -> None:
        """Accept either one positional argument (the wrapped value) or
        keyword arguments — never both."""
        if args:
            if len(args) > 1:
                raise ValueError("If a position argument is used, only 1 is allowed to set `actual_instance`")
            if kwargs:
                raise ValueError("If a position argument is used, keyword arguments cannot be used.")
            super().__init__(actual_instance=args[0])
        else:
            super().__init__(**kwargs)

    @field_validator('actual_instance')
    def actual_instance_must_validate_oneof(cls, v):
        # Ensure `v` matches exactly one of the oneOf schemas. A scratch
        # instance is built with model_construct() (skips __init__ validation);
        # because validate_assignment=True, assigning to its validator field
        # below still triggers pydantic validation and raises on mismatch.
        instance = ChatSettingsTags.model_construct()
        error_messages = []
        match = 0
        # validate data type: List[str]
        try:
            instance.oneof_schema_1_validator = v
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))
        # validate data type: TagFilter — checked by isinstance rather than
        # assignment, since any dict payload should already have been parsed
        # into a TagFilter before reaching this validator.
        if not isinstance(v, TagFilter):
            error_messages.append(f"Error! Input type `{type(v)}` is not `TagFilter`")
        else:
            match += 1
        if match > 1:
            # more than 1 match
            raise ValueError("Multiple matches found when setting `actual_instance` in ChatSettingsTags with oneOf schemas: List[str], TagFilter. Details: " + ", ".join(error_messages))
        elif match == 0:
            # no match
            raise ValueError("No match found when setting `actual_instance` in ChatSettingsTags with oneOf schemas: List[str], TagFilter. Details: " + ", ".join(error_messages))
        else:
            return v

    @classmethod
    def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self:
        # Round-trips through JSON so that list payloads and dict payloads are
        # both funneled into the same from_json() matching logic.
        return cls.from_json(json.dumps(obj))

    @classmethod
    def from_json(cls, json_str: str) -> Self:
        """Returns the object represented by the json string"""
        instance = cls.model_construct()
        error_messages = []
        match = 0

        # deserialize data into List[str]
        try:
            # validation — assignment triggers pydantic validation because
            # validate_assignment=True; a non-list payload raises here.
            instance.oneof_schema_1_validator = json.loads(json_str)
            # assign value to actual_instance
            instance.actual_instance = instance.oneof_schema_1_validator
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))
        # deserialize data into TagFilter
        try:
            instance.actual_instance = TagFilter.from_json(json_str)
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))

        if match > 1:
            # more than 1 match
            raise ValueError("Multiple matches found when deserializing the JSON string into ChatSettingsTags with oneOf schemas: List[str], TagFilter. Details: " + ", ".join(error_messages))
        elif match == 0:
            # no match
            raise ValueError("No match found when deserializing the JSON string into ChatSettingsTags with oneOf schemas: List[str], TagFilter. Details: " + ", ".join(error_messages))
        else:
            return instance

    def to_json(self) -> str:
        """Returns the JSON representation of the actual instance"""
        if self.actual_instance is None:
            return "null"

        # Delegate to the wrapped model's serializer when available
        # (TagFilter); otherwise serialize the primitive/list directly.
        if hasattr(self.actual_instance, "to_json") and callable(self.actual_instance.to_json):
            return self.actual_instance.to_json()
        else:
            return json.dumps(self.actual_instance)

    def to_dict(self) -> Optional[Union[Dict[str, Any], List[str], TagFilter]]:
        """Returns the dict representation of the actual instance"""
        if self.actual_instance is None:
            return None

        # Delegate to the wrapped model's to_dict() when available (TagFilter);
        # otherwise return the list/primitive value unchanged.
        if hasattr(self.actual_instance, "to_dict") and callable(self.actual_instance.to_dict):
            return self.actual_instance.to_dict()
        else:
            # primitive type
            return self.actual_instance

    def to_str(self) -> str:
        """Returns the string representation of the actual instance"""
        return pprint.pformat(self.model_dump())
+ return pprint.pformat(self.model_dump())
139
+
140
+
@@ -18,8 +18,9 @@ import re # noqa: F401
18
18
  import json
19
19
 
20
20
  from datetime import datetime
21
- from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr
21
+ from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictInt, StrictStr
22
22
  from typing import Any, ClassVar, Dict, List, Optional
23
+ from h2ogpte.rest_async.models.guardrails_settings import GuardrailsSettings
23
24
  from typing import Optional, Set
24
25
  from typing_extensions import Self
25
26
 
@@ -31,10 +32,20 @@ class Extractor(BaseModel):
31
32
  description: StrictStr = Field(description="What this extractor does")
32
33
  llm: Optional[StrictStr] = Field(default=None, description="(Optional) Identifier or version of the language model the extractor uses")
33
34
  var_schema: Optional[StrictStr] = Field(default=None, description="(Optional) JSONSchema (or other spec) that the extractor outputs", alias="schema")
35
+ prompt: Optional[StrictStr] = Field(default=None, description="(Optional) Prompt text for the extractor")
36
+ pre_prompt_summary: Optional[StrictStr] = Field(default=None, description="(Optional) Pre-prompt summary text for the extractor")
37
+ keep_intermediate_results: Optional[StrictBool] = Field(default=None, description="(Optional) Flag indicating whether to keep intermediate results during extraction")
38
+ system_prompt: Optional[StrictStr] = Field(default=None, description="(Optional) System prompt text for the extractor")
39
+ max_num_chunks: Optional[StrictInt] = Field(default=None, description="(Optional) Maximum number of chunks to process")
40
+ vision: Optional[StrictStr] = Field(default=None, description="(Optional) Vision mode setting")
41
+ vision_llm: Optional[StrictStr] = Field(default=None, description="(Optional) Vision LLM model identifier")
42
+ image_batch_image_prompt: Optional[StrictStr] = Field(default=None, description="(Optional) Image batch processing prompt")
43
+ image_batch_final_prompt: Optional[StrictStr] = Field(default=None, description="(Optional) Final prompt for image batch processing")
44
+ guardrails_settings: Optional[GuardrailsSettings] = None
34
45
  id: StrictStr = Field(description="Unique identifier of the extractor")
35
46
  is_public: StrictBool = Field(description="Flag indicating if the extractor is public")
36
47
  created_at: datetime = Field(description="When the extractor definition was created")
37
- __properties: ClassVar[List[str]] = ["name", "description", "llm", "schema", "id", "is_public", "created_at"]
48
+ __properties: ClassVar[List[str]] = ["name", "description", "llm", "schema", "prompt", "pre_prompt_summary", "keep_intermediate_results", "system_prompt", "max_num_chunks", "vision", "vision_llm", "image_batch_image_prompt", "image_batch_final_prompt", "guardrails_settings", "id", "is_public", "created_at"]
38
49
 
39
50
  model_config = ConfigDict(
40
51
  populate_by_name=True,
@@ -75,6 +86,9 @@ class Extractor(BaseModel):
75
86
  exclude=excluded_fields,
76
87
  exclude_none=True,
77
88
  )
89
+ # override the default output from pydantic by calling `to_dict()` of guardrails_settings
90
+ if self.guardrails_settings:
91
+ _dict['guardrails_settings'] = self.guardrails_settings.to_dict()
78
92
  return _dict
79
93
 
80
94
  @classmethod
@@ -91,6 +105,16 @@ class Extractor(BaseModel):
91
105
  "description": obj.get("description"),
92
106
  "llm": obj.get("llm"),
93
107
  "schema": obj.get("schema"),
108
+ "prompt": obj.get("prompt"),
109
+ "pre_prompt_summary": obj.get("pre_prompt_summary"),
110
+ "keep_intermediate_results": obj.get("keep_intermediate_results"),
111
+ "system_prompt": obj.get("system_prompt"),
112
+ "max_num_chunks": obj.get("max_num_chunks"),
113
+ "vision": obj.get("vision"),
114
+ "vision_llm": obj.get("vision_llm"),
115
+ "image_batch_image_prompt": obj.get("image_batch_image_prompt"),
116
+ "image_batch_final_prompt": obj.get("image_batch_final_prompt"),
117
+ "guardrails_settings": GuardrailsSettings.from_dict(obj["guardrails_settings"]) if obj.get("guardrails_settings") is not None else None,
94
118
  "id": obj.get("id"),
95
119
  "is_public": obj.get("is_public"),
96
120
  "created_at": obj.get("created_at")
@@ -17,8 +17,9 @@ import pprint
17
17
  import re # noqa: F401
18
18
  import json
19
19
 
20
- from pydantic import BaseModel, ConfigDict, Field, StrictStr
21
- from typing import Any, ClassVar, Dict, List
20
+ from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictInt, StrictStr
21
+ from typing import Any, ClassVar, Dict, List, Optional
22
+ from h2ogpte.rest_async.models.guardrails_settings import GuardrailsSettings
22
23
  from typing import Optional, Set
23
24
  from typing_extensions import Self
24
25
 
@@ -29,8 +30,18 @@ class ExtractorCreateRequest(BaseModel):
29
30
  name: StrictStr = Field(description="Human-readable name")
30
31
  description: StrictStr = Field(description="What this extractor does")
31
32
  llm: StrictStr = Field(description="Identifier or version of the language model the extractor uses")
32
- var_schema: StrictStr = Field(description="JSONSchema (or other spec) that the extractor outputs", alias="schema")
33
- __properties: ClassVar[List[str]] = ["name", "description", "llm", "schema"]
33
+ var_schema: StrictStr = Field(description="JSONSchema (or other spec) that the extractor outputs. When schema is valid and not empty, the output will be always returned as JSON object.", alias="schema")
34
+ prompt: Optional[StrictStr] = Field(default=None, description="Custom prompt text for the extractor. If empty, default prompt will be used. If combined with schema, prompt can customize the output, but cannot change its format as it will always be JSON object.")
35
+ pre_prompt_summary: Optional[StrictStr] = Field(default=None, description="(Optional) Pre-prompt summary text for the extractor")
36
+ keep_intermediate_results: Optional[StrictBool] = Field(default=None, description="(Optional) Flag indicating whether to keep intermediate results during extraction")
37
+ system_prompt: Optional[StrictStr] = Field(default=None, description="(Optional) System prompt text for the extractor")
38
+ max_num_chunks: Optional[StrictInt] = Field(default=None, description="(Optional) Maximum number of chunks to process")
39
+ vision: Optional[StrictStr] = Field(default=None, description="(Optional) Vision mode setting")
40
+ vision_llm: Optional[StrictStr] = Field(default=None, description="(Optional) Vision LLM model identifier")
41
+ image_batch_image_prompt: Optional[StrictStr] = Field(default=None, description="(Optional) Image batch processing prompt")
42
+ image_batch_final_prompt: Optional[StrictStr] = Field(default=None, description="(Optional) Final prompt for image batch processing")
43
+ guardrails_settings: Optional[GuardrailsSettings] = None
44
+ __properties: ClassVar[List[str]] = ["name", "description", "llm", "schema", "prompt", "pre_prompt_summary", "keep_intermediate_results", "system_prompt", "max_num_chunks", "vision", "vision_llm", "image_batch_image_prompt", "image_batch_final_prompt", "guardrails_settings"]
34
45
 
35
46
  model_config = ConfigDict(
36
47
  populate_by_name=True,
@@ -71,6 +82,9 @@ class ExtractorCreateRequest(BaseModel):
71
82
  exclude=excluded_fields,
72
83
  exclude_none=True,
73
84
  )
85
+ # override the default output from pydantic by calling `to_dict()` of guardrails_settings
86
+ if self.guardrails_settings:
87
+ _dict['guardrails_settings'] = self.guardrails_settings.to_dict()
74
88
  return _dict
75
89
 
76
90
  @classmethod
@@ -86,7 +100,17 @@ class ExtractorCreateRequest(BaseModel):
86
100
  "name": obj.get("name"),
87
101
  "description": obj.get("description"),
88
102
  "llm": obj.get("llm"),
89
- "schema": obj.get("schema")
103
+ "schema": obj.get("schema"),
104
+ "prompt": obj.get("prompt"),
105
+ "pre_prompt_summary": obj.get("pre_prompt_summary"),
106
+ "keep_intermediate_results": obj.get("keep_intermediate_results"),
107
+ "system_prompt": obj.get("system_prompt"),
108
+ "max_num_chunks": obj.get("max_num_chunks"),
109
+ "vision": obj.get("vision"),
110
+ "vision_llm": obj.get("vision_llm"),
111
+ "image_batch_image_prompt": obj.get("image_batch_image_prompt"),
112
+ "image_batch_final_prompt": obj.get("image_batch_final_prompt"),
113
+ "guardrails_settings": GuardrailsSettings.from_dict(obj["guardrails_settings"]) if obj.get("guardrails_settings") is not None else None
90
114
  })
91
115
  return _obj
92
116
 
@@ -17,7 +17,7 @@ import pprint
17
17
  import re # noqa: F401
18
18
  import json
19
19
 
20
- from pydantic import BaseModel, ConfigDict, Field, StrictStr
20
+ from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr
21
21
  from typing import Any, ClassVar, Dict, List, Optional
22
22
  from h2ogpte.rest_async.models.confluence_credentials import ConfluenceCredentials
23
23
  from typing import Optional, Set
@@ -29,9 +29,10 @@ class IngestFromConfluenceBody(BaseModel):
29
29
  """ # noqa: E501
30
30
  base_url: StrictStr = Field(description="Base url of the confluence instance.")
31
31
  page_ids: List[StrictStr] = Field(description="Ids of pages to be ingested.")
32
+ include_attachments: Optional[StrictBool] = Field(default=False, description="A flag indicating whether to also ingest attachments with the page.")
32
33
  credentials: ConfluenceCredentials
33
34
  metadata: Optional[Dict[str, Any]] = Field(default=None, description="Metadata for the documents.")
34
- __properties: ClassVar[List[str]] = ["base_url", "page_ids", "credentials", "metadata"]
35
+ __properties: ClassVar[List[str]] = ["base_url", "page_ids", "include_attachments", "credentials", "metadata"]
35
36
 
36
37
  model_config = ConfigDict(
37
38
  populate_by_name=True,
@@ -89,6 +90,7 @@ class IngestFromConfluenceBody(BaseModel):
89
90
  _obj = cls.model_validate({
90
91
  "base_url": obj.get("base_url"),
91
92
  "page_ids": obj.get("page_ids"),
93
+ "include_attachments": obj.get("include_attachments") if obj.get("include_attachments") is not None else False,
92
94
  "credentials": ConfluenceCredentials.from_dict(obj["credentials"]) if obj.get("credentials") is not None else None,
93
95
  "metadata": obj.get("metadata")
94
96
  })
@@ -17,17 +17,18 @@ import pprint
17
17
  import re # noqa: F401
18
18
  import json
19
19
 
20
- from pydantic import BaseModel, ConfigDict, StrictStr
21
- from typing import Any, ClassVar, Dict, List
20
+ from pydantic import BaseModel, ConfigDict, Field, StrictStr
21
+ from typing import Any, ClassVar, Dict, List, Optional
22
22
  from typing import Optional, Set
23
23
  from typing_extensions import Self
24
24
 
25
- class CreateTopicModelJobRequest(BaseModel):
25
+ class TagFilter(BaseModel):
26
26
  """
27
- CreateTopicModelJobRequest
27
+ Filter for document tags supporting inclusion and exclusion. Note: The exclude list takes priority over the include list. If a document has a tag that appears in both lists, the document will be excluded. Examples: - Include only documents with 'red' OR 'blue' tags: {\"include\": [\"red\", \"blue\"]} - Exclude documents with 'red' OR 'blue' tags: {\"exclude\": [\"red\", \"blue\"]} - Include documents with 'color' tag BUT exclude 'red' and 'blue': {\"include\": [\"color\"], \"exclude\": [\"red\", \"blue\"]}
28
28
  """ # noqa: E501
29
- collection_id: StrictStr
30
- __properties: ClassVar[List[str]] = ["collection_id"]
29
+ include: Optional[List[StrictStr]] = Field(default=None, description="Include documents with ANY of these tags (OR operation).")
30
+ exclude: Optional[List[StrictStr]] = Field(default=None, description="Exclude documents with ANY of these tags (OR operation). Takes priority over include.")
31
+ __properties: ClassVar[List[str]] = ["include", "exclude"]
31
32
 
32
33
  model_config = ConfigDict(
33
34
  populate_by_name=True,
@@ -47,7 +48,7 @@ class CreateTopicModelJobRequest(BaseModel):
47
48
 
48
49
  @classmethod
49
50
  def from_json(cls, json_str: str) -> Optional[Self]:
50
- """Create an instance of CreateTopicModelJobRequest from a JSON string"""
51
+ """Create an instance of TagFilter from a JSON string"""
51
52
  return cls.from_dict(json.loads(json_str))
52
53
 
53
54
  def to_dict(self) -> Dict[str, Any]:
@@ -72,7 +73,7 @@ class CreateTopicModelJobRequest(BaseModel):
72
73
 
73
74
  @classmethod
74
75
  def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
75
- """Create an instance of CreateTopicModelJobRequest from a dict"""
76
+ """Create an instance of TagFilter from a dict"""
76
77
  if obj is None:
77
78
  return None
78
79
 
@@ -80,7 +81,8 @@ class CreateTopicModelJobRequest(BaseModel):
80
81
  return cls.model_validate(obj)
81
82
 
82
83
  _obj = cls.model_validate({
83
- "collection_id": obj.get("collection_id")
84
+ "include": obj.get("include"),
85
+ "exclude": obj.get("exclude")
84
86
  })
85
87
  return _obj
86
88
 
@@ -14,7 +14,7 @@
14
14
  """ # noqa: E501
15
15
 
16
16
 
17
- __version__ = "1.6.55-dev1"
17
+ __version__ = "1.7.0-dev2"
18
18
 
19
19
  # import apis into sdk package
20
20
  from h2ogpte.rest_sync.api.api_keys_api import APIKeysApi
@@ -69,6 +69,7 @@ from h2ogpte.rest_sync.models.chat_message_reference import ChatMessageReference
69
69
  from h2ogpte.rest_sync.models.chat_session import ChatSession
70
70
  from h2ogpte.rest_sync.models.chat_session_update_request import ChatSessionUpdateRequest
71
71
  from h2ogpte.rest_sync.models.chat_settings import ChatSettings
72
+ from h2ogpte.rest_sync.models.chat_settings_tags import ChatSettingsTags
72
73
  from h2ogpte.rest_sync.models.chunk import Chunk
73
74
  from h2ogpte.rest_sync.models.chunk_search_result import ChunkSearchResult
74
75
  from h2ogpte.rest_sync.models.collection import Collection
@@ -88,7 +89,6 @@ from h2ogpte.rest_sync.models.create_import_collection_to_collection_job_request
88
89
  from h2ogpte.rest_sync.models.create_insert_document_to_collection_job_request import CreateInsertDocumentToCollectionJobRequest
89
90
  from h2ogpte.rest_sync.models.create_secret201_response import CreateSecret201Response
90
91
  from h2ogpte.rest_sync.models.create_secret_request import CreateSecretRequest
91
- from h2ogpte.rest_sync.models.create_topic_model_job_request import CreateTopicModelJobRequest
92
92
  from h2ogpte.rest_sync.models.delete_chat_sessions_job_request import DeleteChatSessionsJobRequest
93
93
  from h2ogpte.rest_sync.models.delete_collections_job_request import DeleteCollectionsJobRequest
94
94
  from h2ogpte.rest_sync.models.delete_documents_job_request import DeleteDocumentsJobRequest
@@ -158,6 +158,7 @@ from h2ogpte.rest_sync.models.suggested_question import SuggestedQuestion
158
158
  from h2ogpte.rest_sync.models.summarize_request import SummarizeRequest
159
159
  from h2ogpte.rest_sync.models.tag import Tag
160
160
  from h2ogpte.rest_sync.models.tag_create_request import TagCreateRequest
161
+ from h2ogpte.rest_sync.models.tag_filter import TagFilter
161
162
  from h2ogpte.rest_sync.models.tag_update_request import TagUpdateRequest
162
163
  from h2ogpte.rest_sync.models.update_agent_key_request import UpdateAgentKeyRequest
163
164
  from h2ogpte.rest_sync.models.update_agent_tool_preference_request import UpdateAgentToolPreferenceRequest
@@ -16,7 +16,7 @@ from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt
16
16
  from typing import Any, Dict, List, Optional, Tuple, Union
17
17
  from typing_extensions import Annotated
18
18
 
19
- from pydantic import Field, StrictBytes, StrictInt, StrictStr
19
+ from pydantic import Field, StrictBytes, StrictInt, StrictStr, field_validator
20
20
  from typing import List, Optional, Tuple, Union
21
21
  from typing_extensions import Annotated
22
22
  from h2ogpte.rest_sync.models.add_custom_agent_tool201_response_inner import AddCustomAgentTool201ResponseInner
@@ -54,11 +54,11 @@ class AgentsApi:
54
54
  @validate_call
55
55
  def add_custom_agent_tool(
56
56
  self,
57
- tool_type: StrictStr,
58
- tool_args: StrictStr,
59
- file: Optional[Union[StrictBytes, StrictStr, Tuple[StrictStr, StrictBytes]]] = None,
57
+ tool_type: Annotated[StrictStr, Field(description="The type of custom tool being added: - local_mcp: Model Context Protocol server running locally - remote_mcp: Model Context Protocol server running remotely - browser_action: Custom browser automation actions - general_code: General purpose code execution tools ")],
58
+ tool_args: Annotated[StrictStr, Field(description="JSON string containing tool-specific arguments. The structure varies by tool_type: For remote_mcp: { \\\"mcp_config_json\\\": \\\"JSON string with MCP server configuration\\\", \\\"enable_by_default\\\": true/false (optional, defaults to true) } For local_mcp, browser_action, and general_code: { \\\"tool_name\\\": \\\"string (optional, defaults to filename without extension)\\\", \\\"description\\\": \\\"string (optional, tool description)\\\", \\\"enable_by_default\\\": true/false (optional, defaults to true), \\\"should_unzip\\\": true/false (optional, for general_code .zip files only), \\\"tool_usage_mode\\\": [\\\"runner\\\", \\\"creator\\\"] (optional list of strings) } ")],
59
+ file: Annotated[Optional[Union[StrictBytes, StrictStr, Tuple[StrictStr, StrictBytes]]], Field(description="The tool file to upload. Requirements vary by tool_type: - local_mcp: .zip file containing MCP server code - remote_mcp: Optional .json file with MCP configuration - browser_action: .py file (must start with 'browser_') or .zip containing browser action scripts - general_code: .py or .zip file with custom code ")] = None,
60
60
  custom_tool_path: Optional[StrictStr] = None,
61
- filename: Optional[StrictStr] = None,
61
+ filename: Annotated[Optional[StrictStr], Field(description="Optional filename to use when storing the uploaded file")] = None,
62
62
  _request_timeout: Union[
63
63
  None,
64
64
  Annotated[StrictFloat, Field(gt=0)],
@@ -76,15 +76,15 @@ class AgentsApi:
76
76
 
77
77
  Add Custom Agent Tools
78
78
 
79
- :param tool_type: (required)
79
+ :param tool_type: The type of custom tool being added: - local_mcp: Model Context Protocol server running locally - remote_mcp: Model Context Protocol server running remotely - browser_action: Custom browser automation actions - general_code: General purpose code execution tools (required)
80
80
  :type tool_type: str
81
- :param tool_args: (required)
81
+ :param tool_args: JSON string containing tool-specific arguments. The structure varies by tool_type: For remote_mcp: { \\\"mcp_config_json\\\": \\\"JSON string with MCP server configuration\\\", \\\"enable_by_default\\\": true/false (optional, defaults to true) } For local_mcp, browser_action, and general_code: { \\\"tool_name\\\": \\\"string (optional, defaults to filename without extension)\\\", \\\"description\\\": \\\"string (optional, tool description)\\\", \\\"enable_by_default\\\": true/false (optional, defaults to true), \\\"should_unzip\\\": true/false (optional, for general_code .zip files only), \\\"tool_usage_mode\\\": [\\\"runner\\\", \\\"creator\\\"] (optional list of strings) } (required)
82
82
  :type tool_args: str
83
- :param file:
83
+ :param file: The tool file to upload. Requirements vary by tool_type: - local_mcp: .zip file containing MCP server code - remote_mcp: Optional .json file with MCP configuration - browser_action: .py file (must start with 'browser_') or .zip containing browser action scripts - general_code: .py or .zip file with custom code
84
84
  :type file: bytearray
85
85
  :param custom_tool_path:
86
86
  :type custom_tool_path: str
87
- :param filename:
87
+ :param filename: Optional filename to use when storing the uploaded file
88
88
  :type filename: str
89
89
  :param _request_timeout: timeout setting for this request. If one
90
90
  number provided, it will be total request
@@ -138,11 +138,11 @@ class AgentsApi:
138
138
  @validate_call
139
139
  def add_custom_agent_tool_with_http_info(
140
140
  self,
141
- tool_type: StrictStr,
142
- tool_args: StrictStr,
143
- file: Optional[Union[StrictBytes, StrictStr, Tuple[StrictStr, StrictBytes]]] = None,
141
+ tool_type: Annotated[StrictStr, Field(description="The type of custom tool being added: - local_mcp: Model Context Protocol server running locally - remote_mcp: Model Context Protocol server running remotely - browser_action: Custom browser automation actions - general_code: General purpose code execution tools ")],
142
+ tool_args: Annotated[StrictStr, Field(description="JSON string containing tool-specific arguments. The structure varies by tool_type: For remote_mcp: { \\\"mcp_config_json\\\": \\\"JSON string with MCP server configuration\\\", \\\"enable_by_default\\\": true/false (optional, defaults to true) } For local_mcp, browser_action, and general_code: { \\\"tool_name\\\": \\\"string (optional, defaults to filename without extension)\\\", \\\"description\\\": \\\"string (optional, tool description)\\\", \\\"enable_by_default\\\": true/false (optional, defaults to true), \\\"should_unzip\\\": true/false (optional, for general_code .zip files only), \\\"tool_usage_mode\\\": [\\\"runner\\\", \\\"creator\\\"] (optional list of strings) } ")],
143
+ file: Annotated[Optional[Union[StrictBytes, StrictStr, Tuple[StrictStr, StrictBytes]]], Field(description="The tool file to upload. Requirements vary by tool_type: - local_mcp: .zip file containing MCP server code - remote_mcp: Optional .json file with MCP configuration - browser_action: .py file (must start with 'browser_') or .zip containing browser action scripts - general_code: .py or .zip file with custom code ")] = None,
144
144
  custom_tool_path: Optional[StrictStr] = None,
145
- filename: Optional[StrictStr] = None,
145
+ filename: Annotated[Optional[StrictStr], Field(description="Optional filename to use when storing the uploaded file")] = None,
146
146
  _request_timeout: Union[
147
147
  None,
148
148
  Annotated[StrictFloat, Field(gt=0)],
@@ -160,15 +160,15 @@ class AgentsApi:
160
160
 
161
161
  Add Custom Agent Tools
162
162
 
163
- :param tool_type: (required)
163
+ :param tool_type: The type of custom tool being added: - local_mcp: Model Context Protocol server running locally - remote_mcp: Model Context Protocol server running remotely - browser_action: Custom browser automation actions - general_code: General purpose code execution tools (required)
164
164
  :type tool_type: str
165
- :param tool_args: (required)
165
+ :param tool_args: JSON string containing tool-specific arguments. The structure varies by tool_type: For remote_mcp: { \\\"mcp_config_json\\\": \\\"JSON string with MCP server configuration\\\", \\\"enable_by_default\\\": true/false (optional, defaults to true) } For local_mcp, browser_action, and general_code: { \\\"tool_name\\\": \\\"string (optional, defaults to filename without extension)\\\", \\\"description\\\": \\\"string (optional, tool description)\\\", \\\"enable_by_default\\\": true/false (optional, defaults to true), \\\"should_unzip\\\": true/false (optional, for general_code .zip files only), \\\"tool_usage_mode\\\": [\\\"runner\\\", \\\"creator\\\"] (optional list of strings) } (required)
166
166
  :type tool_args: str
167
- :param file:
167
+ :param file: The tool file to upload. Requirements vary by tool_type: - local_mcp: .zip file containing MCP server code - remote_mcp: Optional .json file with MCP configuration - browser_action: .py file (must start with 'browser_') or .zip containing browser action scripts - general_code: .py or .zip file with custom code
168
168
  :type file: bytearray
169
169
  :param custom_tool_path:
170
170
  :type custom_tool_path: str
171
- :param filename:
171
+ :param filename: Optional filename to use when storing the uploaded file
172
172
  :type filename: str
173
173
  :param _request_timeout: timeout setting for this request. If one
174
174
  number provided, it will be total request
@@ -222,11 +222,11 @@ class AgentsApi:
222
222
  @validate_call
223
223
  def add_custom_agent_tool_without_preload_content(
224
224
  self,
225
- tool_type: StrictStr,
226
- tool_args: StrictStr,
227
- file: Optional[Union[StrictBytes, StrictStr, Tuple[StrictStr, StrictBytes]]] = None,
225
+ tool_type: Annotated[StrictStr, Field(description="The type of custom tool being added: - local_mcp: Model Context Protocol server running locally - remote_mcp: Model Context Protocol server running remotely - browser_action: Custom browser automation actions - general_code: General purpose code execution tools ")],
226
+ tool_args: Annotated[StrictStr, Field(description="JSON string containing tool-specific arguments. The structure varies by tool_type: For remote_mcp: { \\\"mcp_config_json\\\": \\\"JSON string with MCP server configuration\\\", \\\"enable_by_default\\\": true/false (optional, defaults to true) } For local_mcp, browser_action, and general_code: { \\\"tool_name\\\": \\\"string (optional, defaults to filename without extension)\\\", \\\"description\\\": \\\"string (optional, tool description)\\\", \\\"enable_by_default\\\": true/false (optional, defaults to true), \\\"should_unzip\\\": true/false (optional, for general_code .zip files only), \\\"tool_usage_mode\\\": [\\\"runner\\\", \\\"creator\\\"] (optional list of strings) } ")],
227
+ file: Annotated[Optional[Union[StrictBytes, StrictStr, Tuple[StrictStr, StrictBytes]]], Field(description="The tool file to upload. Requirements vary by tool_type: - local_mcp: .zip file containing MCP server code - remote_mcp: Optional .json file with MCP configuration - browser_action: .py file (must start with 'browser_') or .zip containing browser action scripts - general_code: .py or .zip file with custom code ")] = None,
228
228
  custom_tool_path: Optional[StrictStr] = None,
229
- filename: Optional[StrictStr] = None,
229
+ filename: Annotated[Optional[StrictStr], Field(description="Optional filename to use when storing the uploaded file")] = None,
230
230
  _request_timeout: Union[
231
231
  None,
232
232
  Annotated[StrictFloat, Field(gt=0)],
@@ -244,15 +244,15 @@ class AgentsApi:
244
244
 
245
245
  Add Custom Agent Tools
246
246
 
247
- :param tool_type: (required)
247
+ :param tool_type: The type of custom tool being added: - local_mcp: Model Context Protocol server running locally - remote_mcp: Model Context Protocol server running remotely - browser_action: Custom browser automation actions - general_code: General purpose code execution tools (required)
248
248
  :type tool_type: str
249
- :param tool_args: (required)
249
+ :param tool_args: JSON string containing tool-specific arguments. The structure varies by tool_type: For remote_mcp: { \\\"mcp_config_json\\\": \\\"JSON string with MCP server configuration\\\", \\\"enable_by_default\\\": true/false (optional, defaults to true) } For local_mcp, browser_action, and general_code: { \\\"tool_name\\\": \\\"string (optional, defaults to filename without extension)\\\", \\\"description\\\": \\\"string (optional, tool description)\\\", \\\"enable_by_default\\\": true/false (optional, defaults to true), \\\"should_unzip\\\": true/false (optional, for general_code .zip files only), \\\"tool_usage_mode\\\": [\\\"runner\\\", \\\"creator\\\"] (optional list of strings) } (required)
250
250
  :type tool_args: str
251
- :param file:
251
+ :param file: The tool file to upload. Requirements vary by tool_type: - local_mcp: .zip file containing MCP server code - remote_mcp: Optional .json file with MCP configuration - browser_action: .py file (must start with 'browser_') or .zip containing browser action scripts - general_code: .py or .zip file with custom code
252
252
  :type file: bytearray
253
253
  :param custom_tool_path:
254
254
  :type custom_tool_path: str
255
- :param filename:
255
+ :param filename: Optional filename to use when storing the uploaded file
256
256
  :type filename: str
257
257
  :param _request_timeout: timeout setting for this request. If one
258
258
  number provided, it will be total request