qanswer_sdk 3.1212.0__py3-none-any.whl → 3.1244.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (140)
  1. qanswer_sdk/__init__.py +66 -43
  2. qanswer_sdk/api/__init__.py +7 -4
  3. qanswer_sdk/api/admin_api.py +590 -7695
  4. qanswer_sdk/api/ai_assistant_access_rights_api.py +72 -73
  5. qanswer_sdk/api/ai_assistant_api.py +567 -46
  6. qanswer_sdk/api/branding_api.py +4578 -0
  7. qanswer_sdk/api/chatbot_api.py +72 -87
  8. qanswer_sdk/api/{tag_api.py → connector_imap_connector_api.py} +415 -997
  9. qanswer_sdk/api/connector_rdf_api.py +30 -31
  10. qanswer_sdk/api/connectors_api.py +183 -166
  11. qanswer_sdk/api/connectors_data_api.py +345 -1
  12. qanswer_sdk/api/dataset_config_api.py +0 -245
  13. qanswer_sdk/api/llm_api.py +30 -30
  14. qanswer_sdk/api/payment_api.py +17 -16
  15. qanswer_sdk/api/speech_to_text_api.py +2 -2
  16. qanswer_sdk/api/task_chat_api.py +8 -7
  17. qanswer_sdk/api/task_rdf_linker_api.py +35 -36
  18. qanswer_sdk/api/task_rdf_sparql_endpoint_api.py +16 -16
  19. qanswer_sdk/api/task_report_copilot_api.py +895 -281
  20. qanswer_sdk/api/task_search_api.py +8 -7
  21. qanswer_sdk/api/tool_embedder_api.py +4040 -0
  22. qanswer_sdk/api/{user_api.py → tool_llm_api.py} +735 -2749
  23. qanswer_sdk/api/unit_organizations_api.py +4547 -0
  24. qanswer_sdk/api/unit_teams_api.py +3906 -0
  25. qanswer_sdk/api/{organizations_teams_api.py → unit_user_api.py} +1345 -1394
  26. qanswer_sdk/api_client.py +1 -1
  27. qanswer_sdk/configuration.py +1 -1
  28. qanswer_sdk/models/__init__.py +58 -38
  29. qanswer_sdk/models/aggregation.py +2 -2
  30. qanswer_sdk/models/ai_assistant_filter_dto.py +105 -0
  31. qanswer_sdk/models/{user_profile_paginated.py → ai_assistant_list.py} +18 -18
  32. qanswer_sdk/models/{embedding_model.py → available_aggregation.py} +13 -11
  33. qanswer_sdk/models/available_connectors_response.py +2 -2
  34. qanswer_sdk/models/{branding_data.py → branding_app_title.py} +4 -4
  35. qanswer_sdk/models/{api_response.py → branding_system_message.py} +8 -8
  36. qanswer_sdk/models/chat_task_settings.py +12 -12
  37. qanswer_sdk/models/chat_task_update.py +13 -5
  38. qanswer_sdk/models/chatbot_chat_payload.py +19 -2
  39. qanswer_sdk/models/chatbot_conversation_model.py +10 -0
  40. qanswer_sdk/models/chatbot_response.py +22 -2
  41. qanswer_sdk/models/{user_chatbot_setting_payload.py → chatbot_setting_dto.py} +12 -13
  42. qanswer_sdk/models/chatbot_setting_request.py +96 -0
  43. qanswer_sdk/models/{question_completion.py → clip_connector_file_structure.py} +9 -9
  44. qanswer_sdk/models/clip_connector_structure.py +97 -0
  45. qanswer_sdk/models/connector_model.py +2 -2
  46. qanswer_sdk/models/conversation_message.py +4 -12
  47. qanswer_sdk/models/{cost_summary.py → cost_summary_dto.py} +4 -4
  48. qanswer_sdk/models/create_connector_request.py +4 -2
  49. qanswer_sdk/models/create_imap_connector_request.py +105 -0
  50. qanswer_sdk/models/create_pinecone_connector_request.py +3 -1
  51. qanswer_sdk/models/create_sharepoint_connector_from_certificate_request.py +3 -1
  52. qanswer_sdk/models/create_sharepoint_connector_request.py +3 -1
  53. qanswer_sdk/models/dataset_detail_kg.py +27 -1
  54. qanswer_sdk/models/dataset_schema.py +4 -2
  55. qanswer_sdk/models/dataset_update_object.py +3 -1
  56. qanswer_sdk/models/delete_connector_model.py +2 -2
  57. qanswer_sdk/models/delete_connectors_response.py +2 -4
  58. qanswer_sdk/models/{json_nullable_source_metadata.py → duplicate_report_template_response.py} +11 -9
  59. qanswer_sdk/models/{json_nullable_file_failure_reason.py → email_folder.py} +12 -8
  60. qanswer_sdk/models/{embedding_endpoint.py → embedder_detailed_dto.py} +16 -8
  61. qanswer_sdk/models/{tag_payload.py → embedder_dto.py} +11 -7
  62. qanswer_sdk/models/{pageable_object.py → embedder_list_dto.py} +20 -20
  63. qanswer_sdk/models/embedding_endpoint_create.py +3 -1
  64. qanswer_sdk/models/embedding_endpoint_update.py +4 -2
  65. qanswer_sdk/models/{sort_object.py → entity_description.py} +16 -12
  66. qanswer_sdk/models/{dataset_description.py → entity_description_dto.py} +9 -7
  67. qanswer_sdk/models/file_model.py +2 -2
  68. qanswer_sdk/models/imap_add_payload.py +103 -0
  69. qanswer_sdk/models/imap_additional_fields.py +101 -0
  70. qanswer_sdk/models/imap_file_metadata.py +114 -0
  71. qanswer_sdk/models/imap_search_response.py +113 -0
  72. qanswer_sdk/models/llm.py +129 -0
  73. qanswer_sdk/models/llm_consumption.py +118 -0
  74. qanswer_sdk/models/{pageable.py → llm_context_ranges.py} +14 -13
  75. qanswer_sdk/models/{llm_cost.py → llm_cost_filter.py} +11 -7
  76. qanswer_sdk/models/llm_cost_list.py +101 -0
  77. qanswer_sdk/models/llm_detailed_dto.py +179 -0
  78. qanswer_sdk/models/{llm_details.py → llm_dto.py} +14 -18
  79. qanswer_sdk/models/llm_endpoint.py +8 -2
  80. qanswer_sdk/models/llm_endpoint_read_input.py +173 -0
  81. qanswer_sdk/models/llm_endpoint_read_output.py +173 -0
  82. qanswer_sdk/models/llm_filter_dto.py +99 -0
  83. qanswer_sdk/models/llm_list_detailed_dto.py +101 -0
  84. qanswer_sdk/models/llm_list_dto.py +101 -0
  85. qanswer_sdk/models/modify_connector_request.py +5 -11
  86. qanswer_sdk/models/{o_auth_service.py → o_auth_service_dto.py} +4 -4
  87. qanswer_sdk/models/organization_admin.py +95 -0
  88. qanswer_sdk/models/organization_filter_dto.py +97 -0
  89. qanswer_sdk/models/organization_list_dto.py +101 -0
  90. qanswer_sdk/models/organization_llm.py +103 -0
  91. qanswer_sdk/models/{plan.py → plan_dto.py} +4 -4
  92. qanswer_sdk/models/prompt_token_count_details.py +3 -3
  93. qanswer_sdk/models/rag_payload.py +4 -4
  94. qanswer_sdk/models/rag_response.py +4 -2
  95. qanswer_sdk/models/relation_extraction_task_settings.py +12 -10
  96. qanswer_sdk/models/relation_extraction_task_update.py +14 -4
  97. qanswer_sdk/models/report_copilot_slot_task_settings.py +12 -10
  98. qanswer_sdk/models/report_copilot_slot_task_update.py +13 -3
  99. qanswer_sdk/models/report_copilot_task_settings.py +12 -10
  100. qanswer_sdk/models/report_copilot_task_update.py +14 -4
  101. qanswer_sdk/models/{report_copilot_template.py → report_template.py} +17 -7
  102. qanswer_sdk/models/{report_copilot_create_payload.py → report_template_create_payload.py} +4 -4
  103. qanswer_sdk/models/{report_copilot_template_simplified.py → report_template_simplified.py} +11 -7
  104. qanswer_sdk/models/{report_copilot_export_template_as_docx_payload_simplified.py → report_template_simplified_payload.py} +4 -4
  105. qanswer_sdk/models/{report_copilot_update_payload.py → report_template_update_payload.py} +15 -9
  106. qanswer_sdk/models/search_task_settings.py +12 -12
  107. qanswer_sdk/models/search_task_update.py +13 -3
  108. qanswer_sdk/models/{set_logo1_request.py → set_logo2_request.py} +4 -4
  109. qanswer_sdk/models/shared_organization_access.py +7 -3
  110. qanswer_sdk/models/shared_team_access.py +10 -4
  111. qanswer_sdk/models/slot.py +5 -1
  112. qanswer_sdk/models/slot_update.py +5 -1
  113. qanswer_sdk/models/socket_conversation_message.py +3 -11
  114. qanswer_sdk/models/socket_file_metadata.py +17 -11
  115. qanswer_sdk/models/source_metadata.py +109 -0
  116. qanswer_sdk/models/{tag_qa_list_payload.py → subscription_response.py} +22 -21
  117. qanswer_sdk/models/{team_with_count.py → team_admin.py} +8 -8
  118. qanswer_sdk/models/{team_filter.py → team_filter_dto.py} +16 -6
  119. qanswer_sdk/models/team_list_dto.py +101 -0
  120. qanswer_sdk/models/{available_endpoints_response.py → template_clip_structure_input.py} +14 -14
  121. qanswer_sdk/models/{available_embedding_models_response.py → template_clip_structure_output.py} +14 -14
  122. qanswer_sdk/models/test_imap_connection_payload.py +95 -0
  123. qanswer_sdk/models/text2_sparql_payload.py +4 -4
  124. qanswer_sdk/models/text2_sparql_task_settings.py +12 -10
  125. qanswer_sdk/models/text2_sparql_task_update.py +14 -4
  126. qanswer_sdk/models/{user_chatbot_setting_response.py → user_chatbot_setting.py} +24 -24
  127. qanswer_sdk/models/user_dataset.py +38 -2
  128. qanswer_sdk/models/{qa_metadata_payload.py → user_dataset_shared.py} +27 -37
  129. qanswer_sdk/models/{user_filter.py → user_filter_dto.py} +4 -4
  130. qanswer_sdk/models/{user_profile.py → user_profile_dto.py} +10 -10
  131. qanswer_sdk/models/user_profile_list_dto.py +3 -3
  132. qanswer_sdk/models/widget_configs.py +4 -2
  133. {qanswer_sdk-3.1212.0.dist-info → qanswer_sdk-3.1244.0.dist-info}/METADATA +2 -2
  134. {qanswer_sdk-3.1212.0.dist-info → qanswer_sdk-3.1244.0.dist-info}/RECORD +135 -112
  135. qanswer_sdk/api/llm_consumption_controller_api.py +0 -310
  136. qanswer_sdk/models/organization_filter.py +0 -87
  137. qanswer_sdk/models/page_organization.py +0 -123
  138. qanswer_sdk/models/page_team_with_count.py +0 -123
  139. qanswer_sdk/models/tag_qa_payload.py +0 -91
  140. {qanswer_sdk-3.1212.0.dist-info → qanswer_sdk-3.1244.0.dist-info}/WHEEL +0 -0
@@ -0,0 +1,103 @@
# coding: utf-8

"""
    QAnswer: Api Documentation

    APIs provided by QAnswer

    The version of the OpenAPI document: 1.0
    Generated by OpenAPI Generator (https://openapi-generator.tech)

    Do not edit the class manually.
"""  # noqa: E501


from __future__ import annotations
import pprint
import re  # noqa: F401
import json

from pydantic import BaseModel, ConfigDict, StrictBool, StrictInt
from typing import Any, ClassVar, Dict, List, Optional
from qanswer_sdk.models.organization import Organization
from typing import Optional, Set
from typing_extensions import Self


class OrganizationLlm(BaseModel):
    """Link between an organization and an LLM, with an activation flag.

    NOTE: `Llm` is referenced as a forward annotation; the actual import
    happens at the bottom of this module (circular-import workaround),
    after which `model_rebuild()` resolves the reference.
    """  # noqa: E501

    # Surrogate identifier of the organization/LLM association.
    id: Optional[StrictInt] = None
    # The organization side of the association.
    organization: Optional[Organization] = None
    # The LLM side of the association (forward-referenced, see module foot).
    llm: Optional[Llm] = None
    # Whether this LLM is currently enabled for the organization.
    active: Optional[StrictBool] = None
    __properties: ClassVar[List[str]] = ["id", "organization", "llm", "active"]

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )

    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of OrganizationLlm from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dictionary representation of the model using alias.

        This has the following differences from calling pydantic's
        `self.model_dump(by_alias=True)`:

        * `None` is only added to the output dict for nullable fields that
          were set at model initialization. Other fields with value `None`
          are ignored.
        """
        excluded_fields: Set[str] = set([
        ])

        _dict = self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )
        # override the default output from pydantic by calling `to_dict()` of organization
        if self.organization:
            _dict['organization'] = self.organization.to_dict()
        # override the default output from pydantic by calling `to_dict()` of llm
        if self.llm:
            _dict['llm'] = self.llm.to_dict()
        return _dict

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of OrganizationLlm from a dict"""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        _obj = cls.model_validate({
            "id": obj.get("id"),
            "organization": Organization.from_dict(obj["organization"]) if obj.get("organization") is not None else None,
            "llm": Llm.from_dict(obj["llm"]) if obj.get("llm") is not None else None,
            "active": obj.get("active")
        })
        return _obj

# Deferred import: Llm may itself reference OrganizationLlm, so it is
# imported only after this class body is defined, then the forward
# reference in the `llm` field is resolved via model_rebuild().
from qanswer_sdk.models.llm import Llm
# TODO: Rewrite to not use raise_errors
OrganizationLlm.model_rebuild(raise_errors=False)
@@ -22,9 +22,9 @@ from typing import Any, ClassVar, Dict, List, Optional, Union
22
22
  from typing import Optional, Set
23
23
  from typing_extensions import Self
24
24
 
25
- class Plan(BaseModel):
25
+ class PlanDto(BaseModel):
26
26
  """
27
- Plan
27
+ PlanDto
28
28
  """ # noqa: E501
29
29
  plan: Optional[StrictStr] = None
30
30
  short_name: Optional[StrictStr] = Field(default=None, alias="shortName")
@@ -75,7 +75,7 @@ class Plan(BaseModel):
75
75
 
76
76
  @classmethod
77
77
  def from_json(cls, json_str: str) -> Optional[Self]:
78
- """Create an instance of Plan from a JSON string"""
78
+ """Create an instance of PlanDto from a JSON string"""
79
79
  return cls.from_dict(json.loads(json_str))
80
80
 
81
81
  def to_dict(self) -> Dict[str, Any]:
@@ -100,7 +100,7 @@ class Plan(BaseModel):
100
100
 
101
101
  @classmethod
102
102
  def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
103
- """Create an instance of Plan from a dict"""
103
+ """Create an instance of PlanDto from a dict"""
104
104
  if obj is None:
105
105
  return None
106
106
 
@@ -30,10 +30,10 @@ class PromptTokenCountDetails(BaseModel):
30
30
  instruction: StrictInt
31
31
  input_data: StrictInt
32
32
  chat_history: StrictInt
33
- training_examples: StrictInt
33
+ feedback: StrictInt
34
34
  bot_name: StrictInt
35
35
  bot_description: StrictInt
36
- __properties: ClassVar[List[str]] = ["question", "instruction", "input_data", "chat_history", "training_examples", "bot_name", "bot_description"]
36
+ __properties: ClassVar[List[str]] = ["question", "instruction", "input_data", "chat_history", "feedback", "bot_name", "bot_description"]
37
37
 
38
38
  model_config = ConfigDict(
39
39
  populate_by_name=True,
@@ -90,7 +90,7 @@ class PromptTokenCountDetails(BaseModel):
90
90
  "instruction": obj.get("instruction"),
91
91
  "input_data": obj.get("input_data"),
92
92
  "chat_history": obj.get("chat_history"),
93
- "training_examples": obj.get("training_examples"),
93
+ "feedback": obj.get("feedback"),
94
94
  "bot_name": obj.get("bot_name"),
95
95
  "bot_description": obj.get("bot_description")
96
96
  })
@@ -32,9 +32,8 @@ class RAGPayload(BaseModel):
32
32
  dataset: StrictStr = Field(description="The dataset to be queried for information.")
33
33
  image_urls: Optional[List[StrictStr]] = Field(default=None, description="List of image URLs to be processed if provided.")
34
34
  parent_message_id: Optional[StrictStr] = Field(default=None, description="ID of the parent generation (message_id of parent response or question) if applicable; used for tracking multi-turn conversations.")
35
- conversation_id: Optional[StrictStr] = Field(default=None, description="Unique identifier for the conversation.")
35
+ conversation_id: StrictStr = Field(description="Unique identifier for the conversation.")
36
36
  is_regenerate: Optional[StrictBool] = Field(default=None, description="Indicates if the user is asking for a regenerated response.")
37
- auto_filters: Optional[StrictBool] = Field(default=None, description="Determines whether the auto filters should be used for the response.")
38
37
  origin: Optional[StrictStr] = Field(default=None, description="The username or IP address of the user making the request.")
39
38
  text_fragmentation: Optional[StrictBool] = Field(default=None, description="Detemines wether links for website sources should have text fragments.")
40
39
  interface_origin: Optional[StrictStr] = Field(default=None, description="The interface from which the request originated (search, chat, microsoft_teams, etc.).")
@@ -45,12 +44,13 @@ class RAGPayload(BaseModel):
45
44
  talk_to_web: Optional[StrictBool] = Field(default=None, description="Determines whether the response should be sent to the web interface.")
46
45
  original_question: Optional[StrictStr] = Field(default=None, description="The original question submitted to the model.")
47
46
  oauth_token: Optional[StrictStr] = Field(default=None, description="The oauth token")
47
+ auto_filters: Optional[StrictBool] = Field(default=None, description="Determines whether the auto filters should be used for the response.")
48
48
  filters: Optional[List[List[SearchMetadataFilter]]] = None
49
49
  index_hashes: Optional[List[Dict[str, Any]]] = Field(default=None, description="The hashes of the indexes to be used for the response.")
50
50
  agentic_chatbot_enabled: Optional[StrictBool] = Field(default=None, description="Whether the agentic chatbot is enabled.")
51
51
  conversation_has_clip: Optional[StrictBool] = Field(default=None, description="Indicates whether the conversation has a clip associated with it.")
52
52
  current_time: Optional[StrictStr] = Field(default=None, description="The current time to be used in the prompt, if applicable.")
53
- __properties: ClassVar[List[str]] = ["question", "username", "dataset", "image_urls", "parent_message_id", "conversation_id", "is_regenerate", "auto_filters", "origin", "text_fragmentation", "interface_origin", "system_prompt", "prompt", "additional_fields", "bypass_guardrail", "talk_to_web", "original_question", "oauth_token", "filters", "index_hashes", "agentic_chatbot_enabled", "conversation_has_clip", "current_time"]
53
+ __properties: ClassVar[List[str]] = ["question", "username", "dataset", "image_urls", "parent_message_id", "conversation_id", "is_regenerate", "origin", "text_fragmentation", "interface_origin", "system_prompt", "prompt", "additional_fields", "bypass_guardrail", "talk_to_web", "original_question", "oauth_token", "auto_filters", "filters", "index_hashes", "agentic_chatbot_enabled", "conversation_has_clip", "current_time"]
54
54
 
55
55
  @field_validator('interface_origin')
56
56
  def interface_origin_validate_enum(cls, value):
@@ -129,7 +129,6 @@ class RAGPayload(BaseModel):
129
129
  "parent_message_id": obj.get("parent_message_id"),
130
130
  "conversation_id": obj.get("conversation_id"),
131
131
  "is_regenerate": obj.get("is_regenerate"),
132
- "auto_filters": obj.get("auto_filters"),
133
132
  "origin": obj.get("origin"),
134
133
  "text_fragmentation": obj.get("text_fragmentation"),
135
134
  "interface_origin": obj.get("interface_origin"),
@@ -140,6 +139,7 @@ class RAGPayload(BaseModel):
140
139
  "talk_to_web": obj.get("talk_to_web"),
141
140
  "original_question": obj.get("original_question"),
142
141
  "oauth_token": obj.get("oauth_token"),
142
+ "auto_filters": obj.get("auto_filters"),
143
143
  "filters": [
144
144
  [SearchMetadataFilter.from_dict(_inner_item) for _inner_item in _item]
145
145
  for _item in obj["filters"]
@@ -41,7 +41,8 @@ class RAGResponse(BaseModel):
41
41
  is_input_data_cropped: Optional[StrictBool] = Field(default=None, description="Indicates whether the input data was cropped during prompting.")
42
42
  mode: Optional[StrictStr] = Field(default=None, description="The mode of the response generation.")
43
43
  prompt_used: Optional[StrictStr] = Field(default=None, description="The actual prompt used for generation by the model.")
44
- __properties: ClassVar[List[str]] = ["conversation_id", "message_id", "ai_response", "sources", "all_sources", "finish_reason", "is_regenerate", "aggs", "filters", "is_input_data_cropped", "mode", "prompt_used"]
44
+ prompt_type: Optional[StrictStr] = Field(default=None, description="The type of prompt used for generation by the model.")
45
+ __properties: ClassVar[List[str]] = ["conversation_id", "message_id", "ai_response", "sources", "all_sources", "finish_reason", "is_regenerate", "aggs", "filters", "is_input_data_cropped", "mode", "prompt_used", "prompt_type"]
45
46
 
46
47
  @field_validator('mode')
47
48
  def mode_validate_enum(cls, value):
@@ -143,7 +144,8 @@ class RAGResponse(BaseModel):
143
144
  "filters": [SearchMetadataFilter.from_dict(_item) for _item in obj["filters"]] if obj.get("filters") is not None else None,
144
145
  "is_input_data_cropped": obj.get("is_input_data_cropped"),
145
146
  "mode": obj.get("mode"),
146
- "prompt_used": obj.get("prompt_used")
147
+ "prompt_used": obj.get("prompt_used"),
148
+ "prompt_type": obj.get("prompt_type")
147
149
  })
148
150
  return _obj
149
151
 
@@ -17,9 +17,9 @@ import pprint
17
17
  import re # noqa: F401
18
18
  import json
19
19
 
20
- from pydantic import BaseModel, ConfigDict, StrictFloat, StrictInt, StrictStr, field_validator
20
+ from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt, StrictStr, field_validator
21
21
  from typing import Any, ClassVar, Dict, List, Optional, Union
22
- from qanswer_sdk.models.llm_details import LLMDetails
22
+ from qanswer_sdk.models.llm_endpoint_read_output import LLMEndpointReadOutput
23
23
  from typing import Optional, Set
24
24
  from typing_extensions import Self
25
25
 
@@ -28,7 +28,6 @@ class RelationExtractionTaskSettings(BaseModel):
28
28
  RelationExtractionTaskSettings
29
29
  """ # noqa: E501
30
30
  prompt: Optional[StrictStr] = None
31
- llm_choice: Optional[StrictStr] = None
32
31
  bot_seed: Optional[StrictInt] = None
33
32
  bot_temperature: Optional[Union[StrictFloat, StrictInt]] = None
34
33
  bot_answer_length: Optional[StrictStr] = None
@@ -36,10 +35,12 @@ class RelationExtractionTaskSettings(BaseModel):
36
35
  stream_speed: Optional[Union[StrictFloat, StrictInt]] = None
37
36
  context_window: Optional[StrictInt] = None
38
37
  max_tokens: Optional[StrictInt] = None
38
+ slots_values: Optional[Dict[str, StrictStr]] = None
39
39
  bot_name: Optional[StrictStr] = None
40
40
  bot_description: Optional[StrictStr] = None
41
- llm_details: LLMDetails
42
- __properties: ClassVar[List[str]] = ["prompt", "llm_choice", "bot_seed", "bot_temperature", "bot_answer_length", "number_of_references", "stream_speed", "context_window", "max_tokens", "bot_name", "bot_description", "llm_details"]
41
+ llm_id: Optional[StrictInt] = Field(default=None, description="The LLM ID. This field is populated based on the llm_choice.")
42
+ llm_endpoint: Optional[LLMEndpointReadOutput] = None
43
+ __properties: ClassVar[List[str]] = ["prompt", "bot_seed", "bot_temperature", "bot_answer_length", "number_of_references", "stream_speed", "context_window", "max_tokens", "slots_values", "bot_name", "bot_description", "llm_id", "llm_endpoint"]
43
44
 
44
45
  @field_validator('bot_answer_length')
45
46
  def bot_answer_length_validate_enum(cls, value):
@@ -90,9 +91,9 @@ class RelationExtractionTaskSettings(BaseModel):
90
91
  exclude=excluded_fields,
91
92
  exclude_none=True,
92
93
  )
93
- # override the default output from pydantic by calling `to_dict()` of llm_details
94
- if self.llm_details:
95
- _dict['llm_details'] = self.llm_details.to_dict()
94
+ # override the default output from pydantic by calling `to_dict()` of llm_endpoint
95
+ if self.llm_endpoint:
96
+ _dict['llm_endpoint'] = self.llm_endpoint.to_dict()
96
97
  return _dict
97
98
 
98
99
  @classmethod
@@ -106,7 +107,6 @@ class RelationExtractionTaskSettings(BaseModel):
106
107
 
107
108
  _obj = cls.model_validate({
108
109
  "prompt": obj.get("prompt"),
109
- "llm_choice": obj.get("llm_choice"),
110
110
  "bot_seed": obj.get("bot_seed"),
111
111
  "bot_temperature": obj.get("bot_temperature"),
112
112
  "bot_answer_length": obj.get("bot_answer_length"),
@@ -114,9 +114,11 @@ class RelationExtractionTaskSettings(BaseModel):
114
114
  "stream_speed": obj.get("stream_speed"),
115
115
  "context_window": obj.get("context_window"),
116
116
  "max_tokens": obj.get("max_tokens"),
117
+ "slots_values": obj.get("slots_values"),
117
118
  "bot_name": obj.get("bot_name"),
118
119
  "bot_description": obj.get("bot_description"),
119
- "llm_details": LLMDetails.from_dict(obj["llm_details"]) if obj.get("llm_details") is not None else None
120
+ "llm_id": obj.get("llm_id"),
121
+ "llm_endpoint": LLMEndpointReadOutput.from_dict(obj["llm_endpoint"]) if obj.get("llm_endpoint") is not None else None
120
122
  })
121
123
  return _obj
122
124
 
@@ -17,8 +17,9 @@ import pprint
17
17
  import re # noqa: F401
18
18
  import json
19
19
 
20
- from pydantic import BaseModel, ConfigDict, StrictFloat, StrictInt, StrictStr, field_validator
20
+ from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt, StrictStr, field_validator
21
21
  from typing import Any, ClassVar, Dict, List, Optional, Union
22
+ from qanswer_sdk.models.llm_endpoint_read_input import LLMEndpointReadInput
22
23
  from typing import Optional, Set
23
24
  from typing_extensions import Self
24
25
 
@@ -27,7 +28,7 @@ class RelationExtractionTaskUpdate(BaseModel):
27
28
  RelationExtractionTaskUpdate
28
29
  """ # noqa: E501
29
30
  prompt: Optional[StrictStr] = None
30
- llm_choice: Optional[StrictStr] = None
31
+ llm_choice: Optional[StrictStr] = Field(default=None, description="The LLM choice. If not provided, the system default will be used.")
31
32
  bot_seed: Optional[StrictInt] = None
32
33
  bot_temperature: Optional[Union[StrictFloat, StrictInt]] = None
33
34
  bot_answer_length: Optional[StrictStr] = None
@@ -35,9 +36,12 @@ class RelationExtractionTaskUpdate(BaseModel):
35
36
  stream_speed: Optional[Union[StrictFloat, StrictInt]] = None
36
37
  context_window: Optional[StrictInt] = None
37
38
  max_tokens: Optional[StrictInt] = None
39
+ slots_values: Optional[Dict[str, StrictStr]] = None
38
40
  bot_name: Optional[StrictStr] = None
39
41
  bot_description: Optional[StrictStr] = None
40
- __properties: ClassVar[List[str]] = ["prompt", "llm_choice", "bot_seed", "bot_temperature", "bot_answer_length", "number_of_references", "stream_speed", "context_window", "max_tokens", "bot_name", "bot_description"]
42
+ llm_id: Optional[StrictInt] = Field(default=None, description="The LLM ID. This field is populated based on the llm_choice.")
43
+ llm_endpoint: Optional[LLMEndpointReadInput] = None
44
+ __properties: ClassVar[List[str]] = ["prompt", "llm_choice", "bot_seed", "bot_temperature", "bot_answer_length", "number_of_references", "stream_speed", "context_window", "max_tokens", "slots_values", "bot_name", "bot_description", "llm_id", "llm_endpoint"]
41
45
 
42
46
  @field_validator('bot_answer_length')
43
47
  def bot_answer_length_validate_enum(cls, value):
@@ -88,6 +92,9 @@ class RelationExtractionTaskUpdate(BaseModel):
88
92
  exclude=excluded_fields,
89
93
  exclude_none=True,
90
94
  )
95
+ # override the default output from pydantic by calling `to_dict()` of llm_endpoint
96
+ if self.llm_endpoint:
97
+ _dict['llm_endpoint'] = self.llm_endpoint.to_dict()
91
98
  return _dict
92
99
 
93
100
  @classmethod
@@ -109,8 +116,11 @@ class RelationExtractionTaskUpdate(BaseModel):
109
116
  "stream_speed": obj.get("stream_speed"),
110
117
  "context_window": obj.get("context_window"),
111
118
  "max_tokens": obj.get("max_tokens"),
119
+ "slots_values": obj.get("slots_values"),
112
120
  "bot_name": obj.get("bot_name"),
113
- "bot_description": obj.get("bot_description")
121
+ "bot_description": obj.get("bot_description"),
122
+ "llm_id": obj.get("llm_id"),
123
+ "llm_endpoint": LLMEndpointReadInput.from_dict(obj["llm_endpoint"]) if obj.get("llm_endpoint") is not None else None
114
124
  })
115
125
  return _obj
116
126
 
@@ -17,9 +17,9 @@ import pprint
17
17
  import re # noqa: F401
18
18
  import json
19
19
 
20
- from pydantic import BaseModel, ConfigDict, StrictFloat, StrictInt, StrictStr, field_validator
20
+ from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt, StrictStr, field_validator
21
21
  from typing import Any, ClassVar, Dict, List, Optional, Union
22
- from qanswer_sdk.models.llm_details import LLMDetails
22
+ from qanswer_sdk.models.llm_endpoint_read_output import LLMEndpointReadOutput
23
23
  from typing import Optional, Set
24
24
  from typing_extensions import Self
25
25
 
@@ -28,7 +28,6 @@ class ReportCopilotSlotTaskSettings(BaseModel):
28
28
  ReportCopilotSlotTaskSettings
29
29
  """ # noqa: E501
30
30
  prompt: Optional[StrictStr] = None
31
- llm_choice: Optional[StrictStr] = None
32
31
  bot_seed: Optional[StrictInt] = None
33
32
  bot_temperature: Optional[Union[StrictFloat, StrictInt]] = None
34
33
  bot_answer_length: Optional[StrictStr] = None
@@ -36,11 +35,13 @@ class ReportCopilotSlotTaskSettings(BaseModel):
36
35
  stream_speed: Optional[Union[StrictFloat, StrictInt]] = None
37
36
  context_window: Optional[StrictInt] = None
38
37
  max_tokens: Optional[StrictInt] = None
38
+ slots_values: Optional[Dict[str, StrictStr]] = None
39
39
  bot_name: Optional[StrictStr] = None
40
40
  bot_description: Optional[StrictStr] = None
41
- llm_details: LLMDetails
41
+ llm_id: Optional[StrictInt] = Field(default=None, description="The LLM ID. This field is populated based on the llm_choice.")
42
+ llm_endpoint: Optional[LLMEndpointReadOutput] = None
42
43
  id: Optional[StrictInt] = None
43
- __properties: ClassVar[List[str]] = ["prompt", "llm_choice", "bot_seed", "bot_temperature", "bot_answer_length", "number_of_references", "stream_speed", "context_window", "max_tokens", "bot_name", "bot_description", "llm_details", "id"]
44
+ __properties: ClassVar[List[str]] = ["prompt", "bot_seed", "bot_temperature", "bot_answer_length", "number_of_references", "stream_speed", "context_window", "max_tokens", "slots_values", "bot_name", "bot_description", "llm_id", "llm_endpoint", "id"]
44
45
 
45
46
  @field_validator('bot_answer_length')
46
47
  def bot_answer_length_validate_enum(cls, value):
@@ -91,9 +92,9 @@ class ReportCopilotSlotTaskSettings(BaseModel):
91
92
  exclude=excluded_fields,
92
93
  exclude_none=True,
93
94
  )
94
- # override the default output from pydantic by calling `to_dict()` of llm_details
95
- if self.llm_details:
96
- _dict['llm_details'] = self.llm_details.to_dict()
95
+ # override the default output from pydantic by calling `to_dict()` of llm_endpoint
96
+ if self.llm_endpoint:
97
+ _dict['llm_endpoint'] = self.llm_endpoint.to_dict()
97
98
  return _dict
98
99
 
99
100
  @classmethod
@@ -107,7 +108,6 @@ class ReportCopilotSlotTaskSettings(BaseModel):
107
108
 
108
109
  _obj = cls.model_validate({
109
110
  "prompt": obj.get("prompt"),
110
- "llm_choice": obj.get("llm_choice"),
111
111
  "bot_seed": obj.get("bot_seed"),
112
112
  "bot_temperature": obj.get("bot_temperature"),
113
113
  "bot_answer_length": obj.get("bot_answer_length"),
@@ -115,9 +115,11 @@ class ReportCopilotSlotTaskSettings(BaseModel):
115
115
  "stream_speed": obj.get("stream_speed"),
116
116
  "context_window": obj.get("context_window"),
117
117
  "max_tokens": obj.get("max_tokens"),
118
+ "slots_values": obj.get("slots_values"),
118
119
  "bot_name": obj.get("bot_name"),
119
120
  "bot_description": obj.get("bot_description"),
120
- "llm_details": LLMDetails.from_dict(obj["llm_details"]) if obj.get("llm_details") is not None else None,
121
+ "llm_id": obj.get("llm_id"),
122
+ "llm_endpoint": LLMEndpointReadOutput.from_dict(obj["llm_endpoint"]) if obj.get("llm_endpoint") is not None else None,
121
123
  "id": obj.get("id")
122
124
  })
123
125
  return _obj
@@ -17,8 +17,9 @@ import pprint
17
17
  import re # noqa: F401
18
18
  import json
19
19
 
20
- from pydantic import BaseModel, ConfigDict, StrictFloat, StrictInt, StrictStr, field_validator
20
+ from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt, StrictStr, field_validator
21
21
  from typing import Any, ClassVar, Dict, List, Optional, Union
22
+ from qanswer_sdk.models.llm_endpoint_read_input import LLMEndpointReadInput
22
23
  from typing import Optional, Set
23
24
  from typing_extensions import Self
24
25
 
@@ -27,7 +28,7 @@ class ReportCopilotSlotTaskUpdate(BaseModel):
27
28
  ReportCopilotSlotTaskUpdate
28
29
  """ # noqa: E501
29
30
  prompt: Optional[StrictStr] = None
30
- llm_choice: Optional[StrictStr] = None
31
+ llm_choice: Optional[StrictStr] = Field(default=None, description="The LLM choice. If not provided, the system default will be used.")
31
32
  bot_seed: Optional[StrictInt] = None
32
33
  bot_temperature: Optional[Union[StrictFloat, StrictInt]] = None
33
34
  bot_answer_length: Optional[StrictStr] = None
@@ -35,10 +36,13 @@ class ReportCopilotSlotTaskUpdate(BaseModel):
35
36
  stream_speed: Optional[Union[StrictFloat, StrictInt]] = None
36
37
  context_window: Optional[StrictInt] = None
37
38
  max_tokens: Optional[StrictInt] = None
39
+ slots_values: Optional[Dict[str, StrictStr]] = None
38
40
  bot_name: Optional[StrictStr] = None
39
41
  bot_description: Optional[StrictStr] = None
42
+ llm_id: Optional[StrictInt] = Field(default=None, description="The LLM ID. This field is populated based on the llm_choice.")
43
+ llm_endpoint: Optional[LLMEndpointReadInput] = None
40
44
  id: Optional[StrictInt] = None
41
- __properties: ClassVar[List[str]] = ["prompt", "llm_choice", "bot_seed", "bot_temperature", "bot_answer_length", "number_of_references", "stream_speed", "context_window", "max_tokens", "bot_name", "bot_description", "id"]
45
+ __properties: ClassVar[List[str]] = ["prompt", "llm_choice", "bot_seed", "bot_temperature", "bot_answer_length", "number_of_references", "stream_speed", "context_window", "max_tokens", "slots_values", "bot_name", "bot_description", "llm_id", "llm_endpoint", "id"]
42
46
 
43
47
  @field_validator('bot_answer_length')
44
48
  def bot_answer_length_validate_enum(cls, value):
@@ -89,6 +93,9 @@ class ReportCopilotSlotTaskUpdate(BaseModel):
89
93
  exclude=excluded_fields,
90
94
  exclude_none=True,
91
95
  )
96
+ # override the default output from pydantic by calling `to_dict()` of llm_endpoint
97
+ if self.llm_endpoint:
98
+ _dict['llm_endpoint'] = self.llm_endpoint.to_dict()
92
99
  return _dict
93
100
 
94
101
  @classmethod
@@ -110,8 +117,11 @@ class ReportCopilotSlotTaskUpdate(BaseModel):
110
117
  "stream_speed": obj.get("stream_speed"),
111
118
  "context_window": obj.get("context_window"),
112
119
  "max_tokens": obj.get("max_tokens"),
120
+ "slots_values": obj.get("slots_values"),
113
121
  "bot_name": obj.get("bot_name"),
114
122
  "bot_description": obj.get("bot_description"),
123
+ "llm_id": obj.get("llm_id"),
124
+ "llm_endpoint": LLMEndpointReadInput.from_dict(obj["llm_endpoint"]) if obj.get("llm_endpoint") is not None else None,
115
125
  "id": obj.get("id")
116
126
  })
117
127
  return _obj
@@ -17,9 +17,9 @@ import pprint
17
17
  import re # noqa: F401
18
18
  import json
19
19
 
20
- from pydantic import BaseModel, ConfigDict, StrictFloat, StrictInt, StrictStr, field_validator
20
+ from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt, StrictStr, field_validator
21
21
  from typing import Any, ClassVar, Dict, List, Optional, Union
22
- from qanswer_sdk.models.llm_details import LLMDetails
22
+ from qanswer_sdk.models.llm_endpoint_read_output import LLMEndpointReadOutput
23
23
  from typing import Optional, Set
24
24
  from typing_extensions import Self
25
25
 
@@ -28,7 +28,6 @@ class ReportCopilotTaskSettings(BaseModel):
28
28
  ReportCopilotTaskSettings
29
29
  """ # noqa: E501
30
30
  prompt: Optional[StrictStr] = None
31
- llm_choice: Optional[StrictStr] = None
32
31
  bot_seed: Optional[StrictInt] = None
33
32
  bot_temperature: Optional[Union[StrictFloat, StrictInt]] = None
34
33
  bot_answer_length: Optional[StrictStr] = None
@@ -36,10 +35,12 @@ class ReportCopilotTaskSettings(BaseModel):
36
35
  stream_speed: Optional[Union[StrictFloat, StrictInt]] = None
37
36
  context_window: Optional[StrictInt] = None
38
37
  max_tokens: Optional[StrictInt] = None
38
+ slots_values: Optional[Dict[str, StrictStr]] = None
39
39
  bot_name: Optional[StrictStr] = None
40
40
  bot_description: Optional[StrictStr] = None
41
- llm_details: LLMDetails
42
- __properties: ClassVar[List[str]] = ["prompt", "llm_choice", "bot_seed", "bot_temperature", "bot_answer_length", "number_of_references", "stream_speed", "context_window", "max_tokens", "bot_name", "bot_description", "llm_details"]
41
+ llm_id: Optional[StrictInt] = Field(default=None, description="The LLM ID. This field is populated based on the llm_choice.")
42
+ llm_endpoint: Optional[LLMEndpointReadOutput] = None
43
+ __properties: ClassVar[List[str]] = ["prompt", "bot_seed", "bot_temperature", "bot_answer_length", "number_of_references", "stream_speed", "context_window", "max_tokens", "slots_values", "bot_name", "bot_description", "llm_id", "llm_endpoint"]
43
44
 
44
45
  @field_validator('bot_answer_length')
45
46
  def bot_answer_length_validate_enum(cls, value):
@@ -90,9 +91,9 @@ class ReportCopilotTaskSettings(BaseModel):
90
91
  exclude=excluded_fields,
91
92
  exclude_none=True,
92
93
  )
93
- # override the default output from pydantic by calling `to_dict()` of llm_details
94
- if self.llm_details:
95
- _dict['llm_details'] = self.llm_details.to_dict()
94
+ # override the default output from pydantic by calling `to_dict()` of llm_endpoint
95
+ if self.llm_endpoint:
96
+ _dict['llm_endpoint'] = self.llm_endpoint.to_dict()
96
97
  return _dict
97
98
 
98
99
  @classmethod
@@ -106,7 +107,6 @@ class ReportCopilotTaskSettings(BaseModel):
106
107
 
107
108
  _obj = cls.model_validate({
108
109
  "prompt": obj.get("prompt"),
109
- "llm_choice": obj.get("llm_choice"),
110
110
  "bot_seed": obj.get("bot_seed"),
111
111
  "bot_temperature": obj.get("bot_temperature"),
112
112
  "bot_answer_length": obj.get("bot_answer_length"),
@@ -114,9 +114,11 @@ class ReportCopilotTaskSettings(BaseModel):
114
114
  "stream_speed": obj.get("stream_speed"),
115
115
  "context_window": obj.get("context_window"),
116
116
  "max_tokens": obj.get("max_tokens"),
117
+ "slots_values": obj.get("slots_values"),
117
118
  "bot_name": obj.get("bot_name"),
118
119
  "bot_description": obj.get("bot_description"),
119
- "llm_details": LLMDetails.from_dict(obj["llm_details"]) if obj.get("llm_details") is not None else None
120
+ "llm_id": obj.get("llm_id"),
121
+ "llm_endpoint": LLMEndpointReadOutput.from_dict(obj["llm_endpoint"]) if obj.get("llm_endpoint") is not None else None
120
122
  })
121
123
  return _obj
122
124
 
@@ -17,8 +17,9 @@ import pprint
17
17
  import re # noqa: F401
18
18
  import json
19
19
 
20
- from pydantic import BaseModel, ConfigDict, StrictFloat, StrictInt, StrictStr, field_validator
20
+ from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt, StrictStr, field_validator
21
21
  from typing import Any, ClassVar, Dict, List, Optional, Union
22
+ from qanswer_sdk.models.llm_endpoint_read_input import LLMEndpointReadInput
22
23
  from typing import Optional, Set
23
24
  from typing_extensions import Self
24
25
 
@@ -27,7 +28,7 @@ class ReportCopilotTaskUpdate(BaseModel):
27
28
  ReportCopilotTaskUpdate
28
29
  """ # noqa: E501
29
30
  prompt: Optional[StrictStr] = None
30
- llm_choice: Optional[StrictStr] = None
31
+ llm_choice: Optional[StrictStr] = Field(default=None, description="The LLM choice. If not provided, the system default will be used.")
31
32
  bot_seed: Optional[StrictInt] = None
32
33
  bot_temperature: Optional[Union[StrictFloat, StrictInt]] = None
33
34
  bot_answer_length: Optional[StrictStr] = None
@@ -35,9 +36,12 @@ class ReportCopilotTaskUpdate(BaseModel):
35
36
  stream_speed: Optional[Union[StrictFloat, StrictInt]] = None
36
37
  context_window: Optional[StrictInt] = None
37
38
  max_tokens: Optional[StrictInt] = None
39
+ slots_values: Optional[Dict[str, StrictStr]] = None
38
40
  bot_name: Optional[StrictStr] = None
39
41
  bot_description: Optional[StrictStr] = None
40
- __properties: ClassVar[List[str]] = ["prompt", "llm_choice", "bot_seed", "bot_temperature", "bot_answer_length", "number_of_references", "stream_speed", "context_window", "max_tokens", "bot_name", "bot_description"]
42
+ llm_id: Optional[StrictInt] = Field(default=None, description="The LLM ID. This field is populated based on the llm_choice.")
43
+ llm_endpoint: Optional[LLMEndpointReadInput] = None
44
+ __properties: ClassVar[List[str]] = ["prompt", "llm_choice", "bot_seed", "bot_temperature", "bot_answer_length", "number_of_references", "stream_speed", "context_window", "max_tokens", "slots_values", "bot_name", "bot_description", "llm_id", "llm_endpoint"]
41
45
 
42
46
  @field_validator('bot_answer_length')
43
47
  def bot_answer_length_validate_enum(cls, value):
@@ -88,6 +92,9 @@ class ReportCopilotTaskUpdate(BaseModel):
88
92
  exclude=excluded_fields,
89
93
  exclude_none=True,
90
94
  )
95
+ # override the default output from pydantic by calling `to_dict()` of llm_endpoint
96
+ if self.llm_endpoint:
97
+ _dict['llm_endpoint'] = self.llm_endpoint.to_dict()
91
98
  return _dict
92
99
 
93
100
  @classmethod
@@ -109,8 +116,11 @@ class ReportCopilotTaskUpdate(BaseModel):
109
116
  "stream_speed": obj.get("stream_speed"),
110
117
  "context_window": obj.get("context_window"),
111
118
  "max_tokens": obj.get("max_tokens"),
119
+ "slots_values": obj.get("slots_values"),
112
120
  "bot_name": obj.get("bot_name"),
113
- "bot_description": obj.get("bot_description")
121
+ "bot_description": obj.get("bot_description"),
122
+ "llm_id": obj.get("llm_id"),
123
+ "llm_endpoint": LLMEndpointReadInput.from_dict(obj["llm_endpoint"]) if obj.get("llm_endpoint") is not None else None
114
124
  })
115
125
  return _obj
116
126