mindbridge-api-python-client 1.4.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. mindbridge_api_python_client-1.4.8.dist-info/LICENSE.txt +11 -0
  2. mindbridge_api_python_client-1.4.8.dist-info/METADATA +64 -0
  3. mindbridge_api_python_client-1.4.8.dist-info/RECORD +43 -0
  4. mindbridge_api_python_client-1.4.8.dist-info/WHEEL +4 -0
  5. mindbridgeapi/__init__.py +69 -0
  6. mindbridgeapi/accounting_period.py +34 -0
  7. mindbridgeapi/analyses.py +383 -0
  8. mindbridgeapi/analysis_item.py +167 -0
  9. mindbridgeapi/analysis_period.py +68 -0
  10. mindbridgeapi/analysis_source_item.py +198 -0
  11. mindbridgeapi/analysis_source_type_item.py +55 -0
  12. mindbridgeapi/analysis_source_types.py +36 -0
  13. mindbridgeapi/analysis_sources.py +132 -0
  14. mindbridgeapi/analysis_type_item.py +45 -0
  15. mindbridgeapi/analysis_types.py +62 -0
  16. mindbridgeapi/async_results.py +194 -0
  17. mindbridgeapi/base_set.py +175 -0
  18. mindbridgeapi/chunked_file_item.py +37 -0
  19. mindbridgeapi/chunked_file_part_item.py +25 -0
  20. mindbridgeapi/chunked_files.py +70 -0
  21. mindbridgeapi/column_mapping.py +21 -0
  22. mindbridgeapi/common_validators.py +71 -0
  23. mindbridgeapi/data_tables.py +206 -0
  24. mindbridgeapi/engagement_item.py +100 -0
  25. mindbridgeapi/engagements.py +93 -0
  26. mindbridgeapi/enumerations/analysis_source_type.py +142 -0
  27. mindbridgeapi/enumerations/analysis_type.py +36 -0
  28. mindbridgeapi/enumerations/deprecated_enum.py +69 -0
  29. mindbridgeapi/enumerations/system_library.py +32 -0
  30. mindbridgeapi/exceptions.py +92 -0
  31. mindbridgeapi/file_manager.py +212 -0
  32. mindbridgeapi/file_manager_item.py +107 -0
  33. mindbridgeapi/generated_pydantic_model/model.py +7035 -0
  34. mindbridgeapi/libraries.py +54 -0
  35. mindbridgeapi/library_item.py +44 -0
  36. mindbridgeapi/organization_item.py +61 -0
  37. mindbridgeapi/organizations.py +82 -0
  38. mindbridgeapi/server.py +202 -0
  39. mindbridgeapi/task_item.py +47 -0
  40. mindbridgeapi/tasks.py +150 -0
  41. mindbridgeapi/transaction_id_selection.py +31 -0
  42. mindbridgeapi/users.py +42 -0
  43. mindbridgeapi/virtual_column.py +104 -0
@@ -0,0 +1,45 @@
1
+ #
2
+ # Copyright MindBridge Analytics Inc. all rights reserved.
3
+ #
4
+ # This material is confidential and may not be copied, distributed,
5
+ # reverse engineered, decompiled or otherwise disseminated without
6
+ # the prior written consent of MindBridge Analytics Inc.
7
+ #
8
+
9
+ from typing import ClassVar, Generator
10
+ from pydantic import ConfigDict, Field, field_validator, model_validator
11
+ from mindbridgeapi.analysis_source_type_item import AnalysisSourceTypeItem
12
+ from mindbridgeapi.common_validators import (
13
+ _convert_userinfo_to_useritem,
14
+ _warning_if_extra_fields,
15
+ )
16
+ from mindbridgeapi.generated_pydantic_model.model import ApiAnalysisTypeRead
17
+
18
+
19
def _empty_analysis_source_types() -> Generator[AnalysisSourceTypeItem, None, None]:
    """Produce a generator that yields nothing.

    Used as the default factory for ``AnalysisTypeItem.analysis_source_types``
    so that the attribute is always a generator rather than ``None``.
    """
    return
    yield  # pragma: no cover — makes this function a generator
26
+
27
+
28
class AnalysisTypeItem(ApiAnalysisTypeRead):
    """An analysis type as read from the MindBridge API.

    Extends the generated ``ApiAnalysisTypeRead`` model with ids of
    commonly used analysis types and with a lazily evaluated stream of
    the related analysis source types.
    """

    # Ids of commonly used analysis types.
    GENERAL_LEDGER: ClassVar[str] = "4b8360d00000000000000000"
    NOT_FOR_PROFIT_GENERAL_LEDGER: ClassVar[str] = "4b8360d00000000000000001"
    NOT_FOR_PROFIT_GENERAL_LEDGER_FUND: ClassVar[str] = "4b8360d00000000000000002"
    ACCOUNTS_PAYABLE_V2: ClassVar[str] = "4b8360d00000000000000003"
    ACCOUNTS_RECEIVABLE_V2: ClassVar[str] = "4b8360d00000000000000004"

    # Related source types; excluded from serialization. The default factory
    # keeps this attribute a (possibly empty) generator instead of None.
    analysis_source_types: Generator[AnalysisSourceTypeItem, None, None] = Field(
        default_factory=_empty_analysis_source_types, exclude=True
    )

    model_config = ConfigDict(
        extra="allow",  # keep unknown server fields instead of failing
        validate_assignment=True,
        validate_default=True,
        validate_return=True,
    )
    # Warn (rather than fail) when the server returned undeclared fields.
    _a = model_validator(mode="after")(_warning_if_extra_fields)
    # Applied to every field; presumably converts user-info payloads into
    # user items (see common_validators) — TODO confirm.
    _b = field_validator("*")(_convert_userinfo_to_useritem)
@@ -0,0 +1,62 @@
1
+ #
2
+ # Copyright MindBridge Analytics Inc. all rights reserved.
3
+ #
4
+ # This material is confidential and may not be copied, distributed,
5
+ # reverse engineered, decompiled or otherwise disseminated without
6
+ # the prior written consent of MindBridge Analytics Inc.
7
+ #
8
+
9
+ from dataclasses import dataclass
10
+ from functools import cached_property
11
+ from typing import Any, Dict, Generator, Optional
12
+ from mindbridgeapi.analysis_source_types import AnalysisSourceTypes
13
+ from mindbridgeapi.analysis_type_item import AnalysisTypeItem
14
+ from mindbridgeapi.base_set import BaseSet
15
+ from mindbridgeapi.exceptions import ItemNotFoundError
16
+
17
+
18
@dataclass
class AnalysisTypes(BaseSet):
    """Access to the ``analysis-types`` endpoints of the MindBridge API."""

    @cached_property
    def base_url(self) -> str:
        """Base URL for all analysis-type requests."""
        return f"{self.server.base_url}/analysis-types"

    def get_by_id(self, id: str) -> AnalysisTypeItem:
        """Fetch a single analysis type and attach its source-type stream."""
        response = super()._get_by_id(url=f"{self.base_url}/{id}")
        item = AnalysisTypeItem.model_validate(response)
        self.restart_analysis_source_types(item)
        return item

    def get(
        self, json: Optional[Dict[str, Any]] = None
    ) -> Generator[AnalysisTypeItem, None, None]:
        """Query analysis types, yielding one validated item at a time."""
        query = {} if json is None else json

        for response in super()._get(url=f"{self.base_url}/query", json=query):
            item = AnalysisTypeItem.model_validate(response)
            self.restart_analysis_source_types(item)
            yield item

    def restart_analysis_source_types(
        self, analysis_type_item: AnalysisTypeItem
    ) -> None:
        """Reset the item's lazy generator of related analysis source types.

        Raises:
            ItemNotFoundError: If the item has no id.
        """
        if getattr(analysis_type_item, "id", None) is None:
            raise ItemNotFoundError

        configurations = analysis_type_item.source_configurations
        if configurations is None:
            return

        source_type_ids = [
            configuration.source_type_id
            for configuration in configurations
            if configuration.source_type_id is not None
        ]

        if source_type_ids:
            # Lazy: the HTTP query only runs when the generator is consumed.
            source_types = AnalysisSourceTypes(server=self.server)
            analysis_type_item.analysis_source_types = source_types.get(
                json={"id": {"$in": source_type_ids}}
            )
@@ -0,0 +1,194 @@
1
+ #
2
+ # Copyright MindBridge Analytics Inc. all rights reserved.
3
+ #
4
+ # This material is confidential and may not be copied, distributed,
5
+ # reverse engineered, decompiled or otherwise disseminated without
6
+ # the prior written consent of MindBridge Analytics Inc.
7
+ #
8
+
9
+ from dataclasses import dataclass
10
+ from functools import cached_property
11
+ import logging
12
+ import time
13
+ from typing import Any, Dict, Generator, List, Optional
14
+ from mindbridgeapi.base_set import BaseSet
15
+ from mindbridgeapi.exceptions import (
16
+ ItemNotFoundError,
17
+ UnexpectedServerError,
18
+ ValidationError,
19
+ )
20
+ from mindbridgeapi.generated_pydantic_model.model import (
21
+ ApiAsyncResult as AsyncResultItem,
22
+ )
23
+ from mindbridgeapi.generated_pydantic_model.model import Status2 as AsyncResultStatus
24
+
25
+ logger = logging.getLogger(__name__)
26
+
27
+
28
@dataclass
class AsyncResults(BaseSet):
    """Access to the ``async-results`` endpoints, with polling helpers."""

    @cached_property
    def base_url(self) -> str:
        """Base URL for all async-result requests."""
        return f"{self.server.base_url}/async-results"

    def _wait_for_async_result(
        self,
        async_result: AsyncResultItem,
        max_wait_minutes: int,
        init_interval_sec: int,
    ) -> None:
        """Wait for a single async result to complete.

        Waits, at most the minutes specified, for the async result to be
        COMPLETE and raises an error otherwise.

        Args:
            async_result (AsyncResultItem): Async result to check
            max_wait_minutes (int): Maximum minutes to wait
            init_interval_sec (int): The initial seconds to wait between polls
        """
        self._wait_for_async_results(
            async_results=[async_result],
            max_wait_minutes=max_wait_minutes,
            init_interval_sec=init_interval_sec,
        )

    def _wait_for_async_results(
        self,
        async_results: List[AsyncResultItem],
        max_wait_minutes: int,
        init_interval_sec: int,
    ) -> None:
        """Wait for all given async results to complete.

        Polls the server until every async result is COMPLETE, raising an
        error if any result errors. The polling interval doubles each
        iteration (exponential backoff) up to a five-minute cap.

        Args:
            async_results (List[AsyncResultItem]): Async results to check
            max_wait_minutes (int): Maximum minutes to wait
            init_interval_sec (int): The initial seconds to wait between polls

        Raises:
            TimeoutError: If waited for more than specified
            ItemNotFoundError: If any provided async result has no id
            UnexpectedServerError: If the server's answer doesn't match the
                requested ids
            ValidationError: If any async result reports an error state
        """

        max_interval_sec = 60 * 5  # backoff cap: 5 minutes between polls
        max_wait_seconds = max_wait_minutes * 60
        start_time = time.monotonic()
        elapsed_time = 0.0
        interval_sec = init_interval_sec
        i = 0  # iteration counter, drives the exponential backoff

        while (time.monotonic() - start_time) < max_wait_seconds:
            loop_start_time = time.monotonic()
            elapsed_time = loop_start_time - start_time
            sorted_async_results_ids = sorted(
                [x.id for x in async_results if x.id is not None]
            )
            number_of_async_results = len(sorted_async_results_ids)
            # Every pending result must have an id, otherwise it can't be polled.
            if number_of_async_results != len(async_results):
                raise ItemNotFoundError

            logger.info(
                "Starting a AsyncResult iteration. It has been: "
                f"{elapsed_time:.1f} seconds. Loop {i=} and {number_of_async_results} "
                "to check"
            )

            if number_of_async_results <= 0:
                break  # nothing left to wait for

            # Single id: direct GET; several ids: one query request.
            if number_of_async_results == 1:
                new_async_results = [self.get_by_id(sorted_async_results_ids[0])]
            else:
                query = {"id": {"$in": sorted_async_results_ids}}
                new_async_results = list(self.get(json=query))

            if (
                sorted([x.id for x in new_async_results if x.id is not None])
                != sorted_async_results_ids
            ):
                raise UnexpectedServerError(  # noqa: TRY003
                    "AsyncResults received didn't match requested"
                )

            # Keep only the results that are still in progress for the next poll.
            async_results = []
            for async_result in new_async_results:
                if async_result is None:
                    raise UnexpectedServerError("AsyncResult was None")  # noqa: TRY003

                if not self._check_if_async_result_is_completed(async_result):
                    async_results.append(async_result)

            if len(async_results) == 0:
                break  # everything completed

            # Sleep only for the remainder of the interval not already spent
            # on the HTTP round-trips above.
            sleep_seconds = interval_sec - (time.monotonic() - loop_start_time)
            logger.info(
                f"Waiting for about {sleep_seconds} seconds as some of the async "
                "results are not complete yet."
            )
            if sleep_seconds > 0:
                time.sleep(sleep_seconds)

            elapsed_time = time.monotonic() - start_time
            logger.info(
                "Finished a AsyncResult iteration. It has been:"
                f" {elapsed_time:.1f} seconds"
            )

            # Exponential backoff: init * 2**i, capped at max_interval_sec.
            if interval_sec < max_interval_sec:
                interval_sec = min(init_interval_sec * 2**i, max_interval_sec)

            i = i + 1

        else:
            # Only reached when the while-condition expires without a break.
            raise TimeoutError(f"Waited too long: {max_wait_minutes} minutes")

    def _check_if_async_result_is_completed(
        self, async_result: AsyncResultItem
    ) -> bool:
        """Check whether the async result is completed.

        Returns True if COMPLETE, False if IN_PROGRESS and raises an error
        otherwise.

        Args:
            async_result (AsyncResultItem): The async result to check

        Returns:
            bool: True if COMPLETE, False if IN_PROGRESS

        Raises:
            ValidationError: If the async_result resulted in an error state
        """
        async_result_str = (
            f"Async Result {async_result.id} for"
            f" {async_result.entity_type} {async_result.entity_id} resulted in"
            f" {async_result.status}"
        )
        logger.info(async_result_str)

        if async_result.status == AsyncResultStatus.IN_PROGRESS:
            return False

        if async_result.status == AsyncResultStatus.COMPLETE:
            return True

        # Must be AsyncResultStatus.ERROR
        raise ValidationError(f"{async_result_str} with message {async_result.error}.")

    def get(
        self, json: Optional[Dict[str, Any]] = None
    ) -> Generator[AsyncResultItem, None, None]:
        """Query async results, yielding one validated item at a time."""
        if json is None:
            json = {}

        url = f"{self.base_url}/query"
        for resp_dict in super()._get(url=url, json=json):
            yield AsyncResultItem.model_validate(resp_dict)

    def get_by_id(self, id: str) -> AsyncResultItem:
        """Fetch a single async result by its id."""
        url = f"{self.base_url}/{id}"
        resp_dict = super()._get_by_id(url=url)

        return AsyncResultItem.model_validate(resp_dict)
@@ -0,0 +1,175 @@
1
+ #
2
+ # Copyright MindBridge Analytics Inc. all rights reserved.
3
+ #
4
+ # This material is confidential and may not be copied, distributed,
5
+ # reverse engineered, decompiled or otherwise disseminated without
6
+ # the prior written consent of MindBridge Analytics Inc.
7
+ #
8
+
9
+ from dataclasses import dataclass
10
+ from http.client import responses
11
+ import json
12
+ import logging
13
+ import shutil
14
+ from typing import TYPE_CHECKING, Any, Dict, Generator, Iterable, List, Optional
15
+ from urllib.parse import urlencode
16
+ from mindbridgeapi.exceptions import UnexpectedServerError, ValidationError
17
+
18
+ if TYPE_CHECKING:
19
+ from pathlib import Path
20
+ from urllib3.response import BaseHTTPResponse
21
+ from mindbridgeapi.server import Server
22
+
23
+ logger = logging.getLogger(__name__)
24
+
25
+
26
@dataclass
class BaseSet:
    """Shared HTTP plumbing for the endpoint collections in this package.

    Subclasses provide a ``base_url`` and call the protected helpers below,
    which issue requests through ``self.server.http`` (a urllib3-style
    client) and normalise/validate the responses.
    """

    # The API server this set talks to; provides ``http`` and ``base_url``.
    server: "Server"

    def _get_by_id(
        self, url: str, query_parameters: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """GET ``url`` and return the JSON response body as a dict."""
        resp = self.server.http.request("GET", url, fields=query_parameters)
        self._check_response(resp)
        return self._response_as_dict(resp)

    def _get(
        self, url: str, json: Dict[str, Any]
    ) -> Generator[Dict[str, Any], None, None]:
        """POST a paged query to ``url`` and yield every item of every page.

        Pages are fetched lazily: the next page is only requested once all
        items buffered from the previous one have been yielded. The server's
        page envelope is expected to contain a boolean ``last`` flag and a
        ``content`` list.
        """
        item_holder: List[Dict[str, Any]] = []
        page_number = 0
        page_is_not_last = True

        while True:
            if len(item_holder) != 0:
                yield item_holder.pop(0)
            elif page_is_not_last:
                params = {"page": page_number}
                request_url = f"{url}?{urlencode(params)}"

                resp = self.server.http.request("POST", request_url, json=json)
                self._check_response(resp)
                resp_dict = self._response_as_dict(resp)

                if "last" not in resp_dict or not isinstance(resp_dict["last"], bool):
                    raise UnexpectedServerError(f"{resp_dict}")

                page_is_not_last = not resp_dict["last"]

                if "content" not in resp_dict or not isinstance(
                    resp_dict["content"], list
                ):
                    raise UnexpectedServerError(f"{resp_dict}")

                item_holder.extend(resp_dict["content"])

                page_number = page_number + 1
            else:
                return  # No more items

    def _create(
        self,
        url: str,
        json: Optional[Dict[str, Any]] = None,
        extra_ok_statuses: Optional[Iterable[int]] = None,
    ) -> Dict[str, Any]:
        """POST ``json`` to ``url`` and return the JSON response as a dict.

        ``extra_ok_statuses`` lists error statuses to tolerate (see
        ``_check_response``).
        """
        if json is None:
            json = {}

        resp = self.server.http.request("POST", url, json=json)
        self._check_response(resp=resp, extra_ok_statuses=extra_ok_statuses)
        return self._response_as_dict(resp)

    def _delete(self, url: str) -> None:
        """DELETE ``url``; the response body (if any) is discarded."""
        resp = self.server.http.request("DELETE", url)
        self._check_response(resp)

    def _update(self, url: str, json: Dict[str, Any]) -> Dict[str, Any]:
        """PUT ``json`` to ``url`` and return the JSON response as a dict."""
        resp = self.server.http.request("PUT", url, json=json)
        self._check_response(resp)
        return self._response_as_dict(resp)

    def _upload(self, url: str, files: Dict[str, Any]) -> Dict[str, Any]:
        """POST multipart ``files`` to ``url``; return the JSON body as a dict.

        Returns an empty dict for a 204 (No Content) response.
        """
        resp = self.server.http.request("POST", url, fields=files)

        self._check_response(resp)

        if resp.status == 204:
            return {}

        return self._response_as_dict(resp)

    @staticmethod
    def _response_as_dict(resp: "BaseHTTPResponse") -> Dict[str, Any]:
        """Converts the HTTP response body as a dict

        Args:
            resp (urllib3.response.BaseHTTPResponse): The HTTP response from the server

        Returns:
            Dict[str, Any]: The dict representation of the JSON response from the server

        Raises:
            UnexpectedServerError: When the response is not JSON, or it is JSON but
                python didn't parse the data to a dict
        """
        try:
            resp_obj = resp.json()
        except UnicodeDecodeError as err:
            raise UnexpectedServerError("body was not UTF-8.") from err  # noqa:TRY003
        except json.JSONDecodeError as err:
            raise UnexpectedServerError("body was not JSON.") from err  # noqa:TRY003

        if not isinstance(resp_obj, dict):
            raise UnexpectedServerError("JSON was not an object.")  # noqa:TRY003

        return resp_obj

    def _download(self, url: str, output_path: "Path") -> "Path":
        """Stream the body of ``url`` into ``output_path`` and return the path.

        NOTE(review): the body is written to disk *before* ``_check_response``
        runs. With ``preload_content=False`` the checker's body logging reads
        an already-consumed stream, so the order appears deliberate — but it
        also means an HTTP error body may be left in ``output_path``; confirm
        this is intended.
        """
        with self.server.http.request("GET", url, preload_content=False) as resp, open(
            output_path, "wb"
        ) as write_file:
            shutil.copyfileobj(resp, write_file)
            self._check_response(resp)
            resp.release_conn()

        return output_path

    @staticmethod
    def _check_response(
        resp: "BaseHTTPResponse",
        extra_ok_statuses: Optional[Iterable[int]] = None,
    ) -> None:
        """Raises error if response status is not ok, also logs

        Logs an approximate reconstruction of the response at DEBUG level,
        then raises ValidationError for any 4xx/5xx status that is not listed
        in ``extra_ok_statuses``.
        """

        if extra_ok_statuses is None:
            extra_ok_statuses = iter(())

        status_text = responses.get(resp.status, "")

        log_str = "HTTP response (approximately):"
        log_str += f"\n{resp.status} {status_text}"
        for k, v in resp.headers.items():
            log_str += f"\n{k}: {v}"

        log_str += "\n"
        try:
            log_str += f"\n{json.dumps(resp.json(), indent=4, sort_keys=True)}"
        except (UnicodeDecodeError, json.JSONDecodeError):
            if len(resp.data) > 0:
                log_str += "\n[Body that is apparently not JSON data]"

        logger.debug(log_str)

        # Raise error if not ok
        if 400 <= resp.status < 600 and resp.status not in extra_ok_statuses:
            http_error_msg = f"{resp.status}: {status_text} for url: {resp.url}"
            try:
                # Include the (pretty-printed) JSON error body when available.
                http_error_msg += (
                    f"\n{json.dumps(resp.json(), indent=4, sort_keys=True)}"
                )
            except (UnicodeDecodeError, json.JSONDecodeError):
                pass

            raise ValidationError(http_error_msg)
@@ -0,0 +1,37 @@
1
+ #
2
+ # Copyright MindBridge Analytics Inc. all rights reserved.
3
+ #
4
+ # This material is confidential and may not be copied, distributed,
5
+ # reversed engineered, decompiled or otherwise disseminated without
6
+ # the prior written consent of MindBridge Analytics Inc.
7
+ #
8
+
9
+ from typing import Any, Dict
10
+ from pydantic import ConfigDict, field_validator, model_validator
11
+ from mindbridgeapi.common_validators import (
12
+ _convert_userinfo_to_useritem,
13
+ _warning_if_extra_fields,
14
+ )
15
+ from mindbridgeapi.generated_pydantic_model.model import (
16
+ ApiChunkedFileCreateOnly,
17
+ ApiChunkedFileRead,
18
+ )
19
+
20
+
21
class ChunkedFileItem(ApiChunkedFileRead):
    """A chunked file as read from the MindBridge API."""

    model_config = ConfigDict(
        extra="allow",
        validate_assignment=True,
        validate_default=True,
        validate_return=True,
    )
    # Warn (rather than fail) when the server returned undeclared fields.
    _a = model_validator(mode="after")(_warning_if_extra_fields)
    _b = field_validator("*")(_convert_userinfo_to_useritem)

    @property
    def create_json(self) -> Dict[str, Any]:
        """The request body, as a dict, for creating this chunked file."""
        create_only = ApiChunkedFileCreateOnly.model_validate(self.model_dump())
        return create_only.model_dump(mode="json", by_alias=True, exclude_none=True)
@@ -0,0 +1,25 @@
1
+ #
2
+ # Copyright MindBridge Analytics Inc. all rights reserved.
3
+ #
4
+ # This material is confidential and may not be copied, distributed,
5
+ # reversed engineered, decompiled or otherwise disseminated without
6
+ # the prior written consent of MindBridge Analytics Inc.
7
+ #
8
+
9
+ from pydantic import ConfigDict, model_validator
10
+ from mindbridgeapi.common_validators import _warning_if_extra_fields
11
+ from mindbridgeapi.generated_pydantic_model.model import ApiChunkedFilePartRead
12
+
13
+
14
class ChunkedFilePartItem(ApiChunkedFilePartRead):
    """One part (chunk) of a chunked file upload."""

    model_config = ConfigDict(
        extra="allow",  # keep unknown server fields instead of failing
        validate_assignment=True,
        validate_default=True,
        validate_return=True,
    )
    # Warn (rather than fail) when the server returned undeclared fields.
    _ = model_validator(mode="after")(_warning_if_extra_fields)

    @property
    def create_body(self) -> str:
        """The JSON string used as the request body when creating this part."""
        return self.model_dump_json(by_alias=True, exclude_none=True)
@@ -0,0 +1,70 @@
1
+ #
2
+ # Copyright MindBridge Analytics Inc. all rights reserved.
3
+ #
4
+ # This material is confidential and may not be copied, distributed,
5
+ # reverse engineered, decompiled or otherwise disseminated without
6
+ # the prior written consent of MindBridge Analytics Inc.
7
+ #
8
+
9
+ from dataclasses import dataclass
10
+ from functools import cached_property
11
+ from typing import TYPE_CHECKING, Any, Dict, Generator, Optional
12
+ from mindbridgeapi.base_set import BaseSet
13
+ from mindbridgeapi.chunked_file_item import ChunkedFileItem
14
+ from mindbridgeapi.exceptions import ItemAlreadyExistsError, ItemNotFoundError
15
+
16
+ if TYPE_CHECKING:
17
+ from mindbridgeapi.chunked_file_part_item import ChunkedFilePartItem
18
+
19
+
20
@dataclass
class ChunkedFiles(BaseSet):
    """Access to the ``chunked-files`` endpoints of the MindBridge API."""

    @cached_property
    def base_url(self) -> str:
        """Base URL for all chunked-file requests."""
        return f"{self.server.base_url}/chunked-files"

    def create(self, item: ChunkedFileItem) -> ChunkedFileItem:
        """Create a new chunked file; ``item`` must not already have an id.

        Raises:
            ItemAlreadyExistsError: If the item already has an id.
        """
        # Second clause is redundant at runtime but narrows the Optional type.
        if getattr(item, "id", None) is not None and item.id is not None:
            raise ItemAlreadyExistsError(item.id)

        response = super()._create(url=self.base_url, json=item.create_json)
        return ChunkedFileItem.model_validate(response)

    def get_by_id(self, id: str) -> ChunkedFileItem:
        """Fetch a single chunked file by its id."""
        response = super()._get_by_id(url=f"{self.base_url}/{id}")
        return ChunkedFileItem.model_validate(response)

    def get(
        self, json: Optional[Dict[str, Any]] = None
    ) -> Generator[ChunkedFileItem, None, None]:
        """Query chunked files, yielding one validated item at a time."""
        query = {} if json is None else json

        for response in super()._get(url=f"{self.base_url}/query", json=query):
            yield ChunkedFileItem.model_validate(response)

    def send_chunk(
        self,
        chunked_file_item: ChunkedFileItem,
        chunked_file_part_item: "ChunkedFilePartItem",
        data: bytes,
    ) -> None:
        """Upload one chunk of ``data`` for the given chunked file.

        Raises:
            ItemNotFoundError: If the chunked file has no id.
        """
        if getattr(chunked_file_item, "id", None) is None:
            raise ItemNotFoundError

        # Multipart body: the part metadata as JSON plus the raw chunk bytes.
        part_field = (None, chunked_file_part_item.create_body, "application/json")
        chunk_field = (chunked_file_item.name, data)

        super()._upload(
            url=f"{self.base_url}/{chunked_file_item.id}/part",
            files={"chunkedFilePart": part_field, "fileChunk": chunk_field},
        )
@@ -0,0 +1,21 @@
1
+ #
2
+ # Copyright MindBridge Analytics Inc. all rights reserved.
3
+ #
4
+ # This material is confidential and may not be copied, distributed,
5
+ # reverse engineered, decompiled or otherwise disseminated without
6
+ # the prior written consent of MindBridge Analytics Inc.
7
+ #
8
+
9
+ from pydantic import ConfigDict, model_validator
10
+ from mindbridgeapi.common_validators import _warning_if_extra_fields
11
+ from mindbridgeapi.generated_pydantic_model.model import ApiProposedColumnMappingRead
12
+
13
+
14
class ColumnMapping(ApiProposedColumnMappingRead):
    """A proposed column mapping as read from the MindBridge API."""

    model_config = ConfigDict(
        extra="allow",  # keep unknown server fields instead of failing
        validate_assignment=True,
        validate_default=True,
        validate_return=True,
    )
    # Warn (rather than fail) when the server returned undeclared fields.
    _ = model_validator(mode="after")(_warning_if_extra_fields)