mindbridge-api-python-client 1.4.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. mindbridge_api_python_client-1.4.8.dist-info/LICENSE.txt +11 -0
  2. mindbridge_api_python_client-1.4.8.dist-info/METADATA +64 -0
  3. mindbridge_api_python_client-1.4.8.dist-info/RECORD +43 -0
  4. mindbridge_api_python_client-1.4.8.dist-info/WHEEL +4 -0
  5. mindbridgeapi/__init__.py +69 -0
  6. mindbridgeapi/accounting_period.py +34 -0
  7. mindbridgeapi/analyses.py +383 -0
  8. mindbridgeapi/analysis_item.py +167 -0
  9. mindbridgeapi/analysis_period.py +68 -0
  10. mindbridgeapi/analysis_source_item.py +198 -0
  11. mindbridgeapi/analysis_source_type_item.py +55 -0
  12. mindbridgeapi/analysis_source_types.py +36 -0
  13. mindbridgeapi/analysis_sources.py +132 -0
  14. mindbridgeapi/analysis_type_item.py +45 -0
  15. mindbridgeapi/analysis_types.py +62 -0
  16. mindbridgeapi/async_results.py +194 -0
  17. mindbridgeapi/base_set.py +175 -0
  18. mindbridgeapi/chunked_file_item.py +37 -0
  19. mindbridgeapi/chunked_file_part_item.py +25 -0
  20. mindbridgeapi/chunked_files.py +70 -0
  21. mindbridgeapi/column_mapping.py +21 -0
  22. mindbridgeapi/common_validators.py +71 -0
  23. mindbridgeapi/data_tables.py +206 -0
  24. mindbridgeapi/engagement_item.py +100 -0
  25. mindbridgeapi/engagements.py +93 -0
  26. mindbridgeapi/enumerations/analysis_source_type.py +142 -0
  27. mindbridgeapi/enumerations/analysis_type.py +36 -0
  28. mindbridgeapi/enumerations/deprecated_enum.py +69 -0
  29. mindbridgeapi/enumerations/system_library.py +32 -0
  30. mindbridgeapi/exceptions.py +92 -0
  31. mindbridgeapi/file_manager.py +212 -0
  32. mindbridgeapi/file_manager_item.py +107 -0
  33. mindbridgeapi/generated_pydantic_model/model.py +7035 -0
  34. mindbridgeapi/libraries.py +54 -0
  35. mindbridgeapi/library_item.py +44 -0
  36. mindbridgeapi/organization_item.py +61 -0
  37. mindbridgeapi/organizations.py +82 -0
  38. mindbridgeapi/server.py +202 -0
  39. mindbridgeapi/task_item.py +47 -0
  40. mindbridgeapi/tasks.py +150 -0
  41. mindbridgeapi/transaction_id_selection.py +31 -0
  42. mindbridgeapi/users.py +42 -0
  43. mindbridgeapi/virtual_column.py +104 -0
mindbridge_api_python_client-1.4.8.dist-info/LICENSE.txt
@@ -0,0 +1,11 @@
+ This package contains intellectual property proprietary to MindBridge as per
+ the Agreement, in addition to sample data sets, and shall be used for internal
+ purposes by MindBridge clients only.
+
+ You may include portions of this package in your software as needed for the
+ purposes of consuming the MindBridge API.
+
+ You may not redistribute this package to third parties except for the express
+ purposes of the third party needing to develop software to integrate with
+ MindBridge in order to further utilization of the MindBridge Software per the
+ applicable Agreement.
mindbridge_api_python_client-1.4.8.dist-info/METADATA
@@ -0,0 +1,64 @@
+ Metadata-Version: 2.1
+ Name: mindbridge-api-python-client
+ Version: 1.4.8
+ Summary: Interact with the MindBridge API
+ Home-page: https://www.mindbridge.ai
+ License: Proprietary
+ Author: Edgar Silva
+ Author-email: edgar.silva@mindbridge.ai
+ Maintainer: Kevin Paulson
+ Maintainer-email: kevin.paulson@mindbridge.ai
+ Requires-Python: >=3.8.1,<4.0
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Framework :: Flake8
+ Classifier: Framework :: Pydantic
+ Classifier: Framework :: Pydantic :: 2
+ Classifier: Framework :: Pytest
+ Classifier: License :: Other/Proprietary License
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3 :: Only
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Typing :: Typed
+ Requires-Dist: pydantic (>=2.5.2,<3.0.0)
+ Requires-Dist: typing-extensions (>=4.0.0,<5.0.0) ; python_version < "3.9"
+ Requires-Dist: urllib3 (>=2.0.7,<3.0.0)
+ Project-URL: Repository, https://github.com/mindbridge-ai/mindbridge-api-python-client
+ Description-Content-Type: text/markdown
+
+ <h1 align="center">MindBridge API Python Client</h1>
+ <p align="center">
+ <img alt="Logo of MindBridge" src="https://www.mindbridge.ai/wp-content/uploads/2021/07/MindBridge_Logo_Primary_RGB.png" />
+ </p>
+
+ Interact with the MindBridge API using this Python SDK. Please see [The MindBridge API](https://www.mindbridge.ai/support/api/) for more information about the MindBridge API. You can also [Access MindBridge Customer Support](https://support.mindbridge.ai/hc/en-us/articles/360054147834-Access-MindBridge-Customer-Support) or [Contact us](https://www.mindbridge.ai/contact/).
+
+ ## Installation
+ mindbridge-api-python-client can be installed with [pip](https://pip.pypa.io):
+
+ ```sh
+ python -m pip install mindbridge-api-python-client
+ ```
+
+ ## Usage
+ Replace `subdomain.mindbridge.ai` with your MindBridge tenant URL.
+ ```python
+ import getpass
+ import mindbridgeapi as mbapi
+
+ url = "subdomain.mindbridge.ai"
+ token = getpass.getpass(f"Token for {url}: ")
+
+ server = mbapi.Server(url=url, token=token)
+
+ organization = mbapi.OrganizationItem(name="My Organization name")
+ organization = server.organizations.create(organization)
+
+ # Create engagements, analyses and run them, etc.
+ ```
+
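The README example above stops after creating an organization. Continuing from that snippet (reusing `server`, `organization`, and the `mbapi` alias), here is a rough sketch of how the rest of the flow might look based on the modules added in this wheel (`engagements.py`, `analyses.py`); the `server.engagements`/`server.analyses` attributes and the `EngagementItem`/`AnalysisItem` constructor arguments used here are assumptions for illustration, not documented API.

```python
# Hypothetical continuation of the README example above.
# Assumes server.engagements and server.analyses follow the same create()
# pattern as server.organizations, and that the item constructors accept
# these fields.
engagement = server.engagements.create(
    mbapi.EngagementItem(name="FY2024 audit", organization_id=organization.id)
)
analysis = server.analyses.create(
    mbapi.AnalysisItem(name="General ledger analysis", engagement_id=engagement.id)
)

# After uploading and attaching source files (see file_manager.py and
# analysis_sources.py), wait for ingestion, run, and wait for completion.
analysis = server.analyses.wait_for_analysis_sources(analysis)
analysis = server.analyses.run(analysis)
analysis = server.analyses.wait_for_analysis(analysis)
```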
mindbridge_api_python_client-1.4.8.dist-info/RECORD
@@ -0,0 +1,43 @@
+ mindbridgeapi/__init__.py,sha256=8G4ZvHpaG2IZvzzbqPSYFjr1mKI2qorCDAfrC9ERh90,2804
+ mindbridgeapi/accounting_period.py,sha256=Wpg8ICAL6P_A6jdsfEXoozR77T3f4R4k05aXrZi1GS0,1186
+ mindbridgeapi/analyses.py,sha256=NqQ78Rz9c-RKXonYrvOYM8eowvxqPBbhulYyA2XhHUw,14053
+ mindbridgeapi/analysis_item.py,sha256=Duj5oF6n46mY8K-tu2NqeSwEZRKaQjgV3SL9MxpCn9U,6081
+ mindbridgeapi/analysis_period.py,sha256=SpmmOb0HvOLhaY3ZYeIaws3JUvQQb0fCfGeXOfFRZEw,2219
+ mindbridgeapi/analysis_source_item.py,sha256=MyED_MXyxrL7_i77ovlxwB2Jj2nGssR2soYmb_A3UE8,7427
+ mindbridgeapi/analysis_source_type_item.py,sha256=kUDsneQ_CTY5isv1LzseDYRGaj3BR8XDqEqLfnu7K9A,2885
+ mindbridgeapi/analysis_source_types.py,sha256=WHZpoO9inmrz571rJf4CjFqDESewxD5EFhMEHHi1Q6A,1214
+ mindbridgeapi/analysis_sources.py,sha256=bjNpsMRnQzCVhQ8wAqTTcJ1IJrB9hHVW71Va4KhI1es,4783
+ mindbridgeapi/analysis_type_item.py,sha256=fJkqSdppcHfCODAEXXgwlMHJ4OKex7gzNGH9rAgfzdE,1770
+ mindbridgeapi/analysis_types.py,sha256=OpiQQavoO1zcWCJirRvTX4LCoBACnjCOOZXn1j-Q8yI,2178
+ mindbridgeapi/async_results.py,sha256=T-n5gAlL7IOJeui4e5yHhCPU9eTnraaqLSySiMkLEMY,6575
+ mindbridgeapi/base_set.py,sha256=okBFRJ67JOydHarKL_5dgHS4-9twwttmZqmYf6sqlLk,5973
+ mindbridgeapi/chunked_file_item.py,sha256=dGUco2qjl0xmbJ59AYXPm4T1n6psoLBjsCOVQgXcsgk,1208
+ mindbridgeapi/chunked_file_part_item.py,sha256=-iWXTCIR0cLX9mZWJC-eqmUckhg_PhGEGuZWXzXdhEM,852
+ mindbridgeapi/chunked_files.py,sha256=7B61HihsX1ODmpkQGSTBzL6yQc1J8jwWEZXGUzhkwAA,2303
+ mindbridgeapi/column_mapping.py,sha256=A76_7gQEvdTN_RihuoXKqI6eKEVG2t3kceEAXZO_yUg,739
+ mindbridgeapi/common_validators.py,sha256=EVnpt7-PVmqCldaPMBjSD1Zmu_QVfF2l5Lt3pMPqLjI,2273
+ mindbridgeapi/data_tables.py,sha256=jnSs2zqCdWdjY4gQFNkIruvoFEdqDitLreoa4rexlhA,7323
+ mindbridgeapi/engagement_item.py,sha256=uBC8zP3qIZjxT0_xBSPVXosOFwcypFJ6P6KDymD1QgA,3646
+ mindbridgeapi/engagements.py,sha256=Y2rn_sCFUAaOeB-VnZH97I0qrriIk4r0z4xBDYU5-o4,3347
+ mindbridgeapi/enumerations/analysis_source_type.py,sha256=eqFg2OOpt_qInwboW81avmnJSTtzjrZwS2einKO4eQ0,5019
+ mindbridgeapi/enumerations/analysis_type.py,sha256=kEZINjyqqwsXyXcm_NCLBcQc_VIEc_fXqpwVuPIkKvI,1218
+ mindbridgeapi/enumerations/deprecated_enum.py,sha256=703qNP-MgdJS1zJtRTGuFwpYZCiViLhw5ilFWI8-Cf8,2199
+ mindbridgeapi/enumerations/system_library.py,sha256=Q9xaZEXA6uArwuyMhO5gclcF1vUtuDun40NjVXC6IGE,1066
+ mindbridgeapi/exceptions.py,sha256=_eSZaajT4ssuTTg57clv24ARZyswqqMAA3lxIiP6P18,2937
+ mindbridgeapi/file_manager.py,sha256=5Z3_BcMq1ObaKZiOjNUxsZAzqq20Kx9HG32lEozTSqw,7249
+ mindbridgeapi/file_manager_item.py,sha256=5Z_n4ijgqfM-Psj3snDQZWe9jLlJLLiYHvyJ5xwmkz8,3640
+ mindbridgeapi/generated_pydantic_model/model.py,sha256=dZhsYcD4smFAiQsmYOKe6Ak8uzbahwUa0UcmXxgALkA,210089
+ mindbridgeapi/libraries.py,sha256=16Ldvf3AxgvR3n5Gn8a2AkeFYIfEiEAZ767o7aeUxrk,1879
+ mindbridgeapi/library_item.py,sha256=ykbJRxdQmIwAWxpfFHOPMZz1_FHRpnErQnYLCMc8aPg,1629
+ mindbridgeapi/organization_item.py,sha256=9X6jVeWAoKIXpBnEQ-bkwdjY-ZwN9B_RH4IKHh0Kdps,2032
+ mindbridgeapi/organizations.py,sha256=EornYxX2dF-QAJtNuAKIYIVicgoEkoLllT_dgBFm2AE,2810
+ mindbridgeapi/server.py,sha256=ty1L9yiVuwK3Q1sJOsKdYgBuzvWQZ2GrXrgcRouXhBA,7586
+ mindbridgeapi/task_item.py,sha256=T_kCYPt1Neu4jIF_xWS4C0ICncXPgC2Na_qOEfKMPZU,1502
+ mindbridgeapi/tasks.py,sha256=4BpzsVOI_Hd6B3WAGVlb9qGrXF93aMKMCTK_iCPPe6A,5096
+ mindbridgeapi/transaction_id_selection.py,sha256=gR53cQYegS80WyK1Ao1IEFm0r1DRChnzgzBNFtWPjns,1178
+ mindbridgeapi/users.py,sha256=UVzMmEWXxdtRmbNnah8y9Lgg1Dbt4ZJ9R0KkQTZ7v0M,1314
+ mindbridgeapi/virtual_column.py,sha256=0Ke0VLO2uKRXoiUQC9PhNaHSz3QTVla4gU_y1hupJmk,3802
+ mindbridge_api_python_client-1.4.8.dist-info/LICENSE.txt,sha256=mplBHRn1FrWL_Ef6JvSXBssVPDfPUm57qAP0FeeenTQ,565
+ mindbridge_api_python_client-1.4.8.dist-info/METADATA,sha256=ofrLn8ppEVqwAmLLifgvE15LnCgykwQx3DVLnxwi1H4,2498
+ mindbridge_api_python_client-1.4.8.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ mindbridge_api_python_client-1.4.8.dist-info/RECORD,,
mindbridge_api_python_client-1.4.8.dist-info/WHEEL
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: poetry-core 1.9.0
+ Root-Is-Purelib: true
+ Tag: py3-none-any
mindbridgeapi/__init__.py
@@ -0,0 +1,69 @@
+ #
+ # Copyright MindBridge Analytics Inc. all rights reserved.
+ #
+ # This material is confidential and may not be copied, distributed,
+ # reversed engineered, decompiled or otherwise disseminated without
+ # the prior written consent of MindBridge Analytics Inc.
+ #
+
+ from mindbridgeapi.accounting_period import AccountingPeriod
+ from mindbridgeapi.analysis_item import AnalysisItem
+ from mindbridgeapi.analysis_period import AnalysisPeriod
+ from mindbridgeapi.analysis_source_item import AnalysisSourceItem
+ from mindbridgeapi.analysis_source_type_item import AnalysisSourceTypeItem
+ from mindbridgeapi.analysis_type_item import AnalysisTypeItem
+ from mindbridgeapi.chunked_file_item import ChunkedFileItem
+ from mindbridgeapi.chunked_file_part_item import ChunkedFilePartItem
+ from mindbridgeapi.column_mapping import ColumnMapping
+ from mindbridgeapi.engagement_item import EngagementItem
+ from mindbridgeapi.enumerations.analysis_source_type import AnalysisSourceType
+ from mindbridgeapi.enumerations.analysis_type import AnalysisType
+ from mindbridgeapi.enumerations.system_library import SystemLibrary
+ from mindbridgeapi.file_manager_item import FileManagerItem
+ from mindbridgeapi.generated_pydantic_model.model import (
+     PeriodType as AnalysisEffectiveDateMetricsPeriod,
+ )
+ from mindbridgeapi.generated_pydantic_model.model import Frequency
+ from mindbridgeapi.generated_pydantic_model.model import Status4 as TaskStatus
+ from mindbridgeapi.generated_pydantic_model.model import TargetWorkflowState
+ from mindbridgeapi.generated_pydantic_model.model import Type5 as FileManagerType
+ from mindbridgeapi.generated_pydantic_model.model import Type11 as TaskType
+ from mindbridgeapi.generated_pydantic_model.model import Type13 as TransactionIdType
+ from mindbridgeapi.generated_pydantic_model.model import Type17 as VirtualColumnType
+ from mindbridgeapi.library_item import LibraryItem
+ from mindbridgeapi.organization_item import OrganizationItem
+ from mindbridgeapi.server import Server
+ from mindbridgeapi.task_item import TaskItem
+ from mindbridgeapi.transaction_id_selection import TransactionIdSelection
+ from mindbridgeapi.virtual_column import VirtualColumn
+
+ __all__ = [
+     "AccountingPeriod",
+     "AnalysisItem",
+     "AnalysisPeriod",
+     "AnalysisSourceItem",
+     "AnalysisSourceTypeItem",
+     "AnalysisTypeItem",
+     "ChunkedFileItem",
+     "ChunkedFilePartItem",
+     "ColumnMapping",
+     "EngagementItem",
+     "AnalysisSourceType",
+     "AnalysisType",
+     "SystemLibrary",
+     "FileManagerItem",
+     "AnalysisEffectiveDateMetricsPeriod",
+     "Frequency",
+     "TaskStatus",
+     "TargetWorkflowState",
+     "FileManagerType",
+     "TaskType",
+     "TransactionIdType",
+     "VirtualColumnType",
+     "LibraryItem",
+     "OrganizationItem",
+     "Server",
+     "TaskItem",
+     "TransactionIdSelection",
+     "VirtualColumn",
+ ]
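Worth noting for readers of the generated model: `__init__.py` re-exports the auto-numbered enums from `generated_pydantic_model/model.py` (`Status4`, `Type11`, `Type13`, ...) under stable, readable names. A quick check of that aliasing, for illustration only:

```python
import mindbridgeapi as mbapi
from mindbridgeapi.generated_pydantic_model.model import Status4, Type11

# The package-level names are the same objects as the generated classes,
# so client code never has to reference the auto-numbered names directly.
assert mbapi.TaskStatus is Status4
assert mbapi.TaskType is Type11
```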
mindbridgeapi/accounting_period.py
@@ -0,0 +1,34 @@
+ #
+ # Copyright MindBridge Analytics Inc. all rights reserved.
+ #
+ # This material is confidential and may not be copied, distributed,
+ # reversed engineered, decompiled or otherwise disseminated without
+ # the prior written consent of MindBridge Analytics Inc.
+ #
+
+ from pydantic import ConfigDict, Field, model_validator
+ from mindbridgeapi.common_validators import _warning_if_extra_fields
+ from mindbridgeapi.generated_pydantic_model.model import (
+     ApiAccountingPeriodRead,
+     Frequency,
+ )
+
+
+ class AccountingPeriod(ApiAccountingPeriodRead):
+     fiscal_start_month: int = Field().merge_field_infos(
+         ApiAccountingPeriodRead.model_fields["fiscal_start_month"], default=1
+     )
+     fiscal_start_day: int = Field().merge_field_infos(
+         ApiAccountingPeriodRead.model_fields["fiscal_start_day"], default=1
+     )
+     frequency: Frequency = Field().merge_field_infos(
+         ApiAccountingPeriodRead.model_fields["frequency"], default=Frequency.ANNUAL
+     )
+
+     model_config = ConfigDict(
+         extra="allow",
+         validate_assignment=True,
+         validate_default=True,
+         validate_return=True,
+     )
+     _ = model_validator(mode="after")(_warning_if_extra_fields)
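`AccountingPeriod` keeps the generated field metadata from `ApiAccountingPeriodRead` via `merge_field_infos`, but swaps in friendlier defaults (fiscal start of January 1, annual frequency) and warns, rather than fails, on extra fields. A minimal sketch of the effect, assuming the generated read model has no other required fields:

```python
import mindbridgeapi as mbapi

# With the overridden defaults, an empty AccountingPeriod should start the
# fiscal year on January 1 with an annual frequency (assumption: the generated
# read model imposes no other required fields).
period = mbapi.AccountingPeriod()
print(period.fiscal_start_month, period.fiscal_start_day, period.frequency)
# Expected output along the lines of: 1 1 Frequency.ANNUAL
```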
mindbridgeapi/analyses.py
@@ -0,0 +1,383 @@
+ #
+ # Copyright MindBridge Analytics Inc. all rights reserved.
+ #
+ # This material is confidential and may not be copied, distributed,
+ # reversed engineered, decompiled or otherwise disseminated without
+ # the prior written consent of MindBridge Analytics Inc.
+ #
+
+ from dataclasses import dataclass
+ from datetime import date
+ from functools import cached_property
+ import logging
+ from typing import TYPE_CHECKING, Any, Dict, Generator, Optional
+ import warnings
+ from mindbridgeapi.analysis_item import AnalysisItem
+ from mindbridgeapi.analysis_sources import AnalysisSources
+ from mindbridgeapi.async_results import AsyncResults
+ from mindbridgeapi.base_set import BaseSet
+ from mindbridgeapi.data_tables import DataTables
+ from mindbridgeapi.exceptions import (
+     ItemAlreadyExistsError,
+     ItemError,
+     ItemNotFoundError,
+     ParameterError,
+     UnexpectedServerError,
+     ValidationError,
+ )
+ from mindbridgeapi.generated_pydantic_model.model import (
+     ApiAnalysisStatusRead,
+     ApiAsyncResult,
+     ApiEngagementRollForwardRequest,
+     EntityType,
+ )
+ from mindbridgeapi.generated_pydantic_model.model import Type1 as AsyncResultType
+ from mindbridgeapi.tasks import Tasks
+
+ if TYPE_CHECKING:
+     from mindbridgeapi.engagement_item import EngagementItem
+
+ logger = logging.getLogger(__name__)
+
+
+ @dataclass
+ class Analyses(BaseSet):
+     def __post_init__(self) -> None:
+         self.async_result_set = AsyncResults(server=self.server)
+
+     @cached_property
+     def base_url(self) -> str:
+         return f"{self.server.base_url}/analyses"
+
+     def create(self, item: AnalysisItem) -> AnalysisItem:
+         if getattr(item, "id", None) is not None and item.id is not None:
+             raise ItemAlreadyExistsError(item.id)
+
+         url = self.base_url
+         resp_dict = super()._create(url=url, json=item.create_json)
+         analysis = AnalysisItem.model_validate(resp_dict)
+         self.restart_analysis_sources(analysis)
+         self.restart_data_tables(analysis)
+         self.restart_tasks(analysis)
+
+         return analysis
+
+     def update(self, item: AnalysisItem) -> AnalysisItem:
+         if getattr(item, "id", None) is None:
+             raise ItemNotFoundError
+
+         url = f"{self.base_url}/{item.id}"
+         resp_dict = super()._update(url=url, json=item.update_json)
+
+         analysis = AnalysisItem.model_validate(resp_dict)
+         self.restart_analysis_sources(analysis)
+         self.restart_data_tables(analysis)
+         self.restart_tasks(analysis)
+
+         return analysis
+
+     def get(
+         self, json: Optional[Dict[str, Any]] = None
+     ) -> Generator[AnalysisItem, None, None]:
+         if json is None:
+             json = {}
+
+         url = f"{self.base_url}/query"
+         for resp_dict in super()._get(url=url, json=json):
+             analysis = AnalysisItem.model_validate(resp_dict)
+             self.restart_analysis_sources(analysis)
+             self.restart_data_tables(analysis)
+             self.restart_tasks(analysis)
+
+             yield analysis
+
+     def delete(self, item: AnalysisItem) -> None:
+         if getattr(item, "id", None) is None:
+             raise ItemNotFoundError
+
+         url = f"{self.base_url}/{item.id}"
+         super()._delete(url=url)
+
+     def start(self, item: AnalysisItem) -> AnalysisItem:
+         warnings.warn(
+             "Use the run function instead", category=DeprecationWarning, stacklevel=2
+         )
+         return self.run(item)
+
+     def run(self, item: AnalysisItem) -> AnalysisItem:
+         analysis_id = getattr(item, "id", None)
+         if analysis_id is None:
+             raise ItemNotFoundError
+
+         url = f"{self.base_url}/{analysis_id}/run"
+         resp_dict = super()._create(url=url)
+         async_result = ApiAsyncResult.model_validate(resp_dict)
+
+         if async_result.type != AsyncResultType.ANALYSIS_RUN:
+             raise ItemError(f"Async Result Type was {async_result.type}.")
+
+         if async_result.entity_id != analysis_id:
+             raise UnexpectedServerError(
+                 details="async_result.entity_id was not the same as the item id"
+             )
+
+         return self.get_by_id(analysis_id)
+
+     def wait_for_analysis_sources(
+         self,
+         analysis: AnalysisItem,
+         check_interval_seconds: int = -873,  # Deprecated
+         max_wait_minutes: int = 24 * 60,
+     ) -> AnalysisItem:
+         if check_interval_seconds != -873:
+             warnings.warn(
+                 "check_interval_seconds was provided to wait_for_analysis_sources as "
+                 f"{check_interval_seconds}. This will not be referenced as now the "
+                 "check interval will be exponentially increasing to a max interval",
+                 category=DeprecationWarning,
+                 stacklevel=2,
+             )
+
+         del check_interval_seconds
+
+         analysis_id = getattr(analysis, "id", None)
+         if analysis_id is None:
+             raise ItemNotFoundError
+
+         analysis = self.get_by_id(analysis_id)
+
+         if getattr(analysis, "id", None) != analysis_id:
+             raise UnexpectedServerError(  # noqa: TRY003
+                 "analysis id was not the same as requested."
+             )
+
+         # Get the list of async_result ids
+         async_results_to_check = []
+         for analysis_source in analysis.analysis_sources:
+             async_results = self.async_result_set.get(
+                 json={
+                     "$and": [
+                         {"entityId": {"$eq": analysis_source.id}},
+                         {"type": {"$eq": AsyncResultType.ANALYSIS_SOURCE_INGESTION}},
+                         {"entityType": {"$eq": EntityType.ANALYSIS_SOURCE}},
+                     ]
+                 }
+             )
+             async_results_list = list(async_results)
+             if len(async_results_list) == 0:
+                 """
+                 This shouldn't occur as analysis sources are started as soon as they are
+                 added to the analysis
+                 """
+                 raise UnexpectedServerError(
+                     f"Unable to find {EntityType.ANALYSIS_SOURCE} status for:"
+                     f" {analysis_source.id}."
+                 )
+
+             async_result = max(
+                 async_results_list,
+                 key=lambda x: getattr(x, "last_modified_date", date.min),
+             )
+             async_results_to_check.append(async_result)
+
+         if len(async_results_to_check) == 0:
+             """
+             There will be one item in this list for every analysis_source otherwise an
+             error would have been raised in the loop above.
+             """
+             raise ItemError("Analysis has no analysis sources.")  # noqa: TRY003
+
+         self.async_result_set._wait_for_async_results(
+             async_results=async_results_to_check,
+             max_wait_minutes=max_wait_minutes,
+             init_interval_sec=11,
+         )
+
+         analysis_status = self.status(analysis)
+         analysis_status_ready = getattr(analysis_status, "ready", False)
+         analysis_status_preflight_errors = getattr(
+             analysis_status, "preflight_errors", []
+         )
+         if analysis_status_ready:
+             logger.info(f"Analysis ({analysis_id}) is ready to run")
+         else:
+             err_msg = f"Analysis ({analysis_id}) is not ready to run"
+             if len(analysis_status_preflight_errors) == 0:
+                 err_msg = f"{err_msg} (no preflight_errors)"
+             else:
+                 preflight_errors_str = ", ".join(
+                     [str(i.name) for i in analysis_status_preflight_errors]
+                 )
+                 err_msg = f"{err_msg}. Preflight Errors: {preflight_errors_str}"
+
+             raise ValidationError(err_msg)
+
+         return self.get_by_id(analysis_id)
+
+     def status(self, item: AnalysisItem) -> ApiAnalysisStatusRead:
+         if getattr(item, "id", None) is None:
+             raise ItemNotFoundError
+
+         url = f"{self.base_url}/{item.id}/status"
+         resp_dict = super()._get_by_id(url=url)
+         analysis_status = ApiAnalysisStatusRead.model_validate(resp_dict)
+
+         self._status_log_message(item, analysis_status)
+
+         return analysis_status
+
+     @staticmethod
+     def _status_log_message(
+         analysis: AnalysisItem, analysis_status_item: ApiAnalysisStatusRead
+     ) -> None:
+         log_message = f"Analysis Status for {analysis.name} ({analysis.id}):"
+         log_message += f"\n ready: {analysis_status_item.ready}"
+         log_message += f"\n status: {analysis_status_item.status}"
+         log_message += (
+             "\n unmapped_account_mapping_count:"
+             f" {analysis_status_item.unmapped_account_mapping_count}"
+         )
+         log_message += (
+             "\n mapped_account_mapping_count:"
+             f" {analysis_status_item.mapped_account_mapping_count}"
+         )
+         log_message += (
+             "\n inferred_account_mapping_count:"
+             f" {analysis_status_item.inferred_account_mapping_count}"
+         )
+         log_message += "\n preflight_errors:"
+         for pfe in getattr(analysis_status_item, "preflight_errors", []):
+             log_message += f"\n - {pfe}"
+
+         log_message += "\n source_statuses:"
+         for source_status in getattr(analysis_status_item, "source_statuses", []):
+             log_message += f"\n - source_id: {source_status.source_id}"
+             log_message += f"\n - status: {source_status.status}"
+             log_message += (
+                 "\n - analysis_source_type_id:"
+                 f" {source_status.analysis_source_type_id}"
+             )
+             log_message += f"\n - period_id: {source_status.period_id}"
+             log_message += "\n"
+
+         logger.info(log_message)
+
+     def wait_for_analysis(
+         self,
+         analysis: AnalysisItem,
+         check_interval_seconds: int = -873,  # Deprecated
+         max_wait_minutes: int = 24 * 60,
+     ) -> AnalysisItem:
+         if check_interval_seconds != -873:
+             warnings.warn(
+                 "check_interval_seconds was provided to wait_for_analysis as "
+                 f"{check_interval_seconds}. This will not be referenced as now the "
+                 "check interval will be exponentially increasing to a max interval",
+                 category=DeprecationWarning,
+                 stacklevel=2,
+             )
+
+         del check_interval_seconds
+
+         analysis_id = getattr(analysis, "id", None)
+         if analysis_id is None:
+             raise ItemNotFoundError
+
+         async_results = self.async_result_set.get(
+             json={
+                 "$and": [
+                     {"entityId": {"$eq": analysis_id}},
+                     {"type": {"$eq": AsyncResultType.ANALYSIS_RUN}},
+                     {"entityType": {"$eq": EntityType.ANALYSIS}},
+                 ]
+             }
+         )
+         async_results_list = list(async_results)
+         if len(async_results_list) == 0:
+             raise ValidationError(
+                 f"Unable to find {EntityType.ANALYSIS} run status for: {analysis_id}."
+                 " Possibly the analysis has not been started yet?"
+             )
+
+         async_result = max(
+             async_results_list, key=lambda x: getattr(x, "last_modified_date", date.min)
+         )
+
+         self.async_result_set._wait_for_async_result(
+             async_result=async_result,
+             max_wait_minutes=max_wait_minutes,
+             init_interval_sec=76,
+         )
+
+         return self.get_by_id(analysis_id)
+
+     def roll_forward_analysis_to_engagement(
+         self,
+         analysis_item: AnalysisItem,
+         engagement_item: "EngagementItem",
+         interim: bool = False,
+     ) -> AnalysisItem:
+         url = f"{self.base_url}/engagement-roll-forward"
+
+         if analysis_item.id is None or analysis_item.engagement_id is None:
+             raise ItemNotFoundError
+
+         if engagement_item.id is None:
+             raise ItemNotFoundError
+
+         if analysis_item.engagement_id == engagement_item.id:
+             raise ParameterError(
+                 parameter_name="engagement_id",
+                 details=(
+                     "The target engagement cannot be the same engagement in which the "
+                     "analysis exists."
+                 ),
+             )
+
+         roll_forward_request = ApiEngagementRollForwardRequest(
+             analysis_id=analysis_item.id,
+             interim=interim,
+             target_engagement_id=engagement_item.id,
+         )
+         roll_forward_request_json = roll_forward_request.model_dump(
+             mode="json", by_alias=True, exclude_none=True
+         )
+         resp_dict = super()._create(url=url, json=roll_forward_request_json)
+         async_result = ApiAsyncResult.model_validate(resp_dict)
+         if async_result.entity_id is None:
+             raise UnexpectedServerError(details="async_result.entity_id was None")
+
+         return self.get_by_id(async_result.entity_id)
+
+     def get_by_id(self, id: str) -> AnalysisItem:
+         url = f"{self.base_url}/{id}"
+         resp_dict = super()._get_by_id(url=url)
+         analysis = AnalysisItem.model_validate(resp_dict)
+         self.restart_analysis_sources(analysis)
+         self.restart_data_tables(analysis)
+
+         return analysis
+
+     def restart_data_tables(self, analysis_item: AnalysisItem) -> None:
+         if getattr(analysis_item, "id", None) is None:
+             raise ItemNotFoundError
+
+         analysis_item.data_tables = DataTables(server=self.server).get(
+             json={"analysisId": {"$eq": analysis_item.id}}
+         )
+
+     def restart_analysis_sources(self, analysis_item: AnalysisItem) -> None:
+         if getattr(analysis_item, "id", None) is None:
+             raise ItemNotFoundError
+
+         analysis_item.analysis_sources = AnalysisSources(server=self.server).get(
+             json={"analysisId": {"$eq": analysis_item.id}}
+         )
+
+     def restart_tasks(self, analysis_item: AnalysisItem) -> None:
+         if getattr(analysis_item, "id", None) is None:
+             raise ItemNotFoundError
+
+         analysis_item.tasks = Tasks(server=self.server).get(
+             json={"analysisId": {"$eq": analysis_item.id}}
+         )
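One method in this file that is easy to miss: `roll_forward_analysis_to_engagement` posts an `ApiEngagementRollForwardRequest`, then fetches the newly created analysis via the async result's `entity_id`, and it refuses to roll forward into the analysis's own engagement. A hedged usage sketch; the `server.analyses`/`server.engagements` attributes and the placeholder IDs are assumptions for illustration, not part of the shown code:

```python
# Placeholder IDs for illustration only; assumes server.analyses is an Analyses
# set and server.engagements exposes a comparable get_by_id().
source_analysis = server.analyses.get_by_id("<existing-analysis-id>")
target_engagement = server.engagements.get_by_id("<other-engagement-id>")

# Raises ParameterError if target_engagement is the engagement the analysis
# already belongs to; otherwise returns the rolled-forward analysis.
rolled_forward = server.analyses.roll_forward_analysis_to_engagement(
    analysis_item=source_analysis,
    engagement_item=target_engagement,
    interim=False,
)
```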