onetick-py 1.162.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (152) hide show
  1. locator_parser/__init__.py +0 -0
  2. locator_parser/acl.py +73 -0
  3. locator_parser/actions.py +266 -0
  4. locator_parser/common.py +365 -0
  5. locator_parser/io.py +41 -0
  6. locator_parser/locator.py +150 -0
  7. onetick/__init__.py +101 -0
  8. onetick/doc_utilities/__init__.py +3 -0
  9. onetick/doc_utilities/napoleon.py +40 -0
  10. onetick/doc_utilities/ot_doctest.py +140 -0
  11. onetick/doc_utilities/snippets.py +280 -0
  12. onetick/lib/__init__.py +4 -0
  13. onetick/lib/instance.py +138 -0
  14. onetick/py/__init__.py +290 -0
  15. onetick/py/_stack_info.py +89 -0
  16. onetick/py/_version.py +2 -0
  17. onetick/py/aggregations/__init__.py +11 -0
  18. onetick/py/aggregations/_base.py +645 -0
  19. onetick/py/aggregations/_docs.py +912 -0
  20. onetick/py/aggregations/compute.py +286 -0
  21. onetick/py/aggregations/functions.py +2216 -0
  22. onetick/py/aggregations/generic.py +104 -0
  23. onetick/py/aggregations/high_low.py +80 -0
  24. onetick/py/aggregations/num_distinct.py +83 -0
  25. onetick/py/aggregations/order_book.py +427 -0
  26. onetick/py/aggregations/other.py +1014 -0
  27. onetick/py/backports.py +26 -0
  28. onetick/py/cache.py +373 -0
  29. onetick/py/callback/__init__.py +5 -0
  30. onetick/py/callback/callback.py +275 -0
  31. onetick/py/callback/callbacks.py +131 -0
  32. onetick/py/compatibility.py +752 -0
  33. onetick/py/configuration.py +736 -0
  34. onetick/py/core/__init__.py +0 -0
  35. onetick/py/core/_csv_inspector.py +93 -0
  36. onetick/py/core/_internal/__init__.py +0 -0
  37. onetick/py/core/_internal/_manually_bound_value.py +6 -0
  38. onetick/py/core/_internal/_nodes_history.py +250 -0
  39. onetick/py/core/_internal/_op_utils/__init__.py +0 -0
  40. onetick/py/core/_internal/_op_utils/every_operand.py +9 -0
  41. onetick/py/core/_internal/_op_utils/is_const.py +10 -0
  42. onetick/py/core/_internal/_per_tick_scripts/tick_list_sort_template.script +121 -0
  43. onetick/py/core/_internal/_proxy_node.py +140 -0
  44. onetick/py/core/_internal/_state_objects.py +2307 -0
  45. onetick/py/core/_internal/_state_vars.py +87 -0
  46. onetick/py/core/_source/__init__.py +0 -0
  47. onetick/py/core/_source/_symbol_param.py +95 -0
  48. onetick/py/core/_source/schema.py +97 -0
  49. onetick/py/core/_source/source_methods/__init__.py +0 -0
  50. onetick/py/core/_source/source_methods/aggregations.py +810 -0
  51. onetick/py/core/_source/source_methods/applyers.py +296 -0
  52. onetick/py/core/_source/source_methods/columns.py +141 -0
  53. onetick/py/core/_source/source_methods/data_quality.py +301 -0
  54. onetick/py/core/_source/source_methods/debugs.py +270 -0
  55. onetick/py/core/_source/source_methods/drops.py +120 -0
  56. onetick/py/core/_source/source_methods/fields.py +619 -0
  57. onetick/py/core/_source/source_methods/filters.py +1001 -0
  58. onetick/py/core/_source/source_methods/joins.py +1393 -0
  59. onetick/py/core/_source/source_methods/merges.py +566 -0
  60. onetick/py/core/_source/source_methods/misc.py +1325 -0
  61. onetick/py/core/_source/source_methods/pandases.py +155 -0
  62. onetick/py/core/_source/source_methods/renames.py +356 -0
  63. onetick/py/core/_source/source_methods/sorts.py +183 -0
  64. onetick/py/core/_source/source_methods/switches.py +142 -0
  65. onetick/py/core/_source/source_methods/symbols.py +117 -0
  66. onetick/py/core/_source/source_methods/times.py +627 -0
  67. onetick/py/core/_source/source_methods/writes.py +702 -0
  68. onetick/py/core/_source/symbol.py +202 -0
  69. onetick/py/core/_source/tmp_otq.py +222 -0
  70. onetick/py/core/column.py +209 -0
  71. onetick/py/core/column_operations/__init__.py +0 -0
  72. onetick/py/core/column_operations/_methods/__init__.py +4 -0
  73. onetick/py/core/column_operations/_methods/_internal.py +28 -0
  74. onetick/py/core/column_operations/_methods/conversions.py +215 -0
  75. onetick/py/core/column_operations/_methods/methods.py +294 -0
  76. onetick/py/core/column_operations/_methods/op_types.py +150 -0
  77. onetick/py/core/column_operations/accessors/__init__.py +0 -0
  78. onetick/py/core/column_operations/accessors/_accessor.py +30 -0
  79. onetick/py/core/column_operations/accessors/decimal_accessor.py +92 -0
  80. onetick/py/core/column_operations/accessors/dt_accessor.py +464 -0
  81. onetick/py/core/column_operations/accessors/float_accessor.py +160 -0
  82. onetick/py/core/column_operations/accessors/str_accessor.py +1374 -0
  83. onetick/py/core/column_operations/base.py +1061 -0
  84. onetick/py/core/cut_builder.py +149 -0
  85. onetick/py/core/db_constants.py +20 -0
  86. onetick/py/core/eval_query.py +244 -0
  87. onetick/py/core/lambda_object.py +442 -0
  88. onetick/py/core/multi_output_source.py +193 -0
  89. onetick/py/core/per_tick_script.py +2253 -0
  90. onetick/py/core/query_inspector.py +465 -0
  91. onetick/py/core/source.py +1663 -0
  92. onetick/py/db/__init__.py +2 -0
  93. onetick/py/db/_inspection.py +1042 -0
  94. onetick/py/db/db.py +1423 -0
  95. onetick/py/db/utils.py +64 -0
  96. onetick/py/docs/__init__.py +0 -0
  97. onetick/py/docs/docstring_parser.py +112 -0
  98. onetick/py/docs/utils.py +81 -0
  99. onetick/py/functions.py +2354 -0
  100. onetick/py/license.py +188 -0
  101. onetick/py/log.py +88 -0
  102. onetick/py/math.py +947 -0
  103. onetick/py/misc.py +437 -0
  104. onetick/py/oqd/__init__.py +22 -0
  105. onetick/py/oqd/eps.py +1195 -0
  106. onetick/py/oqd/sources.py +325 -0
  107. onetick/py/otq.py +211 -0
  108. onetick/py/pyomd_mock.py +47 -0
  109. onetick/py/run.py +841 -0
  110. onetick/py/servers.py +173 -0
  111. onetick/py/session.py +1342 -0
  112. onetick/py/sources/__init__.py +19 -0
  113. onetick/py/sources/cache.py +167 -0
  114. onetick/py/sources/common.py +126 -0
  115. onetick/py/sources/csv.py +642 -0
  116. onetick/py/sources/custom.py +85 -0
  117. onetick/py/sources/data_file.py +305 -0
  118. onetick/py/sources/data_source.py +1049 -0
  119. onetick/py/sources/empty.py +94 -0
  120. onetick/py/sources/odbc.py +337 -0
  121. onetick/py/sources/order_book.py +238 -0
  122. onetick/py/sources/parquet.py +168 -0
  123. onetick/py/sources/pit.py +191 -0
  124. onetick/py/sources/query.py +495 -0
  125. onetick/py/sources/snapshots.py +419 -0
  126. onetick/py/sources/split_query_output_by_symbol.py +198 -0
  127. onetick/py/sources/symbology_mapping.py +123 -0
  128. onetick/py/sources/symbols.py +357 -0
  129. onetick/py/sources/ticks.py +825 -0
  130. onetick/py/sql.py +70 -0
  131. onetick/py/state.py +256 -0
  132. onetick/py/types.py +2056 -0
  133. onetick/py/utils/__init__.py +70 -0
  134. onetick/py/utils/acl.py +93 -0
  135. onetick/py/utils/config.py +186 -0
  136. onetick/py/utils/default.py +49 -0
  137. onetick/py/utils/file.py +38 -0
  138. onetick/py/utils/helpers.py +76 -0
  139. onetick/py/utils/locator.py +94 -0
  140. onetick/py/utils/perf.py +499 -0
  141. onetick/py/utils/query.py +49 -0
  142. onetick/py/utils/render.py +1139 -0
  143. onetick/py/utils/script.py +244 -0
  144. onetick/py/utils/temp.py +471 -0
  145. onetick/py/utils/types.py +118 -0
  146. onetick/py/utils/tz.py +82 -0
  147. onetick_py-1.162.2.dist-info/METADATA +148 -0
  148. onetick_py-1.162.2.dist-info/RECORD +152 -0
  149. onetick_py-1.162.2.dist-info/WHEEL +5 -0
  150. onetick_py-1.162.2.dist-info/entry_points.txt +2 -0
  151. onetick_py-1.162.2.dist-info/licenses/LICENSE +21 -0
  152. onetick_py-1.162.2.dist-info/top_level.txt +2 -0
@@ -0,0 +1,104 @@
1
+ from typing import TYPE_CHECKING, Optional
2
+
3
+ if TYPE_CHECKING:
4
+ from onetick.py.core.source import Source # hack for annotations
5
+
6
+ import onetick.py as otp
7
+ from onetick.py.otq import otq
8
+ from onetick.py.core.column import _Column
9
+ from ._base import _Aggregation, _MultiColumnAggregation
10
+
11
+
12
class Generic(_Aggregation, _MultiColumnAggregation):
    """User-defined aggregation backed by the GENERIC_AGGREGATION event processor.

    ``query_fun`` receives a :class:`Source` representing the ticks of a bucket
    and must return a :class:`Source`; the fields that query produces become
    the output fields of the aggregation.
    """
    NAME = 'GENERIC_AGGREGATION'
    EP = otq.GenericAggregation

    # constructor arguments that must not be translated to EP parameters
    FIELDS_TO_SKIP = ['column_name', 'all_fields', 'output_field_name']
    FIELDS_MAPPING = dict(_Aggregation.FIELDS_MAPPING, **{
        'query_name': 'QUERY_NAME',
        'bucket_delimiter': 'BUCKET_DELIMITERS',
    })
    FIELDS_DEFAULT = dict(_Aggregation.FIELDS_DEFAULT, **{
        'bucket_delimiter': None,
    })
    _validations_to_skip = ['running_all_fields']

    def __init__(self,
                 query_fun,
                 bucket_delimiter: bool = False,
                 **kwargs):
        """
        Parameters
        ----------
        query_fun
            Callable applied to the per-bucket source; defines the output fields.
        bucket_delimiter: bool
            If True, the EP is asked to emit delimiter ticks
            (a ``DELIMITER`` field is then added to the output schema).
        kwargs
            Common aggregation parameters forwarded to the base class.
        """
        self._query: Optional['Source'] = None
        self._query_fun = query_fun
        self._query_params: Optional[dict] = None
        # the EP expects the literal 'D' to enable delimiters, unset otherwise
        self.bucket_delimiter = 'D' if bucket_delimiter else None

        # init variables which will be set later (in _modify_source)
        self.query_name: Optional[str] = None
        self._query_schema: Optional[dict] = None

        if 'all_fields' in kwargs:
            raise ValueError("Parameter 'all_fields' for generic aggregation is meaningless. "
                             "Aggregated source will have all fields returned by 'query_fun'.")
        # output fields are fully defined by 'query_fun', so pass all fields through
        kwargs['all_fields'] = True
        # the base class requires some input column; TIMESTAMP always exists
        super().__init__(column=_Column('TIMESTAMP'), **kwargs)

    def _set_query_params(self, **kwargs):
        # remembered here, consumed later by _make_query_object()
        self._query_params = kwargs

    def apply(self, src: 'Source', name: Optional[str] = None, **kwargs) -> 'Source':
        """Applies generic aggregation to Source and sets proper schema

        Parameters
        ----------
        src: Source
            Source to apply aggregation
        name: str, optional
            Name of output column. If not specified, self.column_name is used.
        kwargs: dict
            Parameters to be passed to `query_fun()` when creating aggregation query
        """
        self._set_query_params(**kwargs)
        return super().apply(src, name=name)

    def _make_query_object(self, schema):
        # Build the sub-query the EP will run per bucket: a manual-schema
        # local data source piped through the user-supplied function.
        query_params = self._query_params if self._query_params else {}
        query = otp.DataSource(symbols='LOCAL::', tick_type='ANY', schema_policy='manual', schema=schema)
        query = self._query_fun(query, **query_params)
        return query

    def _detect_query_fun_schema(self, res):
        # this will be translated to passthrough with symbol and tick type set
        if self._query is None:
            self._query = self._make_query_object(res.schema)

        return self._query.schema.copy()

    def _modify_source(self, res: 'Source', **kwargs):
        # Materialize the sub-query and register it in the temporary .otq file
        # so the EP can reference it by name.
        query_schema = self._detect_query_fun_schema(res)

        # schema will be used when validating output
        if self._query_schema is None:
            self._query_schema = query_schema

        if self._query is None:
            raise RuntimeError('Attempted to use `self._query` before initialization')

        # query_name will be used to create ep
        query_name = self._query._store_in_tmp_otq(
            res._tmp_otq, operation_suffix='generic_aggregation', add_passthrough=False,
        )
        self.query_name = f'THIS::{query_name}'

    def _get_common_schema(self, src, name):
        # base call is kept for its validation side effects; only the
        # group-by columns are carried over into the output schema
        super()._get_common_schema(src, name)
        return {
            column: src.schema[column]
            for column in map(str, self.group_by)
        }

    def _get_output_schema(self, src, name=None):
        # schema detected from query_fun, plus the optional delimiter field
        schema = self._query_schema.copy()
        if self.bucket_delimiter:
            schema['DELIMITER'] = str
        return schema
@@ -0,0 +1,80 @@
1
+ from typing import Union, TYPE_CHECKING
2
+ from copy import deepcopy
3
+
4
+ if TYPE_CHECKING:
5
+ from onetick.py.core.source import Source # hack for annotations
6
+
7
+ from onetick.py.core.column import _Column
8
+ from onetick.py import types as ott
9
+ from onetick.py.otq import otq
10
+
11
+ from ._base import (
12
+ _AggregationTSType, _AggregationTSSelection, _KeepTs, _FloatAggregation, _ExpectLargeInts, _AllColumnsAggregation,
13
+ )
14
+
15
+
16
class Max(_AggregationTSType, _ExpectLargeInts):
    """Maximum-value aggregation, mapped to the HIGH event processor."""

    NAME = "HIGH"
    EP = otq.High
    # input column must be numeric, a nanosecond timestamp, or +/-inf
    require_type = (int, float, ott.nsectime, ott._inf)

    # merge field mappings/defaults of both mixins; deepcopy so the
    # base-class dicts are not mutated by the update() calls below
    FIELDS_MAPPING = deepcopy(_AggregationTSType.FIELDS_MAPPING)
    FIELDS_MAPPING.update(_ExpectLargeInts.FIELDS_MAPPING)
    FIELDS_DEFAULT = deepcopy(_AggregationTSType.FIELDS_DEFAULT)
    FIELDS_DEFAULT.update(_ExpectLargeInts.FIELDS_DEFAULT)
26
+
27
+
28
class Min(Max):
    """Minimum-value aggregation: same interface as :class:`Max`,
    but mapped to the LOW event processor."""
    NAME = "LOW"
    EP = otq.Low
31
+
32
+
33
class HighTick(_AggregationTSType, _AggregationTSSelection, _KeepTs, _FloatAggregation, _AllColumnsAggregation):
    """Selection aggregation returning whole ticks with the highest values
    of the input field (HIGH_TICK event processor)."""
    EP = otq.HighTick
    NAME = 'HIGH_TICK'
    DEFAULT_OUTPUT_NAME = 'HIGH_TICK'

    # merge mixin mappings/defaults; deepcopy so base-class dicts stay intact
    FIELDS_MAPPING = deepcopy(_AggregationTSType.FIELDS_MAPPING)
    FIELDS_MAPPING.update(_AggregationTSSelection.FIELDS_MAPPING)
    FIELDS_MAPPING['n'] = 'NUM_TICKS'
    FIELDS_DEFAULT = deepcopy(_AggregationTSType.FIELDS_DEFAULT)
    FIELDS_DEFAULT.update(_AggregationTSSelection.FIELDS_DEFAULT)
    FIELDS_DEFAULT['n'] = 1

    # selection EPs output whole ticks, so these parameters don't apply
    FIELDS_TO_SKIP = ['output_field_name', 'all_fields']

    def __init__(self, column: Union[str, _Column], n: int = 1, *args, **kwargs):
        """
        Select `n` ticks with the highest values in the `column` field
        """
        super().__init__(column, *args, **kwargs)
        self.n = n

    @staticmethod
    def validate_output_name(*args, **kwargs):
        # HighTick and LowTick aggregations don't have output fields
        pass
58
+
59
+
60
class LowTick(HighTick):
    """Selection aggregation returning whole ticks with the lowest values
    of the input field (LOW_TICK event processor); see :class:`HighTick`."""
    EP = otq.LowTick
    NAME = 'LOW_TICK'
    DEFAULT_OUTPUT_NAME = 'LOW_TICK'
64
+
65
+
66
class HighTime(_AggregationTSType, _AggregationTSSelection, _FloatAggregation):
    """Returns timestamp of tick with highest value of input field
    (HIGH_TIME event processor)."""
    NAME = "HIGH_TIME"
    EP = otq.HighTime

    # merge mixin mappings/defaults; deepcopy so base-class dicts stay intact
    FIELDS_MAPPING = deepcopy(_AggregationTSType.FIELDS_MAPPING)
    FIELDS_MAPPING.update(_AggregationTSSelection.FIELDS_MAPPING)
    FIELDS_DEFAULT = deepcopy(_AggregationTSType.FIELDS_DEFAULT)
    FIELDS_DEFAULT.update(_AggregationTSSelection.FIELDS_DEFAULT)
    # the result is a timestamp, not the value of the input field
    output_field_type = ott.nsectime
75
+
76
+
77
class LowTime(HighTime):
    """Returns timestamp of tick with lowest value of input field
    (LOW_TIME event processor); see :class:`HighTime`."""
    NAME = "LOW_TIME"
    EP = otq.LowTime
@@ -0,0 +1,83 @@
1
+ from typing import TYPE_CHECKING
2
+ from copy import deepcopy
3
+
4
+ if TYPE_CHECKING:
5
+ from onetick.py.core.source import Source # hack for annotations
6
+
7
+ from onetick.py.otq import otq
8
+
9
+ from onetick.py.core.column import _Column
10
+ from onetick.py import types as ott
11
+
12
+ from ._base import _Aggregation
13
+ from ._docs import (_running_doc,
14
+ _all_fields_doc,
15
+ _bucket_interval_doc,
16
+ _bucket_time_doc,
17
+ _bucket_units_doc,
18
+ _bucket_end_condition_doc,
19
+ _boundary_tick_bucket_doc,
20
+ _group_by_doc,
21
+ _groups_to_display_doc)
22
+ from onetick.py.docs.utils import docstring
23
+
24
+
25
# OneTick build >= 20220913120000
if hasattr(otq, 'NumDistinct'):

    class NumDistinct(_Aggregation):
        """Aggregation counting the number of distinct values of a set of key
        fields, mapped to the NUM_DISTINCT event processor.

        Only defined when the installed OneTick build provides
        ``otq.NumDistinct`` (hence the ``hasattr`` guard above).
        """
        NAME = 'NUM_DISTINCT'
        EP = otq.NumDistinct

        # deepcopy so the base-class mapping is not mutated
        FIELDS_MAPPING = deepcopy(_Aggregation.FIELDS_MAPPING)
        FIELDS_MAPPING['keys'] = 'KEYS'

        FIELDS_TO_SKIP = ['column_name', 'end_condition_per_group']

        # the aggregation always outputs an integer count
        output_field_type = int

        def __init__(self, keys, *args, **kwargs):
            """
            Parameters
            ----------
            keys: str, Column, or list of them
                Tick attributes for which unique values are counted.
            """
            # the EP has no real input column; TIMESTAMP always exists
            super().__init__(column=_Column('TIMESTAMP'), *args, **kwargs)
            if isinstance(keys, (str, _Column)):
                keys = [keys]
            # normalize to plain field-name strings so Column objects are
            # supported, as promised by the num_distinct() docstring
            # (','.join below and schema checks require strings)
            self._keys = [str(key) for key in keys]

        @property
        def keys(self):
            # comma-separated list for the KEYS EP parameter
            return ott.value2str(','.join(self._keys))

        def apply(self, src, name='VALUE'):
            return super().apply(src, name)

        def validate_input_columns(self, src: 'Source'):
            # every key field must be present in the source schema
            for column in self._keys:
                if column not in src.schema:
                    raise TypeError(f"Aggregation {self.__class__.__name__} uses"
                                    f" column '{column}' as input, which doesn't exist")

    @docstring(parameters=[_running_doc, _all_fields_doc,
                           _bucket_interval_doc, _bucket_units_doc, _bucket_time_doc,
                           _bucket_end_condition_doc, _boundary_tick_bucket_doc, _group_by_doc,
                           _groups_to_display_doc])
    def num_distinct(*args, **kwargs):
        """
        Outputs number of distinct values for a specified set of key fields.

        Parameters
        ----------
        keys: str or list of str or list of :py:class:`~onetick.py.Column`
            Specifies a list of tick attributes for which unique values are found.
            The ticks in the input time series must contain those attributes.

        Examples
        --------
        >>> data = otp.Ticks(dict(X=[1, 3, 2, 1, 3]))
        >>> data = data.agg({'X': otp.agg.num_distinct('X')})
        >>> otp.run(data)
                Time  X
        0 2003-12-04  3

        See also
        --------
        **NUM_DISTINCT** OneTick event processor
        """
        return NumDistinct(*args, **kwargs)
@@ -0,0 +1,427 @@
1
+ from typing import TYPE_CHECKING, List, Optional, Union
2
+ from onetick.py.backports import Literal
3
+
4
+ from abc import ABC
5
+
6
+ import onetick.py as otp
7
+ from onetick.py.otq import otq
8
+ from onetick.py import types as ott
9
+
10
+ if TYPE_CHECKING:
11
+ from onetick.py.core.source import Source # hack for annotations
12
+ from onetick.py.core.column import _Column
13
+ from onetick.py.compatibility import is_supported_otq_ob_summary
14
+ from ._base import _Aggregation, get_seconds_from_time_offset
15
+ from ._docs import (_running_doc,
16
+ _bucket_interval_doc,
17
+ _bucket_time_doc,
18
+ _bucket_units_ob_doc,
19
+ _bucket_end_condition_doc,
20
+ _end_condition_per_group_doc,
21
+ _group_by_doc,
22
+ _groups_to_display_doc,
23
+ _side_doc,
24
+ _max_levels_doc,
25
+ _min_levels_doc,
26
+ _max_depth_shares_doc,
27
+ _max_depth_for_price_doc,
28
+ _book_uncross_method_doc,
29
+ _dq_events_that_clear_book_doc,
30
+ _best_ask_price_field_doc,
31
+ _best_bid_price_field_doc,
32
+ _bucket_interval_ob_num_levels_doc,
33
+ _identify_source_doc,
34
+ _show_full_detail_doc,
35
+ _show_only_changes_doc,
36
+ _book_delimiters_doc,
37
+ _max_initialization_days_doc,
38
+ _state_key_max_inactivity_sec_doc,
39
+ _size_max_fractional_digits_doc)
40
+
41
+
42
# Shared docstring-parameter lists consumed by the @docstring decorator of the
# order book aggregation factory functions: each *_DOC_PARAMS list enumerates
# the documented parameters of the corresponding aggregation class.

# parameters documented for ObSnapshot
OB_SNAPSHOT_DOC_PARAMS = [
    _running_doc,
    _bucket_interval_doc, _bucket_time_doc, _bucket_units_ob_doc,
    _bucket_end_condition_doc, _end_condition_per_group_doc, _group_by_doc, _groups_to_display_doc,
    _side_doc, _max_levels_doc, _max_depth_shares_doc, _max_depth_for_price_doc,
    _book_uncross_method_doc, _dq_events_that_clear_book_doc, _identify_source_doc,
    _show_full_detail_doc, _show_only_changes_doc, _book_delimiters_doc,
    _max_initialization_days_doc, _state_key_max_inactivity_sec_doc,
    _size_max_fractional_digits_doc
]
# parameters documented for ObSnapshotWide (no side/identify/show_* options)
OB_SNAPSHOT_WIDE_DOC_PARAMS = [
    _running_doc,
    _bucket_interval_doc, _bucket_time_doc, _bucket_units_ob_doc, _bucket_end_condition_doc,
    _end_condition_per_group_doc, _group_by_doc, _groups_to_display_doc,
    _max_levels_doc, _max_depth_shares_doc, _max_depth_for_price_doc,
    _book_uncross_method_doc, _dq_events_that_clear_book_doc,
    _book_delimiters_doc,
    _max_initialization_days_doc, _state_key_max_inactivity_sec_doc,
    _size_max_fractional_digits_doc
]
# parameters documented for ObSnapshotFlat
OB_SNAPSHOT_FLAT_DOC_PARAMS = [
    _running_doc,
    _bucket_interval_doc, _bucket_time_doc, _bucket_units_ob_doc, _bucket_end_condition_doc,
    _end_condition_per_group_doc, _group_by_doc, _groups_to_display_doc,
    _max_levels_doc,
    _book_uncross_method_doc, _dq_events_that_clear_book_doc,
    _show_full_detail_doc,
    _max_initialization_days_doc, _state_key_max_inactivity_sec_doc,
    _size_max_fractional_digits_doc
]
# parameters documented for ObSummary (adds min_levels)
OB_SUMMARY_DOC_PARAMS = [
    _running_doc,
    _bucket_interval_doc, _bucket_time_doc, _bucket_units_ob_doc,
    _bucket_end_condition_doc, _end_condition_per_group_doc, _group_by_doc, _groups_to_display_doc,
    _side_doc, _max_levels_doc, _min_levels_doc, _max_depth_shares_doc, _max_depth_for_price_doc,
    _book_uncross_method_doc, _dq_events_that_clear_book_doc, _max_initialization_days_doc,
    _state_key_max_inactivity_sec_doc, _size_max_fractional_digits_doc
]

# parameters documented for ObSize (adds best bid/ask price reference fields)
OB_SIZE_DOC_PARAMS = [
    _running_doc,
    _bucket_interval_doc, _bucket_time_doc, _bucket_units_ob_doc,
    _bucket_end_condition_doc, _end_condition_per_group_doc, _group_by_doc, _groups_to_display_doc,
    _side_doc, _max_levels_doc, _max_depth_for_price_doc,
    _book_uncross_method_doc, _dq_events_that_clear_book_doc, _max_initialization_days_doc,
    _best_ask_price_field_doc, _best_bid_price_field_doc,
]

# parameters documented for ObVwap
OB_VWAP_DOC_PARAMS = [
    _running_doc,
    _bucket_interval_doc, _bucket_time_doc, _bucket_units_ob_doc,
    _bucket_end_condition_doc, _end_condition_per_group_doc, _group_by_doc, _groups_to_display_doc,
    _side_doc, _max_levels_doc, _max_depth_shares_doc,
    _book_uncross_method_doc, _dq_events_that_clear_book_doc, _max_initialization_days_doc,
]

# parameters documented for ObNumLevels (seconds-only bucket interval)
OB_NUM_LEVELS_DOC_PARAMS = [
    _running_doc,
    _bucket_interval_ob_num_levels_doc, _side_doc,
    _book_uncross_method_doc, _dq_events_that_clear_book_doc, _max_initialization_days_doc,
]
+
104
+
105
class _OrderBookAggregation(_Aggregation, ABC):
    """Abstract base for all OB_* aggregations: holds the EP parameters that
    every order book event processor shares and validates the input schema."""

    # constructor arguments that are not translated to EP parameters
    FIELDS_TO_SKIP = ['column_name', 'all_fields', 'boundary_tick_bucket', 'output_field_name']
    FIELDS_MAPPING = dict(_Aggregation.FIELDS_MAPPING, **{
        'side': 'SIDE',
        'max_levels': 'MAX_LEVELS',
        'max_depth_shares': 'MAX_DEPTH_SHARES',
        'max_depth_for_price': 'MAX_DEPTH_FOR_PRICE',
        'max_initialization_days': 'MAX_INITIALIZATION_DAYS',
        'book_uncross_method': 'BOOK_UNCROSS_METHOD',
        'dq_events_that_clear_book': 'DQ_EVENTS_THAT_CLEAR_BOOK',
    })
    FIELDS_DEFAULT = dict(_Aggregation.FIELDS_DEFAULT, **{
        'side': None,
        'max_levels': None,
        'max_depth_shares': None,
        'max_depth_for_price': None,
        'max_initialization_days': 1,
        'book_uncross_method': None,
        'dq_events_that_clear_book': None,
    })
    _validations_to_skip = ['running_all_fields']

    def __init__(self,
                 *args,
                 side: Optional[Literal['ASK', 'BID']] = None,
                 max_levels: Optional[int] = None,
                 max_depth_shares: Optional[int] = None,
                 max_depth_for_price: Optional[float] = None,
                 max_initialization_days: int = 1,
                 book_uncross_method: Optional[Literal['REMOVE_OLDER_CROSSED_LEVELS']] = None,
                 dq_events_that_clear_book: Optional[List[str]] = None,
                 **kwargs):
        """Store the common OB_* EP parameters; see the *_doc entries in
        ``_docs`` for per-parameter semantics."""
        self.side = side
        self.max_levels = max_levels
        self.max_depth_shares = max_depth_shares
        self.max_depth_for_price = max_depth_for_price
        self.max_initialization_days = max_initialization_days
        self.book_uncross_method = book_uncross_method
        # the EP takes the DQ event names as a single comma-separated string
        self.dq_events_that_clear_book = ','.join(dq_events_that_clear_book) if dq_events_that_clear_book else None
        # OB EPs have no real input column; TIMESTAMP always exists
        super().__init__(_Column('TIMESTAMP'), *args, **kwargs)

    def _param_validation(self):
        super()._param_validation()
        book_uncross_methods = (None, 'REMOVE_OLDER_CROSSED_LEVELS')
        if self.book_uncross_method not in book_uncross_methods:
            raise ValueError(
                f"Wrong value for parameter 'book_uncross_method': '{self.book_uncross_method}'. "
                f"Possible values: {book_uncross_methods}."
            )
        # OB EPs support only these bucket units (no 'ticks' buckets)
        valid_units = ("seconds", "days", "months", "flexible")
        if self.bucket_units not in valid_units:
            raise ValueError("'bucket_units' can be one of the following: "
                             f"'{', '.join(valid_units)}'; however, '{self.bucket_units}' was passed")

    def validate_input_columns(self, src: 'Source'):
        super().validate_input_columns(src)
        # the book is built from BUY_SELL_FLAG/PRICE/SIZE plus at least one of
        # UPDATE_TIME or DELETED_TIME
        if any([
            not {'BUY_SELL_FLAG', 'PRICE', 'SIZE'}.issubset(src.schema),
            'UPDATE_TIME' not in src.schema and 'DELETED_TIME' not in src.schema
        ]):
            raise TypeError(f"Aggregation `{self.NAME}` need these columns: "
                            f"BUY_SELL_FLAG, PRICE, SIZE and (UPDATE_TIME or DELETED_TIME)")
167
+
168
+
169
class ObSnapshot(_OrderBookAggregation):
    """Order book snapshot aggregation (OB_SNAPSHOT event processor):
    outputs the state of the book level by level."""
    NAME = 'OB_SNAPSHOT'
    EP = otq.ObSnapshot

    FIELDS_MAPPING = {
        **_OrderBookAggregation.FIELDS_MAPPING,
        'identify_source': 'IDENTIFY_SOURCE',
        'show_full_detail': 'SHOW_FULL_DETAIL',
        'show_only_changes': 'SHOW_ONLY_CHANGES',
        'book_delimiters': 'BOOK_DELIMITERS',
        'state_key_max_inactivity_sec': 'STATE_KEY_MAX_INACTIVITY_SEC',
        'size_max_fractional_digits': 'SIZE_MAX_FRACTIONAL_DIGITS',
    }
    FIELDS_DEFAULT = {
        **_OrderBookAggregation.FIELDS_DEFAULT,
        'identify_source': False,
        'show_full_detail': False,
        'show_only_changes': False,
        'book_delimiters': None,
        'state_key_max_inactivity_sec': None,
        'size_max_fractional_digits': 0,
    }

    def __init__(self,
                 *args,
                 identify_source: bool = False,
                 show_full_detail: bool = False,
                 show_only_changes: bool = False,
                 book_delimiters: Optional[Literal['D']] = None,
                 state_key_max_inactivity_sec: Optional[int] = None,
                 size_max_fractional_digits: int = 0,
                 **kwargs):
        """Store the snapshot-specific options and delegate the common order
        book parameters to the base class."""
        self.identify_source = identify_source
        self.show_full_detail = show_full_detail
        self.show_only_changes = show_only_changes
        self.book_delimiters = book_delimiters
        self.state_key_max_inactivity_sec = state_key_max_inactivity_sec
        self.size_max_fractional_digits = size_max_fractional_digits
        # with full detail every input field propagates to the output,
        # so we don't want to set hard limit on the output of order book aggregations
        if show_full_detail:
            kwargs['all_fields'] = True
        super().__init__(*args, **kwargs)

    def _get_output_schema(self, src: 'Source', name: Optional[str] = None) -> dict:
        """One output tick per book level, plus an optional delimiter field."""
        result = dict(
            PRICE=float,
            SIZE=int,
            LEVEL=int,
            UPDATE_TIME=otp.nsectime,
            BUY_SELL_FLAG=int,
        )
        if self.book_delimiters:
            result['DELIMITER'] = str
        return result
221
+
222
+
223
class ObSnapshotWide(ObSnapshot):
    """Order book snapshot in "wide" form (OB_SNAPSHOT_WIDE event processor):
    one output tick per level with bid and ask fields side by side."""
    NAME = 'OB_SNAPSHOT_WIDE'
    EP = otq.ObSnapshotWide

    # ObSnapshot parameters that OB_SNAPSHOT_WIDE does not support
    FIELDS_TO_SKIP = ObSnapshot.FIELDS_TO_SKIP + [
        'side', 'identify_source', 'show_full_detail', 'show_only_changes',
    ]

    def _get_output_schema(self, src: 'Source', name: Optional[str] = None) -> dict:
        """Per-level schema with paired BID_*/ASK_* fields."""
        result: dict = {}
        for prefix in ('BID', 'ASK'):
            result[f'{prefix}_PRICE'] = float
            result[f'{prefix}_SIZE'] = int
            result[f'{prefix}_UPDATE_TIME'] = otp.nsectime
        result['LEVEL'] = int
        if self.book_delimiters:
            result['DELIMITER'] = str
        return result
244
+
245
+
246
class ObSnapshotFlat(ObSnapshot):
    """Order book snapshot in "flat" form (OB_SNAPSHOT_FLAT event processor):
    a single output tick with numbered BID_*/ASK_* fields per level."""
    NAME = 'OB_SNAPSHOT_FLAT'
    EP = otq.ObSnapshotFlat

    # ObSnapshot parameters that OB_SNAPSHOT_FLAT does not support
    FIELDS_TO_SKIP = [
        *ObSnapshot.FIELDS_TO_SKIP,
        'side', 'identify_source', 'show_only_changes',
        'book_delimiters', 'max_depth_shares', 'max_depth_for_price',
    ]

    def validate_input_columns(self, src: 'Source'):
        super().validate_input_columns(src)
        # max_levels is mandatory here: it defines the number of output fields
        if self.max_levels is None or self.max_levels < 1 or self.max_levels > 100_000:
            raise ValueError(f"Parameter 'max_levels' must be set in aggregation `{self.NAME}`"
                             f" and must be between 1 and 100000.")

    def _get_output_schema(self, src: 'Source', name: Optional[str] = None) -> dict:
        """One BID_*/ASK_* field group for each of the `max_levels` levels."""
        schema = {}
        # guaranteed by validate_input_columns(); assert narrows for typing
        assert self.max_levels is not None
        for level in range(1, self.max_levels + 1):
            schema.update({
                f'BID_PRICE{level}': float,
                f'BID_SIZE{level}': int,
                f'BID_UPDATE_TIME{level}': otp.nsectime,
                f'ASK_PRICE{level}': float,
                f'ASK_SIZE{level}': int,
                f'ASK_UPDATE_TIME{level}': otp.nsectime,
            })
        return schema
275
+
276
+
277
class ObSummary(_OrderBookAggregation):
    """Order book summary aggregation (OB_SUMMARY event processor):
    outputs per-side size, VWAP, best/worst prices and level counts."""
    NAME = 'OB_SUMMARY'

    # Will be set later, to prevent error while importing this module with outdated onetick
    EP = None

    FIELDS_MAPPING = dict(_OrderBookAggregation.FIELDS_MAPPING, **{
        'min_levels': 'MIN_LEVELS',
        'state_key_max_inactivity_sec': 'STATE_KEY_MAX_INACTIVITY_SEC',
        'size_max_fractional_digits': 'SIZE_MAX_FRACTIONAL_DIGITS',
    })
    FIELDS_DEFAULT = dict(_OrderBookAggregation.FIELDS_DEFAULT, **{
        'min_levels': None,
        'state_key_max_inactivity_sec': None,
        'size_max_fractional_digits': 0,
    })

    def __init__(self,
                 *args,
                 min_levels: Optional[int] = None,
                 state_key_max_inactivity_sec: Optional[int] = None,
                 size_max_fractional_digits: int = 0,
                 **kwargs):
        """Store OB_SUMMARY-specific options; raises RuntimeError when the
        installed OneTick build has no ``otq.ObSummary``."""
        # resolve the EP lazily: otq.ObSummary only exists in recent builds
        if is_supported_otq_ob_summary():
            self.EP = otq.ObSummary
        else:
            # NOTE: message grammar fixed ("not support" -> "does not support")
            raise RuntimeError("Used onetick installation does not support onetick.query.ObSummary")

        self.min_levels = min_levels
        self.state_key_max_inactivity_sec = state_key_max_inactivity_sec
        self.size_max_fractional_digits = size_max_fractional_digits
        super().__init__(*args, **kwargs)

    def _get_output_schema(self, src: 'Source', name: Optional[str] = None) -> dict:
        """Fixed output schema of OB_SUMMARY (both sides, one tick per bucket)."""
        schema = {
            'BID_SIZE': int,
            'BID_VWAP': float,
            'BEST_BID_PRICE': float,
            'WORST_BID_PRICE': float,
            'NUM_BID_LEVELS': int,
            'ASK_SIZE': int,
            'ASK_VWAP': float,
            'BEST_ASK_PRICE': float,
            'WORST_ASK_PRICE': float,
            'NUM_ASK_LEVELS': int,
        }
        return schema
324
+
325
+
326
class ObSize(_OrderBookAggregation):
    """Order book size aggregation (OB_SIZE event processor): outputs the
    aggregated book size per side (or both sides)."""
    NAME = 'OB_SIZE'
    EP = otq.ObSize

    FIELDS_MAPPING = dict(_OrderBookAggregation.FIELDS_MAPPING, **{
        'min_levels': 'MIN_LEVELS',
        'best_ask_price_field': 'BEST_ASK_PRICE_FIELD',
        'best_bid_price_field': 'BEST_BID_PRICE_FIELD',
    })
    FIELDS_DEFAULT = dict(_OrderBookAggregation.FIELDS_DEFAULT, **{
        'min_levels': None,
        'best_ask_price_field': '',
        'best_bid_price_field': '',
    })

    def __init__(
        self, *args, min_levels: Optional[int] = None,
        best_ask_price_field: Optional[Union[str, _Column]] = None,
        best_bid_price_field: Optional[Union[str, _Column]] = None,
        **kwargs,
    ):
        """
        Parameters
        ----------
        min_levels: int, optional
            Only allowed together with ``max_depth_for_price``
            (presumably a lower bound on levels — see OB_SIZE EP docs).
        best_ask_price_field, best_bid_price_field: str or Column, optional
            Names of the fields carrying reference best prices;
            passed to the EP as plain field names.
        """
        # min_levels is only meaningful with a max_depth_for_price cut-off
        if min_levels and not kwargs.get('max_depth_for_price'):
            raise ValueError('`min_levels` parameter must not be set when `max_depth_for_price` not set')

        self.min_levels = min_levels

        # normalize Column objects / None to plain field-name strings
        if isinstance(best_ask_price_field, _Column):
            best_ask_price_field = str(best_ask_price_field)
        elif best_ask_price_field is None:
            best_ask_price_field = ''

        if isinstance(best_bid_price_field, _Column):
            best_bid_price_field = str(best_bid_price_field)
        elif best_bid_price_field is None:
            best_bid_price_field = ''

        self.best_ask_price_field = best_ask_price_field
        self.best_bid_price_field = best_bid_price_field
        super().__init__(*args, **kwargs)

    def validate_input_columns(self, src: 'Source'):
        super().validate_input_columns(src)

        # reference price fields, when set, must exist in the input schema
        if self.best_ask_price_field and self.best_ask_price_field not in src.schema:
            raise ValueError(
                f'Column \'{self.best_ask_price_field}\' from `best_ask_price_field` parameter not in the schema.'
            )

        if self.best_bid_price_field and self.best_bid_price_field not in src.schema:
            raise ValueError(
                f'Column \'{self.best_bid_price_field}\' from `best_bid_price_field` parameter not in the schema.'
            )

    def _get_output_schema(self, src: 'Source', name: Optional[str] = None) -> dict:
        # single VALUE when one side is requested, otherwise both sides
        if self.side:
            return {'VALUE': float}

        return {
            'ASK_VALUE': float,
            'BID_VALUE': float,
        }
387
+
388
+
389
class ObVwap(_OrderBookAggregation):
    """Order book VWAP aggregation (OB_VWAP event processor)."""
    NAME = 'OB_VWAP'
    EP = otq.ObVwap

    def _get_output_schema(self, src: 'Source', name: Optional[str] = None) -> dict:
        """Single VALUE when one side is requested, otherwise both sides."""
        if not self.side:
            return {'ASK_VALUE': float, 'BID_VALUE': float}
        return {'VALUE': float}
401
+
402
+
403
class ObNumLevels(_OrderBookAggregation):
    """Aggregation counting the number of order book levels
    (OB_NUM_LEVELS event processor)."""
    NAME = 'OB_NUM_LEVELS'
    EP = otq.ObNumLevels

    def __init__(self, *args, bucket_interval: Union[int, ott.OTPBaseTimeOffset] = 0, **kwargs):
        """Accept the bucket interval as seconds or as a time-offset object;
        offsets are converted to whole seconds before reaching the base class."""
        if isinstance(bucket_interval, ott.OTPBaseTimeOffset):
            # only offsets expressible as a whole number of seconds are allowed
            _, datepart = bucket_interval.get_offset()
            if datepart not in {'second', 'minute', 'hour', 'day'}:
                raise ValueError(f"Unsupported DatePart passed to bucket_interval: {datepart}")
            bucket_interval = get_seconds_from_time_offset(bucket_interval)
        elif not isinstance(bucket_interval, int):
            raise ValueError('Unsupported value type for `bucket_interval` parameter')

        super().__init__(*args, bucket_interval=bucket_interval, **kwargs)

    def _get_output_schema(self, src: 'Source', name: Optional[str] = None) -> dict:
        """Single VALUE when one side is requested, otherwise both sides."""
        if not self.side:
            return {'ASK_VALUE': float, 'BID_VALUE': float}
        return {'VALUE': float}
+ }