onetick-py 1.162.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- locator_parser/__init__.py +0 -0
- locator_parser/acl.py +73 -0
- locator_parser/actions.py +266 -0
- locator_parser/common.py +365 -0
- locator_parser/io.py +41 -0
- locator_parser/locator.py +150 -0
- onetick/__init__.py +101 -0
- onetick/doc_utilities/__init__.py +3 -0
- onetick/doc_utilities/napoleon.py +40 -0
- onetick/doc_utilities/ot_doctest.py +140 -0
- onetick/doc_utilities/snippets.py +280 -0
- onetick/lib/__init__.py +4 -0
- onetick/lib/instance.py +138 -0
- onetick/py/__init__.py +290 -0
- onetick/py/_stack_info.py +89 -0
- onetick/py/_version.py +2 -0
- onetick/py/aggregations/__init__.py +11 -0
- onetick/py/aggregations/_base.py +645 -0
- onetick/py/aggregations/_docs.py +912 -0
- onetick/py/aggregations/compute.py +286 -0
- onetick/py/aggregations/functions.py +2216 -0
- onetick/py/aggregations/generic.py +104 -0
- onetick/py/aggregations/high_low.py +80 -0
- onetick/py/aggregations/num_distinct.py +83 -0
- onetick/py/aggregations/order_book.py +427 -0
- onetick/py/aggregations/other.py +1014 -0
- onetick/py/backports.py +26 -0
- onetick/py/cache.py +373 -0
- onetick/py/callback/__init__.py +5 -0
- onetick/py/callback/callback.py +275 -0
- onetick/py/callback/callbacks.py +131 -0
- onetick/py/compatibility.py +752 -0
- onetick/py/configuration.py +736 -0
- onetick/py/core/__init__.py +0 -0
- onetick/py/core/_csv_inspector.py +93 -0
- onetick/py/core/_internal/__init__.py +0 -0
- onetick/py/core/_internal/_manually_bound_value.py +6 -0
- onetick/py/core/_internal/_nodes_history.py +250 -0
- onetick/py/core/_internal/_op_utils/__init__.py +0 -0
- onetick/py/core/_internal/_op_utils/every_operand.py +9 -0
- onetick/py/core/_internal/_op_utils/is_const.py +10 -0
- onetick/py/core/_internal/_per_tick_scripts/tick_list_sort_template.script +121 -0
- onetick/py/core/_internal/_proxy_node.py +140 -0
- onetick/py/core/_internal/_state_objects.py +2307 -0
- onetick/py/core/_internal/_state_vars.py +87 -0
- onetick/py/core/_source/__init__.py +0 -0
- onetick/py/core/_source/_symbol_param.py +95 -0
- onetick/py/core/_source/schema.py +97 -0
- onetick/py/core/_source/source_methods/__init__.py +0 -0
- onetick/py/core/_source/source_methods/aggregations.py +810 -0
- onetick/py/core/_source/source_methods/applyers.py +296 -0
- onetick/py/core/_source/source_methods/columns.py +141 -0
- onetick/py/core/_source/source_methods/data_quality.py +301 -0
- onetick/py/core/_source/source_methods/debugs.py +270 -0
- onetick/py/core/_source/source_methods/drops.py +120 -0
- onetick/py/core/_source/source_methods/fields.py +619 -0
- onetick/py/core/_source/source_methods/filters.py +1001 -0
- onetick/py/core/_source/source_methods/joins.py +1393 -0
- onetick/py/core/_source/source_methods/merges.py +566 -0
- onetick/py/core/_source/source_methods/misc.py +1325 -0
- onetick/py/core/_source/source_methods/pandases.py +155 -0
- onetick/py/core/_source/source_methods/renames.py +356 -0
- onetick/py/core/_source/source_methods/sorts.py +183 -0
- onetick/py/core/_source/source_methods/switches.py +142 -0
- onetick/py/core/_source/source_methods/symbols.py +117 -0
- onetick/py/core/_source/source_methods/times.py +627 -0
- onetick/py/core/_source/source_methods/writes.py +702 -0
- onetick/py/core/_source/symbol.py +202 -0
- onetick/py/core/_source/tmp_otq.py +222 -0
- onetick/py/core/column.py +209 -0
- onetick/py/core/column_operations/__init__.py +0 -0
- onetick/py/core/column_operations/_methods/__init__.py +4 -0
- onetick/py/core/column_operations/_methods/_internal.py +28 -0
- onetick/py/core/column_operations/_methods/conversions.py +215 -0
- onetick/py/core/column_operations/_methods/methods.py +294 -0
- onetick/py/core/column_operations/_methods/op_types.py +150 -0
- onetick/py/core/column_operations/accessors/__init__.py +0 -0
- onetick/py/core/column_operations/accessors/_accessor.py +30 -0
- onetick/py/core/column_operations/accessors/decimal_accessor.py +92 -0
- onetick/py/core/column_operations/accessors/dt_accessor.py +464 -0
- onetick/py/core/column_operations/accessors/float_accessor.py +160 -0
- onetick/py/core/column_operations/accessors/str_accessor.py +1374 -0
- onetick/py/core/column_operations/base.py +1061 -0
- onetick/py/core/cut_builder.py +149 -0
- onetick/py/core/db_constants.py +20 -0
- onetick/py/core/eval_query.py +244 -0
- onetick/py/core/lambda_object.py +442 -0
- onetick/py/core/multi_output_source.py +193 -0
- onetick/py/core/per_tick_script.py +2253 -0
- onetick/py/core/query_inspector.py +465 -0
- onetick/py/core/source.py +1663 -0
- onetick/py/db/__init__.py +2 -0
- onetick/py/db/_inspection.py +1042 -0
- onetick/py/db/db.py +1423 -0
- onetick/py/db/utils.py +64 -0
- onetick/py/docs/__init__.py +0 -0
- onetick/py/docs/docstring_parser.py +112 -0
- onetick/py/docs/utils.py +81 -0
- onetick/py/functions.py +2354 -0
- onetick/py/license.py +188 -0
- onetick/py/log.py +88 -0
- onetick/py/math.py +947 -0
- onetick/py/misc.py +437 -0
- onetick/py/oqd/__init__.py +22 -0
- onetick/py/oqd/eps.py +1195 -0
- onetick/py/oqd/sources.py +325 -0
- onetick/py/otq.py +211 -0
- onetick/py/pyomd_mock.py +47 -0
- onetick/py/run.py +841 -0
- onetick/py/servers.py +173 -0
- onetick/py/session.py +1342 -0
- onetick/py/sources/__init__.py +19 -0
- onetick/py/sources/cache.py +167 -0
- onetick/py/sources/common.py +126 -0
- onetick/py/sources/csv.py +642 -0
- onetick/py/sources/custom.py +85 -0
- onetick/py/sources/data_file.py +305 -0
- onetick/py/sources/data_source.py +1049 -0
- onetick/py/sources/empty.py +94 -0
- onetick/py/sources/odbc.py +337 -0
- onetick/py/sources/order_book.py +238 -0
- onetick/py/sources/parquet.py +168 -0
- onetick/py/sources/pit.py +191 -0
- onetick/py/sources/query.py +495 -0
- onetick/py/sources/snapshots.py +419 -0
- onetick/py/sources/split_query_output_by_symbol.py +198 -0
- onetick/py/sources/symbology_mapping.py +123 -0
- onetick/py/sources/symbols.py +357 -0
- onetick/py/sources/ticks.py +825 -0
- onetick/py/sql.py +70 -0
- onetick/py/state.py +256 -0
- onetick/py/types.py +2056 -0
- onetick/py/utils/__init__.py +70 -0
- onetick/py/utils/acl.py +93 -0
- onetick/py/utils/config.py +186 -0
- onetick/py/utils/default.py +49 -0
- onetick/py/utils/file.py +38 -0
- onetick/py/utils/helpers.py +76 -0
- onetick/py/utils/locator.py +94 -0
- onetick/py/utils/perf.py +499 -0
- onetick/py/utils/query.py +49 -0
- onetick/py/utils/render.py +1139 -0
- onetick/py/utils/script.py +244 -0
- onetick/py/utils/temp.py +471 -0
- onetick/py/utils/types.py +118 -0
- onetick/py/utils/tz.py +82 -0
- onetick_py-1.162.2.dist-info/METADATA +148 -0
- onetick_py-1.162.2.dist-info/RECORD +152 -0
- onetick_py-1.162.2.dist-info/WHEEL +5 -0
- onetick_py-1.162.2.dist-info/entry_points.txt +2 -0
- onetick_py-1.162.2.dist-info/licenses/LICENSE +21 -0
- onetick_py-1.162.2.dist-info/top_level.txt +2 -0
|
@@ -0,0 +1,702 @@
|
|
|
1
|
+
import warnings
|
|
2
|
+
from datetime import date
|
|
3
|
+
from typing import TYPE_CHECKING, Optional, Set, Type, Union
|
|
4
|
+
from onetick.py.backports import Literal
|
|
5
|
+
|
|
6
|
+
from onetick import py as otp
|
|
7
|
+
from onetick.py import configuration
|
|
8
|
+
from onetick.py.core.column import _Column, field_name_contains_lowercase
|
|
9
|
+
from onetick.py.otq import otq
|
|
10
|
+
from onetick.py.utils import adaptive
|
|
11
|
+
from onetick.py.compatibility import is_save_snapshot_database_parameter_supported
|
|
12
|
+
|
|
13
|
+
from .misc import inplace_operation
|
|
14
|
+
|
|
15
|
+
if TYPE_CHECKING:
|
|
16
|
+
from onetick.py.core.source import Source
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
@inplace_operation
def write(
    self,
    db: Union[str, 'otp.DB'],
    symbol: Union[str, 'otp.Column', None] = None,
    tick_type: Union[str, 'otp.Column', None] = None,
    date: Union[date, Type[adaptive], None] = adaptive,
    start: Optional[date] = None,
    end: Optional[date] = None,
    append: bool = False,
    keep_symbol_and_tick_type: Union[bool, Type[adaptive]] = adaptive,
    propagate: bool = True,
    out_of_range_tick_action: Literal['exception', 'ignore', 'load'] = 'exception',
    timestamp: Optional['otp.Column'] = None,
    keep_timestamp: bool = True,
    correction_type: Optional['otp.Column'] = None,
    replace_existing_time_series: bool = False,
    allow_concurrent_write: bool = False,
    context: Union[str, Type[adaptive]] = adaptive,
    use_context_of_query: bool = False,
    inplace: bool = False,
    **kwargs,
) -> Optional['Source']:
    """
    Saves data result to OneTick database.

    Note
    ----
    This method does not save anything. It adds instruction in query to save.
    Data will be saved when query will be executed.

    Using ``start``+``end`` parameters instead of single ``date`` have some limitations:

    * ``inplace`` is not supported
    * if ``DAY_BOUNDARY_TZ`` and ``DAY_BOUNDARY_OFFSET`` specified against
      individual locations of database, then day boundary could be calculated incorrectly.
    * ``out_of_range_tick_action`` could be only ``exception`` or ``ignore``

    Parameters
    ----------
    db: str or :py:class:`otp.DB <onetick.py.DB>`
        database name or object.
    symbol: str or Column
        resulting symbol name string or column to get symbol name from.
        If this parameter is not set, then ticks _SYMBOL_NAME pseudo-field is used.
        If it is empty, an attempt is made to retrieve
        the symbol name from the field named SYMBOL_NAME.
    tick_type: str or Column
        resulting tick type string or column to get tick type from.
        If this parameter is not set, the _TICK_TYPE pseudo-field is used.
        If it is empty, an attempt is made to retrieve
        the tick type from the field named TICK_TYPE.
    date: :py:class:`otp.datetime <onetick.py.datetime>` or None
        date where to save data.
        Should be set to `None` if writing to accelerator or memory database.
        By default, it is set to `otp.config.default_date`.
    start: :py:class:`otp.datetime <onetick.py.datetime>` or None
        Start date for data to save. It is inclusive.
        Cannot be used with ``date`` parameter.
        Also cannot be used with ``inplace`` set to ``True``.
        Should be set to `None` if writing to accelerator or memory database.
        By default, None.
    end: :py:class:`otp.datetime <onetick.py.datetime>` or None
        End date for data to save. It is exclusive, so be sure to set
        it to the next day after the last day in data.
        Cannot be used with ``date`` parameter.
        Also cannot be used with ``inplace`` set to ``True``.
        Should be set to `None` if writing to accelerator or memory database.
        By default, None.
    append: bool
        If False - data will be rewritten for this ``date``
        or range of dates (from ``start`` to ``end``),
        otherwise data will be appended: new symbols are added,
        existing symbols can be modified (append new ticks, modify existing ticks).
        This option is not valid for accelerator databases.
    keep_symbol_and_tick_type: bool
        keep fields containing symbol name and tick type when writing ticks
        to the database or propagating them.
        By default, this parameter is adaptive.
        If ``symbol`` or ``tick_type`` are column objects, then it's set to True.
        Otherwise, it's set to False.
    propagate: bool
        Propagate ticks after that event processor or not.
    out_of_range_tick_action: str
        Action to be executed if tick's timestamp's date is not ``date`` or between ``start`` or ``end``:

        * `exception`: runtime exception will be raised
        * `ignore`: tick will not be written to the database
        * `load`: writes tick to the database anyway.
          Can be used only with ``date``, not with ``start``+``end``.

        Default: `exception`
    timestamp: Column
        Field that contains the timestamp with which the ticks will be written to the database.
        By default, the TIMESTAMP pseudo-column is used.
    keep_timestamp: bool
        If ``timestamp`` parameter is set and this parameter is set to False,
        then the timestamp column is removed from the propagated ticks.
        Columns 'Time' and 'TIMESTAMP' are never removed.
    correction_type: Column
        The name of the column that contains the correction type.
        This column will be removed.
        If this parameter is not set, no corrections will be submitted.
    replace_existing_time_series: bool
        If ``append`` is set to True, setting this option to True instructs the loader
        to replace existing time series, instead of appending to them.
        Other time series will remain unchanged.
    allow_concurrent_write: bool
        Allows different queries running on the same server to load concurrently into the same database.
    context: str
        The server context used to look up the database.
        By default, `otp.config.context` is used if ``use_context_of_query`` is not set.
    use_context_of_query: bool
        If this parameter is set to True and the ``context`` parameter is not set,
        the context of the query is used instead of the default value of the ``context`` parameter.
    inplace: bool
        A flag controls whether operation should be applied inplace.
        If ``inplace=True``, then it returns nothing.
        Otherwise, method returns a new modified object.
        Cannot be ``True`` if ``start`` and ``end`` are set.
    kwargs:
        .. deprecated:: 1.21.0

           Use named parameters instead.

    Returns
    -------
    :class:`Source` or None

    See also
    --------
    **WRITE_TO_ONETICK_DB** OneTick event processor

    Examples
    --------
    >>> data = otp.Ticks(X=[1, 2, 3])
    >>> data = data.write('SOME_DB', symbol='S_WRITE', tick_type='T_WRITE')
    >>> otp.run(data)
                         Time  X
    0 2003-12-01 00:00:00.000  1
    1 2003-12-01 00:00:00.001  2
    2 2003-12-01 00:00:00.002  3
    >>> data = otp.DataSource('SOME_DB', symbol='S_WRITE', tick_type='T_WRITE')
    >>> otp.run(data)
                         Time  X
    0 2003-12-01 00:00:00.000  1
    1 2003-12-01 00:00:00.001  2
    2 2003-12-01 00:00:00.002  3
    """
    # Translate deprecated keyword aliases to their modern names.
    if 'append_mode' in kwargs:
        warnings.warn("Parameter 'append_mode' is deprecated, use 'append'", FutureWarning)
        append = kwargs.pop('append_mode')

    if 'timestamp_field' in kwargs:
        warnings.warn("Parameter 'timestamp_field' is deprecated, use 'timestamp'", FutureWarning)
        timestamp = kwargs.pop('timestamp_field')

    if 'keep_timestamp_field' in kwargs:
        warnings.warn("Parameter 'keep_timestamp_field' is deprecated, use 'keep_timestamp'", FutureWarning)
        keep_timestamp = kwargs.pop('keep_timestamp_field')

    if kwargs:
        raise TypeError(f'write() got unexpected arguments: {list(kwargs)}')

    # From here on ``kwargs`` is reused to accumulate WRITE_TO_ONETICK_DB EP parameters.
    kwargs = {}

    # validate field names: OneTick databases store field names in uppercase
    for field_name in self.schema:
        if field_name_contains_lowercase(field_name):
            if otp.config.allow_lowercase_in_saved_fields:
                warnings.warn(
                    f'Field "{field_name}" contains lowercase characters and is being saved'
                    ' to a Onetick database. This field will be converted to uppercase upon saving.'
                )
            else:
                raise ValueError(
                    f'Field "{field_name}" contains lowercase characters and cannot be saved to a Onetick database'
                )

    # ``date`` and ``start``+``end`` are mutually exclusive ways to select the time range.
    if date is not adaptive and (start or end):
        raise ValueError('date cannot be used with start+end')

    if date is adaptive and (start and end) and inplace:
        # join_with_query and merge are used for multiple dates, so inplace is not supported
        raise ValueError('cannot run on multiple dates if inplace is True, use one value for date instead of start+end')

    if (start and not end) or (not start and end):
        raise ValueError('start and end should be both specified or both None')

    if date is adaptive:
        date = configuration.config.default_date

    # Symbol: a column reference maps directly to the EP's symbol_name_field;
    # a plain string is materialized as a temporary helper column.
    if symbol is not None:
        if isinstance(symbol, _Column):
            kwargs['symbol_name_field'] = str(symbol)
            if keep_symbol_and_tick_type is adaptive:
                keep_symbol_and_tick_type = True
        else:
            kwargs.setdefault('symbol_name_field', '_SYMBOL_NAME_FIELD_')
            self[kwargs['symbol_name_field']] = symbol

    # Tick type: same strategy as for symbol.
    if tick_type is not None:
        if isinstance(tick_type, _Column):
            kwargs['tick_type_field'] = str(tick_type)
            if keep_symbol_and_tick_type is adaptive:
                keep_symbol_and_tick_type = True
        else:
            kwargs.setdefault('tick_type_field', '_TICK_TYPE_FIELD_')
            self[kwargs['tick_type_field']] = tick_type

    # Adaptive default: keep the fields only when they were passed as columns (handled above).
    if keep_symbol_and_tick_type is adaptive:
        keep_symbol_and_tick_type = False

    if timestamp is not None:
        kwargs['timestamp_field'] = str(timestamp)

    if correction_type is not None:
        kwargs['correction_type_field'] = str(correction_type)

    # Explicit context wins; otherwise fall back to the configured default
    # unless the caller asked to use the context of the running query.
    if context is not adaptive:
        kwargs['context'] = context
    elif not use_context_of_query:
        if otp.config.context is not None:
            kwargs['context'] = otp.config.context

    if out_of_range_tick_action.upper() == 'IGNORE':
        # let's ignore
        pass
    elif out_of_range_tick_action.upper() == 'LOAD':
        if start and end:
            raise ValueError('LOAD out_of_range_tick_action cannot be used with start+end, use date instead')
    elif out_of_range_tick_action.upper() == 'EXCEPTION':
        if start and end:
            # WRITE_TO_ONETICK_DB use DAY_BOUNDARY_TZ and DAY_BOUNDARY_OFFSET
            # to check tick timestamp is out of range or not
            # so we mimic it here with THROW event processor
            src = otp.Source(otq.DbShowConfig(str(db), 'DB_TIME_INTERVALS'))
            src.table(inplace=True, DAY_BOUNDARY_TZ=str, DAY_BOUNDARY_OFFSET=int)
            # DAY_BOUNDARY_OFFSET offset are in seconds
            src['DAY_BOUNDARY_OFFSET'] = src['DAY_BOUNDARY_OFFSET'] * 1000
            src.rename(
                {'DAY_BOUNDARY_TZ': '__DAY_BOUNDARY_TZ', 'DAY_BOUNDARY_OFFSET': '__DAY_BOUNDARY_OFFSET'}, inplace=True
            )
            # Attach the database's day-boundary settings to every tick.
            self = self.join_with_query(src, symbol=f"{str(db)}::DUMMY", caching='per_symbol')
            start_formatted = start.strftime('%Y-%m-%d')
            end_formatted = end.strftime('%Y-%m-%d')
            convert_timestamp = self['TIMESTAMP'].dt.strftime('%Y%m%d%H%M%S.%J', timezone=self['__DAY_BOUNDARY_TZ'])
            # Raise a runtime exception for ticks before the (offset-adjusted) start boundary.
            start_op = otp.dt(start).to_operation(timezone=self['__DAY_BOUNDARY_TZ']) + self['__DAY_BOUNDARY_OFFSET']
            self.throw(
                where=(self['TIMESTAMP'] < start_op),
                message=(
                    'Timestamp '
                    + convert_timestamp
                    + ' of a tick, visible or hidden, '
                    + f'earlier than {start_formatted} in timezone '
                    + self['__DAY_BOUNDARY_TZ']
                ),
                inplace=True,
            )
            # ...and for ticks at or after the (offset-adjusted) end boundary (end is exclusive).
            end_op = otp.dt(end).to_operation(timezone=self['__DAY_BOUNDARY_TZ']) + self['__DAY_BOUNDARY_OFFSET']
            self.throw(
                where=(self['TIMESTAMP'] >= end_op),
                message=(
                    'Timestamp '
                    + convert_timestamp
                    + ' of a tick, visible or hidden, '
                    + f'later than {end_formatted} in timezone '
                    + self['__DAY_BOUNDARY_TZ']
                ),
                inplace=True,
            )
    else:
        # 'load' is also accepted (handled above); list every valid value in the message.
        raise ValueError(
            f'Unknown out_of_range_tick_action: {out_of_range_tick_action}.'
            ' Possible values are: "exception", "ignore", "load"'
        )

    branches = []
    if propagate:
        branches = [self]

    # Parameters shared by every WRITE_TO_ONETICK_DB EP instance sunk below.
    kwargs = dict(
        **kwargs,
        database=str(db),
        append_mode=append,
        keep_symbol_name_and_tick_type=keep_symbol_and_tick_type,
        keep_timestamp_field=keep_timestamp,
        replace_existing_time_series=replace_existing_time_series,
        allow_concurrent_write=allow_concurrent_write,
        use_context_of_query=use_context_of_query,
    )

    if start and end:
        # Multi-day mode: one writer branch per day; each branch silently ignores
        # ticks outside its own day (range checks were emulated with throw() above).
        days = (end - start).days + 1
        for i in range(days):
            branch = self.copy()
            branch.sink(
                otq.WriteToOnetickDb(
                    date=(start + otp.Day(i)).strftime('%Y%m%d'),
                    propagate_ticks=False,
                    out_of_range_tick_action='IGNORE',
                    **kwargs,
                )
            )
            branches.append(branch)
        self = otp.merge(branches)
    else:
        # Single-day (or date-less) mode: one writer EP in the main branch.
        self.sink(
            otq.WriteToOnetickDb(
                date=date.strftime('%Y%m%d') if date else '',  # type: ignore[union-attr]
                propagate_ticks=propagate,
                out_of_range_tick_action=out_of_range_tick_action.upper(),
                **kwargs,
            )
        )

    # Drop the temporary helper columns created for plain-string symbol/tick_type.
    for col in ('_SYMBOL_NAME_FIELD_', '_TICK_TYPE_FIELD_'):
        if col in self.schema:
            self.drop(col, inplace=True)

    # Sync the python-side schema with the columns the EP removes from the output.
    to_drop: Set[str] = set()
    if not keep_symbol_and_tick_type:
        if 'symbol_name_field' in kwargs:
            to_drop.add(kwargs['symbol_name_field'])
        if 'tick_type_field' in kwargs:
            to_drop.add(kwargs['tick_type_field'])
    if not keep_timestamp and timestamp is not None and str(timestamp) not in {'Time', 'TIMESTAMP'}:
        to_drop.add(str(timestamp))
    if correction_type is not None:
        to_drop.add(str(correction_type))
    self.schema.set(**{k: v for k, v in self.schema.items() if k not in to_drop})
    return self
|
|
350
|
+
|
|
351
|
+
|
|
352
|
+
@inplace_operation
def write_parquet(
    self,
    output_path,
    compression_type="snappy",
    num_tick_per_row_group=1000,
    partitioning_keys="",
    propagate_input_ticks=False,
    inplace=False,
):
    """
    Writes the input tick series to parquet data file.

    Input must not have field 'time' as that field will also be added by the EP in the resulting file(s)

    Parameters
    ----------
    output_path: str
        Path for saving ticks to Parquet file.
        Partitioned: Path to the root directory of the parquet files.
        Non-partitioned: Path to the parquet file.
    compression_type: str
        Compression type for parquet files.
        Should be one of these: `gzip`, `lz4`, `none`, `snappy` (default), `zstd`.
    num_tick_per_row_group: int
        Number of rows per row group.
    partitioning_keys: list, str
        List of fields (`list` or comma-separated string) to be used as keys for partitioning.

        Setting this parameter will switch this EP to partitioned mode.

        In non-partitioned mode, if the path points to a file that already exists, it will be overridden.
        When partitioning is active:

        * The target directory must be empty
        * Key fields and their string values will be automatically URL-encoded to avoid conflicts with
          filesystem naming rules.

        Pseudo-fields '_SYMBOL_NAME' and '_TICK_TYPE' may be used as `partitioning_keys` and
        will be added to the schema automatically.
    propagate_input_ticks: bool
        Switches propagation of the ticks. If set to `True`, ticks will be propagated.
    inplace: bool
        A flag controls whether operation should be applied inplace.
        If ``inplace=True``, then it returns nothing. Otherwise method
        returns a new modified object.

    See also
    --------
    | **WRITE_TO_PARQUET** OneTick event processor
    | :py:class:`onetick.py.ReadParquet`

    Examples
    --------
    Simple usage:

    >>> data = otp.Ticks(A=[1, 2, 3])
    >>> data = data.write_parquet("/path/to/parquet/file")  # doctest: +SKIP
    >>> otp.run(data)  # doctest: +SKIP
    """
    # WRITE_TO_PARQUET EP is only available in newer OneTick builds.
    if not hasattr(otq, "WriteToParquet"):
        raise RuntimeError("Current version of OneTick doesn't support WRITE_TO_PARQUET EP")

    # The EP expects partitioning keys as a comma-separated string.
    if isinstance(partitioning_keys, list):
        partitioning_keys = ",".join(partitioning_keys)

    # EP parameter values are uppercase (e.g. SNAPPY, GZIP).
    compression_type = compression_type.upper()

    # The row-group parameter was renamed between OneTick versions;
    # probe the EP's declared parameters and use whichever name it supports.
    ep_kwargs = {}
    if 'num_tick_per_row_group' in otq.WriteToParquet.Parameters.list_parameters():
        ep_kwargs['num_tick_per_row_group'] = num_tick_per_row_group
    else:
        ep_kwargs['num_ticks_per_row_group'] = num_tick_per_row_group

    self.sink(
        otq.WriteToParquet(
            output_path=output_path,
            compression_type=compression_type,
            partitioning_keys=partitioning_keys,
            propagate_input_ticks=propagate_input_ticks,
            **ep_kwargs,
        )
    )

    return self
|
|
437
|
+
|
|
438
|
+
|
|
439
|
+
@inplace_operation
|
|
440
|
+
def save_snapshot(
|
|
441
|
+
self: 'Source',
|
|
442
|
+
snapshot_name='VALUE',
|
|
443
|
+
snapshot_storage='memory',
|
|
444
|
+
default_db='CEP_SNAPSHOT',
|
|
445
|
+
database='',
|
|
446
|
+
symbol_name_field=None,
|
|
447
|
+
expected_symbols_per_time_series=1000,
|
|
448
|
+
num_ticks=1,
|
|
449
|
+
reread_prevention_level=1,
|
|
450
|
+
group_by=None,
|
|
451
|
+
expected_groups_per_symbol=10,
|
|
452
|
+
keep_snapshot_after_query=False,
|
|
453
|
+
allow_concurrent_writers=False,
|
|
454
|
+
remove_snapshot_upon_start=None,
|
|
455
|
+
inplace=False,
|
|
456
|
+
):
|
|
457
|
+
"""
|
|
458
|
+
Saves last (at most) `n` ticks of each group of ticks from the input time series in global storage or
|
|
459
|
+
in a memory mapped file under a specified snapshot name.
|
|
460
|
+
Tick descriptor should be the same for all ticks saved into the snapshot.
|
|
461
|
+
These ticks can then be read via :py:class:`ReadSnapshot <onetick.py.ReadSnapshot>` by using the name
|
|
462
|
+
of the snapshot and the same symbol name (``<db_name>::<symbol>``) that were used by this method.
|
|
463
|
+
|
|
464
|
+
The event processor cannot be used by default. To enable it, access control should be configured,
|
|
465
|
+
so user could have rights to use **SAVE_SNAPSHOT** EP.
|
|
466
|
+
|
|
467
|
+
Parameters
|
|
468
|
+
----------
|
|
469
|
+
snapshot_name: str
|
|
470
|
+
The name of the snapshot, can be any string which doesn't contain slashes or backslashes.
|
|
471
|
+
Two snapshots can have the same name if they are stored in memory mapped files for different databases. Also,
|
|
472
|
+
they can have the same names if they are stored in the memories of different processes (different tick_servers).
|
|
473
|
+
In all other cases the names should be unique.
|
|
474
|
+
|
|
475
|
+
Default: `VALUE`
|
|
476
|
+
snapshot_storage: str
|
|
477
|
+
This parameter specifies the place of storage of the snapshot. Possible options are:
|
|
478
|
+
|
|
479
|
+
* `memory` - the snapshot is stored in the dynamic (heap) memory of the process
|
|
480
|
+
that ran (or is still running) the :py:meth:`onetick.py.Source.save_snapshot` for the snapshot.
|
|
481
|
+
* `memory_mapped_file` - the snapshot is stored in a memory mapped file.
|
|
482
|
+
For each symbol to get the location of the snapshot in the file system, ``save_snapshot`` looks at
|
|
483
|
+
the **SAVE_SNAPSHOT_DIR** parameter value in the locator section for the database of the symbol.
|
|
484
|
+
In a specified directory it creates a new directory with the name of the snapshot and keeps
|
|
485
|
+
the memory mapped file and some other helper files there.
|
|
486
|
+
|
|
487
|
+
Default: `memory`
|
|
488
|
+
default_db: str
|
|
489
|
+
The ticks with empty symbol names or symbol names with no database name as a prefix are saved as
|
|
490
|
+
if they have symbol names equal to **DEFAULT_DB::SYMBOL_NAME** (where **SYMBOL_NAME** can be empty).
|
|
491
|
+
These kinds of ticks, for example, can appear after merging time series. To save/read these ticks
|
|
492
|
+
to/from storage a dummy database with the specified default name should be configured in the locator.
|
|
493
|
+
|
|
494
|
+
Default: `CEP_SNAPSHOT`
|
|
495
|
+
database: str, optional
|
|
496
|
+
Specifies the output database for saving the snapshot.
|
|
497
|
+
symbol_name_field: str, :py:class:`~onetick.py.Column`, optional
|
|
498
|
+
If this parameter is specified, then each input time series is assumed to be a union of several time series and
|
|
499
|
+
the value of the specified attribute of each tick determines to which time series the tick actually belongs.
|
|
500
|
+
These values should be pure symbol names (for instance if the tick belongs to the time series **DEMO_L1::A**,
|
|
501
|
+
then the value of the corresponding attribute should be **A**) and the database name will be taken from
|
|
502
|
+
symbol of the merged time series.
|
|
503
|
+
expected_symbols_per_time_series: int
|
|
504
|
+
This parameter makes sense only when ``symbol_name_field`` is specified.
|
|
505
|
+
It is the number of real symbols that are expected to occur per input time series.
|
|
506
|
+
Bigger numbers may result in larger memory utilization by the query but will make the query faster.
|
|
507
|
+
|
|
508
|
+
Default: `1000`
|
|
509
|
+
num_ticks: int
|
|
510
|
+
The number of ticks to be stored for each group per each symbol.
|
|
511
|
+
|
|
512
|
+
Default: `1`
|
|
513
|
+
reread_prevention_level: int
|
|
514
|
+
For better performance we do not use synchronization mechanisms between the snapshot writer[s] and reader[s].
|
|
515
|
+
That is why when the writer submits ticks for some symbol very quickly the reader may fail to read
|
|
516
|
+
those ticks, and it will keep trying to reread them until it succeeds.
|
|
517
|
+
The ``reread_prevention_level`` parameter addresses this problem.
|
|
518
|
+
The higher the reread prevention level the higher the chance for the reader to read ticks successfully.
|
|
519
|
+
But high prevention level also means high memory utilization, that is why it is recommended to keep
|
|
520
|
+
the value of this parameter unchanged until you get an error about inability of the reader to read the snapshot
|
|
521
|
+
due to fast writer.
|
|
522
|
+
|
|
523
|
+
Default: `1`
|
|
524
|
+
group_by: list of str, :py:class:`~onetick.py.Column`, optional
|
|
525
|
+
When specified, the EP will keep the last **n** ticks of each group for each symbol;
|
|
526
|
+
otherwise it will just keep the last **n** ticks of the input time series.
|
|
527
|
+
The group is a list of input ticks with the same values in the specified fields.
|
|
528
|
+
expected_groups_per_symbol: int
|
|
529
|
+
The number of expected groups of ticks for each time series.
|
|
530
|
+
The specified value is used only when ``group_by`` fields are specified,
|
|
531
|
+
otherwise it is ignored, and we assume that the number of expected groups is 1.
|
|
532
|
+
The number hints the EP to allocate memory for such number of tick groups each time
|
|
533
|
+
a new group of ticks is going to be created and no free memory is left.
|
|
534
|
+
|
|
535
|
+
Default: `10`
|
|
536
|
+
keep_snapshot_after_query: bool
|
|
537
|
+
If the snapshot is saved in process memory and this parameter is set, the saved snapshot continues to live
|
|
538
|
+
after the query ends. If this parameter is not set, the snapshot is removed as soon as the query finishes and
|
|
539
|
+
its name is released for saving new snapshots with the same name.
|
|
540
|
+
This parameter is ignored if the snapshot is saved in the memory mapped file.
|
|
541
|
+
|
|
542
|
+
Default: `False`
|
|
543
|
+
allow_concurrent_writers: bool
|
|
544
|
+
If this parameter is ``True`` multiple saver queries can write to the same snapshot contemporaneously.
|
|
545
|
+
But different writers should write to different time series.
|
|
546
|
+
Also, saver queries should run inside the same process (i.e., different tick servers or loaders with otq
|
|
547
|
+
transformers cannot write to the same ``memory_mapped_file`` snapshot concurrently).
|
|
548
|
+
|
|
549
|
+
Default: `False`
|
|
550
|
+
remove_snapshot_upon_start: bool, optional
|
|
551
|
+
If this parameter is ``True`` the snapshot will be removed at the beginning of the query the next time
|
|
552
|
+
``save_snapshot`` is called for the same snapshot. If the parameter is ``False`` the snapshot
|
|
553
|
+
with the specified name will be appended to upon the next run of ``save_snapshot``.
|
|
554
|
+
|
|
555
|
+
If you'll leave this parameter as ``None``, it will be equal to setting this parameter to ``NOT_SET`` in EP.
|
|
556
|
+
``NOT_SET`` option operates in the same way as ``True`` for ``memory`` snapshots or ``False``
|
|
557
|
+
for ``memory_mapped_file`` snapshots.
|
|
558
|
+
|
|
559
|
+
Default: None (``NOT_SET``)
|
|
560
|
+
inplace: bool
|
|
561
|
+
A flag controls whether operation should be applied inplace.
|
|
562
|
+
If ``inplace=True``, then it returns nothing. Otherwise method
|
|
563
|
+
returns a new modified object.
|
|
564
|
+
|
|
565
|
+
See also
|
|
566
|
+
--------
|
|
567
|
+
| **SAVE_SNAPSHOT** OneTick event processor
|
|
568
|
+
| :py:class:`onetick.py.ReadSnapshot`
|
|
569
|
+
| :py:class:`onetick.py.ShowSnapshotList`
|
|
570
|
+
| :py:class:`onetick.py.FindSnapshotSymbols`
|
|
571
|
+
| :py:meth:`onetick.py.Source.join_with_snapshot`
|
|
572
|
+
|
|
573
|
+
Examples
|
|
574
|
+
--------
|
|
575
|
+
Save ticks to a snapshot in a memory:
|
|
576
|
+
|
|
577
|
+
>>> src = otp.Ticks(X=[1, 2, 3, 4, 5])
|
|
578
|
+
>>> src = src.save_snapshot(snapshot_name='some_snapshot') # doctest: +SKIP
|
|
579
|
+
>>> otp.run(src) # doctest: +SKIP
|
|
580
|
+
|
|
581
|
+
If you want to use snapshot, stored in memory, after query, use parameter ``keep_snapshot_after_query``:
|
|
582
|
+
|
|
583
|
+
>>> src = src.save_snapshot(snapshot_name='some_snapshot', keep_snapshot_after_query=True) # doctest: +SKIP
|
|
584
|
+
|
|
585
|
+
Snapshot will be associated with default database. You can set database via ``database`` parameter:
|
|
586
|
+
|
|
587
|
+
>>> src = src.save_snapshot(
|
|
588
|
+
... snapshot_name='some_snapshot', database='SOME_DATABASE', keep_snapshot_after_query=True
|
|
589
|
+
... ) # doctest: +SKIP
|
|
590
|
+
>>> otp.run(src) # doctest: +SKIP
|
|
591
|
+
>>>
|
|
592
|
+
>>> src = otp.ShowSnapshotList() # doctest: +SKIP
|
|
593
|
+
>>> otp.run(src) # doctest: +SKIP
|
|
594
|
+
Time SNAPSHOT_NAME STORAGE_TYPE DB_NAME
|
|
595
|
+
0 2003-12-01 some_snapshot MEMORY SOME_DATABASE
|
|
596
|
+
|
|
597
|
+
By default, only one last tick per group, if it set, or from all ticks per symbol is saved.
|
|
598
|
+
You can change this number by setting ``num_ticks`` parameter:
|
|
599
|
+
|
|
600
|
+
>>> src = src.save_snapshot(snapshot_name='some_snapshot', num_ticks=100) # doctest: +SKIP
|
|
601
|
+
|
|
602
|
+
Setting symbol name for every tick in snapshot from source field:
|
|
603
|
+
|
|
604
|
+
>>> src = otp.Ticks(X=[1, 2, 3], SYMBOL_FIELD=['A', 'B', 'C'])
|
|
605
|
+
>>> src = src.save_snapshot(
|
|
606
|
+
... snapshot_name='some_snapshot', symbol_name_field='SYMBOL_FIELD', keep_snapshot_after_query=True,
|
|
607
|
+
... ) # doctest: +SKIP
|
|
608
|
+
>>> otp.run(src) # doctest: +SKIP
|
|
609
|
+
>>>
|
|
610
|
+
>>> src = otp.FindSnapshotSymbols(snapshot_name='some_snapshot') # doctest: +SKIP
|
|
611
|
+
>>> otp.run(src) # doctest: +SKIP
|
|
612
|
+
Time SYMBOL_NAME
|
|
613
|
+
0 2003-12-01 DEMO_L1::A
|
|
614
|
+
1 2003-12-01 DEMO_L1::B
|
|
615
|
+
2 2003-12-01 DEMO_L1::C
|
|
616
|
+
|
|
617
|
+
Group ticks by column ``X`` and keep last 2 ticks from each group:
|
|
618
|
+
|
|
619
|
+
>>> src = otp.Ticks(X=[0, 0, 0, 1, 1, 1], Y=[1, 2, 3, 4, 5, 6])
|
|
620
|
+
>>> src = src.save_snapshot(
|
|
621
|
+
... snapshot_name='some_snapshot', group_by=[src['X']], num_ticks=2, keep_snapshot_after_query=True,
|
|
622
|
+
... ) # doctest: +SKIP
|
|
623
|
+
>>> otp.run(src) # doctest: +SKIP
|
|
624
|
+
>>>
|
|
625
|
+
>>> src = otp.ReadSnapshot(snapshot_name='some_snapshot') # doctest: +SKIP
|
|
626
|
+
>>> otp.run(src) # doctest: +SKIP
|
|
627
|
+
Time X Y TICK_TIME
|
|
628
|
+
0 2003-12-01 0 2 2003-12-01 00:00:00.001
|
|
629
|
+
1 2003-12-01 0 3 2003-12-01 00:00:00.002
|
|
630
|
+
2 2003-12-01 1 5 2003-12-01 00:00:00.004
|
|
631
|
+
3 2003-12-01 1 6 2003-12-01 00:00:00.005
|
|
632
|
+
|
|
633
|
+
"""
|
|
634
|
+
kwargs = {}
|
|
635
|
+
|
|
636
|
+
if not hasattr(otq, "SaveSnapshot"):
|
|
637
|
+
raise RuntimeError("Current version of OneTick doesn't support SAVE_SNAPSHOT EP")
|
|
638
|
+
|
|
639
|
+
if snapshot_storage not in ['memory', 'memory_mapped_file']:
|
|
640
|
+
raise ValueError('`snapshot_storage` must be one of "memory", "memory_mapped_file"')
|
|
641
|
+
|
|
642
|
+
if isinstance(symbol_name_field, _Column):
|
|
643
|
+
symbol_name_field = str(symbol_name_field)
|
|
644
|
+
if symbol_name_field and symbol_name_field not in self.schema:
|
|
645
|
+
raise ValueError(f'Field "{symbol_name_field}" passed as `symbol_name_field` parameter is not in schema.')
|
|
646
|
+
|
|
647
|
+
is_database_param_supported = is_save_snapshot_database_parameter_supported()
|
|
648
|
+
|
|
649
|
+
if database:
|
|
650
|
+
if not is_database_param_supported:
|
|
651
|
+
raise RuntimeError("Current version of OneTick doesn't support `database` parameter on SAVE_SNAPSHOT EP")
|
|
652
|
+
|
|
653
|
+
kwargs['database'] = database
|
|
654
|
+
|
|
655
|
+
if symbol_name_field is None:
|
|
656
|
+
symbol_name_field = ''
|
|
657
|
+
|
|
658
|
+
if group_by is None:
|
|
659
|
+
group_by = []
|
|
660
|
+
|
|
661
|
+
if not isinstance(group_by, list):
|
|
662
|
+
raise ValueError('`group_by` must be a list')
|
|
663
|
+
|
|
664
|
+
result_group_by = []
|
|
665
|
+
|
|
666
|
+
for column in group_by:
|
|
667
|
+
item = column
|
|
668
|
+
if isinstance(column, _Column):
|
|
669
|
+
item = str(column)
|
|
670
|
+
|
|
671
|
+
if item not in self.schema:
|
|
672
|
+
raise ValueError(f'Field "{item}" passed as `group_by` parameter is not in schema.')
|
|
673
|
+
|
|
674
|
+
result_group_by.append(item)
|
|
675
|
+
|
|
676
|
+
snapshot_storage = snapshot_storage.upper()
|
|
677
|
+
|
|
678
|
+
if remove_snapshot_upon_start is None:
|
|
679
|
+
remove_snapshot_upon_start = 'NOT_SET'
|
|
680
|
+
|
|
681
|
+
# clear schema
|
|
682
|
+
self.schema.set()
|
|
683
|
+
|
|
684
|
+
self.sink(
|
|
685
|
+
otq.SaveSnapshot(
|
|
686
|
+
snapshot_name=snapshot_name,
|
|
687
|
+
snapshot_storage=snapshot_storage,
|
|
688
|
+
default_db=default_db,
|
|
689
|
+
symbol_name_field=symbol_name_field,
|
|
690
|
+
expected_symbols_per_time_series=expected_symbols_per_time_series,
|
|
691
|
+
num_ticks=num_ticks,
|
|
692
|
+
reread_prevention_level=reread_prevention_level,
|
|
693
|
+
group_by=','.join(result_group_by),
|
|
694
|
+
expected_groups_per_symbol=expected_groups_per_symbol,
|
|
695
|
+
keep_snapshot_after_query=keep_snapshot_after_query,
|
|
696
|
+
allow_concurrent_writers=allow_concurrent_writers,
|
|
697
|
+
remove_snapshot_upon_start=remove_snapshot_upon_start,
|
|
698
|
+
**kwargs,
|
|
699
|
+
)
|
|
700
|
+
)
|
|
701
|
+
|
|
702
|
+
return self
|