omnata-plugin-runtime 0.3.5__py3-none-any.whl → 0.3.6a13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- omnata_plugin_runtime/omnata_plugin.py +12 -12
- {omnata_plugin_runtime-0.3.5.dist-info → omnata_plugin_runtime-0.3.6a13.dist-info}/METADATA +1 -1
- {omnata_plugin_runtime-0.3.5.dist-info → omnata_plugin_runtime-0.3.6a13.dist-info}/RECORD +5 -5
- {omnata_plugin_runtime-0.3.5.dist-info → omnata_plugin_runtime-0.3.6a13.dist-info}/LICENSE +0 -0
- {omnata_plugin_runtime-0.3.5.dist-info → omnata_plugin_runtime-0.3.6a13.dist-info}/WHEEL +0 -0
@@ -948,7 +948,6 @@ class InboundSyncRequest(SyncRequest):
         """
         Adds some results to the queue for applying asynchronously
         """
-        # TODO: maybe also have a mechanism to apply immediately if the queued results are getting too large
         logger.info(f"Enqueueing {len(results)} results for upload")
         if stream_name is None or len(stream_name) == 0:
             raise ValueError("Stream name cannot be empty")
@@ -962,17 +961,18 @@ class InboundSyncRequest(SyncRequest):
         self._latest_states = {**current_latest, **{stream_name: new_state}}
         # if the total size of all the dataframes exceeds 200MB, apply the results immediately
         # we'll use df.memory_usage(index=True) for this
-
-
-
-
-
-
-
-
-
-        if
-
+        if self.development_mode is False:
+            # note: we want to do it for all values in self._apply_results, not just the new one
+            # so first we need to get the list of lists from the dictionary values and flatten it
+            # then we can sum the memory usage of each dataframe
+            # if the total exceeds 200MB, we apply the results immediately
+            all_df_lists:List[List[pandas.DataFrame]] = list(self._apply_results.values())
+            # flatten
+            all_dfs:List[pandas.DataFrame] = [x for sublist in all_df_lists for x in sublist]
+            # first, don't both if the count is less than 10000, since it's unlikely to be even close
+            if len(all_dfs) > 10000:
+                if sum([x.memory_usage(index=True).sum() for x in all_dfs]) > 200000000:
+                    self.apply_results_queue()
 
 
     def mark_stream_complete(self, stream_name: str):
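For context, the block added at new lines 964-975 flushes queued results early: outside development mode it flattens every queued DataFrame across all streams, and once more than 10,000 DataFrames are queued and their combined memory_usage exceeds 200MB, it calls apply_results_queue() immediately instead of waiting for the normal flush. A minimal, self-contained sketch of that size check (hypothetical function and variable names, not the plugin's actual class) could look like:

from typing import Dict, List
import pandas

# Illustrative thresholds matching the figures in the diff above
MAX_QUEUED_BYTES = 200_000_000
MIN_DATAFRAME_COUNT = 10_000

def queued_results_too_large(apply_results: Dict[str, List[pandas.DataFrame]]) -> bool:
    """Return True when the queued DataFrames should be flushed immediately."""
    # flatten the per-stream lists into one list of DataFrames
    all_dfs = [df for dfs in apply_results.values() for df in dfs]
    # skip the (relatively expensive) measurement for small queues
    if len(all_dfs) <= MIN_DATAFRAME_COUNT:
        return False
    # memory_usage(index=True) reports bytes per column plus the index;
    # summing over every DataFrame gives the total queued footprint
    total_bytes = sum(df.memory_usage(index=True).sum() for df in all_dfs)
    return total_bytes > MAX_QUEUED_BYTES

Measuring only once the queue holds more than 10,000 DataFrames keeps the per-enqueue overhead negligible in the common case, as the diff's own comment notes.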
@@ -3,10 +3,10 @@ omnata_plugin_runtime/api.py,sha256=_N5ok5LN7GDO4J9n3yduXp3tpjmhpySY__U2baiygrs,
 omnata_plugin_runtime/configuration.py,sha256=at29ExowF_T4_2U9gY0BF4IVdwC-vDytmNRHL7UCWh8,34742
 omnata_plugin_runtime/forms.py,sha256=30CJB24TqfLYNnkplZdUbeqA-P9rUIBujVKXw_S-wKY,18371
 omnata_plugin_runtime/logging.py,sha256=bn7eKoNWvtuyTk7RTwBS9UARMtqkiICtgMtzq3KA2V0,3272
-omnata_plugin_runtime/omnata_plugin.py,sha256=
+omnata_plugin_runtime/omnata_plugin.py,sha256=5VVaqZxKNNtn0jxPUygDHNmEsvXI_SQWR9btFPNF_EY,88325
 omnata_plugin_runtime/plugin_entrypoints.py,sha256=7Iqo_OmfKhnJ6jSjSABlGSRz4-E9WRkbgs-gMPTVfk0,27586
 omnata_plugin_runtime/rate_limiting.py,sha256=se6MftQI5NrVHaLb1hByPCgAESPQhkAgIG7KIU1clDU,16562
-omnata_plugin_runtime-0.3.
-omnata_plugin_runtime-0.3.
-omnata_plugin_runtime-0.3.
-omnata_plugin_runtime-0.3.
+omnata_plugin_runtime-0.3.6a13.dist-info/LICENSE,sha256=IMF9i4xIpgCADf0U-V1cuf9HBmqWQd3qtI3FSuyW4zE,26526
+omnata_plugin_runtime-0.3.6a13.dist-info/METADATA,sha256=9tuF0iQSDVyjsoQkluv4Lo_4JljUfCdLdWeBnCwuVHo,1603
+omnata_plugin_runtime-0.3.6a13.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+omnata_plugin_runtime-0.3.6a13.dist-info/RECORD,,
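For readers unfamiliar with the wheel RECORD file shown above: each line lists a path inside the wheel, its sha256 digest (urlsafe base64 without padding), and the file size in bytes; the RECORD entry for itself carries no hash or size, hence the trailing ",,". A sketch of how such an entry is produced (an illustrative helper, not part of this package):

import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    """Build a wheel RECORD line of the form path,sha256=<digest>,<size>."""
    data = Path(path).read_bytes()
    # RECORD uses urlsafe base64 of the raw sha256 digest, with '=' padding stripped
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
    return f"{path},sha256={digest},{len(data)}"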
{omnata_plugin_runtime-0.3.5.dist-info → omnata_plugin_runtime-0.3.6a13.dist-info}/LICENSE: File without changes
{omnata_plugin_runtime-0.3.5.dist-info → omnata_plugin_runtime-0.3.6a13.dist-info}/WHEEL: File without changes