singlestoredb 1.12.1-cp38-abi3-macosx_10_9_universal2.whl → 1.12.2-cp38-abi3-macosx_10_9_universal2.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of singlestoredb might be problematic.

Binary file changed (contents not shown)
singlestoredb/__init__.py CHANGED
@@ -13,7 +13,7 @@ Examples
 
 """
 
-__version__ = '1.12.1'
+__version__ = '1.12.2'
 
 from typing import Any
 
@@ -199,59 +199,65 @@ def get_deployment(
     """
     manager = get_workspace_manager()
 
+    #
+    # Search for deployment by name
+    #
     deployment_name = params.get('deployment_name') or \
         (params.get('in_deployment') or {}).get('deployment_name') or \
         (params.get('group') or {}).get('deployment_name') or \
         ((params.get('in') or {}).get('in_group') or {}).get('deployment_name') or \
         ((params.get('in') or {}).get('in_deployment') or {}).get('deployment_name')
+
     if deployment_name:
+        # Standard workspace group
         workspace_groups = [
             x for x in manager.workspace_groups
             if x.name == deployment_name
         ]
 
-        starter_workspaces = []
-        if not workspace_groups:
-            filtered_starter_workspaces = [
-                x for x in manager.starter_workspaces
-                if x.name == deployment_name
-            ]
-
-            if not filtered_starter_workspaces:
-                raise KeyError(
-                    f'no deployment found with name: {deployment_name}',
-                )
-
-            starter_workspaces = filtered_starter_workspaces
+        if len(workspace_groups) == 1:
+            return workspace_groups[0]
 
-        if len(workspace_groups) > 1:
+        elif len(workspace_groups) > 1:
             ids = ', '.join(x.id for x in workspace_groups)
             raise ValueError(
                 f'more than one workspace group with given name was found: {ids}',
             )
 
-        if len(starter_workspaces) > 1:
+        # Starter workspace
+        starter_workspaces = [
+            x for x in manager.starter_workspaces
+            if x.name == deployment_name
+        ]
+
+        if len(starter_workspaces) == 1:
+            return starter_workspaces[0]
+
+        elif len(starter_workspaces) > 1:
             ids = ', '.join(x.id for x in starter_workspaces)
             raise ValueError(
                 f'more than one starter workspace with given name was found: {ids}',
             )
 
-        if workspace_groups:
-            return workspace_groups[0]
-        else:
-            return starter_workspaces[0]
+        raise KeyError(f'no deployment found with name: {deployment_name}')
 
+    #
+    # Search for deployment by ID
+    #
     deployment_id = params.get('deployment_id') or \
         (params.get('in_deployment') or {}).get('deployment_id') or \
         (params.get('group') or {}).get('deployment_id') or \
         ((params.get('in') or {}).get('in_group') or {}).get('deployment_id') or \
         ((params.get('in') or {}).get('in_deployment') or {}).get('deployment_id')
+
     if deployment_id:
         try:
+            # Standard workspace group
             return manager.get_workspace_group(deployment_id)
         except ManagementError as exc:
             if exc.errno == 404:
                 try:
+                    # Starter workspace
                     return manager.get_starter_workspace(deployment_id)
                 except ManagementError as exc:
                     if exc.errno == 404:
@@ -260,6 +266,7 @@ def get_deployment(
                     else:
                         raise
 
+    # Use workspace group from environment
     if os.environ.get('SINGLESTOREDB_WORKSPACE_GROUP'):
         try:
             return manager.get_workspace_group(
@@ -273,6 +280,7 @@ def get_deployment(
                 )
             raise
 
+    # Use cluster from environment
     if os.environ.get('SINGLESTOREDB_CLUSTER'):
         try:
             return manager.get_starter_workspace(
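The hunks above change how get_deployment resolves a deployment referenced by name: a uniquely named workspace group wins, then a uniquely named starter workspace, and a KeyError is raised only when neither matches. A minimal sketch of that lookup order (a paraphrase of the diff, not a quote of the package's internal helper; the function name is illustrative only):

    def resolve_deployment_by_name(manager, deployment_name):
        # Prefer a uniquely named workspace group.
        groups = [x for x in manager.workspace_groups if x.name == deployment_name]
        if len(groups) == 1:
            return groups[0]
        if len(groups) > 1:
            raise ValueError('more than one workspace group with given name was found')
        # Fall back to a uniquely named starter workspace.
        starters = [x for x in manager.starter_workspaces if x.name == deployment_name]
        if len(starters) == 1:
            return starters[0]
        if len(starters) > 1:
            raise ValueError('more than one starter workspace with given name was found')
        # Only fail when neither kind of deployment matches.
        raise KeyError(f'no deployment found with name: {deployment_name}')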
@@ -348,6 +348,7 @@ class FilesObjectBytesReader(io.BytesIO):
 
 
 class FileLocation(ABC):
+
     @abstractmethod
     def open(
         self,
@@ -4,6 +4,7 @@
 import os
 import random
 import secrets
+import tempfile
 import time
 import unittest
 from typing import Any
@@ -714,3 +715,810 @@ class TestJobsFusion(unittest.TestCase):
         res = out[0]
         assert res[0] == job_id
         assert res[1] == 1
+
+
+@pytest.mark.management
+class TestStageFusion(unittest.TestCase):
+
+    id: str = secrets.token_hex(8)
+    dbname: str = 'information_schema'
+    manager: None
+    workspace_group: None
+    workspace_group_2: None
+
+    @classmethod
+    def setUpClass(cls):
+        cls.manager = s2.manage_workspaces()
+        us_regions = [x for x in cls.manager.regions if x.name.startswith('US')]
+        cls.workspace_group = cls.manager.create_workspace_group(
+            f'Stage Fusion Testing 1 {cls.id}',
+            region=random.choice(us_regions),
+            firewall_ranges=[],
+        )
+        cls.workspace_group_2 = cls.manager.create_workspace_group(
+            f'Stage Fusion Testing 2 {cls.id}',
+            region=random.choice(us_regions),
+            firewall_ranges=[],
+        )
+        # Wait for both workspace groups to start
+        time.sleep(5)
+
+        os.environ['SINGLESTOREDB_DEFAULT_DATABASE'] = 'information_schema'
+        os.environ['SINGLESTOREDB_WORKSPACE_GROUP'] = cls.workspace_group.id
+
+    @classmethod
+    def tearDownClass(cls):
+        if cls.workspace_group is not None:
+            cls.workspace_group.terminate(force=True)
+        if cls.workspace_group_2 is not None:
+            cls.workspace_group_2.terminate(force=True)
+        cls.manager = None
+        cls.workspace_group = None
+        cls.workspace_group_2 = None
+        cls.workspace = None
+        cls.workspace_2 = None
+        if os.environ.get('SINGLESTOREDB_WORKSPACE', None) is not None:
+            del os.environ['SINGLESTOREDB_WORKSPACE']
+        if os.environ.get('SINGLESTOREDB_WORKSPACE_GROUP', None) is not None:
+            del os.environ['SINGLESTOREDB_WORKSPACE_GROUP']
+        if os.environ.get('SINGLESTOREDB_DEFAULT_DATABASE', None) is not None:
+            del os.environ['SINGLESTOREDB_DEFAULT_DATABASE']
+
+    def setUp(self):
+        self.enabled = os.environ.get('SINGLESTOREDB_FUSION_ENABLED')
+        os.environ['SINGLESTOREDB_FUSION_ENABLED'] = '1'
+        self.conn = s2.connect(database=type(self).dbname, local_infile=True)
+        self.cur = self.conn.cursor()
+
+    def tearDown(self):
+        self._clear_stage()
+
+        if self.enabled:
+            os.environ['SINGLESTOREDB_FUSION_ENABLED'] = self.enabled
+        else:
+            del os.environ['SINGLESTOREDB_FUSION_ENABLED']
+
+        try:
+            if self.cur is not None:
+                self.cur.close()
+        except Exception:
+            # traceback.print_exc()
+            pass
+
+        try:
+            if self.conn is not None:
+                self.conn.close()
+        except Exception:
+            # traceback.print_exc()
+            pass
+
+    def _clear_stage(self):
+        if self.workspace_group is not None:
+            self.cur.execute(f'''
+                show stage files
+                in group id '{self.workspace_group.id}' recursive
+            ''')
+            files = list(self.cur)
+            folders = []
+            for file in files:
+                if file[0].endswith('/'):
+                    folders.append(file)
+                    continue
+                self.cur.execute(f'''
+                    drop stage file '{file[0]}'
+                    in group id '{self.workspace_group.id}'
+                ''')
+            for folder in folders:
+                self.cur.execute(f'''
+                    drop stage folder '{folder[0]}'
+                    in group id '{self.workspace_group.id}'
+                ''')
+
+        if self.workspace_group_2 is not None:
+            self.cur.execute(f'''
+                show stage files
+                in group id '{self.workspace_group_2.id}' recursive
+            ''')
+            files = list(self.cur)
+            folders = []
+            for file in files:
+                if file[0].endswith('/'):
+                    folders.append(file)
+                    continue
+                self.cur.execute(f'''
+                    drop stage file '{file[0]}'
+                    in group id '{self.workspace_group_2.id}'
+                ''')
+            for folder in folders:
+                self.cur.execute(f'''
+                    drop stage folder '{folder[0]}'
+                    in group id '{self.workspace_group_2.id}'
+                ''')
+
+    def test_show_stage(self):
+        test2_sql = os.path.join(os.path.dirname(__file__), 'test2.sql')
+
+        # Should be empty
+        self.cur.execute('''
+            show stage files
+        ''')
+        files = list(self.cur)
+        assert len(files) == 0
+
+        # Copy files to stage
+        self.cur.execute(
+            f'upload file to stage "new_test_1.sql" from "{test2_sql}"',
+        )
+        self.cur.execute('create stage folder "subdir1"')
+        self.cur.execute(
+            f'upload file to stage "subdir1/new_test_2.sql" from "{test2_sql}"',
+        )
+        self.cur.execute(
+            f'upload file to stage "subdir1/new_test_3.sql" from "{test2_sql}"',
+        )
+        self.cur.execute('create stage folder "subdir2"')
+        self.cur.execute(
+            f'upload file to stage "subdir2/new_test_4.sql" from "{test2_sql}"',
+        )
+        self.cur.execute(
+            f'upload file to stage "subdir2/new_test_5.sql" from "{test2_sql}"',
+        )
+
+        # Make sure files are there
+        self.cur.execute('''
+            show stage files recursive
+        ''')
+        files = list(self.cur)
+        assert len(files) == 7
+        assert list(sorted(x[0] for x in files)) == [
+            'new_test_1.sql',
+            'subdir1/',
+            'subdir1/new_test_2.sql',
+            'subdir1/new_test_3.sql',
+            'subdir2/',
+            'subdir2/new_test_4.sql',
+            'subdir2/new_test_5.sql',
+        ]
+
+        # Do non-recursive listing
+        self.cur.execute('''
+            show stage files
+        ''')
+        files = list(self.cur)
+        assert len(files) == 3
+        assert list(sorted(x[0] for x in files)) == [
+            'new_test_1.sql',
+            'subdir1/',
+            'subdir2/',
+        ]
+
+        # List files in specific workspace group
+        self.cur.execute(f'''
+            show stage files in group id '{self.workspace_group.id}'
+        ''')
+        files = list(self.cur)
+        assert len(files) == 3
+        assert list(sorted(x[0] for x in files)) == [
+            'new_test_1.sql',
+            'subdir1/',
+            'subdir2/',
+        ]
+
+        self.cur.execute(f'''
+            show stage files in id '{self.workspace_group.id}'
+        ''')
+        files = list(self.cur)
+        assert len(files) == 3
+        assert list(sorted(x[0] for x in files)) == [
+            'new_test_1.sql',
+            'subdir1/',
+            'subdir2/',
+        ]
+
+        self.cur.execute(f'''
+            show stage files in group '{self.workspace_group.name}'
+        ''')
+        files = list(self.cur)
+        assert len(files) == 3
+        assert list(sorted(x[0] for x in files)) == [
+            'new_test_1.sql',
+            'subdir1/',
+            'subdir2/',
+        ]
+
+        self.cur.execute(f'''
+            show stage files in '{self.workspace_group.name}'
+        ''')
+        files = list(self.cur)
+        assert len(files) == 3
+        assert list(sorted(x[0] for x in files)) == [
+            'new_test_1.sql',
+            'subdir1/',
+            'subdir2/',
+        ]
+
+        # Check other workspace group
+        self.cur.execute(f'''
+            show stage files in group '{self.workspace_group_2.name}'
+        ''')
+        files = list(self.cur)
+        assert len(files) == 0
+
+        # Limit results
+        self.cur.execute('''
+            show stage files recursive limit 5
+        ''')
+        files = list(self.cur)
+        assert len(files) == 5
+        assert list(sorted(x[0] for x in files)) == [
+            'new_test_1.sql',
+            'subdir1/',
+            'subdir1/new_test_2.sql',
+            'subdir1/new_test_3.sql',
+            'subdir2/',
+        ]
+
+        # Order by type and name
+        self.cur.execute('''
+            show stage files order by type, name recursive extended
+        ''')
+        files = list(self.cur)
+        assert len(files) == 7
+        assert list(x[0] for x in files) == [
+            'subdir1/',
+            'subdir2/',
+            'new_test_1.sql',
+            'subdir1/new_test_2.sql',
+            'subdir1/new_test_3.sql',
+            'subdir2/new_test_4.sql',
+            'subdir2/new_test_5.sql',
+        ]
+
+        # Order by type and name descending
+        self.cur.execute('''
+            show stage files order by type desc, name desc recursive extended
+        ''')
+        files = list(self.cur)
+        assert len(files) == 7
+        assert list(x[0] for x in files) == [
+            'subdir2/new_test_5.sql',
+            'subdir2/new_test_4.sql',
+            'subdir1/new_test_3.sql',
+            'subdir1/new_test_2.sql',
+            'new_test_1.sql',
+            'subdir2/',
+            'subdir1/',
+        ]
+
+        # List at specific path
+        self.cur.execute('''
+            show stage files at 'subdir2/' recursive
+        ''')
+        files = list(self.cur)
+        assert len(files) == 2
+        assert list(sorted(x[0] for x in files)) == [
+            'new_test_4.sql',
+            'new_test_5.sql',
+        ]
+
+        # LIKE clause
+        self.cur.execute('''
+            show stage files like '%_4.%' recursive
+        ''')
+        files = list(self.cur)
+        assert len(files) == 1
+        assert list(sorted(x[0] for x in files)) == [
+            'subdir2/new_test_4.sql',
+        ]
+
+    def test_download_stage(self):
+        test2_sql = os.path.join(os.path.dirname(__file__), 'test2.sql')
+
+        # Should be empty
+        self.cur.execute('''
+            show stage files
+        ''')
+        files = list(self.cur)
+        assert len(files) == 0
+
+        # Copy file to stage 1
+        self.cur.execute(f'''
+            upload file to stage 'dl_test.sql' from '{test2_sql}'
+        ''')
+
+        self.cur.execute('''
+            show stage files
+        ''')
+        files = list(self.cur)
+        assert len(files) == 1
+        assert list(sorted(x[0] for x in files)) == ['dl_test.sql']
+
+        # Copy file to stage 2
+        self.cur.execute(f'''
+            upload file to stage 'dl_test2.sql'
+            in group '{self.workspace_group_2.name}'
+            from '{test2_sql}'
+        ''')
+
+        # Make sure only one file in stage 2
+        self.cur.execute(f'''
+            show stage files in group '{self.workspace_group_2.name}'
+        ''')
+        files = list(self.cur)
+        assert len(files) == 1
+        assert list(sorted(x[0] for x in files)) == ['dl_test2.sql']
+
+        # Download file from stage 1
+        with tempfile.TemporaryDirectory() as tmpdir:
+            self.cur.execute(f'''
+                download stage file 'dl_test.sql' to '{tmpdir}/dl_test.sql'
+            ''')
+            with open(os.path.join(tmpdir, 'dl_test.sql'), 'r') as dl_file:
+                assert dl_file.read() == open(test2_sql, 'r').read()
+
+        # Download file from stage 2
+        with tempfile.TemporaryDirectory() as tmpdir:
+            self.cur.execute(f'''
+                download stage file 'dl_test2.sql'
+                in group '{self.workspace_group_2.name}'
+                to '{tmpdir}/dl_test2.sql'
+            ''')
+            with open(os.path.join(tmpdir, 'dl_test2.sql'), 'r') as dl_file:
+                assert dl_file.read() == open(test2_sql, 'r').read()
+
+    def test_stage_multi_wg_operations(self):
+        test_sql = os.path.join(os.path.dirname(__file__), 'test.sql')
+        test2_sql = os.path.join(os.path.dirname(__file__), 'test2.sql')
+
+        # Should be empty
+        self.cur.execute('''
+            show stage files
+        ''')
+        files = list(self.cur)
+        assert len(files) == 0
+
+        # Copy file to stage 1
+        self.cur.execute(f'''
+            upload file to stage 'new_test.sql' from '{test_sql}'
+        ''')
+
+        self.cur.execute('''
+            show stage files
+        ''')
+        files = list(self.cur)
+        assert len(files) == 1
+
+        # Copy file to stage 2
+        self.cur.execute(f'''
+            upload file to stage 'new_test2.sql'
+            in group '{self.workspace_group_2.name}'
+            from '{test2_sql}'
+        ''')
+
+        # Make sure only one file in stage 1
+        self.cur.execute('''
+            show stage files
+        ''')
+        files = list(self.cur)
+        assert len(files) == 1
+        assert files[0][0] == 'new_test.sql'
+
+        # Make sure only one file in stage 2
+        self.cur.execute(f'''
+            show stage files in group '{self.workspace_group_2.name}' recursive
+        ''')
+        files = list(self.cur)
+        assert len(files) == 1
+        assert list(sorted(x[0] for x in files)) == ['new_test2.sql']
+
+        # Make sure only one file in stage 2 (using IN)
+        self.cur.execute(f'''
+            show stage files in '{self.workspace_group_2.name}' recursive
+        ''')
+        files = list(self.cur)
+        assert len(files) == 1
+        assert list(sorted(x[0] for x in files)) == ['new_test2.sql']
+
+        # Make subdir
+        self.cur.execute(f'''
+            create stage folder 'data' in group '{self.workspace_group_2.name}'
+        ''')
+
+        # Upload file using workspace ID
+        self.cur.execute(f'''
+            upload file to stage 'data/new_test2_sub.sql'
+            in group id '{self.workspace_group_2.id}'
+            from '{test2_sql}'
+        ''')
+
+        # Make sure only one file in stage 1
+        self.cur.execute('''
+            show stage files
+        ''')
+        files = list(self.cur)
+        assert len(files) == 1
+        assert files[0][0] == 'new_test.sql'
+
+        # Make sure two files in stage 2
+        self.cur.execute(f'''
+            show stage files in group id '{self.workspace_group_2.id}' recursive
+        ''')
+        files = list(self.cur)
+        assert len(files) == 3
+        assert list(sorted(x[0] for x in files)) == \
+            ['data/', 'data/new_test2_sub.sql', 'new_test2.sql']
+
+        # Test overwrite
+        with self.assertRaises(OSError):
+            self.cur.execute(f'''
+                upload file to stage 'data/new_test2_sub.sql'
+                in group id '{self.workspace_group_2.id}'
+                from '{test2_sql}'
+            ''')
+
+        self.cur.execute(f'''
+            upload file to stage 'data/new_test2_sub.sql'
+            in group id '{self.workspace_group_2.id}'
+            from '{test2_sql}' overwrite
+        ''')
+
+        # Make sure two files in stage 2
+        self.cur.execute(f'''
+            show stage files in group id '{self.workspace_group_2.id}' recursive
+        ''')
+        files = list(self.cur)
+        assert len(files) == 3
+        assert list(sorted(x[0] for x in files)) == \
+            ['data/', 'data/new_test2_sub.sql', 'new_test2.sql']
+
+        # Test LIKE clause
+        self.cur.execute(f'''
+            show stage files
+            in group id '{self.workspace_group_2.id}'
+            like '%_sub%' recursive
+        ''')
+        files = list(self.cur)
+        assert len(files) == 1
+        assert list(sorted(x[0] for x in files)) == ['data/new_test2_sub.sql']
+
+        # Drop file from default stage
+        self.cur.execute('''
+            drop stage file 'new_test.sql'
+        ''')
+
+        # Make sure no files in stage 1
+        self.cur.execute('''
+            show stage files
+        ''')
+        files = list(self.cur)
+        assert len(files) == 0
+
+        # Make sure two files in stage 2
+        self.cur.execute(f'''
+            show stage files in group id '{self.workspace_group_2.id}' recursive
+        ''')
+        files = list(self.cur)
+        assert len(files) == 3
+        assert list(sorted(x[0] for x in files)) == \
+            ['data/', 'data/new_test2_sub.sql', 'new_test2.sql']
+
+        # Attempt to drop directory from stage 2
+        with self.assertRaises(OSError):
+            self.cur.execute(f'''
+                drop stage folder 'data'
+                in group id '{self.workspace_group_2.id}'
+            ''')
+
+        self.cur.execute(f'''
+            drop stage file 'data/new_test2_sub.sql'
+            in group id '{self.workspace_group_2.id}'
+        ''')
+
+        # Make sure one file and one directory in stage 2
+        self.cur.execute(f'''
+            show stage files in group id '{self.workspace_group_2.id}' recursive
+        ''')
+        files = list(self.cur)
+        assert len(files) == 2
+        assert list(sorted(x[0] for x in files)) == ['data/', 'new_test2.sql']
+
+        # Drop stage folder from stage 2
+        self.cur.execute(f'''
+            drop stage folder 'data'
+            in group id '{self.workspace_group_2.id}'
+        ''')
+
+        # Make sure one file in stage 2
+        self.cur.execute(f'''
+            show stage files in group id '{self.workspace_group_2.id}' recursive
+        ''')
+        files = list(self.cur)
+        assert len(files) == 1
+        assert list(sorted(x[0] for x in files)) == ['new_test2.sql']
+
+        # Drop last file
+        self.cur.execute(f'''
+            drop stage file 'new_test2.sql'
+            in group id '{self.workspace_group_2.id}'
+        ''')
+
+        # Make sure no files in stage 2
+        self.cur.execute(f'''
+            show stage files in group id '{self.workspace_group_2.id}' recursive
+        ''')
+        files = list(self.cur)
+        assert len(files) == 0
+
+
+@pytest.mark.management
+class TestFilesFusion(unittest.TestCase):
+
+    id: str = secrets.token_hex(8)
+    dbname: str = 'information_schema'
+    manager: None
+    workspace_group: None
+
+    @classmethod
+    def setUpClass(cls):
+        cls.manager = s2.manage_workspaces()
+        us_regions = [x for x in cls.manager.regions if x.name.startswith('US')]
+        cls.workspace_group = cls.manager.create_workspace_group(
+            f'Files Fusion Testing {cls.id}',
+            region=random.choice(us_regions),
+            firewall_ranges=[],
+        )
+        # Wait for both workspace groups to start
+        time.sleep(5)
+
+        os.environ['SINGLESTOREDB_DEFAULT_DATABASE'] = 'information_schema'
+        os.environ['SINGLESTOREDB_WORKSPACE_GROUP'] = cls.workspace_group.id
+
+    @classmethod
+    def tearDownClass(cls):
+        if cls.workspace_group is not None:
+            cls.workspace_group.terminate(force=True)
+        cls.manager = None
+        cls.workspace_group = None
+        cls.workspace = None
+        if os.environ.get('SINGLESTOREDB_WORKSPACE', None) is not None:
+            del os.environ['SINGLESTOREDB_WORKSPACE']
+        if os.environ.get('SINGLESTOREDB_WORKSPACE_GROUP', None) is not None:
+            del os.environ['SINGLESTOREDB_WORKSPACE_GROUP']
+        if os.environ.get('SINGLESTOREDB_DEFAULT_DATABASE', None) is not None:
+            del os.environ['SINGLESTOREDB_DEFAULT_DATABASE']
+
+    def setUp(self):
+        self.enabled = os.environ.get('SINGLESTOREDB_FUSION_ENABLED')
+        os.environ['SINGLESTOREDB_FUSION_ENABLED'] = '1'
+        self.conn = s2.connect(database=type(self).dbname, local_infile=True)
+        self.cur = self.conn.cursor()
+
+    def tearDown(self):
+        self._clear_files()
+
+        if self.enabled:
+            os.environ['SINGLESTOREDB_FUSION_ENABLED'] = self.enabled
+        else:
+            del os.environ['SINGLESTOREDB_FUSION_ENABLED']
+
+        try:
+            if self.cur is not None:
+                self.cur.close()
+        except Exception:
+            # traceback.print_exc()
+            pass
+
+        try:
+            if self.conn is not None:
+                self.conn.close()
+        except Exception:
+            # traceback.print_exc()
+            pass
+
+    def _clear_files(self):
+        cls = type(self)
+        for prefix in ['show', 'dl', 'drop']:
+            for i in range(1, 6):
+                try:
+                    self.cur.execute(
+                        f'''drop personal file "{prefix}_test_{i}_{cls.id}.ipynb"''',
+                    )
+                except (OSError, s2.ManagementError):
+                    pass
+            for i in range(1, 6):
+                try:
+                    self.cur.execute(
+                        f'''drop shared file "{prefix}_test_{i}_{cls.id}.ipynb"''',
+                    )
+                except (OSError, s2.ManagementError):
+                    pass
+
+    def test_show_personal_files(self):
+        return self._test_show_files('personal')
+
+    def test_show_shared_files(self):
+        return self._test_show_files('shared')
+
+    def _test_show_files(self, ftype):
+        cls = type(self)
+        nb = os.path.join(os.path.dirname(__file__), 'test.ipynb')
+
+        # Should be empty
+        self.cur.execute(f'''
+            show {ftype} files like 'show_%{cls.id}%'
+        ''')
+        files = list(self.cur)
+        assert len(files) == 0
+
+        # Upload files
+        self.cur.execute(
+            f'upload {ftype} file to "show_test_1_{cls.id}.ipynb" from "{nb}"',
+        )
+        self.cur.execute(
+            f'upload {ftype} file to "show_test_2_{cls.id}.ipynb" from "{nb}"',
+        )
+        self.cur.execute(
+            f'upload {ftype} file to "show_test_3_{cls.id}.ipynb" from "{nb}"',
+        )
+        self.cur.execute(
+            f'upload {ftype} file to "show_test_4_{cls.id}.ipynb" from "{nb}"',
+        )
+        self.cur.execute(
+            f'upload {ftype} file to "show_test_5_{cls.id}.ipynb" from "{nb}"',
+        )
+
+        # Make sure files are there
+        self.cur.execute(f'''
+            show {ftype} files like 'show_%{cls.id}%'
+        ''')
+        files = list(self.cur)
+        assert len(files) == 5
+        assert list(sorted(x[0] for x in files)) == [
+            f'show_test_1_{cls.id}.ipynb',
+            f'show_test_2_{cls.id}.ipynb',
+            f'show_test_3_{cls.id}.ipynb',
+            f'show_test_4_{cls.id}.ipynb',
+            f'show_test_5_{cls.id}.ipynb',
+        ]
+
+        # Test ORDER BY
+        self.cur.execute(f'''
+            show {ftype} files like 'show_%{cls.id}%' order by name desc
+        ''')
+        files = list(self.cur)
+        assert len(files) == 5
+        assert list(x[0] for x in files) == [
+            f'show_test_5_{cls.id}.ipynb',
+            f'show_test_4_{cls.id}.ipynb',
+            f'show_test_3_{cls.id}.ipynb',
+            f'show_test_2_{cls.id}.ipynb',
+            f'show_test_1_{cls.id}.ipynb',
+        ]
+
+        # Test LIMIT
+        self.cur.execute(f'''
+            show {ftype} files like 'show_%{cls.id}%' order by name desc limit 3
+        ''')
+        files = list(self.cur)
+        assert len(files) == 3
+        assert list(x[0] for x in files) == [
+            f'show_test_5_{cls.id}.ipynb',
+            f'show_test_4_{cls.id}.ipynb',
+            f'show_test_3_{cls.id}.ipynb',
+        ]
+
+        # Test EXTENDED
+        self.cur.execute(f'''
+            show {ftype} files like 'show_%{cls.id}%' extended
+        ''')
+        assert [x[0] for x in self.cur.description] == \
+            ['Name', 'Type', 'Size', 'Writable', 'CreatedAt', 'LastModifiedAt']
+
+    def test_download_personal_files(self):
+        return self._test_download_files('personal')
+
+    def test_download_shared_files(self):
+        return self._test_download_files('shared')
+
+    def _test_download_files(self, ftype):
+        cls = type(self)
+        nb = os.path.join(os.path.dirname(__file__), 'test.ipynb')
+
+        # Should be empty
+        self.cur.execute(f'''
+            show {ftype} files like 'dl_%{cls.id}%'
+        ''')
+        files = list(self.cur)
+        assert len(files) == 0
+
+        # Upload files
+        self.cur.execute(f'upload {ftype} file to "dl_test_1_{cls.id}.ipynb" from "{nb}"')
+        self.cur.execute(f'upload {ftype} file to "dl_test_2_{cls.id}.ipynb" from "{nb}"')
+
+        # Make sure files are there
+        self.cur.execute(f'''
+            show {ftype} files like 'dl_%{cls.id}%'
+        ''')
+        files = list(self.cur)
+        assert len(files) == 2
+        assert list(sorted(x[0] for x in files)) == [
+            f'dl_test_1_{cls.id}.ipynb',
+            f'dl_test_2_{cls.id}.ipynb',
+        ]
+
+        # Download files
+        with tempfile.TemporaryDirectory() as tmpdir:
+            self.cur.execute(f'''
+                download {ftype} file 'dl_test_1_{cls.id}.ipynb'
+                to '{tmpdir}/dl_test_1.ipynb'
+            ''')
+            with open(os.path.join(tmpdir, 'dl_test_1.ipynb'), 'r') as dl_file:
+                assert dl_file.read() == open(nb, 'r').read()
+
+            self.cur.execute(f'''
+                download {ftype} file 'dl_test_2_{cls.id}.ipynb'
+                to '{tmpdir}/dl_test_2.ipynb'
+            ''')
+            with open(os.path.join(tmpdir, 'dl_test_2.ipynb'), 'r') as dl_file:
+                assert dl_file.read() == open(nb, 'r').read()
+
+    def test_drop_personal_files(self):
+        return self._test_drop_files('personal')
+
+    def test_drop_shared_files(self):
+        return self._test_drop_files('shared')
+
+    def _test_drop_files(self, ftype):
+        cls = type(self)
+        nb = os.path.join(os.path.dirname(__file__), 'test.ipynb')
+
+        # Should be empty
+        self.cur.execute(f'''
+            show {ftype} files like 'drop_%{cls.id}%'
+        ''')
+        files = list(self.cur)
+        assert len(files) == 0
+
+        # Upload files
+        self.cur.execute(
+            f'upload {ftype} file to "drop_test_1_{cls.id}.ipynb" from "{nb}"',
+        )
+        self.cur.execute(
+            f'upload {ftype} file to "drop_test_2_{cls.id}.ipynb" from "{nb}"',
+        )
+
+        # Make sure files are there
+        self.cur.execute(f'''
+            show {ftype} files like 'drop_%{cls.id}%'
+        ''')
+        files = list(self.cur)
+        assert len(files) == 2
+        assert list(sorted(x[0] for x in files)) == [
+            f'drop_test_1_{cls.id}.ipynb',
+            f'drop_test_2_{cls.id}.ipynb',
+        ]
+
+        # Drop 1 file
+        self.cur.execute(f'''
+            drop {ftype} file 'drop_test_1_{cls.id}.ipynb'
+        ''')
+
+        # Make sure 1 file is there
+        self.cur.execute(f'''
+            show {ftype} files like 'drop_%{cls.id}%'
+        ''')
+        files = list(self.cur)
+        assert len(files) == 1
+        assert list(x[0] for x in files) == [f'drop_test_2_{cls.id}.ipynb']
+
+        # Drop 2nd file
+        self.cur.execute(f'''
+            drop {ftype} file 'drop_test_2_{cls.id}.ipynb'
+        ''')
+
+        # Make sure no files are there
+        self.cur.execute(f'''
+            show {ftype} files like 'drop_%{cls.id}%'
+        ''')
+        files = list(self.cur)
+        assert len(files) == 0
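The new TestStageFusion and TestFilesFusion suites exercise the Fusion SQL surface end to end: SHOW STAGE FILES, UPLOAD FILE TO STAGE, DROP STAGE FILE/FOLDER, and the personal/shared file variants. A condensed sketch of the pattern the tests follow, assuming a Management API token and a default workspace group are already configured in the environment (the file paths below are placeholders, not files shipped with the package):

    import os
    import singlestoredb as s2

    # The tests opt in to Fusion SQL handling before connecting.
    os.environ['SINGLESTOREDB_FUSION_ENABLED'] = '1'

    conn = s2.connect(database='information_schema', local_infile=True)
    cur = conn.cursor()

    # Upload a local file into the workspace group's Stage, list it, then remove it.
    cur.execute('upload file to stage "example.sql" from "/tmp/example.sql"')
    cur.execute('show stage files recursive')
    print(list(cur))
    cur.execute('drop stage file "example.sql"')

    cur.close()
    conn.close()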
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: singlestoredb
-Version: 1.12.1
+Version: 1.12.2
 Summary: Interface to the SingleStoreDB database and workspace management APIs
 Home-page: https://github.com/singlestore-labs/singlestoredb-python
 Author: SingleStore
@@ -1,15 +1,15 @@
-_singlestoredb_accel.abi3.so,sha256=hEcp6tY0Iwmx0nPwBkfSG2Yb9iWV4KUd65FPBatqYGc,206864
+_singlestoredb_accel.abi3.so,sha256=w_pN-iqjjIvBbB-M4V1NU29Ju2Om_zynPEjKFVPhb-E,206864
+singlestoredb-1.12.2.dist-info/RECORD,,
+singlestoredb-1.12.2.dist-info/LICENSE,sha256=Mlq78idURT-9G026aMYswwwnnrLcgzTLuXeAs5hjDLM,11341
+singlestoredb-1.12.2.dist-info/WHEEL,sha256=_VEguvlLpUd-c8RbFMA4yMIVNMBv2LhpxYLCEQ-Bogk,113
+singlestoredb-1.12.2.dist-info/entry_points.txt,sha256=bSLaTWB5zGjpVYPAaI46MkkDup0su-eb3uAhCNYuRV0,48
+singlestoredb-1.12.2.dist-info/top_level.txt,sha256=lA65Vf4qAMfg_s1oG3LEO90h4t1Z-SPDbRqkevI3bSY,40
+singlestoredb-1.12.2.dist-info/METADATA,sha256=bw-btFC0eX7QrakG-glHHak-eMA8OoksWrrYrSMgJkg,5636
 sqlx/magic.py,sha256=JsS9_9aBFaOt91Torm1JPN0c8qB2QmYJmNSKtbSQIY0,3509
 sqlx/__init__.py,sha256=aBYiU8DZXCogvWu3yWafOz7bZS5WWwLZXj7oL0dXGyU,85
-singlestoredb-1.12.1.dist-info/RECORD,,
-singlestoredb-1.12.1.dist-info/LICENSE,sha256=Mlq78idURT-9G026aMYswwwnnrLcgzTLuXeAs5hjDLM,11341
-singlestoredb-1.12.1.dist-info/WHEEL,sha256=_VEguvlLpUd-c8RbFMA4yMIVNMBv2LhpxYLCEQ-Bogk,113
-singlestoredb-1.12.1.dist-info/entry_points.txt,sha256=bSLaTWB5zGjpVYPAaI46MkkDup0su-eb3uAhCNYuRV0,48
-singlestoredb-1.12.1.dist-info/top_level.txt,sha256=lA65Vf4qAMfg_s1oG3LEO90h4t1Z-SPDbRqkevI3bSY,40
-singlestoredb-1.12.1.dist-info/METADATA,sha256=wYTpEPYx7WpKtU6Jr7wQjunmlrA7UnsmNMRkbL73R40,5636
 singlestoredb/auth.py,sha256=u8D9tpKzrqa4ssaHjyZnGDX1q8XBpGtuoOkTkSv7B28,7599
 singlestoredb/config.py,sha256=rlF69SiclYyKghNRckX77Ls1ZT23RhSssO1cyYBiHmA,12589
-singlestoredb/__init__.py,sha256=4htLkEL7t7jjaZ4Qym7QvHrv4P7cYZdDGtYcMpmSGow,1649
+singlestoredb/__init__.py,sha256=Ms1Jw6Pv01LlItPE5vSqQ59kqTYvN3HrSbQSN1i1qv0,1649
 singlestoredb/types.py,sha256=FIqO1A7e0Gkk7ITmIysBy-P5S--ItbMSlYvblzqGS30,9969
 singlestoredb/connection.py,sha256=0HEpjBZXLqQwOTEfveMkgej1H3Kyof47prIHvJJZtoo,45831
 singlestoredb/pytest.py,sha256=OyF3BO9mgxenifYhOihnzGk8WzCJ_zN5_mxe8XyFPOc,9074
@@ -26,13 +26,13 @@ singlestoredb/fusion/handlers/models.py,sha256=xJPIG0_GgF-VrmPoIsU2U4AsS7ytDz8JM
 singlestoredb/fusion/handlers/job.py,sha256=r0KdOD55VUDw-SymC__5Mn-fzJTZE_xcBgH-O8DYVHc,21095
 singlestoredb/fusion/handlers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 singlestoredb/fusion/handlers/export.py,sha256=3moTJeqsHkDDpitUUAE6x95JYH2rmb28MALbO4x0dcc,8981
-singlestoredb/fusion/handlers/utils.py,sha256=dg5v25kCejgqg8smnxE4vqu_G3OJzJ4jX8pm2N1z6mc,10574
+singlestoredb/fusion/handlers/utils.py,sha256=ozHOWUraoN8XGTK9JZdhv5HV8AQR8zfUd1yh1kLvUXY,10685
 singlestoredb/fusion/handlers/stage.py,sha256=kYVjbPys83kf3jX6jWwN8Ju0oEocKVZ3TIOt2HiC5Ew,14287
 singlestoredb/fusion/handlers/workspace.py,sha256=4xN2TFO4yF7KZB2Fcht7IuvoDdAT6fDfDLjixiHZN8w,27506
 singlestoredb/tests/test.sql,sha256=dfMehVCQ9wObSVTQKyQi-fRFDZeqRxV4Cj8doBCPEFM,17679
 singlestoredb/tests/test_xdict.py,sha256=fqHspoi39nbX3fIDVkkRXcd5H50xdOsSvK0bxAMQnaE,10408
 singlestoredb/tests/test_results.py,sha256=wg93sujwt-R9_eJCgSCElgAZhLDkIiAo3qPkPydOv78,6582
-singlestoredb/tests/test_fusion.py,sha256=W3aRfBeu8HBGm1CIQWFIeWUPBUlfHBCbJy8vejPHdRs,23828
+singlestoredb/tests/test_fusion.py,sha256=EH1mRwdX2Fajsq6x2l0gBhH1YhcxtvDGIKC9HJ4sDbQ,50521
 singlestoredb/tests/test_plugin.py,sha256=qpO9wmWc62VaijN1sJ97YSYIX7I7Y5C6sY-WzwrutDQ,812
 singlestoredb/tests/test_basics.py,sha256=1__lEF7FmQF4_pFi5R53TtJidtQznmQ592Ci6aDVgrc,46368
 singlestoredb/tests/test_ext_func.py,sha256=OWd-CJ1Owhx72nikSWWEF2EQFCJk7vEXZM2Oy9EbYQo,37357
@@ -56,7 +56,7 @@ singlestoredb/tests/ext_funcs/__init__.py,sha256=qZLnDI_Ck0tguVi-K-BKXDHAcC0jui3
 singlestoredb/magics/__init__.py,sha256=lZjkT3Webo9c1EQAzlRCRh6B2pckQH8uvNrrB__abcI,1210
 singlestoredb/magics/run_shared.py,sha256=SI8dCBRMaGn-xZU7dto4jsAqKBi-Ll14htUsMUSBpJM,1752
 singlestoredb/magics/run_personal.py,sha256=2f7u1T7iblxGzZurHNgNXLrPBvsvPADZKo_RD_IjYuE,1844
-singlestoredb/management/files.py,sha256=_FDIs9rtG7cW6aIkXnYReLQaFxdix2iu7Do368hOoE8,30476
+singlestoredb/management/files.py,sha256=89IhpGw9WdwxVeksavHEDMVn9wb_jxb-utZuIDqkLHw,30477
 singlestoredb/management/organization.py,sha256=hqMaM7H-naMjNbxDl_f7G_2o5TkiGKyzPhxuzDveJAw,5402
 singlestoredb/management/job.py,sha256=4-xLWzbE8odQogVVaFer80UEoTAZY1T28VZ9Ug4rbmM,24611
 singlestoredb/management/region.py,sha256=HnLcWUh7r_aLECliplCDHak4a_F3B7LOSXEYMW66qD0,1611