singlestoredb-1.0.2-cp38-abi3-win_amd64.whl → singlestoredb-1.0.4-cp38-abi3-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of singlestoredb might be problematic.
- _singlestoredb_accel.pyd +0 -0
- singlestoredb/__init__.py +1 -1
- singlestoredb/config.py +12 -0
- singlestoredb/functions/decorator.py +0 -53
- singlestoredb/functions/ext/asgi.py +97 -19
- singlestoredb/functions/ext/mmap.py +306 -0
- singlestoredb/functions/signature.py +19 -28
- singlestoredb/management/manager.py +2 -2
- singlestoredb/management/utils.py +30 -0
- singlestoredb/management/workspace.py +209 -35
- singlestoredb/tests/test_ext_func.py +5 -3
- singlestoredb/tests/test_udf.py +0 -11
- {singlestoredb-1.0.2.dist-info → singlestoredb-1.0.4.dist-info}/METADATA +1 -1
- {singlestoredb-1.0.2.dist-info → singlestoredb-1.0.4.dist-info}/RECORD +18 -17
- {singlestoredb-1.0.2.dist-info → singlestoredb-1.0.4.dist-info}/LICENSE +0 -0
- {singlestoredb-1.0.2.dist-info → singlestoredb-1.0.4.dist-info}/WHEEL +0 -0
- {singlestoredb-1.0.2.dist-info → singlestoredb-1.0.4.dist-info}/entry_points.txt +0 -0
- {singlestoredb-1.0.2.dist-info → singlestoredb-1.0.4.dist-info}/top_level.txt +0 -0
singlestoredb/functions/signature.py
CHANGED

@@ -5,7 +5,6 @@ import numbers
 import os
 import re
 import string
-import textwrap
 import typing
 from typing import Any
 from typing import Callable
@@ -16,7 +15,6 @@ from typing import Sequence
 from typing import Tuple
 from typing import TypeVar
 from typing import Union
-from urllib.parse import urljoin

 try:
     import numpy as np
@@ -611,8 +609,11 @@ def dtype_to_sql(dtype: str, default: Any = None) -> str:

 def signature_to_sql(
     signature: Dict[str, Any],
-
+    url: Optional[str] = None,
     data_format: str = 'rowdat_1',
+    app_mode: str = 'remote',
+    link: Optional[str] = None,
+    replace: bool = False,
 ) -> str:
     '''
     Convert a dictionary function signature into SQL.
@@ -646,37 +647,27 @@ def signature_to_sql(
     host = os.environ.get('SINGLESTOREDB_EXT_HOST', '127.0.0.1')
     port = os.environ.get('SINGLESTOREDB_EXT_PORT', '8000')

-
+    if app_mode.lower() == 'remote':
+        url = url or f'https://{host}:{port}/invoke'
+    elif url is None:
+        raise ValueError('url can not be `None`')

     database = ''
     if signature.get('database'):
         database = escape_name(signature['database']) + '.'

-
+    or_replace = 'OR REPLACE ' if (bool(signature.get('replace')) or replace) else ''

-
-
-
-
-
-
-
-def func_to_env(func: Callable[..., Any]) -> str:
-    # TODO: multiple functions
-    signature = get_signature(func)
-    env_name = signature['environment']
-    replace = 'OR REPLACE ' if signature.get('replace') else ''
-    packages = ', '.join(escape_item(x, 'utf8') for x in signature.get('packages', []))
-    resources = ', '.join(escape_item(x, 'utf8') for x in signature.get('resources', []))
-    code = inspect.getsource(func)
+    link_str = ''
+    if link:
+        if not re.match(r'^[\w_]+$', link):
+            raise ValueError(f'invalid LINK name: {link}')
+        link_str = f' LINK {link}'

     return (
-        f'CREATE {
-        '
-        (
-
-        '
-        '\n BEGIN\n' +
-        textwrap.indent(code, ' ') +
-        ' END;'
+        f'CREATE {or_replace}EXTERNAL FUNCTION ' +
+        f'{database}{escape_name(signature["name"])}' +
+        '(' + ', '.join(args) + ')' + returns +
+        f' AS {app_mode.upper()} SERVICE "{url}" FORMAT {data_format.upper()}'
+        f'{link_str};'
     )
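The signature_to_sql() hunks above drop the old source-embedding path and instead build a SERVICE clause from the new url, app_mode, data_format, and optional link arguments. Below is a minimal standalone sketch of just that URL/LINK handling, using the same defaults shown in the diff; the helper name build_service_clause is illustrative and is not part of the package.

# Sketch of the new URL/LINK logic in signature_to_sql (illustrative only).
import os
import re
from typing import Optional


def build_service_clause(
    url: Optional[str] = None,
    app_mode: str = 'remote',
    data_format: str = 'rowdat_1',
    link: Optional[str] = None,
) -> str:
    host = os.environ.get('SINGLESTOREDB_EXT_HOST', '127.0.0.1')
    port = os.environ.get('SINGLESTOREDB_EXT_PORT', '8000')

    # Remote functions get a default /invoke URL; other modes must supply one.
    if app_mode.lower() == 'remote':
        url = url or f'https://{host}:{port}/invoke'
    elif url is None:
        raise ValueError('url can not be `None`')

    # LINK names are restricted to word characters, as in the diff above.
    link_str = ''
    if link:
        if not re.match(r'^[\w_]+$', link):
            raise ValueError(f'invalid LINK name: {link}')
        link_str = f' LINK {link}'

    return f' AS {app_mode.upper()} SERVICE "{url}" FORMAT {data_format.upper()}{link_str};'


print(build_service_clause(link='my_link'))
# With the default host/port:
#  AS REMOTE SERVICE "https://127.0.0.1:8000/invoke" FORMAT ROWDAT_1 LINK my_link;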
singlestoredb/management/manager.py
CHANGED

@@ -44,10 +44,10 @@ class Manager(object):
     """SingleStoreDB manager base class."""

     #: Management API version if none is specified.
-    default_version = '
+    default_version = config.get_option('management.version')

     #: Base URL if none is specified.
-    default_base_url = '
+    default_base_url = config.get_option('management.base_url')

     #: Object type
     obj_type = ''
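The Manager defaults above are no longer hard-coded; they are read from the option registry, which singlestoredb/config.py (+12 lines in this release) presumably registers. A small sketch of reading them back, using only the option names that appear in the diff:

# Option names taken from the calls in the diff above.
from singlestoredb import config

print(config.get_option('management.version'))   # default Management API version
print(config.get_option('management.base_url'))  # default Management API base URL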
singlestoredb/management/utils.py
CHANGED

@@ -9,6 +9,7 @@ from typing import Any
 from typing import Callable
 from typing import Dict
 from typing import List
+from typing import Mapping
 from typing import Optional
 from typing import SupportsIndex
 from typing import TypeVar
@@ -282,3 +283,32 @@ def camel_to_snake(s: Optional[str]) -> Optional[str]:
     if out and out[0] == '_':
         return out[1:]
     return out
+
+
+def snake_to_camel_dict(
+    s: Optional[Mapping[str, Any]],
+    cap_first: bool = False,
+) -> Optional[Dict[str, Any]]:
+    """Convert snake-case keys to camel-case keys."""
+    if s is None:
+        return None
+    out = {}
+    for k, v in s.items():
+        if isinstance(s, Mapping):
+            out[str(snake_to_camel(k))] = snake_to_camel_dict(v, cap_first=cap_first)
+        else:
+            out[str(snake_to_camel(k))] = v
+    return out
+
+
+def camel_to_snake_dict(s: Optional[Mapping[str, Any]]) -> Optional[Dict[str, Any]]:
+    """Convert camel-case keys to snake-case keys."""
+    if s is None:
+        return None
+    out = {}
+    for k, v in s.items():
+        if isinstance(s, Mapping):
+            out[str(camel_to_snake(k))] = camel_to_snake_dict(v)
+        else:
+            out[str(camel_to_snake(k))] = v
+    return out
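These two helpers convert dictionary keys between snake_case and camelCase for Management API payloads; note that the isinstance check is written against `s` rather than `v`, so every value is passed back into the recursive call. A standalone sketch of the intended key conversion (keys_to_camel is illustrative, not the library function, and the package's snake_to_camel/camel_to_snake helpers are not shown in this diff):

# Illustrative recursive key conversion, assuming simple underscore-delimited keys.
from typing import Any, Dict, Mapping, Optional


def to_camel(key: str) -> str:
    head, *rest = key.split('_')
    return head + ''.join(part.title() for part in rest)


def keys_to_camel(obj: Optional[Mapping[str, Any]]) -> Optional[Dict[str, Any]]:
    if obj is None:
        return None
    return {
        to_camel(k): keys_to_camel(v) if isinstance(v, Mapping) else v
        for k, v in obj.items()
    }


print(keys_to_camel({'idle_after_seconds': 1200, 'suspend_type': 'IDLE'}))
# {'idleAfterSeconds': 1200, 'suspendType': 'IDLE'}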
singlestoredb/management/workspace.py
CHANGED

@@ -7,8 +7,8 @@ import glob
 import io
 import os
 import re
-import socket
 import time
+from collections.abc import Mapping
 from typing import Any
 from typing import BinaryIO
 from typing import Dict
@@ -23,10 +23,12 @@ from .billing_usage import BillingUsageItem
 from .manager import Manager
 from .organization import Organization
 from .region import Region
+from .utils import camel_to_snake_dict
 from .utils import from_datetime
 from .utils import NamedList
 from .utils import PathLike
 from .utils import snake_to_camel
+from .utils import snake_to_camel_dict
 from .utils import to_datetime
 from .utils import ttl_property
 from .utils import vars_to_str
@@ -934,6 +936,12 @@ class Workspace(object):
         created_at: Union[str, datetime.datetime],
         terminated_at: Optional[Union[str, datetime.datetime]] = None,
         endpoint: Optional[str] = None,
+        auto_suspend: Optional[Dict[str, Any]] = None,
+        cache_config: Optional[int] = None,
+        deployment_type: Optional[str] = None,
+        resume_attachments: Optional[Dict[str, Any]] = None,
+        scaling_progress: Optional[int] = None,
+        last_resumed_at: Optional[str] = None,
     ):
         #: Name of the workspace
         self.name = name
@@ -963,6 +971,24 @@ class Workspace(object):
         #: Hostname (or IP address) of the workspace database server
         self.endpoint = endpoint

+        #: Current auto-suspend settings
+        self.auto_suspend = camel_to_snake_dict(auto_suspend)
+
+        #: Multiplier for the persistent cache
+        self.cache_config = cache_config
+
+        #: Deployment type of the workspace
+        self.deployment_type = deployment_type
+
+        #: Database attachments
+        self.resume_attachments = camel_to_snake_dict(resume_attachments)
+
+        #: Current progress percentage for scaling the workspace
+        self.scaling_progress = scaling_progress
+
+        #: Timestamp when workspace was last resumed
+        self.last_resumed_at = to_datetime(last_resumed_at)
+
         self._manager: Optional[WorkspaceManager] = None

     def __str__(self) -> str:
@@ -991,15 +1017,64 @@ class Workspace(object):

         """
         out = cls(
-            name=obj['name'],
+            name=obj['name'],
+            workspace_id=obj['workspaceID'],
             workspace_group=obj['workspaceGroupID'],
-            size=obj.get('size', 'Unknown'),
-
+            size=obj.get('size', 'Unknown'),
+            state=obj['state'],
+            created_at=obj['createdAt'],
+            terminated_at=obj.get('terminatedAt'),
             endpoint=obj.get('endpoint'),
+            auto_suspend=obj.get('autoSuspend'),
+            cache_config=obj.get('cacheConfig'),
+            deployment_type=obj.get('deploymentType'),
+            last_resumed_at=obj.get('lastResumedAt'),
+            resume_attachments=obj.get('resumeAttachments'),
+            scaling_progress=obj.get('scalingProgress'),
         )
         out._manager = manager
         return out

+    def update(
+        self,
+        auto_suspend: Optional[Dict[str, Any]] = None,
+        cache_config: Optional[int] = None,
+        deployment_type: Optional[str] = None,
+        size: Optional[str] = None,
+    ) -> None:
+        """
+        Update the workspace definition.
+
+        Parameters
+        ----------
+        auto_suspend : Dict[str, Any], optional
+            Auto-suspend mode for the workspace: IDLE, SCHEDULED, DISABLED
+        cache_config : int, optional
+            Specifies the multiplier for the persistent cache associated
+            with the workspace. If specified, it enables the cache configuration
+            multiplier. It can have one of the following values: 1, 2, or 4.
+        deployment_type : str, optional
+            The deployment type that will be applied to all the workspaces
+            within the group
+        size : str, optional
+            Size of the workspace (in workspace size notation), such as "S-1".
+
+        """
+        if self._manager is None:
+            raise ManagementError(
+                msg='No workspace manager is associated with this object.',
+            )
+        data = {
+            k: v for k, v in dict(
+                autoSuspend=snake_to_camel_dict(auto_suspend),
+                cacheConfig=cache_config,
+                deploymentType=deployment_type,
+                size=size,
+            ).items() if v is not None
+        }
+        self._manager._patch(f'workspaces/{self.id}', json=data)
+        self.refresh()
+
     def refresh(self) -> Workspace:
         """Update the object to the current state."""
         if self._manager is None:
@@ -1008,7 +1083,10 @@ class Workspace(object):
             )
         new_obj = self._manager.get_workspace(self.id)
         for name, value in vars(new_obj).items():
-
+            if isinstance(value, Mapping):
+                setattr(self, name, snake_to_camel_dict(value))
+            else:
+                setattr(self, name, value)
         return self

     def terminate(
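The new Workspace.update() PATCHes only the fields that are passed. A hedged usage sketch; the auto_suspend keys are illustrative (consult the Management API for the exact field names), the workspace ID is a placeholder, and a Management API token is assumed to be configured.

import singlestoredb as s2

mgr = s2.manage_workspaces()              # assumes a Management API token is configured
ws = mgr.get_workspace('<workspace-id>')
ws.update(
    size='S-1',
    cache_config=2,
    auto_suspend=dict(suspend_type='IDLE', idle_after_seconds=1200),  # illustrative keys
)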
@@ -1111,6 +1189,7 @@ class Workspace(object):

     def resume(
         self,
+        disable_auto_suspend: bool = False,
         wait_on_resumed: bool = False,
         wait_interval: int = 20,
         wait_timeout: int = 600,
@@ -1120,6 +1199,8 @@ class Workspace(object):

         Parameters
         ----------
+        disable_auto_suspend : bool, optional
+            Should auto-suspend be disabled?
         wait_on_resumed : bool, optional
             Wait for the workspace to go into 'Resumed' or 'Active' mode before returning
         wait_interval : int, optional
@@ -1137,7 +1218,10 @@
             raise ManagementError(
                 msg='No workspace manager is associated with this object.',
             )
-        self._manager._post(
+        self._manager._post(
+            f'workspaces/{self.id}/resume',
+            json=dict(disableAutoSuspend=disable_auto_suspend),
+        )
         if wait_on_resumed:
             self._manager._wait_on_state(
                 self._manager.get_workspace(self.id),
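With the hunks above, resume() now POSTs a disableAutoSuspend flag along with the resume request. For example (placeholder ID, token assumed configured):

import singlestoredb as s2

ws = s2.manage_workspaces().get_workspace('<workspace-id>')
ws.resume(disable_auto_suspend=True, wait_on_resumed=True)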
@@ -1170,6 +1254,7 @@ class WorkspaceGroup(object):
         region: Optional[Region],
         firewall_ranges: List[str],
         terminated_at: Optional[Union[str, datetime.datetime]],
+        allow_all_traffic: Optional[bool],
     ):
         #: Name of the workspace group
         self.name = name
@@ -1189,6 +1274,9 @@ class WorkspaceGroup(object):
         #: Timestamp of when the workspace group was terminated
         self.terminated_at = to_datetime(terminated_at)

+        #: Should all traffic be allowed?
+        self.allow_all_traffic = allow_all_traffic
+
         self._manager: Optional[WorkspaceManager] = None

     def __str__(self) -> str:
@@ -1229,6 +1317,7 @@ class WorkspaceGroup(object):
             region=region,
             firewall_ranges=obj.get('firewallRanges', []),
             terminated_at=obj.get('terminatedAt'),
+            allow_all_traffic=obj.get('allowAllTraffic'),
         )
         out._manager = manager
         return out
@@ -1260,13 +1349,20 @@ class WorkspaceGroup(object):
             )
         new_obj = self._manager.get_workspace_group(self.id)
         for name, value in vars(new_obj).items():
-
+            if isinstance(value, Mapping):
+                setattr(self, name, camel_to_snake_dict(value))
+            else:
+                setattr(self, name, value)
         return self

     def update(
-        self,
-
+        self,
+        name: Optional[str] = None,
         firewall_ranges: Optional[List[str]] = None,
+        admin_password: Optional[str] = None,
+        expires_at: Optional[str] = None,
+        allow_all_traffic: Optional[bool] = None,
+        update_window: Optional[Dict[str, int]] = None,
     ) -> None:
         """
         Update the workspace group definition.
@@ -1274,11 +1370,24 @@ class WorkspaceGroup(object):
         Parameters
         ----------
         name : str, optional
-
-
-
-
-
+            Name of the workspace group
+        firewall_ranges : list[str], optional
+            List of allowed CIDR ranges. An empty list indicates that all
+            inbound requests are allowed.
+        admin_password : str, optional
+            Admin password for the workspace group. If no password is supplied,
+            a password will be generated and retured in the response.
+        expires_at : str, optional
+            The timestamp of when the workspace group will expire.
+            If the expiration time is not specified,
+            the workspace group will have no expiration time.
+            At expiration, the workspace group is terminated and all the data is lost.
+            Expiration time can be specified as a timestamp or duration.
+            Example: "2021-01-02T15:04:05Z07:00", "2021-01-02", "3h30m"
+        allow_all_traffic : bool, optional
+            Allow all traffic to the workspace group
+        update_window : Dict[str, int], optional
+            Specify the day and hour of an update window: dict(day=0-6, hour=0-23)

         """
         if self._manager is None:
@@ -1287,8 +1396,12 @@ class WorkspaceGroup(object):
             )
         data = {
             k: v for k, v in dict(
-                name=name,
+                name=name,
                 firewallRanges=firewall_ranges,
+                adminPassword=admin_password,
+                expiresAt=expires_at,
+                allowAllTraffic=allow_all_traffic,
+                updateWindow=snake_to_camel_dict(update_window),
             ).items() if v is not None
         }
         self._manager._patch(f'workspaceGroups/{self.id}', json=data)
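WorkspaceGroup.update() now accepts the additional fields documented above. A usage sketch with placeholder values:

import singlestoredb as s2

wg = s2.manage_workspaces().get_workspace_group('<group-id>')
wg.update(
    firewall_ranges=['203.0.113.0/24'],
    allow_all_traffic=False,
    update_window=dict(day=6, hour=2),  # day in 0-6, hour in 0-23, per the docstring above
)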
@@ -1338,9 +1451,15 @@ class WorkspaceGroup(object):
             wait_timeout -= wait_interval

     def create_workspace(
-        self,
-
-
+        self,
+        name: str,
+        size: Optional[str] = None,
+        auto_suspend: Optional[Dict[str, Any]] = None,
+        cache_config: Optional[int] = None,
+        enable_kai: Optional[bool] = None,
+        wait_on_active: bool = False,
+        wait_interval: int = 10,
+        wait_timeout: int = 600,
     ) -> Workspace:
         """
         Create a new workspace.
@@ -1351,6 +1470,15 @@ class WorkspaceGroup(object):
             Name of the workspace
         size : str, optional
             Workspace size in workspace size notation (S-00, S-1, etc.)
+        auto_suspend : Dict[str, Any], optional
+            Auto suspend settings for the workspace. If this field is not
+            provided, no settings will be enabled.
+        cache_config : int, optional
+            Specifies the multiplier for the persistent cache associated
+            with the workspace. If specified, it enables the cache configuration
+            multiplier. It can have one of the following values: 1, 2, or 4.
+        enable_kai : bool, optional
+            Whether to create a SingleStore Kai-enabled workspace
         wait_on_active : bool, optional
             Wait for the workspace to be active before returning
         wait_timeout : int, optional
@@ -1358,9 +1486,6 @@ class WorkspaceGroup(object):
             if wait=True
         wait_interval : int, optional
             Number of seconds between each polling interval
-        add_endpoint_to_firewall_ranges : bool, optional
-            Should the workspace endpoint be added to the workspace group
-            firewall ranges?

         Returns
         -------
@@ -1373,14 +1498,17 @@ class WorkspaceGroup(object):
             )

         out = self._manager.create_workspace(
-            name=name,
-
+            name=name,
+            workspace_group=self,
+            size=size,
+            auto_suspend=snake_to_camel_dict(auto_suspend),
+            cache_config=cache_config,
+            enable_kai=enable_kai,
+            wait_on_active=wait_on_active,
+            wait_interval=wait_interval,
+            wait_timeout=wait_timeout,
        )

-        if add_endpoint_to_firewall_ranges and out.endpoint is not None:
-            ip_address = '{}/32'.format(socket.gethostbyname(out.endpoint))
-            self.update(firewall_ranges=self.firewall_ranges+[ip_address])
-
         return out

     @property
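WorkspaceGroup.create_workspace() now forwards the new sizing, auto-suspend, cache, and Kai options to the manager, and the old add_endpoint_to_firewall_ranges behavior is gone. A hedged sketch with placeholder values:

import singlestoredb as s2

wg = s2.manage_workspaces().get_workspace_group('<group-id>')
ws = wg.create_workspace(
    name='example-workspace',
    size='S-00',
    cache_config=2,
    enable_kai=True,
    wait_on_active=True,
)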
@@ -1525,9 +1653,15 @@ class WorkspaceManager(Manager):
         return NamedList([Region.from_dict(item, self) for item in res.json()])

     def create_workspace_group(
-        self,
-
+        self,
+        name: str,
+        region: Union[str, Region],
+        firewall_ranges: List[str],
+        admin_password: Optional[str] = None,
+        backup_bucket_kms_key_id: Optional[str] = None,
+        data_bucket_kms_key_id: Optional[str] = None,
         expires_at: Optional[str] = None,
+        smart_dr: Optional[bool] = None,
         allow_all_traffic: Optional[bool] = None,
         update_window: Optional[Dict[str, int]] = None,
     ) -> WorkspaceGroup:
@@ -1546,6 +1680,17 @@ class WorkspaceManager(Manager):
         admin_password : str, optional
             Admin password for the workspace group. If no password is supplied,
             a password will be generated and retured in the response.
+        backup_bucket_kms_key_id : str, optional
+            Specifies the KMS key ID associated with the backup bucket.
+            If specified, enables Customer-Managed Encryption Keys (CMEK)
+            encryption for the backup bucket of the workspace group.
+            This feature is only supported in workspace groups deployed in AWS.
+        data_bucket_kms_key_id : str, optional
+            Specifies the KMS key ID associated with the data bucket.
+            If specified, enables Customer-Managed Encryption Keys (CMEK)
+            encryption for the data bucket and Amazon Elastic Block Store
+            (EBS) volumes of the workspace group. This feature is only supported
+            in workspace groups deployed in AWS.
         expires_at : str, optional
             The timestamp of when the workspace group will expire.
             If the expiration time is not specified,
@@ -1553,6 +1698,10 @@ class WorkspaceManager(Manager):
             At expiration, the workspace group is terminated and all the data is lost.
             Expiration time can be specified as a timestamp or duration.
             Example: "2021-01-02T15:04:05Z07:00", "2021-01-02", "3h30m"
+        smart_dr : bool, optional
+            Enables Smart Disaster Recovery (SmartDR) for the workspace group.
+            SmartDR is a disaster recovery solution that ensures seamless and
+            continuous replication of data from the primary region to a secondary region
         allow_all_traffic : bool, optional
             Allow all traffic to the workspace group
         update_window : Dict[str, int], optional
@@ -1569,18 +1718,28 @@ class WorkspaceManager(Manager):
             'workspaceGroups', json=dict(
                 name=name, regionID=region,
                 adminPassword=admin_password,
+                backupBucketKMSKeyID=backup_bucket_kms_key_id,
+                dataBucketKMSKeyID=data_bucket_kms_key_id,
                 firewallRanges=firewall_ranges or [],
                 expiresAt=expires_at,
+                smartDR=smart_dr,
                 allowAllTraffic=allow_all_traffic,
-                updateWindow=update_window,
+                updateWindow=snake_to_camel_dict(update_window),
             ),
         )
         return self.get_workspace_group(res.json()['workspaceGroupID'])

     def create_workspace(
-        self,
-
-
+        self,
+        name: str,
+        workspace_group: Union[str, WorkspaceGroup],
+        size: Optional[str] = None,
+        auto_suspend: Optional[Dict[str, Any]] = None,
+        cache_config: Optional[int] = None,
+        enable_kai: Optional[bool] = None,
+        wait_on_active: bool = False,
+        wait_interval: int = 10,
+        wait_timeout: int = 600,
     ) -> Workspace:
         """
         Create a new workspace.
@@ -1593,6 +1752,15 @@ class WorkspaceManager(Manager):
             The workspace ID of the workspace
         size : str, optional
             Workspace size in workspace size notation (S-00, S-1, etc.)
+        auto_suspend : Dict[str, Any], optional
+            Auto suspend settings for the workspace. If this field is not
+            provided, no settings will be enabled.
+        cache_config : int, optional
+            Specifies the multiplier for the persistent cache associated
+            with the workspace. If specified, it enables the cache configuration
+            multiplier. It can have one of the following values: 1, 2, or 4.
+        enable_kai : bool, optional
+            Whether to create a SingleStore Kai-enabled workspace
         wait_on_active : bool, optional
             Wait for the workspace to be active before returning
         wait_timeout : int, optional
@@ -1610,14 +1778,20 @@ class WorkspaceManager(Manager):
             workspace_group = workspace_group.id
         res = self._post(
             'workspaces', json=dict(
-                name=name,
+                name=name,
+                workspaceGroupID=workspace_group,
                 size=size,
+                autoSuspend=snake_to_camel_dict(auto_suspend),
+                cacheConfig=cache_config,
+                enableKai=enable_kai,
             ),
         )
         out = self.get_workspace(res.json()['workspaceID'])
         if wait_on_active:
             out = self._wait_on_state(
-                out,
+                out,
+                'Active',
+                interval=wait_interval,
                 timeout=wait_timeout,
             )
         return out
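At the manager level, create_workspace_group() gains the CMEK key IDs and SmartDR flag shown above, and create_workspace() gains the same auto-suspend, cache, and Kai options as the group-level method. A sketch of the group call; the region ID, KMS key IDs, and CIDR range are placeholders and a Management API token is assumed to be configured:

import singlestoredb as s2

mgr = s2.manage_workspaces()
wg = mgr.create_workspace_group(
    name='example-group',
    region='<region-id>',
    firewall_ranges=['203.0.113.0/24'],
    backup_bucket_kms_key_id='<kms-key-id>',  # AWS only; enables CMEK for backups
    data_bucket_kms_key_id='<kms-key-id>',    # AWS only; enables CMEK for data/EBS
    smart_dr=True,
    expires_at='3h30m',
)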
singlestoredb/tests/test_ext_func.py
CHANGED

@@ -65,11 +65,13 @@ def start_http_server(database, data_format='rowdat_1'):
         time.sleep(3)
         retries -= 1

-    app = create_app(
+    app = create_app(
+        ext_funcs,
+        url=f'http://{HTTP_HOST}:{port}/invoke',
+        data_format=data_format,
+    )
     app.register_functions(
-        base_url=f'http://{HTTP_HOST}:{port}',
         database=database,
-        data_format=data_format,
     )

     with s2.connect(database=database) as conn:
singlestoredb/tests/test_udf.py
CHANGED

@@ -412,17 +412,6 @@ class TestUDF(unittest.TestCase):
         assert to_sql(foo) == '`hello``_``world`(`x` BIGINT NOT NULL) ' \
             'RETURNS BIGINT NOT NULL'

-        # Add database name
-        @udf(database='mydb')
-        def foo(x: int) -> int: ...
-        assert to_sql(foo) == '`mydb`.`foo`(`x` BIGINT NOT NULL) ' \
-            'RETURNS BIGINT NOT NULL'
-
-        @udf(database='my`db')
-        def foo(x: int) -> int: ...
-        assert to_sql(foo) == '`my``db`.`foo`(`x` BIGINT NOT NULL) ' \
-            'RETURNS BIGINT NOT NULL'
-
     def test_dtypes(self):
         assert dt.BOOL() == 'BOOL NULL'
         assert dt.BOOL(nullable=False) == 'BOOL NOT NULL'