isolate 0.22.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- isolate/__init__.py +3 -0
- isolate/_isolate_version.py +34 -0
- isolate/_version.py +6 -0
- isolate/backends/__init__.py +2 -0
- isolate/backends/_base.py +132 -0
- isolate/backends/common.py +259 -0
- isolate/backends/conda.py +215 -0
- isolate/backends/container.py +64 -0
- isolate/backends/local.py +46 -0
- isolate/backends/pyenv.py +143 -0
- isolate/backends/remote.py +141 -0
- isolate/backends/settings.py +121 -0
- isolate/backends/virtualenv.py +204 -0
- isolate/common/__init__.py +0 -0
- isolate/common/timestamp.py +15 -0
- isolate/connections/__init__.py +21 -0
- isolate/connections/_local/__init__.py +2 -0
- isolate/connections/_local/_base.py +190 -0
- isolate/connections/_local/agent_startup.py +53 -0
- isolate/connections/common.py +121 -0
- isolate/connections/grpc/__init__.py +1 -0
- isolate/connections/grpc/_base.py +175 -0
- isolate/connections/grpc/agent.py +284 -0
- isolate/connections/grpc/configuration.py +23 -0
- isolate/connections/grpc/definitions/__init__.py +11 -0
- isolate/connections/grpc/definitions/agent.proto +18 -0
- isolate/connections/grpc/definitions/agent_pb2.py +29 -0
- isolate/connections/grpc/definitions/agent_pb2.pyi +44 -0
- isolate/connections/grpc/definitions/agent_pb2_grpc.py +68 -0
- isolate/connections/grpc/definitions/common.proto +49 -0
- isolate/connections/grpc/definitions/common_pb2.py +35 -0
- isolate/connections/grpc/definitions/common_pb2.pyi +152 -0
- isolate/connections/grpc/definitions/common_pb2_grpc.py +4 -0
- isolate/connections/grpc/interface.py +71 -0
- isolate/connections/ipc/__init__.py +5 -0
- isolate/connections/ipc/_base.py +225 -0
- isolate/connections/ipc/agent.py +205 -0
- isolate/logger.py +53 -0
- isolate/logs.py +76 -0
- isolate/py.typed +0 -0
- isolate/registry.py +53 -0
- isolate/server/__init__.py +1 -0
- isolate/server/definitions/__init__.py +13 -0
- isolate/server/definitions/server.proto +80 -0
- isolate/server/definitions/server_pb2.py +56 -0
- isolate/server/definitions/server_pb2.pyi +241 -0
- isolate/server/definitions/server_pb2_grpc.py +205 -0
- isolate/server/health/__init__.py +11 -0
- isolate/server/health/health.proto +23 -0
- isolate/server/health/health_pb2.py +32 -0
- isolate/server/health/health_pb2.pyi +66 -0
- isolate/server/health/health_pb2_grpc.py +99 -0
- isolate/server/health_server.py +40 -0
- isolate/server/interface.py +27 -0
- isolate/server/server.py +735 -0
- isolate-0.22.0.dist-info/METADATA +88 -0
- isolate-0.22.0.dist-info/RECORD +61 -0
- isolate-0.22.0.dist-info/WHEEL +5 -0
- isolate-0.22.0.dist-info/entry_points.txt +7 -0
- isolate-0.22.0.dist-info/licenses/LICENSE +201 -0
- isolate-0.22.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
syntax = "proto3";

import "google/protobuf/timestamp.proto";

message SerializedObject {
  // The serialization method used to serialize the raw_object. Must be
  // present in the environment that is running the agent itself.
  string method = 1;
  // The Python object serialized with the method above.
  bytes definition = 2;
  // A flag indicating whether the given object was raised (e.g. an exception
  // that was captured) or not.
  bool was_it_raised = 3;
  // The stringized version of the traceback, if it was raised.
  // Marked `optional` so that receivers can distinguish "not set"
  // from an empty string via field presence.
  optional string stringized_traceback = 4;
}

message PartialRunResult {
  // A flag indicating whether the run has completed.
  bool is_complete = 1;
  // A list of logs collected during this partial execution. It does
  // not include old logs.
  repeated Log logs = 2;
  // The result of the run, if it is complete.
  optional SerializedObject result = 3;
}

message Log {
  string message = 1;
  LogSource source = 2;
  LogLevel level = 3;
  google.protobuf.Timestamp timestamp = 4;
}

enum LogSource {
  BUILDER = 0;
  BRIDGE = 1;
  USER = 2;
}

enum LogLevel {
  TRACE = 0;
  DEBUG = 1;
  INFO = 2;
  WARNING = 3;
  ERROR = 4;
  STDOUT = 5;
  STDERR = 6;
}
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: common.proto
# Protobuf Python Version: 4.25.1
# NOTE: regenerate this module with protoc from common.proto; never edit
# it by hand — the serialized descriptor below must stay in sync with
# the .proto definition.
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0c\x63ommon.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x89\x01\n\x10SerializedObject\x12\x0e\n\x06method\x18\x01 \x01(\t\x12\x12\n\ndefinition\x18\x02 \x01(\x0c\x12\x15\n\rwas_it_raised\x18\x03 \x01(\x08\x12!\n\x14stringized_traceback\x18\x04 \x01(\tH\x00\x88\x01\x01\x42\x17\n\x15_stringized_traceback\"n\n\x10PartialRunResult\x12\x13\n\x0bis_complete\x18\x01 \x01(\x08\x12\x12\n\x04logs\x18\x02 \x03(\x0b\x32\x04.Log\x12&\n\x06result\x18\x03 \x01(\x0b\x32\x11.SerializedObjectH\x00\x88\x01\x01\x42\t\n\x07_result\"{\n\x03Log\x12\x0f\n\x07message\x18\x01 \x01(\t\x12\x1a\n\x06source\x18\x02 \x01(\x0e\x32\n.LogSource\x12\x18\n\x05level\x18\x03 \x01(\x0e\x32\t.LogLevel\x12-\n\ttimestamp\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp*.\n\tLogSource\x12\x0b\n\x07\x42UILDER\x10\x00\x12\n\n\x06\x42RIDGE\x10\x01\x12\x08\n\x04USER\x10\x02*Z\n\x08LogLevel\x12\t\n\x05TRACE\x10\x00\x12\t\n\x05\x44\x45\x42UG\x10\x01\x12\x08\n\x04INFO\x10\x02\x12\x0b\n\x07WARNING\x10\x03\x12\t\n\x05\x45RROR\x10\x04\x12\n\n\x06STDOUT\x10\x05\x12\n\n\x06STDERR\x10\x06\x62\x06proto3')

_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'common_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS == False:
  DESCRIPTOR._options = None
  _globals['_LOGSOURCE']._serialized_start=426
  _globals['_LOGSOURCE']._serialized_end=472
  _globals['_LOGLEVEL']._serialized_start=474
  _globals['_LOGLEVEL']._serialized_end=564
  _globals['_SERIALIZEDOBJECT']._serialized_start=50
  _globals['_SERIALIZEDOBJECT']._serialized_end=187
  _globals['_PARTIALRUNRESULT']._serialized_start=189
  _globals['_PARTIALRUNRESULT']._serialized_end=299
  _globals['_LOG']._serialized_start=301
  _globals['_LOG']._serialized_end=424
# @@protoc_insertion_point(module_scope)
|
|
@@ -0,0 +1,152 @@
|
|
|
1
|
+
"""
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
"""
import builtins
import collections.abc
import google.protobuf.descriptor
import google.protobuf.internal.containers
import google.protobuf.internal.enum_type_wrapper
import google.protobuf.message
import google.protobuf.timestamp_pb2
import sys
import typing

if sys.version_info >= (3, 10):
    import typing as typing_extensions
else:
    import typing_extensions

DESCRIPTOR: google.protobuf.descriptor.FileDescriptor

class _LogSource:
    ValueType = typing.NewType("ValueType", builtins.int)
    V: typing_extensions.TypeAlias = ValueType

class _LogSourceEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_LogSource.ValueType], builtins.type):
    DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
    BUILDER: _LogSource.ValueType  # 0
    BRIDGE: _LogSource.ValueType  # 1
    USER: _LogSource.ValueType  # 2

class LogSource(_LogSource, metaclass=_LogSourceEnumTypeWrapper): ...

BUILDER: LogSource.ValueType  # 0
BRIDGE: LogSource.ValueType  # 1
USER: LogSource.ValueType  # 2
global___LogSource = LogSource

class _LogLevel:
    ValueType = typing.NewType("ValueType", builtins.int)
    V: typing_extensions.TypeAlias = ValueType

class _LogLevelEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_LogLevel.ValueType], builtins.type):
    DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
    TRACE: _LogLevel.ValueType  # 0
    DEBUG: _LogLevel.ValueType  # 1
    INFO: _LogLevel.ValueType  # 2
    WARNING: _LogLevel.ValueType  # 3
    ERROR: _LogLevel.ValueType  # 4
    STDOUT: _LogLevel.ValueType  # 5
    STDERR: _LogLevel.ValueType  # 6

class LogLevel(_LogLevel, metaclass=_LogLevelEnumTypeWrapper): ...

TRACE: LogLevel.ValueType  # 0
DEBUG: LogLevel.ValueType  # 1
INFO: LogLevel.ValueType  # 2
WARNING: LogLevel.ValueType  # 3
ERROR: LogLevel.ValueType  # 4
STDOUT: LogLevel.ValueType  # 5
STDERR: LogLevel.ValueType  # 6
global___LogLevel = LogLevel

@typing_extensions.final
class SerializedObject(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    METHOD_FIELD_NUMBER: builtins.int
    DEFINITION_FIELD_NUMBER: builtins.int
    WAS_IT_RAISED_FIELD_NUMBER: builtins.int
    STRINGIZED_TRACEBACK_FIELD_NUMBER: builtins.int
    method: builtins.str
    """The serialization method used to serialize the raw_object. Must be
    present in the environment that is running the agent itself.
    """
    definition: builtins.bytes
    """The Python object serialized with the method above."""
    was_it_raised: builtins.bool
    """A flag indicating whether the given object was raised (e.g. an exception
    that was captured) or not.
    """
    stringized_traceback: builtins.str
    """The stringized version of the traceback, if it was raised."""
    def __init__(
        self,
        *,
        method: builtins.str = ...,
        definition: builtins.bytes = ...,
        was_it_raised: builtins.bool = ...,
        stringized_traceback: builtins.str | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["_stringized_traceback", b"_stringized_traceback", "stringized_traceback", b"stringized_traceback"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["_stringized_traceback", b"_stringized_traceback", "definition", b"definition", "method", b"method", "stringized_traceback", b"stringized_traceback", "was_it_raised", b"was_it_raised"]) -> None: ...
    def WhichOneof(self, oneof_group: typing_extensions.Literal["_stringized_traceback", b"_stringized_traceback"]) -> typing_extensions.Literal["stringized_traceback"] | None: ...

global___SerializedObject = SerializedObject

@typing_extensions.final
class PartialRunResult(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    IS_COMPLETE_FIELD_NUMBER: builtins.int
    LOGS_FIELD_NUMBER: builtins.int
    RESULT_FIELD_NUMBER: builtins.int
    is_complete: builtins.bool
    """A flag indicating whether the run has completed."""
    @property
    def logs(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Log]:
        """A list of logs collected during this partial execution. It does
        not include old logs.
        """
    @property
    def result(self) -> global___SerializedObject:
        """The result of the run, if it is complete."""
    def __init__(
        self,
        *,
        is_complete: builtins.bool = ...,
        logs: collections.abc.Iterable[global___Log] | None = ...,
        result: global___SerializedObject | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["_result", b"_result", "result", b"result"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["_result", b"_result", "is_complete", b"is_complete", "logs", b"logs", "result", b"result"]) -> None: ...
    def WhichOneof(self, oneof_group: typing_extensions.Literal["_result", b"_result"]) -> typing_extensions.Literal["result"] | None: ...

global___PartialRunResult = PartialRunResult

@typing_extensions.final
class Log(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    MESSAGE_FIELD_NUMBER: builtins.int
    SOURCE_FIELD_NUMBER: builtins.int
    LEVEL_FIELD_NUMBER: builtins.int
    TIMESTAMP_FIELD_NUMBER: builtins.int
    message: builtins.str
    source: global___LogSource.ValueType
    level: global___LogLevel.ValueType
    @property
    def timestamp(self) -> google.protobuf.timestamp_pb2.Timestamp: ...
    def __init__(
        self,
        *,
        message: builtins.str = ...,
        source: global___LogSource.ValueType = ...,
        level: global___LogLevel.ValueType = ...,
        timestamp: google.protobuf.timestamp_pb2.Timestamp | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["timestamp", b"timestamp"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["level", b"level", "message", b"message", "source", b"source", "timestamp", b"timestamp"]) -> None: ...

global___Log = Log
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
"""A common gRPC interface for both the gRPC connection implementation
|
|
2
|
+
and the Isolate Server to share."""
|
|
3
|
+
|
|
4
|
+
import functools
|
|
5
|
+
from typing import Any, Optional
|
|
6
|
+
|
|
7
|
+
from isolate.common import timestamp
|
|
8
|
+
from isolate.connections.common import load_serialized_object, serialize_object
|
|
9
|
+
from isolate.connections.grpc import definitions
|
|
10
|
+
from isolate.logs import Log, LogLevel, LogSource
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
@functools.singledispatch
def from_grpc(message: definitions.Message) -> Any:
    """Materialize a gRPC message into a Python object.

    This is the fallback for message types without a registered
    converter; it always raises."""
    raise NotImplementedError(
        f"Can't convert {type(message).__name__} to a Python object."
    )
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
@functools.singledispatch
def to_grpc(obj: Any) -> definitions.Message:
    """Convert a Python object into a gRPC message.

    This is the fallback for object types without a registered
    converter; it always raises."""
    raise NotImplementedError(
        f"Cannot convert {type(obj).__name__} to a gRPC message."
    )
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
@from_grpc.register
def _(message: definitions.SerializedObject) -> Any:
    """Deserialize a `SerializedObject` message back into the Python
    object (or captured exception) it carries.

    `stringized_traceback` is a proto3 `optional` field: when the sender
    never set it, reading the attribute yields "" rather than None. Check
    field presence explicitly so `load_serialized_object` receives None
    for an unset traceback, preserving the None/"" distinction.
    """
    if message.HasField("stringized_traceback"):
        traceback = message.stringized_traceback
    else:
        traceback = None
    return load_serialized_object(
        message.method,
        message.definition,
        was_it_raised=message.was_it_raised,
        stringized_traceback=traceback,
    )
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
@from_grpc.register
def _(message: definitions.Log) -> Log:
    """Convert a gRPC `Log` message into an `isolate.logs.Log` record."""
    # NOTE(review): LogSource is looked up by *value* (presumably its
    # values are the lowercased member names) while LogLevel is looked
    # up by member *name* — confirm against isolate.logs.
    return Log(
        message=message.message,
        source=LogSource(definitions.LogSource.Name(message.source).lower()),
        level=LogLevel[definitions.LogLevel.Name(message.level).upper()],
        timestamp=timestamp.to_datetime(message.timestamp),
    )
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
@to_grpc.register
def _(obj: Log) -> definitions.Log:
    """Convert an `isolate.logs.Log` record into its gRPC message form."""
    grpc_source = definitions.LogSource.Value(obj.source.name.upper())
    grpc_level = definitions.LogLevel.Value(obj.level.name.upper())
    grpc_timestamp = timestamp.from_datetime(obj.timestamp)
    return definitions.Log(
        message=obj.message,
        source=grpc_source,
        level=grpc_level,
        timestamp=grpc_timestamp,
    )
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def to_serialized_object(
    obj: Any,
    method: str,
    was_it_raised: bool = False,
    stringized_traceback: Optional[str] = None,
) -> definitions.SerializedObject:
    """Serialize `obj` with the given serialization `method` and wrap the
    resulting payload in a `SerializedObject` gRPC message.

    `was_it_raised` marks the object as a captured exception, and
    `stringized_traceback` carries its formatted traceback (if any)."""
    payload = serialize_object(method, obj)
    return definitions.SerializedObject(
        method=method,
        definition=payload,
        was_it_raised=was_it_raised,
        stringized_traceback=stringized_traceback,
    )
|
|
@@ -0,0 +1,225 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import base64
|
|
4
|
+
import importlib
|
|
5
|
+
import subprocess
|
|
6
|
+
import time
|
|
7
|
+
from contextlib import ExitStack, closing
|
|
8
|
+
from dataclasses import dataclass
|
|
9
|
+
from multiprocessing.connection import Connection, Listener
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
from typing import (
|
|
12
|
+
TYPE_CHECKING,
|
|
13
|
+
Any,
|
|
14
|
+
Callable,
|
|
15
|
+
ContextManager,
|
|
16
|
+
)
|
|
17
|
+
|
|
18
|
+
from isolate.backends import (
|
|
19
|
+
BasicCallable,
|
|
20
|
+
CallResultType,
|
|
21
|
+
EnvironmentConnection,
|
|
22
|
+
)
|
|
23
|
+
from isolate.connections._local import PythonExecutionBase, agent_startup
|
|
24
|
+
from isolate.connections.common import prepare_exc
|
|
25
|
+
from isolate.connections.ipc import agent
|
|
26
|
+
from isolate.logs import LogLevel, LogSource
|
|
27
|
+
|
|
28
|
+
if TYPE_CHECKING:
    # Somehow mypy can't figure out that `ConnectionWrapper`
    # really exists (it is undocumented in multiprocessing.connection),
    # so declare its interface here for the type checker only; the real
    # class is imported at runtime in the `else` branch below.
    class ConnectionWrapper(Connection):
        def __init__(
            self,
            connection: Any,
            loads: Callable[[bytes], Any],
            dumps: Callable[[Any], bytes],
        ) -> None: ...

        def recv(self) -> Any: ...

        def send(self, value: Any) -> None: ...

        def close(self) -> None: ...

else:
    from multiprocessing.connection import ConnectionWrapper
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
class AgentListener(Listener):
    """A custom listener that can use any available serialization method
    to communicate with the child process."""

    def __init__(self, backend_name: str, *args: Any, **kwargs: Any) -> None:
        # Resolve the serialization backend once, up front, so every
        # accepted connection shares the same dumps/loads pair.
        self.serialization_backend = loadserialization_method(backend_name)
        super().__init__(*args, **kwargs)

    def accept(self) -> Connection:
        """Accept an incoming connection, wrapped so that all traffic is
        (de)serialized with the configured backend."""
        backend = self.serialization_backend
        raw_connection = super().accept()
        return ConnectionWrapper(
            raw_connection,
            dumps=backend.dumps,
            loads=backend.loads,
        )
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def loadserialization_method(backend_name: str) -> Any:
    """Import and return the module that provides ``dumps``/``loads``
    for the named serialization backend (e.g. ``pickle``)."""
    # TODO(feat): raise a friendlier error when the backend module is
    # not importable in the current environment.
    return importlib.import_module(backend_name)
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
def encode_service_address(address: tuple[str, int]) -> str:
    """Encode a ``(host, port)`` pair as base64 text so it can travel to
    the agent process as a single command-line argument."""
    host, port = address
    raw_address = f"{host}:{port}".encode()
    return base64.b64encode(raw_address).decode("utf-8")
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
@dataclass
class IsolatedProcessConnection(EnvironmentConnection):
    """A generic IPC implementation for running the isolate backend
    in a separated process.

    Each implementation needs to define a start_process method to
    spawn the agent."""

    # The amount of seconds to wait before checking whether the
    # isolated process has exited or not.
    _DEFER_THRESHOLD = 0.25

    def start_process(
        self,
        connection: AgentListener,
        *args: Any,
        **kwargs: Any,
    ) -> ContextManager[subprocess.Popen]:
        """Start the agent process. Subclasses must override this."""
        raise NotImplementedError

    def run(  # type: ignore[return-value]
        self,
        executable: BasicCallable,
        *args: Any,
        **kwargs: Any,
    ) -> CallResultType:  # type: ignore[type-var]
        """Spawn an agent process using the given environment, run the given
        `executable` in that process, and return the result object back."""

        with ExitStack() as stack:
            # IPC flow is the following:
            #  1. [controller]: Create the socket server
            #  2. [controller]: Spawn the call agent with the socket address
            #  3.      [agent]: Connect to the socket server
            #  4. [controller]: Accept the incoming connection request
            #  5. [controller]: Send the executable over the established bridge
            #  6.      [agent]: Receive the executable from the bridge
            #  7.      [agent]: Execute the executable and once done send the result
            #                   back
            #  8. [controller]: Loop until either the isolated process exits or sends
            #                   any data (will be interpreted as a tuple of two
            #                   mutually exclusive objects, either a result object or
            #                   an exception to be raised).
            #

            self.log("Starting the controller bridge.")
            controller_service = stack.enter_context(
                AgentListener(
                    self.environment.settings.serialization_method,
                    family="AF_INET",
                )
            )

            self.log(
                f"Controller server is listening at {controller_service.address}."
                " Attempting to start the agent process."
            )
            assert not (args or kwargs), "run() should not receive any arguments."
            isolated_process = stack.enter_context(
                self.start_process(controller_service, *args, **kwargs)
            )

            # TODO(fix): this might hang if the agent process crashes before it can
            # connect to the controller bridge.
            self.log(
                f"Awaiting agent process of {isolated_process.pid}"
                " to establish a connection."
            )
            established_connection = stack.enter_context(
                closing(controller_service.accept())
            )

            self.log("Bridge between controller and the agent has been established.")
            established_connection.send(executable)

            self.log("Executable has been sent, awaiting execution result.")
            return self.poll_until_result(
                isolated_process,
                established_connection,
            )

    def poll_until_result(
        self,
        process: subprocess.Popen,
        connection: Connection,
    ) -> CallResultType:  # type: ignore[type-var]
        """Take the given process, and poll until either it exits or returns
        a result object.

        Raises OSError when the process exits (with any code) without
        sending any data back over the bridge."""

        while not connection.poll():
            # Normally, if we do connection.read() without having this loop
            # it is going to block us indefinitely (even if the underlying
            # process has crashed). We can use a combination of process.poll
            # and connection.poll to check if the process is alive and has data
            # to move forward.
            #
            # NOTE: Popen.poll() returns the exit code (or None while the
            # process is still running). Comparing against None here is
            # essential — a truthiness check would treat a clean exit
            # (code 0) as "still running" and spin in this loop forever.
            if process.poll() is not None:
                break

            # For preventing busy waiting, we can sleep for a bit
            # and let other threads run.
            time.sleep(self._DEFER_THRESHOLD)
            continue

        if not connection.poll():
            # If the process has exited but there is still no data, we
            # can assume something terrible has happened.
            raise OSError(
                "The isolated process has exited unexpectedly with code "
                f"'{process.poll()}' without sending any data back."
            )

        # TODO(fix): handle EOFError that might happen here (e.g. problematic
        # serialization might cause it).
        result, did_it_raise, stringized_traceback = connection.recv()

        if did_it_raise:
            raise prepare_exc(result, stringized_traceback=stringized_traceback)
        else:
            assert stringized_traceback is None
            return result
|
|
197
|
+
|
|
198
|
+
|
|
199
|
+
@dataclass
class PythonIPC(PythonExecutionBase[AgentListener], IsolatedProcessConnection):
    """IPC connection that runs the agent under a plain Python interpreter."""

    def get_python_cmd(
        self,
        executable: Path,
        connection: AgentListener,
        log_fd: int,
    ) -> list[str | Path]:
        """Build the command line that starts the IPC agent under the
        given Python `executable`, pointing it at `connection`'s address."""
        assert isinstance(connection.address, tuple)
        service_address = encode_service_address(connection.address)
        command: list[str | Path] = [
            executable,
            agent_startup.__file__,
            agent.__file__,
            service_address,
        ]
        # TODO(feat): we probably should check if the given backend is installed
        # on the remote interpreter, otherwise it will fail without establishing
        # the connection with the bridge.
        command += [
            "--serialization-backend",
            self.environment.settings.serialization_method,
            "--log-fd",
            str(log_fd),
        ]
        return command

    def handle_agent_log(
        self, line: str, *, level: LogLevel, source: LogSource
    ) -> None:
        """Forward a log line produced by the agent to this connection's logger."""
        self.log(line, level=level, source=source)
|