langgraph-api 0.4.40__py3-none-any.whl → 0.5.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of langgraph-api might be problematic. See the advisory details below for more information.
- langgraph_api/__init__.py +1 -1
- langgraph_api/api/assistants.py +65 -61
- langgraph_api/api/meta.py +6 -0
- langgraph_api/api/threads.py +11 -7
- langgraph_api/auth/custom.py +29 -24
- langgraph_api/cli.py +2 -49
- langgraph_api/config.py +131 -16
- langgraph_api/graph.py +1 -1
- langgraph_api/grpc/client.py +183 -0
- langgraph_api/grpc/config_conversion.py +225 -0
- langgraph_api/grpc/generated/core_api_pb2.py +275 -0
- langgraph_api/{grpc_ops → grpc}/generated/core_api_pb2.pyi +35 -40
- langgraph_api/grpc/generated/engine_common_pb2.py +190 -0
- langgraph_api/grpc/generated/engine_common_pb2.pyi +634 -0
- langgraph_api/grpc/generated/engine_common_pb2_grpc.py +24 -0
- langgraph_api/grpc/ops.py +1045 -0
- langgraph_api/js/build.mts +1 -1
- langgraph_api/js/client.http.mts +1 -1
- langgraph_api/js/client.mts +1 -1
- langgraph_api/js/package.json +12 -12
- langgraph_api/js/src/graph.mts +20 -0
- langgraph_api/js/yarn.lock +176 -234
- langgraph_api/metadata.py +29 -21
- langgraph_api/queue_entrypoint.py +2 -2
- langgraph_api/route.py +14 -4
- langgraph_api/schema.py +2 -2
- langgraph_api/self_hosted_metrics.py +48 -2
- langgraph_api/serde.py +58 -14
- langgraph_api/server.py +16 -2
- langgraph_api/worker.py +1 -1
- {langgraph_api-0.4.40.dist-info → langgraph_api-0.5.6.dist-info}/METADATA +6 -6
- {langgraph_api-0.4.40.dist-info → langgraph_api-0.5.6.dist-info}/RECORD +38 -34
- langgraph_api/grpc_ops/client.py +0 -80
- langgraph_api/grpc_ops/generated/core_api_pb2.py +0 -274
- langgraph_api/grpc_ops/ops.py +0 -610
- /langgraph_api/{grpc_ops → grpc}/__init__.py +0 -0
- /langgraph_api/{grpc_ops → grpc}/generated/__init__.py +0 -0
- /langgraph_api/{grpc_ops → grpc}/generated/core_api_pb2_grpc.py +0 -0
- {langgraph_api-0.4.40.dist-info → langgraph_api-0.5.6.dist-info}/WHEEL +0 -0
- {langgraph_api-0.4.40.dist-info → langgraph_api-0.5.6.dist-info}/entry_points.txt +0 -0
- {langgraph_api-0.4.40.dist-info → langgraph_api-0.5.6.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,183 @@
|
|
|
1
|
+
"""gRPC client wrapper for LangGraph persistence services."""
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import os
|
|
5
|
+
|
|
6
|
+
import structlog
|
|
7
|
+
from grpc import aio # type: ignore[import]
|
|
8
|
+
|
|
9
|
+
from .generated.core_api_pb2_grpc import AdminStub, AssistantsStub, ThreadsStub
|
|
10
|
+
|
|
11
|
+
logger = structlog.stdlib.get_logger(__name__)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
# Shared global client pool
# Process-wide singleton: lazily created by get_shared_client() and torn
# down (and reset to None) by close_shared_client().
_client_pool: "GrpcClientPool | None" = None
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class GrpcClient:
    """gRPC client for LangGraph persistence services.

    Wraps a single ``grpc.aio`` channel plus one stub per service. Use it
    either as an async context manager or via explicit ``connect()`` /
    ``close()`` calls.
    """

    def __init__(
        self,
        server_address: str | None = None,
    ):
        """Initialize the gRPC client.

        Args:
            server_address: The gRPC server address (default: localhost:50051)
        """
        fallback = os.getenv("GRPC_SERVER_ADDRESS", "localhost:50051")
        self.server_address = server_address or fallback
        self._channel: aio.Channel | None = None
        self._assistants_stub: AssistantsStub | None = None
        self._threads_stub: ThreadsStub | None = None
        self._admin_stub: AdminStub | None = None

    async def __aenter__(self):
        """Enter the async context: open the channel and return self."""
        await self.connect()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Exit the async context: tear the channel down."""
        await self.close()

    async def connect(self):
        """Connect to the gRPC server (no-op if already connected)."""
        if self._channel is not None:
            return

        channel = aio.insecure_channel(self.server_address)
        self._channel = channel
        self._assistants_stub = AssistantsStub(channel)
        self._threads_stub = ThreadsStub(channel)
        self._admin_stub = AdminStub(channel)

        await logger.adebug(
            "Connected to gRPC server", server_address=self.server_address
        )

    async def close(self):
        """Close the gRPC connection and drop all stubs."""
        if self._channel is None:
            return
        await self._channel.close()
        self._channel = None
        self._assistants_stub = None
        self._threads_stub = None
        self._admin_stub = None
        await logger.adebug("Closed gRPC connection")

    def _require_stub(self, stub):
        """Return *stub*, raising if the client has not been connected yet."""
        if stub is None:
            raise RuntimeError(
                "Client not connected. Use async context manager or call connect() first."
            )
        return stub

    @property
    def assistants(self) -> AssistantsStub:
        """Get the assistants service stub."""
        return self._require_stub(self._assistants_stub)

    @property
    def threads(self) -> ThreadsStub:
        """Get the threads service stub."""
        return self._require_stub(self._threads_stub)

    @property
    def admin(self) -> AdminStub:
        """Get the admin service stub."""
        return self._require_stub(self._admin_stub)
|
98
|
+
|
|
99
|
+
|
|
100
|
+
class GrpcClientPool:
    """Pool of gRPC clients for load distribution."""

    def __init__(self, pool_size: int = 5, server_address: str | None = None):
        self.pool_size = pool_size
        self.server_address = server_address
        self.clients: list[GrpcClient] = []
        self._current_index = 0
        self._init_lock = asyncio.Lock()
        self._initialized = False

    async def _initialize(self):
        """Connect ``pool_size`` clients; safe to call concurrently."""
        async with self._init_lock:
            # Another coroutine may have finished while we waited on the lock.
            if self._initialized:
                return

            await logger.ainfo(
                "Initializing gRPC client pool",
                pool_size=self.pool_size,
                server_address=self.server_address,
            )

            for _ in range(self.pool_size):
                member = GrpcClient(server_address=self.server_address)
                await member.connect()
                self.clients.append(member)

            self._initialized = True
            await logger.ainfo(
                f"gRPC client pool initialized with {self.pool_size} clients"
            )

    async def get_client(self) -> GrpcClient:
        """Get next client using round-robin selection.

        Round-robin without strict locking - slight races are acceptable
        and result in good enough distribution under high load.
        """
        if not self._initialized:
            await self._initialize()

        slot = self._current_index % self.pool_size
        self._current_index = slot + 1
        return self.clients[slot]

    async def close(self):
        """Close all clients in the pool."""
        if not self._initialized:
            return
        await logger.ainfo(f"Closing gRPC client pool ({self.pool_size} clients)")
        for member in self.clients:
            await member.close()
        self.clients.clear()
        self._initialized = False
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
async def get_shared_client() -> GrpcClient:
    """Get a gRPC client from the shared pool.

    Uses a pool of channels for better performance under high concurrency.
    Each channel is a separate TCP connection that can handle ~100-200
    concurrent streams effectively.

    Returns:
        A GrpcClient instance from the pool
    """
    global _client_pool
    if _client_pool is None:
        # Imported lazily to avoid a circular import at module load time.
        from langgraph_api import config

        pool = GrpcClientPool(
            pool_size=config.GRPC_CLIENT_POOL_SIZE,
            server_address=os.getenv("GRPC_SERVER_ADDRESS"),
        )
        _client_pool = pool

    return await _client_pool.get_client()
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
async def close_shared_client():
    """Close the shared gRPC client pool and reset the module singleton."""
    global _client_pool
    if _client_pool is None:
        return
    # Close first, then clear, so a failed close leaves the pool discoverable.
    await _client_pool.close()
    _client_pool = None
|
|
@@ -0,0 +1,225 @@
|
|
|
1
|
+
"""Conversion utils for the RunnableConfig."""
|
|
2
|
+
|
|
3
|
+
# THIS IS DUPLICATED
|
|
4
|
+
# TODO: WFH - Deduplicate with the executor logic by moving into a separate package
|
|
5
|
+
# Sequencing in the next PR.
|
|
6
|
+
from typing import Any, cast
|
|
7
|
+
|
|
8
|
+
import orjson
|
|
9
|
+
from langchain_core.runnables.config import RunnableConfig
|
|
10
|
+
|
|
11
|
+
from langgraph_api.grpc.generated import engine_common_pb2
|
|
12
|
+
|
|
13
|
+
# Reserved configurable keys used internally by the Pregel runtime
# (double-underscore-prefixed) and checkpoint-addressing keys as they
# appear in config["configurable"].
CONFIG_KEY_SEND = "__pregel_send"
CONFIG_KEY_READ = "__pregel_read"
CONFIG_KEY_RESUMING = "__pregel_resuming"
CONFIG_KEY_TASK_ID = "__pregel_task_id"
CONFIG_KEY_THREAD_ID = "thread_id"
CONFIG_KEY_CHECKPOINT_MAP = "checkpoint_map"
CONFIG_KEY_CHECKPOINT_ID = "checkpoint_id"
CONFIG_KEY_CHECKPOINT_NS = "checkpoint_ns"
CONFIG_KEY_SCRATCHPAD = "__pregel_scratchpad"
CONFIG_KEY_DURABILITY = "__pregel_durability"
CONFIG_KEY_GRAPH_ID = "graph_id"
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def _durability_to_proto(
    durability: str,
) -> engine_common_pb2.Durability:
    """Map a durability string ("async"/"sync"/"exit") to its proto enum.

    Raises:
        ValueError: if *durability* is not one of the three known values.
    """
    mapping = {
        "async": engine_common_pb2.Durability.ASYNC,
        "sync": engine_common_pb2.Durability.SYNC,
        "exit": engine_common_pb2.Durability.EXIT,
    }
    result = mapping.get(durability)
    if result is None:
        raise ValueError(f"invalid durability: {durability}")
    return result
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def _durability_from_proto(
    durability: engine_common_pb2.Durability,
) -> str:
    """Map a Durability proto enum back to its string form.

    Raises:
        ValueError: if *durability* is not a recognized enum value.
    """
    pairs = (
        (engine_common_pb2.Durability.ASYNC, "async"),
        (engine_common_pb2.Durability.SYNC, "sync"),
        (engine_common_pb2.Durability.EXIT, "exit"),
    )
    for proto_value, name in pairs:
        if durability == proto_value:
            return name
    raise ValueError(f"invalid durability: {durability}")
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def config_to_proto(
    config: RunnableConfig,
) -> engine_common_pb2.EngineRunnableConfig | None:
    """Serialize a RunnableConfig into an EngineRunnableConfig proto.

    Returns None for an empty/falsy config. Well-known keys map to dedicated
    proto fields; everything else is JSON-encoded into the ``*_json`` maps.
    """
    if not config:
        return None

    remaining = {**config}
    proto = engine_common_pb2.EngineRunnableConfig()

    # Metadata: run_attempt / run_id get first-class fields, the rest is JSON.
    for key, value in (remaining.pop("metadata", None) or {}).items():
        if key == "run_attempt":
            proto.run_attempt = value
        elif key == "run_id":
            proto.server_run_id = str(value)
        else:
            proto.metadata_json[key] = orjson.dumps(value)

    run_name = remaining.pop("run_name", None)
    if run_name:
        proto.run_name = run_name

    run_id = remaining.pop("run_id", None)
    if run_id:
        proto.run_id = str(run_id)

    # Numeric limits are only forwarded when they are truthy ints.
    max_concurrency = remaining.pop("max_concurrency", None)
    if max_concurrency and isinstance(max_concurrency, int):
        proto.max_concurrency = max_concurrency

    recursion_limit = remaining.pop("recursion_limit", None)
    if recursion_limit and isinstance(recursion_limit, int):
        proto.recursion_limit = recursion_limit

    # Collections are filled in after construction of the message.
    tags = remaining.pop("tags", None)
    if tags and isinstance(tags, list):
        proto.tags.extend(tags)

    configurable = remaining.pop("configurable", None)
    if configurable and isinstance(configurable, dict):
        _inject_configurable_into_proto(configurable, proto)

    # Anything left over is preserved verbatim as JSON.
    if remaining:
        proto.extra_json.update(
            {key: orjson.dumps(value) for key, value in remaining.items()}
        )

    return proto
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
# Configurable keys that _inject_configurable_into_proto silently drops
# instead of serializing into extra_configurable_json (presumably
# process-local runtime state that cannot cross the wire — TODO confirm).
RESTRICTED_RESERVED_CONFIGURABLE_KEYS = {
    CONFIG_KEY_SEND,
    CONFIG_KEY_READ,
    CONFIG_KEY_SCRATCHPAD,
}
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
def _inject_configurable_into_proto(
    configurable: dict[str, Any], proto: engine_common_pb2.EngineRunnableConfig
) -> None:
    """Copy a ``configurable`` dict into dedicated proto fields in place.

    Well-known keys map to typed fields on *proto*; restricted runtime keys
    are dropped; every other key is JSON-encoded into
    ``extra_configurable_json``.
    """
    extra = {}
    for key, value in configurable.items():
        if key == CONFIG_KEY_RESUMING:
            proto.resuming = bool(value)
        elif key == CONFIG_KEY_TASK_ID:
            proto.task_id = str(value)
        elif key == CONFIG_KEY_THREAD_ID:
            proto.thread_id = str(value)
        elif key == CONFIG_KEY_CHECKPOINT_MAP:
            # Assumes value is a str->str mapping; cast is for the checker only.
            proto.checkpoint_map.update(cast(dict[str, str], value))
        elif key == CONFIG_KEY_CHECKPOINT_ID:
            proto.checkpoint_id = str(value)
        elif key == CONFIG_KEY_CHECKPOINT_NS:
            proto.checkpoint_ns = str(value)
        elif key == CONFIG_KEY_DURABILITY and value:
            # NOTE(review): a *falsy* durability value fails this branch and,
            # since CONFIG_KEY_DURABILITY is not in the restricted set, ends up
            # JSON-encoded in extra below — confirm this fall-through is
            # intended rather than skipping the key entirely.
            proto.durability = _durability_to_proto(value)
        elif key not in RESTRICTED_RESERVED_CONFIGURABLE_KEYS:
            extra[key] = value
    if extra:
        proto.extra_configurable_json.update(
            {k: orjson.dumps(v) for k, v in extra.items()}
        )
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
def context_to_json_bytes(context: dict[str, Any] | Any) -> bytes | None:
    """Convert context to JSON bytes for proto serialization.

    Args:
        context: A dict-like object (anything with ``items``), a plain object
            with a ``__dict__`` (e.g. a dataclass instance), or None.

    Returns:
        JSON-encoded bytes, or None when *context* is None.
    """
    if context is None:
        return None

    if hasattr(context, "items"):
        # Dict-like object: copy into a plain dict.
        context_dict = dict(context)
    elif hasattr(context, "__dict__"):
        # Plain object / dataclass instance: shallow-serialize its attributes.
        # NOTE(review): slotted classes have no __dict__ and fall through to
        # the empty-dict case below — confirm that is acceptable for callers.
        context_dict = context.__dict__
    else:
        # No attributes and not dict-like. The original code had an
        # unreachable `vars(context) if hasattr(context, "__dict__")` check
        # here (that branch is only entered when __dict__ is absent), so the
        # effective behavior was — and remains — an empty object.
        context_dict = {}

    return orjson.dumps(context_dict)
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
def config_from_proto(
    config_proto: engine_common_pb2.EngineRunnableConfig | None,
) -> RunnableConfig:
    """Deserialize an EngineRunnableConfig proto back into a RunnableConfig."""
    if not config_proto:
        return RunnableConfig(tags=[], metadata={}, configurable={})

    configurable = _configurable_from_proto(config_proto)

    # Rebuild metadata: JSON map first, then the dedicated proto fields.
    metadata = {
        key: orjson.loads(raw) for key, raw in config_proto.metadata_json.items()
    }
    if config_proto.HasField("run_attempt"):
        metadata["run_attempt"] = config_proto.run_attempt
    if config_proto.HasField("server_run_id"):
        metadata["run_id"] = config_proto.server_run_id

    result = RunnableConfig()
    # extra_json entries go in first, so the well-known keys assigned below
    # take precedence on any clash.
    for key, raw in config_proto.extra_json.items():
        result[key] = orjson.loads(raw)  # type: ignore[invalid-key]
    if config_proto.tags:
        result["tags"] = list(config_proto.tags)
    if metadata:
        result["metadata"] = metadata
    if configurable:
        result["configurable"] = configurable
    if config_proto.HasField("run_name"):
        result["run_name"] = config_proto.run_name

    if config_proto.HasField("max_concurrency"):
        result["max_concurrency"] = config_proto.max_concurrency

    if config_proto.HasField("recursion_limit"):
        result["recursion_limit"] = config_proto.recursion_limit

    return result
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
def _configurable_from_proto(
    config_proto: engine_common_pb2.EngineRunnableConfig,
) -> dict[str, Any]:
    """Rebuild the ``configurable`` dict from the proto's dedicated fields."""
    configurable: dict[str, Any] = {}

    # Optional scalar fields: copied only when explicitly present.
    scalar_fields = (
        ("resuming", CONFIG_KEY_RESUMING),
        ("task_id", CONFIG_KEY_TASK_ID),
        ("thread_id", CONFIG_KEY_THREAD_ID),
        ("checkpoint_id", CONFIG_KEY_CHECKPOINT_ID),
        ("checkpoint_ns", CONFIG_KEY_CHECKPOINT_NS),
    )
    for field_name, config_key in scalar_fields:
        if config_proto.HasField(field_name):
            configurable[config_key] = getattr(config_proto, field_name)

    if config_proto.HasField("durability"):
        durability = _durability_from_proto(config_proto.durability)
        if durability:
            configurable[CONFIG_KEY_DURABILITY] = durability

    if config_proto.HasField("graph_id"):
        configurable[CONFIG_KEY_GRAPH_ID] = config_proto.graph_id

    if len(config_proto.checkpoint_map) > 0:
        configurable[CONFIG_KEY_CHECKPOINT_MAP] = dict(config_proto.checkpoint_map)

    for key, raw in config_proto.extra_configurable_json.items():
        configurable[key] = orjson.loads(raw)

    return configurable
|