stores 0.1.7.dev6.tar.gz → 0.1.8.dev2.tar.gz
This diff compares the contents of two publicly released versions of the package as published to their public registry. It is provided for informational purposes only.
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/PKG-INFO +1 -1
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/pyproject.toml +1 -1
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/run_browser_use.py +4 -4
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/stores/format.py +15 -9
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/stores/indexes/base_index.py +12 -9
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/stores/indexes/remote_index.py +7 -2
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/stores/indexes/venv_utils.py +140 -116
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/tests/mock_index/tools.py +23 -1
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/tests/mock_index/tools.toml +5 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/tests/mock_index_w_deps/mock_index/__init__.py +10 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/tests/mock_index_w_deps/tools.toml +3 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/tests/test_format/conftest.py +2 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/tests/test_indexes/conftest.py +28 -3
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/tests/test_indexes/test_base_index.py +161 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/tests/test_indexes/test_index.py +5 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/tests/test_indexes/test_local_index.py +14 -2
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/tests/test_indexes/test_remote_index.py +6 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/tests/test_indexes/test_venv_utils.py +79 -3
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/uv.lock +1 -1
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/.gitignore +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/.python-version +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/LICENSE +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/README.md +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/examples/README.md +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/examples/quickstarts/anthropic_api.py +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/examples/quickstarts/google_gemini_auto_call.py +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/examples/quickstarts/google_gemini_manual_call.py +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/examples/quickstarts/langchain_w_tool_calling.py +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/examples/quickstarts/langgraph_agent.py +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/examples/quickstarts/litellm_w_tool_calling.py +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/examples/quickstarts/llamaindex_agent.py +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/examples/quickstarts/openai_agent.py +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/examples/quickstarts/openai_chat_completions.py +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/examples/quickstarts/openai_responses.py +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/local_generator.py +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/run_remote_tool.py +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/run_sandbox.py +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/stores/__init__.py +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/stores/constants.py +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/stores/indexes/__init__.py +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/stores/indexes/index.py +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/stores/indexes/local_index.py +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/stores/parse.py +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/stores/utils.py +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/tests/README.md +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/tests/mock_index/hello/__init__.py +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/tests/mock_index_custom_class/foo.py +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/tests/mock_index_custom_class/tools.toml +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/tests/mock_index_function_error/foo.py +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/tests/mock_index_function_error/tools.toml +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/tests/mock_index_w_deps/pyproject.toml +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/tests/mock_index_w_deps/requirements.txt +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/tests/test_format/test_format.py +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/tests/test_parse/conftest.py +0 -0
- {stores-0.1.7.dev6 → stores-0.1.8.dev2}/tests/test_parse/test_parse.py +0 -0
run_browser_use.py

@@ -1,16 +1,16 @@
 import stores # noqa
 
+
 index = stores.indexes.LocalIndex(
     "/drive3/Silanthro/tools/basic-browser-use",
     create_venv=True,
-    include=["basic_browser_use.
+    include=["basic_browser_use.stream_browser_agent_gui"],
 )
-# print(index.tools)
 
 
 async def collect():
     async for value in index.astream_execute(
-        "basic_browser_use.
+        "basic_browser_use.stream_browser_agent_gui",
         {"task": "What are the latest OpenAI models?"},
     ):
         print(value)

@@ -28,7 +28,7 @@ async def collect():
 # asyncio.run(collect())
 
 for value in index.stream_execute(
-    "basic_browser_use.
+    "basic_browser_use.stream_browser_agent_gui",
     {"task": "What are the latest OpenAI models?"},
 ):
     print(value)
stores/format.py

@@ -61,7 +61,7 @@ def get_type_repr(typ: Type | GenericAlias) -> list[str]:
         return [type_mappings[typ.__name__]]
 
 
-def get_type_schema(typ: Type | GenericAlias):
+def get_type_schema(typ: Type | GenericAlias, provider: ProviderFormat):
     origin = get_origin(typ)
     args = get_args(typ)
 

@@ -77,24 +77,27 @@ def get_type_schema(typ: Type | GenericAlias):
         schema["enum"] = [v.value for v in typ]
     elif isinstance(typ, type) and typ.__class__.__name__ == "_TypedDictMeta":
         hints = get_type_hints(typ)
-        schema["properties"] = {
-
+        schema["properties"] = {
+            k: get_type_schema(v, provider) for k, v in hints.items()
+        }
+        if provider != ProviderFormat.GOOGLE_GEMINI:
+            schema["additionalProperties"] = False
         schema["required"] = list(hints.keys())
     elif origin in (list, List) or typ is dict:
         if args:
-            schema["items"] = get_type_schema(args[0])
+            schema["items"] = get_type_schema(args[0], provider)
         else:
             raise TypeError("Insufficient argument type information")
     elif origin in (dict, Dict) or typ is dict:
         raise TypeError("Insufficient argument type information")
     elif origin in (tuple, Tuple) or typ is tuple:
         if args:
-            schema["items"] = get_type_schema(args[0])
+            schema["items"] = get_type_schema(args[0], provider)
         else:
             raise TypeError("Insufficient argument type information")
     elif origin is Union or origin is T.UnionType:
         for arg in args:
-            subschema = get_type_schema(arg)
+            subschema = get_type_schema(arg, provider)
             del subschema["type"]
             schema = {
                 **schema,

@@ -103,14 +106,17 @@ def get_type_schema(typ: Type | GenericAlias):
 
     # Un-nest single member type lists since Gemini does not accept list of types
     # Optional for OpenAI or Anthropic
-    if schema["type"]
-        schema["type"]
+    if schema["type"]:
+        if len(schema["type"]) == 1:
+            schema["type"] = schema["type"][0]
+        elif len(schema["type"]) > 1 and provider == ProviderFormat.GOOGLE_GEMINI:
+            schema["type"] = schema["type"][0]
 
     return schema
 
 
 def get_param_schema(param: inspect.Parameter, provider: ProviderFormat):
-    param_schema = get_type_schema(param.annotation)
+    param_schema = get_type_schema(param.annotation, provider)
 
     if param_schema["type"] is None:
         raise TypeError(f"Unsupported type: {param.annotation.__name__}")
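Illustrative sketch (not from the package): threading the new provider argument through get_type_schema lets the emitted JSON schema differ per provider, e.g. adding additionalProperties for OpenAI/Anthropic objects while omitting it for Gemini, and collapsing multi-member type lists only for Gemini. The snippet below uses a stand-in Provider enum and a standalone helper solely to show the un-nesting rule from the last hunk:

from enum import Enum


class Provider(str, Enum):  # stand-in for stores.format.ProviderFormat
    OPENAI = "openai"
    GOOGLE_GEMINI = "google-gemini"


def finalize_type(schema: dict, provider: Provider) -> dict:
    # Mirrors the new rule: unwrap single-member type lists for every provider,
    # and collapse longer lists to their first entry only for Gemini.
    types = schema.get("type")
    if types:
        if len(types) == 1:
            schema["type"] = types[0]
        elif provider == Provider.GOOGLE_GEMINI:
            schema["type"] = types[0]
    return schema


print(finalize_type({"type": ["string", "null"]}, Provider.OPENAI))         # type stays a list
print(finalize_type({"type": ["string", "null"]}, Provider.GOOGLE_GEMINI))  # collapsed to "string"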
stores/indexes/base_index.py

@@ -9,7 +9,6 @@ from typing import (
     Callable,
     List,
     Literal,
-    Optional,
     Tuple,
     Union,
     get_args,

@@ -98,7 +97,7 @@ def _handle_non_string_literal(annotation: type):
         return list[new_annotation], {"item": literal_map}
     if origin is Union or origin is UnionType:
         union_literal_maps = {}
-        argtype_args = [a for a in get_args(annotation)
+        argtype_args = [a for a in get_args(annotation)]
         new_union, literal_map = _handle_non_string_literal(argtype_args[0])
         union_literal_maps[new_union.__name__] = literal_map
         for child_argtype in argtype_args[1:]:

@@ -198,12 +197,12 @@ def wrap_tool(tool: Callable):
         # Process args with default values: make sure type includes None
         new_annotation = argtype
         if new_annotation is Parameter.empty:
-            new_annotation =
+            new_annotation = type(new_arg.default) | None
         origin = get_origin(new_annotation)
         if origin not in [Union, UnionType] or NoneType not in get_args(
             new_annotation
         ):
-            new_annotation =
+            new_annotation = new_annotation | None
         new_arg = new_arg.replace(
             default=None,
             kind=Parameter.POSITIONAL_OR_KEYWORD,

@@ -402,13 +401,17 @@ class BaseIndex:
             # Handle sync
             yield tool_fn(**kwargs)
 
-    def parse_and_execute(self, msg: str):
+    def parse_and_execute(self, msg: str, collect_results=False):
         toolcall = llm_parse_json(msg, keys=["toolname", "kwargs"])
-        return self.execute(
+        return self.execute(
+            toolcall.get("toolname"), toolcall.get("kwargs"), collect_results
+        )
 
-    async def
+    async def aparse_and_execute(self, msg: str, collect_results=False):
         toolcall = llm_parse_json(msg, keys=["toolname", "kwargs"])
-        return await self.aexecute(
+        return await self.aexecute(
+            toolcall.get("toolname"), toolcall.get("kwargs"), collect_results
+        )
 
     def stream_parse_and_execute(self, msg: str):
         toolcall = llm_parse_json(msg, keys=["toolname", "kwargs"])

@@ -416,7 +419,7 @@ class BaseIndex:
 
     async def astream_parse_and_execute(self, msg: str):
         toolcall = llm_parse_json(msg, keys=["toolname", "kwargs"])
-        async for value in self.
+        async for value in self.astream_execute(
             toolcall.get("toolname"), toolcall.get("kwargs")
         ):
             yield value
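Usage sketch (illustrative only, assuming behaviour that matches the new tests further below): parse_and_execute and aparse_and_execute now forward a collect_results flag to execute/aexecute, so a generator tool can either return just its final value or every streamed value as a list. The tool and index here are invented for the example:

import json

from stores.indexes.base_index import BaseIndex


def count_up(n: int):
    # A generator tool that streams intermediate values
    for i in range(n):
        yield i


index = BaseIndex([count_up])
msg = json.dumps({"toolname": "count_up", "kwargs": {"n": 3}})

assert index.parse_and_execute(msg) == 2                                 # last yielded value only
assert index.parse_and_execute(msg, collect_results=True) == [0, 1, 2]   # all streamed values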
stores/indexes/remote_index.py

@@ -8,7 +8,7 @@ from pathlib import Path
 from typing import Optional
 
 import requests
-from git import Repo
+from git import GitCommandError, Repo
 
 from stores.constants import VENV_NAME
 from stores.indexes.base_index import BaseIndex

@@ -44,6 +44,8 @@ def lookup_index(index_id: str, index_version: str | None = None):
     )
     if response.ok:
         return response.json()
+    else:
+        raise ValueError(f"Index {index_id} not found in database")
 
 
 class RemoteIndex(BaseIndex):

@@ -88,7 +90,10 @@ class RemoteIndex(BaseIndex):
         if not repo_url:
             # Otherwise, assume index references a GitHub repo
             repo_url = f"https://github.com/{index_id}.git"
-
+        try:
+            repo = Repo.clone_from(repo_url, self.index_folder)
+        except GitCommandError as e:
+            raise ValueError(f"Index {index_id} not found") from e
         if commit_like:
             repo.git.checkout(commit_like)
 
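Illustrative failure path: with the new guards, referencing a nonexistent index now surfaces as a ValueError rather than an unhandled lookup or GitPython error. Sketch only; the index name is a placeholder:

import stores

try:
    stores.indexes.RemoteIndex("no_such_index")
except ValueError as e:
    print(e)  # e.g. "Index no_such_index not found"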
stores/indexes/venv_utils.py

@@ -327,8 +327,7 @@ def parse_tool_signature(
     if signature_dict.get("isasyncgenfunction"):
 
         async def func_handler(*args, **kwargs):
-
-            async for value in run_remote_tool(
+            async for value in run_remote_tool_async(
                 tool_id=signature_dict["tool_id"],
                 index_folder=index_folder,
                 args=args,

@@ -338,6 +337,7 @@ def parse_tool_signature(
                 stream=True,
             ):
                 yield value
+
     elif signature_dict.get("isgeneratorfunction"):
 
         def func_handler(*args, **kwargs):

@@ -347,7 +347,7 @@ def parse_tool_signature(
         def run():
             async def runner():
                 try:
-                    async for item in
+                    async for item in run_remote_tool_async(
                         tool_id=signature_dict["tool_id"],
                         index_folder=index_folder,
                         args=args,
@@ -381,26 +381,63 @@ def parse_tool_signature(
     elif signature_dict.get("iscoroutinefunction"):
 
         async def func_handler(*args, **kwargs):
-
-
+            result = []
+            async for item in run_remote_tool_async(
                 tool_id=signature_dict["tool_id"],
                 index_folder=index_folder,
                 args=args,
                 kwargs=kwargs,
                 venv=venv,
                 env_var=env_var,
-
+                stream=True,
+            ):
+                result.append(item)
+            return result[-1] if result else None
     else:
 
-        def
-
+        async def func_handler_async_fallback(*args, **kwargs):
+            result = []
+            async for item in run_remote_tool_async(
                 tool_id=signature_dict["tool_id"],
                 index_folder=index_folder,
                 args=args,
                 kwargs=kwargs,
                 venv=venv,
                 env_var=env_var,
-
+                stream=True,
+            ):
+                result.append(item)
+            return result[-1] if result else None
+
+        def func_handler(*args, **kwargs):
+            coro = func_handler_async_fallback(*args, **kwargs)
+            try:
+                # Check if we're in an async context
+                asyncio.get_running_loop()
+                in_async = True
+            except RuntimeError:
+                in_async = False
+
+            if not in_async:
+                # Safe to run directly
+                return asyncio.run(coro)
+
+            q = queue.Queue()
+
+            def runner():
+                try:
+                    result = asyncio.run(coro)
+                    q.put(result)
+                except Exception as e:
+                    q.put(e)
+
+            t = threading.Thread(target=runner)
+            t.start()
+            result = q.get()
+            t.join()
+            if isinstance(result, Exception):
+                raise result
+            return result
 
     # Reconstruct signature from list of args
     params = []
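The fallback branch above wraps an async helper in a synchronous func_handler: if no event loop is running it calls asyncio.run directly, otherwise it drives the coroutine on its own loop in a worker thread and relays the result or exception back through a queue. A standalone sketch of that same bridge pattern (not the package's code):

import asyncio
import queue
import threading


async def work() -> str:
    await asyncio.sleep(0.1)
    return "done"


def run_sync(coro):
    try:
        asyncio.get_running_loop()
        in_async = True
    except RuntimeError:
        in_async = False

    if not in_async:
        # No loop in this thread: drive the coroutine directly
        return asyncio.run(coro)

    # A loop is already running: run the coroutine on a fresh loop in a worker
    # thread and hand the outcome back (note q.get() blocks the calling thread).
    q = queue.Queue()

    def runner():
        try:
            q.put(asyncio.run(coro))
        except Exception as e:
            q.put(e)

    t = threading.Thread(target=runner)
    t.start()
    result = q.get()
    t.join()
    if isinstance(result, Exception):
        raise result
    return result


print(run_sync(work()))  # "done"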
@@ -426,92 +463,7 @@ def parse_tool_signature(
     return func
 
 
-
-def run_remote_tool(
-    tool_id: str,
-    index_folder: os.PathLike,
-    args: list | None = None,
-    kwargs: dict | None = None,
-    venv: str = VENV_NAME,
-    env_var: dict | None = None,
-    stream: bool = False,
-):
-    args = args or []
-    kwargs = kwargs or {}
-    env_var = env_var or {}
-
-    module_name = ".".join(tool_id.split(".")[:-1])
-    tool_name = tool_id.split(".")[-1]
-    payload = json.dumps(
-        {
-            "args": args,
-            "kwargs": kwargs,
-        }
-    ).encode("utf-8")
-
-    # We use sockets to pass function output
-    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    listener.bind(("localhost", 0))
-    listener.listen(1)
-    _, port = listener.getsockname()
-
-    result_data = {}
-
-    def handle_connection_sync():
-        conn, _ = listener.accept()
-        with conn:
-            buffer = ""
-            while True:
-                chunk = conn.recv(4096).decode("utf-8")
-                if not chunk:
-                    break
-                buffer += chunk
-                while "\n" in buffer:
-                    line, buffer = buffer.split("\n", 1)
-                    if not line.strip():
-                        continue
-                    msg = json.loads(line)
-                    if msg.get("ok") and "stream" in msg:
-                        result_data.setdefault("stream", []).append(msg["stream"])
-                    elif msg.get("ok") and "result" in msg:
-                        result_data["result"] = msg["result"]
-                    elif "error" in msg:
-                        result_data["error"] = msg["error"]
-                    elif msg.get("done"):
-                        return
-
-    async def handle_connection_async():
-        loop = asyncio.get_running_loop()
-        conn, _ = await loop.sock_accept(listener)
-        conn.setblocking(False)
-        buffer = ""
-        try:
-            while True:
-                chunk = await loop.sock_recv(conn, 4096)
-                if not chunk:
-                    break
-                buffer += chunk.decode("utf-8")
-                while "\n" in buffer:
-                    line, buffer = buffer.split("\n", 1)
-                    if not line.strip():
-                        continue
-                    msg = json.loads(line)
-                    if msg.get("ok") and "stream" in msg:
-                        yield msg["stream"]
-                    elif msg.get("ok") and "result" in msg:
-                        yield msg["result"]
-                    elif "error" in msg:
-                        raise RuntimeError(f"Subprocess error:\n{msg['error']}")
-                    elif msg.get("done"):
-                        return
-        finally:
-            conn.close()
-
-    if not stream:
-        thread = threading.Thread(target=lambda: handle_connection_sync())
-        thread.start()
-
-    runner = f"""
+tool_runner = """
 import asyncio, inspect, json, socket, sys, traceback
 sys.path.insert(0, "{index_folder}")
 
@@ -560,26 +512,98 @@ finally:
         pass
 """
 
-
-
-
-
-
+
+# TODO: Sanitize tool_id, args, and kwargs
+async def run_remote_tool_async(
+    tool_id: str,
+    index_folder: os.PathLike,
+    args: list | None = None,
+    kwargs: dict | None = None,
+    venv: str = VENV_NAME,
+    env_var: dict | None = None,
+    stream: bool = True,
+):
+    args = args or []
+    kwargs = kwargs or {}
+    env_var = env_var or {}
+
+    module_name = ".".join(tool_id.split(".")[:-1])
+    tool_name = tool_id.split(".")[-1]
+    payload = json.dumps({"args": args, "kwargs": kwargs}).encode("utf-8")
+
+    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    listener.bind(("localhost", 0))
+    listener.listen(1)
+    listener.setblocking(False)
+    _, port = listener.getsockname()
+
+    loop = asyncio.get_running_loop()
+    conn_task = loop.create_task(loop.sock_accept(listener))
+
+    runner = tool_runner.format(
+        index_folder=index_folder,
+        port=port,
+        module_name=module_name,
+        tool_name=tool_name,
+    )
+
+    proc = await asyncio.create_subprocess_exec(
+        get_python_command(Path(index_folder) / venv),
+        "-c",
+        runner,
+        stdin=asyncio.subprocess.PIPE,
+        stdout=asyncio.subprocess.DEVNULL,
+        stderr=asyncio.subprocess.PIPE,
         env=env_var or None,
     )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+    try:
+        proc.stdin.write(payload)
+        await proc.stdin.drain()
+        proc.stdin.close()
+
+        conn, _ = await conn_task
+        conn.setblocking(False)
+
+        buffer = ""
+        result = None
+        while True:
+            chunk = await loop.sock_recv(conn, 4096)
+            if not chunk:
+                break
+            buffer += chunk.decode("utf-8")
+            while "\n" in buffer:
+                line, buffer = buffer.split("\n", 1)
+                if not line.strip():
+                    continue
+                msg = json.loads(line)
+
+                if msg.get("ok") and "stream" in msg:
+                    if stream:
+                        yield msg["stream"]
+                    else:
+                        result = msg["stream"]
+                elif msg.get("ok") and "result" in msg:
+                    result = msg["result"]
+                elif "error" in msg:
+                    raise RuntimeError(f"Subprocess error:\n{msg['error']}")
+                elif "done" in msg and result is not None:
+                    yield result
+                    return
+
+    except asyncio.CancelledError:
+        proc.kill()
+        await proc.wait()
+        raise
+    finally:
+        try:
+            conn.close()
+        except Exception:
+            pass
+        try:
+            listener.close()
+        except Exception:
+            pass
+        if proc.returncode is None:
+            proc.kill()
+            await proc.wait()
tests/mock_index/tools.py

@@ -1,5 +1,5 @@
 from enum import Enum
-from typing import TypedDict
+from typing import Literal, TypedDict
 
 
 def foo(bar: str):

@@ -46,3 +46,25 @@ class Animal(TypedDict):
 
 def typed_dict_input(bar: Animal):
     return bar
+
+
+def literal_input(bar: Literal["red", "green", "blue"]):
+    return bar
+
+
+def literal_nonstring_input(bar: Literal[1, 2, 3]):
+    return bar
+
+
+def default_input(bar: str = "foo"):
+    return bar
+
+
+def stream_input(bar: str):
+    for _ in range(3):
+        yield bar
+
+
+async def astream_input(bar: str):
+    for _ in range(3):
+        yield bar
tests/mock_index_w_deps/mock_index/__init__.py

@@ -55,3 +55,13 @@ def tuple_input(bar: tuple[Animal]) -> tuple[Animal]:
 
 def union_input(bar: Union[Color, Animal]) -> Union[Color, Animal]:
     return bar
+
+
+def stream_input(bar: str):
+    for _ in range(3):
+        yield bar
+
+
+async def astream_input(bar: str):
+    for _ in range(3):
+        yield bar
tests/test_format/conftest.py

@@ -128,6 +128,8 @@ def a_tool_with_complex_args(provider):
     elif provider == ProviderFormat.GOOGLE_GEMINI:
         for k in parameters.keys():
             parameters[k]["nullable"] = False
+            if parameters[k]["type"] == "object":
+                del parameters[k]["additionalProperties"]
     schema = {
         "name": tool.__name__,
         "parameters": {
tests/test_indexes/conftest.py

@@ -196,15 +196,15 @@ def foo_w_optional_and_default(bar: Optional[str] = "test"):
     params=[
         {
             "function": foo_w_default,
-            "signature": "(bar:
+            "signature": "(bar: str | None = None)",
         },
         {
             "function": foo_w_default_notype,
-            "signature": "(bar:
+            "signature": "(bar: str | None = None)",
         },
         {
             "function": foo_w_optional_and_default,
-            "signature": "(bar:
+            "signature": "(bar: str | None = None)",
         },
     ],
 )

@@ -466,3 +466,28 @@ def cast_foo_unhandled(bar: list[str, int]):
 )
 def cast_tool(request):
     yield request.param
+
+
+def foo(bar: str):
+    return bar
+
+
+async def afoo(bar: str):
+    return bar
+
+
+def stream_foo(bar: str):
+    for _ in range(3):
+        yield bar
+
+
+async def astream_foo(bar: str):
+    for _ in range(3):
+        yield bar
+
+
+@pytest.fixture(
+    params=[foo, afoo, stream_foo, astream_foo],
+)
+def various_runtype_tool(request):
+    yield request.param
tests/test_indexes/test_base_index.py

@@ -1,4 +1,6 @@
+import asyncio
 import inspect
+import json
 import logging
 
 import pytest
@@ -197,3 +199,162 @@ def test_base_index_format_tools(sample_tool, provider):
             },
         }
     ]
+
+
+def handle_various_run_type(tool_fn, kwargs: dict, collect_results=False):
+    if inspect.isasyncgenfunction(tool_fn):
+        # Handle async generator
+
+        async def collect():
+            results = []
+            async for value in tool_fn(**kwargs):
+                results.append(value)
+            if collect_results:
+                return results
+            else:
+                return results[-1]
+
+        loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(loop)
+        return loop.run_until_complete(collect())
+    elif inspect.isgeneratorfunction(tool_fn):
+        # Handle sync generator
+        results = []
+        for value in tool_fn(**kwargs):
+            results.append(value)
+        if collect_results:
+            return results
+        else:
+            return results[-1]
+    elif inspect.iscoroutinefunction(tool_fn):
+        # Handle async
+        loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(loop)
+        return loop.run_until_complete(tool_fn(**kwargs))
+    else:
+        # Handle sync
+        return tool_fn(**kwargs)
+
+
+async def ahandle_various_run_type(tool_fn, kwargs: dict, collect_results=False):
+    kwargs = kwargs or {}
+    if inspect.isasyncgenfunction(tool_fn):
+        # Handle async generator
+        results = []
+        async for value in tool_fn(**kwargs):
+            results.append(value)
+        if collect_results:
+            return results
+        else:
+            return results[-1]
+    elif inspect.isgeneratorfunction(tool_fn):
+        # Handle sync generator
+        results = []
+        for value in tool_fn(**kwargs):
+            results.append(value)
+        if collect_results:
+            return results
+        else:
+            return results[-1]
+    elif inspect.iscoroutinefunction(tool_fn):
+        # Handle async
+        return await tool_fn(**kwargs)
+    else:
+        # Handle sync
+        return tool_fn(**kwargs)
+
+
+def test_base_index_execute(various_runtype_tool):
+    index = BaseIndex([various_runtype_tool])
+
+    assert index.parse_and_execute(
+        json.dumps(
+            {
+                "toolname": various_runtype_tool.__name__,
+                "kwargs": {"bar": "hello"},
+            }
+        )
+    ) == handle_various_run_type(various_runtype_tool, {"bar": "hello"})
+
+
+async def test_base_index_aexecute(various_runtype_tool):
+    index = BaseIndex([various_runtype_tool])
+
+    assert await index.aparse_and_execute(
+        json.dumps(
+            {
+                "toolname": various_runtype_tool.__name__,
+                "kwargs": {"bar": "hello"},
+            }
+        )
+    ) == await ahandle_various_run_type(various_runtype_tool, {"bar": "hello"})
+
+
+def test_base_index_execute_collect(various_runtype_tool):
+    index = BaseIndex([various_runtype_tool])
+
+    assert index.execute(
+        various_runtype_tool.__name__, {"bar": "hello"}, collect_results=True
+    ) == handle_various_run_type(
+        various_runtype_tool, {"bar": "hello"}, collect_results=True
+    )
+
+
+async def test_base_index_aexecute_collect(various_runtype_tool):
+    index = BaseIndex([various_runtype_tool])
+
+    assert await index.aexecute(
+        various_runtype_tool.__name__, {"bar": "hello"}, collect_results=True
+    ) == await ahandle_various_run_type(
+        various_runtype_tool, {"bar": "hello"}, collect_results=True
+    )
+
+
+def test_base_index_stream_execute(various_runtype_tool):
+    index = BaseIndex([various_runtype_tool])
+
+    results = []
+    for result in index.stream_parse_and_execute(
+        json.dumps(
+            {
+                "toolname": various_runtype_tool.__name__,
+                "kwargs": {"bar": "hello"},
+            }
+        )
+    ):
+        results.append(result)
+    if not inspect.isgeneratorfunction(
+        various_runtype_tool
+    ) and not inspect.isasyncgenfunction(various_runtype_tool):
+        results = results[0]
+
+    answer = handle_various_run_type(
+        various_runtype_tool, {"bar": "hello"}, collect_results=True
+    )
+
+    assert results == answer
+
+
+async def test_base_index_astream_execute(various_runtype_tool):
+    index = BaseIndex([various_runtype_tool])
+
+    results = []
+    async for result in index.astream_parse_and_execute(
+        json.dumps(
+            {
+                "toolname": various_runtype_tool.__name__,
+                "kwargs": {"bar": "hello"},
+            }
+        )
+    ):
+        results.append(result)
+    if not inspect.isgeneratorfunction(
+        various_runtype_tool
+    ) and not inspect.isasyncgenfunction(various_runtype_tool):
+        results = results[0]
+
+    answer = await ahandle_various_run_type(
+        various_runtype_tool, {"bar": "hello"}, collect_results=True
+    )
+
+    assert results == answer
tests/test_indexes/test_index.py

@@ -31,6 +31,11 @@ async def test_index(local_index_folder):
         "tools.async_foo",
         "tools.enum_input",
         "tools.typed_dict_input",
+        "tools.literal_input",
+        "tools.literal_nonstring_input",
+        "tools.default_input",
+        "tools.stream_input",
+        "tools.astream_input",
         "hello.world",
         "foo",
     ]
tests/test_indexes/test_local_index.py

@@ -13,6 +13,11 @@ def test_local_index_basic(local_index_folder):
         "tools.async_foo",
         "tools.enum_input",
         "tools.typed_dict_input",
+        "tools.literal_input",
+        "tools.literal_nonstring_input",
+        "tools.default_input",
+        "tools.stream_input",
+        "tools.astream_input",
         "hello.world",
     ]
     # Test tool execution

@@ -22,6 +27,11 @@ def test_local_index_basic(local_index_folder):
         "tools.async_foo": "hello world",
         "tools.enum_input": "red",
         "tools.typed_dict_input": {"name": "Tiger", "num_legs": 4},
+        "tools.literal_input": "red",
+        "tools.literal_nonstring_input": 1,
+        "tools.default_input": "foo",
+        "tools.stream_input": "hello world",
+        "tools.astream_input": "hello world",
         "hello.world": "hello world",
     }
     for tool in index.tools[:-1]:

@@ -53,9 +63,11 @@ def test_local_index_invalid_folder(buggy_index_folder):
 def test_local_index_with_deps(remote_index_folder):
     # Regular LocalIndex should not load index folder with deps since deps are not installed
     with pytest.raises(ModuleNotFoundError):
-        LocalIndex(remote_index_folder)
+        LocalIndex(remote_index_folder, exclude=["mock_index.not_a_function"])
     # LocalIndex can load index folder with deps if create_venv=True
-    LocalIndex(
+    LocalIndex(
+        remote_index_folder, exclude=["mock_index.not_a_function"], create_venv=True
+    )
 
 
 def test_local_index_with_env_var():
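Usage sketch for the include/exclude filters exercised by these tests; the folder and tool names below are placeholders, not taken from the package:

import stores

index = stores.indexes.LocalIndex(
    "./my-tools-folder",
    create_venv=True,
    exclude=["my_module.not_a_tool"],    # drop specific callables
    # include=["my_module.only_this"],   # or keep an explicit allow-list instead
)
print([tool.__name__ for tool in index.tools])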
tests/test_indexes/test_remote_index.py

@@ -21,6 +21,12 @@ def test_lookup_index():
     }
 
 
+async def test_fake_remote_index():
+    fake_index = "no_such_index"
+    with pytest.raises(ValueError, match=f"Index {fake_index} not found"):
+        stores.indexes.RemoteIndex(fake_index)
+
+
 async def test_remote_index():
     # Check that env_vars are set correctly
     index = stores.indexes.RemoteIndex(
tests/test_indexes/test_venv_utils.py

@@ -1,6 +1,7 @@
 import inspect
 import logging
 import venv
+from typing import get_args, get_origin, get_type_hints
 
 import pytest
 

@@ -48,7 +49,11 @@ async def test_install_venv_deps(remote_index_folder):
     assert reinstall_result == "Already installed"
 
     # Tools should load and be runnable
-    tools = venv_utils.init_venv_tools(
+    tools = venv_utils.init_venv_tools(
+        remote_index_folder,
+        # Test exclude
+        exclude=["mock_index.not_a_function"],
+    )
 
     # Tools should run successfully
     sample_string = "red"
@@ -73,15 +78,55 @@ async def test_install_venv_deps(remote_index_folder):
             kwargs = {"bar": [sample_animal, sample_animal]}
         elif tool.__name__ == "mock_index.union_input":
             kwargs = {"bar": sample_animal}
+        elif tool.__name__ in ["mock_index.stream_input", "mock_index.astream_input"]:
+            kwargs = {"bar": sample_string}
         else:
            kwargs = {}
         output = kwargs.get("bar", "pip_install_test")
-        if inspect.
+        if inspect.isasyncgenfunction(tool):
+            async for value in tool(**kwargs):
+                assert value == output
+        elif inspect.isgeneratorfunction(tool):
+            for value in tool(**kwargs):
+                assert value == output
+        elif inspect.iscoroutinefunction(tool):
             assert await tool(**kwargs) == output
         else:
             assert tool(**kwargs) == output
 
 
+async def test_install_venv_deps_with_include(remote_index_folder):
+    # Create venv
+    venv_folder = remote_index_folder / VENV_NAME
+    venv.create(venv_folder, symlinks=True, with_pip=True)
+    # Test installation
+    result = venv_utils.install_venv_deps(remote_index_folder)
+    assert (remote_index_folder / venv_utils.HASH_FILE).exists()
+
+    for config_file in venv_utils.SUPPORTED_CONFIGS:
+        if (remote_index_folder / config_file).exists():
+            assert result.endswith(
+                f'"{" ".join(venv_utils.get_pip_command(venv_folder, config_file))}"'
+            )
+            assert venv_utils.has_installed(remote_index_folder / config_file)
+            break
+
+    # Running install again should show "Already installed"
+    reinstall_result = venv_utils.install_venv_deps(remote_index_folder)
+    assert reinstall_result == "Already installed"
+
+    # Tools should load and be runnable
+    tools = venv_utils.init_venv_tools(
+        remote_index_folder,
+        # Test include
+        include=["mock_index.typed_function"],
+    )
+
+    assert len(tools) == 1
+    tool = tools[0]
+    assert tool.__name__ == "mock_index.typed_function"
+
+
 def test_index_with_invalid_tool(index_folder_custom_class):
     with pytest.raises(RuntimeError, match="Error loading tool"):
         venv_utils.init_venv_tools(index_folder_custom_class)
@@ -89,10 +134,41 @@ def test_index_with_invalid_tool(index_folder_custom_class):
 
 def test_index_with_tool_error(index_folder_function_error):
     tools = venv_utils.init_venv_tools(index_folder_function_error)
-    with pytest.raises(RuntimeError, match="
+    with pytest.raises(RuntimeError, match="ZeroDivisionError"):
         tools[0]()
 
 
+def test_parse_param_type_with_forward_ref():
+    param_info = {
+        "type": "TypedDict",
+        "type_name": "Person",
+        "fields": {
+            "name": {"type": str},
+            "friends": {
+                "type": "List",
+                "item_type": {
+                    "type": "Person",
+                },
+            },
+        },
+    }
+    typ = venv_utils.parse_param_type(param_info)
+    assert typ.__class__.__name__ == "_TypedDictMeta"
+    assert typ.__name__ == "Person"
+    hints = get_type_hints(typ)
+    for k, v in hints.items():
+        if k == "name":
+            assert v is str
+        elif k == "friends":
+            origin = get_origin(v)
+            assert origin is list
+            args = list(get_args(v))
+            assert len(args) == 1
+            assert args[0] == "Person"
+        else:
+            raise AssertionError("Invalid attribute")
+
+
 def test_parse_param_type_with_invalid_type():
     with pytest.raises(TypeError, match="Invalid param type"):
         venv_utils.parse_param_type({"type": "not a type"})