weco 0.1.8__tar.gz → 0.1.10__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {weco-0.1.8 → weco-0.1.10}/.github/workflows/release.yml +2 -2
- {weco-0.1.8 → weco-0.1.10}/PKG-INFO +1 -1
- {weco-0.1.8 → weco-0.1.10}/pyproject.toml +1 -1
- {weco-0.1.8 → weco-0.1.10}/tests/test_asynchronous.py +3 -1
- {weco-0.1.8 → weco-0.1.10}/tests/test_batching.py +2 -2
- {weco-0.1.8 → weco-0.1.10}/tests/test_reasoning.py +7 -2
- {weco-0.1.8 → weco-0.1.10}/tests/test_synchronous.py +3 -1
- {weco-0.1.8 → weco-0.1.10}/weco/client.py +51 -19
- {weco-0.1.8 → weco-0.1.10}/weco/functional.py +32 -5
- {weco-0.1.8 → weco-0.1.10}/weco.egg-info/PKG-INFO +1 -1
- {weco-0.1.8 → weco-0.1.10}/.github/workflows/lint.yml +0 -0
- {weco-0.1.8 → weco-0.1.10}/.gitignore +0 -0
- {weco-0.1.8 → weco-0.1.10}/LICENSE +0 -0
- {weco-0.1.8 → weco-0.1.10}/README.md +0 -0
- {weco-0.1.8 → weco-0.1.10}/assets/weco.svg +0 -0
- {weco-0.1.8 → weco-0.1.10}/examples/cookbook.ipynb +0 -0
- {weco-0.1.8 → weco-0.1.10}/setup.cfg +0 -0
- {weco-0.1.8 → weco-0.1.10}/weco/__init__.py +0 -0
- {weco-0.1.8 → weco-0.1.10}/weco/constants.py +0 -0
- {weco-0.1.8 → weco-0.1.10}/weco/utils.py +0 -0
- {weco-0.1.8 → weco-0.1.10}/weco.egg-info/SOURCES.txt +0 -0
- {weco-0.1.8 → weco-0.1.10}/weco.egg-info/dependency_links.txt +0 -0
- {weco-0.1.8 → weco-0.1.10}/weco.egg-info/requires.txt +0 -0
- {weco-0.1.8 → weco-0.1.10}/weco.egg-info/top_level.txt +0 -0
|
@@ -79,7 +79,7 @@ jobs:
|
|
|
79
79
|
GITHUB_TOKEN: ${{ github.token }}
|
|
80
80
|
run: >-
|
|
81
81
|
gh release create
|
|
82
|
-
'v0.1.8'
|
|
82
|
+
'v0.1.10'
|
|
83
83
|
--repo '${{ github.repository }}'
|
|
84
84
|
--notes ""
|
|
85
85
|
- name: Upload artifact signatures to GitHub Release
|
|
@@ -90,5 +90,5 @@ jobs:
|
|
|
90
90
|
# sigstore-produced signatures and certificates.
|
|
91
91
|
run: >-
|
|
92
92
|
gh release upload
|
|
93
|
-
'v0.1.8' dist/**
|
|
93
|
+
'v0.1.9' dist/**
|
|
94
94
|
--repo '${{ github.repository }}'
|
|
@@ -10,7 +10,7 @@ authors = [
|
|
|
10
10
|
]
|
|
11
11
|
description = "A client facing API for interacting with the WeCo AI function builder service."
|
|
12
12
|
readme = "README.md"
|
|
13
|
-
version = "0.1.8"
|
|
13
|
+
version = "0.1.10"
|
|
14
14
|
license = {text = "MIT"}
|
|
15
15
|
requires-python = ">=3.8"
|
|
16
16
|
dependencies = ["asyncio", "httpx[http2]", "pillow"]
|
|
@@ -35,7 +35,7 @@ async def text_evaluator():
|
|
|
35
35
|
@pytest.mark.asyncio
|
|
36
36
|
async def test_text_aquery(text_evaluator):
|
|
37
37
|
fn_name, version_number, _ = await text_evaluator
|
|
38
|
-
query_response = await aquery(fn_name=fn_name, version_number=version_number, text_input="I love this product!")
|
|
38
|
+
query_response = await aquery(fn_name=fn_name, version=-1, version_number=version_number, text_input="I love this product!")
|
|
39
39
|
|
|
40
40
|
await assert_query_response(query_response)
|
|
41
41
|
assert set(query_response["output"].keys()) == {"sentiment", "explanation"}
|
|
@@ -55,6 +55,7 @@ async def test_image_aquery(image_evaluator):
|
|
|
55
55
|
fn_name, version_number, _ = await image_evaluator
|
|
56
56
|
query_response = await aquery(
|
|
57
57
|
fn_name=fn_name,
|
|
58
|
+
version=-1,
|
|
58
59
|
version_number=version_number,
|
|
59
60
|
images_input=[
|
|
60
61
|
"https://www.integratedtreatmentservices.co.uk/wp-content/uploads/2013/12/Objects-of-Reference.jpg",
|
|
@@ -80,6 +81,7 @@ async def test_text_and_image_aquery(text_and_image_evaluator):
|
|
|
80
81
|
fn_name, version_number, _ = await text_and_image_evaluator
|
|
81
82
|
query_response = await aquery(
|
|
82
83
|
fn_name=fn_name,
|
|
84
|
+
version=-1,
|
|
83
85
|
version_number=version_number,
|
|
84
86
|
text_input="Find x and y.",
|
|
85
87
|
images_input=[
|
|
@@ -49,7 +49,7 @@ def test_batch_query_text(ml_task_evaluator, ml_task_inputs):
|
|
|
49
49
|
fn_name, version_number = ml_task_evaluator
|
|
50
50
|
batch_inputs = ml_task_inputs
|
|
51
51
|
|
|
52
|
-
query_responses = batch_query(fn_name=fn_name, version_number=version_number, batch_inputs=batch_inputs)
|
|
52
|
+
query_responses = batch_query(fn_name=fn_name, version=-1, version_number=version_number, batch_inputs=batch_inputs)
|
|
53
53
|
|
|
54
54
|
assert len(query_responses) == len(batch_inputs)
|
|
55
55
|
|
|
@@ -67,7 +67,7 @@ def test_batch_query_image(image_evaluator, image_inputs):
|
|
|
67
67
|
fn_name, version_number = image_evaluator
|
|
68
68
|
batch_inputs = image_inputs
|
|
69
69
|
|
|
70
|
-
query_responses = batch_query(fn_name=fn_name, version_number=version_number, batch_inputs=batch_inputs)
|
|
70
|
+
query_responses = batch_query(fn_name=fn_name, version=-1, version_number=version_number, batch_inputs=batch_inputs)
|
|
71
71
|
|
|
72
72
|
assert len(query_responses) == len(batch_inputs)
|
|
73
73
|
|
|
@@ -7,7 +7,8 @@ def assert_query_response(query_response):
|
|
|
7
7
|
assert isinstance(query_response, dict)
|
|
8
8
|
assert isinstance(query_response["output"], dict)
|
|
9
9
|
assert isinstance(query_response["reasoning_steps"], list)
|
|
10
|
-
for step in query_response["reasoning_steps"]:
|
|
10
|
+
for step in query_response["reasoning_steps"]:
|
|
11
|
+
assert isinstance(step, str)
|
|
11
12
|
assert isinstance(query_response["in_tokens"], int)
|
|
12
13
|
assert isinstance(query_response["out_tokens"], int)
|
|
13
14
|
assert isinstance(query_response["latency_ms"], float)
|
|
@@ -24,11 +25,14 @@ def text_reasoning_evaluator():
|
|
|
24
25
|
|
|
25
26
|
def test_text_reasoning_query(text_reasoning_evaluator):
|
|
26
27
|
fn_name, version_number, _ = text_reasoning_evaluator
|
|
27
|
-
query_response = query(
|
|
28
|
+
query_response = query(
|
|
29
|
+
fn_name=fn_name, version=-1, version_number=version_number, text_input="I love this product!", return_reasoning=True
|
|
30
|
+
)
|
|
28
31
|
|
|
29
32
|
assert_query_response(query_response)
|
|
30
33
|
assert set(query_response["output"].keys()) == {"sentiment", "explanation"}
|
|
31
34
|
|
|
35
|
+
|
|
32
36
|
@pytest.fixture
|
|
33
37
|
def vision_reasoning_evaluator():
|
|
34
38
|
fn_name, version_number, fn_desc = build(
|
|
@@ -42,6 +46,7 @@ def test_vision_reasoning_query(vision_reasoning_evaluator):
|
|
|
42
46
|
fn_name, version_number, _ = vision_reasoning_evaluator
|
|
43
47
|
query_response = query(
|
|
44
48
|
fn_name=fn_name,
|
|
49
|
+
version=-1,
|
|
45
50
|
version_number=version_number,
|
|
46
51
|
text_input="Find x and y.",
|
|
47
52
|
images_input=[
|
|
@@ -33,7 +33,7 @@ def text_evaluator():
|
|
|
33
33
|
|
|
34
34
|
def test_text_query(text_evaluator):
|
|
35
35
|
fn_name, version_number, _ = text_evaluator
|
|
36
|
-
query_response = query(fn_name=fn_name, version_number=version_number, text_input="I love this product!")
|
|
36
|
+
query_response = query(fn_name=fn_name, version=-1, version_number=version_number, text_input="I love this product!")
|
|
37
37
|
|
|
38
38
|
assert_query_response(query_response)
|
|
39
39
|
assert set(query_response["output"].keys()) == {"sentiment", "explanation"}
|
|
@@ -52,6 +52,7 @@ def test_image_query(image_evaluator):
|
|
|
52
52
|
fn_name, version_number, _ = image_evaluator
|
|
53
53
|
query_response = query(
|
|
54
54
|
fn_name=fn_name,
|
|
55
|
+
version=-1,
|
|
55
56
|
version_number=version_number,
|
|
56
57
|
images_input=[
|
|
57
58
|
"https://www.integratedtreatmentservices.co.uk/wp-content/uploads/2013/12/Objects-of-Reference.jpg",
|
|
@@ -76,6 +77,7 @@ def test_text_and_image_query(text_and_image_evaluator):
|
|
|
76
77
|
fn_name, version_number, _ = text_and_image_evaluator
|
|
77
78
|
query_response = query(
|
|
78
79
|
fn_name=fn_name,
|
|
80
|
+
version=-1,
|
|
79
81
|
version_number=version_number,
|
|
80
82
|
text_input="Find x and y.",
|
|
81
83
|
images_input=[
|
|
@@ -68,6 +68,7 @@ class WecoAI:
|
|
|
68
68
|
self.http2 = http2
|
|
69
69
|
self.timeout = timeout
|
|
70
70
|
self.base_url = "https://function.api.weco.ai"
|
|
71
|
+
|
|
71
72
|
# Setup clients
|
|
72
73
|
self.client = httpx.Client(http2=http2, timeout=timeout)
|
|
73
74
|
self.async_client = httpx.AsyncClient(http2=http2, timeout=timeout)
|
|
@@ -246,18 +247,12 @@ class WecoAI:
|
|
|
246
247
|
"""
|
|
247
248
|
return self._build(task_description=task_description, multimodal=multimodal, is_async=False)
|
|
248
249
|
|
|
249
|
-
def _upload_image(self,
|
|
250
|
+
def _upload_image(self, image_info: Dict[str, Any]) -> str:
|
|
250
251
|
"""
|
|
251
252
|
Uploads an image to an S3 bucket and returns the URL of the uploaded image.
|
|
252
253
|
|
|
253
254
|
Parameters
|
|
254
255
|
----------
|
|
255
|
-
fn_name : str
|
|
256
|
-
The name of the function for which the image is being uploaded.
|
|
257
|
-
version_number : int
|
|
258
|
-
The version number of the function for which the image is being uploaded.
|
|
259
|
-
upload_id: str
|
|
260
|
-
A unique identifier for the image upload.
|
|
261
256
|
image_info : Dict[str, Any]
|
|
262
257
|
A dictionary containing the image metadata.
|
|
263
258
|
|
|
@@ -290,7 +285,7 @@ class WecoAI:
|
|
|
290
285
|
|
|
291
286
|
# Request a presigned URL from the server
|
|
292
287
|
endpoint = "upload_link"
|
|
293
|
-
request_data = {"
|
|
288
|
+
request_data = {"file_type": file_type}
|
|
294
289
|
# This needs to be a synchronous request since we need the presigned URL to upload the image
|
|
295
290
|
response = self._make_request(endpoint=endpoint, data=request_data, is_async=False)
|
|
296
291
|
|
|
@@ -393,10 +388,11 @@ class WecoAI:
|
|
|
393
388
|
self,
|
|
394
389
|
is_async: bool,
|
|
395
390
|
fn_name: str,
|
|
396
|
-
|
|
391
|
+
version: Union[str, int],
|
|
392
|
+
version_number: int,
|
|
397
393
|
text_input: Optional[str],
|
|
398
394
|
images_input: Optional[List[str]],
|
|
399
|
-
return_reasoning: Optional[bool]
|
|
395
|
+
return_reasoning: Optional[bool],
|
|
400
396
|
) -> Union[Dict[str, Any], Coroutine[Any, Any, Dict[str, Any]]]:
|
|
401
397
|
"""Internal method to handle both synchronous and asynchronous query requests.
|
|
402
398
|
|
|
@@ -406,6 +402,8 @@ class WecoAI:
|
|
|
406
402
|
Whether to perform an asynchronous request.
|
|
407
403
|
fn_name : str
|
|
408
404
|
The name of the function to query.
|
|
405
|
+
version : Union[str, int]
|
|
406
|
+
The version alias or number of the function to query.
|
|
409
407
|
version_number : int, optional
|
|
410
408
|
The version number of the function to query.
|
|
411
409
|
text_input : str, optional
|
|
@@ -430,17 +428,23 @@ class WecoAI:
|
|
|
430
428
|
|
|
431
429
|
# Create links for all images that are not public URLs and upload images
|
|
432
430
|
image_urls = []
|
|
433
|
-
upload_id = generate_random_base16_code()
|
|
434
431
|
for i, info in enumerate(image_info):
|
|
435
432
|
if info["source"] == "url" or info["source"] == "base64" or info["source"] == "local":
|
|
436
|
-
url = self._upload_image(
|
|
433
|
+
url = self._upload_image(image_info=info)
|
|
437
434
|
else:
|
|
438
435
|
raise ValueError(f"Image at index {i} must be a public URL or a path to a local image file.")
|
|
439
436
|
image_urls.append(url)
|
|
440
437
|
|
|
441
438
|
# Make the request
|
|
442
439
|
endpoint = "query"
|
|
443
|
-
data = {
|
|
440
|
+
data = {
|
|
441
|
+
"name": fn_name,
|
|
442
|
+
"version": version,
|
|
443
|
+
"version_number": version_number,
|
|
444
|
+
"text": text_input,
|
|
445
|
+
"images": image_urls,
|
|
446
|
+
"return_reasoning": return_reasoning,
|
|
447
|
+
}
|
|
444
448
|
request = self._make_request(endpoint=endpoint, data=data, is_async=is_async)
|
|
445
449
|
|
|
446
450
|
if is_async:
|
|
@@ -457,10 +461,11 @@ class WecoAI:
|
|
|
457
461
|
async def aquery(
|
|
458
462
|
self,
|
|
459
463
|
fn_name: str,
|
|
464
|
+
version: Optional[Union[str, int]] = -1,
|
|
460
465
|
version_number: Optional[int] = -1,
|
|
461
466
|
text_input: Optional[str] = "",
|
|
462
467
|
images_input: Optional[List[str]] = [],
|
|
463
|
-
return_reasoning: Optional[bool] = False
|
|
468
|
+
return_reasoning: Optional[bool] = False,
|
|
464
469
|
) -> Dict[str, Any]:
|
|
465
470
|
"""Asynchronously queries a function with the given function ID and input.
|
|
466
471
|
|
|
@@ -468,6 +473,8 @@ class WecoAI:
|
|
|
468
473
|
----------
|
|
469
474
|
fn_name : str
|
|
470
475
|
The name of the function to query.
|
|
476
|
+
version : Union[str, int], optional
|
|
477
|
+
The version alias or number of the function to query. If not provided, the latest version will be used. Pass -1 to use the latest version.
|
|
471
478
|
version_number : int, optional
|
|
472
479
|
The version number of the function to query. If not provided, the latest version will be used. Pass -1 to use the latest version.
|
|
473
480
|
text_input : str, optional
|
|
@@ -484,16 +491,23 @@ class WecoAI:
|
|
|
484
491
|
and the latency in milliseconds.
|
|
485
492
|
"""
|
|
486
493
|
return await self._query(
|
|
487
|
-
fn_name=fn_name,
|
|
494
|
+
fn_name=fn_name,
|
|
495
|
+
version=version,
|
|
496
|
+
version_number=version_number,
|
|
497
|
+
text_input=text_input,
|
|
498
|
+
images_input=images_input,
|
|
499
|
+
return_reasoning=return_reasoning,
|
|
500
|
+
is_async=True,
|
|
488
501
|
)
|
|
489
502
|
|
|
490
503
|
def query(
|
|
491
504
|
self,
|
|
492
505
|
fn_name: str,
|
|
506
|
+
version: Optional[Union[str, int]] = -1,
|
|
493
507
|
version_number: Optional[int] = -1,
|
|
494
508
|
text_input: Optional[str] = "",
|
|
495
509
|
images_input: Optional[List[str]] = [],
|
|
496
|
-
return_reasoning: Optional[bool] = False
|
|
510
|
+
return_reasoning: Optional[bool] = False,
|
|
497
511
|
) -> Dict[str, Any]:
|
|
498
512
|
"""Synchronously queries a function with the given function ID and input.
|
|
499
513
|
|
|
@@ -517,11 +531,22 @@ class WecoAI:
|
|
|
517
531
|
and the latency in milliseconds.
|
|
518
532
|
"""
|
|
519
533
|
return self._query(
|
|
520
|
-
fn_name=fn_name,
|
|
534
|
+
fn_name=fn_name,
|
|
535
|
+
version=version,
|
|
536
|
+
version_number=version_number,
|
|
537
|
+
text_input=text_input,
|
|
538
|
+
images_input=images_input,
|
|
539
|
+
return_reasoning=return_reasoning,
|
|
540
|
+
is_async=False,
|
|
521
541
|
)
|
|
522
542
|
|
|
523
543
|
def batch_query(
|
|
524
|
-
self,
|
|
544
|
+
self,
|
|
545
|
+
fn_name: str,
|
|
546
|
+
batch_inputs: List[Dict[str, Any]],
|
|
547
|
+
version: Optional[Union[str, int]] = -1,
|
|
548
|
+
version_number: Optional[int] = -1,
|
|
549
|
+
return_reasoning: Optional[bool] = False,
|
|
525
550
|
) -> List[Dict[str, Any]]:
|
|
526
551
|
"""Batch queries a function version with a list of inputs.
|
|
527
552
|
|
|
@@ -533,6 +558,8 @@ class WecoAI:
|
|
|
533
558
|
A list of inputs for the functions to query. The input must be a dictionary containing the data to be processed. e.g.,
|
|
534
559
|
when providing for a text input, the dictionary should be {"text_input": "input text"}, for an image input, the dictionary should be {"images_input": ["url1", "url2", ...]}
|
|
535
560
|
and for a combination of text and image inputs, the dictionary should be {"text_input": "input text", "images_input": ["url1", "url2", ...]}.
|
|
561
|
+
version : Union[str, int], optional
|
|
562
|
+
The version alias or number of the function to query. If not provided, the latest version will be used. Pass -1 to use the latest version.
|
|
536
563
|
version_number : int, optional
|
|
537
564
|
The version number of the function to query. If not provided, the latest version will be used. Pass -1 to use the latest version.
|
|
538
565
|
return_reasoning : bool, optional
|
|
@@ -547,7 +574,12 @@ class WecoAI:
|
|
|
547
574
|
|
|
548
575
|
async def run_queries():
|
|
549
576
|
tasks = list(
|
|
550
|
-
map(
|
|
577
|
+
map(
|
|
578
|
+
lambda fn_input: self.aquery(
|
|
579
|
+
fn_name=fn_name, version=version, version_number=version_number, return_reasoning=return_reasoning, **fn_input
|
|
580
|
+
),
|
|
581
|
+
batch_inputs,
|
|
582
|
+
)
|
|
551
583
|
)
|
|
552
584
|
return await asyncio.gather(*tasks)
|
|
553
585
|
|
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
from typing import Any, Dict, List, Optional
|
|
1
|
+
from typing import Any, Dict, List, Optional, Union
|
|
2
2
|
|
|
3
3
|
from .client import WecoAI
|
|
4
4
|
|
|
@@ -49,6 +49,7 @@ async def abuild(task_description: str, multimodal: bool = False, api_key: str =
|
|
|
49
49
|
|
|
50
50
|
def query(
|
|
51
51
|
fn_name: str,
|
|
52
|
+
version: Optional[Union[str, int]] = -1,
|
|
52
53
|
version_number: Optional[int] = -1,
|
|
53
54
|
text_input: Optional[str] = "",
|
|
54
55
|
images_input: Optional[List[str]] = [],
|
|
@@ -61,6 +62,8 @@ def query(
|
|
|
61
62
|
----------
|
|
62
63
|
fn_name : str
|
|
63
64
|
The name of the function to query.
|
|
65
|
+
version : str | int, optional
|
|
66
|
+
The version alias or number of the function to query. If not provided, the latest version is used. Default is -1 for the same behavior.
|
|
64
67
|
version_number : int, optional
|
|
65
68
|
The version number of the function to query. If not provided, the latest version is used. Default is -1 for the same behavior.
|
|
66
69
|
text_input : str, optional
|
|
@@ -79,12 +82,20 @@ def query(
|
|
|
79
82
|
and the latency in milliseconds.
|
|
80
83
|
"""
|
|
81
84
|
client = WecoAI(api_key=api_key)
|
|
82
|
-
response = client.query(
|
|
85
|
+
response = client.query(
|
|
86
|
+
fn_name=fn_name,
|
|
87
|
+
version=version,
|
|
88
|
+
version_number=version_number,
|
|
89
|
+
text_input=text_input,
|
|
90
|
+
images_input=images_input,
|
|
91
|
+
return_reasoning=return_reasoning,
|
|
92
|
+
)
|
|
83
93
|
return response
|
|
84
94
|
|
|
85
95
|
|
|
86
96
|
async def aquery(
|
|
87
97
|
fn_name: str,
|
|
98
|
+
version: Optional[Union[str, int]] = -1,
|
|
88
99
|
version_number: Optional[int] = -1,
|
|
89
100
|
text_input: Optional[str] = "",
|
|
90
101
|
images_input: Optional[List[str]] = [],
|
|
@@ -97,6 +108,8 @@ async def aquery(
|
|
|
97
108
|
----------
|
|
98
109
|
fn_name : str
|
|
99
110
|
The name of the function to query.
|
|
111
|
+
version: str | int, optional
|
|
112
|
+
The version number or alias of the function to query. If not provided, the latest version is used. Default is -1 for the same behavior.
|
|
100
113
|
version_number : int, optional
|
|
101
114
|
The version number of the function to query. If not provided, the latest version is used. Default is -1 for the same behavior.
|
|
102
115
|
text_input : str, optional
|
|
@@ -116,13 +129,23 @@ async def aquery(
|
|
|
116
129
|
"""
|
|
117
130
|
client = WecoAI(api_key=api_key)
|
|
118
131
|
response = await client.aquery(
|
|
119
|
-
fn_name=fn_name,
|
|
132
|
+
fn_name=fn_name,
|
|
133
|
+
version=version,
|
|
134
|
+
version_number=version_number,
|
|
135
|
+
text_input=text_input,
|
|
136
|
+
images_input=images_input,
|
|
137
|
+
return_reasoning=return_reasoning,
|
|
120
138
|
)
|
|
121
139
|
return response
|
|
122
140
|
|
|
123
141
|
|
|
124
142
|
def batch_query(
|
|
125
|
-
fn_name: str,
|
|
143
|
+
fn_name: str,
|
|
144
|
+
batch_inputs: List[Dict[str, Any]],
|
|
145
|
+
version: Optional[Union[str, int]] = -1,
|
|
146
|
+
version_number: Optional[int] = -1,
|
|
147
|
+
return_reasoning: Optional[bool] = False,
|
|
148
|
+
api_key: Optional[str] = None,
|
|
126
149
|
) -> List[Dict[str, Any]]:
|
|
127
150
|
"""Synchronously queries multiple functions using asynchronous calls internally.
|
|
128
151
|
|
|
@@ -139,6 +162,8 @@ def batch_query(
|
|
|
139
162
|
A list of inputs for the functions to query. The input must be a dictionary containing the data to be processed. e.g.,
|
|
140
163
|
when providing for a text input, the dictionary should be {"text_input": "input text"}, for an image input, the dictionary should be {"images_input": ["url1", "url2", ...]}
|
|
141
164
|
and for a combination of text and image inputs, the dictionary should be {"text_input": "input text", "images_input": ["url1", "url2", ...]}.
|
|
165
|
+
version : str | int, optional
|
|
166
|
+
The version number or alias of the function to query. If not provided, the latest version is used. Default is -1 for the same behavior.
|
|
142
167
|
version_number : int, optional
|
|
143
168
|
The version number of the function to query. If not provided, the latest version is used. Default is -1 for the same behavior.
|
|
144
169
|
return_reasoning : bool, optional
|
|
@@ -153,5 +178,7 @@ def batch_query(
|
|
|
153
178
|
in the same order as the input queries.
|
|
154
179
|
"""
|
|
155
180
|
client = WecoAI(api_key=api_key)
|
|
156
|
-
responses = client.batch_query(
|
|
181
|
+
responses = client.batch_query(
|
|
182
|
+
fn_name=fn_name, version=version, version_number=version_number, batch_inputs=batch_inputs, return_reasoning=return_reasoning
|
|
183
|
+
)
|
|
157
184
|
return responses
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|