vlmparse 0.1.7__py3-none-any.whl → 0.1.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -28,7 +28,11 @@ class GraniteDoclingDockerServerConfig(VLLMDockerServerConfig):
 
     @property
     def client_config(self):
-        return GraniteDoclingConverterConfig(llm_params=self.llm_params)
+        return GraniteDoclingConverterConfig(
+            **self._create_client_kwargs(
+                f"http://localhost:{self.docker_port}{self.get_base_url_suffix()}"
+            )
+        )
 
 
 class GraniteDoclingConverterConfig(OpenAIConverterConfig):
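
Every Docker server config in this release swaps the old `llm_params=self.llm_params` wiring for a shared `_create_client_kwargs(...)` helper that receives the locally served URL. The helper itself is not part of this diff; a minimal sketch of what it plausibly returns, given the `base_url` and `default_model_name` fields that `ConverterConfig` now carries (see the vlmparse/converter.py hunks below), might be:

    # Hypothetical sketch -- _create_client_kwargs is defined outside this diff.
    # Assumption: it maps the container URL onto the new ConverterConfig fields.
    def _create_client_kwargs(self, base_url: str) -> dict:
        return {
            "base_url": base_url,  # e.g. http://localhost:8000/v1
            "default_model_name": self.model_name,  # forwarded to the OpenAI call
        }

The same pattern repeats for the Hunyuan, LightOn, Nanonet, OlmOCR, and PaddleOCR configs in the hunks that follow.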
@@ -70,49 +74,18 @@ class GraniteDoclingConverter(OpenAIConverterClient):
             }
         ]
 
-        doctags = await self._get_chat_completion_adaptive(
+        doctags, usage = await self._get_chat_completion(
             messages, completion_kwargs=self.config.completion_kwargs
         )
         doctags = clean_response(doctags)
 
         page.raw_response = doctags
         page.text = _doctags_to_markdown(doctags, image)
+        if usage is not None:
+            page.prompt_tokens = usage.prompt_tokens
+            page.completion_tokens = usage.completion_tokens
         return page
 
-    async def _get_chat_completion_adaptive(
-        self, messages: list[dict], completion_kwargs: dict | None
-    ) -> str:
-        """
-        vLLM enforces input+output <= model context length. If `max_tokens` is too
-        high (especially for multimodal prompts), retry with progressively smaller
-        `max_tokens`.
-        """
-        kwargs = (completion_kwargs or {}).copy()
-        max_tokens = kwargs.get("max_tokens") or kwargs.get("max_completion_tokens")
-
-        for _ in range(6):
-            try:
-                return await self._get_chat_completion(
-                    messages, completion_kwargs=kwargs
-                )
-            except Exception as e:
-                msg = str(e)
-                too_large = (
-                    "max_tokens" in msg
-                    and "maximum context length" in msg
-                    and "is too large" in msg
-                )
-                if not too_large or not isinstance(max_tokens, int):
-                    raise
-
-                max_tokens = max(256, int(max_tokens * 0.75))
-                if "max_tokens" in kwargs:
-                    kwargs["max_tokens"] = max_tokens
-                if "max_completion_tokens" in kwargs:
-                    kwargs["max_completion_tokens"] = max_tokens
-
-        return await self._get_chat_completion(messages, completion_kwargs=kwargs)
-
 
 def _doctags_to_markdown(doctags: str, image):
     try:
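
Two things change in the Granite Docling page loop: the adaptive `max_tokens` retry helper is gone, so `completion_kwargs` must fit the model context up front, and `_get_chat_completion` now returns a `(text, usage)` tuple whose token counts are persisted on the Page. A sketch of reading them back after a conversion, assuming `Document` exposes its pages as `document.pages` (not shown in this diff):

    # Sketch: aggregate token usage over a converted document.
    document = converter("scan.pdf")  # converter obtained from a config's get_client()
    total_prompt = sum(page.prompt_tokens or 0 for page in document.pages)
    total_completion = sum(page.completion_tokens or 0 for page in document.pages)
    print(f"prompt={total_prompt} completion={total_completion}")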
@@ -25,7 +25,11 @@ class HunyuanOCRDockerServerConfig(VLLMDockerServerConfig):
 
     @property
     def client_config(self):
-        return HunyuanOCRConverterConfig(llm_params=self.llm_params)
+        return HunyuanOCRConverterConfig(
+            **self._create_client_kwargs(
+                f"http://localhost:{self.docker_port}{self.get_base_url_suffix()}"
+            )
+        )
 
 
 class HunyuanOCRConverterConfig(OpenAIConverterConfig):
@@ -25,7 +25,11 @@ class LightOnOCRDockerServerConfig(VLLMDockerServerConfig):
 
     @property
     def client_config(self):
-        return LightOnOCRConverterConfig(llm_params=self.llm_params)
+        return LightOnOCRConverterConfig(
+            **self._create_client_kwargs(
+                f"http://localhost:{self.docker_port}{self.get_base_url_suffix()}"
+            )
+        )
 
 
 class LightOnOCRConverterConfig(OpenAIConverterConfig):
@@ -41,3 +45,21 @@ class LightOnOCRConverterConfig(OpenAIConverterConfig):
     }
     dpi: int = 200
     aliases: list[str] = Field(default_factory=lambda: ["lightonocr"])
+
+
+class LightonOCR21BServerConfig(LightOnOCRDockerServerConfig):
+    model_name: str = "lightonai/LightOnOCR-2-1B"
+    aliases: list[str] = Field(default_factory=lambda: ["lightonocr2"])
+
+    @property
+    def client_config(self):
+        return LightonOCR21BConverterConfig(
+            **self._create_client_kwargs(
+                f"http://localhost:{self.docker_port}{self.get_base_url_suffix()}"
+            )
+        )
+
+
+class LightonOCR21BConverterConfig(LightOnOCRConverterConfig):
+    model_name: str = "lightonai/LightOnOCR-2-1B"
+    aliases: list[str] = Field(default_factory=lambda: ["lightonocr2"])
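
The new LightOnOCR-2 classes reuse the existing LightOnOCR plumbing and only override the model name and alias. A minimal usage sketch, assuming `get_client()` on an `OpenAIConverterConfig` subclass returns the matching converter client (that override is outside this diff) and that a vLLM server is already listening on the placeholder URL:

    # Sketch: point the LightOnOCR-2 converter at a running endpoint.
    config = LightonOCR21BConverterConfig(base_url="http://localhost:8000/v1")
    converter = config.get_client()
    document = converter("invoice.pdf")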
@@ -31,7 +31,6 @@ class MinerUDockerServerConfig(DockerServerConfig):
 class MinerUConverterConfig(ConverterConfig):
     """Configuration for MinerU API converter."""
 
-    base_url: str
     model_name: str = "opendatalab/MinerU2.5-2509-1.2B"
     aliases: list[str] = Field(default_factory=lambda: ["mineru25"])
     timeout: int = 600
@@ -0,0 +1,85 @@
+import os
+
+import httpx
+import orjson
+from loguru import logger
+from pydantic import Field
+
+from vlmparse.clients.pipe_utils.html_to_md_conversion import html_to_md_keep_tables
+from vlmparse.clients.pipe_utils.utils import clean_response
+from vlmparse.converter import BaseConverter, ConverterConfig
+from vlmparse.data_model.document import Page
+from vlmparse.utils import to_base64
+
+
+class MistralOCRConverterConfig(ConverterConfig):
+    """Configuration for Mistral OCR converter."""
+
+    base_url: str = "https://api.mistral.ai/v1"
+    model_name: str = "mistral-ocr-latest"
+    api_key: str | None = None
+    timeout: int = 300
+    aliases: list[str] = Field(
+        default_factory=lambda: ["mistral-ocr-latest", "mistral-ocr"]
+    )
+
+    def get_client(self, **kwargs) -> "MistralOCRConverter":
+        return MistralOCRConverter(config=self, **kwargs)
+
+
+class MistralOCRConverter(BaseConverter):
+    """Client for Mistral OCR API."""
+
+    config: MistralOCRConverterConfig
+
+    def __init__(self, config: MistralOCRConverterConfig, **kwargs):
+        super().__init__(config=config, **kwargs)
+        if not self.config.api_key:
+            self.config.api_key = os.getenv("MISTRAL_API_KEY")
+        if not self.config.api_key:
+            raise ValueError("MISTRAL_API_KEY environment variable not set")
+        self._base_url = self.config.base_url.rstrip("/")
+
+    async def _async_ocr(self, image) -> httpx.Response:
+        payload = {
+            "model": self.config.model_name,
+            "document": {
+                "type": "image_url",
+                "image_url": f"data:image/png;base64,{to_base64(image)}",
+            },
+        }
+        headers = {"Authorization": f"Bearer {self.config.api_key}"}
+
+        async with httpx.AsyncClient(timeout=self.config.timeout) as client:
+            response = await client.post(
+                f"{self._base_url}/ocr",
+                json=payload,
+                headers=headers,
+            )
+            response.raise_for_status()
+            return response
+
+    async def async_call_inside_page(self, page: Page) -> Page:
+        response = await self._async_ocr(page.image)
+        page.raw_response = response.text
+
+        try:
+            data = response.json()
+        except ValueError:
+            logger.warning("Mistral OCR returned non-JSON response")
+            page.text = clean_response(response.text)
+            return page
+
+        pages = data.get("pages") or []
+        if pages:
+            page_data = pages[0]
+            text = page_data.get("markdown") or page_data.get("text") or ""
+        else:
+            text = (
+                data.get("markdown") or data.get("text") or orjson.dumps(data).decode()
+            )
+
+        text = clean_response(text)
+        text = html_to_md_keep_tables(text)
+        page.text = text
+        return page
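
This hunk adds a whole new module: a direct HTTP client for Mistral's hosted OCR endpoint, with no OpenAI-compatible layer in between. End-to-end usage is short; a sketch, with the file name as a placeholder:

    # Sketch: requires MISTRAL_API_KEY in the environment (or pass api_key=...).
    config = MistralOCRConverterConfig()
    converter = config.get_client()
    document = converter("contract.pdf")  # BaseConverter.__call__ drives the async pipeline

Note that `__init__` resolves the key eagerly and raises if neither the config field nor the environment variable is set, so misconfiguration fails at construction rather than on the first request.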
@@ -12,7 +12,11 @@ class NanonetOCR2DockerServerConfig(VLLMDockerServerConfig):
 
     @property
     def client_config(self):
-        return NanonetOCR2ConverterConfig(llm_params=self.llm_params)
+        return NanonetOCR2ConverterConfig(
+            **self._create_client_kwargs(
+                f"http://localhost:{self.docker_port}{self.get_base_url_suffix()}"
+            )
+        )
 
 
 class NanonetOCR2ConverterConfig(OpenAIConverterConfig):
@@ -23,7 +23,11 @@ class OlmOCRDockerServerConfig(VLLMDockerServerConfig):
 
     @property
    def client_config(self):
-        return OlmOCRConverterConfig(llm_params=self.llm_params)
+        return OlmOCRConverterConfig(
+            **self._create_client_kwargs(
+                f"http://localhost:{self.docker_port}{self.get_base_url_suffix()}"
+            )
+        )
 
 
 class OlmOCRConverterConfig(OpenAIConverterConfig):
@@ -37,7 +41,7 @@ class OlmOCRConverterConfig(OpenAIConverterConfig):
         "Return your output as markdown, with a front matter section on top specifying values for the primary_language, is_rotation_valid, rotation_correction, is_table, and is_diagram parameters."
     )
     postprompt: str | None = None
-    completion_kwargs: dict | None = {
+    completion_kwargs: dict = {
         "temperature": 0.1,
         "max_tokens": 8000,
     }
@@ -1,15 +1,13 @@
-import os
+import asyncio
 from typing import Literal, Optional
 
 from loguru import logger
 from pydantic import Field
 
-from vlmparse.base_model import VLMParseBaseModel
 from vlmparse.clients.pipe_utils.html_to_md_conversion import html_to_md_keep_tables
 from vlmparse.clients.pipe_utils.utils import clean_response
 from vlmparse.converter import BaseConverter, ConverterConfig
 from vlmparse.data_model.document import Page
-from vlmparse.servers.docker_server import DEFAULT_MODEL_NAME
 from vlmparse.utils import to_base64
 
 from .prompts import PDF2MD_PROMPT
@@ -17,50 +15,14 @@ from .prompts import PDF2MD_PROMPT
 GOOGLE_API_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai/"
 
 
-class LLMParams(VLMParseBaseModel):
+class OpenAIConverterConfig(ConverterConfig):
     api_key: str = ""
-    base_url: str | None = None
-    model_name: str = DEFAULT_MODEL_NAME
     timeout: int | None = 500
     max_retries: int = 1
-
-
-def get_llm_params(model_name: str, uri: str | None = None):
-    if uri is not None:
-        return LLMParams(base_url=uri, model_name="vllm-model", api_key="")
-    if model_name in [
-        "gpt-4o",
-        "gpt-4o-mini",
-        "gpt-4.1",
-        "gpt-4.1-mini",
-        "gpt-4.1-nano",
-        "gpt-5",
-        "gpt-5-mini",
-        "gpt-5-nano",
-    ]:
-        base_url = None
-        api_key = os.getenv("OPENAI_API_KEY")
-        if api_key is None:
-            raise ValueError("OPENAI_API_KEY environment variable not set")
-    else:
-        if model_name in [
-            "gemini-2.5-flash-lite",
-            "gemini-2.5-flash",
-            "gemini-2.5-pro",
-        ]:
-            base_url = GOOGLE_API_BASE_URL
-            api_key = os.getenv("GOOGLE_API_KEY")
-            if api_key is None:
-                raise ValueError("GOOGLE_API_KEY environment variable not set")
-        else:
-            return None
-    return LLMParams(base_url=base_url, model_name=model_name, api_key=api_key)
-
-
-class OpenAIConverterConfig(ConverterConfig):
-    llm_params: LLMParams
     preprompt: str | None = None
-    postprompt: str | None = PDF2MD_PROMPT
+    postprompt: str | dict[str, str] | None = PDF2MD_PROMPT
+    prompts: dict[str, str] = Field(default_factory=dict)
+    prompt_mode_map: dict[str, str] = Field(default_factory=dict)
     completion_kwargs: dict = Field(default_factory=dict)
     stream: bool = False
 
@@ -71,6 +33,33 @@ class OpenAIConverterConfig(ConverterConfig):
 class OpenAIConverterClient(BaseConverter):
     """Client for OpenAI-compatible API servers."""
 
+    def get_prompt_key(self) -> str | None:
+        """Resolve a prompt key from conversion_mode using class mappings."""
+        mode = getattr(self.config, "conversion_mode", None) or "ocr"
+        prompts = self._get_prompts()
+        if mode in prompts:
+            return mode
+        mapped = self._get_prompt_mode_map().get(mode)
+        if mapped in prompts:
+            return mapped
+        return None
+
+    def get_prompt_for_mode(self) -> str | None:
+        key = self.get_prompt_key()
+        if key is None:
+            return None
+        return self._get_prompts().get(key)
+
+    def _get_prompts(self) -> dict[str, str]:
+        if self.config.prompts:
+            return self.config.prompts
+        if isinstance(self.config.postprompt, dict):
+            return self.config.postprompt
+        return {}
+
+    def _get_prompt_mode_map(self) -> dict[str, str]:
+        return self.config.prompt_mode_map or {}
+
     def __init__(
         self,
         config: OpenAIConverterConfig,
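
Prompt selection is now data-driven: `get_prompt_key()` first tries `conversion_mode` as a direct key into the prompt dict (taken from `prompts`, or from `postprompt` when that is a dict), then falls back through `prompt_mode_map`; `get_prompt_for_mode()` turns the key into prompt text. A sketch with placeholder prompt strings, assuming `get_client()` returns an `OpenAIConverterClient` (that override is outside this diff):

    # Sketch: resolving conversion_mode through prompt_mode_map.
    config = OpenAIConverterConfig(
        model_name="demo",  # ConverterConfig now requires model_name
        prompts={"ocr": "Transcribe the page.", "table": "Extract tables as HTML."},
        prompt_mode_map={"ocr_layout": "ocr"},  # no layout prompt -> reuse "ocr"
        conversion_mode="ocr_layout",
    )
    client = config.get_client()
    assert client.get_prompt_key() == "ocr"
    assert client.get_prompt_for_mode() == "Transcribe the page."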
@@ -90,14 +79,54 @@ class OpenAIConverterClient(BaseConverter):
             debug=debug,
             return_documents_in_batch_mode=return_documents_in_batch_mode,
         )
-        from openai import AsyncOpenAI
-
-        self.model = AsyncOpenAI(
-            base_url=self.config.llm_params.base_url,
-            api_key=self.config.llm_params.api_key,
-            timeout=self.config.llm_params.timeout,
-            max_retries=self.config.llm_params.max_retries,
-        )
+        self._model = None
+        self._model_loop = None
+
+    async def _get_async_model(self):
+        loop = asyncio.get_running_loop()
+        if self._model is None or self._model_loop is not loop:
+            await self._close_model()
+            from openai import AsyncOpenAI
+
+            self._model = AsyncOpenAI(
+                base_url=self.config.base_url,
+                api_key=self.config.api_key,
+                timeout=self.config.timeout,
+                max_retries=self.config.max_retries,
+            )
+            self._model_loop = loop
+        return self._model
+
+    async def _close_model(self):
+        """Close the async OpenAI client if it exists."""
+        if self._model is not None:
+            try:
+                await self._model.close()
+            except RuntimeError:
+                # Event loop may already be closed
+                pass
+            finally:
+                self._model = None
+                self._model_loop = None
+
+    async def aclose(self):
+        """Close the converter and release resources."""
+        await self._close_model()
+
+    def close(self):
+        """Synchronously close the converter if possible."""
+        if self._model is not None:
+            try:
+                loop = asyncio.get_running_loop()
+                loop.create_task(self._close_model())
+            except RuntimeError:
+                # No running loop, try to close synchronously
+                try:
+                    asyncio.run(self._close_model())
+                except RuntimeError:
+                    # Event loop already closed, force cleanup
+                    self._model = None
+                    self._model_loop = None
 
     async def _get_chat_completion(
         self, messages: list[dict], completion_kwargs: dict | None = None
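
The eager `AsyncOpenAI` construction in `__init__` is replaced by a lazily built client cached per event loop. The motivating failure mode: each `asyncio.run()` call creates a fresh loop, and an httpx-backed client bound to an already-closed loop raises on reuse. `_get_async_model()` compares `self._model_loop` against the running loop and rebuilds when they differ. A sketch of the scenario this handles:

    # Sketch: repeated asyncio.run() calls on one converter instance.
    converter = config.get_client()
    asyncio.run(converter.async_call("a.pdf"))  # loop 1: client created lazily here
    asyncio.run(converter.async_call("b.pdf"))  # loop 2: stale client closed, new one built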
@@ -106,9 +135,11 @@ class OpenAIConverterClient(BaseConverter):
         if completion_kwargs is None:
             completion_kwargs = self.config.completion_kwargs
 
+        model = await self._get_async_model()
+
         if self.config.stream:
-            response_stream = await self.model.chat.completions.create(
-                model=self.config.llm_params.model_name,
+            response_stream = await model.chat.completions.create(
+                model=self.config.default_model_name,
                 messages=messages,
                 stream=True,
                 **completion_kwargs,
@@ -120,8 +151,8 @@ class OpenAIConverterClient(BaseConverter):
 
             return "".join(response_parts), None
         else:
-            response_obj = await self.model.chat.completions.create(
-                model=self.config.llm_params.model_name,
+            response_obj = await model.chat.completions.create(
+                model=self.config.default_model_name,
                 messages=messages,
                 **completion_kwargs,
             )
@@ -147,11 +178,15 @@ class OpenAIConverterClient(BaseConverter):
         else:
             preprompt = []
 
-        postprompt = (
-            [{"type": "text", "text": self.config.postprompt}]
-            if self.config.postprompt
-            else []
-        )
+        selected_prompt = self.get_prompt_for_mode()
+        if selected_prompt is not None:
+            postprompt = [{"type": "text", "text": selected_prompt}]
+        else:
+            postprompt = (
+                [{"type": "text", "text": self.config.postprompt}]
+                if isinstance(self.config.postprompt, str) and self.config.postprompt
+                else []
+            )
 
         messages = [
             *preprompt,
@@ -22,7 +22,11 @@ class PaddleOCRVLDockerServerConfig(VLLMDockerServerConfig):
 
     @property
     def client_config(self):
-        return PaddleOCRVLConverterConfig(llm_params=self.llm_params)
+        return PaddleOCRVLConverterConfig(
+            **self._create_client_kwargs(
+                f"http://localhost:{self.docker_port}{self.get_base_url_suffix()}"
+            )
+        )
 
 
 # Task-specific base prompts
@@ -39,7 +43,10 @@ class PaddleOCRVLConverterConfig(OpenAIConverterConfig):
 
     model_name: str = "PaddlePaddle/PaddleOCR-VL"
     preprompt: str | None = None
-    postprompt: str | None = TASKS["ocr"]
+    postprompt: dict[str, str] = TASKS
+    prompt_mode_map: dict[str, str] = {
+        "ocr_layout": "ocr",
+    }
     completion_kwargs: dict | None = {
         "temperature": 0.0,
         "max_completion_tokens": 16384,
vlmparse/converter.py CHANGED
@@ -1,7 +1,6 @@
 import asyncio
 import threading
 import time
-import traceback
 from pathlib import Path
 from typing import Literal
 
@@ -9,6 +8,8 @@ from loguru import logger
 from PIL import Image
 from pydantic import Field
 
+from vlmparse.servers.docker_server import DEFAULT_MODEL_NAME
+
 from .base_model import VLMParseBaseModel
 from .build_doc import convert_specific_page_to_image, get_page_count, resize_image
 from .constants import IMAGE_EXTENSIONS, PDF_EXTENSION
@@ -19,9 +20,20 @@ PDFIUM_LOCK = threading.Lock()
 
 
 class ConverterConfig(VLMParseBaseModel):
+    model_name: str
     aliases: list[str] = Field(default_factory=list)
-    dpi: int = 175
-    max_image_size: int | None = 4000
+    dpi: int = Field(default=175, ge=30, le=600)
+    max_image_size: int | None = Field(default=4000, ge=50)
+    base_url: str | None = None
+    default_model_name: str = DEFAULT_MODEL_NAME
+    conversion_mode: Literal[
+        "ocr",
+        "ocr_layout",
+        "table",
+        "image_description",
+        "formula",
+        "chart",
+    ] = "ocr"
 
     def get_client(self, **kwargs) -> "BaseConverter":
         return BaseConverter(config=self, **kwargs)
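
`ConverterConfig` absorbs the connection fields that previously lived on `LLMParams` (`base_url`, `default_model_name`), gains a required `model_name` and a `conversion_mode` switch, and adds range constraints on `dpi` and `max_image_size`. Since `VLMParseBaseModel` appears to be a pydantic model (the `Field(...)` defaults imply it), out-of-range values now fail at construction time rather than at render time; a quick sketch:

    # Sketch: Field constraints reject bad settings up front.
    from pydantic import ValidationError

    try:
        ConverterConfig(model_name="demo", dpi=1200)  # violates the le=600 bound
    except ValidationError as err:
        print(err.errors()[0]["msg"])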
@@ -94,14 +106,22 @@ class BaseConverter:
        try:
            page = await self.async_call_inside_page(page)
            toc = time.perf_counter()
            page.latency = toc - tic
-            logger.debug(f"Time taken: {page.latency} seconds")
+            logger.debug(
+                "Page {page_idx} processed in {latency:.2f}s",
+                page_idx=page_idx,
+                latency=page.latency,
+            )
        except KeyboardInterrupt:
            raise
        except Exception:
            if self.debug:
                raise
            else:
-                logger.exception(traceback.format_exc())
+                logger.opt(exception=True).error(
+                    "Error processing page {page_idx} of {file_path}",
+                    page_idx=page_idx,
+                    file_path=str(file_path),
+                )
            page.error = ProcessingError.from_class(self)
            if not self.save_page_images:
                page.buffer_image = dict(
@@ -122,12 +142,19 @@ class BaseConverter:
            if self.debug:
                raise
            else:
-                logger.exception(traceback.format_exc())
+                logger.opt(exception=True).error(
+                    "Error processing document {file_path}",
+                    file_path=str(file_path),
+                )
            document.error = ProcessingError.from_class(self)
            return document
        toc = time.perf_counter()
        document.latency = toc - tic
-        logger.debug(f"Time taken to process the document: {document.latency} seconds")
+        logger.debug(
+            "Document {file_path} processed in {latency:.2f}s",
+            file_path=str(file_path),
+            latency=document.latency,
+        )
        if self.save_folder is not None:
            self._save_document(document)
 
@@ -169,8 +196,16 @@ class BaseConverter:
         else:
             logger.warning(f"Unknown save_mode: {self.save_mode}, skipping save")
 
+    async def _async_call_with_cleanup(self, file_path: str | Path):
+        """Call async_call and ensure cleanup."""
+        try:
+            return await self.async_call(file_path)
+        finally:
+            if hasattr(self, "aclose"):
+                await self.aclose()
+
     def __call__(self, file_path: str | Path):
-        return asyncio.run(self.async_call(file_path))
+        return asyncio.run(self._async_call_with_cleanup(file_path))
 
     async def async_batch(self, file_paths: list[str | Path]) -> list[Document] | None:
         """Process multiple files concurrently with semaphore limit."""
@@ -184,9 +219,14 @@ class BaseConverter:
             await self.async_call(file_path)
 
         tasks = [asyncio.create_task(worker(file_path)) for file_path in file_paths]
-        documents = await asyncio.gather(*tasks)
-        if self.return_documents_in_batch_mode:
-            return documents
+        try:
+            documents = await asyncio.gather(*tasks)
+            if self.return_documents_in_batch_mode:
+                return documents
+        finally:
+            # Close async resources before the event loop ends
+            if hasattr(self, "aclose"):
+                await self.aclose()
 
     def batch(self, file_paths: list[str | Path]) -> list[Document] | None:
         """Synchronous wrapper for async_batch."""