datapizza-ai-clients-mistral 0.0.2__tar.gz

This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,207 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[codz]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py.cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # UV
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ uv.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+ #poetry.toml
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
+ # https://pdm-project.org/en/latest/usage/project/#working-with-version-control
+ #pdm.lock
+ #pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # pixi
+ # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
+ #pixi.lock
+ # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
+ # in the .venv directory. It is recommended not to include this directory in version control.
+ .pixi
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .envrc
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ # Abstra
+ # Abstra is an AI-powered process automation framework.
+ # Ignore directories containing user credentials, local state, and settings.
+ # Learn more at https://abstra.io/docs
+ .abstra/
+
+ # Visual Studio Code
+ # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
+ # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
+ # and can be added to the global gitignore or merged into this file. However, if you prefer,
+ # you could uncomment the following to ignore the entire vscode folder
+ # .vscode/
+
+ # Ruff stuff:
+ .ruff_cache/
+
+ # PyPI configuration file
+ .pypirc
+
+ # Cursor
+ # Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
+ # exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
+ # refer to https://docs.cursor.com/context/ignore-files
+ .cursorignore
+ .cursorindexingignore
+
+ # Marimo
+ marimo/_static/
+ marimo/_lsp/
+ __marimo__/
@@ -0,0 +1,13 @@
+ Metadata-Version: 2.4
+ Name: datapizza-ai-clients-mistral
+ Version: 0.0.2
+ Summary: Mistral AI client for the datapizza-ai framework
+ Author-email: Datapizza <datapizza@datapizza.tech>
+ License: MIT
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python :: 3
+ Requires-Python: <4,>=3.10.0
+ Requires-Dist: datapizza-ai-core>=0.0.0
+ Requires-Dist: mistralai<2.0.0,>=1.2.0
+ Requires-Dist: requests<3.0.0,>=2.25.0
File without changes
@@ -0,0 +1,3 @@
+ from datapizza.clients.mistral.mistral_client import MistralClient
+
+ __all__ = ["MistralClient"]
@@ -0,0 +1,103 @@
+ import base64
+ import json
+
+ from datapizza.memory.memory import Turn
+ from datapizza.memory.memory_adapter import MemoryAdapter
+ from datapizza.type import (
+     ROLE,
+     FunctionCallBlock,
+     FunctionCallResultBlock,
+     MediaBlock,
+     StructuredBlock,
+     TextBlock,
+ )
+
+
+ class MistralMemoryAdapter(MemoryAdapter):
+     def _turn_to_message(self, turn: Turn) -> dict:
+         content = []
+         tool_calls = []
+         tool_call_id = None
+
+         for block in turn:
+             block_dict = {}
+
+             match block:
+                 case TextBlock():
+                     block_dict = {"type": "text", "text": block.content}
+                 case FunctionCallBlock():
+                     tool_calls.append(
+                         {
+                             "id": block.id,
+                             "function": {
+                                 "name": block.name,
+                                 "arguments": json.dumps(block.arguments),
+                             },
+                             "type": "function",
+                         }
+                     )
+                 case FunctionCallResultBlock():
+                     tool_call_id = block.id
+                     block_dict = {"type": "text", "text": block.result}
+                 case StructuredBlock():
+                     block_dict = {"type": "text", "text": str(block.content)}
+                 case MediaBlock():
+                     match block.media.media_type:
+                         case "image":
+                             block_dict = self._process_image_block(block)
+                         # case "pdf":
+                         #     block_dict = self._process_pdf_block(block)
+
+                         case _:
+                             raise NotImplementedError(
+                                 f"Unsupported media type: {block.media.media_type}, only image are supported"
+                             )
+
+             if block_dict:
+                 content.append(block_dict)
+
+         messages: dict = {
+             "role": turn.role.value,
+         }
+
+         if content:
+             messages["content"] = content
+
+         if tool_calls:
+             messages["tool_calls"] = tool_calls
+
+         if tool_call_id:
+             messages["tool_call_id"] = tool_call_id
+
+         return messages
+
+     def _text_to_message(self, text: str, role: ROLE) -> dict:
+         return {"role": role.value, "content": text}
+
+     def _process_image_block(self, block: MediaBlock) -> dict:
+         match block.media.source_type:
+             case "url":
+                 return {
+                     "type": "image_url",
+                     "image_url": {"url": block.media.source},
+                 }
+             case "base64":
+                 return {
+                     "type": "image_url",
+                     "image_url": {
+                         "url": f"data:image/{block.media.extension};base64,{block.media.source}"
+                     },
+                 }
+             case "path":
+                 with open(block.media.source, "rb") as image_file:
+                     base64_image = base64.b64encode(image_file.read()).decode("utf-8")
+                 return {
+                     "type": "image_url",
+                     "image_url": {
+                         "url": f"data:image/{block.media.extension};base64,{base64_image}"
+                     },
+                 }
+             case _:
+                 raise NotImplementedError(
+                     f"Unsupported media source type: {block.media.source_type}, only url, base64, path are supported"
+                 )
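For orientation, the adapter above emits plain Mistral chat-completion message dicts. The following sketch (not part of the package) shows the shapes built by _turn_to_message for a user turn containing text plus a base64 image, and for an assistant turn that issued a tool call; the role strings, ids, and values are illustrative assumptions, not taken from the diff.

# Illustrative only: message dicts in the shape produced by
# MistralMemoryAdapter._turn_to_message (all values are made up).
user_message = {
    "role": "user",
    "content": [
        {"type": "text", "text": "What is in this picture?"},
        {
            "type": "image_url",
            "image_url": {"url": "data:image/png;base64,<...base64 data...>"},
        },
    ],
}

assistant_tool_call_message = {
    "role": "assistant",
    "tool_calls": [
        {
            "id": "call_123",
            "type": "function",
            # arguments is a JSON string because the adapter uses json.dumps()
            "function": {"name": "get_weather", "arguments": "{\"city\": \"Rome\"}"},
        }
    ],
}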
@@ -0,0 +1,466 @@
+ import json
+ import logging
+ import os
+ from collections.abc import AsyncIterator, Iterator
+ from typing import Literal
+
+ import requests
+ from datapizza.core.cache import Cache
+ from datapizza.core.clients import Client, ClientResponse
+ from datapizza.memory import Memory
+ from datapizza.tools import Tool
+ from datapizza.tools.tool_converter import ToolConverter
+ from datapizza.type import (
+     FunctionCallBlock,
+     Media,
+     MediaBlock,
+     Model,
+     StructuredBlock,
+     TextBlock,
+ )
+ from mistralai import Mistral
+ from mistralai.models.ocrresponse import OCRResponse
+
+ from datapizza.clients.mistral.memory_adapter import MistralMemoryAdapter
+
+ log = logging.getLogger(__name__)
+
+
+ class MistralClient(Client):
+     """A client for interacting with the Mistral API.
+
+     This class provides methods for invoking the Mistral API to generate responses
+     based on given input data. It extends the Client class.
+     """
+
+     def __init__(
+         self,
+         api_key: str,
+         model: str = "mistral-large-latest",
+         system_prompt: str = "",
+         temperature: float | None = None,
+         cache: Cache | None = None,
+     ):
+         """
+         Args:
+             api_key: The API key for the Mistral API.
+             model: The model to use for the Mistral API.
+             system_prompt: The system prompt to use for the Mistral API.
+             temperature: The temperature to use for the Mistral API.
+             cache: The cache to use for the Mistral API.
+         """
+         if temperature and not 0 <= temperature <= 2:
+             raise ValueError("Temperature must be between 0 and 2")
+
+         super().__init__(
+             model_name=model,
+             system_prompt=system_prompt,
+             temperature=temperature,
+             cache=cache,
+         )
+
+         self.api_key = api_key
+         self.memory_adapter = MistralMemoryAdapter()
+         self._set_client()
+
+     def _set_client(self):
+         self.client = Mistral(api_key=self.api_key)
+
+     def _response_to_client_response(
+         self, response, tool_map: dict[str, Tool] | None = None
+     ) -> ClientResponse:
+         blocks = []
+         for choice in response.choices:
+             if choice.message.content:
+                 blocks.append(TextBlock(content=choice.message.content))
+
+             if choice.message.tool_calls:
+                 for tool_call in choice.message.tool_calls:
+                     tool = tool_map.get(tool_call.function.name) if tool_map else None
+
+                     if tool is None:
+                         raise ValueError(f"Tool {tool_call.function.name} not found")
+
+                     blocks.append(
+                         FunctionCallBlock(
+                             id=tool_call.id,
+                             name=tool_call.function.name,
+                             arguments=json.loads(tool_call.function.arguments),
+                             tool=tool,
+                         )
+                     )
+
+             # Handle media content if present
+             if hasattr(choice.message, "media") and choice.message.media:
+                 for media_item in choice.message.media:
+                     media = Media(
+                         media_type=media_item.type,
+                         source_type="url" if media_item.source_url else "base64",
+                         source=media_item.source_url or media_item.data,
+                         detail=getattr(media_item, "detail", "high"),
+                     )
+                     blocks.append(MediaBlock(media=media))
+
+         log.debug(f"{self.__class__.__name__} response = {response}")
+         return ClientResponse(
+             content=blocks,
+             stop_reason=response.choices[0].finish_reason,
+             prompt_tokens_used=response.usage.prompt_tokens,
+             completion_tokens_used=response.usage.completion_tokens,
+             cached_tokens_used=0,
+         )
+
+     def _convert_tools(self, tools: Tool) -> dict:
+         """Convert tools to Mistral function format"""
+         return ToolConverter.to_mistral_format(tools)
+
+     def _convert_tool_choice(
+         self, tool_choice: Literal["auto", "required", "none"] | list[str]
+     ) -> dict | Literal["auto", "required", "none"]:
+         if isinstance(tool_choice, list) and len(tool_choice) > 1:
+             raise NotImplementedError(
+                 "multiple function names is not supported by Mistral"
+             )
+         elif isinstance(tool_choice, list):
+             return {
+                 "type": "function",
+                 "function": {"name": tool_choice[0]},
+             }
+         else:
+             return tool_choice
+
+     def _invoke(
+         self,
+         *,
+         input: str,
+         tools: list[Tool] | None,
+         memory: Memory | None,
+         tool_choice: Literal["auto", "required", "none"] | list[str],
+         temperature: float | None,
+         max_tokens: int,
+         system_prompt: str | None,
+         **kwargs,
+     ) -> ClientResponse:
+         if tools is None:
+             tools = []
+         log.debug(f"{self.__class__.__name__} input = {input}")
+         messages = self._memory_to_contents(system_prompt, input, memory)
+
+         tool_map = {tool.name: tool for tool in tools}
+
+         request_params = {
+             "model": self.model_name,
+             "messages": messages,
+             "stream": False,
+             "max_tokens": max_tokens,
+             **kwargs,
+         }
+
+         if temperature:
+             request_params["temperature"] = temperature
+
+         if tools:
+             request_params["tools"] = [self._convert_tools(tool) for tool in tools]
+             request_params["tool_choice"] = self._convert_tool_choice(tool_choice)
+
+         response = self.client.chat.complete(**request_params)
+         return self._response_to_client_response(response, tool_map)
+
+     async def _a_invoke(
+         self,
+         *,
+         input: str,
+         tools: list[Tool] | None,
+         memory: Memory | None,
+         tool_choice: Literal["auto", "required", "none"] | list[str],
+         temperature: float | None,
+         max_tokens: int,
+         system_prompt: str | None,
+         **kwargs,
+     ) -> ClientResponse:
+         if tools is None:
+             tools = []
+         log.debug(f"{self.__class__.__name__} input = {input}")
+         messages = self._memory_to_contents(system_prompt, input, memory)
+
+         tool_map = {tool.name: tool for tool in tools}
+
+         request_params = {
+             "model": self.model_name,
+             "messages": messages,
+             "stream": False,
+             "max_tokens": max_tokens,
+             **kwargs,
+         }
+
+         if temperature:
+             request_params["temperature"] = temperature
+
+         if tools:
+             request_params["tools"] = [self._convert_tools(tool) for tool in tools]
+             request_params["tool_choice"] = self._convert_tool_choice(tool_choice)
+
+         response = await self.client.chat.complete_async(**request_params)
+         return self._response_to_client_response(response, tool_map)
+
+     def _stream_invoke(
+         self,
+         input: str,
+         tools: list[Tool] | None,
+         memory: Memory | None,
+         tool_choice: Literal["auto", "required", "none"] | list[str],
+         temperature: float | None,
+         max_tokens: int,
+         system_prompt: str | None,
+         **kwargs,
+     ) -> Iterator[ClientResponse]:
+         if tools is None:
+             tools = []
+         messages = self._memory_to_contents(system_prompt, input, memory)
+         request_params = {
+             "model": self.model_name,
+             "messages": messages,
+             "max_tokens": max_tokens,
+             **kwargs,
+         }
+
+         if temperature:
+             request_params["temperature"] = temperature
+
+         if tools:
+             request_params["tools"] = [self._convert_tools(tool) for tool in tools]
+             request_params["tool_choice"] = self._convert_tool_choice(tool_choice)
+
+         response = self.client.chat.stream(**request_params)
+         text = ""
+         for chunk in response:
+             delta = chunk.data.choices[0].delta.content or ""
+             text += delta
+             yield ClientResponse(
+                 content=[],
+                 delta=str(delta),
+                 stop_reason=chunk.data.choices[0].finish_reason,
+                 prompt_tokens_used=chunk.data.usage.prompt_tokens
+                 if chunk.data.usage
+                 else 0,
+                 completion_tokens_used=chunk.data.usage.completion_tokens
+                 if chunk.data.usage
+                 else 0,
+                 cached_tokens_used=0,
+             )
+
+     async def _a_stream_invoke(
+         self,
+         input: str,
+         tools: list[Tool] | None = None,
+         memory: Memory | None = None,
+         tool_choice: Literal["auto", "required", "none"] | list[str] = "auto",
+         temperature: float | None = None,
+         max_tokens: int | None = None,
+         system_prompt: str | None = None,
+         **kwargs,
+     ) -> AsyncIterator[ClientResponse]:
+         if tools is None:
+             tools = []
+         messages = self._memory_to_contents(system_prompt, input, memory)
+         request_params = {
+             "model": self.model_name,
+             "messages": messages,
+             "max_tokens": max_tokens or 1024,
+             **kwargs,
+         }
+
+         if temperature:
+             request_params["temperature"] = temperature
+
+         if tools:
+             request_params["tools"] = [self._convert_tools(tool) for tool in tools]
+             request_params["tool_choice"] = self._convert_tool_choice(tool_choice)
+
+         response = await self.client.chat.stream_async(**request_params)
+         text = ""
+         async for chunk in response:
+             delta = chunk.data.choices[0].delta.content or ""
+             text += delta
+             yield ClientResponse(
+                 content=[],
+                 delta=str(delta),
+                 stop_reason=chunk.data.choices[0].finish_reason,
+                 prompt_tokens_used=chunk.data.usage.prompt_tokens
+                 if chunk.data.usage
+                 else 0,
+                 completion_tokens_used=chunk.data.usage.completion_tokens
+                 if chunk.data.usage
+                 else 0,
+                 cached_tokens_used=0,
+             )
+
+     def _structured_response(
+         self,
+         input: str,
+         output_cls: type[Model],
+         memory: Memory | None,
+         temperature: float | None,
+         max_tokens: int,
+         system_prompt: str | None,
+         tools: list[Tool] | None,
+         tool_choice: Literal["auto", "required", "none"] | list[str] = "auto",
+         **kwargs,
+     ) -> ClientResponse:
+         # Add system message to enforce JSON output
+         messages = self._memory_to_contents(system_prompt, input, memory)
+
+         if not tools:
+             tools = []
+
+         if tools:
+             kwargs["tools"] = [self._convert_tools(tool) for tool in tools]
+             kwargs["tool_choice"] = self._convert_tool_choice(tool_choice)
+
+         response = self.client.chat.parse(
+             model=self.model_name,
+             messages=messages,
+             response_format=output_cls,
+             temperature=temperature,
+             max_tokens=max_tokens,
+             **kwargs,
+         )
+
+         if not response.choices:
+             raise ValueError("No response from Mistral")
+
+         log.debug(f"{self.__class__.__name__} structured response: {response}")
+         stop_reason = response.choices[0].finish_reason if response.choices else None
+         if hasattr(output_cls, "model_validate_json"):
+             structured_data = output_cls.model_validate_json(
+                 str(response.choices[0].message.content)  # type: ignore
+             )
+         else:
+             structured_data = json.loads(str(response.choices[0].message.content))  # type: ignore
+         return ClientResponse(
+             content=[StructuredBlock(content=structured_data)],
+             stop_reason=stop_reason,
+             prompt_tokens_used=response.usage.prompt_tokens,
+             completion_tokens_used=response.usage.completion_tokens,
+             cached_tokens_used=0,
+         )
+
+     async def _a_structured_response(
+         self,
+         input: str,
+         output_cls: type[Model],
+         memory: Memory | None,
+         temperature: float | None,
+         max_tokens: int,
+         system_prompt: str | None,
+         tools: list[Tool] | None,
+         tool_choice: Literal["auto", "required", "none"] | list[str] = "auto",
+         **kwargs,
+     ) -> ClientResponse:
+         # Add system message to enforce JSON output
+         messages = self._memory_to_contents(system_prompt, input, memory)
+
+         if not tools:
+             tools = []
+
+         if tools:
+             kwargs["tools"] = [self._convert_tools(tool) for tool in tools]
+             kwargs["tool_choice"] = self._convert_tool_choice(tool_choice)
+
+         response = await self.client.chat.parse_async(
+             model=self.model_name,
+             messages=messages,
+             response_format=output_cls,
+             temperature=temperature,
+             max_tokens=max_tokens,
+             **kwargs,
+         )
+
+         if not response.choices:
+             raise ValueError("No response from Mistral")
+
+         log.debug(f"{self.__class__.__name__} structured response: {response}")
+         stop_reason = response.choices[0].finish_reason if response.choices else None
+         if hasattr(output_cls, "model_validate_json"):
+             structured_data = output_cls.model_validate_json(
+                 str(response.choices[0].message.content)  # type: ignore
+             )
+         else:
+             structured_data = json.loads(str(response.choices[0].message.content))  # type: ignore
+         return ClientResponse(
+             content=[StructuredBlock(content=structured_data)],
+             stop_reason=stop_reason,
+             prompt_tokens_used=response.usage.prompt_tokens,
+             completion_tokens_used=response.usage.completion_tokens,
+             cached_tokens_used=0,
+         )
+
+     def _embed(
+         self, text: str | list[str], model_name: str | None, **kwargs
+     ) -> list[float] | list[list[float]]:
+         """Embed a text using the model"""
+         response = self.client.embeddings.create(
+             inputs=text, model=model_name or self.model_name, **kwargs
+         )
+
+         embeddings = [item.embedding for item in response.data]
+
+         if not embeddings:
+             return []
+
+         if isinstance(text, str) and embeddings[0]:
+             return embeddings[0]
+
+         return embeddings
+
+     async def _a_embed(
+         self, text: str | list[str], model_name: str | None, **kwargs
+     ) -> list[float] | list[list[float]]:
+         """Embed a text using the model"""
+         response = await self.client.embeddings.create_async(
+             inputs=text, model=model_name or self.model_name, **kwargs
+         )
+
+         embeddings = [item.embedding for item in response.data]
+
+         if not embeddings:
+             return []
+
+         if isinstance(text, str) and embeddings[0]:
+             return embeddings[0]
+
+         return embeddings or []
+
+     def parse_document(
+         self,
+         document_path: str,
+         autodelete: bool = True,
+         include_image_base64: bool = True,
+     ) -> OCRResponse:
+         filename = os.path.basename(document_path)
+         with open(document_path, "rb") as f:
+             uploaded_pdf = self.client.files.upload(
+                 file={"file_name": filename, "content": f}, purpose="ocr"
+             )
+
+         signed_url = self.client.files.get_signed_url(file_id=uploaded_pdf.id)
+
+         response = self.client.ocr.process(
+             model="mistral-ocr-latest",
+             document={
+                 "type": "document_url",
+                 "document_url": signed_url.url,
+             },
+             include_image_base64=include_image_base64,
+         )
+
+         if autodelete:
+             url = f"https://api.mistral.ai/v1/files/{uploaded_pdf.id}"
+             headers = {
+                 "Content-Type": "application/json",
+                 "Authorization": f"Bearer {self.api_key}",
+             }
+
+             requests.delete(url, headers=headers, timeout=30)
+
+         return response
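As a quick orientation for readers of this diff, here is a minimal usage sketch. The constructor arguments and parse_document come directly from the code above; the invoke() call assumes the datapizza-ai-core Client base class exposes a public wrapper around the _invoke shown here, and the environment variable name and file path are hypothetical.

# Minimal sketch, not part of the package.
import os

from datapizza.clients.mistral import MistralClient

# Constructor arguments as defined above; the env var name is an assumption.
client = MistralClient(
    api_key=os.environ["MISTRAL_API_KEY"],
    model="mistral-large-latest",
    temperature=0.2,
)

# OCR a local PDF via the Mistral files + OCR endpoints (parse_document above);
# the path is hypothetical.
ocr_result = client.parse_document("./example.pdf", autodelete=True)

# Chat completion: assumes the base Client exposes invoke() on top of _invoke().
response = client.invoke(input="Summarize the OCR result in one sentence.")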
@@ -0,0 +1,59 @@
+ # Build system configuration
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
+
+ # Project metadata
+ [project]
+ name = "datapizza-ai-clients-mistral"
+ version = "0.0.2"
+ description = "Mistral AI client for the datapizza-ai framework"
+ readme = "README.md"
+ license = {text = "MIT"}
+ authors = [
+     {name = "Datapizza", email = "datapizza@datapizza.tech"}
+ ]
+ requires-python = ">=3.10.0,<4"
+ classifiers = [
+     "Programming Language :: Python :: 3",
+     "License :: OSI Approved :: MIT License",
+     "Operating System :: OS Independent",
+ ]
+ dependencies = [
+     "datapizza-ai-core>=0.0.0",
+     "mistralai>=1.2.0,<2.0.0",
+     "requests>=2.25.0,<3.0.0",
+ ]
+
+ # Development dependencies
+ [dependency-groups]
+ dev = [
+     "deptry>=0.23.0",
+     "pytest",
+     "ruff>=0.11.5",
+ ]
+
+ # Hatch build configuration
+ [tool.hatch.build.targets.sdist]
+ include = ["datapizza"]
+ exclude = ["**/BUILD"]
+
+ [tool.hatch.build.targets.wheel]
+ include = ["datapizza"]
+ exclude = ["**/BUILD"]
+
+ # Ruff configuration
+ [tool.ruff]
+ line-length = 88
+
+ [tool.ruff.lint]
+ select = [
+     "W",    # pycodestyle warnings
+     "F",    # pyflakes
+     "B",    # flake8-bugbear
+     "I",    # isort
+     "UP",   # pyupgrade
+     "SIM",  # flake8-simplify
+     "RUF",  # Ruff-specific rules
+     "C4",   # flake8-comprehensions
+ ]