datapizza-ai-clients-mistral 0.0.2 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
datapizza/clients/mistral/__init__.py
@@ -0,0 +1,3 @@
+ from datapizza.clients.mistral.mistral_client import MistralClient
+
+ __all__ = ["MistralClient"]
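For orientation, a minimal usage sketch of the re-exported client. This is hypothetical: it assumes the datapizza-ai base Client exposes a public invoke() wrapper around the private _invoke() hook shown later in this diff, and that ClientResponse carries the generated blocks in its content field; neither is defined in this package.

    # Hypothetical usage sketch -- invoke() and the ClientResponse fields are
    # assumed from the base framework, not from this diff.
    from datapizza.clients.mistral import MistralClient

    client = MistralClient(api_key="YOUR_MISTRAL_API_KEY", model="mistral-large-latest")
    response = client.invoke(input="Say hello in one short sentence.")
    print(response.content)  # expected: a list containing a TextBlock with the reply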
datapizza/clients/mistral/memory_adapter.py
@@ -0,0 +1,103 @@
+ import base64
+ import json
+
+ from datapizza.memory.memory import Turn
+ from datapizza.memory.memory_adapter import MemoryAdapter
+ from datapizza.type import (
+     ROLE,
+     FunctionCallBlock,
+     FunctionCallResultBlock,
+     MediaBlock,
+     StructuredBlock,
+     TextBlock,
+ )
+
+
+ class MistralMemoryAdapter(MemoryAdapter):
+     def _turn_to_message(self, turn: Turn) -> dict:
+         content = []
+         tool_calls = []
+         tool_call_id = None
+
+         for block in turn:
+             block_dict = {}
+
+             match block:
+                 case TextBlock():
+                     block_dict = {"type": "text", "text": block.content}
+                 case FunctionCallBlock():
+                     tool_calls.append(
+                         {
+                             "id": block.id,
+                             "function": {
+                                 "name": block.name,
+                                 "arguments": json.dumps(block.arguments),
+                             },
+                             "type": "function",
+                         }
+                     )
+                 case FunctionCallResultBlock():
+                     tool_call_id = block.id
+                     block_dict = {"type": "text", "text": block.result}
+                 case StructuredBlock():
+                     block_dict = {"type": "text", "text": str(block.content)}
+                 case MediaBlock():
+                     match block.media.media_type:
+                         case "image":
+                             block_dict = self._process_image_block(block)
+                         # case "pdf":
+                         #     block_dict = self._process_pdf_block(block)
+
+                         case _:
+                             raise NotImplementedError(
+                                 f"Unsupported media type: {block.media.media_type}, only images are supported"
+                             )
+
+             if block_dict:
+                 content.append(block_dict)
+
+         messages: dict = {
+             "role": turn.role.value,
+         }
+
+         if content:
+             messages["content"] = content
+
+         if tool_calls:
+             messages["tool_calls"] = tool_calls
+
+         if tool_call_id:
+             messages["tool_call_id"] = tool_call_id
+
+         return messages
+
+     def _text_to_message(self, text: str, role: ROLE) -> dict:
+         return {"role": role.value, "content": text}
+
+     def _process_image_block(self, block: MediaBlock) -> dict:
+         match block.media.source_type:
+             case "url":
+                 return {
+                     "type": "image_url",
+                     "image_url": {"url": block.media.source},
+                 }
+             case "base64":
+                 return {
+                     "type": "image_url",
+                     "image_url": {
+                         "url": f"data:image/{block.media.extension};base64,{block.media.source}"
+                     },
+                 }
+             case "path":
+                 with open(block.media.source, "rb") as image_file:
+                     base64_image = base64.b64encode(image_file.read()).decode("utf-8")
+                 return {
+                     "type": "image_url",
+                     "image_url": {
+                         "url": f"data:image/{block.media.extension};base64,{base64_image}"
+                     },
+                 }
+             case _:
+                 raise NotImplementedError(
+                     f"Unsupported media source type: {block.media.source_type}, only url, base64, path are supported"
+                 )
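To make the adapter concrete, here is a sketch of what _turn_to_message() would build for a user turn containing text plus a URL image. The Turn constructor and the ROLE.USER member are assumptions about datapizza-ai's memory and type modules (they are not part of this diff); TextBlock, Media and MediaBlock follow the keyword usage seen in mistral_client.py below.

    # Hypothetical sketch: Turn(role=..., blocks=[...]) and ROLE.USER are assumed,
    # not defined in this package.
    from datapizza.clients.mistral.memory_adapter import MistralMemoryAdapter
    from datapizza.memory.memory import Turn
    from datapizza.type import ROLE, Media, MediaBlock, TextBlock

    adapter = MistralMemoryAdapter()
    turn = Turn(
        role=ROLE.USER,
        blocks=[
            TextBlock(content="What is in this picture?"),
            MediaBlock(media=Media(media_type="image", source_type="url",
                                   source="https://example.com/cat.png")),
        ],
    )
    # Expected shape, roughly:
    # {"role": "user",
    #  "content": [{"type": "text", "text": "What is in this picture?"},
    #              {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}}]}
    print(adapter._turn_to_message(turn))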
datapizza/clients/mistral/mistral_client.py
@@ -0,0 +1,466 @@
+ import json
+ import logging
+ import os
+ from collections.abc import AsyncIterator, Iterator
+ from typing import Literal
+
+ import requests
+ from datapizza.core.cache import Cache
+ from datapizza.core.clients import Client, ClientResponse
+ from datapizza.memory import Memory
+ from datapizza.tools import Tool
+ from datapizza.tools.tool_converter import ToolConverter
+ from datapizza.type import (
+     FunctionCallBlock,
+     Media,
+     MediaBlock,
+     Model,
+     StructuredBlock,
+     TextBlock,
+ )
+ from mistralai import Mistral
+ from mistralai.models.ocrresponse import OCRResponse
+
+ from datapizza.clients.mistral.memory_adapter import MistralMemoryAdapter
+
+ log = logging.getLogger(__name__)
+
+
+ class MistralClient(Client):
+     """A client for interacting with the Mistral API.
+
+     This class provides methods for invoking the Mistral API to generate responses
+     based on given input data. It extends the Client class.
+     """
+
+     def __init__(
+         self,
+         api_key: str,
+         model: str = "mistral-large-latest",
+         system_prompt: str = "",
+         temperature: float | None = None,
+         cache: Cache | None = None,
+     ):
+         """
+         Args:
+             api_key: The API key for the Mistral API.
+             model: The model to use for the Mistral API.
+             system_prompt: The system prompt to use for the Mistral API.
+             temperature: The temperature to use for the Mistral API.
+             cache: The cache to use for the Mistral API.
+         """
+         if temperature and not 0 <= temperature <= 2:
+             raise ValueError("Temperature must be between 0 and 2")
+
+         super().__init__(
+             model_name=model,
+             system_prompt=system_prompt,
+             temperature=temperature,
+             cache=cache,
+         )
+
+         self.api_key = api_key
+         self.memory_adapter = MistralMemoryAdapter()
+         self._set_client()
+
+     def _set_client(self):
+         self.client = Mistral(api_key=self.api_key)
+
+     def _response_to_client_response(
+         self, response, tool_map: dict[str, Tool] | None = None
+     ) -> ClientResponse:
+         blocks = []
+         for choice in response.choices:
+             if choice.message.content:
+                 blocks.append(TextBlock(content=choice.message.content))
+
+             if choice.message.tool_calls:
+                 for tool_call in choice.message.tool_calls:
+                     tool = tool_map.get(tool_call.function.name) if tool_map else None
+
+                     if tool is None:
+                         raise ValueError(f"Tool {tool_call.function.name} not found")
+
+                     blocks.append(
+                         FunctionCallBlock(
+                             id=tool_call.id,
+                             name=tool_call.function.name,
+                             arguments=json.loads(tool_call.function.arguments),
+                             tool=tool,
+                         )
+                     )
+
+             # Handle media content if present
+             if hasattr(choice.message, "media") and choice.message.media:
+                 for media_item in choice.message.media:
+                     media = Media(
+                         media_type=media_item.type,
+                         source_type="url" if media_item.source_url else "base64",
+                         source=media_item.source_url or media_item.data,
+                         detail=getattr(media_item, "detail", "high"),
+                     )
+                     blocks.append(MediaBlock(media=media))
+
+         log.debug(f"{self.__class__.__name__} response = {response}")
+         return ClientResponse(
+             content=blocks,
+             stop_reason=response.choices[0].finish_reason,
+             prompt_tokens_used=response.usage.prompt_tokens,
+             completion_tokens_used=response.usage.completion_tokens,
+             cached_tokens_used=0,
+         )
+
+     def _convert_tools(self, tools: Tool) -> dict:
+         """Convert tools to Mistral function format"""
+         return ToolConverter.to_mistral_format(tools)
+
+     def _convert_tool_choice(
+         self, tool_choice: Literal["auto", "required", "none"] | list[str]
+     ) -> dict | Literal["auto", "required", "none"]:
+         if isinstance(tool_choice, list) and len(tool_choice) > 1:
+             raise NotImplementedError(
+                 "multiple function names are not supported by Mistral"
+             )
+         elif isinstance(tool_choice, list):
+             return {
+                 "type": "function",
+                 "function": {"name": tool_choice[0]},
+             }
+         else:
+             return tool_choice
+
+     def _invoke(
+         self,
+         *,
+         input: str,
+         tools: list[Tool] | None,
+         memory: Memory | None,
+         tool_choice: Literal["auto", "required", "none"] | list[str],
+         temperature: float | None,
+         max_tokens: int,
+         system_prompt: str | None,
+         **kwargs,
+     ) -> ClientResponse:
+         if tools is None:
+             tools = []
+         log.debug(f"{self.__class__.__name__} input = {input}")
+         messages = self._memory_to_contents(system_prompt, input, memory)
+
+         tool_map = {tool.name: tool for tool in tools}
+
+         request_params = {
+             "model": self.model_name,
+             "messages": messages,
+             "stream": False,
+             "max_tokens": max_tokens,
+             **kwargs,
+         }
+
+         if temperature:
+             request_params["temperature"] = temperature
+
+         if tools:
+             request_params["tools"] = [self._convert_tools(tool) for tool in tools]
+             request_params["tool_choice"] = self._convert_tool_choice(tool_choice)
+
+         response = self.client.chat.complete(**request_params)
+         return self._response_to_client_response(response, tool_map)
+
+     async def _a_invoke(
+         self,
+         *,
+         input: str,
+         tools: list[Tool] | None,
+         memory: Memory | None,
+         tool_choice: Literal["auto", "required", "none"] | list[str],
+         temperature: float | None,
+         max_tokens: int,
+         system_prompt: str | None,
+         **kwargs,
+     ) -> ClientResponse:
+         if tools is None:
+             tools = []
+         log.debug(f"{self.__class__.__name__} input = {input}")
+         messages = self._memory_to_contents(system_prompt, input, memory)
+
+         tool_map = {tool.name: tool for tool in tools}
+
+         request_params = {
+             "model": self.model_name,
+             "messages": messages,
+             "stream": False,
+             "max_tokens": max_tokens,
+             **kwargs,
+         }
+
+         if temperature:
+             request_params["temperature"] = temperature
+
+         if tools:
+             request_params["tools"] = [self._convert_tools(tool) for tool in tools]
+             request_params["tool_choice"] = self._convert_tool_choice(tool_choice)
+
+         response = await self.client.chat.complete_async(**request_params)
+         return self._response_to_client_response(response, tool_map)
+
+     def _stream_invoke(
+         self,
+         input: str,
+         tools: list[Tool] | None,
+         memory: Memory | None,
+         tool_choice: Literal["auto", "required", "none"] | list[str],
+         temperature: float | None,
+         max_tokens: int,
+         system_prompt: str | None,
+         **kwargs,
+     ) -> Iterator[ClientResponse]:
+         if tools is None:
+             tools = []
+         messages = self._memory_to_contents(system_prompt, input, memory)
+         request_params = {
+             "model": self.model_name,
+             "messages": messages,
+             "max_tokens": max_tokens,
+             **kwargs,
+         }
+
+         if temperature:
+             request_params["temperature"] = temperature
+
+         if tools:
+             request_params["tools"] = [self._convert_tools(tool) for tool in tools]
+             request_params["tool_choice"] = self._convert_tool_choice(tool_choice)
+
+         response = self.client.chat.stream(**request_params)
+         text = ""
+         for chunk in response:
+             delta = chunk.data.choices[0].delta.content or ""
+             text += delta
+             yield ClientResponse(
+                 content=[],
+                 delta=str(delta),
+                 stop_reason=chunk.data.choices[0].finish_reason,
+                 prompt_tokens_used=chunk.data.usage.prompt_tokens
+                 if chunk.data.usage
+                 else 0,
+                 completion_tokens_used=chunk.data.usage.completion_tokens
+                 if chunk.data.usage
+                 else 0,
+                 cached_tokens_used=0,
+             )
+
+     async def _a_stream_invoke(
+         self,
+         input: str,
+         tools: list[Tool] | None = None,
+         memory: Memory | None = None,
+         tool_choice: Literal["auto", "required", "none"] | list[str] = "auto",
+         temperature: float | None = None,
+         max_tokens: int | None = None,
+         system_prompt: str | None = None,
+         **kwargs,
+     ) -> AsyncIterator[ClientResponse]:
+         if tools is None:
+             tools = []
+         messages = self._memory_to_contents(system_prompt, input, memory)
+         request_params = {
+             "model": self.model_name,
+             "messages": messages,
+             "max_tokens": max_tokens or 1024,
+             **kwargs,
+         }
+
+         if temperature:
+             request_params["temperature"] = temperature
+
+         if tools:
+             request_params["tools"] = [self._convert_tools(tool) for tool in tools]
+             request_params["tool_choice"] = self._convert_tool_choice(tool_choice)
+
+         response = await self.client.chat.stream_async(**request_params)
+         text = ""
+         async for chunk in response:
+             delta = chunk.data.choices[0].delta.content or ""
+             text += delta
+             yield ClientResponse(
+                 content=[],
+                 delta=str(delta),
+                 stop_reason=chunk.data.choices[0].finish_reason,
+                 prompt_tokens_used=chunk.data.usage.prompt_tokens
+                 if chunk.data.usage
+                 else 0,
+                 completion_tokens_used=chunk.data.usage.completion_tokens
+                 if chunk.data.usage
+                 else 0,
+                 cached_tokens_used=0,
+             )
+
+     def _structured_response(
+         self,
+         input: str,
+         output_cls: type[Model],
+         memory: Memory | None,
+         temperature: float | None,
+         max_tokens: int,
+         system_prompt: str | None,
+         tools: list[Tool] | None,
+         tool_choice: Literal["auto", "required", "none"] | list[str] = "auto",
+         **kwargs,
+     ) -> ClientResponse:
+         # Add system message to enforce JSON output
+         messages = self._memory_to_contents(system_prompt, input, memory)
+
+         if not tools:
+             tools = []
+
+         if tools:
+             kwargs["tools"] = [self._convert_tools(tool) for tool in tools]
+             kwargs["tool_choice"] = self._convert_tool_choice(tool_choice)
+
+         response = self.client.chat.parse(
+             model=self.model_name,
+             messages=messages,
+             response_format=output_cls,
+             temperature=temperature,
+             max_tokens=max_tokens,
+             **kwargs,
+         )
+
+         if not response.choices:
+             raise ValueError("No response from Mistral")
+
+         log.debug(f"{self.__class__.__name__} structured response: {response}")
+         stop_reason = response.choices[0].finish_reason if response.choices else None
+         if hasattr(output_cls, "model_validate_json"):
+             structured_data = output_cls.model_validate_json(
+                 str(response.choices[0].message.content)  # type: ignore
+             )
+         else:
+             structured_data = json.loads(str(response.choices[0].message.content))  # type: ignore
+         return ClientResponse(
+             content=[StructuredBlock(content=structured_data)],
+             stop_reason=stop_reason,
+             prompt_tokens_used=response.usage.prompt_tokens,
+             completion_tokens_used=response.usage.completion_tokens,
+             cached_tokens_used=0,
+         )
+
+     async def _a_structured_response(
+         self,
+         input: str,
+         output_cls: type[Model],
+         memory: Memory | None,
+         temperature: float | None,
+         max_tokens: int,
+         system_prompt: str | None,
+         tools: list[Tool] | None,
+         tool_choice: Literal["auto", "required", "none"] | list[str] = "auto",
+         **kwargs,
+     ) -> ClientResponse:
+         # Add system message to enforce JSON output
+         messages = self._memory_to_contents(system_prompt, input, memory)
+
+         if not tools:
+             tools = []
+
+         if tools:
+             kwargs["tools"] = [self._convert_tools(tool) for tool in tools]
+             kwargs["tool_choice"] = self._convert_tool_choice(tool_choice)
+
+         response = await self.client.chat.parse_async(
+             model=self.model_name,
+             messages=messages,
+             response_format=output_cls,
+             temperature=temperature,
+             max_tokens=max_tokens,
+             **kwargs,
+         )
+
+         if not response.choices:
+             raise ValueError("No response from Mistral")
+
+         log.debug(f"{self.__class__.__name__} structured response: {response}")
+         stop_reason = response.choices[0].finish_reason if response.choices else None
+         if hasattr(output_cls, "model_validate_json"):
+             structured_data = output_cls.model_validate_json(
+                 str(response.choices[0].message.content)  # type: ignore
+             )
+         else:
+             structured_data = json.loads(str(response.choices[0].message.content))  # type: ignore
+         return ClientResponse(
+             content=[StructuredBlock(content=structured_data)],
+             stop_reason=stop_reason,
+             prompt_tokens_used=response.usage.prompt_tokens,
+             completion_tokens_used=response.usage.completion_tokens,
+             cached_tokens_used=0,
+         )
+
+     def _embed(
+         self, text: str | list[str], model_name: str | None, **kwargs
+     ) -> list[float] | list[list[float]]:
+         """Embed a text using the model"""
+         response = self.client.embeddings.create(
+             inputs=text, model=model_name or self.model_name, **kwargs
+         )
+
+         embeddings = [item.embedding for item in response.data]
+
+         if not embeddings:
+             return []
+
+         if isinstance(text, str) and embeddings[0]:
+             return embeddings[0]
+
+         return embeddings
+
+     async def _a_embed(
+         self, text: str | list[str], model_name: str | None, **kwargs
+     ) -> list[float] | list[list[float]]:
+         """Embed a text using the model"""
+         response = await self.client.embeddings.create_async(
+             inputs=text, model=model_name or self.model_name, **kwargs
+         )
+
+         embeddings = [item.embedding for item in response.data]
+
+         if not embeddings:
+             return []
+
+         if isinstance(text, str) and embeddings[0]:
+             return embeddings[0]
+
+         return embeddings or []
+
+     def parse_document(
+         self,
+         document_path: str,
+         autodelete: bool = True,
+         include_image_base64: bool = True,
+     ) -> OCRResponse:
+         filename = os.path.basename(document_path)
+         with open(document_path, "rb") as f:
+             uploaded_pdf = self.client.files.upload(
+                 file={"file_name": filename, "content": f}, purpose="ocr"
+             )
+
+         signed_url = self.client.files.get_signed_url(file_id=uploaded_pdf.id)
+
+         response = self.client.ocr.process(
+             model="mistral-ocr-latest",
+             document={
+                 "type": "document_url",
+                 "document_url": signed_url.url,
+             },
+             include_image_base64=include_image_base64,
+         )
+
+         if autodelete:
+             url = f"https://api.mistral.ai/v1/files/{uploaded_pdf.id}"
+             headers = {
+                 "Content-Type": "application/json",
+                 "Authorization": f"Bearer {self.api_key}",
+             }
+
+             requests.delete(url, headers=headers, timeout=30)
+
+         return response
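Since parse_document() above is the one public helper defined end to end in this file, a short OCR usage sketch follows. The .pages / .markdown layout of the returned OCRResponse comes from the mistralai SDK and is an assumption, not something this diff defines.

    # OCR usage sketch. MistralClient and parse_document() are defined above;
    # the pages/markdown fields on OCRResponse are assumed from the mistralai SDK.
    from datapizza.clients.mistral import MistralClient

    client = MistralClient(api_key="YOUR_MISTRAL_API_KEY")
    ocr = client.parse_document("invoice.pdf", autodelete=True, include_image_base64=False)
    for page in ocr.pages:  # assumed OCRResponse layout
        print(page.markdown)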
datapizza_ai_clients_mistral-0.0.2.dist-info/METADATA
@@ -0,0 +1,13 @@
+ Metadata-Version: 2.4
+ Name: datapizza-ai-clients-mistral
+ Version: 0.0.2
+ Summary: Mistral AI client for the datapizza-ai framework
+ Author-email: Datapizza <datapizza@datapizza.tech>
+ License: MIT
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python :: 3
+ Requires-Python: <4,>=3.10.0
+ Requires-Dist: datapizza-ai-core>=0.0.0
+ Requires-Dist: mistralai<2.0.0,>=1.2.0
+ Requires-Dist: requests<3.0.0,>=2.25.0
datapizza_ai_clients_mistral-0.0.2.dist-info/RECORD
@@ -0,0 +1,6 @@
+ datapizza/clients/mistral/__init__.py,sha256=P3udscOuAOWZ8ltqzNv03IfNiLvZwHqv096v-Cf-YPg,96
+ datapizza/clients/mistral/memory_adapter.py,sha256=DfXPFZ9yR4l2tai3tJTn6JiMNlXqf2uxv8bk-jnWoqo,3545
+ datapizza/clients/mistral/mistral_client.py,sha256=YcR74FGgIU8ityCMJECR3fqjIwkKKjHcFyBFlIi_R6Y,16108
+ datapizza_ai_clients_mistral-0.0.2.dist-info/METADATA,sha256=lpBSEVgJX2toEV1Oij-I0fPIpYViT9Fyojx6Bc81N0E,487
+ datapizza_ai_clients_mistral-0.0.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ datapizza_ai_clients_mistral-0.0.2.dist-info/RECORD,,
datapizza_ai_clients_mistral-0.0.2.dist-info/WHEEL
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: hatchling 1.27.0
+ Root-Is-Purelib: true
+ Tag: py3-none-any