mistralai 0.2.0__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mistralai/async_client.py CHANGED
@@ -92,7 +92,7 @@ class MistralAsyncClient(ClientBase):
     async def _request(
         self,
         method: str,
-        json: Dict[str, Any],
+        json: Optional[Dict[str, Any]],
         path: str,
         stream: bool = False,
         attempt: int = 1,
@@ -291,3 +291,74 @@ class MistralAsyncClient(ClientBase):
             return ModelList(**response)
 
         raise MistralException("No response received")
+
+    async def completion(
+        self,
+        model: str,
+        prompt: str,
+        suffix: Optional[str] = None,
+        temperature: Optional[float] = None,
+        max_tokens: Optional[int] = None,
+        top_p: Optional[float] = None,
+        random_seed: Optional[int] = None,
+        stop: Optional[List[str]] = None,
+    ) -> ChatCompletionResponse:
+        """An asynchronous completion endpoint that returns a single response.
+
+        Args:
+            model (str): model the name of the model to get completions with, e.g. codestral-latest
+            prompt (str): the prompt to complete
+            suffix (Optional[str]): the suffix to append to the prompt for fill-in-the-middle completion
+            temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5.
+            max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None.
+            top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9.
+                Defaults to None.
+            random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None.
+            stop (Optional[List[str]], optional): a list of tokens to stop generation at, e.g. ['/n/n']
+        Returns:
+            Dict[str, Any]: a response object containing the generated text.
+        """
+        request = self._make_completion_request(
+            prompt, model, suffix, temperature, max_tokens, top_p, random_seed, stop
+        )
+        single_response = self._request("post", request, "v1/fim/completions")
+
+        async for response in single_response:
+            return ChatCompletionResponse(**response)
+
+        raise MistralException("No response received")
+
+    async def completion_stream(
+        self,
+        model: str,
+        prompt: str,
+        suffix: Optional[str] = None,
+        temperature: Optional[float] = None,
+        max_tokens: Optional[int] = None,
+        top_p: Optional[float] = None,
+        random_seed: Optional[int] = None,
+        stop: Optional[List[str]] = None,
+    ) -> AsyncGenerator[ChatCompletionStreamResponse, None]:
+        """An asynchronous completion endpoint that returns a streaming response.
+
+        Args:
+            model (str): model the name of the model to get completions with, e.g. codestral-latest
+            prompt (str): the prompt to complete
+            suffix (Optional[str]): the suffix to append to the prompt for fill-in-the-middle completion
+            temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5.
+            max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None.
+            top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9.
+                Defaults to None.
+            random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None.
+            stop (Optional[List[str]], optional): a list of tokens to stop generation at, e.g. ['/n/n']
+
+        Returns:
+            Dict[str, Any]: a response object containing the generated text.
+        """
+        request = self._make_completion_request(
+            prompt, model, suffix, temperature, max_tokens, top_p, random_seed, stop, stream=True
+        )
+        async_response = self._request("post", request, "v1/fim/completions", stream=True)
+
+        async for json_response in async_response:
+            yield ChatCompletionStreamResponse(**json_response)
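
Taken together, this hunk gives the asynchronous client fill-in-the-middle (FIM) completions against the new v1/fim/completions route. Below is a minimal usage sketch of the streaming variant; it assumes the api_key constructor argument and the choices/delta response fields carried over from earlier releases, neither of which appears in this diff:

import asyncio
import os

from mistralai.async_client import MistralAsyncClient


async def main() -> None:
    # Assumes the API key is exported as MISTRAL_API_KEY.
    client = MistralAsyncClient(api_key=os.environ["MISTRAL_API_KEY"])

    # Stream a fill-in-the-middle completion: the model generates the code
    # that belongs between `prompt` and `suffix`.
    async for chunk in client.completion_stream(
        model="codestral-latest",
        prompt="def fibonacci(n: int) -> int:\n",
        suffix="\nprint(fibonacci(10))",
        max_tokens=64,
    ):
        if chunk.choices[0].delta.content is not None:
            print(chunk.choices[0].delta.content, end="")


asyncio.run(main())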
mistralai/client.py CHANGED
@@ -85,7 +85,7 @@ class MistralClient(ClientBase):
     def _request(
         self,
         method: str,
-        json: Dict[str, Any],
+        json: Optional[Dict[str, Any]],
         path: str,
         stream: bool = False,
         attempt: int = 1,
@@ -285,3 +285,77 @@ class MistralClient(ClientBase):
             return ModelList(**response)
 
         raise MistralException("No response received")
+
+    def completion(
+        self,
+        model: str,
+        prompt: str,
+        suffix: Optional[str] = None,
+        temperature: Optional[float] = None,
+        max_tokens: Optional[int] = None,
+        top_p: Optional[float] = None,
+        random_seed: Optional[int] = None,
+        stop: Optional[List[str]] = None,
+    ) -> ChatCompletionResponse:
+        """A completion endpoint that returns a single response.
+
+        Args:
+            model (str): model the name of the model to get completion with, e.g. codestral-latest
+            prompt (str): the prompt to complete
+            suffix (Optional[str]): the suffix to append to the prompt for fill-in-the-middle completion
+            temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5.
+            max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None.
+            top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9.
+                Defaults to None.
+            random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None.
+            stop (Optional[List[str]], optional): a list of tokens to stop generation at, e.g. ['/n/n']
+
+        Returns:
+            Dict[str, Any]: a response object containing the generated text.
+        """
+        request = self._make_completion_request(
+            prompt, model, suffix, temperature, max_tokens, top_p, random_seed, stop
+        )
+
+        single_response = self._request("post", request, "v1/fim/completions", stream=False)
+
+        for response in single_response:
+            return ChatCompletionResponse(**response)
+
+        raise MistralException("No response received")
+
+    def completion_stream(
+        self,
+        model: str,
+        prompt: str,
+        suffix: Optional[str] = None,
+        temperature: Optional[float] = None,
+        max_tokens: Optional[int] = None,
+        top_p: Optional[float] = None,
+        random_seed: Optional[int] = None,
+        stop: Optional[List[str]] = None,
+    ) -> Iterable[ChatCompletionStreamResponse]:
+        """An asynchronous completion endpoint that streams responses.
+
+        Args:
+            model (str): model the name of the model to get completions with, e.g. codestral-latest
+            prompt (str): the prompt to complete
+            suffix (Optional[str]): the suffix to append to the prompt for fill-in-the-middle completion
+            temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5.
+            max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None.
+            top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9.
+                Defaults to None.
+            random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None.
+            stop (Optional[List[str]], optional): a list of tokens to stop generation at, e.g. ['/n/n']
+
+        Returns:
+            Iterable[Dict[str, Any]]: a generator that yields response objects containing the generated text.
+        """
+        request = self._make_completion_request(
+            prompt, model, suffix, temperature, max_tokens, top_p, random_seed, stop, stream=True
+        )
+
+        response = self._request("post", request, "v1/fim/completions", stream=True)
+
+        for json_streamed_response in response:
+            yield ChatCompletionStreamResponse(**json_streamed_response)
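
The synchronous client gains the same pair of methods. A short sketch of the single-shot call follows; it again assumes the api_key constructor argument and the choices[0].message.content response shape from earlier releases rather than anything shown in this diff:

import os

from mistralai.client import MistralClient

client = MistralClient(api_key=os.environ["MISTRAL_API_KEY"])

# One-shot FIM completion: the returned ChatCompletionResponse holds the text
# generated between the prompt and the suffix.
response = client.completion(
    model="codestral-latest",
    prompt="def is_even(n: int) -> bool:\n    return ",
    suffix="\n",
    temperature=0.0,
    stop=["\n\n"],
)
print(response.choices[0].message.content)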
mistralai/client_base.py CHANGED
@@ -73,6 +73,63 @@ class ClientBase(ABC):
 
         return parsed_messages
 
+    def _make_completion_request(
+        self,
+        prompt: str,
+        model: Optional[str] = None,
+        suffix: Optional[str] = None,
+        temperature: Optional[float] = None,
+        max_tokens: Optional[int] = None,
+        top_p: Optional[float] = None,
+        random_seed: Optional[int] = None,
+        stop: Optional[List[str]] = None,
+        stream: Optional[bool] = False,
+    ) -> Dict[str, Any]:
+        request_data: Dict[str, Any] = {
+            "prompt": prompt,
+            "suffix": suffix,
+            "model": model,
+            "stream": stream,
+        }
+
+        if stop is not None:
+            request_data["stop"] = stop
+
+        if model is not None:
+            request_data["model"] = model
+        else:
+            if self._default_model is None:
+                raise MistralException(message="model must be provided")
+            request_data["model"] = self._default_model
+
+        request_data.update(
+            self._build_sampling_params(
+                temperature=temperature, max_tokens=max_tokens, top_p=top_p, random_seed=random_seed
+            )
+        )
+
+        self._logger.debug(f"Completion request: {request_data}")
+
+        return request_data
+
+    def _build_sampling_params(
+        self,
+        max_tokens: Optional[int],
+        random_seed: Optional[int],
+        temperature: Optional[float],
+        top_p: Optional[float],
+    ) -> Dict[str, Any]:
+        params = {}
+        if temperature is not None:
+            params["temperature"] = temperature
+        if max_tokens is not None:
+            params["max_tokens"] = max_tokens
+        if top_p is not None:
+            params["top_p"] = top_p
+        if random_seed is not None:
+            params["random_seed"] = random_seed
+        return params
+
     def _make_chat_request(
         self,
         messages: List[Any],
@@ -99,16 +156,14 @@ class ClientBase(ABC):
                 raise MistralException(message="model must be provided")
             request_data["model"] = self._default_model
 
+        request_data.update(
+            self._build_sampling_params(
+                temperature=temperature, max_tokens=max_tokens, top_p=top_p, random_seed=random_seed
+            )
+        )
+
         if tools is not None:
             request_data["tools"] = self._parse_tools(tools)
-        if temperature is not None:
-            request_data["temperature"] = temperature
-        if max_tokens is not None:
-            request_data["max_tokens"] = max_tokens
-        if top_p is not None:
-            request_data["top_p"] = top_p
-        if random_seed is not None:
-            request_data["random_seed"] = random_seed
         if stream is not None:
             request_data["stream"] = stream
 
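
For reference, the JSON body that _make_completion_request assembles is small. The standalone sketch below (build_fim_payload is a hypothetical helper, not part of the package) mirrors the field handling shown in the diff: required keys always present, optional sampling parameters only when explicitly set.

from typing import Any, Dict, List, Optional


def build_fim_payload(
    prompt: str,
    model: str,
    suffix: Optional[str] = None,
    temperature: Optional[float] = None,
    max_tokens: Optional[int] = None,
    top_p: Optional[float] = None,
    random_seed: Optional[int] = None,
    stop: Optional[List[str]] = None,
    stream: bool = False,
) -> Dict[str, Any]:
    # Required keys, matching _make_completion_request.
    payload: Dict[str, Any] = {
        "prompt": prompt,
        "suffix": suffix,
        "model": model,
        "stream": stream,
    }
    if stop is not None:
        payload["stop"] = stop
    # Optional sampling parameters, matching _build_sampling_params.
    for key, value in (
        ("temperature", temperature),
        ("max_tokens", max_tokens),
        ("top_p", top_p),
        ("random_seed", random_seed),
    ):
        if value is not None:
            payload[key] = value
    return payload


# Prints: {'prompt': 'def add(a, b):', 'suffix': None, 'model': 'codestral-latest',
#          'stream': False, 'temperature': 0.2}
print(build_fim_payload("def add(a, b):", "codestral-latest", temperature=0.2))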
{mistralai-0.2.0.dist-info → mistralai-0.3.0.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mistralai
-Version: 0.2.0
+Version: 0.3.0
 Summary:
 Author: Bam4d
 Author-email: bam4d@mistral.ai
{mistralai-0.2.0.dist-info → mistralai-0.3.0.dist-info}/RECORD RENAMED
@@ -1,7 +1,7 @@
 mistralai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mistralai/async_client.py,sha256=orHDWXtchV8yYk5AQdX-rBPvPdARFdAXqjX_Mlo6H8U,11378
-mistralai/client.py,sha256=3ta6VFeKKngp1tI8doK7WKDLcndk_4LYAbn01_6GodE,11119
-mistralai/client_base.py,sha256=SY8E1VctuPvD3LVTX0hudUGRHd-j6xee-LtE0f2ABSs,4710
+mistralai/async_client.py,sha256=rqgOnJrfL222aVPNPn5sqTrFespxo89XGO74Yf7QbHs,14984
+mistralai/client.py,sha256=jDsfMMX56KAp24MweT-CgcO02iRODxeGhYi8vqi8oIo,14719
+mistralai/client_base.py,sha256=OV-gp8EAaU4aoaNfZMS2dbcGw38DqPYty-H_aqAcJsA,6349
 mistralai/constants.py,sha256=FvokZPfTBC-DC6-HfiV83pD3FP6huHk3SUIl0yx5jx8,84
 mistralai/exceptions.py,sha256=R3pswvZyY5CuSbqhVklgfGPVJoz7T7l2VQKMOXK229A,1652
 mistralai/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -10,7 +10,7 @@ mistralai/models/common.py,sha256=zatP4aV_LIEpzj3_igsKkJBICwGhmXG0LX3CdO3kn-o,17
 mistralai/models/embeddings.py,sha256=-VthLQBj6wrq7HXJbGmnkQEEanSemA3MAlaMFh94VBg,331
 mistralai/models/models.py,sha256=mDNIPnbsZOnfS7i8563NnIvYYxYbavu1CBijEoEqitw,714
 mistralai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mistralai-0.2.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-mistralai-0.2.0.dist-info/METADATA,sha256=r-IhqjSA1z1eQcN7uU2Nm0oCe5fAfJaagkH64-ZKHeQ,1831
-mistralai-0.2.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-mistralai-0.2.0.dist-info/RECORD,,
+mistralai-0.3.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+mistralai-0.3.0.dist-info/METADATA,sha256=bPYP_fghDu27kG2L1dr_hZ7YGDD8lC9Cte3qJoSMMjk,1831
+mistralai-0.3.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+mistralai-0.3.0.dist-info/RECORD,,