mixpeek 0.6.22.tar.gz → 0.6.24.tar.gz

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: mixpeek
- Version: 0.6.22
+ Version: 0.6.24
  Summary: Mixpeek Python SDK
  Home-page: https://github.com/mixpeek/mixpeek-python
  Author: Ethan Steininger
@@ -46,7 +46,7 @@ extraction = mixpeek.extract.text(

  ```python
  embedding = mixpeek.embed.video(
-     model="mixpeek/vuse-generic-v1",
+     model_id="mixpeek/vuse-generic-v1",
      input="s3://waving_boy.mp4",
      input_type="url"
  )
@@ -60,6 +60,7 @@ class ResponseFormat(BaseModel):
      weather: float

  generated_content = mixpeek.generate.text(
+     model_id="openai/gpt-4-turbo",
      response_format=ResponseFormat,
      context="Please tell me the weather and make sure to respond in the provided JSON schema"
  )
@@ -31,7 +31,7 @@ extraction = mixpeek.extract.text(

  ```python
  embedding = mixpeek.embed.video(
-     model="mixpeek/vuse-generic-v1",
+     model_id="mixpeek/vuse-generic-v1",
      input="s3://waving_boy.mp4",
      input_type="url"
  )
@@ -45,6 +45,7 @@ class ResponseFormat(BaseModel):
      weather: float

  generated_content = mixpeek.generate.text(
+     model_id="openai/gpt-4-turbo",
      response_format=ResponseFormat,
      context="Please tell me the weather and make sure to respond in the provided JSON schema"
  )
@@ -11,7 +11,7 @@ from .endpoints.pipelines import Pipelines
  class Mixpeek:
      def __init__(self, api_key: str):
          self.api_key = api_key
-         self.base_url = "https://api.mixpeek.com/"
+         self.base_url = "http://localhost:8000/"
          self.headers = {
              "Authorization": f"Bearer {self.api_key}",
              "Content-Type": "application/json"
@@ -5,44 +5,44 @@ class Embed:
          self.base_url = base_url
          self.headers = headers

-     def video(self, model: str, input: str, input_type: str):
+     def video(self, model_id: str, input: str, input_type: str):
          url = f"{self.base_url}embed/"
          data = {
              "modality": "video",
-             "model": model,
+             "model_id": model_id,
              "input": input,
              "input_type": input_type
          }
          response = requests.post(url, json=data, headers=self.headers)
          return response.json()

-     def text(self, model: str, input: str, input_type: str):
+     def text(self, model_id: str, input: str, input_type: str):
          url = f"{self.base_url}embed/"
          data = {
              "modality": "text",
-             "model": model,
+             "model_id": model_id,
              "input": input,
              "input_type": input_type
          }
          response = requests.post(url, json=data, headers=self.headers)
          return response.json()

-     def image(self, model: str, input: str, input_type: str):
+     def image(self, model_id: str, input: str, input_type: str):
          url = f"{self.base_url}embed/"
          data = {
              "modality": "image",
-             "model": model,
+             "model_id": model_id,
              "input": input,
              "input_type": input_type
          }
          response = requests.post(url, json=data, headers=self.headers)
          return response.json()

-     def audio(self, model: str, input: str, input_type: str):
+     def audio(self, model_id: str, input: str, input_type: str):
          url = f"{self.base_url}embed/"
          data = {
              "modality": "audio",
-             "model": model,
+             "model_id": model_id,
              "input": input,
              "input_type": input_type
          }
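Every embed helper (video, text, image, audio) now takes `model_id` in place of `model`, and the same key is sent in the request body. A usage sketch, assuming a client instance constructed as above that exposes the endpoint on an `embed` attribute, as the README examples imply; the model id is reused from the README and the input values are placeholders:

```python
# The keyword rename applies to all four modalities.
video_emb = client.embed.video(
    model_id="mixpeek/vuse-generic-v1",
    input="s3://waving_boy.mp4",
    input_type="url",
)
text_emb = client.embed.text(
    model_id="mixpeek/vuse-generic-v1",  # reused for illustration only
    input="hello world",
    input_type="text",
)
```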
@@ -6,12 +6,12 @@ class Generate:
          self.base_url = base_url
          self.headers = headers

-     def generate(self, response_format: BaseModel, context: str):
+     def text(self, model_id: str, response_format: BaseModel, context: str):
          url = f"{self.base_url}generate/text"
          data = {
-             "response_format": response_format.model_json_schema(),
+             "model_id": model_id,
+             "response_format": response_format.schema_json(), # Ensure correct method to get JSON schema
              "context": context
          }
          response = requests.post(url, json=data, headers=self.headers)
-         return response.json()
-
+         return response.json()
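The renamed `Generate.text` adds `model_id` and switches from `model_json_schema()` (Pydantic v2, returns a dict) to `schema_json()` (Pydantic v1, returns a JSON string); the inline comment in the hunk itself flags that the schema method should be double-checked. A caller-side sketch of a version-agnostic helper (hypothetical, not part of the SDK):

```python
# Hypothetical helper: produce a JSON-schema dict for a Pydantic model class
# under either Pydantic v1 or v2.
import json
from typing import Type

from pydantic import BaseModel


def response_schema(model_cls: Type[BaseModel]) -> dict:
    if hasattr(model_cls, "model_json_schema"):   # Pydantic v2
        return model_cls.model_json_schema()
    return json.loads(model_cls.schema_json())    # Pydantic v1: JSON string -> dict
```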
@@ -17,7 +17,7 @@ class Tools:
              "url": url,
              "frame_interval": frame_interval,
              "resolution": resolution,
-             "use_base64": return_base64
+             "return_base64": return_base64
          }
          response = requests.post(endpoint, json=data, headers=self.headers)
          return response.json()
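The only change here is the request-body key, `use_base64` → `return_base64`. For reference, the body the method now posts looks like the sketch below; only the key names come from the hunk (the enclosing method and endpoint are not shown in this diff), and the values are placeholders:

```python
# Placeholder values; key names taken from the hunk above.
data = {
    "url": "https://example.com/waving_boy.mp4",
    "frame_interval": 1,
    "resolution": 720,
    "return_base64": True,  # renamed from "use_base64"
}
```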
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: mixpeek
- Version: 0.6.22
+ Version: 0.6.24
  Summary: Mixpeek Python SDK
  Home-page: https://github.com/mixpeek/mixpeek-python
  Author: Ethan Steininger
@@ -46,7 +46,7 @@ extraction = mixpeek.extract.text(

  ```python
  embedding = mixpeek.embed.video(
-     model="mixpeek/vuse-generic-v1",
+     model_id="mixpeek/vuse-generic-v1",
      input="s3://waving_boy.mp4",
      input_type="url"
  )
@@ -60,6 +60,7 @@ class ResponseFormat(BaseModel):
      weather: float

  generated_content = mixpeek.generate.text(
+     model_id="openai/gpt-4-turbo",
      response_format=ResponseFormat,
      context="Please tell me the weather and make sure to respond in the provided JSON schema"
  )
@@ -6,7 +6,7 @@ with open('requirements.txt') as f:

  setup(
      name='mixpeek',
-     version='0.6.22',
+     version='0.6.24',
      author='Ethan Steininger',
      author_email='ethan@mixpeek.com',
      description='Mixpeek Python SDK',
5 files without changes