pullai 0.3.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pullai-0.3.5/PKG-INFO ADDED
@@ -0,0 +1,64 @@
1
+ Metadata-Version: 2.4
2
+ Name: pullai
3
+ Version: 0.3.5
4
+ Summary: Python SDK for PullAI — run AI models locally
5
+ Author: Metiu
6
+ License: Apache-2.0
7
+ Project-URL: Homepage, https://github.com/pullai/pullai
8
+ Requires-Python: >=3.8
9
+ Description-Content-Type: text/markdown
10
+
11
+ # PullAI Python SDK
12
+
13
+ ```bash
14
+ pip install pullai
15
+ ```
16
+
17
+ Requires the server running: `pullai serve`
18
+
19
+ ---
20
+
21
+ ## Quick start
22
+
23
+ ```python
24
+ from pullai import PullAI
25
+
26
+ ai = PullAI()
27
+
28
+ # List installed models
29
+ ai.models()
30
+
31
+ # Single message (streaming)
32
+ ai.chat("llm/mistral:7b", "What is Python?")
33
+
34
+ # Multi-turn conversation (model remembers everything)
35
+ conv = ai.conversation("llm/mistral:7b")
36
+ conv.say("My name is Marco and I love Python.")
37
+ conv.say("What is my name?") # → "Your name is Marco."
38
+ conv.say("What language do I love?") # → "You love Python."
39
+ conv.clear() # wipe memory, start fresh
40
+
41
+ # Generate an image
42
+ ai.image("image/sdxl", "an astronaut cat on Mars", "cat.png")
43
+
44
+ # Transcribe audio
45
+ ai.transcribe("audio/whisper:large", "meeting.mp3")
46
+
47
+ # Text to speech
48
+ ai.speak("audio/kokoro", "Hello!", "hello.wav")
49
+
50
+ # Generate a video
51
+ ai.video("video/cogvideo:5b", "a panda running", "panda.mp4")
52
+
53
+ # Generate a 3D model from text
54
+ ai.model3d("3d/shap-e", "chair.ply", description="a wooden chair")
55
+
56
+ # Generate a 3D model from image
57
+ ai.model3d("3d/triposr", "chair.obj", image="photo.jpg")
58
+ ```
59
+
60
+ ---
61
+
62
+ ## License
63
+
64
+ Apache-2.0 © Metiu — PullAI Contributors
pullai-0.3.5/README.md ADDED
@@ -0,0 +1,54 @@
1
+ # PullAI Python SDK
2
+
3
+ ```bash
4
+ pip install pullai
5
+ ```
6
+
7
+ Requires the server running: `pullai serve`
8
+
9
+ ---
10
+
11
+ ## Quick start
12
+
13
+ ```python
14
+ from pullai import PullAI
15
+
16
+ ai = PullAI()
17
+
18
+ # List installed models
19
+ ai.models()
20
+
21
+ # Single message (streaming)
22
+ ai.chat("llm/mistral:7b", "What is Python?")
23
+
24
+ # Multi-turn conversation (model remembers everything)
25
+ conv = ai.conversation("llm/mistral:7b")
26
+ conv.say("My name is Marco and I love Python.")
27
+ conv.say("What is my name?") # → "Your name is Marco."
28
+ conv.say("What language do I love?") # → "You love Python."
29
+ conv.clear() # wipe memory, start fresh
30
+
31
+ # Generate an image
32
+ ai.image("image/sdxl", "an astronaut cat on Mars", "cat.png")
33
+
34
+ # Transcribe audio
35
+ ai.transcribe("audio/whisper:large", "meeting.mp3")
36
+
37
+ # Text to speech
38
+ ai.speak("audio/kokoro", "Hello!", "hello.wav")
39
+
40
+ # Generate a video
41
+ ai.video("video/cogvideo:5b", "a panda running", "panda.mp4")
42
+
43
+ # Generate a 3D model from text
44
+ ai.model3d("3d/shap-e", "chair.ply", description="a wooden chair")
45
+
46
+ # Generate a 3D model from image
47
+ ai.model3d("3d/triposr", "chair.obj", image="photo.jpg")
48
+ ```
49
+
50
+ ---
51
+
52
+ ## License
53
+
54
+ Apache-2.0 © Metiu — PullAI Contributors
@@ -0,0 +1,4 @@
1
"""Public package surface for the PullAI SDK."""

from .client import Conversation, PullAI

__all__ = ["PullAI", "Conversation"]
__version__ = "0.3.5"
@@ -0,0 +1,519 @@
1
+ """
2
+ PullAI Python SDK
3
+ Requires the server to be running: pullai serve
4
+ """
5
+
6
+ from __future__ import annotations
7
+
8
+ import base64
9
+ import json
10
+ import urllib.request
11
+ import urllib.error
12
+ from pathlib import Path
13
+ from typing import List, Optional
14
+
15
+
16
class PullAI:
    """
    Python client for PullAI — run AI models locally.

    Start the server first with: pullai serve

    Example:
        ai = PullAI()
        ai.models()
        ai.chat("llm/mistral:7b", "Hello!")
    """

    def __init__(self, port: int = 11500) -> None:
        """
        Create the PullAI client.

        Args:
            port (int): Port of the PullAI server. Default is 11500.
                Start the server with: pullai serve --port 11500

        Example:
            ai = PullAI()           # default port 11500
            ai = PullAI(port=8080)  # custom port
        """
        # The SDK only ever talks to a locally running `pullai serve`.
        self._base: str = f"http://127.0.0.1:{port}"

    # ──────────────────────────────────────────────────────────────────────
    # MODELS
    # ──────────────────────────────────────────────────────────────────────

    def models(self) -> None:
        """
        Print a table of all locally installed models.

        No arguments needed. Shows name, size and format of every
        downloaded model.

        Example:
            ai.models()

            # Output:
            # 📦 3 models installed
            # ──────────────────────────────────────────────────
            #  1   llm/mistral:7b        4.1 GB  gguf
            #  2   image/sdxl:latest     6.5 GB  gguf
            #  3   audio/whisper:large   1.5 GB  safetensors
        """
        response: dict = self._get("/api/models")
        # `or []` also covers an explicit null in the JSON response.
        items: List[dict] = response.get("models") or []

        if not items:
            print("No models installed.")
            print("Download one with: pullai pull llm/mistral:7b")
            return

        print(f"\n📦 {len(items)} model{'s' if len(items) != 1 else ''} installed")
        print("─" * 50)
        for i, m in enumerate(items, 1):
            ref: str = f"{m.get('type','?')}/{m.get('name','?')}:{m.get('tag','?')}"
            size: str = _human_size(m.get("size_bytes", 0))
            fmt: str = m.get("format", "")
            print(f" {i:<3} {ref:<32} {size:>8} {fmt}")
        print()

    # ──────────────────────────────────────────────────────────────────────
    # CHAT — single message
    # ──────────────────────────────────────────────────────────────────────

    def chat(self, model: str, message: str) -> str:
        """
        Send a single message to a language model and get a reply.

        Tokens are printed in real time as the model generates them
        (streaming). For a multi-turn conversation where the model
        remembers previous messages, use conversation() instead.

        Args:
            model (str): The model to use, e.g. "llm/mistral:7b",
                "llm/llama3:8b", "llm/phi3:mini", "llm/qwen:0.5b".
            message (str): The text to send to the model.

        Returns:
            str: The full reply as a string.

        Example:
            ai.chat("llm/mistral:7b", "What is Python?")
            ai.chat("llm/phi3:mini", "Write a poem about the sea")
        """
        return self._stream(model, [{"role": "user", "content": message}])

    # ──────────────────────────────────────────────────────────────────────
    # CONVERSATION — multi-turn chat with memory
    # ──────────────────────────────────────────────────────────────────────

    def conversation(
        self,
        model: str,
        system: str = "You are a helpful assistant.",
    ) -> "Conversation":
        """
        Start a multi-turn conversation with a language model.

        Unlike chat(), the model remembers everything said so far.

        Args:
            model (str): The model to use, e.g. "llm/mistral:7b".
            system (str): Instructions that define how the model behaves.
                Sent once at the start and never changes.
                Default: "You are a helpful assistant."

        Returns:
            Conversation: An object with a .say() method to send messages.

        Example:
            conv = ai.conversation("llm/mistral:7b")
            conv.say("My name is Marco.")
            conv.say("What is my name?")  # model remembers

            conv = ai.conversation(
                "llm/mistral:7b",
                system="You are a sarcastic assistant who answers very briefly."
            )
        """
        return Conversation(model=model, system=system, client=self)

    # ──────────────────────────────────────────────────────────────────────
    # IMAGE
    # ──────────────────────────────────────────────────────────────────────

    def image(
        self,
        model: str,
        description: str,
        save_to: str,
        steps: int = 20,
    ) -> None:
        """
        Generate an image from a text description and save it to a file.

        Args:
            model (str): The model to use, e.g. "image/sdxl",
                "image/flux:schnell", "image/openjourney".
            description (str): What the image should contain.
                Example: "an astronaut cat on Mars, digital art"
            save_to (str): Path where the image will be saved (.png).
            steps (int): Generation steps. Default 20.
                More steps = better quality but slower.
                Recommended: 20 (fast) · 30 (good) · 50 (best)

        Example:
            ai.image("image/sdxl", "a purple sunset over the ocean", "sunset.png")
            ai.image("image/flux:schnell", "medieval castle", "castle.png", steps=30)
        """
        self._generate_media(model, prompt=description, output=save_to, steps=steps)

    # ──────────────────────────────────────────────────────────────────────
    # TRANSCRIBE
    # ──────────────────────────────────────────────────────────────────────

    def transcribe(
        self,
        model: str,
        audio_file: str,
        save_to: Optional[str] = None,
    ) -> str:
        """
        Transcribe an audio file to text (Speech-to-Text) using Whisper.

        Args:
            model (str): The Whisper model to use.
                "audio/whisper:large" (most accurate) or
                "audio/whisper:base" (faster).
            audio_file (str): Path to the audio file (.mp3 .wav .flac .m4a .ogg).
            save_to (str, optional): Path to save the transcription as .txt.

        Returns:
            str: The transcribed text. NOTE(review): when *save_to* is given
                and the server returns the payload inline, the saved file's
                path is returned instead of the text — confirm against the
                server's /api/generate contract.

        Example:
            text = ai.transcribe("audio/whisper:large", "meeting.mp3")
            ai.transcribe("audio/whisper:base", "note.wav", save_to="note.txt")
        """
        return self._generate_media(model, input_file=audio_file, output=save_to)

    # ──────────────────────────────────────────────────────────────────────
    # SPEAK
    # ──────────────────────────────────────────────────────────────────────

    def speak(
        self,
        model: str,
        text: str,
        save_to: str,
    ) -> None:
        """
        Convert text to speech and save the audio as a .wav file.

        Args:
            model (str): The TTS engine to use.
                "audio/kokoro" (high quality, recommended) or
                "audio/bark" (very expressive, slower).
            text (str): The text to convert to speech.
            save_to (str): Path where the audio file will be saved (.wav).

        Example:
            ai.speak("audio/kokoro", "Good morning!", "morning.wav")
            ai.speak("audio/bark", "Hello everyone!", "hello.wav")
        """
        self._generate_media(model, prompt=text, output=save_to)

    # ──────────────────────────────────────────────────────────────────────
    # VIDEO
    # ──────────────────────────────────────────────────────────────────────

    def video(
        self,
        model: str,
        description: str,
        save_to: str,
        steps: int = 20,
    ) -> None:
        """
        Generate a video from a text description and save it as .mp4.

        Warning: video generation can take several minutes even with a GPU.

        Args:
            model (str): The model to use.
                "video/cogvideo:5b" (high quality) or
                "video/animatediff:v3" (faster).
            description (str): What the video should show.
            save_to (str): Path where the video will be saved (.mp4).
            steps (int): Generation steps. Default 20.

        Example:
            ai.video("video/cogvideo:5b", "a horse galloping", "horse.mp4")
            ai.video("video/animatediff:v3", "ocean waves", "ocean.mp4", steps=30)
        """
        self._generate_media(model, prompt=description, output=save_to, steps=steps)

    # ──────────────────────────────────────────────────────────────────────
    # MODEL 3D
    # ──────────────────────────────────────────────────────────────────────

    def model3d(
        self,
        model: str,
        save_to: str,
        description: Optional[str] = None,
        image: Optional[str] = None,
    ) -> None:
        """
        Generate a 3D model from text or from an image.

        You must provide at least one of 'description' or 'image'.

        Args:
            model (str): The 3D model to use.
                "3d/triposr" (image → 3D, fast), "3d/shap-e" (text → 3D),
                "3d/lgm" (image → 3D, quality), "3d/trellis" (text/image).
            save_to (str): Path where the 3D model will be saved
                (.obj → TripoSR, .ply → Shap-E/LGM, .glb → TRELLIS).
            description (str, optional): Text description of the object.
            image (str, optional): Path to a photo of the object.
                Best with plain white background.

        Raises:
            ValueError: If neither 'description' nor 'image' is given.

        Example:
            ai.model3d("3d/shap-e", "chair.ply", description="a wooden chair")
            ai.model3d("3d/triposr", "chair.obj", image="photo.jpg")
        """
        if not description and not image:
            raise ValueError("Provide at least 'description' or 'image'.")
        self._generate_media(model, prompt=description or "", input_file=image, output=save_to)

    # ──────────────────────────────────────────────────────────────────────
    # INTERNALS
    # ──────────────────────────────────────────────────────────────────────

    def _stream(self, model: str, messages: List[dict]) -> str:
        """Stream a completion for *messages*, printing tokens as they arrive.

        Returns the concatenated reply. Raises ConnectionError when the
        server is unreachable and RuntimeError on a server-side [ERROR] event.
        """
        # The server takes a flat prompt, so the chat history is rendered
        # as "[Role]: content" lines.
        prompt: str = "\n".join(
            f"[{m['role'].capitalize()}]: {m['content']}" for m in messages
        )
        body: bytes = json.dumps({
            "model": model, "prompt": prompt, "stream": True,
        }).encode()
        req = urllib.request.Request(
            f"{self._base}/api/generate", data=body,
            method="POST", headers={"Content-Type": "application/json"},
        )
        tokens: List[str] = []
        try:
            with urllib.request.urlopen(req, timeout=600) as resp:
                for raw in resp:
                    line: str = raw.decode("utf-8").rstrip("\n\r")
                    # Server-sent events: only "data: ..." lines carry payload.
                    if not line.startswith("data: "):
                        continue
                    payload: str = line[6:]
                    if payload == "[DONE]":
                        break
                    if payload.startswith("[ERROR]"):
                        # Fix: strip exactly the 7-char marker plus any
                        # separator. The previous `payload[8:]` dropped the
                        # first character of the message whenever no
                        # separator followed the marker.
                        raise RuntimeError(payload[len("[ERROR]"):].lstrip(" :"))
                    print(payload, end="", flush=True)
                    tokens.append(payload)
        except urllib.error.URLError as exc:
            raise ConnectionError(
                f"PullAI server not reachable at {self._base}\n"
                "Start it with: pullai serve"
            ) from exc
        print()
        return "".join(tokens)

    def _generate_media(
        self,
        model: str,
        prompt: str = "",
        input_file: Optional[str] = None,
        output: Optional[str] = None,
        steps: int = 20,
    ) -> str:
        """POST a non-streaming generation request and handle the result.

        Returns the local output path when a file was written, the decoded
        text when the server returned inline data without an output path,
        or the server-reported path otherwise ("" if none). Raises
        RuntimeError on server errors, ConnectionError when unreachable.
        """
        body: bytes = json.dumps({
            "model": model,
            "prompt": prompt,
            "input_file": str(input_file) if input_file else "",
            "output_file": str(output) if output else "",
            "steps": steps,
        }).encode()
        req = urllib.request.Request(
            f"{self._base}/api/generate", data=body,
            method="POST", headers={"Content-Type": "application/json"},
        )
        try:
            with urllib.request.urlopen(req, timeout=900) as resp:
                data: dict = json.loads(resp.read())
        except urllib.error.HTTPError as exc:
            # Fix: the error body is not guaranteed to be a JSON object.
            # The old code did json.loads(exc.read()).get(...) unguarded,
            # which masked the real server error with a JSONDecodeError
            # (or AttributeError for non-dict JSON).
            raw_body: bytes = exc.read()
            try:
                msg: str = json.loads(raw_body).get("error", str(exc))
            except (ValueError, AttributeError):
                msg = raw_body.decode("utf-8", errors="replace").strip() or str(exc)
            raise RuntimeError(msg) from exc
        except urllib.error.URLError as exc:
            raise ConnectionError(
                f"PullAI server not reachable at {self._base}\n"
                "Start it with: pullai serve"
            ) from exc

        if "error" in data:
            raise RuntimeError(data["error"])

        # Inline payload + requested output path → write the file locally.
        if data.get("data") and output:
            Path(output).write_bytes(base64.b64decode(data["data"]))
            print(f"✅ Saved to: {output}")
            return str(output)

        # Inline payload with no output path → decode as text (transcripts).
        if data.get("data"):
            return base64.b64decode(data["data"]).decode("utf-8", errors="replace")

        return data.get("saved_to", "")

    def _get(self, path: str) -> dict:
        """GET a JSON endpoint; raise ConnectionError if the server is down."""
        try:
            with urllib.request.urlopen(f"{self._base}{path}", timeout=10) as r:
                return json.loads(r.read())  # type: ignore[no-any-return]
        except urllib.error.URLError as exc:
            raise ConnectionError(
                f"PullAI server not reachable at {self._base}\n"
                "Start it with: pullai serve"
            ) from exc

    def __repr__(self) -> str:
        return f"<PullAI {self._base}>"
423
+
424
+
425
+ # ──────────────────────────────────────────────────────────────────────────────
426
+ # CONVERSATION
427
+ # ──────────────────────────────────────────────────────────────────────────────
428
+
429
class Conversation:
    """
    A multi-turn chat session with a language model.

    Every message exchanged so far is kept and re-sent with each turn,
    so the model can refer back to anything said earlier.

    Do not create this class directly — use ai.conversation() instead.

    Example:
        conv = ai.conversation("llm/mistral:7b")
        conv.say("My name is Marco and I love Python.")
        conv.say("What is my name?")           # → "Your name is Marco."
        conv.say("What language do I love?")   # → "You love Python."
        conv.clear()                           # wipe history, start fresh
    """

    def __init__(self, model: str, system: str, client: "PullAI") -> None:
        self._model: str = model
        self._client: PullAI = client
        # The system prompt is always the first entry and survives clear().
        self._history: List[dict] = [{"role": "system", "content": system}]

    def say(self, message: str) -> str:
        """
        Send a message and get a reply; the full history is kept automatically.

        Args:
            message (str): The message to send to the model.

        Returns:
            str: The model's reply. Also printed in real time while streaming.

        Example:
            conv = ai.conversation("llm/mistral:7b")
            conv.say("My favourite colour is blue.")
            conv.say("What is my favourite colour?")  # → "blue"
        """
        self._history.append({"role": "user", "content": message})
        answer: str = self._client._stream(self._model, self._history)
        self._history.append({"role": "assistant", "content": answer})
        return answer

    def clear(self) -> None:
        """
        Wipe the conversation history and start fresh.

        The system prompt is kept; every user and assistant message is dropped.

        Example:
            conv.say("My name is Marco.")
            conv.clear()
            conv.say("What is my name?")  # model no longer knows
        """
        # Keep only index 0 — the system prompt.
        self._history = self._history[:1]

    def history(self) -> List[dict]:
        """
        Return the full conversation history as a list of messages.

        Each message is a dict with 'role' and 'content'.
        Roles: 'system', 'user', 'assistant'.

        Returns:
            List[dict]: All messages, ordered from oldest to newest
                (a shallow copy — mutating the list does not affect the
                conversation).

        Example:
            for msg in conv.history():
                print(f"{msg['role']}: {msg['content']}")
        """
        return self._history.copy()

    def __repr__(self) -> str:
        # One turn = a user message plus its assistant reply; the leading
        # system prompt is excluded from the count.
        completed_turns: int = (len(self._history) - 1) // 2
        return f"<Conversation model={self._model!r} turns={completed_turns}>"
510
+
511
+
512
+ # ──────────────────────────────────────────────────────────────────────────────
513
+
514
+ def _human_size(b: int) -> str:
515
+ if b >= 1_000_000_000:
516
+ return f"{b / 1e9:.1f} GB"
517
+ if b >= 1_000_000:
518
+ return f"{b / 1e6:.0f} MB"
519
+ return f"{b / 1e3:.0f} KB"
File without changes
@@ -0,0 +1,64 @@
1
+ Metadata-Version: 2.4
2
+ Name: pullai
3
+ Version: 0.3.5
4
+ Summary: Python SDK for PullAI — run AI models locally
5
+ Author: Metiu
6
+ License: Apache-2.0
7
+ Project-URL: Homepage, https://github.com/pullai/pullai
8
+ Requires-Python: >=3.8
9
+ Description-Content-Type: text/markdown
10
+
11
+ # PullAI Python SDK
12
+
13
+ ```bash
14
+ pip install pullai
15
+ ```
16
+
17
+ Requires the server running: `pullai serve`
18
+
19
+ ---
20
+
21
+ ## Quick start
22
+
23
+ ```python
24
+ from pullai import PullAI
25
+
26
+ ai = PullAI()
27
+
28
+ # List installed models
29
+ ai.models()
30
+
31
+ # Single message (streaming)
32
+ ai.chat("llm/mistral:7b", "What is Python?")
33
+
34
+ # Multi-turn conversation (model remembers everything)
35
+ conv = ai.conversation("llm/mistral:7b")
36
+ conv.say("My name is Marco and I love Python.")
37
+ conv.say("What is my name?") # → "Your name is Marco."
38
+ conv.say("What language do I love?") # → "You love Python."
39
+ conv.clear() # wipe memory, start fresh
40
+
41
+ # Generate an image
42
+ ai.image("image/sdxl", "an astronaut cat on Mars", "cat.png")
43
+
44
+ # Transcribe audio
45
+ ai.transcribe("audio/whisper:large", "meeting.mp3")
46
+
47
+ # Text to speech
48
+ ai.speak("audio/kokoro", "Hello!", "hello.wav")
49
+
50
+ # Generate a video
51
+ ai.video("video/cogvideo:5b", "a panda running", "panda.mp4")
52
+
53
+ # Generate a 3D model from text
54
+ ai.model3d("3d/shap-e", "chair.ply", description="a wooden chair")
55
+
56
+ # Generate a 3D model from image
57
+ ai.model3d("3d/triposr", "chair.obj", image="photo.jpg")
58
+ ```
59
+
60
+ ---
61
+
62
+ ## License
63
+
64
+ Apache-2.0 © Metiu — PullAI Contributors
@@ -0,0 +1,9 @@
1
+ README.md
2
+ pyproject.toml
3
+ pullai/__init__.py
4
+ pullai/client.py
5
+ pullai/py.typed
6
+ pullai.egg-info/PKG-INFO
7
+ pullai.egg-info/SOURCES.txt
8
+ pullai.egg-info/dependency_links.txt
9
+ pullai.egg-info/top_level.txt
@@ -0,0 +1 @@
1
+ pullai
@@ -0,0 +1,20 @@
1
+ [build-system]
2
+ requires = ["setuptools>=61", "wheel"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "pullai"
7
+ version = "0.3.5"
8
+ description = "Python SDK for PullAI — run AI models locally"
9
+ readme = "README.md"
10
+ license = { text = "Apache-2.0" }
11
+ authors = [{ name = "Metiu" }]
12
+ requires-python = ">=3.8"
13
+ dependencies = []
14
+
15
+ [project.urls]
16
+ Homepage = "https://github.com/pullai/pullai"
17
+
18
+ [tool.setuptools.packages.find]
19
+ where = ["."]
20
+ include = ["pullai*"]
pullai-0.3.5/setup.cfg ADDED
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+