pop-python 1.0.4__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58) hide show
  1. POP/Embedder.py +121 -119
  2. POP/__init__.py +34 -16
  3. POP/api_registry.py +148 -0
  4. POP/context.py +47 -0
  5. POP/env_api_keys.py +33 -0
  6. POP/models.py +20 -0
  7. POP/prompt_function.py +378 -0
  8. POP/prompts/__init__.py +8 -0
  9. POP/prompts/openai-json_schema_generator.md +12 -161
  10. POP/providers/__init__.py +33 -0
  11. POP/providers/deepseek_client.py +69 -0
  12. POP/providers/doubao_client.py +101 -0
  13. POP/providers/gemini_client.py +119 -0
  14. POP/providers/llm_client.py +60 -0
  15. POP/providers/local_client.py +45 -0
  16. POP/providers/ollama_client.py +129 -0
  17. POP/providers/openai_client.py +100 -0
  18. POP/stream.py +77 -0
  19. POP/utils/__init__.py +9 -0
  20. POP/utils/event_stream.py +43 -0
  21. POP/utils/http_proxy.py +16 -0
  22. POP/utils/json_parse.py +21 -0
  23. POP/utils/oauth/__init__.py +31 -0
  24. POP/utils/overflow.py +33 -0
  25. POP/utils/sanitize_unicode.py +18 -0
  26. POP/utils/validation.py +23 -0
  27. POP/utils/web_snapshot.py +108 -0
  28. {pop_python-1.0.4.dist-info → pop_python-1.1.0.dist-info}/METADATA +160 -57
  29. pop_python-1.1.0.dist-info/RECORD +42 -0
  30. {pop_python-1.0.4.dist-info → pop_python-1.1.0.dist-info}/WHEEL +1 -1
  31. pop_python-1.1.0.dist-info/top_level.txt +2 -0
  32. tests/__init__.py +0 -0
  33. tests/conftest.py +47 -0
  34. tests/test_api_registry.py +36 -0
  35. tests/test_context_utils.py +54 -0
  36. tests/test_embedder.py +64 -0
  37. tests/test_env_api_keys.py +15 -0
  38. tests/test_prompt_function.py +98 -0
  39. tests/test_web_snapshot.py +47 -0
  40. POP/LLMClient.py +0 -410
  41. POP/POP.py +0 -400
  42. POP/prompts/2024-11-19-content_finder.md +0 -46
  43. POP/prompts/2024-11-19-get_content.md +0 -71
  44. POP/prompts/2024-11-19-get_title_and_url.md +0 -62
  45. POP/prompts/CLI_AI_helper.md +0 -75
  46. POP/prompts/content_finder.md +0 -42
  47. POP/prompts/corpus_splitter.md +0 -28
  48. POP/prompts/function_code_generator.md +0 -51
  49. POP/prompts/function_description_generator.md +0 -45
  50. POP/prompts/get_content.md +0 -75
  51. POP/prompts/get_title_and_url.md +0 -62
  52. POP/prompts/openai-function_description_generator.md +0 -126
  53. POP/prompts/openai-prompt_generator.md +0 -49
  54. POP/schemas/biomedical_ner_extractor.json +0 -37
  55. POP/schemas/entity_extraction_per_sentence.json +0 -92
  56. pop_python-1.0.4.dist-info/RECORD +0 -26
  57. pop_python-1.0.4.dist-info/top_level.txt +0 -1
  58. {pop_python-1.0.4.dist-info → pop_python-1.1.0.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,16 @@
1
+ """
2
+ HTTP proxy utilities.
3
+
4
+ This module acts as a placeholder for HTTP proxy configuration. In
5
+ pi-ai the proxy module centralises configuration for upstream proxies
6
+ and TLS settings. Here we expose a simple helper that returns a
7
+ requests session configured with environment proxy variables.
8
+ """
9
+
10
+ import requests
11
+
12
def get_session_with_proxy() -> requests.Session:
    """Build a :class:`requests.Session` that honours environment proxies.

    With ``trust_env`` enabled the session picks up ``HTTP_PROXY`` /
    ``HTTPS_PROXY`` (and related) environment variables for every request.
    """
    proxied = requests.Session()
    proxied.trust_env = True
    return proxied
@@ -0,0 +1,21 @@
1
+ """
2
+ JSON parsing utilities.
3
+
4
+ In the pi‑ai project this module contains helpers for streaming JSON
5
+ parsing and error handling. Here we provide simple functions to
6
+ parse JSON strings into Python objects and to safely extract values.
7
+ """
8
+
9
+ import json
10
+ from typing import Any, Dict, Optional
11
+
12
def parse_json(json_str: str) -> Any:
    """Deserialize a JSON document into the corresponding Python object.

    Raises
    ------
    ValueError
        ``json.JSONDecodeError`` (a ``ValueError`` subclass) when the
        input is not valid JSON.
    """
    return json.loads(json_str)
18
+
19
def get_value(data: Dict[str, Any], key: str, default: Optional[Any] = None) -> Any:
    """Look up *key* in *data*, falling back to *default* when absent."""
    if key in data:
        return data[key]
    return default
@@ -0,0 +1,31 @@
1
+ """
2
+ OAuth utilities for the restructured POP project.
3
+
4
+ The POP framework includes an ``oauth`` package to mirror pi‑ai’s
5
+ support for provider credential configuration and OAuth flows. In
6
+ this simplified version, the module provides stubs that can be
7
+ extended to support specific OAuth providers.
8
+ """
9
+
10
+ from typing import Dict, Any
11
+
12
def configure_oauth(provider: str, **kwargs: Any) -> Dict[str, Any]:
    """Build an OAuth configuration mapping for *provider*.

    Parameters
    ----------
    provider:
        Name of the OAuth provider (e.g. ``"google"``, ``"microsoft"``).
    kwargs:
        Provider-specific keyword options.

    Returns
    -------
    dict
        Configuration details for the provider. This is currently a
        stub that always returns an empty dict; extend it to wire up
        real OAuth flows.
    """
    # Placeholder: a concrete implementation would assemble client IDs,
    # endpoints and scopes for the requested provider here.
    return {}
POP/utils/overflow.py ADDED
@@ -0,0 +1,33 @@
1
+ """
2
+ Overflow helpers.
3
+
4
+ This stub module mirrors pi‑ai's overflow handlers for managing
5
+ large outputs. In this rewrite we provide a basic function to
6
+ check if a message exceeds a threshold and to truncate it.
7
+ """
8
+
9
+ from typing import List
10
+
11
def truncate_messages(messages: List[str], max_length: int = 4096) -> List[str]:
    """Return the longest prefix of *messages* fitting within *max_length*.

    Messages are kept whole: iteration stops at the first message whose
    inclusion would push the combined character count past ``max_length``;
    that message and every later one are dropped (no partial truncation
    of individual strings is performed).

    Parameters
    ----------
    messages:
        A list of message strings.
    max_length:
        The maximum total number of characters allowed.

    Returns
    -------
    List[str]
        The prefix of *messages* whose concatenated length is at most
        ``max_length``.
    """
    kept: List[str] = []
    remaining = max_length
    for text in messages:
        if len(text) > remaining:
            break
        kept.append(text)
        remaining -= len(text)
    return kept
@@ -0,0 +1,18 @@
1
+ """
2
+ Unicode sanitisation helpers.
3
+
4
+ Language models occasionally emit characters outside the basic
5
+ multilingual plane. To ensure consistent downstream processing,
6
+ this module provides helper functions to normalise and strip
7
+ unsupported Unicode characters.
8
+ """
9
+
10
+ import unicodedata
11
+
12
def sanitize(text: str) -> str:
    """Strip diacritics from *text*.

    Applies NFKD normalisation so accented characters decompose into a
    base character plus combining marks, then discards the combining
    marks, leaving plain base characters.
    """
    decomposed = unicodedata.normalize("NFKD", text)
    return "".join(ch for ch in decomposed if not unicodedata.combining(ch))
@@ -0,0 +1,23 @@
1
+ """
2
+ Validation helpers.
3
+
4
+ This module contains light-weight validation routines for inputs,
5
+ outputs and schemas. The original pi‑ai project uses zod for type
6
+ checking; here we use simple runtime checks and raise ValueError on
7
+ failure.
8
+ """
9
+
10
+ from typing import Any
11
+ import json
12
+
13
def validate_not_empty(value: Any, message: str = "Value must not be empty") -> None:
    """Raise ``ValueError`` with *message* when *value* is falsy.

    Falsy covers empty containers/strings, ``0`` and ``None``.
    """
    if value:
        return
    raise ValueError(message)
17
+
18
def validate_json(value: str, message: str = "Invalid JSON") -> Any:
    """Parse *value* as JSON, returning the resulting Python object.

    Parameters
    ----------
    value:
        The JSON document to parse.
    message:
        Prefix for the error message on failure.

    Raises
    ------
    ValueError
        If *value* is not valid JSON. The original
        ``json.JSONDecodeError`` is chained as the cause so its
        position information (line/column) is preserved for debugging.
    """
    try:
        return json.loads(value)
    except json.JSONDecodeError as e:
        # Chain the decode error instead of discarding it (`raise ... from e`).
        raise ValueError(f"{message}: {e}") from e
@@ -0,0 +1,108 @@
1
+ """
2
+ Web snapshot utility.
3
+
4
+ This module wraps the ``get_text_snapshot`` function from the original
5
+ POP project. It uses the r.jina.ai service to fetch text snapshots
6
+ of arbitrary web pages and supports various flags to control the
7
+ formatting of the returned content.
8
+ """
9
+
10
+ import requests
11
+ from os import getenv
12
+ from typing import List, Optional
13
+
14
def get_text_snapshot(
    web_url: str,
    use_api_key: bool = True,
    return_format: str = "default",
    timeout: int = 0,
    target_selector: Optional[List[str]] = None,
    wait_for_selector: Optional[List[str]] = None,
    exclude_selector: Optional[List[str]] = None,
    remove_image: bool = False,
    links_at_end: bool = False,
    images_at_end: bool = False,
    json_response: bool = False,
    image_caption: bool = False,
    cookie: Optional[str] = None,
) -> str:
    """Fetch a text snapshot of the webpage using r.jina.ai.

    Parameters
    ----------
    web_url:
        The URL of the page to snapshot. This should include the
        protocol (http:// or https://) and any path or query parameters.
    use_api_key:
        Whether to send the JINAAI_API_KEY from the environment as an
        authorization header. If the variable is unset, the header is
        simply omitted instead of raising.
    return_format:
        The return format accepted by r.jina.ai. Defaults to ``default``.
    timeout:
        Snapshot timeout in seconds (sent as the ``X-Timeout`` header).
        0 means no timeout header is sent.
    target_selector:
        A list of CSS selectors to target specific content within the page.
    wait_for_selector:
        A list of CSS selectors to wait for before capturing the snapshot.
    exclude_selector:
        A list of CSS selectors to exclude from the snapshot.
    remove_image:
        If ``True``, remove all images from the snapshot.
    links_at_end:
        If ``True``, append a links summary to the end of the snapshot.
    images_at_end:
        If ``True``, append an images summary to the end of the snapshot.
    json_response:
        If ``True``, request the snapshot as JSON rather than plain text.
    image_caption:
        If ``True``, include generated alt text for images.
    cookie:
        An optional cookie string to include in the request.

    Returns
    -------
    str
        The snapshot text, or an error message if the request fails.
    """
    target_selector = target_selector or []
    wait_for_selector = wait_for_selector or []
    exclude_selector = exclude_selector or []

    # Only build an Authorization header when the key is actually present:
    # 'Bearer ' + None would raise a TypeError when JINAAI_API_KEY is unset.
    api_key = getenv("JINAAI_API_KEY") if use_api_key else None
    auth_header = f"Bearer {api_key}" if api_key else None

    header_values = {
        "Authorization": auth_header,
        "X-Return-Format": None if return_format == "default" else return_format,
        # Header values must be strings; requests rejects int values.
        "X-Timeout": str(timeout) if timeout > 0 else None,
        "X-Target-Selector": ",".join(target_selector) if target_selector else None,
        "X-Wait-For-Selector": ",".join(wait_for_selector) if wait_for_selector else None,
        "X-Remove-Selector": ",".join(exclude_selector) if exclude_selector else None,
        "X-Retain-Images": "none" if remove_image else None,
        "X-With-Links-Summary": "true" if links_at_end else None,
        "X-With-Images-Summary": "true" if images_at_end else None,
        "Accept": "application/json" if json_response else None,
        "X-With-Generated-Alt": "true" if image_caption else None,
        "X-Set-Cookie": cookie if cookie else None,
    }

    # Drop unset headers so only meaningful flags are transmitted.
    headers = {key: value for key, value in header_values.items() if value is not None}

    try:
        api_url = f"https://r.jina.ai/{web_url}"
        response = requests.get(api_url, headers=headers)
        response.raise_for_status()
        return response.text
    except requests.exceptions.RequestException as e:
        return f"Error fetching text snapshot: {e}"
100
+
101
+
102
if __name__ == "__main__":
    # Quick manual check: pass a URL on the command line to fetch its snapshot.
    import sys

    if len(sys.argv) > 1:
        print(get_text_snapshot(sys.argv[1], use_api_key=False))
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pop-python
3
- Version: 1.0.4
3
+ Version: 1.1.0
4
4
  Summary: Prompt Oriented Programming (POP): reusable, composable prompt functions for LLMs.
5
5
  Home-page: https://github.com/sgt1796/POP
6
6
  Author: Guotai Shen
@@ -35,11 +35,11 @@ Dynamic: summary
35
35
  # Prompt Oriented Programming (POP)
36
36
 
37
37
  ```python
38
- from POP import PromptFunction
38
+ from pop import PromptFunction
39
39
 
40
40
  pf = PromptFunction(
41
41
  prompt="Draw a simple ASCII art of <<<object>>>.",
42
- client = "openai"
42
+ client="openai",
43
43
  )
44
44
 
45
45
  print(pf.execute(object="a cat"))
@@ -61,7 +61,7 @@ print(pf.execute(object="a rocket"))
61
61
  ---
62
62
  Reusable, composable prompt functions for LLM workflows.
63
63
 
64
- This release cleans the architecture, moves all LLM client logic to a separate `LLMClient` module, and extends multi-LLM backend support.
64
+ This 1.1.0 dev update restructures POP into small, focused modules and adds a provider registry inspired by pi-mono's `ai` package.
65
65
 
66
66
  PyPI:
67
67
  [https://pypi.org/project/pop-python/](https://pypi.org/project/pop-python/)
@@ -74,21 +74,19 @@ GitHub:
74
74
  ## Table of Contents
75
75
 
76
76
  1. [Overview](#1-overview)
77
- 2. [Major Updates](#2-major-updates)
78
- 3. [Features](#3-features)
79
- 4. [Installation](#4-installation)
80
- 5. [Setup](#5-setup)
81
- 6. [PromptFunction](#6-promptfunction)
82
-
83
- * Placeholders
84
- * Reserved Keywords
85
- * Executing prompts
86
- * Improving prompts
87
- 7. [Function Schema Generation](#7-function-schema-generation)
88
- 8. [Embeddings](#8-embeddings)
89
- 9. [Web Snapshot Utility](#9-web-snapshot-utility)
90
- 10. [Examples](#10-examples)
91
- 11. [Contributing](#11-contributing)
77
+ 2. [Update Note](#2-update-note)
78
+ 3. [Major Updates](#3-major-updates)
79
+ 4. [Features](#4-features)
80
+ 5. [Installation](#5-installation)
81
+ 6. [Setup](#6-setup)
82
+ 7. [PromptFunction](#7-promptfunction)
83
+ 8. [Provider Registry](#8-provider-registry)
84
+ 9. [Tool Calling](#9-tool-calling)
85
+ 10. [Function Schema Generation](#10-function-schema-generation)
86
+ 11. [Embeddings](#11-embeddings)
87
+ 12. [Web Snapshot Utility](#12-web-snapshot-utility)
88
+ 13. [Examples](#13-examples)
89
+ 14. [Contributing](#14-contributing)
92
90
  ---
93
91
 
94
92
  # 1. Overview
@@ -102,43 +100,64 @@ Instead of scattering prompt strings across your codebase, POP lets you:
102
100
  * improve prompts using meta-prompting
103
101
  * generate OpenAI-compatible function schemas
104
102
  * use unified embedding tools
105
- * work with multiple LLM providers through `LLMClient` subclasses
103
+ * work with multiple LLM providers through a centralized registry
106
104
 
107
105
  POP is designed to be simple, extensible, and production-friendly.
108
106
 
109
107
  ---
110
108
 
111
- # 2. Major Updates
109
+ # 2. Update Note
112
110
 
113
- This version introduces structural and functional improvements:
111
+ **1.1.0-dev (February 5, 2026)**
114
112
 
115
- ### 2.1. LLMClient moved into its own module
113
+ * **Breaking import path**: use `pop` (lowercase) for imports. Example: `from pop import PromptFunction`.
114
+ * **Provider registry**: clients live under `pop/providers/` and are instantiated via `pop.api_registry`.
115
+ * **LLMClient base class**: now in `pop.providers.llm_client` (kept as an abstract base class).
116
116
 
117
- `LLMClient.py` now holds all LLM backends:
117
+ ---
118
+
119
+ # 3. Major Updates
120
+
121
+ ### 3.1. Modularized architecture
122
+
123
+ The project has been decomposed into small, focused modules:
124
+
125
+ * `pop/prompt_function.py`
126
+ * `pop/embedder.py`
127
+ * `pop/context.py`
128
+ * `pop/api_registry.py`
129
+ * `pop/providers/` (one provider per file)
130
+ * `pop/utils/`
131
+
132
+ This mirrors the structure in the pi-mono `ai` package for clarity and maintainability.
118
133
 
119
- * OpenAI
120
- * Gemini
121
- * Deepseek
122
- * Doubao
123
- * Local PyTorch stub
124
- * Extensible architecture for adding new backends
134
+ ### 3.2. Provider registry + per-provider clients
125
135
 
126
- ### 2.2. Expanded multi-LLM support
136
+ Each provider has its own adaptor (OpenAI, Gemini, DeepSeek, Doubao, Local, Ollama). The registry gives you:
127
137
 
128
- Each backend now has consistent interface behavior and multimodal (text + image) support where applicable.
138
+ * `list_providers()`
139
+ * `list_default_model()`
140
+ * `list_models()`
141
+ * `get_client()`
129
142
 
130
143
  ---
131
144
 
132
- # 3. Features
145
+ # 4. Features
133
146
 
134
147
  * **Reusable Prompt Functions**
135
148
  Use `<<<placeholder>>>` syntax to inject dynamic content.
136
149
 
137
150
  * **Multi-LLM Backend**
138
- Choose between OpenAI, Gemini, Deepseek, Doubao, or local models.
151
+ Choose between OpenAI, Gemini, DeepSeek, Doubao, Local, or Ollama.
152
+
153
+ * **Tool Calling**
154
+ Pass a tool schema list to `execute()` and receive tool-call arguments.
155
+
156
+ * **Multimodal (Text + Image)**
157
+ Pass `images=[...]` (URLs or base64) when the provider supports it.
139
158
 
140
159
  * **Prompt Improvement**
141
- Improve or rewrite prompts using Fabric-style metaprompts.
160
+ Improve or rewrite prompts using Fabric-style meta-prompts.
142
161
 
143
162
  * **Function Schema Generation**
144
163
  Convert natural language descriptions into OpenAI-function schemas.
@@ -151,7 +170,7 @@ Each backend now has consistent interface behavior and multimodal (text + image)
151
170
 
152
171
  ---
153
172
 
154
- # 4. Installation
173
+ # 5. Installation
155
174
 
156
175
  Install from PyPI:
157
176
 
@@ -169,7 +188,7 @@ pip install -e .
169
188
 
170
189
  ---
171
190
 
172
- # 5. Setup
191
+ # 6. Setup
173
192
 
174
193
  Create a `.env` file in your project root:
175
194
 
@@ -185,16 +204,16 @@ All clients automatically read keys from environment variables.
185
204
 
186
205
  ---
187
206
 
188
- # 6. PromptFunction
207
+ # 7. PromptFunction
189
208
 
190
209
  The core abstraction of POP is the `PromptFunction` class.
191
210
 
192
211
  ```python
193
- from POP import PromptFunction
212
+ from pop import PromptFunction
194
213
 
195
214
  pf = PromptFunction(
196
215
  sys_prompt="You are a helpful AI.",
197
- prompt="Give me a summary about <<<topic>>>."
216
+ prompt="Give me a summary about <<<topic>>>.",
198
217
  )
199
218
 
200
219
  print(pf.execute(topic="quantum biology"))
@@ -202,7 +221,7 @@ print(pf.execute(topic="quantum biology"))
202
221
 
203
222
  ---
204
223
 
205
- ## 6.1. Placeholder Syntax
224
+ ## 7.1. Placeholder Syntax
206
225
 
207
226
  Use angle-triple-brackets inside your prompt:
208
227
 
@@ -220,7 +239,7 @@ prompt = "Translate <<<sentence>>> to French."
220
239
 
221
240
  ---
222
241
 
223
- ## 6.2. Reserved Keywords
242
+ ## 7.2. Reserved Keywords
224
243
 
225
244
  Within `.execute()`, the following keyword arguments are **reserved** and should not be used as placeholder names:
226
245
 
@@ -228,6 +247,7 @@ Within `.execute()`, the following keyword arguments are **reserved** and should
228
247
  * `sys`
229
248
  * `fmt`
230
249
  * `tools`
250
+ * `tool_choice`
231
251
  * `temp`
232
252
  * `images`
233
253
  * `ADD_BEFORE`
@@ -237,32 +257,104 @@ Most keywords are used for parameters. `ADD_BEFORE` and `ADD_AFTER` will attach
237
257
 
238
258
  ---
239
259
 
240
- ## 6.3. Executing prompts
260
+ ## 7.3. Executing prompts
241
261
 
242
262
  ```python
243
263
  result = pf.execute(
244
264
  topic="photosynthesis",
245
- model="gpt-4o-mini",
246
- temp=0.3
265
+ model="gpt-5-mini",
266
+ temp=0.3,
247
267
  )
248
268
  ```
249
269
 
250
270
  ---
251
271
 
252
- ## 6.4. Improving Prompts
272
+ ## 7.4. Improving Prompts
253
273
 
254
274
  You can ask POP to rewrite or enhance your system prompt:
255
275
 
256
276
  ```python
257
- better = pf._improve_prompt()
277
+ better = pf.improve_prompt()
258
278
  print(better)
259
279
  ```
260
280
 
261
- This uses a Fabric-inspired meta-prompt bundled in the `prompts/` directory.
281
+ This uses a Fabric-inspired meta-prompt bundled in the `pop/prompts/` directory.
282
+
283
+ ---
284
+
285
+ # 8. Provider Registry
286
+
287
+ Use the registry to list providers/models or instantiate clients.
288
+
289
+ ```python
290
+ from pop import list_providers, list_models, list_default_model, get_client
291
+
292
+ print(list_providers())
293
+ print(list_default_model())
294
+ print(list_models())
295
+
296
+ client = get_client("openai")
297
+ ```
298
+
299
+ Non-default model example:
300
+
301
+ ```python
302
+ from pop import PromptFunction, get_client
303
+
304
+ client = get_client("gemini", "gemini-2.5-pro")
305
+
306
+ pf = PromptFunction(prompt="Draw a rocket.", client=client)
307
+ print(pf.execute())
308
+ ```
309
+
310
+ Direct provider class example:
311
+
312
+ ```python
313
+ from pop import PromptFunction
314
+ from pop.providers.gemini_client import GeminiClient
315
+
316
+ pf = PromptFunction(prompt="Draw a rocket.", client=GeminiClient(model="gemini-2.5-pro"))
317
+ print(pf.execute())
318
+ ```
319
+
320
+ ---
321
+
322
+ # 9. Tool Calling
323
+
324
+ ```python
325
+ from pop import PromptFunction
326
+
327
+ tools = [
328
+ {
329
+ "type": "function",
330
+ "function": {
331
+ "name": "create_reminder",
332
+ "description": "Create a reminder.",
333
+ "parameters": {
334
+ "type": "object",
335
+ "properties": {
336
+ "description": {"type": "string"},
337
+ "when": {"type": "string"},
338
+ },
339
+ "required": ["description"],
340
+ },
341
+ },
342
+ }
343
+ ]
344
+
345
+ pf = PromptFunction(
346
+ sys_prompt="You are a helpful assistant.",
347
+ prompt="<<<input>>>",
348
+ client="openai",
349
+ )
350
+
351
+ result = pf.execute(input="Remind me to walk at 9am.", tools=tools)
352
+ print(result)
353
+ ```
262
354
 
263
355
  ---
264
356
 
265
- # 7. Function Schema Generation
357
+ # 10. Function Schema Generation
266
358
 
267
359
  POP supports generating **OpenAI function-calling schemas** from natural language descriptions.
268
360
 
@@ -279,16 +371,16 @@ What this does:
279
371
  * Applies a standard meta-prompt
280
372
  * Uses the selected LLM backend
281
373
  * Produces a valid JSON Schema for OpenAI function calling
282
- * Optionally saves it under `functions/`
374
+ * Optionally saves it under `schemas/`
283
375
 
284
376
  ---
285
377
 
286
- # 8. Embeddings
378
+ # 11. Embeddings
287
379
 
288
380
  POP includes a unified embedding interface:
289
381
 
290
382
  ```python
291
- from POP.Embedder import Embedder
383
+ from pop import Embedder
292
384
 
293
385
  embedder = Embedder(use_api="openai")
294
386
  vecs = embedder.get_embedding(["hello world"])
@@ -304,10 +396,10 @@ Large inputs are chunked automatically when needed.
304
396
 
305
397
  ---
306
398
 
307
- # 9. Web Snapshot Utility
399
+ # 12. Web Snapshot Utility
308
400
 
309
401
  ```python
310
- from POP import get_text_snapshot
402
+ from pop.utils.web_snapshot import get_text_snapshot
311
403
 
312
404
  text = get_text_snapshot("https://example.com", image_caption=True)
313
405
  print(text[:500])
@@ -322,10 +414,10 @@ Supports:
322
414
 
323
415
  ---
324
416
 
325
- # 10. Examples
417
+ # 13. Examples
326
418
 
327
419
  ```python
328
- from POP import PromptFunction
420
+ from pop import PromptFunction
329
421
 
330
422
  pf = PromptFunction(prompt="Give me 3 creative names for a <<<thing>>>.")
331
423
 
@@ -333,9 +425,20 @@ print(pf.execute(thing="robot"))
333
425
  print(pf.execute(thing="new language"))
334
426
  ```
335
427
 
428
+ Multimodal example (provider must support images):
429
+
430
+ ```python
431
+ from pop import PromptFunction
432
+
433
+ image_b64 = "..." # base64-encoded image
434
+
435
+ pf = PromptFunction(prompt="Describe the image.", client="openai")
436
+ print(pf.execute(images=[image_b64]))
437
+ ```
438
+
336
439
  ---
337
440
 
338
- # 11. Contributing
441
+ # 14. Contributing
339
442
 
340
443
  Steps:
341
444
 
@@ -0,0 +1,42 @@
1
+ POP/Embedder.py,sha256=FyrFC-CDWv1DXr56zY7QYPXq8t9idq1_EUSi0hzil1U,9455
2
+ POP/__init__.py,sha256=NK4b-hVWuQHLG-7B78nMM0Xas5448qvUH-JFkS-8vbA,1013
3
+ POP/api_registry.py,sha256=aBLbmMhS32qQ8-1QR1nAOWdRAgZGjXYk29pb9Z0HgyA,4939
4
+ POP/context.py,sha256=qlsZ3vFmnTV3oh2Q8HvJRdS7mV-PLUPvqp5L9vsyL0Q,1570
5
+ POP/env_api_keys.py,sha256=suIWIyA_lntIa_QuGJbehovmKL9YDYoK8CQTmXuhpFQ,938
6
+ POP/models.py,sha256=gbVX7uPQfUtbXgUB40uG8gc8QVzRhX_eGBxScgwuZDk,673
7
+ POP/prompt_function.py,sha256=SdL23nE8vmTnWPg74T9c8pJ79daOUc5sMlSaVbNnKtc,15765
8
+ POP/stream.py,sha256=p9F1VUo2RftFLYViDcgCRi7SyInI0XBBNxIY7-XPNK0,2856
9
+ POP/prompts/__init__.py,sha256=GhKiG3nNUSx-EAb5m7RbGzpJq5wzh1DnHgtUsHggCnU,313
10
+ POP/prompts/fabric-improve_prompt.md,sha256=McipYmKSBHRwOr8xTHZOO3h39bCTULgoddOjMXtYvds,34777
11
+ POP/prompts/json_formatter_prompt.md,sha256=64UG19TegJPcy3tNO1XjmrdXhomSucnrkUOkK9wJQH4,1565
12
+ POP/prompts/openai-json_schema_generator.md,sha256=N2nKK7b9T--1M8x2cqkry5DYQ63JBpdg0ozBmBh2Sgc,826
13
+ POP/providers/__init__.py,sha256=UB0cL2VQxCTxALA-4UkoalgxElSGkF-3BwjqB0ybXl8,1086
14
+ POP/providers/deepseek_client.py,sha256=E7pBGaJ4tLDMoRybeZUVbVZqZ48vkljg_F5_4l6OQtc,2126
15
+ POP/providers/doubao_client.py,sha256=bZPJCsG9lwLznmxvkkt6C9tbGcJ9auv5NB3xEdKHdLY,3265
16
+ POP/providers/gemini_client.py,sha256=pfdvDDLag4hT2KAKDnMgwWQX9d1LBcdK03Qpcu6u72s,4151
17
+ POP/providers/llm_client.py,sha256=8sL1RJmpU5NaAa8qff0vR85_6rSyAFKHIwaZnjvEkIs,2376
18
+ POP/providers/local_client.py,sha256=lvmvbGiIFejzBVeyY3zNz9btZyKiusZ8tuBcNUD1teE,1417
19
+ POP/providers/ollama_client.py,sha256=e0n96W8A_U0Sd90FgGXkK_u5ghJGNmbKO_8cLuI4h4s,4737
20
+ POP/providers/openai_client.py,sha256=J60IebSbxRaC4WtEFjIi6UdPBeT3cZ2e5ao4y_nV2GQ,3918
21
+ POP/utils/__init__.py,sha256=vbVtL3dEZq76bA0AwlOc-grVdia-P5yZu6CEYpfN46o,373
22
+ POP/utils/event_stream.py,sha256=aNQkkkHx5O3q2D9QRFre2h1oHOGDn6ubEu3SJTNEvOA,1383
23
+ POP/utils/http_proxy.py,sha256=dltUf8B2YuxDI3xf0sCqx9XXfw5Y_JjXaizFKWtKxJk,532
24
+ POP/utils/json_parse.py,sha256=cdB9_t0Z50YYckrRIqXujjajzGAcTaogQiM0Wmw8jso,680
25
+ POP/utils/overflow.py,sha256=lA8O-KuWFmmLUm2_oJLBQSHJo0EHAeKiZzV-aQ8p6xc,861
26
+ POP/utils/sanitize_unicode.py,sha256=UQLti5ZAvfm_DMbu7k4QF3yn2kmQrgTLG9w11AbyARg,567
27
+ POP/utils/validation.py,sha256=UwNM0AJTFZm9lF6Mm9G5je4iXk7Kh3pLnYBnD7wjXI0,748
28
+ POP/utils/web_snapshot.py,sha256=NTYecpzwd2wPVTCsxKM-0_NzlI7FRNWskvv8PMdpe6M,3971
29
+ POP/utils/oauth/__init__.py,sha256=Gesgv0salgfRW0ykVs6CqWKDtRB1_7voNfMnrlA-sTE,987
30
+ pop_python-1.1.0.dist-info/licenses/LICENSE,sha256=zm0TNFsVPrnSK7IDoO504aeYMnO69QrL9IY9P5jRHHg,1064
31
+ tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
32
+ tests/conftest.py,sha256=ggMVkEc5WTLuq4Bi3r98FROOHe1ySuiiBv2w-FIQMPE,1393
33
+ tests/test_api_registry.py,sha256=KDoAwPAEc6q37vUizau3zZF0RmW8UVPhIJOObCarDDA,904
34
+ tests/test_context_utils.py,sha256=h_E91WtrZ4Me1LvydXUmVkyn64qnxMhTpXDWdDCdPkY,1607
35
+ tests/test_embedder.py,sha256=2GS3ObUYovjyjQG9kIwP3bc6DQCQjcoJQkIJz6hnG0o,2028
36
+ tests/test_env_api_keys.py,sha256=R0yGcQScKEuELVvXGyO_AQngc7_XuMiS5hgj30SSF74,425
37
+ tests/test_prompt_function.py,sha256=BqrKTgM1RT43x2tY6ZM0ylhDgKYAj9vk-b0YLhrCp0E,3764
38
+ tests/test_web_snapshot.py,sha256=QaTyTdeOvnLE9o--gtCmLQdkSBdW7sKOw6WIBadS0QA,1476
39
+ pop_python-1.1.0.dist-info/METADATA,sha256=m_PKMYyM5PymBVB0yUP-MamDI7V-wWTItK40Y5lsVog,9539
40
+ pop_python-1.1.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
41
+ pop_python-1.1.0.dist-info/top_level.txt,sha256=qsxIWiiocgXyixwg3wirsnA9-x543XIHDz96kFcZg6w,10
42
+ pop_python-1.1.0.dist-info/RECORD,,