pop-python 1.0.4__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (58)
  1. POP/Embedder.py +121 -119
  2. POP/__init__.py +34 -16
  3. POP/api_registry.py +148 -0
  4. POP/context.py +47 -0
  5. POP/env_api_keys.py +33 -0
  6. POP/models.py +20 -0
  7. POP/prompt_function.py +378 -0
  8. POP/prompts/__init__.py +8 -0
  9. POP/prompts/openai-json_schema_generator.md +12 -161
  10. POP/providers/__init__.py +33 -0
  11. POP/providers/deepseek_client.py +69 -0
  12. POP/providers/doubao_client.py +101 -0
  13. POP/providers/gemini_client.py +119 -0
  14. POP/providers/llm_client.py +60 -0
  15. POP/providers/local_client.py +45 -0
  16. POP/providers/ollama_client.py +129 -0
  17. POP/providers/openai_client.py +100 -0
  18. POP/stream.py +77 -0
  19. POP/utils/__init__.py +9 -0
  20. POP/utils/event_stream.py +43 -0
  21. POP/utils/http_proxy.py +16 -0
  22. POP/utils/json_parse.py +21 -0
  23. POP/utils/oauth/__init__.py +31 -0
  24. POP/utils/overflow.py +33 -0
  25. POP/utils/sanitize_unicode.py +18 -0
  26. POP/utils/validation.py +23 -0
  27. POP/utils/web_snapshot.py +108 -0
  28. {pop_python-1.0.4.dist-info → pop_python-1.1.0.dist-info}/METADATA +160 -57
  29. pop_python-1.1.0.dist-info/RECORD +42 -0
  30. {pop_python-1.0.4.dist-info → pop_python-1.1.0.dist-info}/WHEEL +1 -1
  31. pop_python-1.1.0.dist-info/top_level.txt +2 -0
  32. tests/__init__.py +0 -0
  33. tests/conftest.py +47 -0
  34. tests/test_api_registry.py +36 -0
  35. tests/test_context_utils.py +54 -0
  36. tests/test_embedder.py +64 -0
  37. tests/test_env_api_keys.py +15 -0
  38. tests/test_prompt_function.py +98 -0
  39. tests/test_web_snapshot.py +47 -0
  40. POP/LLMClient.py +0 -410
  41. POP/POP.py +0 -400
  42. POP/prompts/2024-11-19-content_finder.md +0 -46
  43. POP/prompts/2024-11-19-get_content.md +0 -71
  44. POP/prompts/2024-11-19-get_title_and_url.md +0 -62
  45. POP/prompts/CLI_AI_helper.md +0 -75
  46. POP/prompts/content_finder.md +0 -42
  47. POP/prompts/corpus_splitter.md +0 -28
  48. POP/prompts/function_code_generator.md +0 -51
  49. POP/prompts/function_description_generator.md +0 -45
  50. POP/prompts/get_content.md +0 -75
  51. POP/prompts/get_title_and_url.md +0 -62
  52. POP/prompts/openai-function_description_generator.md +0 -126
  53. POP/prompts/openai-prompt_generator.md +0 -49
  54. POP/schemas/biomedical_ner_extractor.json +0 -37
  55. POP/schemas/entity_extraction_per_sentence.json +0 -92
  56. pop_python-1.0.4.dist-info/RECORD +0 -26
  57. pop_python-1.0.4.dist-info/top_level.txt +0 -1
  58. {pop_python-1.0.4.dist-info → pop_python-1.1.0.dist-info}/licenses/LICENSE +0 -0
POP/prompt_function.py ADDED
@@ -0,0 +1,378 @@
+ """PromptFunction class for reusable prompts.
+
+ The :class:`PromptFunction` encapsulates a system prompt, a base
+ prompt template and the logic to execute that prompt against any
+ registered LLM provider. It provides features such as dynamic
+ placeholder substitution, prompt improvement via a meta prompt,
+ function schema generation and prompt saving.
+
+ This implementation mirrors the original POP ``PromptFunction`` but
+ delegates provider instantiation to the central registry defined in
+ ``pop.api_registry`` and stores meta prompts in ``pop/prompts/``.
+ """
+
+ from __future__ import annotations
+
+ import re
+ import json
+ from dataclasses import dataclass
+ from os import getenv, path
+ from typing import List, Dict, Any, Optional, Union
+
+ from .providers.llm_client import LLMClient
+ from .api_registry import get_client
+ from .models import DEFAULT_MODEL
+
+
+ class PromptFunction:
+     """Represent a reusable prompt function."""
+
+     def __init__(
+         self,
+         sys_prompt: str = "",
+         prompt: str = "",
+         client: Union[LLMClient, str, None] = None,
+     ) -> None:
+         """Initialize a new prompt function.
+
+         Parameters
+         ----------
+         sys_prompt : str
+             The system prompt that provides high‑level instructions to the LLM.
+         prompt : str
+             The base prompt template. Placeholders of the form
+             ``<<<name>>>`` will be replaced with values passed to
+             :meth:`execute`.
+         client : LLMClient | str | None
+             An instance of an LLM client or a provider identifier. If
+             omitted, the default provider is ``openai``.
+         """
+         self.prompt: str = prompt
+         self.sys_prompt: str = sys_prompt
+         self.placeholders: List[str] = self._get_place_holder()
+         self.client: LLMClient
+
+         # Instantiate the client based on the type of ``client``
+         if isinstance(client, LLMClient):
+             self.client = client
+         else:
+             provider_name = client or "openai"
+             self.client = get_client(provider_name)  # type: ignore[assignment]
+             if self.client is None:
+                 raise ValueError(f"Unknown provider: {provider_name}")
+
+         # Choose a default model based on the client class name
+         self.default_model_name: str = DEFAULT_MODEL.get(self.client.__class__.__name__, "")
+         # gpt-5/mini/nano only supports temperature 1 (legacy from POP)
+         if (
+             self.client.__class__.__name__ == "OpenAIClient"
+             and self.default_model_name in ["gpt-5-nano", "gpt-5-mini", "gpt-5"]
+         ):
+             self.temperature: float = 1.0
+         else:
+             self.temperature = 0.0
+         self.last_response: Any = None
+         # Provide some debug output so users know what client and model are in use
+         print(
+             f"[PromptFunction] Using client: {self.client.__class__.__name__}, using model: {self.client.model_name}"
+         )
+
+     def execute(self, *args: str, **kwargs: Any) -> str:
+         """Execute the prompt with dynamic argument injection.
+
+         Parameters
+         ----------
+         *args : str
+             Positional strings appended to the prompt.
+         **kwargs : Any
+             Keyword arguments for placeholder replacement or extra
+             context. Special keys include:
+
+             * ``model`` – override the default model name.
+             * ``sys`` – additional system instructions.
+             * ``fmt`` – response format (pydantic model or JSON schema).
+             * ``tools`` – list of function tools for tool calling.
+             * ``tool_choice`` – specific tool to call (default ``"auto"`` if
+               ``tools`` is provided).
+             * ``temp`` – override the temperature.
+             * ``images`` – list of image URLs or base64 strings.
+             * ``ADD_BEFORE`` – text prepended to the prompt.
+             * ``ADD_AFTER`` – text appended to the prompt.
+
+         Returns
+         -------
+         str
+             The LLM‑generated response. If the provider returns a
+             function call, its arguments are returned instead.
+         """
+         # Pop recognised special keys
+         model = kwargs.pop("model", self.default_model_name)
+         system_extra = kwargs.pop("sys", "")
+         fmt = kwargs.pop("fmt", None)
+         tools = kwargs.pop("tools", None)
+         temp = kwargs.pop("temp", self.temperature)
+         images = kwargs.pop("images", None)
+         tool_choice = kwargs.pop("tool_choice", None)
+         if tools and not tool_choice:
+             tool_choice = "auto"
+
+         # Prepare the prompt with dynamic injections
+         formatted_prompt = self._prepare_prompt(*args, **kwargs)
+
+         # Build the message payload
+         system_message = {
+             "role": "system",
+             "content": (
+                 "You are a general‑purpose helpful assistant that is responsible for executing Prompt Functions, you will receive instructions and return as ordered. "
+                 "Since your return is expected to be read by code most of the time, DO NOT wrap your returns in '```' tags unless user explicitly asks for markdown or similar format. "
+                 f"Base system prompt:\n{self.sys_prompt}\n\n"
+                 f"Additional instructions:\n{system_extra}"
+             ),
+         }
+         user_message = {"role": "user", "content": f"<if no user message, check system prompt.> {formatted_prompt}"}
+         messages = [system_message, user_message]
+
+         # Assemble call parameters
+         call_kwargs: Dict[str, Any] = {
+             "messages": messages,
+             "model": model,
+             "temperature": temp,
+         }
+         if fmt is not None:
+             call_kwargs["response_format"] = fmt
+         if tools is not None:
+             call_kwargs["tools"] = tools
+             call_kwargs["tool_choice"] = tool_choice
+         if images is not None:
+             call_kwargs["images"] = images
+
+         # Execute the call
+         try:
+             raw_response = self.client.chat_completion(**call_kwargs)
+         except Exception as exc:
+             # Print verbose diagnostics
+             print(
+                 f"Error occurred while executing prompt function: {exc}\nparameters:\n"
+                 f"model: {model}\ntemperature: {temp}\nprompt: {formatted_prompt}\nsys: {system_extra}\n"
+                 f"format: {fmt}\ntools: {tools}\nimages: {images}"
+             )
+             return ""
+         # Save entire response for later inspection
+         self.last_response = raw_response
+
+         # Extract the reply content. If it's a function call, extract the
+         # arguments instead of the raw content
+         reply_content = ""
+         try:
+             first_choice = raw_response.choices[0]
+             message = first_choice.message
+             if getattr(message, "tool_calls", None):
+                 tool_call = message.tool_calls[0]
+                 reply_content = tool_call.function.arguments  # type: ignore[attr-defined]
+             else:
+                 reply_content = message.content
+         except Exception:
+             # Fallback: attempt to coerce response to string
+             reply_content = str(raw_response)
+         return reply_content
+
+     def _prepare_prompt(self, *args: str, **kwargs: Any) -> str:
+         """Prepare the prompt by injecting dynamic arguments.
+
+         Replacement occurs in three passes: 1) base prompt or system
+         prompt; 2) positional arguments appended; 3) replace
+         placeholders and remaining keyword arguments; 4) prepend or
+         append additional text.
+         """
+         before = kwargs.pop("ADD_BEFORE", "")
+         after = kwargs.pop("ADD_AFTER", "")
+         # Determine starting prompt
+         prompt = self.prompt
+         if not prompt:
+             if self.sys_prompt:
+                 prompt = "User instruction:"
+                 # When building from system prompt, encode kwargs into lines
+                 if kwargs:
+                     prompt += "\n" + "\n".join(f"{k}: {v}" for k, v in kwargs.items())
+             else:
+                 raise ValueError("No prompt or system prompt provided.")
+         # Append positional arguments
+         if args:
+             prompt = prompt + "\n" + "\n".join(args)
+         # First pass: replace placeholders defined in the original template
+         for placeholder in self.placeholders:
+             if placeholder in kwargs:
+                 prompt = prompt.replace(f"<<<{placeholder}>>>", str(kwargs.pop(placeholder)))
+         # Second pass: replace any remaining kwargs by exact key match
+         for key, value in list(kwargs.items()):
+             prompt = prompt.replace(f"<<<{key}>>>", str(value))
+         # Prepend and append additional text
+         if before:
+             prompt = before + "\n" + prompt
+         if after:
+             prompt = prompt + "\n" + after
+         return prompt
+
+     def _get_place_holder(self) -> List[str]:
+         """Extract placeholders from the prompt or system prompt."""
+         target_text = self.prompt if self.prompt else self.sys_prompt
+         if not target_text:
+             return []
+         placeholders = re.findall(r"<<<(.*?)>>>", target_text)
+         if placeholders:
+             print("Placeholders found:", placeholders)
+         return placeholders
+
+     def improve_prompt(
+         self,
+         replace: bool = False,
+         use_prompt: str = "fabric",
+         instruction: Optional[str] = None,
+         user_instruction: Optional[str] = None,
+     ) -> str:
+         """Improve the prompt using a meta prompt.
+
+         Parameters
+         ----------
+         replace : bool, optional
+             If True, replace the existing system prompt with the improved
+             version (default False).
+         use_prompt : str, optional
+             Identifier for which meta prompt to use (currently only
+             ``"fabric"`` is supported).
+         instruction : str, optional
+             Override the meta prompt instructions by providing a full
+             meta prompt directly.
+         user_instruction : str, optional
+             Additional instructions from the user.
+         Returns
+         -------
+         str
+             The improved prompt.
+         """
+         if use_prompt == "fabric":
+             # Determine path to the meta prompt file relative to this file
+             current_dir = path.dirname(path.abspath(__file__))
+             file_path = path.join(current_dir, "prompts", "fabric-improve_prompt.md")
+             try:
+                 with open(file_path, "r", encoding="utf-8") as f:
+                     instruction = f.read()
+             except FileNotFoundError:
+                 raise FileNotFoundError(f"File not found: {file_path}")
+         # Compose meta instruction that preserves placeholders
+         meta_instruction = (
+             f"\nAdditional instruction:\n{user_instruction}\n"
+             "Ensure that original placeholders (<<<placeholder>>>) are preserved in the improved prompt and placed in a clear position."
+             "do not use any '<<<' or '>>>' in the improved prompt other than the original placeholder, and you have to show the placehold in the exact same order and amount of times as in the original prompt."
+         )
+         # Execute the meta prompt via the model
+         improved_prompt = self.execute(
+             ADD_BEFORE=meta_instruction,
+             model="gpt-5",
+             sys=(
+                 "You are asked to improve the above 'Base system prompt' using the following instruction:\n"
+                 + (instruction or "")
+             ),
+         )
+         if use_prompt == "fabric":
+             # Extract only the part after the '# OUTPUT' marker
+             if "# OUTPUT" in improved_prompt:
+                 improved_prompt = improved_prompt.split("# OUTPUT", 1)[-1].lstrip("\n")
+         if replace:
+             self.sys_prompt = improved_prompt
+         return improved_prompt
+
+     def generate_schema(
+         self,
+         description: Optional[str] = None,
+         meta_prompt: Optional[str] = None,
+         meta_schema: Optional[dict] = None,
+         model: str = "gpt-5-mini",
+         save: bool = True,
+     ) -> dict:
+         """Generate a function schema from a natural language description.
+
+         The schema is generated by calling the underlying model with a
+         meta prompt that instructs it to produce a JSON schema. This
+         mirrors the behaviour of the original POP implementation.
+         """
+         # Fallback to the instance prompt if no description provided
+         if not description:
+             if self.prompt:
+                 description = self.prompt
+             else:
+                 raise ValueError(
+                     "Description or instance prompt must be provided to generate a function schema."
+                 )
+         # Load meta prompt from file if necessary
+         if meta_prompt is None:
+             try:
+                 meta_prompt = PromptFunction.load_prompt("prompts/openai-json_schema_generator.md")
+             except FileNotFoundError:
+                 raise FileNotFoundError(
+                     "Meta prompt file 'prompts/openai-json_schema_generator.md' not found. "
+                     "Either place it there or pass meta_prompt manually."
+                 )
+         else:
+             meta_prompt = PromptFunction.load_prompt(meta_prompt)
+         # Load meta schema from file if provided
+         if meta_schema is not None and not isinstance(meta_schema, dict):
+             # assume string path to JSON file
+             with open(meta_schema, "r", encoding="utf-8") as f:
+                 meta_schema = json.load(f)
+         # Prepare messages
+         messages = [
+             {"role": "system", "content": meta_prompt},
+             {"role": "user", "content": "Description:\n" + description},
+         ]
+         # Execute call using the chosen model; response_format holds the meta_schema
+         response = self.client.chat_completion(
+             messages=messages,
+             model=model,
+             temperature=self.temperature,
+             response_format=meta_schema,
+         )
+         # Parse JSON from the model's response
+         try:
+             content = response.choices[0].message.content
+         except Exception:
+             content = str(response)
+         parsed_schema = json.loads(content)
+         # Optionally save to a file under schemas/
+         if save:
+             import os
+             os.makedirs("schemas", exist_ok=True)
+             prompts_name = parsed_schema.get("name", "generated_schema")
+             safe_name = re.sub(r"[^a-zA-Z0-9_]", "_", prompts_name)
+             file_path = os.path.join("schemas", f"{safe_name}.json")
+             with open(file_path, "w", encoding="utf-8") as f:
+                 json.dump(parsed_schema, f, indent=2)
+             print(f"[generate_schema] Function schema saved to {file_path}")
+         return parsed_schema
+
+     @staticmethod
+     def load_prompt(file: str) -> str:
+         """Load a prompt from a file.
+
+         The ``file`` parameter may be an absolute or relative path. If
+         it is relative, it is resolved relative to this module's
+         directory to ensure that files under ``pop/prompts`` are found.
+         """
+         # Determine absolute path
+         if not path.isabs(file):
+             current_dir = path.dirname(path.abspath(__file__))
+             file = path.join(current_dir, file)
+         with open(file, "r", encoding="utf-8") as f:
+             return f.read()
+
+     def set_temperature(self, temperature: float) -> None:
+         """Set the sampling temperature for the next execution."""
+         self.temperature = temperature
+
+     def save(self, file_path: str) -> None:
+         """Save the base prompt to a file."""
+         with open(file_path, "w", encoding="utf-8") as f:
+             f.write(self.prompt)
+
+
+ __all__ = ["PromptFunction"]
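For orientation, the following is a minimal usage sketch of the PromptFunction API shown above, not code shipped in the package. It assumes the package imports as POP (matching the POP/ paths in this diff), that an OPENAI_API_KEY is configured, and the template text and <<<text>>> placeholder name are purely illustrative.

from POP.prompt_function import PromptFunction

# Hypothetical template with one <<<text>>> placeholder.
summarize = PromptFunction(
    sys_prompt="You summarize text in one sentence.",
    prompt="Summarize the following text:\n<<<text>>>",
    client="openai",  # provider name resolved via the api_registry
)

# Keyword arguments fill placeholders; special keys such as `sys` add
# extra system instructions, as documented in execute() above.
result = summarize.execute(
    text="POP wraps prompts into reusable, callable functions.",
    sys="Keep it under 20 words.",
)
print(result)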
POP/prompts/__init__.py ADDED
@@ -0,0 +1,8 @@
+ """
+ This package contains meta-prompts used by the POP framework.
+
+ These files include prebuilt prompts that instruct the system how to
+ improve existing prompts and how to generate JSON schema definitions.
+ They are stored as Markdown files and can be loaded using the
+ PromptFunction class or other utilities.
+ """
POP/prompts/openai-json_schema_generator.md CHANGED
@@ -1,165 +1,16 @@
- # Instructions
- Return a valid schema for the described JSON.
+ ### Task: Generate an OpenAI Function JSON Schema
 
- You must also make sure:
- - all fields in an object are set as required
- - I REPEAT, ALL FIELDS MUST BE MARKED AS REQUIRED
- - all objects must have additionalProperties set to false
- - because of this, some cases like "attributes" or "metadata" properties that would normally allow additional properties should instead have a fixed set of properties
- - all objects must have properties defined
- - field order matters. any form of "thinking" or "explanation" should come before the conclusion
- - $defs must be defined under the schema param
+ You are given a natural language description of a function. Your task is to return a **valid JSON object** that defines an OpenAI-compatible function schema.
 
- Notable keywords NOT supported include:
- - For strings: minLength, maxLength, pattern, format
- - For numbers: minimum, maximum, multipleOf
- - For objects: patternProperties, unevaluatedProperties, propertyNames, minProperties, maxProperties
- - For arrays: unevaluatedItems, contains, minContains, maxContains, minItems, maxItems, uniqueItems
+ #### Requirements
 
- Other notes:
- - definitions and recursion are supported
- - only if necessary to include references e.g. "$defs", it must be inside the "schema" object
+ 1. **Output JSON only**: Do not include markdown or extra text.
+ 2. **Top-level fields**: The JSON object must include both `"name"` and `"schema"`.
+ 3. **Schema format**: Use JSON Schema Draft-07. The `"schema"` field must be an object with at least:
+    - `"$schema": "http://json-schema.org/draft-07/schema#"`
+    - `"type": "object"`
+    - `"properties": { ... }`
+    - `"required": [ ... ]` (if needed)
+ 4. **Be precise**: Reflect the description accurately using types, constraints, and clear property names.
 
- # Examples
- Input: Generate a math reasoning schema with steps and a final answer.
- Output: {
-     "name": "math_reasoning",
-     "type": "object",
-     "properties": {
-         "steps": {
-             "type": "array",
-             "description": "A sequence of steps involved in solving the math problem.",
-             "items": {
-                 "type": "object",
-                 "properties": {
-                     "explanation": {
-                         "type": "string",
-                         "description": "Description of the reasoning or method used in this step."
-                     },
-                     "output": {
-                         "type": "string",
-                         "description": "Result or outcome of this specific step."
-                     }
-                 },
-                 "required": [
-                     "explanation",
-                     "output"
-                 ],
-                 "additionalProperties": false
-             }
-         },
-         "final_answer": {
-             "type": "string",
-             "description": "The final solution or answer to the math problem."
-         }
-     },
-     "required": [
-         "steps",
-         "final_answer"
-     ],
-     "additionalProperties": false
- }
-
- Input: Give me a linked list
- Output: {
-     "name": "linked_list",
-     "type": "object",
-     "properties": {
-         "linked_list": {
-             "$ref": "#/$defs/linked_list_node",
-             "description": "The head node of the linked list."
-         }
-     },
-     "$defs": {
-         "linked_list_node": {
-             "type": "object",
-             "description": "Defines a node in a singly linked list.",
-             "properties": {
-                 "value": {
-                     "type": "number",
-                     "description": "The value stored in this node."
-                 },
-                 "next": {
-                     "anyOf": [
-                         {
-                             "$ref": "#/$defs/linked_list_node"
-                         },
-                         {
-                             "type": "null"
-                         }
-                     ],
-                     "description": "Reference to the next node; null if it is the last node."
-                 }
-             },
-             "required": [
-                 "value",
-                 "next"
-             ],
-             "additionalProperties": false
-         }
-     },
-     "required": [
-         "linked_list"
-     ],
-     "additionalProperties": false
- }
-
- Input: Dynamically generated UI
- Output: {
-     "name": "ui",
-     "type": "object",
-     "properties": {
-         "type": {
-             "type": "string",
-             "description": "The type of the UI component",
-             "enum": [
-                 "div",
-                 "button",
-                 "header",
-                 "section",
-                 "field",
-                 "form"
-             ]
-         },
-         "label": {
-             "type": "string",
-             "description": "The label of the UI component, used for buttons or form fields"
-         },
-         "children": {
-             "type": "array",
-             "description": "Nested UI components",
-             "items": {
-                 "$ref": "#"
-             }
-         },
-         "attributes": {
-             "type": "array",
-             "description": "Arbitrary attributes for the UI component, suitable for any element",
-             "items": {
-                 "type": "object",
-                 "properties": {
-                     "name": {
-                         "type": "string",
-                         "description": "The name of the attribute, for example onClick or className"
-                     },
-                     "value": {
-                         "type": "string",
-                         "description": "The value of the attribute"
-                     }
-                 },
-                 "required": [
-                     "name",
-                     "value"
-                 ],
-                 "additionalProperties": false
-             }
-         }
-     },
-     "required": [
-         "type",
-         "label",
-         "children",
-         "attributes"
-     ],
-     "additionalProperties": false
- }
+ The user description will be provided in the user message. Return only the JSON object.
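For clarity, an object of the shape the revised meta prompt asks for might look like the following sketch, written here as a Python literal. It is an invented example (the "extract_keywords" name and properties are not from the package); only the top-level "name"/"schema" layout and the Draft-07 fields come from the requirements above.

expected_output_shape = {
    "name": "extract_keywords",
    "schema": {
        "$schema": "http://json-schema.org/draft-07/schema#",
        "type": "object",
        "properties": {
            "keywords": {
                "type": "array",
                "items": {"type": "string"},
                "description": "Keywords extracted from the input text.",
            }
        },
        "required": ["keywords"],
    },
}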
POP/providers/__init__.py ADDED
@@ -0,0 +1,33 @@
+ """Provider registry.
+
+ This subpackage contains one module per supported provider. The
+ ``CLIENTS`` mapping associates short provider names (e.g. ``"openai"``)
+ with the corresponding client class. External consumers should use
+ ``pop.api_registry.get_client()`` rather than importing classes
+ directly from this module.
+
+ Adding a new provider is as simple as creating a new module in this
+ directory that defines a class derived from :class:`pop.providers.llm_client.LLMClient`
+ and then registering it here.
+ """
+
+ from .openai_client import OpenAIClient
+ from .gemini_client import GeminiClient
+ from .deepseek_client import DeepseekClient
+ from .local_client import LocalPyTorchClient
+ from .doubao_client import DoubaoClient
+ from .ollama_client import OllamaClient
+
+
+ # Map short provider names to their client classes. New providers
+ # should be inserted here.
+ DEFAULT_CLIENTS = {
+     "openai": OpenAIClient,
+     "gemini": GeminiClient,
+     "deepseek": DeepseekClient,
+     "local": LocalPyTorchClient,
+     "doubao": DoubaoClient,
+     "ollama": OllamaClient,
+ }
+
+ __all__ = ["DEFAULT_CLIENTS"]
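Per the docstring above, the intended entry point is pop.api_registry.get_client() rather than this mapping. A hedged sketch of that flow follows; the POP import path and the "deepseek-chat" model name are assumptions based on this diff (the real defaults live in POP/models.py), not verified against the released API.

from POP.api_registry import get_client

client = get_client("deepseek")  # resolves DeepseekClient from the provider mapping
if client is None:
    raise ValueError("Provider 'deepseek' is not registered.")
response = client.chat_completion(
    messages=[{"role": "user", "content": "ping"}],
    model="deepseek-chat",  # assumed model name for illustration
)
print(response.choices[0].message.content)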
POP/providers/deepseek_client.py ADDED
@@ -0,0 +1,69 @@
+ """Deepseek API client implementation.
+
+ This module provides a client for the Deepseek API using OpenAI's
+ Python client under the hood. It mirrors the original POP
+ implementation but exists as its own provider module.
+ """
+
+ from typing import List, Dict, Any
+ from os import getenv
+
+ from .llm_client import LLMClient
+
+
+ try:
+     from openai import OpenAI
+ except Exception:
+     OpenAI = None  # type: ignore
+
+
+ class DeepseekClient(LLMClient):
+     """Client for Deepseek chat completions."""
+
+     def __init__(self, model = None) -> None:
+         if OpenAI is None:
+             raise ImportError(
+                 "openai package is not installed. Install it to use DeepseekClient."
+             )
+         # Use a custom base URL for Deepseek
+         self.client = OpenAI(api_key=getenv("DEEPSEEK_API_KEY"), base_url="https://api.deepseek.com")
+         self.model_name = model
+
+     def chat_completion(
+         self,
+         messages: List[Dict[str, Any]],
+         model: str,
+         temperature: float = 0.0,
+         **kwargs: Any,
+     ) -> Any:
+         request_payload: Dict[str, Any] = {
+             "model": model,
+             "messages": [],
+             "temperature": temperature,
+         }
+
+         # Images are not currently supported by Deepseek
+         images = kwargs.pop("images", None)
+         if images:
+             raise NotImplementedError("DeepseekClient does not support images yet.")
+
+         # Build messages list
+         for msg in messages:
+             role = msg.get("role", "user")
+             content = msg.get("content", "")
+             request_payload["messages"].append({"role": role, "content": content})
+
+         # Tools support (OpenAI‑style function calls)
+         tools = kwargs.get("tools")
+         if tools:
+             request_payload["tools"] = tools
+             request_payload["tool_choice"] = kwargs.get("tool_choice", "auto")
+
+         try:
+             response = self.client.chat.completions.create(**request_payload)
+         except Exception as exc:
+             raise RuntimeError(f"Deepseek chat_completion error: {exc}") from exc
+         return response
+
+
+ __all__ = ["DeepseekClient"]
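Direct use of the client follows the same chat_completion signature shown above. A minimal sketch, assuming DEEPSEEK_API_KEY is set in the environment, the package imports as POP, and "deepseek-chat" is an accepted model name (an assumption, not confirmed by this diff):

from POP.providers.deepseek_client import DeepseekClient

# The constructor reads DEEPSEEK_API_KEY via os.getenv, as shown above.
client = DeepseekClient(model="deepseek-chat")  # assumed model name
response = client.chat_completion(
    messages=[{"role": "user", "content": "Say hello."}],
    model=client.model_name,
    temperature=0.0,
)
print(response.choices[0].message.content)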