content-core 0.5.0__py3-none-any.whl → 0.5.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of content-core might be problematic.

content_core/__init__.py CHANGED
@@ -5,9 +5,12 @@ import os
 import sys
 from xml.etree import ElementTree as ET
 
-from dicttoxml import dicttoxml # type: ignore
 from dotenv import load_dotenv
 
+load_dotenv()
+
+from dicttoxml import dicttoxml # type: ignore
+
 from content_core.common import ProcessSourceInput
 from content_core.content.cleanup import cleanup_content
 from content_core.content.extraction import extract_content
@@ -18,7 +21,6 @@ from content_core.logging import configure_logging, logger
 extract = extract_content
 clean = cleanup_content
 
-load_dotenv()
 
 # Configure loguru logger using centralized configuration
 configure_logging(debug=False)
@@ -212,3 +214,5 @@ def csum():
 
 if __name__ == "__main__":
     ccore()
+if __name__ == "__main__":
+    ccore()
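Note: the import-related change in this file is that load_dotenv() now runs before the dicttoxml and content_core imports instead of after them, presumably so that values from a local .env file are already in os.environ when those modules are imported. A minimal sketch of that ordering pattern, with illustrative names only (nothing below is content-core API):

import os

from dotenv import load_dotenv

# Populate os.environ from a local .env before importing anything that
# reads environment variables at import time.
load_dotenv()

# A hypothetical module that calls os.getenv(...) at import time would now
# see the .env values:
# from my_settings import API_KEY
print(os.getenv("SOME_API_KEY"))  # illustrative key name; prints None if unset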
content_core/notebooks/run.ipynb CHANGED
@@ -305,7 +305,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 2,
+"execution_count": 1,
 "metadata": {},
 "outputs": [
 {
@@ -328,18 +328,18 @@
 }
 ],
 "source": [
-"from content_core.config import set_extraction_engine, set_docling_output_format\n",
+"# from content_core.config import set_extraction_engine, set_docling_output_format\n",
 "from content_core.content.extraction import extract_content\n",
 "\n",
-"# 2) Turn on Docling\n",
-"set_extraction_engine(\"docling\")\n",
+"# # 2) Turn on Docling\n",
+"# set_extraction_engine(\"docling\")\n",
 "\n",
-"# 3) (Optionally) pick your format – markdown, html or json\n",
-"set_docling_output_format(\"markdown\")\n",
+"# # 3) (Optionally) pick your format – markdown, html or json\n",
+"# set_docling_output_format(\"markdown\")\n",
 "\n",
 "# 4) Now extract exactly as before:\n",
 "result = await extract_content({\n",
-" \"file_path\": \"../../../tests/input_content/file.pdf\"\n",
+" \"file_path\": \"../../../tests/input_content/file.pdf\", \"engine\": \"docling\", \"output_format\": \"markdown\"\n",
 "})\n",
 "print(result.content)"
 ]
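The updated cell comments out the module-level setters and instead passes the engine and output format directly in the call. A minimal sketch of that per-call usage outside a notebook, using only what the cell above shows (the file path is illustrative, and asyncio.run stands in for the notebook's top-level await):

import asyncio

from content_core.content.extraction import extract_content


async def main():
    # Engine and output format travel with the request instead of being set
    # globally via set_extraction_engine()/set_docling_output_format().
    result = await extract_content({
        "file_path": "tests/input_content/file.pdf",  # illustrative path
        "engine": "docling",
        "output_format": "markdown",
    })
    print(result.content)


asyncio.run(main())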
content_core/templated_message.py CHANGED
@@ -1,18 +1,18 @@
 from typing import Dict, Optional, Union
 
+from ai_prompter import Prompter
 from esperanto import LanguageModel
 from esperanto.common_types import Message
 from pydantic import BaseModel, Field
 
 from content_core.models import ModelFactory
-from content_core.prompter import Prompter
 
 
 class TemplatedMessageInput(BaseModel):
-    system_prompt_template: Optional[str] = ""
-    system_prompt_text: Optional[str] = ""
-    user_prompt_template: Optional[str] = ""
-    user_prompt_text: Optional[str] = ""
+    system_prompt_template: Optional[str] = None
+    system_prompt_text: Optional[str] = None
+    user_prompt_template: Optional[str] = None
+    user_prompt_text: Optional[str] = None
     data: Optional[Union[Dict, BaseModel]] = Field(default_factory=lambda: {})
     config: Dict = Field(
         description="The config for the LLM",
@@ -28,30 +28,22 @@ async def templated_message(
     input: TemplatedMessageInput, model: Optional[LanguageModel] = None
 ) -> str:
     if not model:
-        model = ModelFactory.get_model('default_model')
+        model = ModelFactory.get_model("default_model")
 
     msgs = []
     if input.system_prompt_template or input.system_prompt_text:
-        msgs.append(
-            Message(
-                role="system",
-                content=Prompter(
-                    prompt_template=input.system_prompt_template,
-                    prompt_text=input.system_prompt_text,
-                ).render(data=input.data),
-            )
-        )
+        system_prompt = Prompter(
+            prompt_template=input.system_prompt_template,
+            template_text=input.system_prompt_text,
+        ).render(data=input.data)
+        msgs.append(Message(role="system", content=system_prompt))
 
     if input.user_prompt_template or input.user_prompt_text:
-        msgs.append(
-            Message(
-                role="user",
-                content=Prompter(
-                    prompt_template=input.user_prompt_template,
-                    prompt_text=input.user_prompt_text,
-                ).render(data=input.data),
-            )
-        )
+        user_prompt = Prompter(
+            prompt_template=input.user_prompt_template,
+            template_text=input.user_prompt_text,
+        ).render(data=input.data)
+        msgs.append(Message(role="user", content=user_prompt))
 
     result = await model.achat_complete(msgs)
     return result.content
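For orientation, a minimal sketch of the new rendering path in isolation, using only the calls visible in this hunk (ai_prompter's Prompter constructed with prompt_template/template_text and rendered via .render(data=...)). How the data keys are exposed inside the template is an assumption carried over from the removed in-package Prompter (top-level Jinja variables):

from ai_prompter import Prompter

# template_text replaces the old prompt_text keyword; prompt_template stays
# None when rendering raw text, mirroring templated_message() above.
prompt = Prompter(
    prompt_template=None,
    template_text="Summarize this in one line: {{ text }}",  # variable exposure assumed
).render(data={"text": "Content Core extracts what matters from any media source."})

print(prompt)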
content_core-0.5.0.dist-info/METADATA → content_core-0.5.1.dist-info/METADATA CHANGED
@@ -1,10 +1,11 @@
 Metadata-Version: 2.4
 Name: content-core
-Version: 0.5.0
+Version: 0.5.1
 Summary: Extract what matters from any media source
 Author-email: LUIS NOVO <lfnovo@gmail.com>
 License-File: LICENSE
 Requires-Python: >=3.10
+Requires-Dist: ai-prompter>=0.2.3
 Requires-Dist: aiohttp>=3.11
 Requires-Dist: bs4>=0.0.2
 Requires-Dist: dicttoxml>=1.7.16
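Besides the version bump, the only metadata change is the new ai-prompter requirement. A quick sanity check after upgrading (assumes content-core 0.5.1 is installed in the current environment):

from importlib.metadata import version

# Both distributions should resolve once content-core 0.5.1 is installed,
# since ai-prompter>=0.2.3 is now declared in Requires-Dist.
print(version("content-core"))  # expected: 0.5.1
print(version("ai-prompter"))   # expected: 0.2.3 or newer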
content_core-0.5.0.dist-info/RECORD → content_core-0.5.1.dist-info/RECORD CHANGED
@@ -1,12 +1,11 @@
-content_core/__init__.py,sha256=sBCcvRJ-9u5htV5AdptlYPNO0R8NmAex2K1XAkJAoL0,6474
+content_core/__init__.py,sha256=ANKeslNXOGumwrkjqgRik23e5PdGps2C0FSup8_XH2Y,6515
 content_core/cc_config.yaml,sha256=w66fo5ut6TPaU3o4hkjnroqg2hkr8YuOG3BRtI50j1s,701
 content_core/config.py,sha256=-aUsTB6Z3fa_XIWdHNXhMgWkVLWjEW1kfyQXXB_-j54,1632
 content_core/logging.py,sha256=oeRdWKknEolptopxF1IvnEGEc0ZUw45QXYUEZ71GcdY,438
 content_core/models.py,sha256=FBV_tV6cmI0F82WfcA6xHag-YMsxI1dIbDGWG-3Eq_Y,935
 content_core/models_config.yaml,sha256=Yr-GS94ffxnkaWojUfpErUMM7m_MShsYjR6QuDjMzwo,444
-content_core/prompter.py,sha256=-ShuSyHvK50xlgsAFfA9AnAJV-LlzWwmbPDq2wUZRcI,5793
 content_core/py.typed,sha256=pLuU3XTTeVpXo4UomOjcvAIQqOrzIotlWlJ3KFo2lxQ,154
-content_core/templated_message.py,sha256=iWz-TwWq08mspgZW3EgIGf7HqtW1tXuTDpo9FkNwixQ,1729
+content_core/templated_message.py,sha256=KbI2rcvgGM5oRIcsG68zAZfgNsC97fR16D61683ZSnY,1617
 content_core/common/__init__.py,sha256=SjDp-0QRjX9PMubyTjv77_GrUqm6eC4gBuXr593JVK4,525
 content_core/common/exceptions.py,sha256=NpYedVbckIq4kP2wek7bicMVgGGn0fkhCvid5cIxfy4,1304
 content_core/common/state.py,sha256=cJvIwqvrvGxuk1t51bTOvPV-RM5Nbd8F8C4o0dawIXo,1185
@@ -19,7 +18,7 @@ content_core/content/extraction/graph.py,sha256=Sp9XJ6AoLXA_FUFWhmfTMzOC2gkarp1Q
 content_core/content/summary/__init__.py,sha256=ReKCZWKfDtqlInKeh87Y1DEfiNzVWabGybEz3hS2FrI,114
 content_core/content/summary/core.py,sha256=LejUbPxnRD0sbO6MupiIb-IHLxEUGU5beBZwmIiBncc,542
 content_core/notebooks/docling.ipynb,sha256=aTad8NORNd-TUMlbX58DURJ4-QCeplTeTT0vUj301m0,631
-content_core/notebooks/run.ipynb,sha256=vmOYratdx0MnhNChjq3I5b7K2iYWuqO2dECK4Dp0jbU,369422
+content_core/notebooks/run.ipynb,sha256=lV8n1fx_kgIQHBnk1vR6ChBjMS5luAEuDDljsTBNjrQ,369490
 content_core/processors/audio.py,sha256=jDn0_6F5dLcmz_C-iR80uOqOIAz49ELya2R5JeM15vo,3538
 content_core/processors/docling.py,sha256=wQ8ThAcyrCy-c95QtgplQ9UZtjCZTddLD9y1_CrRtSQ,2111
 content_core/processors/office.py,sha256=DXkfmjqUhmhP6rJaO5Z5Y9sv-iK0zaPZ3waynFIPtsk,12153
@@ -34,8 +33,8 @@ content_core/tools/__init__.py,sha256=DuJmd7fE-NpDvLP8IW1XY5MUkAQcdks52rn2jk4N8j
 content_core/tools/cleanup.py,sha256=5IdKedsFyRQMdYzgFSKtsfyxJldbroXQXHesHICNENI,523
 content_core/tools/extract.py,sha256=-r2_jsuMMXyXxGVqWhh1ilNPo_UMYAbw3Pkp1FzPy5g,577
 content_core/tools/summarize.py,sha256=DPfeglLWB08q8SvHrsKpOKZ35XjduUDs2J02ISwjdj0,596
-content_core-0.5.0.dist-info/METADATA,sha256=3im9n4tqCrStAX1UkdR42NnODwwHggKeQJdYL_eX68U,10499
-content_core-0.5.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-content_core-0.5.0.dist-info/entry_points.txt,sha256=9fGQUk6bxBVXj9PRwfWVPn54ClSEJV7J-KBLXtjOhQw,99
-content_core-0.5.0.dist-info/licenses/LICENSE,sha256=myj0z2T4qIkenCgLsRfx7Wk6UqCQNj5c7O14Qx4zpGg,1066
-content_core-0.5.0.dist-info/RECORD,,
+content_core-0.5.1.dist-info/METADATA,sha256=mkvdVcLsiBDGiobgswCVQF8Xkceq5VpIRZspniB61PY,10533
+content_core-0.5.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+content_core-0.5.1.dist-info/entry_points.txt,sha256=9fGQUk6bxBVXj9PRwfWVPn54ClSEJV7J-KBLXtjOhQw,99
+content_core-0.5.1.dist-info/licenses/LICENSE,sha256=myj0z2T4qIkenCgLsRfx7Wk6UqCQNj5c7O14Qx4zpGg,1066
+content_core-0.5.1.dist-info/RECORD,,
content_core/prompter.py DELETED
@@ -1,159 +0,0 @@
-"""
-A prompt management module using Jinja to generate complex prompts with simple templates.
-"""
-
-import os
-from dataclasses import dataclass
-from datetime import datetime
-from typing import Any, Dict, Optional, Union
-
-from dotenv import load_dotenv
-from jinja2 import Environment, FileSystemLoader, Template
-from langchain_core.prompts import ChatPromptTemplate
-from pydantic import BaseModel
-
-from content_core.logging import logger
-
-load_dotenv()
-
-prompt_path_default = os.path.join(
-    os.path.dirname(os.path.abspath(__file__)), "prompts"
-)
-prompt_path_custom = os.getenv("PROMPT_PATH")
-
-logger.debug(
-    f"Pasta de prompts personalizada: {prompt_path_custom if prompt_path_custom else 'Não definida'}"
-)
-logger.debug(f"Pasta de prompts padrão: {prompt_path_default}")
-
-env_custom = (
-    Environment(loader=FileSystemLoader(prompt_path_custom))
-    if prompt_path_custom and os.path.exists(prompt_path_custom)
-    else None
-)
-env_default = Environment(loader=FileSystemLoader(prompt_path_default))
-
-
-@dataclass
-class Prompter:
-    """
-    A class for managing and rendering prompt templates.
-
-    Attributes:
-        prompt_template (str, optional): The name of the prompt template file.
-        prompt_variation (str, optional): The variation of the prompt template.
-        prompt_text (str, optional): The raw prompt text.
-        template (Union[str, Template], optional): The Jinja2 template object.
-    """
-
-    prompt_template: Optional[str] = None
-    prompt_variation: Optional[str] = "default"
-    prompt_text: Optional[str] = None
-    template: Optional[Union[str, Template]] = None
-    parser: Optional[Any] = None
-
-    def __init__(self, prompt_template=None, prompt_text=None, parser=None):
-        """
-        Initialize the Prompter with either a template file or raw text.
-
-        Args:
-            prompt_template (str, optional): The name of the prompt template file.
-            prompt_text (str, optional): The raw prompt text.
-        """
-        self.prompt_template = prompt_template
-        self.prompt_text = prompt_text
-        self.parser = parser
-        self.setup()
-
-    def setup(self):
-        """
-        Set up the Jinja2 template based on the provided template file or text.
-        Raises:
-            ValueError: If neither prompt_template nor prompt_text is provided.
-        """
-        if self.prompt_template:
-            # Primeiro tenta carregar da pasta personalizada, se disponível
-            if env_custom:
-                try:
-                    self.template = env_custom.get_template(
-                        f"{self.prompt_template}.jinja"
-                    )
-                    logger.debug(
-                        f"Template {self.prompt_template} carregado da pasta personalizada"
-                    )
-                    return
-                except Exception as e:
-                    logger.debug(
-                        f"Template {self.prompt_template} não encontrado na pasta personalizada: {e}"
-                    )
-
-            # Se não encontrou na personalizada ou não há pasta personalizada, tenta a padrão
-            try:
-                self.template = env_default.get_template(
-                    f"{self.prompt_template}.jinja"
-                )
-                logger.debug(
-                    f"Template {self.prompt_template} carregado da pasta padrão"
-                )
-            except Exception as e:
-                raise ValueError(
-                    f"Template {self.prompt_template} não encontrado na pasta padrão: {e}"
-                )
-        elif self.prompt_text:
-            self.template = Template(self.prompt_text)
-        else:
-            raise ValueError("Prompter must have a prompt_template or prompt_text")
-
-        assert self.prompt_template or self.prompt_text, "Prompt is required"
-
-    def to_langchain(self):
-        if isinstance(self.template, str):
-            template_text = self.template
-        else:
-            # For file-based templates, read the raw content
-            template_path = os.path.join("prompts", f"{self.prompt_template}.jinja")
-            with open(template_path, "r") as f:
-                template_text = f.read()
-        return ChatPromptTemplate.from_template(template_text, template_format="jinja2")
-
-    @classmethod
-    def from_text(cls, text: str):
-        """
-        Create a Prompter instance from raw text, which can contain Jinja code.
-
-        Args:
-            text (str): The raw prompt text.
-
-        Returns:
-            Prompter: A new Prompter instance.
-        """
-
-        return cls(prompt_text=text)
-
-    def render(self, data: Optional[Union[Dict, BaseModel]] = {}) -> str:
-        """
-        Render the prompt template with the given data.
-
-        Args:
-            data (Union[Dict, BaseModel]): The data to be used in rendering the template.
-                Can be either a dictionary or a Pydantic BaseModel.
-
-        Returns:
-            str: The rendered prompt text.
-
-        Raises:
-            AssertionError: If the template is not defined or not a Jinja2 Template.
-        """
-        # Convert Pydantic model to dict if necessary
-        data_dict = data.model_dump() if isinstance(data, BaseModel) else data
-        # Create a new mutable dictionary with the original data
-        render_data = dict(data_dict)
-        render_data["current_time"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-        if self.parser:
-            render_data["format_instructions"] = self.parser.get_format_instructions()
-        assert self.template, "Prompter template is not defined"
-        assert isinstance(
-            self.template, Template
-        ), "Prompter template is not a Jinja2 Template"
-        return self.template.render(render_data)
-        return self.template.render(render_data)