content-core 0.5.0__tar.gz → 0.5.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of content-core might be problematic.

Files changed (64)
  1. {content_core-0.5.0 → content_core-0.5.1}/.gitignore +1 -0
  2. content_core-0.5.1/Makefile +16 -0
  3. {content_core-0.5.0 → content_core-0.5.1}/PKG-INFO +2 -1
  4. {content_core-0.5.0 → content_core-0.5.1}/pyproject.toml +2 -1
  5. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/__init__.py +6 -2
  6. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/notebooks/run.ipynb +7 -7
  7. content_core-0.5.1/src/content_core/templated_message.py +49 -0
  8. {content_core-0.5.0 → content_core-0.5.1}/uv.lock +26 -1
  9. content_core-0.5.0/.windsurfrules +0 -13
  10. content_core-0.5.0/Makefile +0 -8
  11. content_core-0.5.0/src/content_core/prompter.py +0 -159
  12. content_core-0.5.0/src/content_core/templated_message.py +0 -57
  13. {content_core-0.5.0 → content_core-0.5.1}/.github/PULL_REQUEST_TEMPLATE.md +0 -0
  14. {content_core-0.5.0 → content_core-0.5.1}/.github/workflows/publish.yml +0 -0
  15. {content_core-0.5.0 → content_core-0.5.1}/.python-version +0 -0
  16. {content_core-0.5.0 → content_core-0.5.1}/CONTRIBUTING.md +0 -0
  17. {content_core-0.5.0 → content_core-0.5.1}/LICENSE +0 -0
  18. {content_core-0.5.0 → content_core-0.5.1}/README.md +0 -0
  19. {content_core-0.5.0 → content_core-0.5.1}/docs/processors.md +0 -0
  20. {content_core-0.5.0 → content_core-0.5.1}/docs/usage.md +0 -0
  21. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/cc_config.yaml +0 -0
  22. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/common/__init__.py +0 -0
  23. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/common/exceptions.py +0 -0
  24. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/common/state.py +0 -0
  25. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/common/utils.py +0 -0
  26. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/config.py +0 -0
  27. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/content/__init__.py +0 -0
  28. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/content/cleanup/__init__.py +0 -0
  29. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/content/cleanup/core.py +0 -0
  30. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/content/extraction/__init__.py +0 -0
  31. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/content/extraction/graph.py +0 -0
  32. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/content/summary/__init__.py +0 -0
  33. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/content/summary/core.py +0 -0
  34. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/logging.py +0 -0
  35. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/models.py +0 -0
  36. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/models_config.yaml +0 -0
  37. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/notebooks/docling.ipynb +0 -0
  38. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/processors/audio.py +0 -0
  39. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/processors/docling.py +0 -0
  40. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/processors/office.py +0 -0
  41. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/processors/pdf.py +0 -0
  42. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/processors/text.py +0 -0
  43. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/processors/url.py +0 -0
  44. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/processors/video.py +0 -0
  45. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/processors/youtube.py +0 -0
  46. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/prompts/content/cleanup.jinja +0 -0
  47. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/prompts/content/summarize.jinja +0 -0
  48. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/py.typed +0 -0
  49. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/tools/__init__.py +0 -0
  50. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/tools/cleanup.py +0 -0
  51. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/tools/extract.py +0 -0
  52. {content_core-0.5.0 → content_core-0.5.1}/src/content_core/tools/summarize.py +0 -0
  53. {content_core-0.5.0 → content_core-0.5.1}/tests/input_content/file.docx +0 -0
  54. {content_core-0.5.0 → content_core-0.5.1}/tests/input_content/file.epub +0 -0
  55. {content_core-0.5.0 → content_core-0.5.1}/tests/input_content/file.md +0 -0
  56. {content_core-0.5.0 → content_core-0.5.1}/tests/input_content/file.mp3 +0 -0
  57. {content_core-0.5.0 → content_core-0.5.1}/tests/input_content/file.mp4 +0 -0
  58. {content_core-0.5.0 → content_core-0.5.1}/tests/input_content/file.pdf +0 -0
  59. {content_core-0.5.0 → content_core-0.5.1}/tests/input_content/file.pptx +0 -0
  60. {content_core-0.5.0 → content_core-0.5.1}/tests/input_content/file.txt +0 -0
  61. {content_core-0.5.0 → content_core-0.5.1}/tests/input_content/file.xlsx +0 -0
  62. {content_core-0.5.0 → content_core-0.5.1}/tests/input_content/file_audio.mp3 +0 -0
  63. {content_core-0.5.0 → content_core-0.5.1}/tests/integration/test_extraction.py +0 -0
  64. {content_core-0.5.0 → content_core-0.5.1}/tests/unit/test_docling.py +0 -0

.gitignore:
@@ -21,3 +21,4 @@ todo.md
 WIP/
 
 *.ignore
+.windsurfrules

content_core-0.5.1/Makefile (new file):
@@ -0,0 +1,16 @@
+.PHONY: tag test build-docs ruff
+
+tag:
+	@version=$$(grep '^version = ' pyproject.toml | sed 's/version = "\(.*\)"/\1/'); \
+	echo "Creating tag v$$version"; \
+	git tag "v$$version"; \
+	git push origin "v$$version"
+
+test:
+	uv run pytest -v
+
+build-docs:
+	repomix . --include "**/*.py,**/*.yaml" --compress --style xml -o ai_docs/core.txt
+
+ruff:
+	ruff check . --fix
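
The new `tag` target reads the package version out of pyproject.toml with grep/sed and then creates and pushes a matching git tag. A rough Python equivalent of the version lookup, for illustration only (assumes pyproject.toml is in the current directory):

import re

# Mirror the Makefile's grep '^version = ' | sed capture on pyproject.toml.
with open("pyproject.toml") as f:
    match = re.search(r'^version = "(.*)"', f.read(), re.MULTILINE)

if match:
    print(f"Creating tag v{match.group(1)}")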

PKG-INFO:
@@ -1,10 +1,11 @@
 Metadata-Version: 2.4
 Name: content-core
-Version: 0.5.0
+Version: 0.5.1
 Summary: Extract what matters from any media source
 Author-email: LUIS NOVO <lfnovo@gmail.com>
 License-File: LICENSE
 Requires-Python: >=3.10
+Requires-Dist: ai-prompter>=0.2.3
 Requires-Dist: aiohttp>=3.11
 Requires-Dist: bs4>=0.0.2
 Requires-Dist: dicttoxml>=1.7.16

pyproject.toml:
@@ -1,6 +1,6 @@
 [project]
 name = "content-core"
-version = "0.5.0"
+version = "0.5.1"
 description = "Extract what matters from any media source"
 readme = "README.md"
 homepage = "https://github.com/lfnovo/content-core"
@@ -29,6 +29,7 @@ dependencies = [
     "langgraph>=0.3.29",
     "dicttoxml>=1.7.16",
     "validators>=0.34.0",
+    "ai-prompter>=0.2.3",
 ]
 
 [project.optional-dependencies]
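
The new ai-prompter dependency replaces the in-tree prompter module that is removed later in this diff. A minimal sketch of how it is invoked, based only on the call sites visible in the new templated_message.py (the template string and data are illustrative):

from ai_prompter import Prompter

# Render an inline Jinja template; Prompter also accepts prompt_template for file-based templates.
prompt = Prompter(template_text="Hello, {{ name }}!").render(data={"name": "world"})
print(prompt)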

src/content_core/__init__.py:
@@ -5,9 +5,12 @@ import os
 import sys
 from xml.etree import ElementTree as ET
 
-from dicttoxml import dicttoxml  # type: ignore
 from dotenv import load_dotenv
 
+load_dotenv()
+
+from dicttoxml import dicttoxml  # type: ignore
+
 from content_core.common import ProcessSourceInput
 from content_core.content.cleanup import cleanup_content
 from content_core.content.extraction import extract_content
@@ -18,7 +21,6 @@ from content_core.logging import configure_logging, logger
 extract = extract_content
 clean = cleanup_content
 
-load_dotenv()
 
 # Configure loguru logger using centralized configuration
 configure_logging(debug=False)
@@ -212,3 +214,5 @@ def csum():
 
 if __name__ == "__main__":
     ccore()
+if __name__ == "__main__":
+    ccore()
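
The __init__.py change calls load_dotenv() immediately after importing it, before the remaining imports, instead of near the end of the module. A minimal sketch of the general pattern, assuming downstream modules read configuration from the environment at import time (the variable name is illustrative):

import os

from dotenv import load_dotenv

# Populate os.environ from .env before importing modules that read it at import time.
load_dotenv()

# Imports and code below can now rely on values defined in .env,
# e.g. an API key or a custom prompt path.
print(os.getenv("PROMPT_PATH"))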

src/content_core/notebooks/run.ipynb:
@@ -305,7 +305,7 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
@@ -328,18 +328,18 @@
    }
   ],
   "source": [
-    "from content_core.config import set_extraction_engine, set_docling_output_format\n",
+    "# from content_core.config import set_extraction_engine, set_docling_output_format\n",
    "from content_core.content.extraction import extract_content\n",
    "\n",
-    "# 2) Turn on Docling\n",
-    "set_extraction_engine(\"docling\")\n",
+    "# # 2) Turn on Docling\n",
+    "# set_extraction_engine(\"docling\")\n",
    "\n",
-    "# 3) (Optionally) pick your format – markdown, html or json\n",
-    "set_docling_output_format(\"markdown\")\n",
+    "# # 3) (Optionally) pick your format – markdown, html or json\n",
+    "# set_docling_output_format(\"markdown\")\n",
    "\n",
    "# 4) Now extract exactly as before:\n",
    "result = await extract_content({\n",
-    "    \"file_path\": \"../../../tests/input_content/file.pdf\"\n",
+    "    \"file_path\": \"../../../tests/input_content/file.pdf\", \"engine\": \"docling\", \"output_format\": \"markdown\"\n",
    "})\n",
    "print(result.content)"
   ]
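
The notebook now passes the Docling engine and output format per call instead of setting them globally via set_extraction_engine/set_docling_output_format. A minimal sketch of the new call pattern, assuming the docling extra is installed (the file path is illustrative):

import asyncio

from content_core.content.extraction import extract_content

async def main():
    # Engine and output format are selected per extraction call.
    result = await extract_content({
        "file_path": "tests/input_content/file.pdf",
        "engine": "docling",
        "output_format": "markdown",
    })
    print(result.content)

asyncio.run(main())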

content_core-0.5.1/src/content_core/templated_message.py (new file):
@@ -0,0 +1,49 @@
+from typing import Dict, Optional, Union
+
+from ai_prompter import Prompter
+from esperanto import LanguageModel
+from esperanto.common_types import Message
+from pydantic import BaseModel, Field
+
+from content_core.models import ModelFactory
+
+
+class TemplatedMessageInput(BaseModel):
+    system_prompt_template: Optional[str] = None
+    system_prompt_text: Optional[str] = None
+    user_prompt_template: Optional[str] = None
+    user_prompt_text: Optional[str] = None
+    data: Optional[Union[Dict, BaseModel]] = Field(default_factory=lambda: {})
+    config: Dict = Field(
+        description="The config for the LLM",
+        default={
+            "temperature": 0,
+            "top_p": 1,
+            "max_tokens": 600,
+        },
+    )
+
+
+async def templated_message(
+    input: TemplatedMessageInput, model: Optional[LanguageModel] = None
+) -> str:
+    if not model:
+        model = ModelFactory.get_model("default_model")
+
+    msgs = []
+    if input.system_prompt_template or input.system_prompt_text:
+        system_prompt = Prompter(
+            prompt_template=input.system_prompt_template,
+            template_text=input.system_prompt_text,
+        ).render(data=input.data)
+        msgs.append(Message(role="system", content=system_prompt))
+
+    if input.user_prompt_template or input.user_prompt_text:
+        user_prompt = Prompter(
+            prompt_template=input.user_prompt_template,
+            template_text=input.user_prompt_text,
+        ).render(data=input.data)
+        msgs.append(Message(role="user", content=user_prompt))
+
+    result = await model.achat_complete(msgs)
+    return result.content
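
For reference, a minimal usage sketch of the new module, assuming a default model is configured for content_core's ModelFactory (the prompt text and data keys are illustrative):

import asyncio

from content_core.templated_message import TemplatedMessageInput, templated_message

async def main():
    # Templates are Jinja strings rendered by ai-prompter before being sent to the model.
    reply = await templated_message(
        TemplatedMessageInput(
            system_prompt_text="You are a concise assistant.",
            user_prompt_text="Summarize in one sentence: {{ content }}",
            data={"content": "Content Core extracts text from many media sources."},
        )
    )
    print(reply)

asyncio.run(main())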

uv.lock:
@@ -23,6 +23,20 @@ resolution-markers = [
     "(python_full_version >= '3.12.4' and platform_machine != 'aarch64' and platform_machine != 'x86_64' and platform_system == 'Linux') or (python_full_version >= '3.12.4' and platform_machine != 'x86_64' and platform_system != 'Darwin' and platform_system != 'Linux') or (python_full_version >= '3.12.4' and platform_machine != 'aarch64' and platform_system == 'Linux' and sys_platform != 'darwin') or (python_full_version >= '3.12.4' and platform_system != 'Darwin' and platform_system != 'Linux' and sys_platform != 'darwin')",
 ]
 
+[[package]]
+name = "ai-prompter"
+version = "0.2.3"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "jinja2" },
+    { name = "pip" },
+    { name = "pydantic" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/39/ff/cf13c31b88c06e11a1ffeed505601c167293b23d3e2e4e02adac93cc9300/ai_prompter-0.2.3.tar.gz", hash = "sha256:40f55c18f87df250a13f84d0cf7a4e8b31815a01f27666039386d6592849694b", size = 72955 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/5f/11/9e3712b8393dbef152258c68617baec343040c3d08b372d77b57e51d8e5d/ai_prompter-0.2.3-py3-none-any.whl", hash = "sha256:e8c0becbb3c8bdff399e372830e2c0a3cc3292e02d67921e2b255871329ee477", size = 7345 },
+]
+
 [[package]]
 name = "aiohappyeyeballs"
 version = "2.6.1"
@@ -391,9 +405,10 @@ wheels = [
 
 [[package]]
 name = "content-core"
-version = "0.5.0"
+version = "0.5.1"
 source = { editable = "." }
 dependencies = [
+    { name = "ai-prompter" },
     { name = "aiohttp" },
     { name = "bs4" },
     { name = "dicttoxml" },
@@ -435,6 +450,7 @@ dev = [
 
 [package.metadata]
 requires-dist = [
+    { name = "ai-prompter", specifier = ">=0.2.3" },
     { name = "aiohttp", specifier = ">=3.11" },
     { name = "asciidoc", marker = "extra == 'docling'" },
     { name = "bs4", specifier = ">=0.0.2" },
@@ -2239,6 +2255,15 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/21/2c/5e05f58658cf49b6667762cca03d6e7d85cededde2caf2ab37b81f80e574/pillow-11.2.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:208653868d5c9ecc2b327f9b9ef34e0e42a4cdd172c2988fd81d62d2bc9bc044", size = 2674751 },
 ]
 
+[[package]]
+name = "pip"
+version = "25.0.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/70/53/b309b4a497b09655cb7e07088966881a57d082f48ac3cb54ea729fd2c6cf/pip-25.0.1.tar.gz", hash = "sha256:88f96547ea48b940a3a385494e181e29fb8637898f88d88737c5049780f196ea", size = 1950850 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/c9/bc/b7db44f5f39f9d0494071bddae6880eb645970366d0a200022a1a93d57f5/pip-25.0.1-py3-none-any.whl", hash = "sha256:c46efd13b6aa8279f33f2864459c8ce587ea6a1a59ee20de055868d8f7688f7f", size = 1841526 },
+]
+
 [[package]]
 name = "platformdirs"
 version = "4.3.7"

content_core-0.5.0/.windsurfrules (deleted):
@@ -1,13 +0,0 @@
-Also use uv as the package manager: uv run, uv sync, uv add.
-
-All documentation (code or readmes) must be in english.
-Whenever I ask you to tag and release, make sure to run `make test` as part of the process.
-
-The full release process is:
-- Run `make test` to make sure everything is working (if we changed any code or import)
-- Update version on pyproject.toml
-- Run `uv sync` to update the lock file
-- Commit all that's needed
-- Merge to main (if in a branch)
-- Tag the release
-- Push to GitHub

content_core-0.5.0/Makefile (deleted):
@@ -1,8 +0,0 @@
-test:
-	uv run pytest -v
-
-build-docs:
-	repomix . --include "**/*.py,**/*.yaml" --compress --style xml -o ai_docs/core.txt
-
-ruff:
-	ruff check . --fix

content_core-0.5.0/src/content_core/prompter.py (deleted):
@@ -1,159 +0,0 @@
-"""
-A prompt management module using Jinja to generate complex prompts with simple templates.
-"""
-
-import os
-from dataclasses import dataclass
-from datetime import datetime
-from typing import Any, Dict, Optional, Union
-
-from dotenv import load_dotenv
-from jinja2 import Environment, FileSystemLoader, Template
-from langchain_core.prompts import ChatPromptTemplate
-from pydantic import BaseModel
-
-from content_core.logging import logger
-
-load_dotenv()
-
-prompt_path_default = os.path.join(
-    os.path.dirname(os.path.abspath(__file__)), "prompts"
-)
-prompt_path_custom = os.getenv("PROMPT_PATH")
-
-logger.debug(
-    f"Pasta de prompts personalizada: {prompt_path_custom if prompt_path_custom else 'Não definida'}"
-)
-logger.debug(f"Pasta de prompts padrão: {prompt_path_default}")
-
-env_custom = (
-    Environment(loader=FileSystemLoader(prompt_path_custom))
-    if prompt_path_custom and os.path.exists(prompt_path_custom)
-    else None
-)
-env_default = Environment(loader=FileSystemLoader(prompt_path_default))
-
-
-@dataclass
-class Prompter:
-    """
-    A class for managing and rendering prompt templates.
-
-    Attributes:
-        prompt_template (str, optional): The name of the prompt template file.
-        prompt_variation (str, optional): The variation of the prompt template.
-        prompt_text (str, optional): The raw prompt text.
-        template (Union[str, Template], optional): The Jinja2 template object.
-    """
-
-    prompt_template: Optional[str] = None
-    prompt_variation: Optional[str] = "default"
-    prompt_text: Optional[str] = None
-    template: Optional[Union[str, Template]] = None
-    parser: Optional[Any] = None
-
-    def __init__(self, prompt_template=None, prompt_text=None, parser=None):
-        """
-        Initialize the Prompter with either a template file or raw text.
-
-        Args:
-            prompt_template (str, optional): The name of the prompt template file.
-            prompt_text (str, optional): The raw prompt text.
-        """
-        self.prompt_template = prompt_template
-        self.prompt_text = prompt_text
-        self.parser = parser
-        self.setup()
-
-    def setup(self):
-        """
-        Set up the Jinja2 template based on the provided template file or text.
-        Raises:
-            ValueError: If neither prompt_template nor prompt_text is provided.
-        """
-        if self.prompt_template:
-            # Primeiro tenta carregar da pasta personalizada, se disponível
-            if env_custom:
-                try:
-                    self.template = env_custom.get_template(
-                        f"{self.prompt_template}.jinja"
-                    )
-                    logger.debug(
-                        f"Template {self.prompt_template} carregado da pasta personalizada"
-                    )
-                    return
-                except Exception as e:
-                    logger.debug(
-                        f"Template {self.prompt_template} não encontrado na pasta personalizada: {e}"
-                    )
-
-            # Se não encontrou na personalizada ou não há pasta personalizada, tenta a padrão
-            try:
-                self.template = env_default.get_template(
-                    f"{self.prompt_template}.jinja"
-                )
-                logger.debug(
-                    f"Template {self.prompt_template} carregado da pasta padrão"
-                )
-            except Exception as e:
-                raise ValueError(
-                    f"Template {self.prompt_template} não encontrado na pasta padrão: {e}"
-                )
-        elif self.prompt_text:
-            self.template = Template(self.prompt_text)
-        else:
-            raise ValueError("Prompter must have a prompt_template or prompt_text")
-
-        assert self.prompt_template or self.prompt_text, "Prompt is required"
-
-    def to_langchain(self):
-        if isinstance(self.template, str):
-            template_text = self.template
-        else:
-            # For file-based templates, read the raw content
-            template_path = os.path.join("prompts", f"{self.prompt_template}.jinja")
-            with open(template_path, "r") as f:
-                template_text = f.read()
-        return ChatPromptTemplate.from_template(template_text, template_format="jinja2")

-    @classmethod
-    def from_text(cls, text: str):
-        """
-        Create a Prompter instance from raw text, which can contain Jinja code.
-
-        Args:
-            text (str): The raw prompt text.
-
-        Returns:
-            Prompter: A new Prompter instance.
-        """
-
-        return cls(prompt_text=text)
-
-    def render(self, data: Optional[Union[Dict, BaseModel]] = {}) -> str:
-        """
-        Render the prompt template with the given data.
-
-        Args:
-            data (Union[Dict, BaseModel]): The data to be used in rendering the template.
-                Can be either a dictionary or a Pydantic BaseModel.
-
-        Returns:
-            str: The rendered prompt text.
-
-        Raises:
-            AssertionError: If the template is not defined or not a Jinja2 Template.
-        """
-        # Convert Pydantic model to dict if necessary
-        data_dict = data.model_dump() if isinstance(data, BaseModel) else data
-        # Create a new mutable dictionary with the original data
-        render_data = dict(data_dict)
-        render_data["current_time"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-        if self.parser:
-            render_data["format_instructions"] = self.parser.get_format_instructions()
-        assert self.template, "Prompter template is not defined"
-        assert isinstance(
-            self.template, Template
-        ), "Prompter template is not a Jinja2 Template"
-        return self.template.render(render_data)
-        return self.template.render(render_data)

content_core-0.5.0/src/content_core/templated_message.py (deleted):
@@ -1,57 +0,0 @@
-from typing import Dict, Optional, Union
-
-from esperanto import LanguageModel
-from esperanto.common_types import Message
-from pydantic import BaseModel, Field
-
-from content_core.models import ModelFactory
-from content_core.prompter import Prompter
-
-
-class TemplatedMessageInput(BaseModel):
-    system_prompt_template: Optional[str] = ""
-    system_prompt_text: Optional[str] = ""
-    user_prompt_template: Optional[str] = ""
-    user_prompt_text: Optional[str] = ""
-    data: Optional[Union[Dict, BaseModel]] = Field(default_factory=lambda: {})
-    config: Dict = Field(
-        description="The config for the LLM",
-        default={
-            "temperature": 0,
-            "top_p": 1,
-            "max_tokens": 600,
-        },
-    )
-
-
-async def templated_message(
-    input: TemplatedMessageInput, model: Optional[LanguageModel] = None
-) -> str:
-    if not model:
-        model = ModelFactory.get_model('default_model')
-
-    msgs = []
-    if input.system_prompt_template or input.system_prompt_text:
-        msgs.append(
-            Message(
-                role="system",
-                content=Prompter(
-                    prompt_template=input.system_prompt_template,
-                    prompt_text=input.system_prompt_text,
-                ).render(data=input.data),
-            )
-        )
-
-    if input.user_prompt_template or input.user_prompt_text:
-        msgs.append(
-            Message(
-                role="user",
-                content=Prompter(
-                    prompt_template=input.user_prompt_template,
-                    prompt_text=input.user_prompt_text,
-                ).render(data=input.data),
-            )
-        )
-
-    result = await model.achat_complete(msgs)
-    return result.content