biblicus-0.14.0-py3-none-any.whl → biblicus-0.15.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- biblicus/__init__.py +1 -1
- biblicus/_vendor/dotyaml/__init__.py +2 -2
- biblicus/_vendor/dotyaml/loader.py +40 -1
- biblicus/ai/__init__.py +39 -0
- biblicus/ai/embeddings.py +114 -0
- biblicus/ai/llm.py +138 -0
- biblicus/ai/models.py +226 -0
- biblicus/analysis/__init__.py +5 -2
- biblicus/analysis/markov.py +1624 -0
- biblicus/analysis/models.py +754 -1
- biblicus/analysis/topic_modeling.py +98 -19
- biblicus/backends/sqlite_full_text_search.py +4 -2
- biblicus/cli.py +118 -23
- biblicus/recipes.py +136 -0
- biblicus/text/__init__.py +43 -0
- biblicus/text/annotate.py +222 -0
- biblicus/text/extract.py +210 -0
- biblicus/text/link.py +519 -0
- biblicus/text/markup.py +200 -0
- biblicus/text/models.py +319 -0
- biblicus/text/prompts.py +113 -0
- biblicus/text/redact.py +229 -0
- biblicus/text/slice.py +155 -0
- biblicus/text/tool_loop.py +334 -0
- {biblicus-0.14.0.dist-info → biblicus-0.15.0.dist-info}/METADATA +88 -25
- {biblicus-0.14.0.dist-info → biblicus-0.15.0.dist-info}/RECORD +30 -15
- biblicus/analysis/llm.py +0 -106
- {biblicus-0.14.0.dist-info → biblicus-0.15.0.dist-info}/WHEEL +0 -0
- {biblicus-0.14.0.dist-info → biblicus-0.15.0.dist-info}/entry_points.txt +0 -0
- {biblicus-0.14.0.dist-info → biblicus-0.15.0.dist-info}/licenses/LICENSE +0 -0
- {biblicus-0.14.0.dist-info → biblicus-0.15.0.dist-info}/top_level.txt +0 -0
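The most visible structural change in this release is the removal of biblicus/analysis/llm.py alongside the new biblicus/ai/ and biblicus/text/ packages. Code written against 0.14.0 that imported the old module will stop resolving after the upgrade. The sketch below only shows the import that breaks; the contents of the new biblicus/ai/llm.py are not visible in this diff, so any replacement import path would be an assumption.

```python
# Worked against biblicus 0.14.0; the module is deleted in 0.15.0, so this now
# raises ModuleNotFoundError. A likely successor lives under biblicus.ai, but
# that is not confirmed by this diff and must be checked against the 0.15.0 sources.
from biblicus.analysis.llm import LlmClientConfig, generate_completion
```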
biblicus/analysis/llm.py
DELETED
@@ -1,106 +0,0 @@
-"""
-Lightweight LLM client configuration for analysis pipelines.
-"""
-
-from __future__ import annotations
-
-from enum import Enum
-from typing import Optional
-
-from pydantic import Field, field_validator
-
-from ..user_config import resolve_openai_api_key
-from .schema import AnalysisSchemaModel
-
-
-class LlmProvider(str, Enum):
-    """
-    Supported LLM providers.
-    """
-
-    OPENAI = "openai"
-
-
-class LlmClientConfig(AnalysisSchemaModel):
-    """
-    Configuration for an LLM client invocation.
-
-    :ivar provider: LLM provider identifier.
-    :vartype provider: LlmProvider
-    :ivar model: Model identifier for the provider.
-    :vartype model: str
-    :ivar api_key: Optional API key override.
-    :vartype api_key: str or None
-    :ivar temperature: Optional generation temperature.
-    :vartype temperature: float or None
-    :ivar max_tokens: Optional maximum output tokens.
-    :vartype max_tokens: int or None
-    :ivar max_retries: Optional maximum retry count for transient failures.
-    :vartype max_retries: int
-    """
-
-    provider: LlmProvider
-    model: str = Field(min_length=1)
-    api_key: Optional[str] = None
-    temperature: Optional[float] = Field(default=None, ge=0.0)
-    max_tokens: Optional[int] = Field(default=None, ge=1)
-    max_retries: int = Field(default=0, ge=0)
-
-    @field_validator("provider", mode="before")
-    @classmethod
-    def _parse_provider(cls, value: object) -> LlmProvider:
-        if isinstance(value, LlmProvider):
-            return value
-        if isinstance(value, str):
-            return LlmProvider(value)
-        raise ValueError("llm client provider must be a string or LlmProvider")
-
-
-def generate_completion(
-    *,
-    client: LlmClientConfig,
-    system_prompt: Optional[str],
-    user_prompt: str,
-) -> str:
-    """
-    Generate a completion using the configured LLM provider.
-
-    :param client: LLM client configuration.
-    :type client: LlmClientConfig
-    :param system_prompt: Optional system prompt content.
-    :type system_prompt: str or None
-    :param user_prompt: User prompt content.
-    :type user_prompt: str
-    :return: Generated completion text.
-    :rtype: str
-    :raises ValueError: If required dependencies or credentials are missing.
-    """
-    try:
-        from openai import OpenAI
-    except ImportError as import_error:
-        raise ValueError(
-            "OpenAI LLM provider requires an optional dependency. "
-            'Install it with pip install "biblicus[openai]".'
-        ) from import_error
-    api_key = client.api_key or resolve_openai_api_key()
-    if api_key is None:
-        raise ValueError(
-            "OpenAI LLM provider requires an OpenAI API key. "
-            "Set OPENAI_API_KEY or configure it in ~/.biblicus/config.yml or ./.biblicus/config.yml under "
-            "openai.api_key."
-        )
-
-    messages = []
-    if system_prompt:
-        messages.append({"role": "system", "content": system_prompt})
-    messages.append({"role": "user", "content": user_prompt})
-
-    client_instance = OpenAI(api_key=api_key)
-    response = client_instance.chat.completions.create(
-        model=client.model,
-        messages=messages,
-        temperature=client.temperature,
-        max_tokens=client.max_tokens,
-    )
-    content = response.choices[0].message.content
-    return str(content or "")
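For downstream callers auditing this removal, here is a minimal usage sketch of the API as it existed in 0.14.0, reconstructed from the deleted source above; the model identifier and prompt strings are illustrative placeholders, not values taken from the package.

```python
from biblicus.analysis.llm import LlmClientConfig, LlmProvider, generate_completion

# Field names and constraints mirror the pydantic model defined in the deleted module.
config = LlmClientConfig(
    provider=LlmProvider.OPENAI,
    model="gpt-4o-mini",  # illustrative model id
    temperature=0.0,
    max_tokens=512,
)

# generate_completion is keyword-only. It resolves the API key from the config,
# the OPENAI_API_KEY environment variable, or ~/.biblicus/config.yml, and raises
# ValueError if the key or the optional openai dependency is missing.
summary = generate_completion(
    client=config,
    system_prompt="You are a concise assistant.",
    user_prompt="Summarize the provided passage in one sentence.",
)
print(summary)
```

Whatever replaces this entry point in 0.15.0 is not shown in this diff.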