HowdenLLM 0.1.19__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
from dotenv import load_dotenv
|
|
2
|
+
import time
|
|
3
|
+
import os
|
|
4
|
+
|
|
5
|
+
def llm_factory(
    full_model_name: str,
):
    """Run a small question-answering demo against the backend named by ``full_model_name``.

    Parameters
    ----------
    full_model_name : str
        Backend-prefixed model id, e.g. ``"openai:gpt-4o-mini"`` or
        ``"huggingface:distilbert-base-cased-distilled-squad"``.

    Raises
    ------
    ValueError
        If the prefix is neither ``"openai:"`` nor ``"huggingface:"``, or if
        ``OPENAI_API_KEY`` is missing for the OpenAI backend.
    """
    load_dotenv()  # Loads variables from .env (e.g. OPENAI_API_KEY) into environment

    if full_model_name.startswith("openai:"):
        from langchain_community.chat_models import ChatOpenAI
        from langchain.chains.question_answering import load_qa_chain
        from langchain.schema import Document

        if "OPENAI_API_KEY" not in os.environ:
            raise ValueError("OPENAI_API_KEY is not set in environment variables.")

        # Everything after the "openai:" prefix is the actual model id.
        # removeprefix (3.9+; file requires >=3.12) is safer than split()[1]
        # if the id itself ever contained the substring "openai:".
        openai_model_name = full_model_name.removeprefix("openai:")
        llm = ChatOpenAI(model=openai_model_name, temperature=0)

        # Your input text as a string
        input_text = "Hugging Face is a company based in New York that specializes in natural language processing."

        # Wrap input text in a Document (LangChain expects docs)
        docs = [Document(page_content=input_text)]

        # Load a QA chain with "stuff" method (no vectorstore, no retrieval)
        qa_chain = load_qa_chain(llm, chain_type="stuff")

        # Your question
        question = "Where is Hugging Face based?"

        start = time.time()
        result = qa_chain.run(input_documents=docs, question=question)
        end = time.time()

        print(f"Question answering time: {end - start:.2f} seconds")
        print("Answer:", result)

    elif full_model_name.startswith("huggingface:"):
        from transformers import pipeline, AutoModelForQuestionAnswering, AutoTokenizer
        from huggingface_hub import snapshot_download

        model_name = full_model_name.removeprefix("huggingface:")
        cache_dir = "./hf_cache"

        # BUGFIX: the original checked os.path.exists() on the path *returned by*
        # snapshot_download, which always exists after a successful download, so
        # the "not downloaded" branch was unreachable. Probe the local cache
        # *before* downloading instead (local_files_only never hits the network).
        try:
            snapshot_download(model_name, cache_dir=cache_dir, local_files_only=True)
            print("Model is already downloaded.")
        except Exception:
            # huggingface_hub raises LocalEntryNotFoundError here; we catch
            # broadly to avoid depending on its internal exception module path.
            print("Model is not downloaded.")

        start = time.time()
        # NOTE: resume_download is deprecated (ignored) in current
        # huggingface_hub, so it is no longer passed.
        local_path = snapshot_download(model_name, cache_dir=cache_dir)
        end = time.time()
        print(f"Model download time: {end - start:.2f} seconds")

        start = time.time()
        model = AutoModelForQuestionAnswering.from_pretrained(model_name, cache_dir=cache_dir)
        end = time.time()
        print(f"Model loading time: {end - start:.2f} seconds")

        start = time.time()
        tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
        end = time.time()
        print(f"Tokenizer loading time: {end - start:.2f} seconds")

        start = time.time()
        qa_pipeline = pipeline(
            "question-answering",
            model=model,
            tokenizer=tokenizer
        )
        end = time.time()
        print(f"Pipeline setup time: {end - start:.2f} seconds")

        context = "Hugging Face is a company based in New York that specializes in natural language processing."
        question = "Where is Hugging Face based?"

        start = time.time()
        result = qa_pipeline(question=question, context=context)
        end = time.time()
        print(f"Question answering time: {end - start:.2f} seconds")

        print("Answer:", result['answer'])
        print("Confidence score:", result['score'])
        print(context[result['start']:result['end']])  # prints "New York"
    else:
        raise ValueError(
            f"Unsupported model_name '{full_model_name}'. Use 'openai:' or 'huggingface:' prefix."
        )
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
Metadata-Version: 2.3
|
|
2
|
+
Name: HowdenLLM
|
|
3
|
+
Version: 0.1.19
|
|
4
|
+
Summary: A simple LLM factory for question answering with OpenAI (via LangChain) and Hugging Face models.
|
|
5
|
+
License: MIT
|
|
6
|
+
Keywords: config,configuration,pydantic,json
|
|
7
|
+
Author: JesperThoftIllemannJ
|
|
8
|
+
Author-email: jesper.jaeger@howdendanmark.dk
|
|
9
|
+
Requires-Python: >=3.12,<3.14
|
|
10
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
11
|
+
Classifier: Programming Language :: Python :: 3
|
|
12
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
13
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
14
|
+
Requires-Dist: langchain (>=0.3.27,<0.4.0)
|
|
15
|
+
Requires-Dist: langchain-community (>=0.3.27,<0.4.0)
|
|
16
|
+
Requires-Dist: openai (>=1.99.9,<2.0.0)
|
|
17
|
+
Requires-Dist: pydantic (>=2.11.7,<3.0.0)
|
|
18
|
+
Requires-Dist: python-dotenv (>=1.1.1,<2.0.0)
|
|
19
|
+
Requires-Dist: torch (>=2.8.0,<3.0.0)
|
|
20
|
+
Requires-Dist: transformers (>=4.55.0,<5.0.0)
|
|
21
|
+
Project-URL: Documentation, https://github.com/yourusername/config
|
|
22
|
+
Project-URL: Homepage, https://github.com/yourusername/config
|
|
23
|
+
Project-URL: Repository, https://github.com/yourusername/config
|
|
24
|
+
Description-Content-Type: text/markdown
|
|
25
|
+
|
|
26
|
+
.\build.ps1
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
.\build.ps1
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "HowdenLLM"
|
|
3
|
+
description = ""
|
|
4
|
+
readme = "README.md"
|
|
5
|
+
requires-python = ">=3.12,<3.14"
|
|
6
|
+
dependencies = [ "transformers (>=4.55.0,<5.0.0)", "torch (>=2.8.0,<3.0.0)", "pydantic (>=2.11.7,<3.0.0)", "python-dotenv (>=1.1.1,<2.0.0)", "langchain (>=0.3.27,<0.4.0)", "langchain-community (>=0.3.27,<0.4.0)", "openai (>=1.99.9,<2.0.0)",]
|
|
7
|
+
[[project.authors]]
|
|
8
|
+
name = "JesperThoftIllemannJ"
|
|
9
|
+
email = "jesper.jaeger@howdendanmark.dk"
|
|
10
|
+
|
|
11
|
+
[build-system]
|
|
12
|
+
requires = [ "poetry-core>=2.0.0,<3.0.0",]
|
|
13
|
+
build-backend = "poetry.core.masonry.api"
|
|
14
|
+
|
|
15
|
+
[tool.poetry]
|
|
16
|
+
name = "HowdenLLM"
|
|
17
|
+
version = "0.1.19"
|
|
18
|
+
description = "A simple LLM factory for question answering with OpenAI (via LangChain) and Hugging Face models."
|
|
19
|
+
authors = [ "JesperThoftIllemannJ <jesper.jaeger@howdendanmark.dk>",]
|
|
20
|
+
readme = "README.md"
|
|
21
|
+
license = "MIT"
|
|
22
|
+
keywords = [ "config", "configuration", "pydantic", "json",]
|
|
23
|
+
homepage = "https://github.com/yourusername/config"
|
|
24
|
+
repository = "https://github.com/yourusername/config"
|
|
25
|
+
documentation = "https://github.com/yourusername/config"
|
|
26
|
+
[[tool.poetry.packages]]
|
|
27
|
+
include = "HowdenLLM"
|
|
28
|
+
|
|
29
|
+
[tool.poetry.group.dev.dependencies]
|
|
30
|
+
howdenconfig = "^0.1.12"
|
|
31
|
+
toml = "^0.10.2"
|
|
32
|
+
tomlkit = "^0.13.3"
|