h-ai-brain 0.0.17__tar.gz → 0.0.20__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {h_ai_brain-0.0.17/src/h_ai_brain.egg-info → h_ai_brain-0.0.20}/PKG-INFO +3 -2
- {h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/pyproject.toml +7 -3
- h_ai_brain-0.0.20/src/h_ai/application/hai_service.py +20 -0
- h_ai_brain-0.0.20/src/h_ai/domain/llm_config.py +6 -0
- {h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/src/h_ai/infrastructure/llm/ollama/ollama_generate_repository.py +10 -6
- {h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/src/h_ai/infrastructure/llm/prompt_loader.py +1 -1
- {h_ai_brain-0.0.17 → h_ai_brain-0.0.20/src/h_ai_brain.egg-info}/PKG-INFO +3 -2
- {h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/src/h_ai_brain.egg-info/SOURCES.txt +1 -2
- h_ai_brain-0.0.20/src/h_ai_brain.egg-info/requires.txt +5 -0
- h_ai_brain-0.0.17/src/h_ai/application/hai_service.py +0 -13
- h_ai_brain-0.0.17/src/h_ai/infrastructure/llm/ollama/__init__.py +0 -0
- h_ai_brain-0.0.17/src/h_ai/infrastructure/llm/ollama/models/__init__.py +0 -0
- h_ai_brain-0.0.17/src/h_ai_brain.egg-info/requires.txt +0 -4
- {h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/LICENSE +0 -0
- {h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/NOTICE.txt +0 -0
- {h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/setup.cfg +0 -0
- {h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/src/h_ai/__init__.py +0 -0
- {h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/src/h_ai/application/__init__.py +0 -0
- {h_ai_brain-0.0.17/src/h_ai/application/system_prompts → h_ai_brain-0.0.20/src/h_ai/domain}/__init__.py +0 -0
- {h_ai_brain-0.0.17/src/h_ai/application/system_prompts/roles → h_ai_brain-0.0.20/src/h_ai/domain/reasoning}/__init__.py +0 -0
- {h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/src/h_ai/domain/reasoning/llm_chat_repository.py +0 -0
- {h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/src/h_ai/domain/reasoning/llm_generate_respository.py +0 -0
- {h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/src/h_ai/domain/reasoning/llm_tool_repository.py +0 -0
- {h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/src/h_ai/domain/reasoning/text_analysis.py +0 -0
- {h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/src/h_ai/domain/reasoning/tool_message.py +0 -0
- {h_ai_brain-0.0.17/src/h_ai/domain → h_ai_brain-0.0.20/src/h_ai/infrastructure}/__init__.py +0 -0
- {h_ai_brain-0.0.17/src/h_ai/domain/reasoning → h_ai_brain-0.0.20/src/h_ai/infrastructure/llm}/__init__.py +0 -0
- {h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/src/h_ai/infrastructure/llm/data_handler.py +0 -0
- {h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/src/h_ai/infrastructure/llm/llm_response_cleaner.py +0 -0
- {h_ai_brain-0.0.17/src/h_ai/infrastructure → h_ai_brain-0.0.20/src/h_ai/infrastructure/llm/ollama}/__init__.py +0 -0
- {h_ai_brain-0.0.17/src/h_ai/infrastructure/llm → h_ai_brain-0.0.20/src/h_ai/infrastructure/llm/ollama/models}/__init__.py +0 -0
- {h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/src/h_ai/infrastructure/llm/ollama/models/ollama_chat_message.py +0 -0
- {h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/src/h_ai/infrastructure/llm/ollama/models/ollama_chat_session.py +0 -0
- {h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/src/h_ai/infrastructure/llm/ollama/ollama_chat_repository.py +0 -0
- {h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/src/h_ai/infrastructure/llm/ollama/ollama_tool_repository.py +0 -0
- {h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/src/h_ai/infrastructure/llm/prompt_helper.py +0 -0
- {h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/src/h_ai_brain.egg-info/dependency_links.txt +0 -0
- {h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/src/h_ai_brain.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.4
|
2
2
|
Name: h_ai_brain
|
3
|
-
Version: 0.0.17
|
3
|
+
Version: 0.0.20
|
4
4
|
Summary: AI Research agent API
|
5
5
|
Author-email: shoebill <shoebill.hai@gmail.com>
|
6
6
|
Classifier: Programming Language :: Python :: 3
|
@@ -10,7 +10,8 @@ Requires-Python: >=3.10
|
|
10
10
|
Description-Content-Type: text/markdown
|
11
11
|
License-File: LICENSE
|
12
12
|
License-File: NOTICE.txt
|
13
|
-
Requires-Dist: h_message_bus~=0.0.
|
13
|
+
Requires-Dist: h_message_bus~=0.0.22
|
14
|
+
Requires-Dist: requests~=2.32.3
|
14
15
|
Provides-Extra: dev
|
15
16
|
Requires-Dist: pytest; extra == "dev"
|
16
17
|
Dynamic: license-file
|
@@ -4,7 +4,7 @@
|
|
4
4
|
|
5
5
|
[project]
|
6
6
|
name = "h_ai_brain"
|
7
|
-
version = "0.0.17"
|
7
|
+
version = "0.0.20"
|
8
8
|
authors = [
|
9
9
|
{name = "shoebill", email = "shoebill.hai@gmail.com"},
|
10
10
|
]
|
@@ -17,11 +17,15 @@
|
|
17
17
|
"Operating System :: OS Independent",
|
18
18
|
]
|
19
19
|
dependencies = [
|
20
|
-
"h_message_bus~=0.0.
|
20
|
+
"h_message_bus~=0.0.22",
|
21
|
+
"requests~=2.32.3"
|
21
22
|
]
|
22
23
|
|
23
24
|
[project.optional-dependencies]
|
24
25
|
dev = [
|
25
26
|
"pytest",
|
26
27
|
# Other development dependencies
|
27
|
-
]
|
28
|
+
]
|
29
|
+
|
30
|
+
[tool.setuptools.package-data]
|
31
|
+
"src.h_ai.resources" = ["*.json"] # This will include all JSON files in the resources directory
|
@@ -0,0 +1,20 @@
|
|
1
|
+
from h_message_bus import NatsPublisherAdapter
|
2
|
+
|
3
|
+
from ..domain.llm_config import LLMConfig
|
4
|
+
from ..infrastructure.llm.ollama.ollama_generate_repository import OllamaGenerateRepository
|
5
|
+
|
6
|
+
|
7
|
+
class HaiService:
|
8
|
+
def __init__(self, nats_publisher_adapter: NatsPublisherAdapter, llm_config: LLMConfig):
|
9
|
+
self.nats_publisher_adapter = nats_publisher_adapter
|
10
|
+
self.llm_config = llm_config
|
11
|
+
self.llm_generate_repository = OllamaGenerateRepository(
|
12
|
+
self.llm_config.url,
|
13
|
+
self.llm_config.model_name,
|
14
|
+
temperature=self.llm_config.temperature,
|
15
|
+
max_tokens=self.llm_config.max_tokens)
|
16
|
+
|
17
|
+
def ask_question(self, question: str, system_prompt: str = None, max_tokens = None) -> str:
|
18
|
+
return self.llm_generate_repository.generate(question, system_prompt, max_tokens)
|
19
|
+
|
20
|
+
|
@@ -2,20 +2,22 @@ import uuid
|
|
2
2
|
|
3
3
|
import requests
|
4
4
|
|
5
|
+
from ..llm_response_cleaner import clean_llm_response
|
5
6
|
from ....domain.reasoning.llm_generate_respository import LlmGenerateRepository
|
6
7
|
|
7
8
|
|
8
9
|
class OllamaGenerateRepository(LlmGenerateRepository):
|
9
10
|
|
10
|
-
def __init__(self, api_url: str, model_name: str, system_prompt: str = None, temperature: float = None, seed: int = None):
|
11
|
+
def __init__(self, api_url: str, model_name: str, system_prompt: str = None, temperature: float = None, seed: int = None, max_tokens: int = 5000):
|
11
12
|
self.model_name = model_name
|
12
13
|
self.system_prompt = system_prompt
|
13
14
|
self.api_url = api_url
|
14
15
|
self.temperature = temperature
|
15
16
|
self.seed = seed
|
17
|
+
self.max_tokens = max_tokens
|
16
18
|
|
17
19
|
|
18
|
-
def generate(self, user_prompt: str, system_prompt: str = None, session_id: str = None) -> str|None:
|
20
|
+
def generate(self, user_prompt: str, system_prompt: str = None, session_id: str = None, max_tokens: int = None) -> str|None:
|
19
21
|
url = f"{self.api_url}/generate"
|
20
22
|
random_guid = uuid.uuid4()
|
21
23
|
guid_str = str(random_guid)
|
@@ -26,16 +28,18 @@ class OllamaGenerateRepository(LlmGenerateRepository):
|
|
26
28
|
"system": system_prompt,
|
27
29
|
"stream": False,
|
28
30
|
"session": guid_str,
|
29
|
-
"num_ctx": "
|
30
|
-
"temperature": "
|
31
|
+
"num_ctx": f"{self.max_tokens}",
|
32
|
+
"temperature": f"{self.temperature}"
|
31
33
|
}
|
32
34
|
|
33
35
|
if session_id:
|
34
36
|
payload["session"] = session_id
|
35
37
|
if self.seed:
|
36
|
-
payload["seed"] = self.seed
|
38
|
+
payload["seed"] = f"{self.seed}"
|
37
39
|
if self.temperature:
|
38
|
-
payload["temperature"] = self.temperature
|
40
|
+
payload["temperature"] = f"{self.temperature}"
|
41
|
+
if max_tokens:
|
42
|
+
payload["num_ctx"] = f"{max_tokens}"
|
39
43
|
|
40
44
|
try:
|
41
45
|
print(payload)
|
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.4
|
2
2
|
Name: h_ai_brain
|
3
|
-
Version: 0.0.17
|
3
|
+
Version: 0.0.20
|
4
4
|
Summary: AI Research agent API
|
5
5
|
Author-email: shoebill <shoebill.hai@gmail.com>
|
6
6
|
Classifier: Programming Language :: Python :: 3
|
@@ -10,7 +10,8 @@ Requires-Python: >=3.10
|
|
10
10
|
Description-Content-Type: text/markdown
|
11
11
|
License-File: LICENSE
|
12
12
|
License-File: NOTICE.txt
|
13
|
-
Requires-Dist: h_message_bus~=0.0.
|
13
|
+
Requires-Dist: h_message_bus~=0.0.22
|
14
|
+
Requires-Dist: requests~=2.32.3
|
14
15
|
Provides-Extra: dev
|
15
16
|
Requires-Dist: pytest; extra == "dev"
|
16
17
|
Dynamic: license-file
|
@@ -4,9 +4,8 @@ pyproject.toml
|
|
4
4
|
src/h_ai/__init__.py
|
5
5
|
src/h_ai/application/__init__.py
|
6
6
|
src/h_ai/application/hai_service.py
|
7
|
-
src/h_ai/application/system_prompts/__init__.py
|
8
|
-
src/h_ai/application/system_prompts/roles/__init__.py
|
9
7
|
src/h_ai/domain/__init__.py
|
8
|
+
src/h_ai/domain/llm_config.py
|
10
9
|
src/h_ai/domain/reasoning/__init__.py
|
11
10
|
src/h_ai/domain/reasoning/llm_chat_repository.py
|
12
11
|
src/h_ai/domain/reasoning/llm_generate_respository.py
|
@@ -1,13 +0,0 @@
|
|
1
|
-
from h_message_bus import NatsPublisherAdapter
|
2
|
-
from h_message_bus.domain.request_messages.vector_read_metadata_request_message import VectorReadMetaDataRequestMessage
|
3
|
-
from h_message_bus.domain.request_messages.vector_read_metadata_response_message import VectorReadMetaDataResponseMessage
|
4
|
-
|
5
|
-
class HaiService:
|
6
|
-
def __init__(self, nats_publisher_adapter: NatsPublisherAdapter):
|
7
|
-
self.nats_publisher_adapter = nats_publisher_adapter
|
8
|
-
|
9
|
-
async def get_knowledgebase_metadata(self) -> VectorReadMetaDataResponseMessage:
|
10
|
-
message = VectorReadMetaDataRequestMessage.create_message()
|
11
|
-
response = await self.nats_publisher_adapter.request(message)
|
12
|
-
metadata_result = VectorReadMetaDataResponseMessage.from_hai_message(response)
|
13
|
-
return metadata_result
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
{h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/src/h_ai/domain/reasoning/llm_generate_respository.py
RENAMED
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
{h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/src/h_ai/infrastructure/llm/ollama/ollama_chat_repository.py
RENAMED
File without changes
|
{h_ai_brain-0.0.17 → h_ai_brain-0.0.20}/src/h_ai/infrastructure/llm/ollama/ollama_tool_repository.py
RENAMED
File without changes
|
File without changes
|
File without changes
|
File without changes
|