h-ai-brain 0.0.16__tar.gz → 0.0.19__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. {h_ai_brain-0.0.16/src/h_ai_brain.egg-info → h_ai_brain-0.0.19}/PKG-INFO +2 -2
  2. {h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/pyproject.toml +6 -3
  3. h_ai_brain-0.0.19/src/h_ai/application/hai_service.py +20 -0
  4. h_ai_brain-0.0.19/src/h_ai/domain/llm_config.py +6 -0
  5. {h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/src/h_ai/infrastructure/llm/ollama/ollama_generate_repository.py +10 -6
  6. {h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/src/h_ai/infrastructure/llm/prompt_loader.py +1 -1
  7. {h_ai_brain-0.0.16 → h_ai_brain-0.0.19/src/h_ai_brain.egg-info}/PKG-INFO +2 -2
  8. {h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/src/h_ai_brain.egg-info/SOURCES.txt +1 -2
  9. h_ai_brain-0.0.19/src/h_ai_brain.egg-info/requires.txt +4 -0
  10. h_ai_brain-0.0.16/src/h_ai/application/hai_service.py +0 -7
  11. h_ai_brain-0.0.16/src/h_ai/infrastructure/llm/ollama/__init__.py +0 -0
  12. h_ai_brain-0.0.16/src/h_ai/infrastructure/llm/ollama/models/__init__.py +0 -0
  13. h_ai_brain-0.0.16/src/h_ai_brain.egg-info/requires.txt +0 -4
  14. {h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/LICENSE +0 -0
  15. {h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/NOTICE.txt +0 -0
  16. {h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/setup.cfg +0 -0
  17. {h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/src/h_ai/__init__.py +0 -0
  18. {h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/src/h_ai/application/__init__.py +0 -0
  19. {h_ai_brain-0.0.16/src/h_ai/application/system_prompts → h_ai_brain-0.0.19/src/h_ai/domain}/__init__.py +0 -0
  20. {h_ai_brain-0.0.16/src/h_ai/application/system_prompts/roles → h_ai_brain-0.0.19/src/h_ai/domain/reasoning}/__init__.py +0 -0
  21. {h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/src/h_ai/domain/reasoning/llm_chat_repository.py +0 -0
  22. {h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/src/h_ai/domain/reasoning/llm_generate_respository.py +0 -0
  23. {h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/src/h_ai/domain/reasoning/llm_tool_repository.py +0 -0
  24. {h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/src/h_ai/domain/reasoning/text_analysis.py +0 -0
  25. {h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/src/h_ai/domain/reasoning/tool_message.py +0 -0
  26. {h_ai_brain-0.0.16/src/h_ai/domain → h_ai_brain-0.0.19/src/h_ai/infrastructure}/__init__.py +0 -0
  27. {h_ai_brain-0.0.16/src/h_ai/domain/reasoning → h_ai_brain-0.0.19/src/h_ai/infrastructure/llm}/__init__.py +0 -0
  28. {h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/src/h_ai/infrastructure/llm/data_handler.py +0 -0
  29. {h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/src/h_ai/infrastructure/llm/llm_response_cleaner.py +0 -0
  30. {h_ai_brain-0.0.16/src/h_ai/infrastructure → h_ai_brain-0.0.19/src/h_ai/infrastructure/llm/ollama}/__init__.py +0 -0
  31. {h_ai_brain-0.0.16/src/h_ai/infrastructure/llm → h_ai_brain-0.0.19/src/h_ai/infrastructure/llm/ollama/models}/__init__.py +0 -0
  32. {h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/src/h_ai/infrastructure/llm/ollama/models/ollama_chat_message.py +0 -0
  33. {h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/src/h_ai/infrastructure/llm/ollama/models/ollama_chat_session.py +0 -0
  34. {h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/src/h_ai/infrastructure/llm/ollama/ollama_chat_repository.py +0 -0
  35. {h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/src/h_ai/infrastructure/llm/ollama/ollama_tool_repository.py +0 -0
  36. {h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/src/h_ai/infrastructure/llm/prompt_helper.py +0 -0
  37. {h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/src/h_ai_brain.egg-info/dependency_links.txt +0 -0
  38. {h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/src/h_ai_brain.egg-info/top_level.txt +0 -0
{h_ai_brain-0.0.16/src/h_ai_brain.egg-info → h_ai_brain-0.0.19}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: h_ai_brain
-Version: 0.0.16
+Version: 0.0.19
 Summary: AI Research agent API
 Author-email: shoebill <shoebill.hai@gmail.com>
 Classifier: Programming Language :: Python :: 3
@@ -10,7 +10,7 @@ Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
 License-File: NOTICE.txt
-Requires-Dist: h_message_bus~=0.0.21
+Requires-Dist: h_message_bus~=0.0.22
 Provides-Extra: dev
 Requires-Dist: pytest; extra == "dev"
 Dynamic: license-file
{h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/pyproject.toml

@@ -4,7 +4,7 @@

 [project]
 name = "h_ai_brain"
-version = "0.0.16"
+version = "0.0.19"
 authors = [
     {name = "shoebill", email = "shoebill.hai@gmail.com"},
 ]
@@ -17,11 +17,14 @@
     "Operating System :: OS Independent",
 ]
 dependencies = [
-    "h_message_bus~=0.0.21",
+    "h_message_bus~=0.0.22",
 ]

 [project.optional-dependencies]
 dev = [
     "pytest",
     # Other development dependencies
-]
+]
+
+[tool.setuptools.package-data]
+"src.h_ai.resources" = ["*.json"]  # This will include all JSON files in the resources directory
h_ai_brain-0.0.19/src/h_ai/application/hai_service.py

@@ -0,0 +1,20 @@
+from h_message_bus import NatsPublisherAdapter
+
+from ..domain.llm_config import LLMConfig
+from ..infrastructure.llm.ollama.ollama_generate_repository import OllamaGenerateRepository
+
+
+class HaiService:
+    def __init__(self, nats_publisher_adapter: NatsPublisherAdapter, llm_config: LLMConfig):
+        self.nats_publisher_adapter = nats_publisher_adapter
+        self.llm_config = llm_config
+        self.llm_generate_repository = OllamaGenerateRepository(
+            self.llm_config.url,
+            self.llm_config.model_name,
+            temperature=self.llm_config.temperature,
+            max_tokens=self.llm_config.max_tokens)
+
+    def ask_question(self, question: str, system_prompt: str = None, max_tokens = None) -> str:
+        return self.llm_generate_repository.generate(question, system_prompt, max_tokens)
+
+
h_ai_brain-0.0.19/src/h_ai/domain/llm_config.py

@@ -0,0 +1,6 @@
+class LLMConfig:
+    def __init__(self, url: str, model_name: str, temperature: float = 0.6, max_tokens: int = 2500):
+        self.url = url
+        self.model_name = model_name
+        self.temperature = temperature
+        self.max_tokens = max_tokens
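Taken together, the two new files give the package a configurable entry point: LLMConfig carries the connection settings and HaiService wires them into an OllamaGenerateRepository. A minimal usage sketch, not part of the diff; the URL and model name are placeholder values, and None stands in for a real NatsPublisherAdapter:

from h_ai.application.hai_service import HaiService
from h_ai.domain.llm_config import LLMConfig

# Placeholder endpoint and model name; substitute real values.
config = LLMConfig(url="http://localhost:11434/api", model_name="llama3")

# A real NatsPublisherAdapter would normally be injected here.
service = HaiService(nats_publisher_adapter=None, llm_config=config)
answer = service.ask_question("What is a message bus?")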
{h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/src/h_ai/infrastructure/llm/ollama/ollama_generate_repository.py

@@ -2,20 +2,22 @@ import uuid

 import requests

+from ..llm_response_cleaner import clean_llm_response
 from ....domain.reasoning.llm_generate_respository import LlmGenerateRepository


 class OllamaGenerateRepository(LlmGenerateRepository):

-    def __init__(self, api_url: str, model_name: str, system_prompt: str = None, temperature: float = None, seed: int = None):
+    def __init__(self, api_url: str, model_name: str, system_prompt: str = None, temperature: float = None, seed: int = None, max_tokens: int = 5000):
         self.model_name = model_name
         self.system_prompt = system_prompt
         self.api_url = api_url
         self.temperature = temperature
         self.seed = seed
+        self.max_tokens = max_tokens


-    def generate(self, user_prompt: str, system_prompt: str = None, session_id: str = None) -> str|None:
+    def generate(self, user_prompt: str, system_prompt: str = None, session_id: str = None, max_tokens: int = None) -> str|None:
         url = f"{self.api_url}/generate"
         random_guid = uuid.uuid4()
         guid_str = str(random_guid)
@@ -26,16 +28,18 @@ class OllamaGenerateRepository(LlmGenerateRepository):
             "system": system_prompt,
             "stream": False,
             "session": guid_str,
-            "num_ctx": "5000",
-            "temperature": "0.6"
+            "num_ctx": f"{self.max_tokens}",
+            "temperature": f"{self.temperature}"
         }

         if session_id:
             payload["session"] = session_id
         if self.seed:
-            payload["seed"] = self.seed
+            payload["seed"] = f"{self.seed}"
         if self.temperature:
-            payload["temperature"] = self.temperature
+            payload["temperature"] = f"{self.temperature}"
+        if max_tokens:
+            payload["num_ctx"] = f"{max_tokens}"

         try:
             print(payload)
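The reworked payload logic turns the previously hard-coded num_ctx and temperature into instance defaults that a per-call max_tokens argument can override. A minimal sketch of the new behavior, not part of the diff, with placeholder endpoint and model name:

from h_ai.infrastructure.llm.ollama.ollama_generate_repository import OllamaGenerateRepository

repo = OllamaGenerateRepository(
    api_url="http://localhost:11434/api",  # placeholder Ollama endpoint
    model_name="llama3",                   # placeholder model name
    temperature=0.7,
    max_tokens=5000,
)

repo.generate("Summarise the release notes.")                   # num_ctx = 5000 (instance default)
repo.generate("Summarise the release notes.", max_tokens=1024)  # num_ctx = 1024 for this call only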
{h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/src/h_ai/infrastructure/llm/prompt_loader.py

@@ -4,7 +4,7 @@ import os

 class PromptLoader:
     def __init__(self, file_path):
-        # Resolve relative path to absolute path
+        # Resolve a relative path to an absolute path
         absolute_file_path = os.path.abspath(file_path)

         with open(absolute_file_path, "r") as file:
{h_ai_brain-0.0.16 → h_ai_brain-0.0.19/src/h_ai_brain.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: h_ai_brain
-Version: 0.0.16
+Version: 0.0.19
 Summary: AI Research agent API
 Author-email: shoebill <shoebill.hai@gmail.com>
 Classifier: Programming Language :: Python :: 3
@@ -10,7 +10,7 @@ Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
 License-File: NOTICE.txt
-Requires-Dist: h_message_bus~=0.0.21
+Requires-Dist: h_message_bus~=0.0.22
 Provides-Extra: dev
 Requires-Dist: pytest; extra == "dev"
 Dynamic: license-file
{h_ai_brain-0.0.16 → h_ai_brain-0.0.19}/src/h_ai_brain.egg-info/SOURCES.txt

@@ -4,9 +4,8 @@ pyproject.toml
 src/h_ai/__init__.py
 src/h_ai/application/__init__.py
 src/h_ai/application/hai_service.py
-src/h_ai/application/system_prompts/__init__.py
-src/h_ai/application/system_prompts/roles/__init__.py
 src/h_ai/domain/__init__.py
+src/h_ai/domain/llm_config.py
 src/h_ai/domain/reasoning/__init__.py
 src/h_ai/domain/reasoning/llm_chat_repository.py
 src/h_ai/domain/reasoning/llm_generate_respository.py
h_ai_brain-0.0.19/src/h_ai_brain.egg-info/requires.txt

@@ -0,0 +1,4 @@
+h_message_bus~=0.0.22
+
+[dev]
+pytest
@@ -1,7 +0,0 @@
1
- from h_message_bus import NatsPublisherAdapter
2
-
3
- class HaiService:
4
- def __init__(self, nats_publisher_adapter: NatsPublisherAdapter):
5
- self.nats_publisher_adapter = nats_publisher_adapter
6
-
7
-
@@ -1,4 +0,0 @@
1
- h_message_bus~=0.0.21
2
-
3
- [dev]
4
- pytest