h-ai-brain 0.0.21__tar.gz → 0.0.23__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. {h_ai_brain-0.0.21/src/h_ai_brain.egg-info → h_ai_brain-0.0.23}/PKG-INFO +1 -1
  2. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/pyproject.toml +1 -1
  3. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai/application/hai_service.py +2 -1
  4. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai/domain/llm_config.py +3 -2
  5. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai/infrastructure/llm/data_handler.py +10 -6
  6. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai/infrastructure/llm/ollama/ollama_generate_repository.py +10 -4
  7. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23/src/h_ai_brain.egg-info}/PKG-INFO +1 -1
  8. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/LICENSE +0 -0
  9. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/NOTICE.txt +0 -0
  10. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/setup.cfg +0 -0
  11. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai/__init__.py +0 -0
  12. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai/application/__init__.py +0 -0
  13. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai/domain/__init__.py +0 -0
  14. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai/domain/reasoning/__init__.py +0 -0
  15. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai/domain/reasoning/llm_chat_repository.py +0 -0
  16. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai/domain/reasoning/llm_generate_respository.py +0 -0
  17. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai/domain/reasoning/llm_tool_repository.py +0 -0
  18. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai/domain/reasoning/text_analysis.py +0 -0
  19. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai/domain/reasoning/tool_message.py +0 -0
  20. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai/infrastructure/__init__.py +0 -0
  21. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai/infrastructure/llm/__init__.py +0 -0
  22. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai/infrastructure/llm/llm_response_cleaner.py +0 -0
  23. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai/infrastructure/llm/ollama/__init__.py +0 -0
  24. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai/infrastructure/llm/ollama/models/__init__.py +0 -0
  25. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai/infrastructure/llm/ollama/models/ollama_chat_message.py +0 -0
  26. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai/infrastructure/llm/ollama/models/ollama_chat_session.py +0 -0
  27. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai/infrastructure/llm/ollama/ollama_chat_repository.py +0 -0
  28. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai/infrastructure/llm/ollama/ollama_tool_repository.py +0 -0
  29. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai/infrastructure/llm/prompt_helper.py +0 -0
  30. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai/infrastructure/llm/prompt_loader.py +0 -0
  31. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai_brain.egg-info/SOURCES.txt +0 -0
  32. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai_brain.egg-info/dependency_links.txt +0 -0
  33. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai_brain.egg-info/requires.txt +0 -0
  34. {h_ai_brain-0.0.21 → h_ai_brain-0.0.23}/src/h_ai_brain.egg-info/top_level.txt +0 -0

--- h_ai_brain-0.0.21/src/h_ai_brain.egg-info/PKG-INFO
+++ h_ai_brain-0.0.23/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: h_ai_brain
-Version: 0.0.21
+Version: 0.0.23
 Summary: AI Research agent API
 Author-email: shoebill <shoebill.hai@gmail.com>
 Classifier: Programming Language :: Python :: 3

--- h_ai_brain-0.0.21/pyproject.toml
+++ h_ai_brain-0.0.23/pyproject.toml
@@ -4,7 +4,7 @@
 
 [project]
 name = "h_ai_brain"
-version = "0.0.21"
+version = "0.0.23"
 authors = [
     {name = "shoebill", email = "shoebill.hai@gmail.com"},
 ]

--- h_ai_brain-0.0.21/src/h_ai/application/hai_service.py
+++ h_ai_brain-0.0.23/src/h_ai/application/hai_service.py
@@ -9,7 +9,8 @@ class HaiService:
             self.llm_config.url,
             self.llm_config.model_name,
             temperature=self.llm_config.temperature,
-            max_tokens=self.llm_config.max_tokens)
+            max_tokens=self.llm_config.max_tokens,
+            api_token=self.llm_config.api_token)
 
     def ask_question(self, question: str, system_prompt: str = None, max_tokens = None) -> str:
         return self.llm_generate_repository.generate(question, system_prompt, max_tokens)
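
A minimal usage sketch of the change above: the service now forwards api_token from its config to the generate repository. The HaiService constructor signature is not part of this diff, so the wiring below (building the service directly from an LLMConfig) is an assumption for illustration only, and the URL, model name, and token values are placeholders.

from h_ai.domain.llm_config import LLMConfig
from h_ai.application.hai_service import HaiService

# api_token is new in 0.0.23; omit it for an unauthenticated local Ollama server.
config = LLMConfig(
    url="http://localhost:11434",
    model_name="llama3",
    api_token="my-secret-token",
)
service = HaiService(config)  # constructor shape assumed, not shown in this diff
answer = service.ask_question("Summarise this paragraph.")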

--- h_ai_brain-0.0.21/src/h_ai/domain/llm_config.py
+++ h_ai_brain-0.0.23/src/h_ai/domain/llm_config.py
@@ -1,6 +1,7 @@
 class LLMConfig:
-    def __init__(self, url: str, model_name: str, temperature: float = 0.6, max_tokens: int = 2500):
+    def __init__(self, url: str, model_name: str, temperature: float = 0.6, max_tokens: int = 2500, api_token: str = None):
         self.url = url
         self.model_name = model_name
         self.temperature = temperature
-        self.max_tokens = max_tokens
+        self.max_tokens = max_tokens
+        self.api_token = api_token
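
The added parameter defaults to None, so existing callers are unaffected. A short sketch of both calling styles, based only on the constructor shown in the hunk above (the URL and token values are placeholders):

from h_ai.domain.llm_config import LLMConfig

# Pre-0.0.23 style still works; api_token simply stays None.
local_config = LLMConfig(url="http://localhost:11434", model_name="llama3")

# 0.0.23 style: supply a token for an Ollama endpoint behind bearer-token auth.
remote_config = LLMConfig(
    url="https://ollama.example.com",
    model_name="llama3",
    temperature=0.6,
    max_tokens=2500,
    api_token="my-secret-token",
)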

--- h_ai_brain-0.0.21/src/h_ai/infrastructure/llm/data_handler.py
+++ h_ai_brain-0.0.23/src/h_ai/infrastructure/llm/data_handler.py
@@ -1,7 +1,7 @@
 import json
 
 
-def parse_json_data(json_string: str) -> dict |None:
+def parse_json_data(json_string: str) -> dict | None:
     try:
         # Check for empty string
         if not json_string or not json_string.strip():
@@ -14,17 +14,21 @@ def parse_json_data(json_string: str) -> dict |None:
         if match:
             json_string = match.group(1).strip()
         else:
-            # If no code block found, try the existing logic
-            if json_string.strip().startswith('json'):
-                json_string = json_string.strip()[4:].strip()
+            # If no code block found, check for various JSON prefixes
+            stripped = json_string.strip()
+            # Check for <json> prefix
+            if stripped.startswith('<json>'):
+                json_string = stripped[6:].strip()
+            # Check for 'json' prefix
+            elif stripped.startswith('json'):
+                json_string = stripped[4:].strip()
 
         # Parse the JSON data
         data = json.loads(json_string)
-
         return data
     except json.JSONDecodeError as e:
         print(f"Error parsing JSON data: {e}")
         return {}
     except Exception as e:
         print(f"Unexpected error while parsing JSON data: {e}")
-        return {}
+        return {}
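
To illustrate what the reworked prefix handling accepts: fenced code blocks are handled by the regex just above this hunk (not shown here, so that case is assumed), while bare <json> and json prefixes are stripped before json.loads runs. The expected results noted in the comments follow from the code in the hunk:

from h_ai.infrastructure.llm.data_handler import parse_json_data

parse_json_data('```json\n{"a": 1}\n```')   # fenced block, assumed handled by the regex -> {"a": 1}
parse_json_data('<json>{"a": 1}')           # new in 0.0.23: <json> prefix stripped -> {"a": 1}
parse_json_data('json {"a": 1}')            # existing behaviour: bare json prefix stripped -> {"a": 1}
parse_json_data('not valid json')           # JSONDecodeError branch -> {}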

--- h_ai_brain-0.0.21/src/h_ai/infrastructure/llm/ollama/ollama_generate_repository.py
+++ h_ai_brain-0.0.23/src/h_ai/infrastructure/llm/ollama/ollama_generate_repository.py
@@ -8,13 +8,14 @@ from ....domain.reasoning.llm_generate_respository import LlmGenerateRepository
 
 class OllamaGenerateRepository(LlmGenerateRepository):
 
-    def __init__(self, api_url: str, model_name: str, system_prompt: str = None, temperature: float = None, seed: int = None, max_tokens: int = 5000):
+    def __init__(self, api_url: str, model_name: str, system_prompt: str = None, temperature: float = None, seed: int = None, max_tokens: int = 5000, api_token: str = None):
         self.model_name = model_name
         self.system_prompt = system_prompt
         self.api_url = api_url
         self.temperature = temperature
         self.seed = seed
         self.max_tokens = max_tokens
+        self.api_token = api_token
 
 
     def generate(self, user_prompt: str, system_prompt: str = None, session_id: str = None, max_tokens: int = None) -> str|None:
@@ -41,12 +42,17 @@ class OllamaGenerateRepository(LlmGenerateRepository):
         if max_tokens:
             payload["num_ctx"] = f"{max_tokens}"
 
+        headers = {}
+        if self.api_token:
+            headers["Authorization"]="Bearer "+self.api_token
+
         try:
-            print(payload)
-            response = requests.post(url, json=payload)
+            #print(payload)
+            response = requests.post(url, json=payload, headers=headers)
+
             response.raise_for_status()
 
-            print(response.json())
+            #print(response.json())
 
             response_content = response.json()["response"]
             return clean_llm_response(response_content)
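
Functionally, the new block adds an optional bearer token to the HTTP call against the Ollama generate endpoint. A standalone sketch of the equivalent request follows; the endpoint path and the model/prompt payload keys are assumptions (only num_ctx and the response field appear in this hunk), so treat it as an approximation rather than the library's exact request:

import requests

def post_generate(api_url: str, model_name: str, prompt: str, api_token: str = None, max_tokens: int = None) -> str:
    """Rough equivalent of the request assembled in OllamaGenerateRepository.generate()."""
    payload = {"model": model_name, "prompt": prompt, "stream": False}  # payload keys assumed
    if max_tokens:
        payload["num_ctx"] = f"{max_tokens}"
    headers = {}
    if api_token:
        # New in 0.0.23: bearer-token auth for hosted or proxied Ollama endpoints.
        headers["Authorization"] = "Bearer " + api_token
    response = requests.post(f"{api_url}/api/generate", json=payload, headers=headers)  # path assumed
    response.raise_for_status()
    return response.json()["response"]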

--- h_ai_brain-0.0.21/PKG-INFO
+++ h_ai_brain-0.0.23/src/h_ai_brain.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: h_ai_brain
-Version: 0.0.21
+Version: 0.0.23
 Summary: AI Research agent API
 Author-email: shoebill <shoebill.hai@gmail.com>
 Classifier: Programming Language :: Python :: 3