h-ai-brain 0.0.22__py3-none-any.whl → 0.0.23__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- h_ai/application/hai_service.py +2 -1
- h_ai/domain/llm_config.py +3 -2
- h_ai/infrastructure/llm/ollama/ollama_generate_repository.py +8 -2
- {h_ai_brain-0.0.22.dist-info → h_ai_brain-0.0.23.dist-info}/METADATA +1 -1
- {h_ai_brain-0.0.22.dist-info → h_ai_brain-0.0.23.dist-info}/RECORD +9 -9
- {h_ai_brain-0.0.22.dist-info → h_ai_brain-0.0.23.dist-info}/WHEEL +1 -1
- {h_ai_brain-0.0.22.dist-info → h_ai_brain-0.0.23.dist-info}/licenses/LICENSE +0 -0
- {h_ai_brain-0.0.22.dist-info → h_ai_brain-0.0.23.dist-info}/licenses/NOTICE.txt +0 -0
- {h_ai_brain-0.0.22.dist-info → h_ai_brain-0.0.23.dist-info}/top_level.txt +0 -0
h_ai/application/hai_service.py
CHANGED
@@ -9,7 +9,8 @@ class HaiService:
             self.llm_config.url,
             self.llm_config.model_name,
             temperature=self.llm_config.temperature,
-            max_tokens=self.llm_config.max_tokens
+            max_tokens=self.llm_config.max_tokens,
+            api_token=self.llm_config.api_token)

     def ask_question(self, question: str, system_prompt: str = None, max_tokens = None) -> str:
         return self.llm_generate_repository.generate(question, system_prompt, max_tokens)
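Together with the LLMConfig and OllamaGenerateRepository changes below, this hunk is what threads a bearer token from the configuration through the service into the Ollama request. A rough end-to-end sketch; it assumes HaiService is constructed with an LLMConfig (the constructor itself is not shown in this diff), and the URL, model name, and token are placeholders rather than values from the package:

    from h_ai.application.hai_service import HaiService
    from h_ai.domain.llm_config import LLMConfig

    config = LLMConfig(
        url="http://localhost:11434",   # placeholder Ollama endpoint
        model_name="llama3",            # placeholder model name
        api_token="YOUR_TOKEN",         # new in 0.0.23, forwarded to the generate repository
    )
    service = HaiService(config)        # assumed constructor signature
    print(service.ask_question("Hello?"))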
h_ai/domain/llm_config.py
CHANGED
@@ -1,6 +1,7 @@
 class LLMConfig:
-    def __init__(self, url: str, model_name: str, temperature: float = 0.6, max_tokens: int = 2500):
+    def __init__(self, url: str, model_name: str, temperature: float = 0.6, max_tokens: int = 2500, api_token: str = None):
         self.url = url
         self.model_name = model_name
         self.temperature = temperature
-        self.max_tokens = max_tokens
+        self.max_tokens = max_tokens
+        self.api_token = api_token
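Because api_token defaults to None, existing call sites are unaffected; the token is opt-in. A minimal sketch against the constructor shown above (the URL, model name, and token are placeholders):

    from h_ai.domain.llm_config import LLMConfig

    # Pre-0.0.23 style still works: api_token simply stays None.
    config = LLMConfig(url="http://localhost:11434", model_name="llama3")

    # New in 0.0.23: attach a bearer token for downstream repositories to send.
    config_with_token = LLMConfig(
        url="http://localhost:11434",
        model_name="llama3",
        api_token="YOUR_TOKEN",  # placeholder value
    )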
h_ai/infrastructure/llm/ollama/ollama_generate_repository.py
CHANGED
@@ -8,13 +8,14 @@ from ....domain.reasoning.llm_generate_respository import LlmGenerateRepository

 class OllamaGenerateRepository(LlmGenerateRepository):

-    def __init__(self, api_url: str, model_name: str, system_prompt: str = None, temperature: float = None, seed: int = None, max_tokens: int = 5000):
+    def __init__(self, api_url: str, model_name: str, system_prompt: str = None, temperature: float = None, seed: int = None, max_tokens: int = 5000, api_token: str = None):
         self.model_name = model_name
         self.system_prompt = system_prompt
         self.api_url = api_url
         self.temperature = temperature
         self.seed = seed
         self.max_tokens = max_tokens
+        self.api_token = api_token


     def generate(self, user_prompt: str, system_prompt: str = None, session_id: str = None, max_tokens: int = None) -> str|None:
@@ -41,9 +42,14 @@ class OllamaGenerateRepository(LlmGenerateRepository):
         if max_tokens:
             payload["num_ctx"] = f"{max_tokens}"

+        headers = {}
+        if self.api_token:
+            headers["Authorization"]="Bearer "+self.api_token
+
         try:
             #print(payload)
-            response = requests.post(url, json=payload)
+            response = requests.post(url, json=payload, headers=headers)
+
             response.raise_for_status()

             #print(response.json())
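When a token is set, the repository now sends it as an HTTP Authorization header using the Bearer scheme; when it is not set, an empty headers dict is passed and the request is effectively unchanged. A minimal usage sketch against the constructor and generate signatures shown above (the endpoint, model name, and token are placeholders; the import path mirrors the file path listed in RECORD below):

    from h_ai.infrastructure.llm.ollama.ollama_generate_repository import OllamaGenerateRepository

    repo = OllamaGenerateRepository(
        api_url="http://localhost:11434",  # placeholder Ollama endpoint
        model_name="llama3",               # placeholder model name
        api_token="YOUR_TOKEN",            # sent as "Authorization: Bearer YOUR_TOKEN"
    )
    answer = repo.generate("Summarise the 0.0.23 changes.")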
{h_ai_brain-0.0.22.dist-info → h_ai_brain-0.0.23.dist-info}/RECORD
CHANGED
@@ -1,8 +1,8 @@
 h_ai/__init__.py,sha256=63uVFHPxXmLrZVo2ZPixL2cU4jwf3XTAuwIVGHGkqJI,75
 h_ai/application/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-h_ai/application/hai_service.py,sha256
+h_ai/application/hai_service.py,sha256=-HfdxS8pYE2xx7OU_D5pOQLdlm7uZAVAn2gslouL2oY,736
 h_ai/domain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-h_ai/domain/llm_config.py,sha256=
+h_ai/domain/llm_config.py,sha256=sYmD_ceOcGaP_UCTz0vtyuh-CTODvMudyURLiyrQ8tM,317
 h_ai/domain/reasoning/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 h_ai/domain/reasoning/llm_chat_repository.py,sha256=rY2izDyaDnoyyrCRS1qc9erHB98vARj4Mp-SnPwNhyY,211
 h_ai/domain/reasoning/llm_generate_respository.py,sha256=DPiV6ldCE8YhDdVb5rj98MBudKalDQHV3CZ2ADTm_f8,178
@@ -17,14 +17,14 @@ h_ai/infrastructure/llm/prompt_helper.py,sha256=QjxPbNW7hu2wBIi9GLJ7r00ELytT2Wr1
 h_ai/infrastructure/llm/prompt_loader.py,sha256=hVep4BuheFc6Arple3OrV249KSwEqjIqHbAEJ_ymuvI,460
 h_ai/infrastructure/llm/ollama/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 h_ai/infrastructure/llm/ollama/ollama_chat_repository.py,sha256=GALea7UWLtKyt767Frtl3uv8rvy42HrOKMIQGpqq-H0,2108
-h_ai/infrastructure/llm/ollama/ollama_generate_repository.py,sha256=
+h_ai/infrastructure/llm/ollama/ollama_generate_repository.py,sha256=nF-ahfH9AsMLz9ix2uqGo-A_zLJqldLT9RpdOIyky3g,2162
 h_ai/infrastructure/llm/ollama/ollama_tool_repository.py,sha256=7UZ-qsgXQUcJFx1qY7SVI7p3FhIy0Drdqs7jZIp42Ag,4683
 h_ai/infrastructure/llm/ollama/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 h_ai/infrastructure/llm/ollama/models/ollama_chat_message.py,sha256=ZIz4PQ3869vI3xAYYufPrxXpacajRDtOI8RDl5Dm9RQ,305
 h_ai/infrastructure/llm/ollama/models/ollama_chat_session.py,sha256=GZ_ddpbWa8iy6NZq50vokUFVZBiX0WNa81z9-r9RzTY,392
-h_ai_brain-0.0.
-h_ai_brain-0.0.
-h_ai_brain-0.0.
-h_ai_brain-0.0.
-h_ai_brain-0.0.
-h_ai_brain-0.0.
+h_ai_brain-0.0.23.dist-info/licenses/LICENSE,sha256=SbvpEU5JIU3yzMMkyzrI0dGqHDoJR_lMKGdl6GZHsy4,11558
+h_ai_brain-0.0.23.dist-info/licenses/NOTICE.txt,sha256=vxeIKUiGqAePLvDW4AVm3Xh-3BcsvMtCMn1tbsr9zsE,668
+h_ai_brain-0.0.23.dist-info/METADATA,sha256=4Ca3XF-ypwR9giaqCPJ1Mo6FniMQFrUYRc-NTLEE6Lw,531
+h_ai_brain-0.0.23.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
+h_ai_brain-0.0.23.dist-info/top_level.txt,sha256=3MChDBWvDJV4cEHuZhzeODxQ4ewtw-arOuyaDOc6sIo,5
+h_ai_brain-0.0.23.dist-info/RECORD,,
{h_ai_brain-0.0.22.dist-info → h_ai_brain-0.0.23.dist-info}/licenses/LICENSE: File without changes
{h_ai_brain-0.0.22.dist-info → h_ai_brain-0.0.23.dist-info}/licenses/NOTICE.txt: File without changes
{h_ai_brain-0.0.22.dist-info → h_ai_brain-0.0.23.dist-info}/top_level.txt: File without changes