h-ai-brain 0.0.22__py3-none-any.whl → 0.0.24__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- h_ai/__init__.py +11 -2
- h_ai/application/__init__.py +12 -0
- h_ai/application/hai_service.py +12 -17
- h_ai/application/services/__init__.py +11 -0
- h_ai/application/services/base_model_service.py +69 -0
- h_ai/application/services/granite_service.py +139 -0
- h_ai/application/services/nomic_service.py +117 -0
- h_ai/domain/llm_config.py +16 -2
- h_ai/domain/model_factory.py +44 -0
- h_ai/domain/reasoning/llm_chat_repository.py +39 -2
- h_ai/domain/reasoning/llm_embedding_repository.py +20 -0
- h_ai/domain/reasoning/llm_generate_respository.py +21 -4
- h_ai/domain/reasoning/llm_tool_repository.py +24 -1
- h_ai/infrastructure/llm/json_resource_loader.py +97 -0
- h_ai/infrastructure/llm/ollama/factories/__init__.py +1 -0
- h_ai/infrastructure/llm/ollama/factories/granite_factory.py +91 -0
- h_ai/infrastructure/llm/ollama/factories/nomic_factory.py +58 -0
- h_ai/infrastructure/llm/ollama/ollama_chat_repository.py +165 -26
- h_ai/infrastructure/llm/ollama/ollama_embed_repository.py +43 -0
- h_ai/infrastructure/llm/ollama/ollama_generate_repository.py +88 -26
- h_ai/infrastructure/llm/ollama/ollama_http_client.py +54 -0
- h_ai/infrastructure/llm/prompt_loader.py +42 -7
- h_ai/infrastructure/llm/template_loader.py +146 -0
- {h_ai_brain-0.0.22.dist-info → h_ai_brain-0.0.24.dist-info}/METADATA +2 -1
- h_ai_brain-0.0.24.dist-info/RECORD +43 -0
- {h_ai_brain-0.0.22.dist-info → h_ai_brain-0.0.24.dist-info}/WHEEL +1 -1
- h_ai_brain-0.0.22.dist-info/RECORD +0 -30
- {h_ai_brain-0.0.22.dist-info → h_ai_brain-0.0.24.dist-info}/licenses/LICENSE +0 -0
- {h_ai_brain-0.0.22.dist-info → h_ai_brain-0.0.24.dist-info}/licenses/NOTICE.txt +0 -0
- {h_ai_brain-0.0.22.dist-info → h_ai_brain-0.0.24.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,54 @@
|
|
1
|
+
import requests
|
2
|
+
from typing import Dict, Any, Optional
|
3
|
+
|
4
|
+
|
5
|
+
class OllamaHttpClient:
    """
    Base HTTP client for the Ollama API.

    Handles authentication and common request functionality.
    """

    def __init__(self, api_url: str, api_token: str = None, timeout: float = None):
        """
        Initialize a new OllamaHttpClient.

        Args:
            api_url: The base URL of the Ollama API.
            api_token: Optional API token for authentication.
            timeout: Optional request timeout in seconds. ``None`` (the
                default) keeps the previous behavior of waiting indefinitely.
        """
        # Normalize the base URL so endpoint joining never produces "//".
        self.api_url = api_url.rstrip('/')
        self.api_token = api_token
        self.timeout = timeout

    def _get_headers(self) -> Dict[str, str]:
        """
        Get the headers for the request, including authentication if available.

        Returns:
            Dict[str, str]: The headers for the request.
        """
        headers = {}
        if self.api_token:
            headers["Authorization"] = f"Bearer {self.api_token}"
        return headers

    def post(self, endpoint: str, payload: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """
        Send a POST request to the Ollama API.

        Args:
            endpoint: The endpoint to send the request to (without the base URL).
            payload: The payload to send with the request.

        Returns:
            Optional[Dict[str, Any]]: The JSON response from the API, or None if the request failed.
        """
        url = f"{self.api_url}/{endpoint}"
        headers = self._get_headers()

        try:
            # Without an explicit timeout, requests waits forever on a stalled
            # server; honour the configured timeout (None == old behavior).
            response = requests.post(url, json=payload, headers=headers, timeout=self.timeout)
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            print(f"Error during Ollama API call to {endpoint}: {e}")
            return None
|
@@ -1,18 +1,53 @@
|
|
1
1
|
import json
|
2
|
-
import
|
2
|
+
from .json_resource_loader import JsonResourceLoader
|
3
3
|
|
4
4
|
|
5
5
|
class PromptLoader:
    """
    Loader for prompt configuration files.

    Wraps a JSON resource (resolved via ``JsonResourceLoader``, from either a
    package or the file system) and exposes simple accessors for individual
    keys or the whole configuration.

    Usage:
        # Create a prompt loader for a specific file
        loader = PromptLoader("autonomous_agent")

        # Get a specific config value
        system_prompt = loader.get_config_value("system_prompt")

        # Get the entire config as a JSON string
        config_json = loader.get_entire_config()
    """

    def __init__(self, resource_name):
        """
        Initialize a new PromptLoader.

        Args:
            resource_name: Name of the JSON resource file (with or without .json extension).
        """
        # Delegate locating and parsing the resource to JsonResourceLoader.
        self.config = JsonResourceLoader().load_json_resource(resource_name)

    def get_config_value(self, key):
        """
        Look up a single value in the loaded configuration.

        Args:
            key: The key to look up in the config.

        Returns:
            The value associated with the key, or None if the key doesn't exist.
        """
        return self.config.get(key)

    def get_entire_config(self):
        """
        Serialize the full configuration.

        Returns:
            str: The entire config as a formatted JSON string.
        """
        return json.dumps(self.config, indent=2)
|
@@ -0,0 +1,146 @@
|
|
1
|
+
import os
|
2
|
+
import importlib.util
|
3
|
+
from typing import Dict, Any, Optional, List
|
4
|
+
from jinja2 import Environment, FileSystemLoader, PackageLoader, ChoiceLoader, select_autoescape
|
5
|
+
|
6
|
+
|
7
|
+
class TemplateLoader:
    """
    Loader for Jinja2 templates.

    This class provides functionality to load and render Jinja2 templates for
    model-specific prompt formatting.

    Templates are stored in the resources/templates directory and should have
    a .j2 extension. Different models can have different templates to format
    prompts according to their specific requirements.

    The default template (default.j2) is used for models without specific
    formatting needs. Model-specific templates (like granite3.j2) provide
    tailored formatting for particular models.

    Usage:
        # Create a template loader
        loader = TemplateLoader()

        # Get a list of available templates
        templates = loader.get_template_names()

        # Render a template with context
        context = {
            "System": "You are a helpful assistant.",
            "Messages": [
                {"Role": "user", "Content": "Hello!"}
            ]
        }
        formatted_prompt = loader.render_template("default.j2", context)

        # Get the appropriate template for a model
        template_name = loader.get_model_template("granite3.3:8b")
    """

    def __init__(self, templates_dir: str = None, package_name: str = None, package_path: str = None):
        """
        Initialize a new TemplateLoader.

        Args:
            templates_dir: Directory containing Jinja2 templates. If None, defaults to 'resources/templates'.
            package_name: Python package name containing templates. If None, defaults to 'h_ai'.
            package_path: Path within the package to templates. If None, defaults to 'resources/templates'.

        Raises:
            ValueError: If neither the package nor the directory provides templates.
        """
        # Get the base directory of the package (three levels up from this file).
        base_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

        # Set default sources if not provided.
        if templates_dir is None:
            templates_dir = os.path.join(base_dir, 'resources', 'templates')
        if package_name is None:
            package_name = 'h_ai'
        if package_path is None:
            package_path = 'resources/templates'

        # Build the loaders in priority order: package first, then file system.
        loaders = []

        try:
            # Only add the package loader when the package is importable.
            if importlib.util.find_spec(package_name) is not None:
                loaders.append(PackageLoader(package_name, package_path))
        except (ImportError, ModuleNotFoundError):
            # If package loading fails, we'll fall back to file system.
            pass

        if os.path.exists(templates_dir):
            loaders.append(FileSystemLoader(templates_dir))

        # Fail fast when no template source is usable.
        if not loaders:
            raise ValueError(f"No valid template sources found. Tried package '{package_name}' and directory '{templates_dir}'")

        # ChoiceLoader tries each source in order until the template is found.
        self.env = Environment(
            loader=ChoiceLoader(loaders),
            autoescape=select_autoescape(['html', 'xml']),
            trim_blocks=True,
            lstrip_blocks=True
        )

        self.templates_dir = templates_dir
        self.package_name = package_name
        self.package_path = package_path

    def get_template_names(self) -> list[str]:
        """
        Get a list of available template names.

        Returns:
            list[str]: List of template filenames.
        """
        return self.env.list_templates()

    def render_template(self, template_name: str, context: Dict[str, Any]) -> str:
        """
        Render a template with the given context.

        Args:
            template_name: Name of the template file.
            context: Dictionary of variables to pass to the template.

        Returns:
            str: The rendered template.
        """
        template = self.env.get_template(template_name)
        return template.render(**context)

    def get_model_template(self, model_name: str) -> Optional[str]:
        """
        Get the appropriate template name for a given model.

        Args:
            model_name: The name of the model.

        Returns:
            Optional[str]: The template name for the model, or None if no
            specific template exists and no default is available.
        """
        # Map model names to template names.
        model_templates = {
            "granite3.3:8b": "granite3.j2",
            # Add more model-to-template mappings as needed
        }

        # List the available templates once; the previous implementation
        # called get_template_names() (and hence env.list_templates()) up to
        # twice per lookup.
        available = self.get_template_names()

        # If no specific template is mapped, try the default.
        template_name = model_templates.get(model_name)
        if template_name is None:
            template_name = "default.j2"

        if template_name in available:
            return template_name

        # Fall back to default if the model-specific template doesn't exist.
        if "default.j2" in available:
            return "default.j2"

        return None
|
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.4
|
2
2
|
Name: h_ai_brain
|
3
|
-
Version: 0.0.22
|
3
|
+
Version: 0.0.24
|
4
4
|
Summary: AI Research agent API
|
5
5
|
Author-email: shoebill <shoebill.hai@gmail.com>
|
6
6
|
Classifier: Programming Language :: Python :: 3
|
@@ -11,6 +11,7 @@ Description-Content-Type: text/markdown
|
|
11
11
|
License-File: LICENSE
|
12
12
|
License-File: NOTICE.txt
|
13
13
|
Requires-Dist: requests~=2.32.3
|
14
|
+
Requires-Dist: jinja2~=3.1.3
|
14
15
|
Provides-Extra: dev
|
15
16
|
Requires-Dist: pytest; extra == "dev"
|
16
17
|
Dynamic: license-file
|
@@ -0,0 +1,43 @@
|
|
1
|
+
h_ai/__init__.py,sha256=xPmyEAMjUDrEbH2ECOfNuDg1n_ILOUFo6E2gJJpmBF8,510
|
2
|
+
h_ai/application/__init__.py,sha256=mw3Zk9ONVHmZouknalYI4lPOBecjbXn1XVmD6OxhR0I,437
|
3
|
+
h_ai/application/hai_service.py,sha256=JtxlxmBIr5cPzdEfowTM4Kzs5-mlAqaNswL1clXwm9A,394
|
4
|
+
h_ai/application/services/__init__.py,sha256=0Gm07RkBplyPStFYtBNOG3GxFZEj0IkhUfnzht5CvaQ,329
|
5
|
+
h_ai/application/services/base_model_service.py,sha256=Ep3z_-xg9iaT5yIt9yovQwxSHUe148KVo0W_Z4Zsdrw,2468
|
6
|
+
h_ai/application/services/granite_service.py,sha256=Fh0yYzTCxmv9F8xxqsrK-pNlL22WbsYUMHMhQdbbcW8,5481
|
7
|
+
h_ai/application/services/nomic_service.py,sha256=LOt-3pKRXftx2Fp8lIUFhRsPRKtYrLUzA8I9_rPBa_k,3988
|
8
|
+
h_ai/domain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
9
|
+
h_ai/domain/llm_config.py,sha256=gclk53-NCtljTr14y4P9TCuG2aHW1--4GHm0dbitTRg,810
|
10
|
+
h_ai/domain/model_factory.py,sha256=bEWbtUHu-_a7ZAdAjf6CszigMb04QFsUXThRNXkqxWA,1493
|
11
|
+
h_ai/domain/reasoning/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
12
|
+
h_ai/domain/reasoning/llm_chat_repository.py,sha256=zNwzONyLf7YFhBUk6o0SMMR-0kocCmKbw-xNriDSwyI,1427
|
13
|
+
h_ai/domain/reasoning/llm_embedding_repository.py,sha256=Q3e2hyD-q89HZuOaVyK6I6OWMtbL3I3k5zWB85Qamzg,533
|
14
|
+
h_ai/domain/reasoning/llm_generate_respository.py,sha256=20yd1gXbOcCX5Y9KjhAt__xiBtDNxyFQPxBpeFWUfiA,690
|
15
|
+
h_ai/domain/reasoning/llm_tool_repository.py,sha256=VNMwbyhlHVJOSihVJwivVMYaT6xZbubPxfKxmiVcV_I,1099
|
16
|
+
h_ai/domain/reasoning/text_analysis.py,sha256=rmCUHWzJ3muFBorVXx7HcU2Sw-UfXFOuAVXRAPkqS8E,5183
|
17
|
+
h_ai/domain/reasoning/tool_message.py,sha256=jpbfbJXj6oqZyB3lDxGOUyFB4faHtXAaEOVBHgTgSnk,67
|
18
|
+
h_ai/infrastructure/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
19
|
+
h_ai/infrastructure/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
20
|
+
h_ai/infrastructure/llm/data_handler.py,sha256=9wAxF5v409DGu7hn_4MGubGe0XyiowYtC30ftvnRDdE,1198
|
21
|
+
h_ai/infrastructure/llm/json_resource_loader.py,sha256=dCghy0kZMjqgtj3eFu7NifyaY4d0M6K7EaVtGs5JxEY,4109
|
22
|
+
h_ai/infrastructure/llm/llm_response_cleaner.py,sha256=pp1K7I77hagrC1r6Ib61-iSNQnU6wlM54bRmOUa7eFk,859
|
23
|
+
h_ai/infrastructure/llm/prompt_helper.py,sha256=QjxPbNW7hu2wBIi9GLJ7r00ELytT2Wr1JKDAA1jB2U4,238
|
24
|
+
h_ai/infrastructure/llm/prompt_loader.py,sha256=kwi5JrMfq5IpKV0RiTNrOTwFFtnDnw-pFwVcsutvbhQ,1624
|
25
|
+
h_ai/infrastructure/llm/template_loader.py,sha256=iVQUT9Y7DePbW-ZoS1Zzvcat4C4VXlUobn3Zwra7V9g,5414
|
26
|
+
h_ai/infrastructure/llm/ollama/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
27
|
+
h_ai/infrastructure/llm/ollama/ollama_chat_repository.py,sha256=eTfeKWaMqJdnwQN5wfz46ZNMAZp7NMw2MaYDAvLIQ_8,7872
|
28
|
+
h_ai/infrastructure/llm/ollama/ollama_embed_repository.py,sha256=ITGDG6oGGAD8xjURYEOjHUa7noGVSWUtlqoJWon7wWI,1373
|
29
|
+
h_ai/infrastructure/llm/ollama/ollama_generate_repository.py,sha256=koDRoU6LKBtMmq4sNstA5oJKJHM3UAYnLBErNmF2cUs,4795
|
30
|
+
h_ai/infrastructure/llm/ollama/ollama_http_client.py,sha256=sSnxkjR8eGEI7gG97vu8-A2GOPoJ3XML5AlIVSD0qLI,1837
|
31
|
+
h_ai/infrastructure/llm/ollama/ollama_tool_repository.py,sha256=7UZ-qsgXQUcJFx1qY7SVI7p3FhIy0Drdqs7jZIp42Ag,4683
|
32
|
+
h_ai/infrastructure/llm/ollama/factories/__init__.py,sha256=UL6WBv4BtKrx59yr5X2Ddt-r0t_KHfgPjo9dZ5zZ3sc,82
|
33
|
+
h_ai/infrastructure/llm/ollama/factories/granite_factory.py,sha256=RL796T8JBv_HG4sjVvPAZLxXrkcH5b-z72767IGEhCk,3462
|
34
|
+
h_ai/infrastructure/llm/ollama/factories/nomic_factory.py,sha256=_yJYNbF738ugJ1ZN6pqmttTBk2tTwk6NJpMs4Yeq8pY,2081
|
35
|
+
h_ai/infrastructure/llm/ollama/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
36
|
+
h_ai/infrastructure/llm/ollama/models/ollama_chat_message.py,sha256=ZIz4PQ3869vI3xAYYufPrxXpacajRDtOI8RDl5Dm9RQ,305
|
37
|
+
h_ai/infrastructure/llm/ollama/models/ollama_chat_session.py,sha256=GZ_ddpbWa8iy6NZq50vokUFVZBiX0WNa81z9-r9RzTY,392
|
38
|
+
h_ai_brain-0.0.24.dist-info/licenses/LICENSE,sha256=SbvpEU5JIU3yzMMkyzrI0dGqHDoJR_lMKGdl6GZHsy4,11558
|
39
|
+
h_ai_brain-0.0.24.dist-info/licenses/NOTICE.txt,sha256=vxeIKUiGqAePLvDW4AVm3Xh-3BcsvMtCMn1tbsr9zsE,668
|
40
|
+
h_ai_brain-0.0.24.dist-info/METADATA,sha256=ALGHTKK8mNI4EHw1ouKS5Dz505_wa-Uu1z9rmPDxEi0,561
|
41
|
+
h_ai_brain-0.0.24.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
|
42
|
+
h_ai_brain-0.0.24.dist-info/top_level.txt,sha256=3MChDBWvDJV4cEHuZhzeODxQ4ewtw-arOuyaDOc6sIo,5
|
43
|
+
h_ai_brain-0.0.24.dist-info/RECORD,,
|
@@ -1,30 +0,0 @@
|
|
1
|
-
h_ai/__init__.py,sha256=63uVFHPxXmLrZVo2ZPixL2cU4jwf3XTAuwIVGHGkqJI,75
|
2
|
-
h_ai/application/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
3
|
-
h_ai/application/hai_service.py,sha256=LPRAfwlyGpAY8qUL-pB3c_KhOWa93ztr_CJo11Sy7Xc,686
|
4
|
-
h_ai/domain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
5
|
-
h_ai/domain/llm_config.py,sha256=xdyRoc26yDoKy8Y5L724TT6LoF3MEOpi7SZPQvS-lx8,258
|
6
|
-
h_ai/domain/reasoning/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
7
|
-
h_ai/domain/reasoning/llm_chat_repository.py,sha256=rY2izDyaDnoyyrCRS1qc9erHB98vARj4Mp-SnPwNhyY,211
|
8
|
-
h_ai/domain/reasoning/llm_generate_respository.py,sha256=DPiV6ldCE8YhDdVb5rj98MBudKalDQHV3CZ2ADTm_f8,178
|
9
|
-
h_ai/domain/reasoning/llm_tool_repository.py,sha256=nFwqtrJ0Gw8XUFX0uuO7-UejtgoqNuGeT51qZPQtxas,401
|
10
|
-
h_ai/domain/reasoning/text_analysis.py,sha256=rmCUHWzJ3muFBorVXx7HcU2Sw-UfXFOuAVXRAPkqS8E,5183
|
11
|
-
h_ai/domain/reasoning/tool_message.py,sha256=jpbfbJXj6oqZyB3lDxGOUyFB4faHtXAaEOVBHgTgSnk,67
|
12
|
-
h_ai/infrastructure/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
13
|
-
h_ai/infrastructure/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
14
|
-
h_ai/infrastructure/llm/data_handler.py,sha256=9wAxF5v409DGu7hn_4MGubGe0XyiowYtC30ftvnRDdE,1198
|
15
|
-
h_ai/infrastructure/llm/llm_response_cleaner.py,sha256=pp1K7I77hagrC1r6Ib61-iSNQnU6wlM54bRmOUa7eFk,859
|
16
|
-
h_ai/infrastructure/llm/prompt_helper.py,sha256=QjxPbNW7hu2wBIi9GLJ7r00ELytT2Wr1JKDAA1jB2U4,238
|
17
|
-
h_ai/infrastructure/llm/prompt_loader.py,sha256=hVep4BuheFc6Arple3OrV249KSwEqjIqHbAEJ_ymuvI,460
|
18
|
-
h_ai/infrastructure/llm/ollama/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
19
|
-
h_ai/infrastructure/llm/ollama/ollama_chat_repository.py,sha256=GALea7UWLtKyt767Frtl3uv8rvy42HrOKMIQGpqq-H0,2108
|
20
|
-
h_ai/infrastructure/llm/ollama/ollama_generate_repository.py,sha256=np6vYfn9sVrf_oKMN0y1QLdf7X2seWBOjvPS8OQghkc,1969
|
21
|
-
h_ai/infrastructure/llm/ollama/ollama_tool_repository.py,sha256=7UZ-qsgXQUcJFx1qY7SVI7p3FhIy0Drdqs7jZIp42Ag,4683
|
22
|
-
h_ai/infrastructure/llm/ollama/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
23
|
-
h_ai/infrastructure/llm/ollama/models/ollama_chat_message.py,sha256=ZIz4PQ3869vI3xAYYufPrxXpacajRDtOI8RDl5Dm9RQ,305
|
24
|
-
h_ai/infrastructure/llm/ollama/models/ollama_chat_session.py,sha256=GZ_ddpbWa8iy6NZq50vokUFVZBiX0WNa81z9-r9RzTY,392
|
25
|
-
h_ai_brain-0.0.22.dist-info/licenses/LICENSE,sha256=SbvpEU5JIU3yzMMkyzrI0dGqHDoJR_lMKGdl6GZHsy4,11558
|
26
|
-
h_ai_brain-0.0.22.dist-info/licenses/NOTICE.txt,sha256=vxeIKUiGqAePLvDW4AVm3Xh-3BcsvMtCMn1tbsr9zsE,668
|
27
|
-
h_ai_brain-0.0.22.dist-info/METADATA,sha256=1qw7KdgK6bWTAEZVsfqGpjEe-94RVtApgK6e67EnqUs,531
|
28
|
-
h_ai_brain-0.0.22.dist-info/WHEEL,sha256=0CuiUZ_p9E4cD6NyLD6UG80LBXYyiSYZOKDm5lp32xk,91
|
29
|
-
h_ai_brain-0.0.22.dist-info/top_level.txt,sha256=3MChDBWvDJV4cEHuZhzeODxQ4ewtw-arOuyaDOc6sIo,5
|
30
|
-
h_ai_brain-0.0.22.dist-info/RECORD,,
|
File without changes
|
File without changes
|
File without changes
|