hjxdl 0.2.81__py3-none-any.whl → 0.2.83__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hdl/_version.py +2 -2
- hdl/utils/database_tools/web.py +15 -10
- hdl/utils/desc/template.py +11 -0
- hdl/utils/general/glob.py +17 -0
- hdl/utils/general/runners.py +8 -5
- hdl/utils/llm/chat.py +85 -10
- hdl/utils/llm/chatgr.py +4 -4
- hdl/utils/llm/vis.py +86 -1
- {hjxdl-0.2.81.dist-info → hjxdl-0.2.83.dist-info}/METADATA +1 -1
- {hjxdl-0.2.81.dist-info → hjxdl-0.2.83.dist-info}/RECORD +12 -12
- {hjxdl-0.2.81.dist-info → hjxdl-0.2.83.dist-info}/WHEEL +0 -0
- {hjxdl-0.2.81.dist-info → hjxdl-0.2.83.dist-info}/top_level.txt +0 -0
hdl/_version.py
CHANGED
hdl/utils/database_tools/web.py
CHANGED
@@ -58,10 +58,14 @@ def web_search_text(
 
 def fetch_baidu_results(query, max_n_links=3):
     """
-
-
-
-
+    Fetches search results from Baidu for a given query and retrieves the text content of the top results.
+    Args:
+        query (str): The search query to be sent to Baidu.
+        max_n_links (int, optional): The maximum number of search result links to fetch. Defaults to 3.
+    Returns:
+        str: The concatenated text content of the fetched web pages.
+    Raises:
+        requests.RequestException: If there is an issue with the HTTP request.
     """
     try:
         max_n_links = int(max_n_links)
@@ -105,13 +109,14 @@ def fetch_baidu_results(query, max_n_links=3):
 
 def wolfram_alpha_calculate(query):
     """
-
-
-
-        query (str): The query string to send to Wolfram Alpha.
-
+    Sends a query to the Wolfram Alpha API and returns the result.
+    Args:
+        query (str): The query string to be sent to Wolfram Alpha.
     Returns:
-
+        str: The result of the query in plaintext format, or an error message if the query was unsuccessful or if an error occurred.
+    Raises:
+        requests.Timeout: If the request to Wolfram Alpha times out.
+        Exception: If any other error occurs during the request.
     """
     # Get the Wolfram Alpha App ID from environment variables
     app_id = os.getenv('WOLFRAM_APP_ID', None)
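The new docstrings describe the intended call pattern for these helpers. A minimal usage sketch, assuming the package is installed and the WOLFRAM_APP_ID environment variable is set:

```python
# Sketch only: assumes hdl is importable and WOLFRAM_APP_ID is exported.
from hdl.utils.database_tools.web import fetch_baidu_results, wolfram_alpha_calculate

# Concatenated text of the top 3 Baidu results for a query.
page_text = fetch_baidu_results("python asyncio tutorial", max_n_links=3)
print(page_text[:500])

# Sends a calculation to Wolfram Alpha; returns plaintext or an error message.
print(wolfram_alpha_calculate("integrate x^2 dx"))
```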
hdl/utils/desc/template.py
CHANGED
@@ -30,3 +30,14 @@ COT_TEMPLATE = """
 你的回答中应只能是 Markdown 格式,且不能包含其他多余文字或格式错误。
 以下是可用的工具:
 """
+
+OD_TEMPLATE = """
+Detect all the objects in the image, return bounding boxes for all of them using the following format (DO NOT INCLUDE ANY OTHER WORDS IN YOUR ANSWER BUT ONLY THE LIST ITSELF!):
+[
+    {
+        "object": "object_name",
+        "bboxes": [[xmin, ymin, xmax, ymax], [xmin, ymin, xmax, ymax], ...]
+    },
+    ...
+]
+"""
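For reference, a reply that follows OD_TEMPLATE is a bare JSON list, so it can be passed straight to json.loads. A small sketch with invented box values (the 0–1000 coordinate scale is an assumption matching the drawing helper added in hdl/utils/llm/vis.py):

```python
import json

# Hypothetical model reply conforming to OD_TEMPLATE (values invented for illustration).
reply = '''
[
    {"object": "stop sign", "bboxes": [[120, 80, 480, 440]]},
    {"object": "car", "bboxes": [[500, 600, 900, 950], [50, 700, 300, 980]]}
]
'''
for item in json.loads(reply):
    print(item["object"], item["bboxes"])
```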
hdl/utils/general/glob.py
CHANGED
@@ -2,6 +2,14 @@ import subprocess
 
 
 def get_num_lines(file):
+    """
+    Get the number of lines in a given file.
+    Args:
+        file (str): The path to the file.
+    Returns:
+        int: The number of lines in the file.
+    """
+
     num_lines = subprocess.check_output(
         ['wc', '-l', file]
     ).split()[0]
@@ -9,6 +17,15 @@ def get_num_lines(file):
 
 
 def str_from_line(file, line, split=False):
+    """
+    Extracts a specific line from a file and optionally splits the line.
+    Args:
+        file (str): The path to the file from which to extract the line.
+        line (int): The line number to extract (0-based index).
+        split (bool, optional): If True, splits the line at the first space or tab and returns the first part. Defaults to False.
+    Returns:
+        str: The extracted line, optionally split at the first space or tab.
+    """
     smi = subprocess.check_output(
         # ['sed','-n', f'{str(i+1)}p', file]
         ["sed", f"{str(line + 1)}q;d", file]
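Both helpers shell out to standard Unix tools (wc and sed), so they assume a POSIX environment. A minimal sketch based on the documented signatures:

```python
# Sketch only: requires wc/sed on PATH and an existing text file.
from hdl.utils.general.glob import get_num_lines, str_from_line

path = "data/smiles.txt"          # hypothetical file path
n = get_num_lines(path)           # line count via `wc -l`
first = str_from_line(path, 0)    # 0-based line index, extracted via `sed`
print(n, first)
```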
hdl/utils/general/runners.py
CHANGED
@@ -61,14 +61,17 @@ def calculate(expression):
 
 def count_character_occurrences(text, char):
     """
-
+    Count the occurrences of a character in a text.
 
     Args:
-        text (str):
-        char (str):
+        text (str): The text in which to count character occurrences.
+        char (str): The character to count occurrences of.
 
     Returns:
-
+        str: A string indicating how many times the character appears in the text.
+
+    Raises:
+        ValueError: If the input text is not a string or if the character is not a single character string.
     """
     if not isinstance(text, str):
         raise ValueError("输入的文本必须是字符串类型")
@@ -77,4 +80,4 @@ def count_character_occurrences(text, char):
     text = text.lower()
     char = char.lower()
 
-    return f"{text} 中 {char} 共出现了 {text.count(char)} 次。"
+    return f"{text} 中 {char} 共出现了 {text.count(char)} 次。"
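A quick sketch of the updated helper; note that, per the docstring, it returns a formatted string (in Chinese) rather than a number, and raises ValueError on invalid input:

```python
from hdl.utils.general.runners import count_character_occurrences

# Counting is case-insensitive; e.g. this reports 3 occurrences of "l".
print(count_character_occurrences("Hello World", "l"))

try:
    count_character_occurrences("abc", "ab")  # more than one character, rejected per the docstring
except ValueError as e:
    print("rejected:", e)
```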
hdl/utils/llm/chat.py
CHANGED
@@ -8,12 +8,22 @@ import re
 
 
 from openai import OpenAI
-from ..desc.template import FN_TEMPLATE, COT_TEMPLATE
+from ..desc.template import FN_TEMPLATE, COT_TEMPLATE, OD_TEMPLATE
 from ..desc.func_desc import TOOL_DESC
+from .vis import draw_and_plot_boxes_from_json
 import json
 # import traceback
 
 def parse_fn_markdown(markdown_text, params_key="params"):
+    """
+    Parses a markdown text to extract function name and parameters.
+    Args:
+        markdown_text (str): The markdown text containing the function name and parameters.
+        params_key (str, optional): The key under which the parameters will be nested in the result dictionary. Defaults to "params".
+    Returns:
+        dict: A dictionary containing the function name and parameters. The function name is stored with the key "function_name", and the parameters are nested under the specified params_key.
+    """
+
     lines = markdown_text.strip().split("\n")
     result = {}
     params = {}
@@ -53,6 +63,23 @@ def parse_fn_markdown(markdown_text, params_key="params"):
     return result
 
 def parse_cot_markdown(markdown_text):
+    """
+    Parse a Markdown text formatted as 'COT' (Title, Tool, Content, Stop Thinking) and extract relevant information.
+
+    Args:
+        markdown_text (str): The Markdown text to parse.
+
+    Returns:
+        dict: A dictionary containing the parsed information with the following keys:
+            - 'title': Title extracted from the Markdown text.
+            - 'tool': Tool extracted from the Markdown text.
+            - 'content': Content extracted from the Markdown text.
+            - 'stop_thinking': Boolean indicating whether 'stop_thinking' is true or false.
+
+    Note:
+        - 'stop_thinking' value is considered True only if it is explicitly 'true' (case-insensitive).
+
+    """
     # 提取标题(支持跨行)
     title_match = re.search(r"##\s*(.+?)(?=\n-|\Z)", markdown_text, re.DOTALL)
     title = title_match.group(1).strip() if title_match else ""
@@ -78,6 +105,7 @@ def parse_cot_markdown(markdown_text):
     }
 
 
+
 def run_tool_with_kwargs(tool, func_kwargs):
     """Run the specified tool with the provided keyword arguments.
 
@@ -105,9 +133,28 @@ class OpenAI_M():
         tools: list = None,
         tool_desc: dict = None,
         cot_desc: str = None,
+        od_desc: str = None,
         *args,
         **kwargs
     ):
+        """
+        Initialize an instance of the OpenAI_M class with configuration options.
+
+        Args:
+            model_path (str): Path to the model. Defaults to "default_model".
+            device (str): Device to use, either 'gpu' or 'cpu'. Defaults to 'gpu'.
+            generation_kwargs (dict, optional): Additional keyword arguments for generation.
+            server_ip (str): IP address of the server. Defaults to "172.28.1.2".
+            server_port (int): Port number of the server. Defaults to 8000.
+            api_key (str): API key for authentication. Defaults to "dummy_key".
+            use_groq (bool): Flag to use Groq client. Defaults to False.
+            groq_api_key (str, optional): API key for Groq client.
+            tools (list, optional): List of tools to be used.
+            tool_desc (dict, optional): Additional tool descriptions.
+            cot_desc (str, optional): Chain of Thought description.
+            *args: Additional positional arguments.
+            **kwargs: Additional keyword arguments.
+        """
         # self.model_path = model_path
         self.server_ip = server_ip
         self.server_port = server_port
@@ -144,9 +191,8 @@ class OpenAI_M():
         self.tool_info = "\n".join(self.tool_descs)
         self.tool_desc_str = "\n".join(self.tool_descs_verbose)
 
-        self.cot_desc = cot_desc
-        if
-            self.cot_desc = COT_TEMPLATE
+        self.cot_desc = cot_desc if cot_desc else COT_TEMPLATE
+        self.od_desc = od_desc if od_desc else OD_TEMPLATE
 
     def cot(
         self,
@@ -155,14 +201,22 @@ class OpenAI_M():
         steps: list = None,
         **kwargs
     ):
-        """
+        """
+        Execute a Chain of Thought (COT) process to iteratively generate steps
+        towards solving a given prompt, utilizing tools if necessary.
 
         Args:
-            prompt (
-            max_step (int, optional):
+            prompt (str): The initial question or problem to solve.
+            max_step (int, optional): Maximum number of steps to attempt. Defaults to 30.
+            steps (list, optional): List to accumulate steps taken. Defaults to None.
+            **kwargs: Additional keyword arguments for tool invocation.
 
-
-
+        Yields:
+            tuple: A tuple containing the current step number, accumulated information,
+                and the list of steps taken.
+
+        Raises:
+            Exception: If an error occurs during the parsing or tool invocation process.
         """
         # 初始化当前信息为空字符串,用于累积后续的思考步骤和用户问题
         current_info = ""
@@ -520,6 +574,27 @@ class OpenAI_M():
             print(e)
             return ""
 
+    def od(
+        self,
+        image_path,
+        save_path=None
+    ):
+        json_str = self.invoke(
+            prompt="""
+    Detect all the objects in the image, return bounding boxes for all of them using the following format (DO NOT INCLUDE ANY OTHER WORDS IN YOUR ANSWER BUT ONLY THE LIST):
+    [
+        {
+            "object": "object_name",
+            "bboxes": [[xmin, ymin, xmax, ymax], [xmin, ymin, xmax, ymax], ...]
+        },
+        ...
+    ]
+            """,
+            images="https://air-example-data-2.s3.us-west-2.amazonaws.com/vllm_opensource_llava/stop_sign.jpg",
+        )
+        img = draw_and_plot_boxes_from_json(json_str, image_path, save_path)
+        return img, save_path
+
 
 class MMChatter():
     def __init__(
@@ -591,4 +666,4 @@ class MMChatter():
         # Process the model's response by parsing the output
        response = output.splitlines()[-1].strip('<assistant>') # Assuming the last line is the model's response
 
-        return response
+        return response
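A hedged usage sketch of the new object-detection entry point. The constructor arguments below are illustrative; od() sends the detection prompt via invoke() and renders the returned boxes with draw_and_plot_boxes_from_json. Note that in the diff above, od() passes a fixed sample-image URL to invoke() rather than image_path, so the detections come from that sample image while the boxes are drawn onto the local image_path:

```python
# Sketch only: assumes an OpenAI-compatible endpoint is reachable at the given address.
from hdl.utils.llm.chat import OpenAI_M

llm = OpenAI_M(server_ip="127.0.0.1", server_port=8000)  # illustrative values
img, saved = llm.od("local_image.jpg", save_path="boxed.png")
if img is not None:
    img.show()
```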
hdl/utils/llm/chatgr.py
CHANGED
@@ -32,11 +32,11 @@ def chat_with_llm(user_input, chat_history=[]):
 def create_demo():
     """
     Creates a Gradio demo interface for a chatbot application.
-    The interface
+    The interface includes:
     - A chat history display at the top of the page.
-    - A user input textbox
-
-
+    - A user input textbox at the bottom of the page.
+    - A send button to submit messages.
+    The user can send messages either by clicking the send button or by pressing the Enter key.
     Returns:
         gr.Blocks: The Gradio Blocks object representing the demo interface.
     """
hdl/utils/llm/vis.py
CHANGED
@@ -8,11 +8,15 @@ import hashlib
 
 import torch
 import numpy as np
-from PIL import Image
 # from transformers import ChineseCLIPProcessor, ChineseCLIPModel
 from transformers import AutoModel
 from transformers import AutoTokenizer
 import open_clip
+
+from PIL import Image, ImageDraw, ImageFont
+import json
+import re
+import matplotlib.pyplot as plt
 # import natsort
 from redis.commands.search.field import VectorField
 from redis.commands.search.indexDefinition import IndexDefinition, IndexType
@@ -120,6 +124,87 @@ def pilimg_to_base64(pilimg):
     return img_base64
 
 
+
+def draw_and_plot_boxes_from_json(
+    json_data,
+    image_path,
+    save_path=None
+):
+    """
+    Parses the JSON data to extract bounding box coordinates,
+    scales them according to the image size, draws the boxes on the image,
+    and returns the image as a PIL object.
+
+    Args:
+        json_data (str or list): The JSON data as a string or already parsed list.
+        image_path (str): The path to the image file on which boxes are to be drawn.
+        save_path (str or None): The path to save the resulting image. If None, the image won't be saved.
+
+    Returns:
+        PIL.Image.Image: The processed image with boxes drawn on it.
+    """
+    # If json_data is a string, parse it into a Python object
+    if isinstance(json_data, str):
+        json_data = json_data.strip()
+        json_data = re.sub(r"^```json\s*", "", json_data)
+        json_data = re.sub(r"```$", "", json_data)
+        try:
+            data = json.loads(json_data)
+        except json.JSONDecodeError as e:
+            print("Failed to parse JSON data:", e)
+            return None
+    else:
+        data = json_data
+
+    # Open the image
+    try:
+        img = Image.open(image_path)
+    except FileNotFoundError:
+        print(f"Image file not found at {image_path}. Please check the path.")
+        return None
+
+    draw = ImageDraw.Draw(img)
+    width, height = img.size
+
+    # Use a commonly available font
+    try:
+        font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf", size=25)
+    except IOError:
+        print("Default font not found. Using a basic PIL font.")
+        font = ImageFont.load_default()
+
+    # Process and draw boxes
+    for item in data:
+        object_type = item.get("object", "unknown")
+        for bbox in item.get("bboxes", []):
+            x1, y1, x2, y2 = bbox
+            x1 = x1 * width / 1000
+            y1 = y1 * height / 1000
+            x2 = x2 * width / 1000
+            y2 = y2 * height / 1000
+            draw.rectangle([(x1, y1), (x2, y2)], outline="blue", width=5)
+            draw.text((x1, y1), object_type, fill="red", font=font)
+
+    # Plot the image using matplotlib and save it as a PIL Image
+    buf = BytesIO()
+    plt.figure(figsize=(8, 8))
+    plt.imshow(img)
+    plt.axis("off")  # Hide axes ticks
+    plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0)
+    buf.seek(0)
+
+    # Load the buffer into a PIL Image and ensure full loading into memory
+    pil_image = Image.open(buf)
+    pil_image.load()  # Ensure full data is loaded from the buffer
+
+    # Save the image if save_path is provided
+    if save_path:
+        pil_image.save(save_path)
+
+    buf.close()  # Close the buffer after use
+
+    return pil_image
+
 class ImgHandler:
     """
     ImgHandler is a class for handling image processing tasks using pretrained models.
{hjxdl-0.2.81.dist-info → hjxdl-0.2.83.dist-info}/RECORD
CHANGED
@@ -1,5 +1,5 @@
 hdl/__init__.py,sha256=GffnD0jLJdhkd-vo989v40N90sQbofkayRBwxc6TVhQ,72
-hdl/_version.py,sha256=
+hdl/_version.py,sha256=OMRaBymw5QprdlebDPUsdFD1v26PbMK3f65RIPDhT1g,413
 hdl/args/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/args/loss_args.py,sha256=s7YzSdd7IjD24rZvvOrxLLFqMZQb9YylxKeyelSdrTk,70
 hdl/controllers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -120,26 +120,26 @@ hdl/utils/chemical_tools/sdf.py,sha256=71PEqU0H885L6IeGHEa6n7ZLZThvMsZOVLuFG2wno
 hdl/utils/database_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/utils/database_tools/connect.py,sha256=xCacGucKxlQUXs6AsNddpeECvdqT1180V1ZWqHrUdNA,875
 hdl/utils/database_tools/datetime.py,sha256=xqE2xNiOpADzX-R8_bM0bioJRF3Ay9Jp1CAG6dy6uVI,1202
-hdl/utils/database_tools/web.py,sha256=
+hdl/utils/database_tools/web.py,sha256=awJ8lafL-2KRjf3V1uuij8JIvX9U5fI8fLZKOkOvqtk,5771
 hdl/utils/desc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/utils/desc/func_desc.py,sha256=VCqjvZs7qCwBq3NR3ZRknl4oiO5-JP7xm-Rx85W2exg,3365
-hdl/utils/desc/template.py,sha256=
+hdl/utils/desc/template.py,sha256=Kf_tbL-XkDCKNQ3UncbCuYEeUgXEa7kRVCf9TD2b8og,2526
 hdl/utils/general/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-hdl/utils/general/glob.py,sha256=
-hdl/utils/general/runners.py,sha256=
+hdl/utils/general/glob.py,sha256=Zuf7WHU0UdUPOs9UrhxmrCiMC8GrHxQU6n3mTThv6yc,1120
+hdl/utils/general/runners.py,sha256=x7QBolp3MrqNV6L4rB6Ueybr26bqkRFZTuXhY0SwyLk,3061
 hdl/utils/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-hdl/utils/llm/chat.py,sha256=
-hdl/utils/llm/chatgr.py,sha256=
+hdl/utils/llm/chat.py,sha256=_1enhg-IhcY-XewzcXXwCWGqSfkB3pvckpr4QcJ4XsA,24060
+hdl/utils/llm/chatgr.py,sha256=5F5PJHe8vz3iCfi4TT54DCLRi1UeJshECdVtgvvvao0,3696
 hdl/utils/llm/embs.py,sha256=Tf0FOYrOFZp7qQpEPiSCXzlgyHH0X9HVTUtsup74a9E,7174
 hdl/utils/llm/extract.py,sha256=2sK_WJzmYIc8iuWaM9DA6Nw3_6q1O4lJ5pKpcZo-bBA,6512
 hdl/utils/llm/llama_chat.py,sha256=watcHGOaz-bv3x-yDucYlGk5f8FiqfFhwWogrl334fk,4387
-hdl/utils/llm/vis.py,sha256=
+hdl/utils/llm/vis.py,sha256=JPil4gJ-n-awv_EjNIjOwG9Gc89lsqTojV58_66U0_A,24204
 hdl/utils/llm/visrag.py,sha256=0i-VrxqgiV-J7R3VPshu9oc7-rKjFJOldYik3HDXj6M,10176
 hdl/utils/schedulers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/utils/schedulers/norm_lr.py,sha256=bDwCmdEK-WkgxQMFBiMuchv8Mm7C0-GZJ6usm-PQk14,4461
 hdl/utils/weather/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/utils/weather/weather.py,sha256=k11o6wM15kF8b9NMlEfrg68ak-SfSYLN3nOOflFUv-I,4381
-hjxdl-0.2.
-hjxdl-0.2.
-hjxdl-0.2.
-hjxdl-0.2.
+hjxdl-0.2.83.dist-info/METADATA,sha256=0B1zj7EVTm7sUVpCYt5mRz7Y_UeA9e3E1gYQaFeYrZI,836
+hjxdl-0.2.83.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+hjxdl-0.2.83.dist-info/top_level.txt,sha256=-kxwTM5JPhylp06z3zAVO3w6_h7wtBfBo2zgM6YZoTk,4
+hjxdl-0.2.83.dist-info/RECORD,,
{hjxdl-0.2.81.dist-info → hjxdl-0.2.83.dist-info}/WHEEL
File without changes
{hjxdl-0.2.81.dist-info → hjxdl-0.2.83.dist-info}/top_level.txt
File without changes