hjxdl 0.1.16__py3-none-any.whl → 0.1.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
hdl/__init__.py CHANGED
@@ -1,2 +1,2 @@
  from ._version import *
- from ._version import __version__ as __version
+ # from ._version import __version__ as __version
hdl/_version.py CHANGED
@@ -12,5 +12,5 @@ __version__: str
  __version_tuple__: VERSION_TUPLE
  version_tuple: VERSION_TUPLE
 
- __version__ = version = '0.1.16'
- __version_tuple__ = version_tuple = (0, 1, 16)
+ __version__ = version = '0.1.17'
+ __version_tuple__ = version_tuple = (0, 1, 17)
hdl/datasets/city_embs.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b85aa7ade957da8f6dd546671bea47226a5427a0908b4588a186bac5d66654e3
+ size 5267584
hdl/utils/desc/func_desc.py ADDED
@@ -0,0 +1,18 @@
+ FN_DESC = {
+     "get_weather": """
+ ## Function name: get_weather
+ Description: get the weather information for a city
+ Parameters:
+ # city (str): the city name
+ Returns: the weather information
+ JSON to return:
+ {
+     "function_name": "get_weather",
+     "params":
+     {
+         "city": <city_name>
+     }
+ }
+ """,
+     "default": None
+ }
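FN_DESC maps each tool's __name__ to a prompt-ready description; get_decision in hdl/utils/llm/chat.py (below) concatenates these descriptions after FN_TEMPLATE and dispatches to tools by name. A minimal sketch of registering a description for an extra tool — the get_time function and its wording are hypothetical, not part of this release:

    # Hypothetical extra entry, following the "##" function / "#" parameter markers above.
    FN_DESC["get_time"] = """
    ## Function name: get_time
    Description: get the current time in a given timezone
    Parameters:
    # tz (str): an IANA timezone name, e.g. "Asia/Shanghai"
    Returns: the current time as a string
    JSON to return:
    {
        "function_name": "get_time",
        "params":
        {
            "tz": <tz_name>
        }
    }
    """

Equivalently, such entries can be passed as the tool_desc argument of OpenAI_M, which merges them into FN_DESC with the dict-union operator.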
hdl/utils/desc/template.py ADDED
@@ -0,0 +1,18 @@
+ FN_TEMPLATE = """
+ 
+ You are an AI assistant that can call functions to perform tasks. Based on the user's final question, decide whether to call a function.
+ 
+ To call a function, respond with a JSON object containing two fields and no other extra text, to avoid formatting problems:
+ 
+ "function_name": the name of the function to call.
+ "params": the parameters the function requires. After the function call executes, you will receive a response containing its return value.
+ 
+ If no function call is needed, return the following dict with no other extra text, to avoid formatting problems:
+ {
+     "function_name": null
+ }
+ 
+ ### Functions:
+ Below is the list of functions available for interacting with the system; each function starts with a "##" marker and each parameter with a "#" marker.
+ Each function has specific parameters and requirements. Make sure to follow each function's instructions carefully and choose the one that fits the task you want to perform. Provide the function call in JSON, taking the concrete parameter values from the user's question, without the "<>" brackets:
+ """
hdl/utils/llm/chat.py CHANGED
@@ -2,6 +2,9 @@ import typing as t
 
  from llama_cpp import Llama
  from openai import OpenAI
+ from ..desc.template import FN_TEMPLATE
+ from ..desc.func_desc import FN_DESC
+ import json
  # import traceback
 
 
@@ -14,16 +17,16 @@ def chat_oai_stream(
      **kwargs
  ):
      """Chat with OpenAI's GPT-3 model using the specified parameters.
- 
+ 
      Args:
          base_url (str): The base URL for the OpenAI API. Default is "http://127.0.0.1:8000/v1".
          api_key (str): The API key for accessing the OpenAI API. Default is "dummy_key".
          model (str): The model ID to use for the chat. Default is "/data/models/Qwen-7B-Chat-Int4".
          prompt (str): The initial prompt for the chat conversation.
- 
+ 
      Yields:
          str: The generated content from the chat conversation.
- 
+ 
      """
      client = OpenAI(
          base_url=base_url,
@@ -39,7 +42,7 @@ def chat_oai_stream(
          *args,
          **kwargs
      )
- 
+ 
      for chunk in response:
          content = chunk.choices[0].delta.content
          yield content
@@ -54,13 +57,13 @@ def chat_oai_invoke(
      **kwargs
  ):
      """Invoke OpenAI chat API to generate a response based on the given prompt.
- 
+ 
      Args:
          base_url (str): The base URL of the OpenAI API. Default is "http://127.0.0.1:8000/v1".
          api_key (str): The API key for accessing the OpenAI API. Default is "dummy_key".
          model (str): The model to use for generating the response. Default is "/data/models/Qwen-7B-Chat-Int4".
          prompt (str): The prompt message to start the conversation. Default is "Who are you?".
- 
+ 
      Returns:
          str: The response generated by the OpenAI chat API based on the prompt.
      """
@@ -78,7 +81,7 @@ def chat_oai_invoke(
          *args,
          **kwargs
      )
- 
+ 
      return response.choices[0].message.content
 
 
@@ -94,7 +97,7 @@ class GGUF_M(Llama):
          **kwargs
      ):
          """Initialize the model with the specified parameters.
- 
+ 
          Args:
              model_path (str): The path to the model.
              device (str, optional): The device to use, either 'gpu' or 'cpu'. Defaults to 'gpu'.
@@ -103,12 +106,12 @@ class GGUF_M(Llama):
              server_port (int, optional): The port of the server. Defaults to 8000.
              *args: Variable length argument list.
              **kwargs: Arbitrary keyword arguments.
- 
+ 
          Raises:
              KeyError: If 'num_threads' or 'max_context_length' is missing in generation_kwargs.
          """
          print("Loading the model locally...")
-         if device.lower() == 'cpu':
+         if device.lower() == 'cpu':
              super().__init__(
                  model_path=model_path,
                  n_threads=generation_kwargs['num_threads'],
@@ -129,23 +132,23 @@ class GGUF_M(Llama):
          self.generation_kwargs = generation_kwargs
 
      def invoke(
-         self, 
+         self,
          prompt : str,
          stop: list[str] | None = ["USER:", "ASSISTANT:"],
          # history: list = [],
          **kwargs: t.Any,
      ) -> str:
          """Invoke the model to generate a response based on the given prompt.
- 
+ 
          Args:
              prompt (str): The prompt to be used for generating the response.
              stop (list[str], optional): List of strings that indicate when the model should stop generating the response. Defaults to ["USER:", "ASSISTANT:"].
              **kwargs: Additional keyword arguments to be passed to the model.
- 
+ 
          Returns:
              str: The generated response based on the prompt.
          """
-         prompt_final = f"USER:\n{prompt}\nASSISTANT:\n"
+         prompt_final = f"USER:\n{prompt}\nASSISTANT:\n"
 
          result = self.create_completion(
              prompt_final,
@@ -172,20 +175,20 @@ class GGUF_M(Llama):
          **kwargs: t.Any,
      ):
          """Generate text responses based on the given prompt using the model.
- 
+ 
          Args:
              prompt (str): The prompt to generate text responses.
              stop (list[str], optional): List of strings to stop the generation. Defaults to ["USER:", "ASSISTANT:"].
              **kwargs: Additional keyword arguments for the model.
- 
+ 
          Yields:
              str: Text responses generated by the model based on the prompt.
          """
-         prompt = f"USER:\n{prompt}\nASSISTANT:\n"
+         prompt = f"USER:\n{prompt}\nASSISTANT:\n"
          output = self.create_completion(
              prompt,
              stream=True,
-             repeat_penalty=self.generation_kwargs["repetition_penalty"],
+             repeat_penalty=self.generation_kwargs["repetition_penalty"],
              max_tokens=self.generation_kwargs["max_new_tokens"],
              stop=stop,
              echo=False,
@@ -212,14 +215,14 @@ class GGUF_M(Llama):
      #     server_port: int = 8000,
      # ):
      #     """Initialize the model with the provided model path and optional parameters.
- 
+ 
      #     Args:
      #         model_path (str): The path to the model.
      #         device (str, optional): The device to use for model initialization. Defaults to 'gpu'.
      #         generation_kwargs (dict, optional): Additional keyword arguments for model generation. Defaults to {}.
      #         server_ip (str, optional): The IP address of the server. Defaults to "127.0.0.1".
      #         server_port (int, optional): The port of the server. Defaults to 8000.
-     #     """
+     #     """
      #     # Initialize the model locally
      #     # super().__init__()
      #     self.generation_kwargs = generation_kwargs
@@ -238,27 +241,27 @@ class GGUF_M(Llama):
      #         n_gpu_layers=-1,
      #         flash_attn=True
      #     )
- 
+ 
      #     print("Finished loading the local model")
 
      # def invoke(
-     #     self, 
+     #     self,
      #     prompt : str,
      #     stop: list[str] | None = ["USER:", "ASSISTANT:"],
      #     # history: list = [],
      #     **kwargs: t.Any,
      # ) -> str:
      #     """Invoke the model to generate a response based on the given prompt.
- 
+ 
      #     Args:
      #         prompt (str): The prompt to be used for generating the response.
      #         stop (list[str], optional): List of strings that indicate when the model should stop generating the response. Defaults to ["USER:", "ASSISTANT:"].
      #         **kwargs: Additional keyword arguments to be passed to the model.
- 
+ 
      #     Returns:
      #         str: The generated response based on the prompt.
      #     """
-     #     prompt_final = f"USER:\n{prompt}\nASSISTANT:\n"
+     #     prompt_final = f"USER:\n{prompt}\nASSISTANT:\n"
 
      #     result = self.model.create_completion(
      #         prompt_final,
@@ -276,7 +279,7 @@ class GGUF_M(Llama):
      #     # [prompt, resp]
      #     # )
      #     return resp
- 
+ 
      # def stream(
      #     self,
      #     prompt: str,
@@ -285,20 +288,20 @@ class GGUF_M(Llama):
      #     **kwargs: t.Any,
      # ):
      #     """Generate text responses based on the given prompt using the model.
- 
+ 
      #     Args:
      #         prompt (str): The prompt to generate text responses.
      #         stop (list[str], optional): List of strings to stop the generation. Defaults to ["USER:", "ASSISTANT:"].
      #         **kwargs: Additional keyword arguments for the model.
- 
+ 
      #     Yields:
      #         str: Text responses generated by the model based on the prompt.
      #     """
-     #     prompt = f"USER:\n{prompt}\nASSISTANT:\n"
+     #     prompt = f"USER:\n{prompt}\nASSISTANT:\n"
      #     output = self.model.create_completion(
      #         prompt,
      #         stream=True,
-     #         repeat_penalty=self.generation_kwargs["repetition_penalty"],
+     #         repeat_penalty=self.generation_kwargs["repetition_penalty"],
      #         max_tokens=self.generation_kwargs["max_new_tokens"],
      #         stop=stop,
      #         echo=False,
@@ -324,11 +327,13 @@ class OpenAI_M():
          server_ip: str = "172.28.1.2",
          server_port: int = 8000,
          api_key: str = "dummy_key",
+         tools: list = None,
+         tool_desc: dict = None,
          *args,
          **kwargs
      ):
          """Initialize the OpenAI client.
- 
+ 
          Args:
              model_path (str): Path to the model file. Defaults to "default_model".
              device (str): Device to use for model inference. Defaults to 'gpu'.
@@ -338,7 +343,7 @@ class OpenAI_M():
              api_key (str): API key for authentication. Defaults to "dummy_key".
              *args: Variable length argument list.
              **kwargs: Arbitrary keyword arguments.
- 
+ 
          Attributes:
              model_path (str): Path to the model file.
              server_ip (str): IP address of the server.
@@ -358,9 +363,13 @@ class OpenAI_M():
              *args,
              **kwargs
          )
- 
+         self.tools = tools
+         self.tool_desc = FN_DESC
+         if tool_desc is not None:
+             self.tool_desc = self.tool_desc | tool_desc
+ 
      def invoke(
-         self, 
+         self,
          prompt : str,
          stop: list[str] | None = ["USER:", "ASSISTANT:"],
          # history: list = [],
@@ -368,12 +377,12 @@ class OpenAI_M():
          **kwargs: t.Any,
      ) -> str:
          """Invoke the chatbot with the given prompt and return the response.
- 
+ 
          Args:
              prompt (str): The prompt to provide to the chatbot.
              stop (list[str], optional): List of strings that indicate the end of the conversation. Defaults to ["USER:", "ASSISTANT:"].
              **kwargs: Additional keyword arguments to pass to the chatbot.
- 
+ 
          Returns:
              str: The response generated by the chatbot.
          """
@@ -391,7 +400,7 @@ class OpenAI_M():
          return response.choices[0].message.content
 
      def stream(
-         self, 
+         self,
          prompt : str,
          stop: list[str] | None = ["USER:", "ASSISTANT:"],
          # history: list = [],
@@ -399,12 +408,12 @@ class OpenAI_M():
          **kwargs: t.Any,
      ):
          """Generate text completion in a streaming fashion.
- 
+ 
          Args:
              prompt (str): The text prompt to generate completion for.
              stop (list[str], optional): List of strings to stop streaming at. Defaults to ["USER:", "ASSISTANT:"].
              **kwargs: Additional keyword arguments to pass to the completion API.
- 
+ 
          Yields:
              str: The generated text completion in a streaming fashion.
          """
@@ -419,8 +428,59 @@
          # model=kwargs.get("model", "default_model")
          **kwargs
      )
- 
+ 
      for chunk in response:
          content = chunk.choices[0].delta.content
          if content:
-             yield content
+             yield content
+ 
+     def get_decision(
+         self,
+         prompt: str,
+         **kwargs: t.Any
+     ):
+         prompt_final = FN_TEMPLATE
+         for tool in self.tools:
+             prompt_final += self.tool_desc.get(tool.__name__, "")
+         prompt_final += f"\nUser question:\n{prompt}"
+         # print(prompt_final)
+         decision_dict = self.invoke(prompt_final, **kwargs)
+         print(decision_dict)
+         return json.loads(decision_dict)
+ 
+     def get_tool_result(
+         self,
+         prompt: str,
+         **kwargs: t.Any
+     ):
+         decision_dict = self.get_decision(prompt, **kwargs)
+         if decision_dict.get("function_name", None) is None:
+             return ""
+         else:
+             func_name = decision_dict.get("function_name")
+             for tool in self.tools:
+                 if tool.__name__ == func_name:
+                     tool_final = tool
+             func_kwargs = decision_dict.get("params")
+             return tool_final(**func_kwargs)
+ 
+ 
+     def agent_response(
+         self,
+         prompt : str,
+         stream = True,
+         **kwargs: t.Any
+     ):
+ 
+         decision_dict = self.get_decision(prompt, **kwargs)
+         if decision_dict.get("function_name", None) is None:
+             return self.stream(prompt, **kwargs)
+         else:
+             tool_result = str(self.get_tool_result(prompt, **kwargs))
+             prompt_final = "Answer the final user question based on the context below:\nContext:\n"
+             prompt_final += tool_result
+             prompt_final += f"\nUser question:\n{prompt}"
+             if stream:
+                 return self.stream(prompt_final, **kwargs)
+             else:
+                 return self.invoke(prompt_final, **kwargs)
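Together these methods turn OpenAI_M into a small tool-calling agent: get_decision builds the FN_TEMPLATE prompt and parses the model's JSON verdict, get_tool_result dispatches to the matching tool by __name__, and agent_response feeds the tool output back in as context for the final answer. A hedged usage sketch against a local OpenAI-compatible server — the address, port, and model behind them are illustrative:

    from hdl.utils.llm.chat import OpenAI_M
    from hdl.utils.weather.weather import get_weather

    llm = OpenAI_M(
        server_ip="127.0.0.1",
        server_port=8000,
        tools=[get_weather],  # resolved against FN_DESC / tool_desc by __name__
    )

    # Streams a weather-grounded answer; falls back to plain streaming chat
    # when the model answers {"function_name": null}.
    for chunk in llm.agent_response("What is the weather in Beijing?", stream=True):
        print(chunk, end="", flush=True)

Note that get_decision calls json.loads on the raw completion, so a model that wraps its JSON in extra prose will raise a JSONDecodeError here.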
hdl/utils/weather/weather.py CHANGED
@@ -2,7 +2,11 @@ import requests
  from pathlib import Path
  import os
  import json
+ 
  from bs4 import BeautifulSoup
+ import numpy as np
+ 
+ from ..llm.embs import HFEmbedder
 
 
  def get_city_codes():
@@ -32,37 +36,75 @@ def get_page_data(html):
      weather_info = soup.find('div', id='7d')
      seven_weather = weather_info.find('ul')
      weather_list = seven_weather.find_all('li')
+ 
+     weather_str = ""
+ 
      for weather in weather_list:
-         print('=' * 60)
-         print(weather.find('h1').get_text())
-         print('Weather:', weather.find('p', class_='wea').get_text())
+         # print("\n")
+         weather_str += (weather.find('h1').get_text() + "\n")  # date
+         weather_str += ('Weather: ' + weather.find('p', class_='wea').get_text() + "\n")
          # Check whether tag 'p', class 'tem' contains a 'span' tag, which indicates a high temperature is given
          if weather.find('p', class_='tem').find('span'):
              temp_high = weather.find('p', class_='tem').find('span').get_text()
          else:
              temp_high = ''  # high temperature
          temp_low = weather.find('p', class_='tem').find('i').get_text()  # low temperature
-         print(f'Temperature: {temp_low}/{temp_high}')
+         weather_str += (f'Temperature: {temp_low}/{temp_high}' + "\n")
          win_list_tag = weather.find('p', class_='win').find('em').find_all('span')
          win_list = []
          for win in win_list_tag:
              win_list.append(win.get('title'))
-         print('Wind direction:', '-'.join(win_list))
-         print('Wind force:', weather.find('p', class_='win').find('i').get_text())
+         weather_str += ('Wind direction: ' + '-'.join(win_list) + "\n")
+         weather_str += ('Wind force: ' + weather.find('p', class_='win').find('i').get_text() + "\n")
+         weather_str += "\n"
+ 
+     return weather_str
+ 
+ 
+ def get_weather(city):
+     code_dic = get_city_codes()
+     city_name = city
+     weather_str = ""
+     if city not in code_dic:
+         city_name = get_standard_cityname(city)
+         weather_str += f"{city} was matched to {city_name}; if this is wrong, please provide a more precise city name\n"
+     html = get_html(code_dic[city_name])
+     result = get_page_data(html)
+     weather_str += f"\nThe weather information for {city} is as follows:\n\n"
+     weather_str += result
+     return weather_str
 
 
- def main():
+ def main(city):
      code_dic = get_city_codes()
-     print('=' * 60)
-     print('\t' * 5, 'Weather Forecast Query System')
-     print('=' * 60)
-     city = input("Please enter the city to query: ")
+     city = city
      if city in code_dic:
-         html = get_html(code_dic[city]['AREAID'])
+         html = get_html(code_dic[city])
          get_page_data(html)
      else:
          print('The place you want to query does not exist')
 
 
- if __name__ == '__main__':
-     main()
+ def get_standard_cityname(
+     city,
+     emb_dir: str = os.getenv(
+         'EMB_MODEL_DIR',
+         '/home/jhu/dev/models/bge-m3'
+     )
+ ):
+     code_dic = get_city_codes()
+     city_list = list(code_dic.keys())
+ 
+     city_embs = np.load(
+         Path(__file__).resolve().parent.parent.parent \
+             / "datasets" \
+             / "city_embs.npy"
+     )
+ 
+     emb = HFEmbedder(
+         emb_dir=emb_dir,
+     )
+     query_emb = emb.encode(city)
+     sims = city_embs @ query_emb.T
+ 
+     return city_list[np.argmax(sims)]
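get_standard_cityname resolves a fuzzy city name by taking the dot product between the query embedding and precomputed embeddings of every known city key, shipped as hdl/datasets/city_embs.npy (the git-lfs pointer above). A sketch of how such an array could be regenerated, assuming HFEmbedder.encode returns one vector per input string; the model path is illustrative:

    import numpy as np
    from hdl.utils.llm.embs import HFEmbedder
    from hdl.utils.weather.weather import get_city_codes

    emb = HFEmbedder(emb_dir="/path/to/bge-m3")  # illustrative model path
    cities = list(get_city_codes().keys())
    # One row per city; get_standard_cityname indexes city_list by np.argmax(sims),
    # so the row order here must match the key order of get_city_codes().
    city_embs = np.stack([emb.encode(c) for c in cities])
    np.save("city_embs.npy", city_embs)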
{hjxdl-0.1.16.dist-info → hjxdl-0.1.17.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: hjxdl
- Version: 0.1.16
+ Version: 0.1.17
  Summary: A collection of functions for Jupyter notebooks
  Home-page: https://github.com/huluxiaohuowa/hdl
  Author: Jianxing Hu
{hjxdl-0.1.16.dist-info → hjxdl-0.1.17.dist-info}/RECORD RENAMED
@@ -1,5 +1,5 @@
- hdl/__init__.py,sha256=5sZZNySv08wwfzJcSDssGTqUn9wlmDsR6R4XB8J8mFM,70
- hdl/_version.py,sha256=rA1LdDxj-8LJmPkCOu32O_el1jpvxISCr8X4yOogHgc,413
+ hdl/__init__.py,sha256=GffnD0jLJdhkd-vo989v40N90sQbofkayRBwxc6TVhQ,72
+ hdl/_version.py,sha256=U6sY9_fIsSw5WZCknnMPWLcgRhj8eWXLoDeffP6UUFA,413
  hdl/args/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  hdl/args/loss_args.py,sha256=s7YzSdd7IjD24rZvvOrxLLFqMZQb9YylxKeyelSdrTk,70
  hdl/controllers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -39,6 +39,7 @@ hdl/data/dataset/samplers/chiral.py,sha256=ZS83kg5e2gdHVGgIuCjCepDwk2SKqWDgJawH3
  hdl/data/dataset/seq/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  hdl/data/dataset/seq/rxn_dataset.py,sha256=jfXFlR3ITAf0KwUfIevzUZHnLBnFYrL69Cc81EMv0x0,1668
  hdl/datasets/city_code.json,sha256=qnTL6ldpGnQanDXN3oitx12E6oiayaCHTh2Zi9RyQjM,60816
+ hdl/datasets/city_embs.npy,sha256=jrkUZ_Dz6lwZAiNZpHcldbe7TzelvV7QgLhNZa1Fen0,132
  hdl/datasets/defined_BaseFeatures.fdef,sha256=5QhCEcu6fjSTXaTcZZ8-LSgf72_aJj_ykoDk82ZwVBI,7383
  hdl/datasets/las.tsv,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  hdl/datasets/route_template.json,sha256=2qhkbtEZUrUod6PXCWXxAgQmU-jAC0yLcWGBBk2IwgE,3757
@@ -50,8 +51,6 @@ hdl/features/graph/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuF
  hdl/features/graph/featurization.py,sha256=QLbj33JsgO-OWarIC2HXQP7eMu8pd-GWmppZQj_tQ_k,10902
  hdl/features/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  hdl/features/utils/utils.py,sha256=aL4UAALblaw1w0AjK7MX8nSj9zwTmrp9CTLwJUX8ZtE,4225
- hdl/ju/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- hdl/ju/setup.py,sha256=MB3rndQYt9QC-bdyGt81HYR0Rdr0l8DbAHktIuFMYU0,1725
  hdl/jupyfuncs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  hdl/jupyfuncs/chem/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  hdl/jupyfuncs/chem/mol.py,sha256=FDb2L61JL6xkNK7bxXWVjAT-r-st7iMyQNoFezBvTUE,15294
@@ -120,17 +119,20 @@ hdl/utils/chemical_tools/query_info.py,sha256=wyQXwKSY_gBGVUNvYggHpYBtOLAtpYKq3P
  hdl/utils/chemical_tools/sdf.py,sha256=71PEqU0H885L6IeGHEa6n7ZLZThvMsZOVLuFG2wnoyM,542
  hdl/utils/database_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  hdl/utils/database_tools/connect.py,sha256=KUnVG-8raifEJ_N0b3c8LkTTIfn9NIyw8LX6qvpA3YU,723
+ hdl/utils/desc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ hdl/utils/desc/func_desc.py,sha256=Vw7GWhnPvr-WxMkU2P5lch8bnlsW6a0NEaJDtI4mFPw,316
+ hdl/utils/desc/template.py,sha256=yhXtuNV474zbCD7SP_iZHR3cnGJQYozkb0Ro3V_UkaM,1039
  hdl/utils/general/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  hdl/utils/general/glob.py,sha256=8-RCnt6L297wMIfn34ZAMCsGCZUjHG3MGglGZI1cX0g,491
  hdl/utils/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- hdl/utils/llm/chat.py,sha256=Xypu7KlnTeI9oep02tuw04GooAfysZEgY2T_P29xZuU,15086
+ hdl/utils/llm/chat.py,sha256=RJgiDTW16Ah-bYDySW9QWhkD_VZf9Q3uBZcO7vXZuvA,16877
  hdl/utils/llm/embs.py,sha256=Tf0FOYrOFZp7qQpEPiSCXzlgyHH0X9HVTUtsup74a9E,7174
  hdl/utils/llm/extract.py,sha256=2sK_WJzmYIc8iuWaM9DA6Nw3_6q1O4lJ5pKpcZo-bBA,6512
  hdl/utils/schedulers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  hdl/utils/schedulers/norm_lr.py,sha256=bDwCmdEK-WkgxQMFBiMuchv8Mm7C0-GZJ6usm-PQk14,4461
  hdl/utils/weather/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- hdl/utils/weather/weather.py,sha256=LiIgEX-XPZxltGWgrwJNXSvBuinF-VaBT3ohRWYDjPI,2315
- hjxdl-0.1.16.dist-info/METADATA,sha256=8yEQaClAH1z9xw77MfSaZtEWhOjdW7NDRKAfbePA2lA,590
- hjxdl-0.1.16.dist-info/WHEEL,sha256=Mdi9PDNwEZptOjTlUcAth7XJDFtKrHYaQMPulZeBCiQ,91
- hjxdl-0.1.16.dist-info/top_level.txt,sha256=-kxwTM5JPhylp06z3zAVO3w6_h7wtBfBo2zgM6YZoTk,4
- hjxdl-0.1.16.dist-info/RECORD,,
+ hdl/utils/weather/weather.py,sha256=xGWkqGoDAM2N4Gkv5lDyHb_Xa5RmFlWTWhfZxDbB9T8,3353
+ hjxdl-0.1.17.dist-info/METADATA,sha256=36AqD_SB3KKRsuELmon7XDlKYw2OEMw_lwAAp-a4I1g,590
+ hjxdl-0.1.17.dist-info/WHEEL,sha256=Mdi9PDNwEZptOjTlUcAth7XJDFtKrHYaQMPulZeBCiQ,91
+ hjxdl-0.1.17.dist-info/top_level.txt,sha256=-kxwTM5JPhylp06z3zAVO3w6_h7wtBfBo2zgM6YZoTk,4
+ hjxdl-0.1.17.dist-info/RECORD,,
hdl/ju/setup.py DELETED
@@ -1,55 +0,0 @@
- from setuptools import setup, find_packages
- import setuptools_scm
- 
- def read_requirements():
-     """Read requirements.txt and return the list of dependencies"""
-     with open('requirements.txt', 'r', encoding='utf-8') as file:
-         return [
-             line.strip()
-             for line in file
-             if line.strip() and not line.startswith('#')
-         ]
- 
- 
- def custom_version_scheme(version):
-     """Custom version scheme that ensures there is no .dev suffix"""
-     if version.exact:
-         return version.format_with("{tag}")
-     elif version.distance:
-         return f"{version.format_next_version()}.post{version.distance}"
-     else:
-         return version.format_with("0.0.0")
- 
- def custom_local_scheme(version):
-     """Custom local scheme that ensures there is no local version suffix"""
-     return ""
- 
- setup(
-     name="jupyfuncs",
-     use_scm_version={
-         "version_scheme": custom_version_scheme,
-         "local_scheme": custom_local_scheme,
-         "write_to": "jupyfuncs/_version.py"
-     },
-     author="Jianxing Hu",
-     author_email="j.hu@pku.edu.cn",
-     description="A collection of functions for Jupyter notebooks",
-     long_description=open('README.md', 'r', encoding='utf-8').read(),
-     long_description_content_type='text/markdown',
-     url="https://github.com/huluxiaohuowa/jupyfuncs",
-     packages=find_packages(),
-     package_data={
-         'jupyfuncs': [
-             'datasets/*.*',
-             'datasets/*/*.*',
-         ]
-     },
-     classifiers=[
-         "Programming Language :: Python :: 3",
-         "License :: OSI Approved :: MIT License",
-         "Operating System :: OS Independent",
-     ],
-     python_requires='>=3.6',
-     setup_requires=['setuptools_scm'],
-     install_requires=read_requirements()
- )
{hjxdl-0.1.16.dist-info → hjxdl-0.1.17.dist-info}/WHEEL RENAMED
File without changes
{hjxdl-0.1.16.dist-info → hjxdl-0.1.17.dist-info}/top_level.txt RENAMED
File without changes