pycoze 0.1.225__py3-none-any.whl → 0.1.226__py3-none-any.whl

Sign up to get free protection for your applications and to get access to all the features.
pycoze/ai/__init__.py CHANGED
@@ -1,2 +1,2 @@
1
- from .vram_reserve import reserve_vram, reserve_vram_retry, unreserve_vram
2
- from .llm import chat, chat_stream, extract, yes_or_no, extract_code, text_to_image_prompt
1
+ from .vram_reserve import reserve_vram, reserve_vram_retry, unreserve_vram
2
+ from .llm import chat, chat_stream, extract, yes_or_no, extract_code, text_to_image_prompt
pycoze/ai/llm/__init__.py CHANGED
@@ -1,2 +1,2 @@
1
1
  from .text_to_image_prompt import text_to_image_prompt
2
- from .chat import chat, chat_stream, extract, yes_or_no, extract_code
2
+ from .chat import chat, chat_stream, extract, yes_or_no, extract_code
@@ -1,65 +1,65 @@
1
- import re
2
- from .chat import yes_or_no, extract
3
-
4
-
5
- def contains_chinese(text):
6
- pattern = re.compile(r'[\u4e00-\u9fa5]') # Regular expression to match Chinese characters
7
- return bool(re.search(pattern, text))
8
-
9
-
10
- requirement = """
11
- Please reflect the scene content as prompts for the drawing AI.
12
-
13
- ## Prompt Concept
14
- - A prompt is used to describe the image content, composed of common words, using English commas (",") as separators. For example, a prompt like "woman" indicates that the image should include a woman.
15
-
16
- ## Tag Restrictions
17
- - Tags should be described using English words or phrases, avoiding Chinese.
18
- - Tags can only contain keywords or key phrases, and should not include personal names, place names, etc.
19
- - Tags should try to preserve physical characteristics of people, like body shape and hairstyle, but not use personal names, instead using terms like "man" to refer to people.
20
- - The number of tags in a prompt is limited to 40, and the number of words is limited to 60.
21
-
22
- ## Incorrect Examples of Prompts
23
- "In the bustling Shanghai Bund, there is a young man named Li Yang."
24
- The prompt includes non-keywords like "there is", as well as the personal name "Li Yang" and the place name "Shanghai."
25
- It should be modified to "a young man, in the bustling Bund"
26
-
27
- """
28
-
29
- # As the LLM used is not very effective, negative prompts are not suitable for generation by LLM at this stage
30
- # - Negative prompts describe content that should not appear in the image, for example if "bird, man" appears in the negative prompt, it means the image should not include "birds and men".
31
-
32
-
33
- def text_to_image_prompt(query, style, negative_style, with_prompt="best quality,4k,", with_negative_prompt=""):
34
- query = query.replace("{", "【").replace("}", "】")
35
- needed = f"The scene I need: {query}"
36
- style = style.replace(",", " ").replace(",", " ")
37
- if len(style) > 0:
38
- needed += f"\nThe style I need: {style}"
39
- # if len(negative_style) > 0:
40
- # needed += f"\nStyles to avoid: {negative_style}"
41
-
42
- for i in range(15):
43
- try:
44
- print("doing")
45
- output_obj = extract([("prompt", "Describe the image content with keywords")], needed + "\n" + requirement)
46
- print("done")
47
- if contains_chinese(output_obj["prompt"]):
48
- print("Contains Chinese, regenerating")
49
- continue
50
- if yes_or_no("Does it include personal names:\n"+output_obj["prompt"]):
51
- print(output_obj["prompt"]+" contains personal names, correcting")
52
- output_obj["prompt"] = extract(
53
- [("text without personal names", "Modified result (personal names can be changed to man, woman, he, she, etc.)")],
54
- "Modify the following text, personal names can be changed to man, woman, he, she, etc.: \n"+output_obj["prompt"])["text without personal names"]
55
- break
56
- except Exception as e:
57
- print(e)
58
- output_obj["prompt"] = with_prompt + output_obj["prompt"].replace(" and ", ",")
59
- output_obj["negative_prompt"] = with_negative_prompt
60
- output_obj["with_prompt"] = with_prompt
61
- output_obj["with_negative_prompt"] = with_negative_prompt
62
- output_obj["query"] = query
63
- output_obj["style"] = style
64
- output_obj["negative_style"] = negative_style
65
- return output_obj
1
+ import re
2
+ from .chat import yes_or_no, extract
3
+
4
+
5
def contains_chinese(text):
    """Return True if *text* contains at least one common CJK character."""
    # \u4e00-\u9fa5 is the basic CJK Unified Ideographs range.
    return re.search(r'[\u4e00-\u9fa5]', text) is not None
8
+
9
+
10
# Prompt-engineering instructions appended to every drawing request sent to
# the LLM by text_to_image_prompt(). This is a runtime string consumed by the
# model — do not reword or translate its contents.
requirement = """
Please reflect the scene content as prompts for the drawing AI.

## Prompt Concept
- A prompt is used to describe the image content, composed of common words, using English commas (",") as separators. For example, a prompt like "woman" indicates that the image should include a woman.

## Tag Restrictions
- Tags should be described using English words or phrases, avoiding Chinese.
- Tags can only contain keywords or key phrases, and should not include personal names, place names, etc.
- Tags should try to preserve physical characteristics of people, like body shape and hairstyle, but not use personal names, instead using terms like "man" to refer to people.
- The number of tags in a prompt is limited to 40, and the number of words is limited to 60.

## Incorrect Examples of Prompts
"In the bustling Shanghai Bund, there is a young man named Li Yang."
The prompt includes non-keywords like "there is", as well as the personal name "Li Yang" and the place name "Shanghai."
It should be modified to "a young man, in the bustling Bund"

"""
28
+
29
+ # As the LLM used is not very effective, negative prompts are not suitable for generation by LLM at this stage
30
+ # - Negative prompts describe content that should not appear in the image, for example if "bird, man" appears in the negative prompt, it means the image should not include "birds and men".
31
+
32
+
33
def text_to_image_prompt(query, style, negative_style, with_prompt="best quality,4k,", with_negative_prompt=""):
    """Turn a scene description into a drawing-AI prompt dict via the LLM.

    Parameters:
        query: scene description; braces are replaced with 【】 so they do not
            clash with template/format syntax downstream.
        style: style hints appended to the request (commas normalized to spaces).
        negative_style: currently only echoed into the result, not sent to the
            LLM (see the commented-out lines below).
        with_prompt / with_negative_prompt: fixed prefixes merged into the result.

    Returns a dict with keys: prompt, negative_prompt, with_prompt,
    with_negative_prompt, query, style, negative_style.

    Raises:
        RuntimeError: if no usable (non-Chinese) prompt was produced in 15 tries.
    """
    query = query.replace("{", "【").replace("}", "】")
    needed = f"The scene I need: {query}"
    # Normalize both ASCII and full-width commas in the style to spaces.
    style = style.replace(",", " ").replace("，", " ")
    if len(style) > 0:
        needed += f"\nThe style I need: {style}"
    # if len(negative_style) > 0:
    #     needed += f"\nStyles to avoid: {negative_style}"

    output_obj = None
    for _ in range(15):
        try:
            print("doing")
            candidate = extract([("prompt", "Describe the image content with keywords")], needed + "\n" + requirement)
            print("done")
            if contains_chinese(candidate["prompt"]):
                print("Contains Chinese, regenerating")
                continue
            if yes_or_no("Does it include personal names:\n" + candidate["prompt"]):
                print(candidate["prompt"] + " contains personal names, correcting")
                candidate["prompt"] = extract(
                    [("text without personal names", "Modified result (personal names can be changed to man, woman, he, she, etc.)")],
                    "Modify the following text, personal names can be changed to man, woman, he, she, etc.: \n" + candidate["prompt"])["text without personal names"]
            output_obj = candidate
            break
        except Exception as e:
            print(e)
    # BUG FIX: the original dereferenced output_obj unconditionally after the
    # loop, crashing with NameError when extract() failed on every attempt.
    if output_obj is None:
        raise RuntimeError("Failed to generate an image prompt after 15 attempts")
    output_obj["prompt"] = with_prompt + output_obj["prompt"].replace(" and ", ",")
    output_obj["negative_prompt"] = with_negative_prompt
    output_obj["with_prompt"] = with_prompt
    output_obj["with_negative_prompt"] = with_negative_prompt
    output_obj["query"] = query
    output_obj["style"] = style
    output_obj["negative_style"] = negative_style
    return output_obj
pycoze/ai/llm/think.py ADDED
@@ -0,0 +1,131 @@
1
+ from pycoze import utils
2
+ import json5
3
+ import re
4
+ from retrying import retry
5
+ from openai import OpenAI
6
+ import openai
7
+ import requests
8
+ import os
9
+
10
+
11
def never_retry_on_rate_limit_error(exception):
    """Retry predicate for `retrying`: retry on anything EXCEPT a RateLimitError."""
    if isinstance(exception, openai.RateLimitError):
        return False
    return True
14
+
15
@retry(retry_on_exception=never_retry_on_rate_limit_error, wait_exponential_multiplier=500, stop_max_attempt_number=5)
def think(user_text, history, temperature=0.2, stop=None, **kwargs):
    """Send one chat-completion request and return the assistant's reply text.

    History entries are reduced to role/content pairs via simple_history();
    extra **kwargs are accepted for interface compatibility but not forwarded.
    """
    cfg = utils.read_json_file("llm.json")
    # The config stores a full completions URL; the OpenAI client wants the base.
    client = OpenAI(
        api_key=cfg["apiKey"],
        base_url=cfg["baseURL"].replace("/chat/completions", ""),
    )
    messages = simple_history(history) + [{"role": "user", "content": user_text}]
    completion = client.chat.completions.create(
        model=cfg["model"],
        messages=messages,
        temperature=temperature,
        stop=stop,
    )
    return completion.choices[0].message.content
33
+
34
+
35
@retry(retry_on_exception=never_retry_on_rate_limit_error, wait_exponential_multiplier=500, stop_max_attempt_number=5)
def think_stream(user_text, history, temperature=0.2, stop=None, **kwargs):
    """Like think(), but yields the assistant's reply incrementally as text chunks."""
    cfg = utils.read_json_file("llm.json")
    # The config stores a full completions URL; the OpenAI client wants the base.
    client = OpenAI(
        api_key=cfg["apiKey"],
        base_url=cfg["baseURL"].replace("/chat/completions", ""),
    )
    messages = simple_history(history) + [{"role": "user", "content": user_text}]
    stream = client.chat.completions.create(
        model=cfg["model"],
        messages=messages,
        stream=True,
        temperature=temperature,
        stop=stop,
    )
    for chunk in stream:
        # Some chunks (e.g. the terminating one) carry no content delta.
        yield chunk.choices[0].delta.content or ""
56
+
57
+
58
def simple_history(history):
    """Strip each history entry down to just its role/content pair."""
    simplified = []
    for entry in history:
        simplified.append({"role": entry["role"], "content": entry["content"]})
    return simplified
60
+
61
+
62
@retry(retry_on_exception=never_retry_on_rate_limit_error, wait_exponential_multiplier=500, stop_max_attempt_number=3)
def extract(response_data, text: str, temperature=0, **kwargs):
    """Ask the LLM to answer `text` as JSON and parse the requested fields.

    Parameters:
        response_data: either a dict {field: description} or a list of
            (field, description) pairs describing the JSON keys to produce.
        text: the request appended after the schema instructions.

    Returns the parsed dict.

    Raises:
        ValueError: when the reply contains no ```json code block.
        KeyError: when a requested field is missing (both trigger a retry).

    Example: print(extract({"name": "lowercase"}, "hello XiaoMing"))
    """
    if isinstance(response_data, dict):
        response_items = [[key, desc] for key, desc in response_data.items()]
    else:
        response_items = response_data

    json_text = ""
    for i, res in enumerate(response_items):
        comma = "," if i != len(response_items) - 1 else ""
        json_text += f'    "{res[0]}": {res[1]}{comma}\n'

    # Combine the provided text with the formatted JSON schema.
    think_text = f"""
The output should be a markdown code snippet formatted in the following schema, including the leading and trailing "```json" and "```" tags:
```json
{{
{json_text}
}}
```

Request:
{text}
"""
    # The request text goes last so that, e.g., translation requests do not
    # translate the "The output should..." instructions and break the format.
    markdown = think(think_text, [], temperature=temperature, **kwargs)
    pattern = r'```json(.*?)```'
    matches = re.findall(pattern, markdown, re.DOTALL)
    if not matches:
        # BUG FIX: the original silently returned None when no block matched.
        raise ValueError("LLM reply contained no ```json code block")
    json_str = matches[0].strip()
    # NOTE: deliberately NOT stripping "//" comments line-wise — that would
    # corrupt values containing URLs such as https:// . json5 handles comments.
    json_dict = json5.loads(json_str)
    for item in response_items:
        if item[0] not in json_dict:
            # BUG FIX: the original did `raise "item:" + item + " not exists"`,
            # which raises TypeError (cannot raise a str, nor concat list+str).
            raise KeyError(f"field {item[0]!r} missing from LLM reply")
    return json_dict
99
+
100
+
101
def yes_or_no(question, temperature=0, **kwargs):
    """Ask the LLM a yes/no question and return the answer as a bool."""
    answer = extract(
        [("Result", "Yes or No")], question, temperature=temperature, **kwargs
    )["Result"]
    # json5 may already have parsed the field as a boolean literal.
    return answer if isinstance(answer, bool) else answer.upper() == "YES"
107
+
108
+
109
@retry(retry_on_exception=never_retry_on_rate_limit_error, wait_exponential_multiplier=500, stop_max_attempt_number=3)
def extract_code(text: str, temperature=0, language="python", markdown_word='python', **kwargs):
    """Ask the LLM for a code snippet and return the fenced block's body.

    Parameters:
        language: human-readable language name used in the instructions.
        markdown_word: the fence info-string to require and match.

    Raises:
        Exception: when the reply has no matching fenced block (triggers retry).

    Example: print(extract_code("sum 1~100"))
    """
    think_text = text + f"""
The output should be a complete and usable {language} code snippet, including the leading and trailing "```{markdown_word}" and "```":
"""
    markdown = think(think_text, [], temperature=temperature, **kwargs)
    # Match the text between the ```{markdown_word} and ``` fences.
    pattern = rf'```{markdown_word}(.*?)```'
    matches = re.findall(pattern, markdown, re.DOTALL)
    if matches:
        # Trim surrounding whitespace from the extracted snippet.
        return matches[0].strip()
    else:
        # BUG FIX: the original message always said "python" regardless of the
        # requested language.
        raise Exception(f"The reply does not contain a valid {language} code block.")
124
+
125
+
126
if __name__ == "__main__":
    # Manual smoke test: requires a valid llm.json config and network access.
    print(think("你好", []))
    for chunk in think_stream("你好", []):
        print(chunk)
    print(extract({"name": "lowercase"}, "hello XiaoMing"))
    print(extract_code("sum 1~100"))
pycoze/ui/ui_def.py CHANGED
@@ -138,10 +138,12 @@ def multi_folder_select(
138
138
 
139
139
 
140
140
def folder_tree(
    name, root="", default: List[str] = None, ignore_list=None, tip="", hide_if="", style="", cls=""
) -> dict:
    """Declare a folder-tree UI control and return its stored/default value.

    default: the initially selected paths under *root*.
    NOTE(review): ignore_list is accepted (new in 0.1.226) but is not included
    in `value` below — confirm whether it should be passed through to the UI.
    """
    if default is None:
        default = []
    if ignore_list is None:
        ignore_list = []
    value = {"root": root, "paths": default}
    return useDefault(name, value)
147
149
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: pycoze
3
- Version: 0.1.225
3
+ Version: 0.1.226
4
4
  Summary: Package for pycoze only!
5
5
  Author: Yuan Jie Xiong
6
6
  Author-email: aiqqqqqqq@qq.com
@@ -1,9 +1,10 @@
1
1
  pycoze/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
- pycoze/ai/__init__.py,sha256=uhG4Z3HSTp_o6H4ouNy8gW1hzcU0B8zDB0nJgbmnHpk,165
2
+ pycoze/ai/__init__.py,sha256=NY6treq6PVA52c62RaP3S3v9Xsmj5xE1YOl169Fr0pg,168
3
3
  pycoze/ai/vram_reserve.py,sha256=brgXP42yj3yaZRgW8pfgc4Jg9EivAhcbp5W4igVHcow,4256
4
- pycoze/ai/llm/__init__.py,sha256=B4apuRE3PDputKTELVYWNP-V5GhdAOBq9BTjzP1Ne3c,125
4
+ pycoze/ai/llm/__init__.py,sha256=kAXcQ7SefJYysgKeVInlwYZoDk0BPuEnUuixy-quD_A,127
5
5
  pycoze/ai/llm/chat.py,sha256=izriC7nCp5qeJRqcUVQBVqTHiH6MJS77ROzGBJufdNI,5133
6
- pycoze/ai/llm/text_to_image_prompt.py,sha256=NdfhPTOTSR4whGgd10UtyQjzQCat0wdcX94OLSt4gUU,3333
6
+ pycoze/ai/llm/text_to_image_prompt.py,sha256=0bx2C_YRvjAo7iphHGp1-pmGKsKqwur7dM0t3SiA8kA,3398
7
+ pycoze/ai/llm/think.py,sha256=sUgTBdGzcZtL3r-Wx8M3lDuVUmDVz8g3qC0VU8uiKAI,5143
7
8
  pycoze/automation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
8
9
  pycoze/automation/browser/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
9
10
  pycoze/automation/browser/edge_driver_manager.py,sha256=gpgseunph5owZH6EskSYthuhey2SU3UP204gY0yIcuI,3022
@@ -26,13 +27,13 @@ pycoze/ui/__init__.py,sha256=RTEUVXdMo7AYC1wsyS1yl5sLi5vf4bNFv4iggghGEgg,469
26
27
  pycoze/ui/base.py,sha256=sbBZGMUtlosWHQJpxMULa1bGByeSlcldtE9QXNyiJmM,1093
27
28
  pycoze/ui/color.py,sha256=cT9Ib8uNzkOKxyW0IwVj46o4LwdB1xgNCj1_Rou9d_4,854
28
29
  pycoze/ui/typ.py,sha256=NpT0FrbHvByOszBZMFtroRp7I7pN-38tYz_zPOPejF4,1723
29
- pycoze/ui/ui_def.py,sha256=PEvTQWJ9iErXRXWQkY56Pem9fPT4z8F8l6ylN5X52DM,4450
30
+ pycoze/ui/ui_def.py,sha256=lGWZGpzRoegP34D562PvK0EJHrmVZrlHW1JjsIG9A9Q,4521
30
31
  pycoze/utils/__init__.py,sha256=Gi5EnrWZGMD2JRejgV4c_VLCXyvA2wwBFI_niDF5MUE,110
31
32
  pycoze/utils/arg.py,sha256=GtfGbMTMdaK75Fwh6MpUe1pCA5X6Ep4LFG7a72YrzjI,525
32
33
  pycoze/utils/env.py,sha256=W04lhvTHhAAC6EldP6kk2xrctqtu8K6kl1vDLZDNeh8,561
33
34
  pycoze/utils/text_or_file.py,sha256=gpxZVWt2DW6YiEg_MnMuwg36VNf3TX383QD_1oZNB0Y,551
34
- pycoze-0.1.225.dist-info/LICENSE,sha256=QStd_Qsd0-kAam_-sOesCIp_uKrGWeoKwt9M49NVkNU,1090
35
- pycoze-0.1.225.dist-info/METADATA,sha256=N4r76t8hsUkcTCQzuJ_M1WM1L6-rG6V6qUqi5DzrK9M,726
36
- pycoze-0.1.225.dist-info/WHEEL,sha256=bFJAMchF8aTQGUgMZzHJyDDMPTO3ToJ7x23SLJa1SVo,92
37
- pycoze-0.1.225.dist-info/top_level.txt,sha256=76dPeDhKvOCleL3ZC5gl1-y4vdS1tT_U1hxWVAn7sFo,7
38
- pycoze-0.1.225.dist-info/RECORD,,
35
+ pycoze-0.1.226.dist-info/LICENSE,sha256=QStd_Qsd0-kAam_-sOesCIp_uKrGWeoKwt9M49NVkNU,1090
36
+ pycoze-0.1.226.dist-info/METADATA,sha256=Nx7J6gvZSKT8q7vyzbuSX_R1abxBh2MoKPbrfLsDbSo,726
37
+ pycoze-0.1.226.dist-info/WHEEL,sha256=bFJAMchF8aTQGUgMZzHJyDDMPTO3ToJ7x23SLJa1SVo,92
38
+ pycoze-0.1.226.dist-info/top_level.txt,sha256=76dPeDhKvOCleL3ZC5gl1-y4vdS1tT_U1hxWVAn7sFo,7
39
+ pycoze-0.1.226.dist-info/RECORD,,