npcsh 0.3.31__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcsh/_state.py +942 -0
- npcsh/alicanto.py +1074 -0
- npcsh/guac.py +785 -0
- npcsh/mcp_helpers.py +357 -0
- npcsh/mcp_npcsh.py +822 -0
- npcsh/mcp_server.py +184 -0
- npcsh/npc.py +218 -0
- npcsh/npcsh.py +1161 -0
- npcsh/plonk.py +387 -269
- npcsh/pti.py +234 -0
- npcsh/routes.py +958 -0
- npcsh/spool.py +315 -0
- npcsh/wander.py +550 -0
- npcsh/yap.py +573 -0
- npcsh-1.0.0.dist-info/METADATA +596 -0
- npcsh-1.0.0.dist-info/RECORD +21 -0
- {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/WHEEL +1 -1
- npcsh-1.0.0.dist-info/entry_points.txt +9 -0
- {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/licenses/LICENSE +1 -1
- npcsh/audio.py +0 -210
- npcsh/cli.py +0 -545
- npcsh/command_history.py +0 -566
- npcsh/conversation.py +0 -291
- npcsh/data_models.py +0 -46
- npcsh/dataframes.py +0 -163
- npcsh/embeddings.py +0 -168
- npcsh/helpers.py +0 -641
- npcsh/image.py +0 -298
- npcsh/image_gen.py +0 -79
- npcsh/knowledge_graph.py +0 -1006
- npcsh/llm_funcs.py +0 -2027
- npcsh/load_data.py +0 -83
- npcsh/main.py +0 -5
- npcsh/model_runner.py +0 -189
- npcsh/npc_compiler.py +0 -2870
- npcsh/npc_sysenv.py +0 -383
- npcsh/npc_team/assembly_lines/test_pipeline.py +0 -181
- npcsh/npc_team/corca.npc +0 -13
- npcsh/npc_team/foreman.npc +0 -7
- npcsh/npc_team/npcsh.ctx +0 -11
- npcsh/npc_team/sibiji.npc +0 -4
- npcsh/npc_team/templates/analytics/celona.npc +0 -0
- npcsh/npc_team/templates/hr_support/raone.npc +0 -0
- npcsh/npc_team/templates/humanities/eriane.npc +0 -4
- npcsh/npc_team/templates/it_support/lineru.npc +0 -0
- npcsh/npc_team/templates/marketing/slean.npc +0 -4
- npcsh/npc_team/templates/philosophy/maurawa.npc +0 -0
- npcsh/npc_team/templates/sales/turnic.npc +0 -4
- npcsh/npc_team/templates/software/welxor.npc +0 -0
- npcsh/npc_team/tools/bash_executer.tool +0 -32
- npcsh/npc_team/tools/calculator.tool +0 -8
- npcsh/npc_team/tools/code_executor.tool +0 -16
- npcsh/npc_team/tools/generic_search.tool +0 -27
- npcsh/npc_team/tools/image_generation.tool +0 -25
- npcsh/npc_team/tools/local_search.tool +0 -149
- npcsh/npc_team/tools/npcsh_executor.tool +0 -9
- npcsh/npc_team/tools/screen_cap.tool +0 -27
- npcsh/npc_team/tools/sql_executor.tool +0 -26
- npcsh/response.py +0 -623
- npcsh/search.py +0 -248
- npcsh/serve.py +0 -1460
- npcsh/shell.py +0 -538
- npcsh/shell_helpers.py +0 -3529
- npcsh/stream.py +0 -700
- npcsh/video.py +0 -49
- npcsh-0.3.31.data/data/npcsh/npc_team/bash_executer.tool +0 -32
- npcsh-0.3.31.data/data/npcsh/npc_team/calculator.tool +0 -8
- npcsh-0.3.31.data/data/npcsh/npc_team/celona.npc +0 -0
- npcsh-0.3.31.data/data/npcsh/npc_team/code_executor.tool +0 -16
- npcsh-0.3.31.data/data/npcsh/npc_team/corca.npc +0 -13
- npcsh-0.3.31.data/data/npcsh/npc_team/eriane.npc +0 -4
- npcsh-0.3.31.data/data/npcsh/npc_team/foreman.npc +0 -7
- npcsh-0.3.31.data/data/npcsh/npc_team/generic_search.tool +0 -27
- npcsh-0.3.31.data/data/npcsh/npc_team/image_generation.tool +0 -25
- npcsh-0.3.31.data/data/npcsh/npc_team/lineru.npc +0 -0
- npcsh-0.3.31.data/data/npcsh/npc_team/local_search.tool +0 -149
- npcsh-0.3.31.data/data/npcsh/npc_team/maurawa.npc +0 -0
- npcsh-0.3.31.data/data/npcsh/npc_team/npcsh.ctx +0 -11
- npcsh-0.3.31.data/data/npcsh/npc_team/npcsh_executor.tool +0 -9
- npcsh-0.3.31.data/data/npcsh/npc_team/raone.npc +0 -0
- npcsh-0.3.31.data/data/npcsh/npc_team/screen_cap.tool +0 -27
- npcsh-0.3.31.data/data/npcsh/npc_team/sibiji.npc +0 -4
- npcsh-0.3.31.data/data/npcsh/npc_team/slean.npc +0 -4
- npcsh-0.3.31.data/data/npcsh/npc_team/sql_executor.tool +0 -26
- npcsh-0.3.31.data/data/npcsh/npc_team/test_pipeline.py +0 -181
- npcsh-0.3.31.data/data/npcsh/npc_team/turnic.npc +0 -4
- npcsh-0.3.31.data/data/npcsh/npc_team/welxor.npc +0 -0
- npcsh-0.3.31.dist-info/METADATA +0 -1853
- npcsh-0.3.31.dist-info/RECORD +0 -76
- npcsh-0.3.31.dist-info/entry_points.txt +0 -3
- {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/top_level.txt +0 -0
npcsh/image.py
DELETED
|
@@ -1,298 +0,0 @@
|
|
|
1
|
-
# image.py
|
|
2
|
-
# import os
|
|
3
|
-
import time
|
|
4
|
-
import platform
|
|
5
|
-
import subprocess
|
|
6
|
-
from typing import Dict, Any
|
|
7
|
-
from PIL import ImageGrab # Import ImageGrab from Pillow
|
|
8
|
-
|
|
9
|
-
from .npc_sysenv import NPCSH_VISION_MODEL, NPCSH_VISION_PROVIDER, NPCSH_API_URL
|
|
10
|
-
from .llm_funcs import get_llm_response, get_stream
|
|
11
|
-
import os
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
def _windows_snip_to_file(file_path: str) -> bool:
|
|
15
|
-
"""Helper function to trigger Windows snipping and save to file."""
|
|
16
|
-
try:
|
|
17
|
-
# Import Windows-specific modules only when needed
|
|
18
|
-
import win32clipboard
|
|
19
|
-
from PIL import ImageGrab
|
|
20
|
-
from ctypes import windll
|
|
21
|
-
|
|
22
|
-
# Simulate Windows + Shift + S
|
|
23
|
-
windll.user32.keybd_event(0x5B, 0, 0, 0) # WIN down
|
|
24
|
-
windll.user32.keybd_event(0x10, 0, 0, 0) # SHIFT down
|
|
25
|
-
windll.user32.keybd_event(0x53, 0, 0, 0) # S down
|
|
26
|
-
windll.user32.keybd_event(0x53, 0, 0x0002, 0) # S up
|
|
27
|
-
windll.user32.keybd_event(0x10, 0, 0x0002, 0) # SHIFT up
|
|
28
|
-
windll.user32.keybd_event(0x5B, 0, 0x0002, 0) # WIN up
|
|
29
|
-
|
|
30
|
-
# Wait for user to complete the snip
|
|
31
|
-
print("Please select an area to capture...")
|
|
32
|
-
time.sleep(1) # Give a moment for snipping tool to start
|
|
33
|
-
|
|
34
|
-
# Keep checking clipboard for new image
|
|
35
|
-
max_wait = 30 # Maximum seconds to wait
|
|
36
|
-
start_time = time.time()
|
|
37
|
-
|
|
38
|
-
while time.time() - start_time < max_wait:
|
|
39
|
-
try:
|
|
40
|
-
image = ImageGrab.grabclipboard()
|
|
41
|
-
if image:
|
|
42
|
-
image.save(file_path, "PNG")
|
|
43
|
-
return True
|
|
44
|
-
except Exception:
|
|
45
|
-
pass
|
|
46
|
-
time.sleep(0.5)
|
|
47
|
-
|
|
48
|
-
return False
|
|
49
|
-
|
|
50
|
-
except ImportError:
|
|
51
|
-
print("Required packages not found. Please install: pip install pywin32 Pillow")
|
|
52
|
-
return False
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
def capture_screenshot(npc: Any = None, full=False) -> Dict[str, str]:
    """Capture a screenshot (full screen or interactive region) to disk.

    Saves a PNG under ``~/.npcsh/screenshots`` with a timestamped filename and
    dispatches to the platform-appropriate capture tool.

    Args:
        npc: Optional NPC object; its ``model``/``provider`` attributes, when
            set, are copied into the returned ``model_kwargs``.
        full: When True capture the whole screen; otherwise let the user
            select a region interactively.

    Returns:
        A dict with ``filename``, ``file_path`` and ``model_kwargs`` on
        success, or None when capture failed, was cancelled, or the platform
        is unsupported.
    """
    # Ensure the output directory exists.
    directory = os.path.expanduser("~/.npcsh/screenshots")
    os.makedirs(directory, exist_ok=True)

    # Generate a unique, timestamped filename.
    filename = f"screenshot_{int(time.time())}.png"
    file_path = os.path.join(directory, filename)

    system = platform.system()
    model_kwargs = {}

    if npc is not None:
        if npc.provider is not None:
            model_kwargs["provider"] = npc.provider
        if npc.model is not None:
            model_kwargs["model"] = npc.model

    if full:
        if system == "Darwin":
            subprocess.run(["screencapture", file_path])
        elif system == "Linux":
            if (
                subprocess.run(
                    ["which", "gnome-screenshot"], capture_output=True
                ).returncode
                == 0
            ):
                subprocess.Popen(["gnome-screenshot", "-f", file_path])
                # NOTE(review): this waits indefinitely if the tool never
                # writes the file — consider adding a timeout.
                while not os.path.exists(file_path):
                    time.sleep(0.5)
            elif (
                subprocess.run(["which", "scrot"], capture_output=True).returncode == 0
            ):
                subprocess.Popen(["scrot", file_path])
                while not os.path.exists(file_path):
                    time.sleep(0.5)
            else:
                print(
                    "No supported screenshot tool found. Please install gnome-screenshot or scrot."
                )
                return None
        elif system == "Windows":
            # For full screen on Windows, capture via GDI.
            try:
                # BUG FIX: ``win32api`` was used below without being imported,
                # raising a NameError that the ImportError handler did not
                # catch. Also dropped the unused ``from PIL import Image``.
                import win32api
                import win32gui
                import win32ui
                import win32con

                # Get virtual-screen dimensions (spans all monitors).
                width = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)
                height = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)

                # Create device contexts for the desktop window.
                hdesktop = win32gui.GetDesktopWindow()
                desktop_dc = win32gui.GetWindowDC(hdesktop)
                img_dc = win32ui.CreateDCFromHandle(desktop_dc)
                mem_dc = img_dc.CreateCompatibleDC()

                # Create a bitmap and blit the screen contents into it.
                screenshot = win32ui.CreateBitmap()
                screenshot.CreateCompatibleBitmap(img_dc, width, height)
                mem_dc.SelectObject(screenshot)
                mem_dc.BitBlt((0, 0), (width, height), img_dc, (0, 0), win32con.SRCCOPY)

                # Save the captured bitmap to disk.
                screenshot.SaveBitmapFile(mem_dc, file_path)

                # Release GDI resources.
                mem_dc.DeleteDC()
                win32gui.DeleteObject(screenshot.GetHandle())

            except ImportError:
                print(
                    "Required packages not found. Please install: pip install pywin32"
                )
                return None
        else:
            print(f"Unsupported operating system: {system}")
            return None
    else:
        if system == "Darwin":
            # -i: interactive region selection.
            subprocess.run(["screencapture", "-i", file_path])
        elif system == "Linux":
            if (
                subprocess.run(
                    ["which", "gnome-screenshot"], capture_output=True
                ).returncode
                == 0
            ):
                # -a: area selection.
                subprocess.Popen(["gnome-screenshot", "-a", "-f", file_path])
                # NOTE(review): waits indefinitely if the user cancels —
                # consider adding a timeout.
                while not os.path.exists(file_path):
                    time.sleep(0.5)
            elif (
                subprocess.run(["which", "scrot"], capture_output=True).returncode == 0
            ):
                # -s: interactive selection.
                subprocess.Popen(["scrot", "-s", file_path])
                while not os.path.exists(file_path):
                    time.sleep(0.5)
            else:
                print(
                    "No supported screenshot tool found. Please install gnome-screenshot or scrot."
                )
                return None
        elif system == "Windows":
            success = _windows_snip_to_file(file_path)
            if not success:
                print("Screenshot capture failed or timed out.")
                return None
        else:
            print(f"Unsupported operating system: {system}")
            return None

    # Verify the file actually landed on disk before reporting success.
    if os.path.exists(file_path):
        print(f"Screenshot saved to: {file_path}")
        return {
            "filename": filename,
            "file_path": file_path,
            "model_kwargs": model_kwargs,
        }
    else:
        print("Screenshot capture failed or was cancelled.")
        return None
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
def analyze_image_base(
    user_prompt: str,
    file_path: str,
    filename: str,
    npc: Any = None,
    stream: bool = False,
    **model_kwargs,
) -> Dict[str, str]:
    """Send an image file to the LLM and return its response.

    Args:
        user_prompt: Prompt to accompany the image; when falsy the LLM call
            is skipped and the image info is returned instead.
        file_path: Path to the image file on disk.
        filename: Display name of the image file.
        npc: Optional NPC object forwarded to the LLM call.
        stream: Unused here; kept for signature compatibility.
        **model_kwargs: Extra keyword arguments forwarded to the LLM call.

    Returns:
        The LLM response, or a dict with a "response" key describing the
        outcome when no LLM call was made or an error occurred.
    """
    if not os.path.exists(file_path):
        print("Screenshot capture failed or was cancelled.")
        return {"response": "Screenshot capture failed or was cancelled."}

    image_info = {"filename": filename, "file_path": file_path}

    if not user_prompt:
        print("Skipping LLM processing.")
        return {"response": str(image_info)}

    try:
        return get_llm_response(
            user_prompt, images=[image_info], npc=npc, **model_kwargs
        )
    except Exception as exc:
        failure = f"Error during LLM processing: {exc}"
        print(failure)
        return {"response": failure}
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
def analyze_image(
    user_prompt: str,
    file_path: str,
    filename: str,
    npc: Any = None,
    stream: bool = False,
    messages: list = None,
    model: str = NPCSH_VISION_MODEL,
    provider: str = NPCSH_VISION_PROVIDER,
    api_key: str = None,
    api_url: str = NPCSH_API_URL,
) -> Dict[str, str]:
    """Analyze an image file with a vision LLM, optionally streaming.

    Args:
        user_prompt: Prompt to accompany the image; when falsy the LLM call
            is skipped and the image info is returned instead.
        file_path: Path to the image file on disk.
        filename: Display name of the image file.
        npc: Optional NPC object forwarded to the LLM call.
        stream: When True, return a streaming response via ``get_stream``.
        messages: Conversation history passed to ``get_stream`` when
            streaming.
        model: Vision model name.
        provider: Vision model provider.
        api_key: API key for the provider, if required.
        api_url: API endpoint URL.

    Returns:
        The LLM (or stream) response; the image info dict when no prompt is
        given; an error-message string on LLM failure; or None when the file
        does not exist.
    """
    if os.path.exists(file_path):
        image_info = {"filename": filename, "file_path": file_path}

        if user_prompt:
            try:
                if stream:
                    # BUG FIX: the original referenced an undefined
                    # ``model_kwargs`` here (NameError on every streaming
                    # call). Forward the explicit model settings instead —
                    # presumably get_stream accepts the same model/provider
                    # kwargs as get_llm_response; TODO confirm its signature.
                    return get_stream(
                        messages,
                        images=[image_info],
                        npc=npc,
                        model=model,
                        provider=provider,
                        api_url=api_url,
                        api_key=api_key,
                    )

                else:
                    response = get_llm_response(
                        user_prompt,
                        images=[image_info],
                        npc=npc,
                        model=model,
                        provider=provider,
                        api_url=api_url,
                        api_key=api_key,
                    )

                    return response

            except Exception as e:
                error_message = f"Error during LLM processing: {e}"
                print(error_message)
                return error_message

        else:
            print("Skipping LLM processing.")
            return image_info  # Return image info if no prompt is given
    else:
        print("Screenshot capture failed or was cancelled.")
        return None
|
npcsh/image_gen.py
DELETED
|
@@ -1,79 +0,0 @@
|
|
|
1
|
-
########
|
|
2
|
-
########
|
|
3
|
-
########
|
|
4
|
-
########
|
|
5
|
-
########
|
|
6
|
-
########
|
|
7
|
-
######## IMAGE GENERATION
|
|
8
|
-
########
|
|
9
|
-
|
|
10
|
-
import os
|
|
11
|
-
|
|
12
|
-
from openai import OpenAI
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
def generate_image_openai(
    prompt: str,
    model: str,
    api_key: str = None,
    size: str = None,
    npc=None,
) -> str:
    """Generate an image via the OpenAI Images API.

    Args:
        prompt: Text description of the desired image.
        model: Image model name; falls back to "dall-e-2" when None and is
            coerced to "dall-e-3" when unrecognized.
        api_key: OpenAI API key; read from OPENAI_API_KEY when omitted.
        size: Image dimensions, defaulting to "1024x1024".
        npc: Unused; kept for signature compatibility.

    Returns:
        The API response for the generated image (None if the call produced
        nothing).
    """
    resolved_key = os.environ.get("OPENAI_API_KEY") if api_key is None else api_key
    chosen_model = "dall-e-2" if model is None else model
    client = OpenAI(api_key=resolved_key)
    chosen_size = "1024x1024" if size is None else size

    # Only the DALL-E models are supported; anything else is coerced rather
    # than rejected outright.
    if chosen_model not in ("dall-e-3", "dall-e-2"):
        print(f"Invalid model: {chosen_model}")
        print("Switching to dall-e-3")
        chosen_model = "dall-e-3"

    result = client.images.generate(
        model=chosen_model, prompt=prompt, n=1, size=chosen_size
    )
    if result is not None:
        return result
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
def generate_image_hf_diffusion(
    prompt: str,
    model: str = "runwayml/stable-diffusion-v1-5",
    device: str = "cpu",
):
    """Generate an image locally with a Stable Diffusion pipeline.

    Args:
        prompt: Text description of the desired image.
        model: Hugging Face model ID for the diffusion pipeline.
        device: Device to run the model on ("cpu" or "cuda").

    Returns:
        PIL.Image: The first generated image (also displayed via ``show``).
    """
    # Imported lazily so the module loads without the diffusers package.
    from diffusers import StableDiffusionPipeline

    pipeline = StableDiffusionPipeline.from_pretrained(model).to(device)

    # Run the pipeline and take the first generated sample.
    output = pipeline(prompt)
    generated = output.images[0]
    generated.show()

    return generated
|