pygpt-net 2.5.98.post1__py3-none-any.whl → 2.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. pygpt_net/CHANGELOG.txt +9 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/ctx/ctx.py +7 -2
  4. pygpt_net/core/agents/runners/openai_workflow.py +9 -6
  5. pygpt_net/core/render/plain/pid.py +3 -2
  6. pygpt_net/core/render/web/body.py +21 -5
  7. pygpt_net/core/render/web/pid.py +26 -6
  8. pygpt_net/core/render/web/renderer.py +4 -10
  9. pygpt_net/data/config/config.json +3 -3
  10. pygpt_net/data/config/models.json +3 -3
  11. pygpt_net/data/css/style.dark.css +13 -0
  12. pygpt_net/data/css/style.light.css +29 -0
  13. pygpt_net/data/icon.ico +0 -0
  14. pygpt_net/data/icon.png +0 -0
  15. pygpt_net/data/locale/locale.de.ini +1 -1
  16. pygpt_net/data/locale/locale.en.ini +1 -1
  17. pygpt_net/data/locale/locale.es.ini +1 -1
  18. pygpt_net/data/locale/locale.fr.ini +1 -1
  19. pygpt_net/data/locale/locale.it.ini +1 -1
  20. pygpt_net/data/locale/locale.pl.ini +1 -1
  21. pygpt_net/data/locale/locale.uk.ini +1 -1
  22. pygpt_net/data/locale/locale.zh.ini +1 -1
  23. pygpt_net/provider/agents/llama_index/code_act.py +5 -4
  24. pygpt_net/provider/agents/llama_index/openai.py +3 -3
  25. pygpt_net/provider/agents/llama_index/openai_assistant.py +3 -3
  26. pygpt_net/provider/agents/llama_index/planner.py +6 -6
  27. pygpt_net/provider/agents/llama_index/react.py +3 -7
  28. pygpt_net/provider/agents/llama_index/react_workflow.py +4 -7
  29. pygpt_net/provider/agents/openai/agent_b2b.py +45 -10
  30. pygpt_net/provider/agents/openai/agent_planner.py +24 -2
  31. pygpt_net/provider/agents/openai/agent_with_experts.py +1 -38
  32. pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +23 -38
  33. pygpt_net/provider/agents/openai/agent_with_feedback.py +23 -2
  34. pygpt_net/provider/agents/openai/bot_researcher.py +2 -6
  35. pygpt_net/provider/agents/openai/evolve.py +23 -2
  36. pygpt_net/provider/core/config/patch.py +7 -0
  37. pygpt_net/provider/gpt/__init__.py +21 -3
  38. pygpt_net/tools/html_canvas/ui/widgets.py +4 -0
  39. pygpt_net/tools/media_player/tool.py +11 -2
  40. pygpt_net/tools/media_player/ui/widgets.py +99 -94
  41. pygpt_net/ui/widget/calendar/select.py +10 -2
  42. pygpt_net/ui/widget/filesystem/explorer.py +1 -0
  43. {pygpt_net-2.5.98.post1.dist-info → pygpt_net-2.6.0.dist-info}/METADATA +12 -147
  44. {pygpt_net-2.5.98.post1.dist-info → pygpt_net-2.6.0.dist-info}/RECORD +47 -47
  45. {pygpt_net-2.5.98.post1.dist-info → pygpt_net-2.6.0.dist-info}/LICENSE +0 -0
  46. {pygpt_net-2.5.98.post1.dist-info → pygpt_net-2.6.0.dist-info}/WHEEL +0 -0
  47. {pygpt_net-2.5.98.post1.dist-info → pygpt_net-2.6.0.dist-info}/entry_points.txt +0 -0
pygpt_net/provider/agents/openai/agent_b2b.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.11 19:00:00 #
+# Updated Date: 2025.08.12 19:00:00 #
 # ================================================== #
 
 import copy
@@ -93,6 +93,24 @@ class Agent(BaseAgent):
         kwargs.update(tool_kwargs)  # update kwargs with tools
         return OpenAIAgent(**kwargs)
 
+    def reverse_history(
+            self,
+            items: list[TResponseInputItem]
+    ):
+        """
+        Reverse the roles of items in the input list in history (assistant to user)
+
+        :param items: List of input items
+        :return: List of input items with reversed roles
+        """
+        counter = 1
+        for item in items:
+            if item.get("role") == "assistant":
+                if counter % 2 == 0:
+                    item["role"] = "user"
+                counter += 1
+        return items
+
     def reverse_items(
             self,
             items: list[TResponseInputItem],
@@ -236,6 +254,10 @@ class Agent(BaseAgent):
         }
         input_items: list[TResponseInputItem] = messages
 
+        # reverse history if needed
+        if use_partial_ctx:
+            input_items = self.reverse_history(input_items)
+
         if not stream:
             while True:
                 # -------- bot 1 --------
@@ -250,6 +272,7 @@ class Agent(BaseAgent):
                     **kwargs
                 )
                 response_id = result.last_response_id
+                output_1 = final_output
                 if verbose:
                     print("Final response:", result)
 
@@ -263,6 +286,15 @@ class Agent(BaseAgent):
                 input_items = result.to_input_list()
                 input_items = self.reverse_items(input_items, verbose=reverse_verbose)
 
+                if use_partial_ctx:
+                    ctx = bridge.on_next_ctx(
+                        ctx=ctx,
+                        input="",  # new ctx: input
+                        output=output_1,  # prev ctx: output
+                        response_id=response_id,
+                        stream=False,
+                    )
+
                 # -------- bot 2 --------
                 kwargs["input"] = input_items
                 kwargs = self.prepare_model(model_2, window, previous_response_id, kwargs)
@@ -271,6 +303,7 @@ class Agent(BaseAgent):
                     **kwargs
                 )
                 response_id = result.last_response_id
+                output_2 = final_output
                 if verbose:
                     print("Final response:", result)
 
@@ -282,17 +315,17 @@ class Agent(BaseAgent):
                 # get and reverse items
                 input_items = result.to_input_list()
                 input_items = self.reverse_items(input_items, verbose=reverse_verbose)
+
+                if use_partial_ctx:
+                    ctx = bridge.on_next_ctx(
+                        ctx=ctx,
+                        input="",  # new ctx: input
+                        output=output_2,  # prev ctx: output
+                        response_id=response_id,
+                        stream=False,
+                    )
         else:
             handler = StreamHandler(window, bridge)
-            if use_partial_ctx:
-                # we must replace message roles at beginning, second bot will be user
-                msg_counter = 1
-                for item in input_items:
-                    if item.get("role") == "assistant":
-                        if msg_counter % 2 == 0:
-                            item["role"] = "user"
-                        msg_counter += 1
-
             begin = True
             while True:
                 # -------- bot 1 --------
@@ -335,6 +368,7 @@ class Agent(BaseAgent):
                         input="",  # new ctx: input
                         output=output_1,  # prev ctx: output
                         response_id=response_id,
+                        stream=True,
                     )
                     handler.new()
                 else:
@@ -377,6 +411,7 @@ class Agent(BaseAgent):
                         input="",  # new ctx: input
                         output=output_2,  # prev ctx: output
                         response_id=response_id,
+                        stream=True,
                     )
                     handler.new()
                 else:
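
The new reverse_history() helper in agent_b2b.py flips every second assistant message to the user role, so that when the accumulated history is replayed, the other bot's turns read as user turns; the same logic previously lived inline in the streaming branch and is now shared by both branches. A minimal standalone sketch of that behaviour (plain dicts instead of TResponseInputItem, illustrative messages only):

# Standalone sketch of the role-flipping logic added above; items are plain
# role/content dicts here instead of the Agents SDK TResponseInputItem type.

def reverse_history(items: list[dict]) -> list[dict]:
    """Flip every second assistant message to the user role (in place)."""
    counter = 1
    for item in items:
        if item.get("role") == "assistant":
            if counter % 2 == 0:
                item["role"] = "user"
            counter += 1
    return items

history = [
    {"role": "user", "content": "Start the debate."},       # illustrative content only
    {"role": "assistant", "content": "Bot 1: opening."},
    {"role": "assistant", "content": "Bot 2: counter."},
    {"role": "assistant", "content": "Bot 1: rebuttal."},
    {"role": "assistant", "content": "Bot 2: closing."},
]
reverse_history(history)
print([m["role"] for m in history])
# ['user', 'assistant', 'user', 'assistant', 'user'] -> bot 2's turns now read as user turns
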
pygpt_net/provider/agents/openai/agent_planner.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.11 19:00:00 #
+# Updated Date: 2025.08.12 19:00:00 #
 # ================================================== #
 
 from dataclasses import dataclass
@@ -338,10 +338,30 @@ class Agent(BaseAgent):
 
             print(f"Evaluator score: {result.score}")
             if result.score == "pass":
-                print("Response is good enough, exiting.")
+                if use_partial_ctx:
+                    ctx = bridge.on_next_ctx(
+                        ctx=ctx,
+                        input=result.feedback,  # new ctx: input
+                        output=final_output,  # prev ctx: output
+                        response_id=response_id,
+                        finish=True,
+                        stream=False,
+                    )
+                else:
+                    print("Response is good enough, exiting.")
                 break
+
             print("Re-running with feedback")
             input_items.append({"content": f"Feedback: {result.feedback}", "role": "user"})
+
+            if use_partial_ctx:
+                ctx = bridge.on_next_ctx(
+                    ctx=ctx,
+                    input=result.feedback,  # new ctx: input
+                    output=final_output,  # prev ctx: output
+                    response_id=response_id,
+                    stream=False,
+                )
         else:
             final_output = result.plan + "\n___\n"
             handler = StreamHandler(window, bridge, final_output)
@@ -379,6 +399,7 @@ class Agent(BaseAgent):
                         output=final_output,  # prev ctx: output
                         response_id=response_id,
                         finish=True,
+                        stream=True,
                     )
                 else:
                     ctx.stream = info
@@ -395,6 +416,7 @@ class Agent(BaseAgent):
                         input=result.feedback,  # new ctx: input
                         output=final_output,  # prev ctx: output
                         response_id=response_id,
+                        stream=True,
                     )
                     handler.new()
                 else:
pygpt_net/provider/agents/openai/agent_with_experts.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.11 19:00:00 #
+# Updated Date: 2025.08.12 19:00:00 #
 # ================================================== #
 
 from typing import Dict, Any, Tuple, Optional
@@ -79,43 +79,6 @@ class Agent(BaseAgent):
         kwargs.update(tool_kwargs)  # update kwargs with tools
         return OpenAIAgent(**kwargs)
 
-
-    def get_expert(
-            self,
-            window,
-            prompt: str,
-            model: ModelItem,
-            preset: PresetItem = None,
-            tools: list = None,
-    ) -> OpenAIAgent:
-        """
-        Return Agent provider instance
-
-        :param window: window instance
-        :param prompt: Expert prompt
-        :param model: Model item
-        :param preset: Preset item
-        :param tools: List of function tools
-        :return: Agent provider instance
-        """
-        agent_name = preset.name if preset else "Agent"
-        kwargs = {
-            "name": agent_name,
-            "instructions": prompt,
-            "model": model.id,
-        }
-        tool_kwargs = append_tools(
-            tools=tools,
-            window=window,
-            model=model,
-            preset=preset,
-            allow_local_tools=True,
-            allow_remote_tools=True,
-            is_expert_call=True,
-        )
-        kwargs.update(tool_kwargs)  # update kwargs with tools
-        return OpenAIAgent(**kwargs)
-
     async def run(
             self,
             window: Any = None,
pygpt_net/provider/agents/openai/agent_with_experts_feedback.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.11 19:00:00 #
+# Updated Date: 2025.08.12 19:00:00 #
 # ================================================== #
 
 from dataclasses import dataclass
@@ -98,42 +98,6 @@ class Agent(BaseAgent):
         agent_kwargs.update(tool_kwargs)  # update kwargs with tools
         return OpenAIAgent(**agent_kwargs)
 
-    def get_expert(
-            self,
-            window,
-            prompt: str,
-            model: ModelItem,
-            preset: PresetItem = None,
-            tools: list = None,
-    ) -> OpenAIAgent:
-        """
-        Return Agent provider instance
-
-        :param window: window instance
-        :param prompt: Expert prompt
-        :param model: Model item
-        :param preset: Preset item
-        :param tools: List of function tools
-        :return: Agent provider instance
-        """
-        agent_name = preset.name if preset else "Agent"
-        kwargs = {
-            "name": agent_name,
-            "instructions": prompt,
-            "model": model.id,
-        }
-        tool_kwargs = append_tools(
-            tools=tools,
-            window=window,
-            model=model,
-            preset=preset,
-            allow_local_tools=True,
-            allow_remote_tools=True,
-            is_expert_call=True,
-        )
-        kwargs.update(tool_kwargs)  # update kwargs with tools
-        return OpenAIAgent(**kwargs)
-
     def get_evaluator(
             self,
             window,
@@ -274,10 +238,29 @@ class Agent(BaseAgent):
 
             print(f"Evaluator score: {result.score}")
             if result.score == "pass":
-                print("Response is good enough, exiting.")
+                if use_partial_ctx:
+                    ctx = bridge.on_next_ctx(
+                        ctx=ctx,
+                        input=result.feedback,  # new ctx: input
+                        output=final_output,  # prev ctx: output
+                        response_id=response_id,
+                        finish=True,
+                        stream=False,
+                    )
+                else:
+                    print("Response is good enough, exiting.")
                 break
             print("Re-running with feedback")
             input_items.append({"content": f"Feedback: {result.feedback}", "role": "user"})
+
+            if use_partial_ctx:
+                ctx = bridge.on_next_ctx(
+                    ctx=ctx,
+                    input=result.feedback,  # new ctx: input
+                    output=final_output,  # prev ctx: output
+                    response_id=response_id,
+                    stream=False,
+                )
         else:
             handler = StreamHandler(window, bridge)
             while True:
@@ -314,6 +297,7 @@ class Agent(BaseAgent):
                         output=final_output,  # prev ctx: output
                         response_id=response_id,
                         finish=True,
+                        stream=True,
                     )
                 else:
                     ctx.stream = info
@@ -330,6 +314,7 @@ class Agent(BaseAgent):
                         input=result.feedback,  # new ctx: input
                         output=final_output,  # prev ctx: output
                         response_id=response_id,
+                        stream=True,
                     )
                     handler.new()
                 else:
pygpt_net/provider/agents/openai/agent_with_feedback.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.11 19:00:00 #
+# Updated Date: 2025.08.12 19:00:00 #
 # ================================================== #
 
 from dataclasses import dataclass
@@ -238,10 +238,29 @@ class Agent(BaseAgent):
 
             print(f"Evaluator score: {result.score}")
             if result.score == "pass":
-                print("Response is good enough, exiting.")
+                if use_partial_ctx:
+                    ctx = bridge.on_next_ctx(
+                        ctx=ctx,
+                        input=result.feedback,  # new ctx: input
+                        output=final_output,  # prev ctx: output
+                        response_id=response_id,
+                        finish=True,
+                        stream=False,
+                    )
+                else:
+                    print("Response is good enough, exiting.")
                 break
             print("Re-running with feedback")
             input_items.append({"content": f"Feedback: {result.feedback}", "role": "user"})
+
+            if use_partial_ctx:
+                ctx = bridge.on_next_ctx(
+                    ctx=ctx,
+                    input=result.feedback,  # new ctx: input
+                    output=final_output,  # prev ctx: output
+                    response_id=response_id,
+                    stream=False,
+                )
         else:
             handler = StreamHandler(window, bridge)
             while True:
@@ -278,6 +297,7 @@ class Agent(BaseAgent):
                         output=final_output,  # prev ctx: output
                         response_id=response_id,
                         finish=True,
+                        stream=True,
                     )
                 else:
                     ctx.stream = info
@@ -294,6 +314,7 @@ class Agent(BaseAgent):
                         input=result.feedback,  # new ctx: input
                         output=final_output,  # prev ctx: output
                         response_id=response_id,
+                        stream=True,
                     )
                     handler.new()
                 else:
pygpt_net/provider/agents/openai/bot_researcher.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.11 19:00:00 #
+# Updated Date: 2025.08.12 19:00:00 #
 # ================================================== #
 
 from typing import Dict, Any, Tuple, Union, Optional
@@ -211,11 +211,7 @@ class Agent(BaseAgent):
         if model.provider == "openai":
            set_openai_env(window)
 
-        if not stream:
-            final_output = await bot.run(query)
-        else:
-            final_output = await bot.run(query)
-
+        final_output = await bot.run(query)
         return ctx, final_output, response_id
 
 
pygpt_net/provider/agents/openai/evolve.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.11 19:00:00 #
+# Updated Date: 2025.08.12 19:00:00 #
 # ================================================== #
 
 import copy
@@ -358,11 +358,30 @@ class Agent(BaseAgent):
 
             print(f"Evaluator score: {result.score}")
             if result.score == "pass":
-                print("Response is good enough, exiting.")
+                if use_partial_ctx:
+                    ctx = bridge.on_next_ctx(
+                        ctx=ctx,
+                        input=result.feedback,  # new ctx: input
+                        output=final_output,  # prev ctx: output
+                        response_id=response_id,
+                        finish=True,
+                        stream=False,
+                    )
+                else:
+                    print("Response is good enough, exiting.")
                 break
             print("Re-running with feedback")
             input_items.append({"content": f"Feedback: {result.feedback}", "role": "user"})
 
+            if use_partial_ctx:
+                ctx = bridge.on_next_ctx(
+                    ctx=ctx,
+                    input=result.feedback,  # new ctx: input
+                    output=final_output,  # prev ctx: output
+                    response_id=response_id,
+                    stream=False,
+                )
+
             if num_generation >= max_generations > 0:
                 info = f"\n\n**Max generations reached ({max_generations}), exiting.**\n"
                 ctx.stream = info
@@ -439,6 +458,7 @@ class Agent(BaseAgent):
                         output=final_output,  # prev ctx: output
                         response_id=response_id,
                         finish=True,
+                        stream=True,
                     )
                 else:
                     ctx.stream = info
@@ -457,6 +477,7 @@ class Agent(BaseAgent):
                         input=result.feedback,  # new ctx: input
                         output=final_output,  # prev ctx: output
                         response_id=response_id,
+                        stream=True,
                     )
                     handler.new()
                 else:
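
The planner, feedback, experts-feedback and evolve agents all gain the same change: with use_partial_ctx enabled, every generate/evaluate round is flushed to its own context item via bridge.on_next_ctx (stream=False here, stream=True in the streaming branches), and the passing round is flushed with finish=True instead of only printing and breaking. A self-contained sketch of that loop shape; Bridge, generate and evaluate below are toy stand-ins, and only the on_next_ctx keyword set mirrors the diff:

# Toy sketch of the shared evaluator-loop shape; Bridge/generate/evaluate are stand-ins,
# not the real pygpt-net APIs (only the on_next_ctx keywords mirror the hunks above).
from dataclasses import dataclass

@dataclass
class EvalResult:
    score: str       # "pass" or anything else meaning "needs another round"
    feedback: str

class Bridge:
    def on_next_ctx(self, ctx, input, output, response_id, stream, finish=False):
        # real code closes the current ctx item and opens the next one
        print(f"[ctx] output={output!r} feedback={input!r} finish={finish}")
        return ctx

def run_loop(bridge, generate, evaluate, query, use_partial_ctx=True):
    ctx = {"id": 1}                                    # placeholder ctx item
    input_items = [{"role": "user", "content": query}]
    while True:
        final_output, response_id = generate(input_items)
        result = evaluate(final_output)
        if result.score == "pass":
            if use_partial_ctx:                        # flush the final round, mark finished
                ctx = bridge.on_next_ctx(ctx=ctx, input=result.feedback, output=final_output,
                                         response_id=response_id, finish=True, stream=False)
            break
        input_items.append({"content": f"Feedback: {result.feedback}", "role": "user"})
        if use_partial_ctx:                            # flush this round as its own ctx item
            ctx = bridge.on_next_ctx(ctx=ctx, input=result.feedback, output=final_output,
                                     response_id=response_id, stream=False)
    return final_output

# toy generator/evaluator: the second attempt passes
attempts = iter(["draft", "final"])
run_loop(Bridge(),
         generate=lambda items: (next(attempts), "resp-1"),
         evaluate=lambda text: EvalResult("pass" if text == "final" else "fail", "tighten it"),
         query="Write a plan")
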
pygpt_net/provider/core/config/patch.py

@@ -2243,6 +2243,13 @@ class Patch:
             data["agent.openai.response.split"] = True
             updated = True
 
+        # < 2.6.0
+        if old < parse_version("2.6.0"):
+            print("Migrating config from < 2.6.0...")
+            self.window.core.updater.patch_css('style.light.css', True)  # scrollbar, calendar fix
+            self.window.core.updater.patch_css('style.dark.css', True)  # calendar fix
+            updated = True
+
         # update file
         migrated = False
         if updated:
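
The migration block is gated on the stored config version, so on an upgrade across this release pair it runs exactly once: 2.5.98.post1 sorts below 2.6.0 under PEP 440 ordering. A quick check of the gate, assuming parse_version resolves to packaging.version.parse or an equivalent:

# Quick check of the version gate; assumes parse_version behaves like packaging.version.parse.
from packaging.version import parse as parse_version

old = parse_version("2.5.98.post1")
print(old < parse_version("2.6.0"))                      # True  -> migration runs on upgrade
print(parse_version("2.6.0") < parse_version("2.6.0"))   # False -> nothing to do afterwards
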
pygpt_net/provider/gpt/__init__.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.08 05:00:00 #
+# Updated Date: 2025.08.12 19:00:00 #
 # ================================================== #
 
 from openai import OpenAI
@@ -62,6 +62,7 @@ class Gpt:
         self.tools = Tools(window)
         self.vision = Vision(window)
         self.client = None
+        self.locked = False
 
     def get_client(
             self,
@@ -233,9 +234,12 @@ class Gpt:
         if context.request:
             context.stream = False
             context.mode = "chat"  # fake mode for redirect
-            result = self.call(context, extra)
+            self.locked = True
+            self.call(context, extra)
+            self.locked = False
             return context.ctx.output
 
+        self.locked = True
         ctx = context.ctx
         mode = context.mode
         prompt = context.prompt
@@ -291,7 +295,21 @@ class Gpt:
         except Exception as e:
             self.window.core.debug.log(e)
             print("Error in GPT quick call: " + str(e))
+        finally:
+            self.locked = False
 
     def stop(self):
-        """Stop OpenAI API"""
+        """On global event stop"""
         pass
+
+    def close(self):
+        """Close OpenAI client"""
+        if self.locked:
+            return
+        if self.client is not None:
+            try:
+                self.client.close()
+                self.client = None
+            except Exception as e:
+                self.window.core.debug.log(e)
+                print("Error closing GPT client:", e)
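
The new locked flag is a re-entrancy guard: quick_call() holds it while it runs (including the redirected self.call(...) path), and close() refuses to tear down the shared OpenAI client while the flag is set. A reduced sketch of the pattern with a stubbed client, not the real Gpt class:

# Reduced sketch of the lock-guarded close() pattern; the client below is a stub,
# not the real OpenAI client managed by pygpt_net.provider.gpt.Gpt.

class ClientHolder:
    def __init__(self):
        self.client = None
        self.locked = False

    def get_client(self):
        if self.client is None:
            self.client = object()   # stand-in for OpenAI(...)
        return self.client

    def quick_call(self):
        self.locked = True           # block close() while the request is in flight
        try:
            client = self.get_client()
            # ... perform the request with `client` ...
        finally:
            self.locked = False      # always release, even on error

    def close(self):
        if self.locked:              # a call is still running: skip teardown
            return
        if self.client is not None:
            # real code calls self.client.close() and logs failures
            self.client = None
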
pygpt_net/tools/html_canvas/ui/widgets.py

@@ -34,6 +34,10 @@ class ToolWidget:
         self.edit = None  # canvas edit
         self.btn_edit = None  # edit checkbox
 
+    def on_open(self):
+        """On open"""
+        pass
+
     def on_delete(self):
         """On delete"""
         if self.tool:
pygpt_net/tools/media_player/tool.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.12.14 22:00:00 #
+# Updated Date: 2025.08.12 15:00:00 #
 # ================================================== #
 
 import datetime
@@ -36,9 +36,12 @@ class MediaPlayer(BaseTool):
         self.id = "player"
         self.opened = False
         self.dialog = None
+        self.initialized = False
 
-    def setup(self):
+    def lazy_setup(self):
         """Setup media player"""
+        if self.initialized:
+            return
         if self.window.core.config.has("video.player.path"):
             path = self.window.core.config.get("video.player.path")
             if path:
@@ -50,6 +53,11 @@ class MediaPlayer(BaseTool):
         if self.window.core.config.has("video.player.volume.mute"):
             self.window.video_player.set_muted(self.window.core.config.get("video.player.volume.mute"))
         self.window.video_player.update()  # update player volume, slider, etc.
+        self.initialized = True
+
+    def setup(self):
+        """Setup media player"""
+        pass
 
     def update(self):
         """Update menu"""
@@ -130,6 +138,7 @@
         )
     def open(self):
         """Open player window"""
+        self.lazy_setup()
         self.window.ui.dialogs.open('video_player', width=800, height=600)
         self.window.video_player.force_resize()
         self.opened = True
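
In the media player tool, setup() is now a no-op and the heavy initialization moved to lazy_setup(), called from open() and guarded by self.initialized, so the player is only configured the first time its dialog is actually opened. The guard in isolation (illustrative class, not the real BaseTool API):

# Illustrative lazy-initialization guard; not the real BaseTool/MediaPlayer API.

class LazyTool:
    def __init__(self):
        self.initialized = False

    def setup(self):
        """Called at application startup; intentionally does nothing now."""
        pass

    def lazy_setup(self):
        """One-time heavy initialization, deferred until first use."""
        if self.initialized:
            return
        # ... restore player path, volume and mute state, refresh the widget ...
        self.initialized = True

    def open(self):
        self.lazy_setup()            # cheap to call on every open; runs only once
        # ... show the dialog ...
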