pyglet 2.1.6__py3-none-any.whl → 2.1.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. pyglet/__init__.py +27 -42
  2. pyglet/app/base.py +2 -2
  3. pyglet/clock.py +1 -1
  4. pyglet/display/base.py +31 -21
  5. pyglet/display/cocoa.py +25 -1
  6. pyglet/display/headless.py +12 -1
  7. pyglet/display/win32.py +134 -18
  8. pyglet/display/xlib.py +285 -70
  9. pyglet/event.py +17 -1
  10. pyglet/experimental/README.md +1 -1
  11. pyglet/experimental/jobs.py +1 -1
  12. pyglet/experimental/multitexture_sprite.py +2 -2
  13. pyglet/font/__init__.py +1 -1
  14. pyglet/font/base.py +8 -5
  15. pyglet/font/dwrite/__init__.py +13 -8
  16. pyglet/font/dwrite/dwrite_lib.py +1 -1
  17. pyglet/font/user.py +1 -1
  18. pyglet/gl/base.py +8 -4
  19. pyglet/gl/cocoa.py +4 -0
  20. pyglet/gl/gl.py +4 -3
  21. pyglet/gl/gl.pyi +2320 -0
  22. pyglet/gl/gl_compat.py +7 -18
  23. pyglet/gl/gl_compat.pyi +3097 -0
  24. pyglet/gl/xlib.py +24 -0
  25. pyglet/graphics/shader.py +8 -1
  26. pyglet/graphics/vertexbuffer.py +1 -1
  27. pyglet/gui/frame.py +2 -2
  28. pyglet/gui/widgets.py +1 -1
  29. pyglet/image/__init__.py +3 -3
  30. pyglet/image/buffer.py +3 -3
  31. pyglet/input/base.py +22 -12
  32. pyglet/input/linux/evdev.py +96 -23
  33. pyglet/input/linux/evdev_constants.py +2 -1
  34. pyglet/input/win32/xinput.py +6 -3
  35. pyglet/libs/darwin/cocoapy/cocoalibs.py +3 -1
  36. pyglet/libs/ioctl.py +2 -2
  37. pyglet/libs/win32/__init__.py +12 -0
  38. pyglet/libs/win32/constants.py +4 -0
  39. pyglet/libs/win32/types.py +97 -0
  40. pyglet/libs/x11/xrandr.py +166 -0
  41. pyglet/libs/x11/xrender.py +43 -0
  42. pyglet/libs/x11/xsync.py +43 -0
  43. pyglet/math.py +65 -54
  44. pyglet/media/buffered_logger.py +1 -1
  45. pyglet/media/codecs/ffmpeg.py +18 -34
  46. pyglet/media/codecs/gstreamer.py +3 -3
  47. pyglet/media/codecs/pyogg.py +1 -1
  48. pyglet/media/codecs/wave.py +6 -0
  49. pyglet/media/codecs/wmf.py +33 -7
  50. pyglet/media/devices/win32.py +1 -1
  51. pyglet/media/drivers/base.py +1 -1
  52. pyglet/media/drivers/directsound/interface.py +4 -0
  53. pyglet/media/drivers/listener.py +2 -2
  54. pyglet/media/drivers/xaudio2/interface.py +6 -2
  55. pyglet/media/drivers/xaudio2/lib_xaudio2.py +1 -1
  56. pyglet/media/instrumentation.py +2 -2
  57. pyglet/media/player.py +2 -2
  58. pyglet/media/player_worker_thread.py +1 -1
  59. pyglet/media/synthesis.py +1 -1
  60. pyglet/model/codecs/gltf.py +1 -1
  61. pyglet/shapes.py +1 -1
  62. pyglet/sprite.py +1 -1
  63. pyglet/text/caret.py +44 -5
  64. pyglet/text/layout/base.py +3 -3
  65. pyglet/util.py +1 -1
  66. pyglet/window/__init__.py +54 -14
  67. pyglet/window/cocoa/__init__.py +27 -0
  68. pyglet/window/mouse.py +11 -1
  69. pyglet/window/win32/__init__.py +40 -14
  70. pyglet/window/xlib/__init__.py +21 -7
  71. {pyglet-2.1.6.dist-info → pyglet-2.1.9.dist-info}/METADATA +1 -1
  72. {pyglet-2.1.6.dist-info → pyglet-2.1.9.dist-info}/RECORD +74 -70
  73. {pyglet-2.1.6.dist-info → pyglet-2.1.9.dist-info}/LICENSE +0 -0
  74. {pyglet-2.1.6.dist-info → pyglet-2.1.9.dist-info}/WHEEL +0 -0
pyglet/media/codecs/ffmpeg.py CHANGED
@@ -126,7 +126,7 @@ class FFmpegException(MediaFormatException):
 def ffmpeg_get_audio_buffer_size(audio_format):
     """Return the audio buffer size
 
-    Buffer size can accomodate 1 sec of audio data.
+    Buffer size can accommodate 1 sec of audio data.
     """
     return audio_format.bytes_per_second + FF_INPUT_BUFFER_PADDING_SIZE
 
@@ -385,16 +385,6 @@ def ffmpeg_stream_info(file: FFmpegFile, stream_index: int) -> StreamAudioInfo |
             context.sample_rate,
             channel_count,
         )
-        if context.format in (AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8P):
-            info.sample_bits = 8
-        elif context.format in (AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P,
-                                AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP):
-            info.sample_bits = 16
-        elif context.format in (AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32P):
-            info.sample_bits = 32
-        else:
-            info.sample_format = None
-            info.sample_bits = None
     else:
         return None
     return info
@@ -466,7 +456,7 @@ def ffmpeg_seek_file(file: FFmpegFile, timestamp: float) -> None:
         buf = create_string_buffer(128)
         avutil.av_strerror(result, buf, 128)
         descr = buf.value
-        raise FFmpegException('Error occured while seeking. ' +
+        raise FFmpegException('Error occurred while seeking. ' +
                               descr.decode())
 
 
@@ -634,35 +624,29 @@ class FFmpegSource(StreamingSource):
             self._video_stream = stream
            self._video_stream_index = i
 
-        elif isinstance(info, StreamAudioInfo) and info.sample_bits in (8, 16, 24) and self._audio_stream is None:
+        elif isinstance(info, StreamAudioInfo) and self._audio_stream is None:
             stream = ffmpeg_open_stream(self._file, i)
 
-            self.audio_format = AudioFormat(
-                channels=min(2, info.channels),
-                sample_size=info.sample_bits,
-                sample_rate=info.sample_rate)
-            self._audio_stream = stream
-            self._audio_stream_index = i
-
-            channel_input = self._get_default_channel_layout(info.channels)
             channels_out = min(2, info.channels)
+            channel_input = self._get_default_channel_layout(info.channels)
             channel_output = self._get_default_channel_layout(channels_out)
 
-            sample_rate = stream.codec_context.contents.sample_rate
-            sample_format = stream.codec_context.contents.sample_fmt
-
-            if sample_format in (AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8P):
+            sample_bits = info.sample_bits
+            if info.sample_format in (AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8P):
                 self.tgt_format = AV_SAMPLE_FMT_U8
-            elif sample_format in (AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P):
-                self.tgt_format = AV_SAMPLE_FMT_S16
-            elif sample_format in (AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32P):
-                self.tgt_format = AV_SAMPLE_FMT_S32
-            elif sample_format in (AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP):
-                self.tgt_format = AV_SAMPLE_FMT_S16
             else:
-                raise FFmpegException('Audio format not supported.')
+                # No matter the input format, produce S16 samples.
+                sample_bits = 16
+                self.tgt_format = AV_SAMPLE_FMT_S16
+
+            self.audio_format = AudioFormat(
+                channels=channels_out,
+                sample_size=sample_bits,
+                sample_rate=info.sample_rate)
+            self._audio_stream = stream
+            self._audio_stream_index = i
 
-            self.audio_convert_ctx = self.get_formatted_swr_context(channel_output, sample_rate, channel_input, sample_format)
+            self.audio_convert_ctx = self.get_formatted_swr_context(channel_output, info.sample_rate, channel_input, info.sample_format)
             if not self.audio_convert_ctx:
                 swresample.swr_free(self.audio_convert_ctx)
                 raise FFmpegException('Cannot create sample rate converter.')
@@ -676,7 +660,7 @@ class FFmpegSource(StreamingSource):
         self._events = [] # They don't seem to be used!
 
         self.audioq = deque()
-        # Make queue big enough to accomodate 1.2 sec?
+        # Make queue big enough to accommodate 1.2 sec?
         self._max_len_audioq = self.MAX_QUEUE_SIZE # Need to figure out a correct amount
         if self.audio_format:
             # Buffer 1 sec worth of audio
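The net effect of the FFmpeg hunks above is that audio streams are no longer rejected by sample format: anything that is not unsigned 8-bit is resampled to signed 16-bit, at most stereo. A rough illustration of what a caller sees, assuming the FFmpeg libraries are installed and using a placeholder file name:

import pyglet

# Hypothetical file: a 24-bit or float stream now loads instead of raising
# 'Audio format not supported', and is converted to 16-bit signed PCM.
source = pyglet.media.load("music.flac", streaming=True)

fmt = source.audio_format
print(fmt.channels, fmt.sample_size, fmt.sample_rate)  # e.g. 2 16 44100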
pyglet/media/codecs/gstreamer.py CHANGED
@@ -48,7 +48,7 @@ class _MessageHandler:
        """The main message callback"""
         if message.type == Gst.MessageType.EOS:
 
-            self.source.queue.put(self.source.sentinal)
+            self.source.queue.put(self.source.sentinel)
             if not self.source.caps:
                 raise GStreamerDecodeException("Appears to be an unsupported file")
 
@@ -105,7 +105,7 @@ class _MessageHandler:
 class GStreamerSource(StreamingSource):
 
     source_instances = weakref.WeakSet()
-    sentinal = object()
+    sentinel = object()
 
     def __init__(self, filename, file=None):
         self._pipeline = Gst.Pipeline()
@@ -199,7 +199,7 @@ class GStreamerSource(StreamingSource):
         data = bytes()
         while len(data) < num_bytes:
             packet = self.queue.get()
-            if packet == self.sentinal:
+            if packet == self.sentinel:
                 self._finished.set()
                 break
             data += packet
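The sentinal/sentinel rename touches pyglet's end-of-stream marker. For readers unfamiliar with the pattern, a standalone sketch of the same idea in plain Python (not GStreamer code):

from queue import Queue

sentinel = object()          # unique end-of-stream marker
q = Queue()
for chunk in (b"abc", b"def"):
    q.put(chunk)
q.put(sentinel)

data = b""
while True:
    packet = q.get()
    if packet is sentinel:   # pyglet compares with ==, which behaves the same for a bare object()
        break
    data += packet
print(data)                  # b'abcdef'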
pyglet/media/codecs/pyogg.py CHANGED
@@ -287,7 +287,7 @@ class MemoryFLACFileStream(UnclosedFLACFileStream):
 
         metadata_status = pyogg.flac.FLAC__stream_decoder_process_until_end_of_metadata(self.decoder)
         if not metadata_status: # error
-            raise DecodeException("An error occured when trying to decode the metadata of {}".format(path))
+            raise DecodeException("An error occurred when trying to decode the metadata of {}".format(path))
 
     def read_callback(self, decoder, buffer, size, data):
         chunk = size.contents.value
pyglet/media/codecs/wave.py CHANGED
@@ -25,6 +25,12 @@ class WaveSource(StreamingSource):
 
         nchannels, sampwidth, framerate, nframes, comptype, compname = self._wave.getparams()
 
+        if nchannels not in (1, 2):
+            raise WAVEDecodeException(f"incompatible channel count {nchannels}")
+
+        if sampwidth not in (1, 2):
+            raise WAVEDecodeException(f"incompatible sample width {sampwidth}")
+
         self.audio_format = AudioFormat(channels=nchannels, sample_size=sampwidth * 8, sample_rate=framerate)
 
         self._bytes_per_frame = nchannels * sampwidth
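The new guard rejects anything that is not mono/stereo with 1- or 2-byte samples before an AudioFormat is built. The same rule can be checked up front with the standard-library wave module; a small sketch (the path is a placeholder):

import wave

def is_pyglet_wave_compatible(path):
    # Mirror the new WaveSource checks: 1-2 channels, 1- or 2-byte samples.
    with wave.open(path, "rb") as wav:
        return wav.getnchannels() in (1, 2) and wav.getsampwidth() in (1, 2)

# print(is_pyglet_wave_compatible("sound.wav"))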
pyglet/media/codecs/wmf.py CHANGED
@@ -539,19 +539,45 @@ class WMFSource(Source):
         self._source_reader.SetStreamSelection(MF_SOURCE_READER_FIRST_AUDIO_STREAM, True)
 
         # Check sub media type, AKA what kind of codec
-        guid_compressed = com.GUID(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
-        imfmedia.GetGUID(MF_MT_SUBTYPE, byref(guid_compressed))
+        source_subtype_guid = com.GUID(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+        source_sample_size = c_uint32()
+        source_channel_count = c_uint32()
 
-        if guid_compressed == MFAudioFormat_PCM or guid_compressed == MFAudioFormat_Float:
-            assert _debug(f'WMFAudioDecoder: Found Uncompressed Audio: {guid_compressed}')
+        imfmedia.GetGUID(MF_MT_SUBTYPE, byref(source_subtype_guid))
+        try:
+            # Some formats such as mp3 do not report this value
+            imfmedia.GetUINT32(MF_MT_AUDIO_BITS_PER_SAMPLE, byref(source_sample_size))
+        except OSError:
+            source_sample_size.value = 0
+        imfmedia.GetUINT32(MF_MT_AUDIO_NUM_CHANNELS, byref(source_channel_count))
+
+        if (
+            source_subtype_guid == MFAudioFormat_PCM and
+            source_sample_size.value in (8, 16) and
+            source_channel_count.value in (1, 2)
+        ):
+            assert _debug(f'WMFAudioDecoder: Found compatible Integer PCM Audio: {source_subtype_guid}')
         else:
-            assert _debug(f'WMFAudioDecoder: Found Compressed Audio: {guid_compressed}')
-            # If audio is compressed, attempt to decompress it by forcing source reader to use PCM
-            mf_mediatype = IMFMediaType()
+            assert _debug(f'WMFAudioDecoder: Found incompatible Audio: {source_subtype_guid}, '
+                          f'sample size={source_sample_size.value}, channel count={source_channel_count.value}.'
+                          f'Attempting to decode/resample.')
+            # If audio is compressed or incompatible, attempt to decompress or resample it
+            # to standard 16bit integer PCM
+            samples_per_sec = c_uint32()
+            imfmedia.GetUINT32(MF_MT_AUDIO_SAMPLES_PER_SECOND, byref(samples_per_sec))
 
+            channels_out = min(2, source_channel_count.value)
+
+            mf_mediatype = IMFMediaType()
             MFCreateMediaType(byref(mf_mediatype))
             mf_mediatype.SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Audio)
             mf_mediatype.SetGUID(MF_MT_SUBTYPE, MFAudioFormat_PCM)
+            mf_mediatype.SetUINT32(MF_MT_AUDIO_NUM_CHANNELS, channels_out)
+            mf_mediatype.SetUINT32(MF_MT_AUDIO_BITS_PER_SAMPLE, 16)
+            mf_mediatype.SetUINT32(MF_MT_AUDIO_SAMPLES_PER_SECOND, samples_per_sec.value)
+            mf_mediatype.SetUINT32(MF_MT_AUDIO_BLOCK_ALIGNMENT, channels_out * 2)
+            mf_mediatype.SetUINT32(MF_MT_AUDIO_AVG_BYTES_PER_SECOND, samples_per_sec.value * channels_out * 2)
+            mf_mediatype.SetUINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, 1)
 
             try:
                 self._source_reader.SetCurrentMediaType(self._audio_stream_index, None, mf_mediatype)
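For reference, the MF_MT_AUDIO_BLOCK_ALIGNMENT and MF_MT_AUDIO_AVG_BYTES_PER_SECOND values written above follow the standard PCM relations for 16-bit output; illustrative numbers only:

channels_out = 2            # stereo after clamping with min(2, ...)
bits_per_sample = 16
samples_per_sec = 44_100    # example rate reported by the source

block_align = channels_out * bits_per_sample // 8   # bytes per audio frame
avg_bytes_per_sec = samples_per_sec * block_align   # bytes per second
print(block_align, avg_bytes_per_sec)               # 4 176400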
pyglet/media/devices/win32.py CHANGED
@@ -318,7 +318,7 @@ class Win32AudioDeviceManager(base.AbstractAudioDeviceManager):
         return dev_id.value, name, description, state.value
 
     def get_devices(self, flow=eRender, state=DEVICE_STATE_ACTIVE):
-        """Get's all of the specified devices (by default, all output and active)."""
+        """Gets all of the specified devices (by default, all output and active)."""
         collection = IMMDeviceCollection()
         self._device_enum.EnumAudioEndpoints(flow, state, byref(collection))
 
pyglet/media/drivers/base.py CHANGED
@@ -378,7 +378,7 @@ class AbstractAudioPlayer(metaclass=ABCMeta):
             # Player falling behind
             # Skip at most 12ms if this is a minor desync, otherwise skip the entire
             # difference. this will be noticeable, but the desync is
-            # likely already noticable in context of whatever the application does.
+            # likely already noticeable in context of whatever the application does.
             compensated_bytes = (-desync_bytes
                                  if extreme_desync
                                  else min(-desync_bytes, self.desync_correction_bytes_minor))
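A rough, self-contained illustration of how that branch caps the skip; every number here, including the extreme_desync threshold, is invented for the example (pyglet derives them from the actual audio format and timing):

desync_correction_bytes_minor = 12 * 4 * 44100 // 1000   # ~12 ms of 16-bit stereo at 44.1 kHz
desync_bytes = -30000                                    # negative: playback is behind
extreme_desync = -desync_bytes > 4 * desync_correction_bytes_minor  # hypothetical threshold

compensated_bytes = (-desync_bytes
                     if extreme_desync
                     else min(-desync_bytes, desync_correction_bytes_minor))
print(compensated_bytes)   # 30000: an extreme desync skips the whole difference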
pyglet/media/drivers/directsound/interface.py CHANGED
@@ -5,6 +5,7 @@ import ctypes
 import weakref
 from collections import namedtuple
 
+from pyglet.media.exceptions import MediaException
 from pyglet.util import debug_print
 from pyglet.window.win32 import _user32
 
@@ -20,6 +21,9 @@ def _check(hresult):
 
 
 def _create_wave_format(audio_format):
+    if audio_format.channels > 2 or audio_format.sample_size not in (8, 16):
+        raise MediaException(f'Unsupported audio format: {audio_format}')
+
     wfx = lib.WAVEFORMATEX()
     wfx.wFormatTag = lib.WAVE_FORMAT_PCM
     wfx.nChannels = audio_format.channels
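The same 8/16-bit, at-most-stereo guard is added to the XAudio2 driver further below. A tiny stand-in (illustrative, not pyglet API) shows which formats now pass instead of producing a broken WAVEFORMATEX:

def _is_supported_pcm(channels, sample_size):
    # Mirrors the new checks in _create_wave_format and create_xa2_waveformat.
    return channels <= 2 and sample_size in (8, 16)

print(_is_supported_pcm(2, 16))   # True
print(_is_supported_pcm(6, 24))   # False -> the real code raises MediaException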
pyglet/media/drivers/listener.py CHANGED
@@ -53,7 +53,7 @@ class AbstractListener(metaclass=ABCMeta):
         listener is facing.
 
         The orientation is given as a tuple of floats (x, y, z), and has
-        no unit. The forward orientation should be orthagonal to the
+        no unit. The forward orientation should be orthogonal to the
         up orientation.
 
         :type: 3-tuple of float
@@ -69,7 +69,7 @@ class AbstractListener(metaclass=ABCMeta):
         of the listener.
 
         The orientation is given as a tuple of floats (x, y, z), and has
-        no unit. The up orientation should be orthagonal to the
+        no unit. The up orientation should be orthogonal to the
         forward orientation.
 
         :type: 3-tuple of float
pyglet/media/drivers/xaudio2/interface.py CHANGED
@@ -8,6 +8,7 @@ import pyglet
 from pyglet.libs.win32 import com
 from pyglet.media.devices import get_audio_device_manager
 from pyglet.media.devices.base import DeviceFlow
+from pyglet.media.exceptions import MediaException
 from pyglet.util import debug_print
 
 from . import lib_xaudio2 as lib
@@ -27,6 +28,9 @@ def create_xa2_buffer(audio_data):
 
 
 def create_xa2_waveformat(audio_format):
+    if audio_format.channels > 2 or audio_format.sample_size not in (8, 16):
+        raise MediaException(f'Unsupported audio format: {audio_format}')
+
     wfx = lib.WAVEFORMATEX()
     wfx.wFormatTag = lib.WAVE_FORMAT_PCM
     wfx.nChannels = audio_format.channels
@@ -88,7 +92,7 @@ class XA2EngineCallback(com.COMObject):
     def OnCriticalError(self, hresult):
         # This is a textbook bad example, yes.
         # It's probably safe though: assuming that XA2 has ceased to operate if we ever end up
-        # here, nothing can release the lock inbetween.
+        # here, nothing can release the lock in between.
         if self._lock.locked():
             self._lock.release()
         raise Exception("Critical Error:", hresult)
@@ -438,7 +442,7 @@ class XA2SourceVoice:
         self.channel_count = channel_count
         self.sample_size = sample_size
 
-        # How many samples the voice had played when it was most recently readded into the
+        # How many samples the voice had played when it was most recently re-added into the
         # pool of available voices.
         self.samples_played_at_last_recycle = 0
 
pyglet/media/drivers/xaudio2/lib_xaudio2.py CHANGED
@@ -576,7 +576,7 @@ class XAUDIO2FX_REVERB_PARAMETERS(Structure):
         ('RearDelay', UINT32), # 7.1: [0, 20] in ms, all other: [0, 5] in ms
         ('SideDelay', UINT32), # .1: [0, 5] in ms, all other: not used, but still validated # WIN 10 only.
 
-        # Indexed Paremeters
+        # Indexed Parameters
         ('PositionLeft', BYTE), # [0, 30] no units
         ('PositionRight', BYTE), # 0, 30] no units, ignored when configured to mono
         ('PositionMatrixLeft', BYTE), # [0, 30] no units
pyglet/media/instrumentation.py CHANGED
@@ -1,5 +1,5 @@
 """
-Responsabilities
+Responsibilities
 
 Defines the events that modify media_player state
 Defines which events are potential defects
@@ -15,7 +15,7 @@ mp_events = {
     # <evname>: {
     #     "desc": <description used in reports to mention the event>,
     #     "update_names": <list of names of fields updated>,
-    #     "other_fields": <list of additionals fields to show when mention the event in a report>
+    #     "other_fields": <list of additional fields to show when mention the event in a report>
     # },
 
     "crash": {
pyglet/media/player.py CHANGED
@@ -69,7 +69,7 @@ class _PlayerProperty:
 
     We want the Player to have attributes like volume, pitch, etc. These are
     actually implemented by the AudioPlayer. So this descriptor will forward
-    an assignement to one of the attributes to the AudioPlayer. For example
+    an assignment to one of the attributes to the AudioPlayer. For example
     `player.volume = 0.5` will call `player._audio_player.set_volume(0.5)`.
 
     The Player class has default values at the class level which are retrieved
@@ -97,7 +97,7 @@ class _PlayerProperty:
 class Player(pyglet.event.EventDispatcher):
     """High-level sound and video player."""
 
-    # Spacialisation attributes, preserved between audio players
+    # Specialisation attributes, preserved between audio players
     _volume = 1.0
     _min_distance = 1.0
     _max_distance = 100000000.
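The corrected docstring describes a forwarding descriptor. A self-contained sketch of that pattern under invented names (this is not pyglet's _PlayerProperty, just the idea it documents):

class _ForwardingProperty:
    # Store a value on the instance and forward assignments to a backend setter.

    def __init__(self, name, default):
        self.name = name
        self.default = default

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        return obj.__dict__.get(self.name, self.default)

    def __set__(self, obj, value):
        obj.__dict__[self.name] = value
        backend = getattr(obj, "_audio_player", None)
        if backend is not None:
            # e.g. player.volume = 0.5 -> backend.set_volume(0.5)
            getattr(backend, f"set_{self.name}")(value)


class FakeBackend:
    def set_volume(self, value):
        print("backend volume set to", value)


class FakePlayer:
    volume = _ForwardingProperty("volume", 1.0)

    def __init__(self):
        self._audio_player = FakeBackend()


p = FakePlayer()
p.volume = 0.5     # prints: backend volume set to 0.5
print(p.volume)    # 0.5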
pyglet/media/player_worker_thread.py CHANGED
@@ -24,7 +24,7 @@ class PlayerWorkerThread(threading.Thread):
     to keep their buffers filled (and perform event dispatching tasks), but
     does not block the main thread.
 
-    This thread will sleep for a small period betwen updates, but provides a
+    This thread will sleep for a small period between updates, but provides a
     :py:meth:`~notify` method to allow waking it immediately. A :py:meth:`~stop`
     method is provided to terminate the thread, but under normal operation it
     will exit cleanly on interpreter shutdown.
pyglet/media/synthesis.py CHANGED
@@ -352,7 +352,7 @@ def sine_operator(sample_rate: int = 44800, frequency: float = 440, index: float
 def composite_operator(*operators: Generator) -> Generator:
     """Combine the output from multiple generators.
 
-    This does a simple sum & devision of the output of
+    This does a simple sum & division of the output of
     two or more generators. A new generator is returned.
     """
     return (sum(samples) / len(samples) for samples in zip(*operators))
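A runnable sketch of the averaging that composite_operator performs, using plain generators in place of pyglet's operators (so none of these helper signatures are pyglet's):

import math

def tone(frequency, sample_rate=44100):
    # Endless sine generator used as a stand-in for a pyglet operator.
    step = 2 * math.pi * frequency / sample_rate
    n = 0
    while True:
        yield math.sin(step * n)
        n += 1

def composite_operator(*operators):
    # Same idea as the function above: average the generators sample by sample.
    return (sum(samples) / len(samples) for samples in zip(*operators))

mixed = composite_operator(tone(220), tone(440))
print([next(mixed) for _ in range(3)])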
pyglet/model/codecs/gltf.py CHANGED
@@ -127,7 +127,7 @@ class Accessor:
         # This is a 'sparse' accessor:
         self.sparse = data.get('sparse')
         if self.sparse:
-            raise NotImplementedError("Not yet implmented")
+            raise NotImplementedError("Not yet implemented")
 
         # The Python format type:
         self.fmt = _array_types[self.component_type]
pyglet/shapes.py CHANGED
@@ -229,7 +229,7 @@ def _get_segment(p0: tuple[float, float] | list[float], p1: tuple[float, float]
     scale2 = min(scale2, 2.0 * thickness)
 
     # Make these tuples instead of Vec2 because accessing
-    # members of Vec2 is suprisingly slow
+    # members of Vec2 is surprisingly slow
     miter1_scaled_p = (v_miter1.x * scale1, v_miter1.y * scale1)
     miter2_scaled_p = (v_miter2.x * scale2, v_miter2.y * scale2)
 
pyglet/sprite.py CHANGED
@@ -180,7 +180,7 @@ def get_default_array_shader() -> ShaderProgram:
 class SpriteGroup(graphics.Group):
     """Shared Sprite rendering Group.
 
-    The Group defines custom ``__eq__`` ane ``__hash__`` methods, and so will
+    The Group defines custom ``__eq__`` and ``__hash__`` methods, and so will
     be automatically coalesced with other Sprite Groups sharing the same parent
     Group, Texture and blend parameters.
     """
pyglet/text/caret.py CHANGED
@@ -18,16 +18,17 @@ from typing import TYPE_CHECKING, Any, Pattern
 
 from pyglet import clock, event
 from pyglet.window import key
+from pyglet.event import EventDispatcher
 
 if TYPE_CHECKING:
     from pyglet.graphics import Batch
+    from pyglet.window import Window
     from pyglet.text.layout import IncrementalTextLayout
 
-
-class Caret:
+class Caret(EventDispatcher):
     """Visible text insertion marker for `pyglet.text.layout.IncrementalTextLayout`.
 
-    The caret is drawn as a single vertical bar at the document `position`
+    The caret is drawn as a single vertical bar at the document `position`
     on a text layout object. If ``mark`` is not None, it gives the unmoving
     end of the current text selection. The visible text selection on the
     layout is updated along with ``mark`` and ``position``.
@@ -39,6 +40,9 @@ class Caret:
     Updates to the document (and so the layout) are automatically propagated
     to the caret.
 
+    If the window argument is supplied, the caret object dispatches the on_clipboard_copy event when copying text and copies the text.
+    Pasting also works, which will dispatch the on_clipboard_paste event, and pastes the text to the current position of the caret, overriding selection.
+
     The caret object can be pushed onto a window event handler stack with
     ``Window.push_handlers``. The caret will respond correctly to keyboard,
     text, mouse and activation events, including double- and triple-clicks.
@@ -70,7 +74,7 @@ class Caret:
     _next_attributes: dict[str, Any]
 
     def __init__(self, layout: IncrementalTextLayout, batch: Batch | None = None,
-                 color: tuple[int, int, int, int] = (0, 0, 0, 255)) -> None:
+                 color: tuple[int, int, int, int] = (0, 0, 0, 255), window: Window | None = None) -> None:
         """Create a caret for a layout.
 
         By default the layout's batch is used, so the caret does not need to
@@ -84,7 +88,8 @@ class Caret:
             `color` : (int, int, int, int)
                 An RGBA or RGB tuple with components in the range [0, 255].
                 RGB colors will be treated as having an opacity of 255.
-
+            `window` : `~pyglet.window.Window`
+                For the clipboard feature to work, a window object is needed to be passed in to access and set clipboard content.
         """
         from pyglet import gl
         self._layout = layout
@@ -110,6 +115,7 @@
 
         self.visible = True
 
+        self._window = window
         layout.push_handlers(self)
 
     @property
@@ -467,7 +473,26 @@ class Caret:
                 self.position = 0
            else:
                 self.position = m.start()
+        elif motion == key.MOTION_COPY and self._window:
+            pos = self._position
+            mark = self._mark
+            if pos > mark:
+                text = self._layout.document.text[mark:pos]
+            else:
+                text = self._layout.document.text[pos:mark]
+
+            self._window.set_clipboard_text(text)
+            self.dispatch_event("on_clipboard_copy", text)
+        elif motion == key.MOTION_PASTE and self._window:
+            if self._mark is not None:
+                self._delete_selection()
 
+            text = self._window.get_clipboard_text().replace("\r", "\n")
+            pos = self._position
+            self._position += len(text)
+            self._layout.document.insert_text(pos, text, self._next_attributes)
+            self._nudge()
+            self.dispatch_event("on_clipboard_paste", text)
         if self._mark is not None and not select:
             self._mark = None
             self._layout.set_selection(0, 0)
@@ -564,3 +589,17 @@ class Caret:
         self._active = False
         self.visible = self._active
         return event.EVENT_HANDLED
+
+    def on_clipboard_copy(self, text: str) -> bool:
+        """Dispatched when text is copied.
+        This default handler does nothing.
+        """
+        return event.EVENT_HANDLED
+    def on_clipboard_paste(self, text: str) -> bool:
+        """Dispatched when text is pasted.
+        The default handler does nothing.
+        """
+        return event.EVENT_HANDLED
+
+Caret.register_event_type("on_clipboard_copy")
+Caret.register_event_type("on_clipboard_paste")
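A minimal sketch of how the new window argument and clipboard events could be wired up, assuming the platform maps the usual copy/paste shortcuts to key.MOTION_COPY / key.MOTION_PASTE and that Window.set_clipboard_text / get_clipboard_text are available as part of the window changes listed above (this example is not part of the package):

import pyglet
from pyglet.text.caret import Caret
from pyglet.text.document import UnformattedDocument
from pyglet.text.layout import IncrementalTextLayout

window = pyglet.window.Window(width=480, height=120)
batch = pyglet.graphics.Batch()

document = UnformattedDocument("Select some text and copy it")
document.set_style(0, len(document.text), {"color": (255, 255, 255, 255)})
layout = IncrementalTextLayout(document, width=440, height=80, batch=batch)
layout.x, layout.y = 20, 20

# Passing the window enables the MOTION_COPY / MOTION_PASTE handling above.
caret = Caret(layout, batch=batch, color=(255, 255, 255, 255), window=window)
window.push_handlers(caret)

@caret.event
def on_clipboard_copy(text):
    print("copied:", text)

@caret.event
def on_clipboard_paste(text):
    print("pasted:", text)

@window.event
def on_draw():
    window.clear()
    batch.draw()

pyglet.app.run()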
pyglet/text/layout/base.py CHANGED
@@ -15,6 +15,7 @@ from typing import (
 
 import pyglet
 from pyglet import graphics
+from pyglet.font.base import GlyphPosition
 from pyglet.gl import (
     GL_BLEND,
     GL_DEPTH_ATTACHMENT,
@@ -33,7 +34,6 @@ from pyglet.gl import (
 )
 from pyglet.graphics import Group
 from pyglet.text import runlist
-from pyglet.font.base import GlyphPosition
 
 if TYPE_CHECKING:
     from pyglet.customtypes import AnchorX, AnchorY, ContentVAlign, HorizontalAlign
@@ -448,7 +448,7 @@ class _GlyphBox(_AbstractBox):
                  rotation: float, visible: bool, anchor_x: float, anchor_y: float, context: _LayoutContext) -> None:
         # Creates the initial attributes and vertex lists of the glyphs.
         # line_x/line_y are calculated when lines shift. To prevent having to destroy and recalculate the layout
-        # everytime it moves, they are merged into the vertices. This way the translation can be moved directly.
+        # every time it moves, they are merged into the vertices. This way the translation can be moved directly.
         assert self.glyphs
         assert not self.vertex_lists
         try:
@@ -1918,7 +1918,7 @@ class TextLayout:
                     owner_glyphs.extend(zip([kern] * (kern_end - kern_start), gs, os))
                 if owner is None:
                     # Assume glyphs are already boxes.
-                    for kern, glyph in owner_glyphs:
+                    for _, glyph, _ in owner_glyphs:
                         line.add_box(glyph)
                 else:
                     line.add_box(_GlyphBox(owner, font, owner_glyphs, width))
pyglet/util.py CHANGED
@@ -208,7 +208,7 @@ class Decoder:
         raise NotImplementedError()
 
     def decode(self, *args, **kwargs):
-        """Read and decode the given file object and return an approprite
+        """Read and decode the given file object and return an appropriate
         pyglet object. Throws DecodeException if there is an error.
         `filename` can be a file type hint.
         """