psychopy 2025.1.1__py3-none-any.whl → 2025.2.1__py3-none-any.whl
This diff compares the content of two publicly released package versions as they appear in their public registries, and is provided for informational purposes only.
Potentially problematic release: this version of psychopy might be problematic.
- psychopy/VERSION +1 -1
- psychopy/alerts/alertsCatalogue/4810.yaml +19 -0
- psychopy/alerts/alertsCatalogue/alertCategories.yaml +4 -0
- psychopy/alerts/alertsCatalogue/alertmsg.py +15 -1
- psychopy/alerts/alertsCatalogue/generateAlertmsg.py +2 -2
- psychopy/app/Resources/classic/add_many.png +0 -0
- psychopy/app/Resources/classic/add_many@2x.png +0 -0
- psychopy/app/Resources/classic/devices.png +0 -0
- psychopy/app/Resources/classic/devices@2x.png +0 -0
- psychopy/app/Resources/classic/photometer.png +0 -0
- psychopy/app/Resources/classic/photometer@2x.png +0 -0
- psychopy/app/Resources/dark/add_many.png +0 -0
- psychopy/app/Resources/dark/add_many@2x.png +0 -0
- psychopy/app/Resources/dark/devices.png +0 -0
- psychopy/app/Resources/dark/devices@2x.png +0 -0
- psychopy/app/Resources/dark/photometer.png +0 -0
- psychopy/app/Resources/dark/photometer@2x.png +0 -0
- psychopy/app/Resources/light/add_many.png +0 -0
- psychopy/app/Resources/light/add_many@2x.png +0 -0
- psychopy/app/Resources/light/devices.png +0 -0
- psychopy/app/Resources/light/devices@2x.png +0 -0
- psychopy/app/Resources/light/photometer.png +0 -0
- psychopy/app/Resources/light/photometer@2x.png +0 -0
- psychopy/app/_psychopyApp.py +35 -13
- psychopy/app/builder/builder.py +88 -35
- psychopy/app/builder/dialogs/__init__.py +69 -220
- psychopy/app/builder/dialogs/dlgsCode.py +29 -8
- psychopy/app/builder/dialogs/paramCtrls.py +1468 -904
- psychopy/app/builder/validators.py +25 -17
- psychopy/app/coder/coder.py +12 -1
- psychopy/app/coder/repl.py +5 -2
- psychopy/app/colorpicker/__init__.py +1 -1
- psychopy/app/deviceManager/__init__.py +1 -0
- psychopy/app/deviceManager/addDialog.py +218 -0
- psychopy/app/deviceManager/dialog.py +185 -0
- psychopy/app/deviceManager/panel.py +191 -0
- psychopy/app/deviceManager/utils.py +60 -0
- psychopy/app/idle.py +7 -0
- psychopy/app/locale/ar_001/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/ar_001/LC_MESSAGE/messages.po +12695 -10592
- psychopy/app/locale/cs_CZ/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/cs_CZ/LC_MESSAGE/messages.po +10199 -24
- psychopy/app/locale/da_DK/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/da_DK/LC_MESSAGE/messages.po +10199 -24
- psychopy/app/locale/de_DE/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/de_DE/LC_MESSAGE/messages.po +11221 -9712
- psychopy/app/locale/el_GR/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/el_GR/LC_MESSAGE/messages.po +10200 -25
- psychopy/app/locale/en_NZ/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/en_NZ/LC_MESSAGE/messages.po +10200 -25
- psychopy/app/locale/en_US/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/en_US/LC_MESSAGE/messages.po +10195 -18
- psychopy/app/locale/es_CO/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/es_CO/LC_MESSAGE/messages.po +11917 -9101
- psychopy/app/locale/es_ES/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/es_ES/LC_MESSAGE/messages.po +11924 -9103
- psychopy/app/locale/es_US/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/es_US/LC_MESSAGE/messages.po +11917 -9101
- psychopy/app/locale/et_EE/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/et_EE/LC_MESSAGE/messages.po +11084 -9569
- psychopy/app/locale/fa_IR/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/fa_IR/LC_MESSAGE/messages.po +11590 -5806
- psychopy/app/locale/fi_FI/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/fi_FI/LC_MESSAGE/messages.po +10199 -24
- psychopy/app/locale/fr_FR/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/fr_FR/LC_MESSAGE/messages.po +11091 -9577
- psychopy/app/locale/he_IL/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/he_IL/LC_MESSAGE/messages.po +11072 -9549
- psychopy/app/locale/hi_IN/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/hi_IN/LC_MESSAGE/messages.po +11071 -9559
- psychopy/app/locale/hu_HU/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/hu_HU/LC_MESSAGE/messages.po +10200 -25
- psychopy/app/locale/it_IT/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/it_IT/LC_MESSAGE/messages.po +11072 -9560
- psychopy/app/locale/ja_JP/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/ja_JP/LC_MESSAGE/messages.po +1485 -1137
- psychopy/app/locale/ko_KR/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/ko_KR/LC_MESSAGE/messages.po +10199 -24
- psychopy/app/locale/ms_MY/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/ms_MY/LC_MESSAGE/messages.po +11463 -8757
- psychopy/app/locale/nl_NL/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/nl_NL/LC_MESSAGE/messages.po +10200 -25
- psychopy/app/locale/nn_NO/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/nn_NO/LC_MESSAGE/messages.po +10200 -25
- psychopy/app/locale/pl_PL/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/pl_PL/LC_MESSAGE/messages.po +10200 -25
- psychopy/app/locale/pt_PT/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/pt_PT/LC_MESSAGE/messages.po +11288 -9434
- psychopy/app/locale/ro_RO/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/ro_RO/LC_MESSAGE/messages.po +10200 -25
- psychopy/app/locale/ru_RU/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/ru_RU/LC_MESSAGE/messages.po +10199 -24
- psychopy/app/locale/sv_SE/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/sv_SE/LC_MESSAGE/messages.po +11441 -8747
- psychopy/app/locale/tr_TR/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/tr_TR/LC_MESSAGE/messages.po +11069 -9545
- psychopy/app/locale/zh_CN/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/zh_CN/LC_MESSAGE/messages.po +12085 -8268
- psychopy/app/locale/zh_TW/LC_MESSAGE/messages.mo +0 -0
- psychopy/app/locale/zh_TW/LC_MESSAGE/messages.po +11929 -8022
- psychopy/app/plugin_manager/dialog.py +12 -3
- psychopy/app/plugin_manager/packageIndex.py +303 -0
- psychopy/app/plugin_manager/packages.py +203 -63
- psychopy/app/plugin_manager/plugins.py +120 -240
- psychopy/app/preferencesDlg.py +6 -1
- psychopy/app/psychopyApp.py +16 -4
- psychopy/app/runner/runner.py +10 -2
- psychopy/app/runner/scriptProcess.py +8 -3
- psychopy/app/stdout/stdOutRich.py +11 -4
- psychopy/app/themes/icons.py +3 -0
- psychopy/app/utils.py +61 -0
- psychopy/data/experiment.py +133 -23
- psychopy/data/routine.py +12 -0
- psychopy/data/staircase.py +42 -20
- psychopy/data/trial.py +20 -12
- psychopy/data/utils.py +42 -2
- psychopy/demos/builder/Experiments/dragAndDrop/drag_and_drop.psyexp +22 -5
- psychopy/demos/builder/Experiments/dragAndDrop/stimuli/solutions.xlsx +0 -0
- psychopy/demos/builder/Experiments/stroopVoice/stroopVoice.psyexp +2 -12
- psychopy/demos/builder/Feature Demos/buttonBox/buttonBoxDemo.psyexp +3 -8
- psychopy/demos/builder/Feature Demos/movies/movie.psyexp +220 -0
- psychopy/demos/builder/Feature Demos/movies/readme.md +3 -0
- psychopy/demos/builder/Feature Demos/visualValidator/visualValidator.psyexp +1 -2
- psychopy/demos/builder/Hardware/camera/camera.psyexp +3 -16
- psychopy/demos/builder/Hardware/microphone/microphone.psyexp +3 -16
- psychopy/demos/coder/hardware/hdf5_extract.py +133 -0
- psychopy/event.py +20 -15
- psychopy/experiment/_experiment.py +86 -10
- psychopy/experiment/components/__init__.py +3 -10
- psychopy/experiment/components/_base.py +9 -20
- psychopy/experiment/components/button/__init__.py +1 -1
- psychopy/experiment/components/buttonBox/__init__.py +50 -54
- psychopy/experiment/components/camera/__init__.py +137 -359
- psychopy/experiment/components/keyboard/__init__.py +17 -24
- psychopy/experiment/components/microphone/__init__.py +61 -110
- psychopy/experiment/components/movie/__init__.py +2 -3
- psychopy/experiment/components/serialOut/__init__.py +192 -93
- psychopy/experiment/components/settings/__init__.py +45 -27
- psychopy/experiment/components/sound/__init__.py +82 -73
- psychopy/experiment/components/soundsensor/__init__.py +43 -80
- psychopy/experiment/devices.py +303 -0
- psychopy/experiment/exports.py +20 -18
- psychopy/experiment/flow.py +7 -0
- psychopy/experiment/loops.py +47 -29
- psychopy/experiment/monitor.py +74 -0
- psychopy/experiment/params.py +48 -10
- psychopy/experiment/plugins.py +28 -108
- psychopy/experiment/py2js_transpiler.py +1 -1
- psychopy/experiment/routines/__init__.py +1 -1
- psychopy/experiment/routines/_base.py +59 -24
- psychopy/experiment/routines/audioValidator/__init__.py +19 -155
- psychopy/experiment/routines/visualValidator/__init__.py +25 -25
- psychopy/hardware/__init__.py +20 -57
- psychopy/hardware/button.py +15 -2
- psychopy/hardware/camera/__init__.py +2237 -1394
- psychopy/hardware/joystick/__init__.py +1 -1
- psychopy/hardware/keyboard.py +5 -8
- psychopy/hardware/listener.py +4 -1
- psychopy/hardware/manager.py +75 -35
- psychopy/hardware/microphone.py +52 -6
- psychopy/hardware/monitor.py +144 -0
- psychopy/hardware/photometer/__init__.py +156 -117
- psychopy/hardware/serialdevice.py +16 -2
- psychopy/hardware/soundsensor.py +4 -1
- psychopy/iohub/devices/deviceConfigValidation.py +2 -1
- psychopy/iohub/devices/keyboard/darwin.py +8 -5
- psychopy/iohub/util/__init__.py +7 -8
- psychopy/localization/generateTranslationTemplate.py +208 -116
- psychopy/localization/messages.pot +4305 -3502
- psychopy/monitors/MonitorCenter.py +174 -74
- psychopy/plugins/__init__.py +6 -4
- psychopy/preferences/devices.py +80 -0
- psychopy/preferences/generateHints.py +2 -1
- psychopy/preferences/preferences.py +35 -11
- psychopy/scripts/psychopy-pkgutil.py +969 -0
- psychopy/scripts/psyexpCompile.py +1 -1
- psychopy/session.py +34 -38
- psychopy/sound/__init__.py +6 -260
- psychopy/sound/audioclip.py +164 -0
- psychopy/sound/backend_ptb.py +8 -0
- psychopy/sound/backend_pygame.py +10 -0
- psychopy/sound/backend_pysound.py +9 -0
- psychopy/sound/backends/__init__.py +0 -0
- psychopy/sound/microphone.py +3 -0
- psychopy/sound/sound.py +58 -0
- psychopy/tests/data/correctScript/python/correctNoiseStimComponent.py +1 -1
- psychopy/tests/data/duplicateHeaders.csv +2 -0
- psychopy/tests/test_app/test_builder/test_BuilderFrame.py +22 -7
- psychopy/tests/test_app/test_builder/test_CompileFromBuilder.py +0 -2
- psychopy/tests/test_data/test_utils.py +5 -1
- psychopy/tests/test_experiment/test_components/test_ButtonBoxComponent.py +22 -2
- psychopy/tests/test_hardware/test_ports.py +0 -12
- psychopy/tests/test_tools/test_stringtools.py +1 -1
- psychopy/tools/attributetools.py +12 -5
- psychopy/tools/fontmanager.py +17 -14
- psychopy/tools/movietools.py +43 -2
- psychopy/tools/stringtools.py +33 -8
- psychopy/tools/versionchooser.py +1 -1
- psychopy/validation/audio.py +5 -1
- psychopy/validation/visual.py +5 -1
- psychopy/visual/basevisual.py +8 -7
- psychopy/visual/circle.py +2 -2
- psychopy/visual/image.py +29 -109
- psychopy/visual/movies/__init__.py +1800 -313
- psychopy/visual/polygon.py +4 -0
- psychopy/visual/shape.py +2 -2
- psychopy/visual/window.py +34 -11
- psychopy/voicekey/__init__.py +41 -669
- psychopy/voicekey/labjack_vks.py +7 -48
- psychopy/voicekey/parallel_vks.py +7 -42
- psychopy/voicekey/vk_tools.py +114 -263
- {psychopy-2025.1.1.dist-info → psychopy-2025.2.1.dist-info}/METADATA +17 -11
- {psychopy-2025.1.1.dist-info → psychopy-2025.2.1.dist-info}/RECORD +216 -184
- {psychopy-2025.1.1.dist-info → psychopy-2025.2.1.dist-info}/WHEEL +1 -1
- psychopy/visual/movies/players/__init__.py +0 -62
- psychopy/visual/movies/players/ffpyplayer_player.py +0 -1401
- psychopy/voicekey/demo_vks.py +0 -12
- psychopy/voicekey/signal.py +0 -42
- {psychopy-2025.1.1.dist-info → psychopy-2025.2.1.dist-info}/entry_points.txt +0 -0
- {psychopy-2025.1.1.dist-info → psychopy-2025.2.1.dist-info}/licenses/LICENSE +0 -0
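Most of this release's churn is in the translation catalogs; the largest code change is the rewrite of psychopy/hardware/camera/__init__.py (+2237 -1394), excerpted below. The old `CameraInterface`, `CameraInterfaceFFmpeg`, and `CameraInterfaceOpenCV` classes are replaced by a single `CameraDevice(BaseDevice)` class. A minimal usage sketch assembled only from the signatures visible in this diff; the frame-reading API is not fully shown here, so anything beyond the constructor, `getAvailableDevices()`, `open()`, and `close()` would be an assumption:

```python
# Sketch based on signatures visible in this diff; not an official example.
from psychopy.hardware.camera import CameraDevice

# enumerate attached cameras; each entry is a dict of constructor parameters
profiles = CameraDevice.getAvailableDevices(best=True)

# open a camera by integer index (a device-name string also works, per __init__)
cam = CameraDevice(
    device=0,
    captureLib='ffpyplayer',   # or 'opencv'
    frameSize=(640, 480),
    frameRate=30.0,
    bufferSecs=5.0)            # seconds of frames to buffer in memory
cam.open()    # opens the stream and registers it in _openCaptureInterfaces
# ... acquire frames ...
cam.close()   # release the capture interface
```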
--- a/psychopy/hardware/camera/__init__.py
+++ b/psychopy/hardware/camera/__init__.py
@@ -60,9 +60,13 @@ import threading
 import queue
 import time
 import numpy as np
+import ctypes
+import collections

+from psychopy import core
 from psychopy.constants import NOT_STARTED
 from psychopy.hardware import DeviceManager
+from psychopy.hardware.base import BaseDevice
 from psychopy.visual.movies.frame import MovieFrame, NULL_MOVIE_FRAME_INFO
 from psychopy.sound.microphone import Microphone
 from psychopy.hardware.microphone import MicrophoneDevice
@@ -71,7 +75,6 @@ import psychopy.tools.movietools as movietools
 import psychopy.logging as logging
 from psychopy.localization import _translate

-
 # ------------------------------------------------------------------------------
 # Constants
 #
@@ -79,10 +82,11 @@ from psychopy.localization import _translate
 VIDEO_DEVICE_ROOT_LINUX = '/dev'
 CAMERA_UNKNOWN_VALUE = u'Unknown'  # fields where we couldn't get a value
 CAMERA_NULL_VALUE = u'Null'  # fields where we couldn't get a value
+
 # camera operating modes
-
-
-# CAMERA_MODE_PHOTO = u'photo'
+CAMERA_MODE_VIDEO = u'video'
+CAMERA_MODE_CV = u'cv'
+# CAMERA_MODE_PHOTO = u'photo'  # planned

 # camera status
 CAMERA_STATUS_OK = 'ok'
@@ -98,10 +102,10 @@ CAMERA_API_UNKNOWN = u'Unknown'  # unknown API
 CAMERA_API_NULL = u'Null'  # empty field

 # camera libraries for playback nad recording
-CAMERA_LIB_FFPYPLAYER = u'
-CAMERA_LIB_OPENCV = u'
-CAMERA_LIB_UNKNOWN = u'
-CAMERA_LIB_NULL = u'
+CAMERA_LIB_FFPYPLAYER = u'ffpyplayer'
+CAMERA_LIB_OPENCV = u'opencv'
+CAMERA_LIB_UNKNOWN = u'unknown'
+CAMERA_LIB_NULL = u'null'

 # special values
 CAMERA_FRAMERATE_NOMINAL_NTSC = '30.000030'
@@ -137,6 +141,13 @@ standardResolutions = {
     'dci': (4096, 2160)
 }

+# ------------------------------------------------------------------------------
+# Keep track of open capture interfaces so we can close them at shutdown in the
+# event that the user forrgets or the program crashes.
+#
+
+_openCaptureInterfaces = set()
+

 # ------------------------------------------------------------------------------
 # Exceptions
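The hunk above only introduces the registry itself; later in this diff, `open()` adds each device to it. How the shutdown hook that drains the set is wired is not visible in this excerpt; a typical pattern, sketched here as an assumption rather than psychopy's actual code:

```python
# Hypothetical sketch of the cleanup the comment above describes; the
# shutdown-hook wiring is not shown in this excerpt of the diff.
import atexit

_openCaptureInterfaces = set()

def _closeAllCaptureInterfaces():
    # close any capture interfaces left open when the interpreter exits
    for cap in list(_openCaptureInterfaces):
        try:
            cap.close()
        except Exception:
            pass  # suppress cleanup errors during shutdown
    _openCaptureInterfaces.clear()

atexit.register(_closeAllCaptureInterfaces)
```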
@@ -400,121 +411,372 @@ class CameraInfo:
         )


-class CameraInterface:
-    """
-
-    This interface handles the opening, closing, and reading of camera streams.
-    Subclasses provide a specific implementation for a camera interface.
+class CameraDevice(BaseDevice):
+    """Class providing an interface with a camera attached to the system.

-
-    returning immediately with the same data as before if no new frame data is
-    available. This is to ensure that the main thread is not blocked by the
-    camera interface and can continue to process other events.
+    This interface handles the opening, closing, and reading of camera streams.

     Parameters
     ----------
     device : Any
-        Camera device to open a stream with. The type of this value is
-
-
-
+        Camera device to open a stream with. The type of this value is dependent
+        on the platform and the camera library being used. This can be an integer
+        index, a string representing the camera device name.
+    captureLib : str
+        Camera library to use for opening the camera stream. This can be either
+        'ffpyplayer' or 'opencv'. If `None`, the default recommend library is
+        used.
+    frameSize : tuple
+        Frame size of the camera stream. This is a tuple of the form
+        `(width, height)`.
+    frameRate : float
+        Frame rate of the camera stream. This is the number of frames per
+        second that the camera will capture. If `None`, the default frame rate
+        is used. The default value is 30.0.
+    pixelFormat : str or None
+        Pixel format of the camera stream. This is the format in which the
+        camera will capture frames. If `None`, the default pixel format is used.
+        The default value is `None`.
+    codecFormat : str or None
+        Codec format of the camera stream. This is the codec that will be used
+        to encode the camera stream. If `None`, the default codec format is
+        used. The default value is `None`.
+    captureAPI: str
+        Camera API to use for opening the camera stream. This can be either
+        'AVFoundation', 'DirectShow', or 'Video4Linux2'. If `None`, the default
+        camera API is used based on the platform. The default value is `None`.
+    decoderOpts : dict or None
+        Decoder options for the camera stream. This is a dictionary of options
+        that will be passed to the decoder when opening the camera stream. If
+        `None`, the default decoder options are used. The default value is an
+        empty dictionary.
+    bufferSecs : float
+        Number of seconds to buffer frames from the capture stream. This allows
+        frames to be buffered in memory until they are needed. This allows
+        the camera stream to be read asynchronously and prevents frames from
+        being dropped if the main thread is busy. The default value is 5.0
+        seconds.

     """
-
-
-
-
-
-
-
-
-
-
-
+    def __init__(self, device, captureLib='ffpyplayer', frameSize=(640, 480),
+                 frameRate=30.0, pixelFormat=None, codecFormat=None,
+                 captureAPI=None, decoderOpts=None, bufferSecs=5.0):
+
+        BaseDevice.__init__(self)
+
+        # transform some of the params
+        pixelFormat = pixelFormat if pixelFormat is not None else ''
+        codecFormat = codecFormat if codecFormat is not None else ''
+
+        # if device is an integer, get name from index
+        foundProfile = None
+        if isinstance(device, int):
+            for profile in self.getAvailableDevices(False):
+                if profile['device'] == device:
+                    foundProfile = profile
+                    device = profile['deviceName']
+                    break
+        elif isinstance(device, str):
+            # if device is a string, use it as the device name
+            for profile in self.getAvailableDevices(False):
+                # find a device which best matches the settings
+                if profile['deviceName'] != device:
+                    continue
+
+                # check if all the other params match
+                paramsMatch = all([
+                    profile['deviceName'] == device,
+                    profile['captureLib'] == captureLib if captureLib else True,
+                    profile['frameSize'] == frameSize if frameSize else True,
+                    profile['frameRate'] == frameRate if frameRate else True,
+                    profile['pixelFormat'] == pixelFormat if pixelFormat else True,
+                    profile['codecFormat'] == codecFormat if codecFormat else True,
+                    profile['captureAPI'] == captureAPI if captureAPI else True
+                ])
+
+                if not paramsMatch:
+                    continue
+
+                foundProfile = profile
+                device = profile['device']
+
+                break
+
+        if foundProfile is None:
+            raise CameraNotFoundError(
+                "Cannot find camera with index or name '{}'.".format(device))
+
         self._device = device
-
+
+        # camera settings from profile
+        self._frameSize = foundProfile['frameSize']
+        self._frameRate = foundProfile['frameRate']
+        self._pixelFormat = foundProfile['pixelFormat']
+        self._codecFormat = foundProfile['codecFormat']
+        self._captureLib = foundProfile['captureLib']
+        self._captureAPI = foundProfile['captureAPI']
+
+        # capture interface
+        self._capture = None  # camera stream capture object
+        self._decoderOpts = decoderOpts if decoderOpts is not None else {}
+        self._bufferSecs = bufferSecs  # number of seconds to buffer frames
+        self._absRecStreamStartTime = -1.0  # absolute recording start time
+        self._absRecExpStartTime = -1.0
+
+        # stream properties
+        self._metadata = {}  # metadata about the camera stream
+
+        # recording properties
+        self._frameStore = []  # store frames read from the camera stream
+        self._isRecording = False  # `True` if the camera is recording and frames will be captured
+
+        # camera API to use with FFMPEG
+        if captureAPI is None:
+            if platform.system() == 'Windows':
+                self._cameraAPI = CAMERA_API_DIRECTSHOW
+            elif platform.system() == 'Darwin':
+                self._cameraAPI = CAMERA_API_AVFOUNDATION
+            elif platform.system() == 'Linux':
+                self._cameraAPI = CAMERA_API_VIDEO4LINUX2
+            else:
+                raise RuntimeError(
+                    "Unsupported platform: {}. Supported platforms are: {}".format(
+                        platform.system(), ', '.join(self._supportedPlatforms)))
+        else:
+            self._cameraAPI = captureAPI
+
+        # store device info
+        profile = self.getDeviceProfile()
+        if profile:
+            self.info = CameraInfo(
+                name=profile['deviceName'],
+                frameSize=profile['frameSize'],
+                frameRate=profile['frameRate'],
+                pixelFormat=profile['pixelFormat'],
+                codecFormat=profile['codecFormat'],
+                cameraLib=profile['captureLib'],
+                cameraAPI=profile['captureAPI']
+            )
+        else:
+            self.info = CameraInfo()
+
+    def isSameDevice(self, other):
+        """
+        Determine whether this object represents the same physical device as a given other object.
+
+        Parameters
+        ----------
+        other : BaseDevice, dict
+            Other device object to compare against, or a dict of params.
+
+        Returns
+        -------
+        bool
+            True if the two objects represent the same physical device
+        """
+        if isinstance(other, CameraDevice):
+            return other._device == self._device
+        elif isinstance(other, Camera):
+            return getattr(other, "_capture", None) == self
+        elif isinstance(other, dict) and "device" in other:
+            return other['deviceName'] == self._device
+        else:
+            return False
+
+    @staticmethod
+    def getAvailableDevices(best=True):
+        """
+        Get all available devices of this type.
+
+        Parameters
+        ----------
+        best : bool
+            If True, return only the best available frame rate/resolution for each device, rather
+            than returning all. Best available spec is chosen as the highest resolution with a
+            frame rate above 30fps (or just highest resolution, if none are over 30fps).
+
+        Returns
+        -------
+        list[dict]
+            List of dictionaries containing the parameters needed to initialise each device.
+        """
+        profiles = []
+        # iterate through cameras
+        for cams in CameraDevice.getCameras().values():
+            # if requested, filter for best spec for each device
+            if best:
+                allCams = cams.copy()
+                lastBest = {
+                    'pixels': 0,
+                    'frameRate': 0
+                }
+                bestResolution = None
+                minFrameRate = max(28, min([cam.frameRate for cam in allCams]))
+                for cam in allCams:
+                    # summarise spec of this cam
+                    current = {
+                        'pixels': cam.frameSize[0] * cam.frameSize[1],
+                        'frameRate': cam.frameRate
+                    }
+                    # store best frame rate as a fallback
+                    if bestResolution is None or current['pixels'] > lastBest['pixels']:
+                        bestResolution = cam
+                    # if it's better than the last, set it as the only cam
+                    if current['pixels'] > lastBest['pixels'] and current['frameRate'] >= minFrameRate:
+                        cams = [cam]
+                # if no cameras meet frame rate requirement, use one with best resolution
+                cams = [bestResolution]
+            # iterate through all (possibly filtered) cameras
+            for cam in cams:
+                # construct a dict profile from the CameraInfo object
+                profiles.append({
+                    'deviceName': cam.name,
+                    'deviceClass': "psychopy.hardware.camera.CameraDevice",
+                    'device': cam.index,
+                    'captureLib': cam.cameraLib,
+                    'frameSize': cam.frameSize,
+                    'frameRate': cam.frameRate,
+                    'pixelFormat': cam.pixelFormat,
+                    'codecFormat': cam.codecFormat,
+                    'captureAPI': cam.cameraAPI
+                })
+
+        return profiles

     @staticmethod
-    def getCameras():
+    def getCameras(cameraLib=None):
         """Get a list of devices this interface can open.

+        Parameters
+        ----------
+        cameraLib : str or None
+            Camera library to use for opening the camera stream. This can be
+            either 'ffpyplayer' or 'opencv'. If `None`, the default recommend
+            library is used.
+
         Returns
         -------
-
+        dict
             List of objects which represent cameras that can be opened by this
             interface. Pass any of these values to `device` to open a stream.

         """
-
+        if cameraLib is None:
+            cameraLib = CAMERA_LIB_FFPYPLAYER
+
+        if cameraLib == CAMERA_LIB_FFPYPLAYER:
+            global _cameraGetterFuncTbl
+            systemName = platform.system()  # get the system name

+            # lookup the function for the given platform
+            getCamerasFunc = _cameraGetterFuncTbl.get(systemName, None)
+            if getCamerasFunc is None:  # if unsupported
+                raise OSError(
+                    "Cannot get cameras, unsupported platform '{}'.".format(
+                        systemName))
+
+            return getCamerasFunc()
+
+    def _clearFrameStore(self):
+        """Clear the frame store.
+        """
+        self._frameStore.clear()
+
     @property
     def device(self):
         """Camera device this interface is using (`Any`).
+
+        This is the camera device that was passed to the constructor. It may be
+        a `CameraInfo` object or a string representing the camera device.
+
         """
         return self._device

     @property
-    def
-        """
-
+    def cameraLib(self):
+        """Camera library this interface is using (`str`).
+
+        This is the camera library that was passed to the constructor. It may be
+        'ffpyplayer' or 'opencv'. If `None`, the default recommend library is
+        used.
+
         """
-        return self.
-
+        return self.info.captureLib if self.info else None
+
     @property
-    def
-        """
-
-
-        return
-
-
-        """The last frame read from the camera. If `None`, no frames have been
-        read yet.
+    def frameSize(self):
+        """Frame size of the camera stream (`tuple`).
+
+        This is the frame size of the camera stream. It is a tuple of the form
+        `(width, height)`. If the camera stream is not open, this will return
+        `None`.
+
         """
-        return self.
+        return self.info.frameSize if self.info else None

-
-
+    @property
+    def frameRate(self):
+        """Frame rate of the camera stream (`float`).
+
+        This is the frame rate of the camera stream. If the camera stream is
+        not open, this will return `None`.

-        Returns
-        -------
-        bool
-            `True` if the media player is available.
-
         """
-        return
+        return self.info.frameRate if self.info else None

-
-
+    @property
+    def frameInterval(self):
+        """Frame interval of the camera stream (`float`).
+
+        This is the time between frames in seconds. It is calculated as
+        `1.0 / frameRate`. If the camera stream is not open, this will return
+        `None`.
+
         """
-
-
-
-
-
-
-
-
-
-
+        return self._frameInterval
+
+    @property
+    def pixelFormat(self):
+        """Pixel format of the camera stream (`str`).
+
+        This is the pixel format of the camera stream. If the camera stream is
+        not open, this will return `None`.
+
         """
-        return
+        return self.info.pixelFormat if self.info else None

-
-
+    @property
+    def codecFormat(self):
+        """Codec format of the camera stream (`str`).
+
+        This is the codec format of the camera stream. If the camera stream is
+        not open, this will return `None`.
+
         """
-
-
-
-
+        return self.info.codecFormat if self.info else None
+
+    @property
+    def cameraAPI(self):
+        """Camera API used to access the camera stream (`str`).
+
+        This is the camera API used to access the camera stream. If the camera
+        stream is not open, this will return `None`.
+
         """
-
-
-
-
+        return self.info.cameraAPI if self.info else None
+
+    @property
+    def bufferSecs(self):
+        """Number of seconds to buffer frames from the camera stream (`float`).
+
+        This is the number of seconds to buffer frames from the camera stream.
+        This allows frames to be buffered in memory until they are needed. This
+        allows the camera stream to be read asynchronously and prevents frames
+        from being dropped if the main thread is busy.
+
         """
-
-
+        return self._bufferSecs
+
     def getMetadata(self):
         """Get metadata about the camera stream.

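Each entry returned by `getAvailableDevices()` above is a plain dict whose keys (other than `deviceName` and `deviceClass`) line up with the `CameraDevice` constructor parameters, so a profile can be passed straight back in. An illustrative shape; the values below are invented for the example:

```python
# Profile shape matching the keys built in getAvailableDevices(); the values
# here are made up -- real ones depend on the attached hardware.
profile = {
    'deviceName': 'HD WebCam',
    'deviceClass': 'psychopy.hardware.camera.CameraDevice',
    'device': 0,
    'captureLib': 'ffpyplayer',
    'frameSize': (1280, 720),
    'frameRate': 30.0,
    'pixelFormat': 'yuyv422',
    'codecFormat': '',
    'captureAPI': 'DirectShow',
}

# drop the two informational keys and splat the rest into the constructor
initKwargs = {k: v for k, v in profile.items()
              if k not in ('deviceName', 'deviceClass')}
# cam = CameraDevice(**initKwargs)
```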
@@ -525,285 +787,136 @@ class CameraInterface:
             empty dictionary if no metadata is available.

         """
-
-
-    def _enqueueFrame(self):
-        """Enqueue a frame from the camera stream.
-        """
-        pass
-
-    def update(self):
-        """Update the camera stream.
-        """
-        pass
-
-    def getRecentFrame(self):
-        """Get the most recent frame from the camera stream.
-
-        Returns
-        -------
-        numpy.ndarray
-            Most recent frame from the camera stream. Returns `None` if no
-            frames are available.
-
-        """
-        return NULL_MOVIE_FRAME_INFO
-
-
-class CameraInterfaceFFmpeg(CameraInterface):
-    """Camera interface using FFmpeg (ffpyplayer) to open and read camera
-    streams.
-
-    Parameters
-    ----------
-    device : CameraInfo
-        Camera device to open a stream with. Calling `start()` will open a
-        stream with this device. Afterwards, `getRecentFrame()` can be called
-        to get the most recent frame from the camera.
-    mic : MicrophoneInterface or None
-        Microphone interface to use for audio recording. If `None`, no audio
-        recording is performed.
+        if self._capture is None:
+            return {}

-
-
-
-    def __init__(self, device, mic=None):
-        super().__init__(device=device)
-
-        self._bufferSecs = 0.5  # number of seconds to buffer
-        self._cameraInfo = device
-        self._mic = mic  # microphone interface
-        self._frameQueue = queue.Queue()
-        self._enableEvent = threading.Event()
-        self._enableEvent.clear()
-        self._exitEvent = threading.Event()
-        self._exitEvent.clear()
-        self._syncBarrier = None
-        self._recordBarrier = None  # created in `open()`
-        self._playerThread = None
-
-    def _assertMediaPlayer(self):
-        return self._playerThread is not None
+        # get metadata from the capture stream
+        return self._capture.get_metadata() if self._capture else {}

-
-
-        """
-
-
-
-
-
-
-        -------
-        list
-            List of objects which represent cameras that can be opened by this
-            interface. Pass any of these values to `device` to open a stream.
-
+    @property
+    def frameSizeBytes(self):
+        """Size of the image in bytes (`int`).
+
+        This is the size of the image in bytes. It is calculated as
+        `width * height * 3`, where `width` and `height` are the dimensions of
+        the camera stream. If the camera stream is not open, this will return
+        `0`.
+
         """
-
-
-
-
-
-        if getCamerasFunc is None:  # if unsupported
-            raise OSError(
-                "Cannot get cameras, unsupported platform '{}'.".format(
-                    systemName))
-
-        return getCamerasFunc()
-
+        if self._frameSize is None:
+            return 0
+
+        return self._frameSizeBytes
+
     @property
-    def
-        """
+    def frameCount(self):
+        """Number of frames read from the camera stream (`int`).
+
+        This is the number of frames read from the camera stream since the last
+        time the camera was opened. If the camera stream is not open, this will
+        return `0`.
+
         """
-        return self.
+        return self._frameCount

     @property
-    def
-        """
+    def streamTime(self):
+        """Current stream time in seconds (`float`).
+
+        This is the current stream time in seconds. It is calculated as the
+        difference between the current time and the absolute recording start
+        time. If the camera stream is not open, this will return `-1.0`.
+
         """
-
+        if self._cameraAPI == CAMERA_API_AVFOUNDATION:
+            return time.time() if self._capture is not None else -1.0
+        else:
+            return self._capture.get_pts() if self._capture is not None else -1.0
+
+    def _toNumpyView(self, frame):
+        """Convert a frame to a Numpy view.

-
-
-
+        This function converts a frame to a Numpy view. The frame is returned as
+        a Numpy array. The resulting array will be in the correct format to
+        upload to OpenGL as a texture.

-
-
-
+        Parameters
+        ----------
+        frame : Any
+            The frame to convert.

-
-
+        Returns
+        -------
+        numpy.ndarray
+            The converted frame in RGB format.

-    def isOpen(self):
-        """Check if the camera stream is open (`bool`).
         """
-
-        return self._playerThread.is_alive()
-
-        return False
+        return np.asarray(frame, dtype=np.uint8)

-
-
-
-
-
-
-
-
-
+    # --------------------------------------------------------------------------
+    # Platform-specific camera frame aquisition methods
+    #
+    # These methods are used to open, close, and read frames from the camera
+    # stream. They are platform-specific and are called depending on the
+    # camera library being used.
+    #
+
+    # --------------------------------------------------------------------------
+    # FFPyPlayer-specific methods
+    #
+
+    def _openFFPyPlayer(self):
+        """Open the camera stream using FFmpeg (ffpyplayer).

-
-
-        def _frameGetterAsync(videoCapture, frameQueue, exitEvent, recordEvent,
-                              warmUpBarrier, recordingBarrier, audioCapture):
-            """Get frames from the camera stream asynchronously.
-
-            Parameters
-            ----------
-            videoCapture : ffpyplayer.player.MediaPlayer
-                FFmpeg media player object. This object will be under direct
-                control of this function.
-            frameQueue : queue.Queue
-                Queue to put frames into. The queue has an unlimited size, so
-                be careful with memory use. This queue should be flushed when
-                camera thread is paused.
-            exitEvent : threading.Event
-                Event used to signal the thread to stop.
-            recordEvent : threading.Event
-                Event used to signal the thread to pass frames along to the main
-                thread.
-            warmUpBarrier : threading.Barrier
-                Barrier which is used hold until camera capture is ready.
-            recordingBarrier : threading.Barrier
-                Barrier which is used to synchronize audio and video recording.
-                This ensures that the audio device is ready before buffering
-                frames captured by the camera.
-            audioCapture : psychopy.sound.microphone.Microphone or None
-                Microphone object to use for audio capture. This will be used to
-                synchronize the audio and video streams. If `None`, no audio
-                will be captured.
-
-            """
-            # warmup the stream, wait for metadata
-            ptsStart = 0.0  # may be used in the future
-            while True:
-                frame, val = videoCapture.get_frame()
-                if frame is not None:
-                    ptsStart = videoCapture.get_pts()
-                    break
-
-                time.sleep(0.001)
-
-            # if we have a valid frame, determine the polling rate
-            metadata = videoCapture.get_metadata()
-            numer, divisor = metadata['frame_rate']
-
-            # poll interval is half the frame period, this makes sure we don't
-            # miss frames while not wasting CPU cycles
-            pollInterval = (1.0 / float(numer / divisor)) * 0.5
-
-            # holds main-thread execution until its ready for frames
-            # frameQueue.put((frame, val, metadata))  # put the first frame
-
-            warmUpBarrier.wait()  # wait for main thread to be ready
-
-            # start capturing frames in background thread
-            isRecording = False
-            lastAbsTime = -1.0  # presentation timestamp of the last frame
-            while not exitEvent.is_set():  # quit if signaled
-                # pull a frame from the stream, we keep this running 'hot' so
-                # that we don't miss frames, we just discard them if we don't
-                # need them
-                frame, val = videoCapture.get_frame(force_refresh=False)
-
-                if val == 'eof':  # thread should exit if stream is done
-                    break
-                elif val == 'paused':
-                    continue
-                elif frame is None:
-                    continue
-                else:
-                    # don't queue frames unless they are newer than the last
-                    if isRecording:
-                        thisFrameAbsTime = videoCapture.get_pts()
-                        if lastAbsTime < thisFrameAbsTime:
-                            frameQueue.put((frame, val, metadata))
-                            lastAbsTime = thisFrameAbsTime
-
-                if recordEvent.is_set() and not isRecording:
-                    if audioCapture is not None:
-                        audioCapture.start(waitForStart=1)
-                    recordingBarrier.wait()
-                    isRecording = True
-                elif not recordEvent.is_set() and isRecording:
-                    if audioCapture is not None:
-                        audioCapture.stop(blockUntilStopped=1)
-                    recordingBarrier.wait()
-                    isRecording = False
-
-                if not isRecording:
-                    time.sleep(pollInterval)
-                    continue
-
-                if audioCapture is not None:
-                    if audioCapture.isRecording:
-                        audioCapture.poll()
-
-                time.sleep(pollInterval)
-
-            videoCapture.close_player()
-
-            if audioCapture is not None:
-                audioCapture.stop(blockUntilStopped=1)
-
-            # thread is dead when we get here
+        This method should be called to open the camera stream using FFmpeg.
+        It should initialize the camera and prepare it for reading frames.

+        """
         # configure the camera stream reader
         ff_opts = {}  # ffmpeg options
         lib_opts = {}  # ffpyplayer options
         _camera = CAMERA_NULL_VALUE
         _frameRate = CAMERA_NULL_VALUE
-        _cameraInfo = self._cameraInfo

         # setup commands for FFMPEG
-        if
+        if self._captureAPI == CAMERA_API_DIRECTSHOW:  # windows
             ff_opts['f'] = 'dshow'
-            _camera = 'video={}'.format(
-            _frameRate =
-            if
-                ff_opts['pixel_format'] =
-            if
-                ff_opts['vcodec'] =
-        elif
+            _camera = 'video={}'.format(self.info.name)
+            _frameRate = self._frameRate
+            if self._pixelFormat:
+                ff_opts['pixel_format'] = self._pixelFormat
+            if self._codecFormat:
+                ff_opts['vcodec'] = self._codecFormat
+        elif self._captureAPI == CAMERA_API_AVFOUNDATION:  # darwin
             ff_opts['f'] = 'avfoundation'
-            ff_opts['i'] = _camera = self.
+            ff_opts['i'] = _camera = self._device

             # handle pixel formats using FourCC
             global pixelFormatTbl
-            ffmpegPixFmt = pixelFormatTbl.get(
+            ffmpegPixFmt = pixelFormatTbl.get(self._pixelFormat, None)

             if ffmpegPixFmt is None:
                 raise FormatNotFoundError(
                     "Cannot find suitable FFMPEG pixel format for '{}'. Try a "
                     "different format or camera.".format(
-
+                        self._pixelFormat))

-
+            self._pixelFormat = ffmpegPixFmt

             # this needs to be exactly specified if using NTSC
-            if math.isclose(CAMERA_FRAMERATE_NTSC,
+            if math.isclose(CAMERA_FRAMERATE_NTSC, self._frameRate):
                 _frameRate = CAMERA_FRAMERATE_NOMINAL_NTSC
             else:
-                _frameRate = str(
+                _frameRate = str(self._frameRate)

             # need these since hardware acceleration is not possible on Mac yet
             lib_opts['fflags'] = 'nobuffer'
             lib_opts['flags'] = 'low_delay'
-            lib_opts['pixel_format'] =
-
-            ff_opts['
-
+            lib_opts['pixel_format'] = self._pixelFormat
+            lib_opts['use_wallclock_as_timestamps'] = '1'
+            # ff_opts['framedrop'] = True
+            # ff_opts['fast'] = True
+        elif self._captureAPI == CAMERA_API_VIDEO4LINUX2:
             raise OSError(
                 "Sorry, camera does not support Linux at this time. However, "
                 "it will in future versions.")
@@ -812,610 +925,359 @@ class CameraInterfaceFFmpeg(CameraInterface):
|
|
|
812
925
|
raise RuntimeError("Unsupported camera API specified.")
|
|
813
926
|
|
|
814
927
|
# set library options
|
|
815
|
-
camWidth =
|
|
816
|
-
|
|
817
|
-
|
|
818
|
-
|
|
819
|
-
|
|
928
|
+
camWidth, camHeight = self._frameSize
|
|
929
|
+
logging.debug(
|
|
930
|
+
"Using camera mode {}x{} at {} fps".format(
|
|
931
|
+
camWidth, camHeight, _frameRate))
|
|
932
|
+
|
|
933
|
+
# configure the real-time buffer size, we compute using RGB8 since this
|
|
934
|
+
# is uncompressed and represents the largest size we can expect
|
|
935
|
+
self._frameSizeBytes = int(camWidth * camHeight * 3)
|
|
936
|
+
framesToBufferCount = int(self._bufferSecs * self._frameRate)
|
|
937
|
+
_bufferSize = int(self._frameSizeBytes * framesToBufferCount)
|
|
938
|
+
logging.debug(
|
|
939
|
+
"Setting real-time buffer size to {} bytes "
|
|
940
|
+
"for {} seconds of video ({} frames @ {} fps)".format(
|
|
941
|
+
_bufferSize,
|
|
942
|
+
self._bufferSecs,
|
|
943
|
+
framesToBufferCount,
|
|
944
|
+
self._frameRate)
|
|
945
|
+
)
|
|
820
946
|
|
|
821
947
|
# common settings across libraries
|
|
822
|
-
|
|
823
|
-
|
|
948
|
+
ff_opts['low_delay'] = True # low delay for real-time playback
|
|
949
|
+
# ff_opts['framedrop'] = True
|
|
950
|
+
# ff_opts['use_wallclock_as_timestamps'] = True
|
|
951
|
+
ff_opts['fast'] = True
|
|
952
|
+
# ff_opts['sync'] = 'ext'
|
|
953
|
+
ff_opts['rtbufsize'] = str(_bufferSize) # set the buffer size
|
|
954
|
+
ff_opts['an'] = True
|
|
955
|
+
# ff_opts['infbuf'] = True # enable infinite buffering
|
|
956
|
+
|
|
957
|
+
# for ffpyplayer, we need to set the video size and framerate
|
|
958
|
+
lib_opts['video_size'] = '{width}x{height}'.format(
|
|
959
|
+
width=camWidth, height=camHeight)
|
|
824
960
|
lib_opts['framerate'] = str(_frameRate)
|
|
825
|
-
|
|
826
|
-
|
|
827
|
-
self._recordBarrier = threading.Barrier(2)
|
|
961
|
+
ff_opts['loglevel'] = 'error'
|
|
962
|
+
ff_opts['nostdin'] = True
|
|
828
963
|
|
|
829
964
|
# open the media player
|
|
830
965
|
from ffpyplayer.player import MediaPlayer
|
|
831
|
-
|
|
966
|
+
self._capture = MediaPlayer(
|
|
967
|
+
_camera,
|
|
968
|
+
ff_opts=ff_opts,
|
|
969
|
+
lib_opts=lib_opts)
|
|
832
970
|
|
|
833
|
-
#
|
|
834
|
-
self.
|
|
835
|
-
|
|
836
|
-
|
|
837
|
-
|
|
838
|
-
|
|
839
|
-
|
|
840
|
-
|
|
841
|
-
|
|
842
|
-
|
|
843
|
-
|
|
844
|
-
|
|
845
|
-
|
|
846
|
-
|
|
971
|
+
# compute the frame interval, needed for generating timestamps
|
|
972
|
+
self._frameInterval = 1.0 / self._frameRate
|
|
973
|
+
|
|
974
|
+
# get metadata from the capture stream
|
|
975
|
+
tStart = time.time() # start time for the stream
|
|
976
|
+
metadataTimeout = 5.0 # timeout for metadata retrieval
|
|
977
|
+
while time.time() - tStart < metadataTimeout: # wait for metadata
|
|
978
|
+
streamMetadata = self._capture.get_metadata()
|
|
979
|
+
if streamMetadata['src_vid_size'] != (0, 0):
|
|
980
|
+
break
|
|
981
|
+
time.sleep(0.001) # wait for metadata to be available
|
|
982
|
+
else:
|
|
983
|
+
msg = (
|
|
984
|
+
"Failed to obtain stream metadata (possibly caused by a device "
|
|
985
|
+
"already in use by other application)."
|
|
986
|
+
)
|
|
987
|
+
logging.error(msg)
|
|
988
|
+
raise CameraNotReadyError(msg)
|
|
989
|
+
|
|
990
|
+
self._metadata = streamMetadata # store the metadata for later use
|
|
991
|
+
|
|
992
|
+
# check if the camera metadata matches the requested settings
|
|
993
|
+
if streamMetadata['src_vid_size'] != tuple(self._frameSize):
|
|
994
|
+
raise CameraFrameSizeNotSupportedError(
|
|
995
|
+
"Camera does not support the requested frame size "
|
|
996
|
+
"{size}. Supported sizes are: {supportedSizes}".format(
|
|
997
|
+
size=self._frameSize,
|
|
998
|
+
supportedSizes=streamMetadata['src_vid_size']))
|
|
999
|
+
|
|
1000
|
+
# pause the camera stream
|
|
1001
|
+
self._capture.set_pause(True)
|
|
847
1002
|
|
|
848
|
-
|
|
849
|
-
|
|
1003
|
+
def _closeFFPyPlayer(self):
|
|
1004
|
+
"""Close the camera stream opened with FFmpeg (ffpyplayer).
|
|
1005
|
+
|
|
1006
|
+
This method should be called to close the camera stream and release any
|
|
1007
|
+
resources associated with it.
|
|
850
1008
|
|
|
851
|
-
|
|
852
|
-
|
|
1009
|
+
"""
|
|
1010
|
+
if self._capture is not None:
|
|
1011
|
+
# self._capture.set_pause(True) # pause the stream
|
|
1012
|
+
self._capture.close_player()
|
|
853
1013
|
|
|
1014
|
+
def _getFramesFFPyPlayer(self):
|
|
1015
|
+
"""Get the most recent frames from the camera stream opened with FFmpeg
|
|
1016
|
+
(ffpyplayer).
|
|
1017
|
+
|
|
854
1018
|
Returns
|
|
855
1019
|
-------
|
|
856
|
-
|
|
857
|
-
|
|
858
|
-
|
|
1020
|
+
numpy.ndarray
|
|
1021
|
+
Most recent frames from the camera stream. Returns `None` if no
|
|
1022
|
+
frames are available.
|
|
859
1023
|
|
|
860
1024
|
"""
|
|
861
|
-
self.
|
|
1025
|
+
if self._capture is None:
|
|
1026
|
+
raise PlayerNotAvailableError(
|
|
1027
|
+
"Camera stream is not open. Call `open()` first.")
|
|
1028
|
+
|
|
1029
|
+
# read all buffered frames from the camera stream until we get nothing
|
|
1030
|
+
recentFrames = []
|
|
1031
|
+
while 1:
|
|
1032
|
+
frame, status = self._capture.get_frame()
|
|
1033
|
+
|
|
1034
|
+
if status == CAMERA_STATUS_EOF or status == CAMERA_STATUS_PAUSED:
|
|
1035
|
+
break
|
|
862
1036
|
|
|
863
|
-
|
|
864
|
-
|
|
865
|
-
except queue.Empty:
|
|
866
|
-
return False
|
|
1037
|
+
if frame is None: # ditto
|
|
1038
|
+
break
|
|
867
1039
|
|
|
868
|
-
|
|
1040
|
+
img, curPts = frame
|
|
1041
|
+
if curPts < self._absRecStreamStartTime and self._isRecording:
|
|
1042
|
+
del img # free the memory used by the frame
|
|
1043
|
+
# if the frame is before the recording start time, skip it
|
|
1044
|
+
continue
|
|
869
1045
|
|
|
870
|
-
|
|
871
|
-
return False
|
|
872
|
-
elif val == CAMERA_STATUS_PAUSED: # handle when paused
|
|
873
|
-
return False
|
|
874
|
-
elif frame is None: # handle when no frame is available
|
|
875
|
-
return False
|
|
876
|
-
|
|
877
|
-
frameImage, pts = frame # otherwise, unpack the frame
|
|
1046
|
+
self._frameCount += 1 # increment the frame count
|
|
878
1047
|
|
|
879
|
-
|
|
880
|
-
|
|
881
|
-
|
|
1048
|
+
recentFrames.append((
|
|
1049
|
+
img,
|
|
1050
|
+
curPts-self._absRecStreamStartTime,
|
|
1051
|
+
curPts))
|
|
882
1052
|
|
|
883
|
-
|
|
884
|
-
|
|
885
|
-
|
|
886
|
-
|
|
887
|
-
|
|
888
|
-
size=frameImage.get_size(),
|
|
889
|
-
colorData=videoFrameArray,
|
|
890
|
-
audioChannels=0,
|
|
891
|
-
audioSamples=None,
|
|
892
|
-
metadata=metadata,
|
|
893
|
-
movieLib=self._cameraLib,
|
|
894
|
-
userData=None)
|
|
895
|
-
|
|
896
|
-
return True
|
|
1053
|
+
return recentFrames
|
|
1054
|
+
|
|
1055
|
+
# --------------------------------------------------------------------------
|
|
1056
|
+
# OpenCV-specific methods
|
|
1057
|
+
#
|
|
897
1058
|
|
|
898
|
-
def
|
|
899
|
-
"""
|
|
1059
|
+
def _convertFrameToRGBOpenCV(self, frame):
|
|
1060
|
+
"""Convert a frame to RGB format using OpenCV.
|
|
900
1061
|
|
|
901
|
-
This
|
|
1062
|
+
This function converts a frame to RGB format. The frame is returned as
|
|
1063
|
+
a Numpy array. The resulting array will be in the correct format to
|
|
1064
|
+
upload to OpenGL as a texture.
|
|
902
1065
|
|
|
903
|
-
|
|
904
|
-
|
|
905
|
-
|
|
906
|
-
|
|
907
|
-
self._exitEvent.set() # signal the thread to stop
|
|
908
|
-
self._playerThread.join() # wait for the thread to stop
|
|
1066
|
+
Parameters
|
|
1067
|
+
----------
|
|
1068
|
+
frame : numpy.ndarray
|
|
1069
|
+
The frame to convert.
|
|
909
1070
|
|
|
910
|
-
|
|
1071
|
+
Returns
|
|
1072
|
+
-------
|
|
1073
|
+
numpy.ndarray
|
|
1074
|
+
The converted frame in RGB format.
|
|
911
1075
|
|
|
912
|
-
@property
|
|
913
|
-
def isEnabled(self):
|
|
914
|
-
"""`True` if the camera is enabled.
|
|
915
1076
|
"""
|
|
916
|
-
|
|
1077
|
+
import cv2
|
|
917
1078
|
|
|
918
|
-
|
|
919
|
-
|
|
1079
|
+
# this can be done in the shader to save CPU use, will figure out later
|
|
1080
|
+
return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
|
|
920
1081
|
|
|
-
-
-
-        Parameters
-        ----------
-        state : bool
-            `True` to enable recording frames to the queue, `False` to disable.
-            On state change, the audio interface will be started or stopped.
-
-        """
-        if state:
-            self._enableEvent.set()
-        else:
-            self._enableEvent.clear()
-
-        self._recordBarrier.wait()
-        self._enqueueFrame()
-
-    def disable(self):
-        """Stop passing frames to the frame queue.
+    def _openOpenCV(self):
+        """Open the camera stream using OpenCV.

-
+        This method should be called to open the camera stream using OpenCV.
+        It should initialize the camera and prepare it for reading frames.

         """
-
-
-    def getFrames(self):
-        """Get all frames from the camera stream which are waiting to be
-        processed.
+        pass

-
-
-
-
-
+    def _closeOpenCV(self):
+        """Close the camera stream opened with OpenCV.
+
+        This method should be called to close the camera stream and release any
+        resources associated with it.

         """
-
-
-        frames = []
-        while self._enqueueFrame():
-            frames.append(self._lastFrame)
-
-        return frames
-
-    def getRecentFrame(self):
-        """Get the most recent frame captured from the camera, discarding all
-        others.
+        pass

+    def _getFramesOpenCV(self):
+        """Get the most recent frames from the camera stream opened with OpenCV.
+
         Returns
         -------
-
-
+        numpy.ndarray
+            Most recent frames from the camera stream. Returns `None` if no
+            frames are available.

         """
-
-
-
-
-
-
-class CameraInterfaceOpenCV(CameraInterface):
-    """Camera interface using OpenCV to open and read camera streams.
-
-    Parameters
-    ----------
-    device : int
-        Camera device to open a stream with. This value is platform dependent.
-    mic : MicrophoneInterface or None
-        Microphone interface to use for audio recording. If `None`, no audio
-        recording is performed.
-
-    """
-    _cameraLib = u'opencv'
+        if self._capture is None:
+            raise PlayerNotAvailableError(
+                "Camera stream is not open. Call `open()` first.")
+
+        pass

-
-
-
-            import cv2  # just import to check if it's available
-        except ImportError:
-            raise ImportError(
-                "Could not import `cv2`. Please install OpenCV2 to use this "
-                "camera interface.")
-
-        self._cameraInfo = device
-        self._mic = mic  # microphone interface
-        self._frameQueue = queue.Queue()
-        self._enableEvent = threading.Event()
-        self._exitEvent = threading.Event()
-        self._warmUpBarrier = None
-        self._recordBarrier = None
+    # --------------------------------------------------------------------------
+    # Public methods for camera stream management
+    #

-    def
-    """
-
-        return self._playerThread is not None
+    def __hash__(self):
+        """Hash on the camera device name and library used."""
+        return hash((self._device, self._captureLib))

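The new `__hash__` makes `CameraDevice` instances usable as set members, which is what the module-level `_openCaptureInterfaces` registry relies on in `open()` and `close()` below. A standalone sketch of the idea, using a hypothetical stand-in class rather than the real device:

    _openCaptureInterfaces = set()

    class _FakeCameraDevice:
        """Hypothetical stand-in using the same hashing scheme."""
        def __init__(self, device, captureLib):
            self._device = device
            self._captureLib = captureLib

        def __hash__(self):
            return hash((self._device, self._captureLib))

    cam = _FakeCameraDevice(0, 'ffpyplayer')
    _openCaptureInterfaces.add(cam)
    assert cam in _openCaptureInterfaces  # the same instance can be found and removed later
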
-
-
-
-
-
-        about them. Therefore, we must open a stream with each camera index
-        and query the information from the stream. This process is quite slow
-        on systems with many cameras. It's best to run this function once and
-        save the results for later use if the camera configuration is not
-        expected to change.
-
-        Parameters
-        ----------
-        maxCameraEnum : int
-            Maximum number of cameras to check. This is the maximum camera index
-            to check. For example, if `maxCameraEnum` is 16, then cameras 0-15
-            will be checked.
-
-        Returns
-        -------
-        dict
-            Mapping containing information about each camera. The keys are the
-            camera index, and the values are `CameraInfo` objects.
+    def open(self):
+        """Open the camera stream.
+
+        This method should be called to open the camera stream. It should
+        initialize the camera and prepare it for reading frames.

         """
-
-
-        # recommended camera drivers for each platform
-        cameraPlatformDrivers = {
-            'Linux': (cv2.CAP_V4L2, CAMERA_API_VIDEO4LINUX2),
-            'Windows': (cv2.CAP_DSHOW, CAMERA_API_DIRECTSHOW),
-            'Darwin': (cv2.CAP_AVFOUNDATION, CAMERA_API_AVFOUNDATION)
-        }
+        if self._captureLib == 'ffpyplayer':
+            self._openFFPyPlayer()

-
-
-            platform.system(), (cv2.CAP_ANY, CAMERA_API_ANY))
+        global _openCaptureInterfaces
+        _openCaptureInterfaces.add(self)

-
-
+    def close(self):
+        """Close the camera stream.

-
-
-        # open a camera
-        thisCamera = cv2.VideoCapture(cameraIndex, cameraDriver)
+        This method should be called to close the camera stream and release any
+        resources associated with it.

-
-
-
-
-
-        frameRate = thisCamera.get(cv2.CAP_PROP_FPS)
-        frameSize = (
-            int(thisCamera.get(cv2.CAP_PROP_FRAME_WIDTH)),
-            int(thisCamera.get(cv2.CAP_PROP_FRAME_HEIGHT)))
-
-        genName = 'camera:{}'.format(cameraIndex)
-        cameraInfo = CameraInfo(
-            index=cameraIndex,
-            name=genName,
-            frameSize=frameSize or (-1, -1),
-            frameRate=frameRate or -1.0,
-            pixelFormat='bgr24',  # always BGR with 8 bpc for OpenCV
-            cameraLib=CameraInterfaceOpenCV._cameraLib,
-            cameraAPI=cameraAPI
-        )
+        """
+        if self.isRecording:
+            self.stop()  # stop the recording if it is in progress
+            logging.warning(
+                "CameraDevice.close() called while recording. Stopping.")

-
-
+        if self._captureLib == 'ffpyplayer':
+            self._closeFFPyPlayer()

-
-
-
+        self._capture = None  # reset the capture object
+
+        global _openCaptureInterfaces
+        if self in _openCaptureInterfaces:
+            _openCaptureInterfaces.remove(self)

     @property
-    def
-    """
+    def isOpen(self):
+        """Check if the camera stream is open.

-        Returns
-
-
+        Returns
+        -------
+        bool
+            `True` if the camera stream is open, `False` otherwise.

         """
-        return self.
-
-    @property
-    def frameRate(self):
-        """Get the frame rate of the camera stream (`float`).
-        """
-        if self._cameraInfo is None:
-            return -1.0
-
-        return self._cameraInfo.frameRate
+        return self._capture is not None

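Together, `open()`, `isOpen`, and `close()` give the device a simple lifecycle. A usage sketch, assuming a `CameraDevice` can be constructed with just a device index and capture library (constructor parameters as shown elsewhere in this diff):

    cam = CameraDevice(device=0, captureLib='ffpyplayer')
    cam.open()
    assert cam.isOpen       # a capture object now exists
    # ... record(), getFrames(), stop() ...
    cam.close()
    assert not cam.isOpen   # the capture object was reset to None
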
-
-
-        """Get the frame size of the camera stream (`tuple`).
-        """
-        if self._cameraInfo is None:
-            return (-1, -1)
-
-        return self._cameraInfo.frameSize
+    def record(self):
+        """Start recording camera frames to memory.

-
-
-
-
-        return self._playerThread.is_alive()
-
-        return False
-
-    def open(self):
-        """Open the camera stream and start reading frames using OpenCV2.
-        """
-        import cv2
-
-        def _frameGetterAsync(videoCapture, frameQueue, exitEvent, recordEvent,
-                              warmUpBarrier, recordingBarrier, audioCapture):
-            """Get frames asynchronously from the camera stream.
-
-            Parameters
-            ----------
-            videoCapture : cv2.VideoCapture
-                Handle for the video capture object. This is opened outside the
-                thread and passed in.
-            frameQueue : queue.Queue
-                Queue to store frames in.
-            exitEvent : threading.Event
-                Event to signal when the thread should stop.
-            recordEvent : threading.Event
-                Event used to signal the thread to pass frames along to the main
-                thread.
-            warmUpBarrier : threading.Barrier
-                Barrier which is used hold until camera capture is ready.
-            recordingBarrier : threading.Barrier
-                Barrier which is used to synchronize audio and video recording.
-                This ensures that the audio device is ready before buffering
-                frames captured by the camera.
-            audioCapture : psychopy.sound.microphone.Microphone or None
-                Microphone object to use for audio capture. This will be used to
-                synchronize the audio and video streams. If `None`, no audio
-                will be captured.
-
-            """
-            # poll interval is half the frame period, this makes sure we don't
-            # miss frames while not wasting CPU cycles
-            # fps = videoCapture.get(cv2.CAP_PROP_FPS)
-            # if fps > 0.0:
-            #     pollInterval = (1.0 / fps) * 0.5
-            # else:
-            #     pollInterval = 1 / 60.0
-
-            # if the camera is opened, wait until the main thread is ready to
-            # take frames
-            warmUpBarrier.wait()
-
-            # start capturing frames
-            isRecording = False
-            while not exitEvent.is_set():
-                # Capture frame-by-frame
-                ret, frame = videoCapture.read()
-
-                # if frame is read correctly ret is True
-                if not ret:  # eol or something else
-                    # val = 'eof'
-                    break
-                else:
-                    # don't queue frames unless they are newer than the last
-                    if isRecording:
-                        # color conversion is done in the thread here
-                        colorData = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-                        # colorData = frame
-                        frameQueue.put((colorData, 0.0, None))
-
-                # check if we should start or stop recording
-                if recordEvent.is_set() and not isRecording:
-                    if audioCapture is not None:
-                        audioCapture.start(waitForStart=1)
-                    recordingBarrier.wait()
-                    isRecording = True
-                elif not recordEvent.is_set() and isRecording:
-                    if audioCapture is not None:
-                        audioCapture.stop(blockUntilStopped=1)
-                    recordingBarrier.wait()
-                    isRecording = False
-
-                if not isRecording:
-                    # time.sleep(pollInterval)
-                    continue
+        This method should be called to start recording the camera stream.
+        Frame timestamps will be generated based on the current time when
+        this method is called. The frames will be stored and made available
+        through the `getFrames()` method.

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        self._warmUpBarrier = threading.Barrier(parties)  # camera is ready
-        self._recordBarrier = threading.Barrier(parties)  # audio/video is ready
-
-        # drivers for the given camera API
-        cameraDrivers = {
-            CAMERA_API_ANY: cv2.CAP_ANY,
-            CAMERA_API_VIDEO4LINUX2: cv2.CAP_V4L2,
-            CAMERA_API_DIRECTSHOW: cv2.CAP_DSHOW,
-            CAMERA_API_AVFOUNDATION: cv2.CAP_AVFOUNDATION
-        }
-        _cameraInfo = self._cameraInfo
-
-        # create the camera capture object, we keep this internal to the thread
-        # so that we can control when it is released
-        cap = cv2.VideoCapture(
-            _cameraInfo.index,
-            cameraDrivers[_cameraInfo.cameraAPI])
-
-        # check if the camera is opened
-        if not cap.isOpened():
-            raise RuntimeError("Cannot open camera using `cv2`")
-
-        # if the user didn't specify a frame rate or size, use the defaults
-        # pulled from the camera
-        usingDefaults = False
-        if _cameraInfo.frameRate is None:
-            _cameraInfo.frameRate = cap.get(cv2.CAP_PROP_FPS)
-            usingDefaults = True
-
-        if _cameraInfo.frameSize is None:
-            _cameraInfo.frameSize = (
-                int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
-                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
-            usingDefaults = True
-
-        if not usingDefaults:
-            # set frame rate and size and check if they were set correctly
-            cap.set(cv2.CAP_PROP_FPS, _cameraInfo.frameRate)
-            cap.set(cv2.CAP_PROP_FRAME_WIDTH, _cameraInfo.frameSize[0])
-            cap.set(cv2.CAP_PROP_FRAME_HEIGHT, _cameraInfo.frameSize[1])
-
-            if cap.get(cv2.CAP_PROP_FPS) != _cameraInfo.frameRate:
-                raise CameraFormatNotSupportedError(
-                    "Unsupported frame rate (%s), try %s instead." % (
-                        _cameraInfo.frameRate, cap.get(cv2.CAP_PROP_FPS)))
-
-            frameSizeMismatch = (
-                cap.get(cv2.CAP_PROP_FRAME_WIDTH) != _cameraInfo.frameSize[0] or
-                cap.get(cv2.CAP_PROP_FRAME_HEIGHT) != _cameraInfo.frameSize[1])
-            if frameSizeMismatch:
-                raise CameraFormatNotSupportedError(
-                    "Unsupported frame size: %s" % str(_cameraInfo.frameSize))
-
-        # open a stream and pause it until ready
-        self._playerThread = threading.Thread(
-            target=_frameGetterAsync,
-            args=(cap,
-                  self._frameQueue,
-                  self._exitEvent,
-                  self._enableEvent,
-                  self._warmUpBarrier,
-                  self._recordBarrier,
-                  self._mic))
-        self._playerThread.daemon = True
-        self._playerThread.start()
-
-        self._warmUpBarrier.wait()  # wait until the camera is ready
-
-        # pass off the player to the thread which will process the stream
-        self._enqueueFrame()  # pull metadata from first frame
-
-    def _enqueueFrame(self):
-        """Grab the latest frame from the stream.
+        To get precise audio synchronization:
+
+        1. Start the microphone recording
+        2. Store samples somewhere, keeping track of the absolute time of the
+           first audio sample.
+        3. Call this method to start the camera recording and store the
+           returned start time.
+        4. When the recording is stopped, compute the offset between the
+           absolute start time of the audio recording and the absolute start
+           time of the camera recording. Compute the position of the first
+           audio sample in the audio buffer by multiplying the offset by the
+           sample rate of the audio recording. This will give you the
+           position of the first audio sample in the audio buffer
+           corresponding to the very beginning of the first camera frame.

         Returns
         -------
-
-
-
+        float
+            The absolute start time of the recording in seconds. Use this value
+            to synchronize audio recording with the capture stream.

         """
-        self.
-
-
-
-
-
+        if not self.isOpen:
+            raise RuntimeError("Camera stream is not open. Call `open()` first.")
+
+        self._frameCount = 0  # reset the frame count
+        self._clearFrameStore()  # clear the frame store
+        self._capture.set_pause(False)  # start the capture stream

-
+        # need to use a different timebase on macOS, due to a bug
+        if self._cameraAPI == CAMERA_API_AVFOUNDATION:
+            self._absRecStreamStartTime = time.time()
+        else:
+            self._absRecStreamStartTime = self._capture.get_pts()  # get the absolute start time

-
-
-        elif val == 'paused':  # handle when paused, not used for OpenCV yet
-            return False
-        elif frame is None:  # handle when no frame is available
-            return False
-
-        frameImage = frame  # otherwise, unpack the frame
-
-        # if we have a new frame, update the frame information
-        # videoBuffer = frameImage.to_bytearray()[0]
-        videoFrameArray = np.ascontiguousarray(
-            frameImage.flatten(), dtype=np.uint8)
-
-        # provide the last frame
-        self._lastFrame = MovieFrame(
-            frameIndex=self._frameIndex,
-            absTime=0.0,
-            # displayTime=self._recentMetadata['frame_size'],
-            size=self._cameraInfo.frameSize,
-            colorFormat='rgb24',  # converted in thread
-            colorData=videoFrameArray,
-            audioChannels=0,
-            audioSamples=None,
-            metadata=None,
-            movieLib=self._cameraLib,
-            userData=None)
-
-        return True
+        self._absRecExpStartTime = core.getTime()  # experiment start time in seconds
+        self._isRecording = True

-
-
-
-
-        self._playerThread.join()  # hold the thread until it stops
+        return self._absRecStreamStartTime
+
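The four-step synchronization recipe in the `record()` docstring reduces to simple arithmetic once both absolute start times are known. A worked sketch with illustrative numbers (all variables hypothetical):

    sampleRate = 48000          # audio sample rate in Hz
    audioStartAbs = 1052.250    # absolute time of the first audio sample (step 2)
    videoStartAbs = 1052.375    # value returned by CameraDevice.record() (step 3)

    # step 4: the audio stream started 125 ms before the first camera frame
    offsetSecs = videoStartAbs - audioStartAbs
    firstSample = int(offsetSecs * sampleRate)   # = 6000 samples into the buffer
    # audioBuffer[firstSample:] then lines up with the start of the video
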
+    def start(self):
+        """Start recording the camera stream.

-
+        Alias for `record()`. This method is provided for compatibility with
+        other camera interfaces that may use `start()` to begin recording.

-    @property
-    def isEnabled(self):
-        """`True` if the camera is enabled.
         """
-        return self.
-
-    def enable(self, state=True):
-        """Start passing frames to the frame queue.
+        return self.record()  # start recording and return the start time

-
-
-        method returns quicker.
+    def stop(self):
+        """Stop recording the camera stream.

-
-
-        state : bool
-            `True` to enable recording frames to the queue, `False` to disable.
-            On state change, the audio interface will be started or stopped.
+        This method should be called to stop recording the camera stream. It
+        will stop capturing frames from the camera and clear the frame store.

         """
-
-
+        self._capture.set_pause(True)  # pause the capture stream
+
+        if self._cameraAPI == CAMERA_API_AVFOUNDATION:
+            absStopTime = time.time()
         else:
-            self.
-
-        self._recordBarrier.wait()
-        self._enqueueFrame()
-
-    def disable(self):
-        """Stop passing frames to the frame queue.
-
-        Calling this is equivalent to calling `enable(False)`.
+            absStopTime = self._capture.get_pts()

-
-        self.enable(False)
+        self._isRecording = False

-
-
-
+        return absStopTime
+
+    @property
+    def isRecording(self):
+        """Check if the camera stream is currently recording (`bool`).

         Returns
         -------
-
-
-
+        bool
+            `True` if the camera stream is currently recording, `False`
+            otherwise.

         """
-        self.
-
-
-
-            frames.append(self._lastFrame)
+        return self._isRecording
+
+    def getFrames(self):
+        """Get the most recent frames from the camera stream.

-
+        This method returns frames captured since the last call to this method.
+        If no frames are available or `record()` has not been previously called,
+        it returns an empty list.

-
-
-        others.
+        You must call this method periodically at an interval of at least
+        `bufferSecs` seconds or risk losing frames.

         Returns
         -------
-
-
+        list
+            List of frames from the camera stream. Returns an empty list if no
+            frames are available.

         """
-
-
+        if self._captureLib == 'ffpyplayer':
+            return self._getFramesFFPyPlayer()
+
|
|
|
1418
|
-
|
|
1279
|
+
# class name alias for legacy support
|
|
1280
|
+
CameraInterface = CameraDevice
|
|
1419
1281
|
|
|
1420
1282
|
|
|
1421
1283
|
# keep track of camera devices that are opened
|
|
@@ -1469,10 +1331,11 @@ class Camera:
|
|
|
1469
1331
|
libraries could help resolve issues with camera compatibility. More
|
|
1470
1332
|
camera libraries may be installed via extension packages.
|
|
1471
1333
|
bufferSecs : float
|
|
1472
|
-
Size of the real-time camera stream buffer specified in seconds
|
|
1473
|
-
|
|
1474
|
-
|
|
1475
|
-
|
|
1334
|
+
Size of the real-time camera stream buffer specified in seconds. This
|
|
1335
|
+
will tell the library to allocate a buffer that can hold enough
|
|
1336
|
+
frames to cover the specified number of seconds of video. This should
|
|
1337
|
+
be large enough to cover the time it takes to process frames in the
|
|
1338
|
+
main thread.
|
|
1476
1339
|
win : :class:`~psychopy.visual.Window` or None
|
|
1477
1340
|
Optional window associated with this camera. Some functionality may
|
|
1478
1341
|
require an OpenGL context for presenting frames to the screen. If you
|
|
@@ -1480,6 +1343,29 @@ class Camera:
|
|
|
1480
1343
|
safely ignored.
|
|
1481
1344
|
name : str
|
|
1482
1345
|
Label for the camera for logging purposes.
|
|
1346
|
+
keepFrames : int
|
|
1347
|
+
Number of frames to keep in memory for the camera stream. Calling
|
|
1348
|
+
`getVideoFrames()` will return the most recent `keepFrames` frames from
|
|
1349
|
+
the camera stream. If `keepFrames` is set to `0`, no frames will be kept
|
|
1350
|
+
in memory and the camera stream will not be buffered. This is useful if
|
|
1351
|
+
the user desires to access raw frame data from the camera stream.
|
|
1352
|
+
latencyBias : float
|
|
1353
|
+
Latency bias to correct for asychrony between the camera and the
|
|
1354
|
+
microphone. This is the amount of time in seconds to add to the
|
|
1355
|
+
microphone recording start time to shift the audio track to match
|
|
1356
|
+
corresponding events in the video stream. This is needed for some
|
|
1357
|
+
cameras whose drivers do not accurately report timestamps for camera
|
|
1358
|
+
frames. Positive values will shift the audio track forward in time, and
|
|
1359
|
+
negative values will shift backwards.
|
|
1360
|
+
usageMode : str
|
|
1361
|
+
Usage mode hint for the camera aquisition. This with enable
|
|
1362
|
+
optimizations for specific applications that will improve performance
|
|
1363
|
+
and reduce memory usage. The default value is 'video', which is suitable
|
|
1364
|
+
for recording video streams with audio efficently. The 'cv' mode is for
|
|
1365
|
+
computer vision applications where frames from the camera stream are
|
|
1366
|
+
processed in real-time (e.g. object detection, tracking, etc.) and the
|
|
1367
|
+
video is not being saved to disk. Audio will not be recorded in this
|
|
1368
|
+
mode even if a microphone is provided.
|
|
1483
1369
|
|
|
1484
1370
|
Examples
|
|
1485
1371
|
--------
|
|
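Taken together, the new `keepFrames` and `usageMode` parameters slot into the constructor alongside the existing ones; `latencyBias` is set through its property after construction. A hedged sketch of typical use:

    cam = Camera(
        device=0,
        cameraLib='ffpyplayer',
        bufferSecs=4,           # stream buffer sized for ~4 s of video
        keepFrames=5,           # keep only the 5 most recent frames in memory
        usageMode='video',      # 'cv' would disable audio for real-time processing
        name='webcam')
    cam.latencyBias = 0.05      # nudge the audio track 50 ms forward if needed
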
@@ -1516,7 +1402,7 @@ class Camera:
     """
     def __init__(self, device=0, mic=None, cameraLib=u'ffpyplayer',
                  frameRate=None, frameSize=None, bufferSecs=4, win=None,
-                 name='cam'):
+                 name='cam', keepFrames=5, usageMode='video'):
         # add attributes for setters
         self.__dict__.update(
             {'_device': None,
@@ -1526,238 +1412,240 @@ class Camera:
              '_mode': u'video',
              '_frameRate': None,
              '_frameRateFrac': None,
+             '_frameSize': None,
              '_size': None,
              '_cameraLib': u''})
-
-        # ----------------------------------------------------------------------
-        # Process camera settings
-        #
-
-        # camera library in use
-        self._cameraLib = cameraLib

-
-        if device in (None, "None", "none", "Default", "default"):
-            device = 0  # use the first enumerated camera
-
-        # handle all possible input for `frameRate` and `frameSize`
-        if frameRate is None:
-            pass  # no change
-        elif isinstance(frameRate, str):
-            if frameRate in ("None", "none", "Default", "default"):
-                frameRate = None
-            elif frameRate.lower() == 'ntsc':
-                frameRate = CAMERA_FRAMERATE_NTSC
-            else:
-                try:  # try and convert to float
-                    frameRate = float(frameRate)
-                except ValueError:
-                    raise ValueError(
-                        "`frameRate` must be a number, string or None")
-
-        # catch the value converted to float and process it
-        if isinstance(frameRate, (int, float)):
-            if frameRate <= 0:
-                raise ValueError("`frameRate` must be a positive number")
-
-        if frameSize is None:
-            pass  # use the camera default
-        elif isinstance(frameSize, str):
-            if frameSize in ("None", "none", "Default", "default"):
-                frameSize = None
-            elif len(frameSize.split('x')) == 2:
-                frameSize = tuple(map(int, frameSize.split('x')))
-            elif frameSize.upper() in movietools.VIDEO_RESOLUTIONS.keys():
-                frameSize = movietools.VIDEO_RESOLUTIONS[frameSize.upper()]
-            else:
-                raise ValueError("`frameSize` specified incorrectly")
-        elif isinstance(frameSize, (tuple, list)):
-            if len(frameSize) != 2:
-                raise ValueError("`frameSize` must be a 2-tuple or 2-list")
-            frameSize = tuple(map(int, frameSize))
-        else:
-            raise ValueError("`frameSize` specified incorrectly")
-
-        # recommended camera drivers for each platform
-        cameraPlatformDrivers = {
-            'Linux': CAMERA_API_VIDEO4LINUX2,
-            'Windows': CAMERA_API_DIRECTSHOW,
-            'Darwin': CAMERA_API_AVFOUNDATION
-        }
-        # get the recommended camera driver for the current platform
-        cameraAPI = cameraPlatformDrivers[platform.system()]
-
-        self._cameraInfo = CameraInfo(
-            index=device,
-            frameRate=frameRate,  # dummy value
-            frameSize=frameSize,  # dummy value
-            pixelFormat='bgr24',
-            cameraLib=cameraLib,
-            cameraAPI=cameraAPI)
-
-        self._device = self._cameraInfo.description()
-
-        elif self._cameraLib == u'ffpyplayer':
-            supportedCameraSettings = CameraInterfaceFFmpeg.getCameras()
-
-            # create a mapping of supported camera formats
-            _formatMapping = dict()
-            for _, formats in supportedCameraSettings.items():
-                for _format in formats:
-                    desc = _format.description()
-                    _formatMapping[desc] = _format
-            # sort formats by resolution then frame rate
-            orderedFormats = list(_formatMapping.values())
-            orderedFormats.sort(key=lambda obj: obj.frameRate, reverse=True)
-            orderedFormats.sort(key=lambda obj: np.prod(obj.frameSize),
-                                reverse=True)
-
-            # list of devices
-            devList = list(_formatMapping)
-
-            if not devList:  # no cameras found if list is empty
-                raise CameraNotFoundError('No cameras found of the system!')
-
-            # Get best device
-            bestDevice = _formatMapping[devList[-1]]
-            for mode in orderedFormats:
-                sameFrameRate = mode.frameRate == frameRate or frameRate is None
-                sameFrameSize = mode.frameSize == frameSize or frameSize is None
-                if sameFrameRate and sameFrameSize:
-                    bestDevice = mode
-                    break
+        self._cameraLib = cameraLib

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        )
-        byHeight = sorted(
-            supportedCameraSettings[device],
-            key=lambda mode: abs(frameSize[1] - mode.frameSize[1])
-        )
-        byFrameRate = sorted(
-            supportedCameraSettings[device],
-            key=lambda mode: abs(mode.frameRate)
-        )
-        deltas = [
-            byWidth.index(mode) + byHeight.index(mode) + byFrameRate.index(mode)
-            for mode in supportedCameraSettings[device]
-        ]
-        i = deltas.index(min(deltas))
-        closest = supportedCameraSettings[device][i]
-        # log warning that settings won't match requested
-        logging.warn(_translate(
-            "Device {device} does not support frame rate of "
-            "{frameRate} and frame size of {frameSize}, using "
-            "closest supported format: {desc}"
-        ).format(device=device,
-                 frameRate=frameRate,
-                 frameSize=frameSize,
-                 desc=closest.description()))
-        # use closest
-        device = closest
-
-        # self._origDevSpecifier = device  # what the user provided
-        self._device = None  # device identifier
-
-        # alias device None or Default as being device 0
-        if device in (None, "None", "none", "Default", "default"):
-            self._device = bestDevice.description()
-        elif isinstance(device, CameraInfo):
-            if self._cameraLib != device.cameraLib:
-                raise CameraFormatNotSupportedError(
-                    'Wrong configuration for camera library!')
-            self._device = device.description()
+        # handle device
+        self._capture = None
+        if isinstance(device, CameraDevice):
+            # if given a device object, use it
+            self._capture = device
+        elif device is None:
+            # if given None, get the first available device
+            for name, obj in DeviceManager.getInitialisedDevices(CameraDevice).items():
+                self._capture = obj
+                break
+            # if there are none, set one up
+            if self._capture is None:
+                for profile in CameraDevice.getAvailableDevices():
+                    self._capture = DeviceManager.addDevice(**profile)
+                    break
+        elif isinstance(device, str):
+            if DeviceManager.getDevice(device):
+                self._capture = DeviceManager.getDevice(device)
             else:
-            #
-
-
-
-
-
-
-
-
-
-
-
+                # get available devices
+                availableDevices = CameraDevice.getAvailableDevices()
+                # if given a device name, try to find it
+                for profile in availableDevices:
+                    if profile['deviceName'] != device:
+                        continue
+                    paramsMatch = all([
+                        profile.get(key) == value
+                        for key, value in {
+                            'deviceName': device,
+                            'captureLib': cameraLib,
+                            'frameRate': frameRate if frameRate is not None else True,  # get first
+                            'frameSize': frameSize if frameSize is not None else True
+                        }.items() if value is not None
+                    ])
+                    if not paramsMatch:
+                        continue
+
+                    device = profile['device']
+                    break

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                # anything else, try to initialise a new device from params
+                self._capture = CameraDevice(
+                    device=device,
+                    captureLib=cameraLib,
+                    frameRate=frameRate,
+                    frameSize=frameSize,
+                    pixelFormat=None,  # use default pixel format
+                    codecFormat=None,  # use default codec format
+                    captureAPI=None  # use default capture API
+                )
+        else:
+            # anything else, try to initialise a new device from params
+            self._capture = CameraDevice(
+                device=device,
+                captureLib=cameraLib,
+                frameRate=frameRate,
+                frameSize=frameSize,
+                pixelFormat=None,  # use default pixel format
+                codecFormat=None,  # use default codec format
+                captureAPI=None  # use default capture API
+            )
+        # from here on in the init, use the device index as `device`
+        device = self._capture.device
+        # get info from device
+        self._cameraInfo = self._capture.info
+
+        # handle microphone
+        self.mic = None
+        if isinstance(mic, MicrophoneDevice):
+            # if given a device object, use it
+            self.mic = mic
+        elif isinstance(mic, Microphone):
+            # if given a Microphone, use its device
+            self.mic = mic.device
+        elif mic is None:
+            # if given None, get the first available device
+            for name, obj in DeviceManager.getInitialisedDevices(MicrophoneDevice).items():
+                self.mic = obj
+                break
+            # if there are none, set one up
+            if self.mic is None:
+                for profile in MicrophoneDevice.getAvailableDevices():
+                    self.mic = DeviceManager.addDevice(**profile)
+                    break
+        elif isinstance(mic, str) and DeviceManager.getDevice(mic) is not None:
+            # if given a device name, get the device
+            self.mic = DeviceManager.getDevice(mic)
+        else:
+            # anything else, try to initialise a new device from params
+            self.mic = MicrophoneDevice(
+                index=mic
+            )

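The device- and microphone-resolution logic above accepts several input types for `device` and `mic`. A summary sketch of the accepted forms (the `existing...` objects are hypothetical):

    camA = Camera(device=0)                  # index: a new CameraDevice is initialised
    camB = Camera(device='HD WebCam')        # name: looked up (or matched) via DeviceManager
    camC = Camera(device=existingCamDevice)  # CameraDevice instance: used directly
    camD = Camera(mic=existingMicDevice)     # MicrophoneDevice instance: used directly
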
         # current camera frame since the start of recording
-        self._player = None  # media player instance
         self.status = NOT_STARTED
-        self._isRecording = False
         self._bufferSecs = float(bufferSecs)
         self._lastFrame = None  # use None to avoid imports for ImageStim
+        self._keepFrames = keepFrames  # number of frames to keep in memory
+        self._frameCount = 0  # number of frames read from the camera stream
+        self._frameStore = collections.deque(maxlen=keepFrames)
+        self._usageMode = usageMode  # usage mode for the camera
+        self._unsaved = False  # is there any footage not saved?

-        # microphone instance, this is controlled by the camera interface and
-        # is not meant to be used by the user
-        self.mic = mic
         # other information
         self.name = name
         # timestamp data
         self._streamTime = 0.0
         # store win (unused but needs to be set/got safely for parity with JS)
-        self.
+        self._win = None
+
+        # recording properties
+        self._isStarted = False  # is the stream started?
+        self._audioReady = False
+        self._videoReady = False
+
+        self._latencyBias = 0.0  # latency bias in seconds
+
+        self._absVideoRecStartTime = -1.0
+        self._absVideoRecStopTime = -1.0
+        self._absAudioRecStartTime = -1.0
+        self._absAudioRecStopTime = -1.0
+
+        # computed timestamps for when
+        self._absAudioActualRecStartTime = -1.0
+
+        self._absAudioRecStartPos = -1.0  # in samples
+        self._absAudioRecStopPos = -1.0
+
+        self._curPTS = 0.0  # current display timestamp
+        self._isRecording = False
+        self._generatePTS = False  # use generated PTS values for frames

         # movie writer instance, this runs in a separate thread
         self._movieWriter = None
-
-
-        # self._audioThread = None
-        self._captureFrames = []  # array for storing frames
+        self._tempVideoFile = None  # temporary video file for recording
+
         # thread for polling the microphone
         self._audioTrack = None  # audio track from the recent recording
-        # used to sync threads spawned by this class, created on `open()`
-        self._syncBarrier = None
         # keep track of the last video file saved
         self._lastVideoFile = None

+        # OpenGL stuff, just declare these attributes for now
+        self._pixbuffId = None
+        self._textureId = None
+        self._interpolate = True  # use bilinear interpolation by default
+        self._texFilterNeedsUpdate = True  # flag to update texture filtering
+        self._texBufferSizeBytes = None  # size of the texture buffer
+
+        # computer vision mode
+        self._objClassfiers = {}  # list of classifiers for CV mode
+
+        # keep track of files to merge
+        self._filesToMerge = []  # list of tuples (videoFile, audioFile)
+
+        self.setWin(win)  # sets up OpenGL stuff if needed
+
     def authorize(self):
         """Get permission to access the camera. Not implemented locally yet.
         """
         pass  # NOP

+    @property
+    def latencyBias(self):
+        """Latency bias in seconds (`float`).
+
+        This is the latency bias that is applied to the timestamps of the frames
+        in the camera stream. This is useful for synchronizing the camera stream
+        with other devices such as microphones or audio interfaces. The default
+        value is `0.0`, which means no latency bias is applied.
+
+        """
+        return self._latencyBias
+
+    @latencyBias.setter
+    def latencyBias(self, value):
+        """Set the latency bias in seconds (`float`).
+
+        This is the latency bias that is applied to the timestamps of the frames
+        in the camera stream. This is useful for synchronizing the camera stream
+        with other devices such as microphones or audio interfaces. The default
+        value is `0.0`, which means no latency bias is applied.
+
+        Parameters
+        ----------
+        value : float
+            Latency bias in seconds.
+
+        """
+        if not isinstance(value, (int, float)):
+            raise TypeError("Latency bias must be a number.")
+
+        self._latencyBias = float(value)
+
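The setter validates its input before storing the bias, so only numbers get through. A short usage sketch:

    cam.latencyBias = 0.125        # audio events land 125 ms late; shift the track forward
    try:
        cam.latencyBias = 'fast'   # non-numeric values are rejected
    except TypeError:
        pass
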
1613
|
+
|
|
1614
|
+
@property
|
|
1615
|
+
def streamTime(self):
|
|
1616
|
+
"""Current stream time in seconds (`float`).
|
|
1617
|
+
|
|
1618
|
+
This is the current absolute time in seconds from the time the PC was
|
|
1619
|
+
booted. This is not the same as the recording time, which is the time
|
|
1620
|
+
since the recording started. This is useful for generating timestamps
|
|
1621
|
+
across multiple cameras or devices using the same time source.
|
|
1622
|
+
|
|
1623
|
+
"""
|
|
1624
|
+
return self._capture.streamTime
|
|
1625
|
+
|
|
1626
|
+
@property
|
|
1627
|
+
def recordingTime(self):
|
|
1628
|
+
"""Time in seconds since the recording started (`float`).
|
|
1629
|
+
|
|
1630
|
+
This is the time since the recording started. This is useful for
|
|
1631
|
+
generating timestamps for frames in the recording. If the recording has
|
|
1632
|
+
not started, this will return `0.0`.
|
|
1633
|
+
|
|
1634
|
+
"""
|
|
1635
|
+
if self._absRecStreamStartTime < 0:
|
|
1636
|
+
return 0.0
|
|
1637
|
+
|
|
1638
|
+
if self._cameraAPI == CAMERA_API_AVFOUNDATION:
|
|
1639
|
+
return time.time() - self._absRecStreamStartTime
|
|
1640
|
+
|
|
1641
|
+
# for other APIs, use the PTS value
|
|
1642
|
+
curPts = self._capture.get_pts()
|
|
1643
|
+
if curPts is None:
|
|
1644
|
+
return 0.0
|
|
1645
|
+
|
|
1646
|
+
# return the difference between the current PTS and the absolute start time
|
|
1647
|
+
return self._capture.get_pts() - self._absRecStreamStartTime
|
|
1648
|
+
|
|
1761
1649
|
@property
|
|
1762
1650
|
def isReady(self):
|
|
1763
1651
|
"""Is the camera ready (`bool`)?
|
|
@@ -1801,6 +1689,20 @@ class Camera:
|
|
|
1801
1689
|
|
|
1802
1690
|
return self._cameraInfo.frameRate
|
|
1803
1691
|
|
|
1692
|
+
@property
|
|
1693
|
+
def frameInterval(self):
|
|
1694
|
+
"""Frame interval in seconds (`float`).
|
|
1695
|
+
|
|
1696
|
+
This is the time between frames in the video stream. This is computed
|
|
1697
|
+
from the frame rate of the video stream. If the frame rate is not set,
|
|
1698
|
+
this will return `None`.
|
|
1699
|
+
|
|
1700
|
+
"""
|
|
1701
|
+
if self._cameraInfo is None or self._cameraInfo.frameRate is None:
|
|
1702
|
+
return -1.0
|
|
1703
|
+
|
|
1704
|
+
return 1.0 / self._cameraInfo.frameRate
|
|
1705
|
+
|
|
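`frameInterval` is just the reciprocal of the frame rate, and it is what the `recordingTime` property later in this diff multiplies the frame count by. The arithmetic in isolation:

    frameRate = 30.0
    frameInterval = 1.0 / frameRate               # ~0.0333 s between frames
    frameCount = 90
    recordingTime = frameCount * frameInterval    # 3.0 s of footage so far
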
     def _assertCameraReady(self):
         """Assert that the camera is ready. Raises a `CameraNotReadyError` if
         the camera is not ready.
@@ -1822,10 +1724,8 @@ class Camera:
         """`True` if the stream has started (`bool`). This status is given after
         `open()` has been called on this object.
         """
-        if self
-            return
-
-        return self._captureThread.isOpen()
+        if hasattr(self, "_isStarted"):
+            return self._isStarted

     @property
     def isNotStarted(self):
@@ -1855,23 +1755,19 @@ class Camera:

         Returns
         -------
-        MovieMetadata
+        MovieMetadata or None
             Metadata about the video stream, retrieved during the last frame
-            update (`_enqueueFrame` call).
+            update (`_enqueueFrame` call). If no metadata is available,
+            returns `None`. This is useful for getting information about the
+            video stream such as frame size, frame rate, pixel format, etc.

         """
-        return self.
-
-    # @property
-    # def mode(self):
-    #     """Operating mode in use for this camera.
-    #     """
-    #     return self._mode
+        return self._capture.getMetadata() if self._capture else None

     _getCamerasCache = {}

     @staticmethod
-    def getCameras(cameraLib=
+    def getCameras(cameraLib='ffpyplayer'):
         """Get information about installed cameras on this system.

         Returns
@@ -1881,18 +1777,8 @@ class Camera:

         """
         # not pluggable yet, needs to be made available via extensions
-
-
-            Camera._getCamerasCache['opencv'] = \
-                CameraInterfaceOpenCV.getCameras()
-            return Camera._getCamerasCache['opencv']
-        elif cameraLib == 'ffpyplayer':
-            if 'ffpyplayer' not in Camera._getCamerasCache:
-                Camera._getCamerasCache['ffpyplayer'] = \
-                    CameraInterfaceFFmpeg.getCameras()
-            return Camera._getCamerasCache['ffpyplayer']
-        else:
-            raise ValueError("Invalid value for parameter `cameraLib`")
+        return CameraDevice.getCameras(
+            cameraLib=cameraLib)

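`getCameras()` now simply forwards to `CameraDevice.getCameras()`. A usage sketch; the exact shape of the returned mapping is assumed from the older docstring (camera identifiers mapped to their supported formats):

    cams = Camera.getCameras(cameraLib='ffpyplayer')
    for deviceName, formats in cams.items():   # mapping layout assumed
        print(deviceName, formats)
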
     @staticmethod
     def getAvailableDevices():
@@ -1901,8 +1787,12 @@ class Camera:
         for spec in dev:
             devices.append({
                 'device': spec['index'],
+                'name': spec['device_name'],
                 'frameRate': spec['frameRate'],
                 'frameSize': spec['frameSize'],
+                'pixelFormat': spec['pixelFormat'],
+                'codecFormat': spec['codecFormat'],
+                'cameraAPI': spec['cameraAPI']
             })

         return devices
@@ -2015,429 +1905,1350 @@ class Camera:
         return totalFramesBuffered

     @property
-    def
-    """
-
+    def keepFrames(self):
+        """Number of frames to keep in memory for the camera stream (`int`).
+        """
+        return self._keepFrames
+
+    @keepFrames.setter
+    def keepFrames(self, value):
+        if value < 0:
+            raise ValueError("`keepFrames` must be a non-negative integer.")

-
-
+        self._keepFrames = value
+        oldFrames = self._frameStore
+        oldStoreSize = len(self._frameStore)
+
+        if oldStoreSize == self._keepFrames:
+            # nothing to do, size is the same
+            return
+
+        # change the size of the frame store
+        self._frameStore = collections.deque(maxlen=self._keepFrames)
+
+        if oldStoreSize > self._keepFrames:
+            logging.warning(
+                "Reducing `keepFrames` from {} to {} will discard the oldest "
+                "frames in the buffer.".format(oldStoreSize, self._keepFrames))
+
+        # add back frames
+        if oldStoreSize > 0:
+            # copy the last `keepFrames` frames to the new store
+            for i in range(oldStoreSize - self._keepFrames, oldStoreSize):
+                self._frameStore.append(oldFrames[i])
+
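The setter's resize logic leans on `collections.deque` discarding items from the head once `maxlen` is reached. The behaviour in isolation:

    import collections

    store = collections.deque(maxlen=5)
    for i in range(8):
        store.append(i)
    print(list(store))   # [3, 4, 5, 6, 7] -- the three oldest entries were dropped
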
+    @property
+    def recordingTime(self):
+        """Current recording timestamp (`float`).
+
+        This returns the timestamp of the last frame captured in the recording.
+
+        This value increases monotonically from the last `record()` call. It
+        will reset once `stop()` is called. This value is invalid outside
+        `record()` and `stop()` calls.
+
+        """
+        return self.frameCount * self._capture.frameInterval
+
+    @property
+    def recordingBytes(self):
+        """Current size of the recording in bytes (`int`).
+        """
+        if not self._isRecording:
+            return 0
+
+        return -1
+
+    def _assertMediaPlayer(self):
+        """Assert that we have a media player instance open.
+
+        This will raise a `RuntimeError` if there is no player open. Use this
+        function to ensure that a player is present before running subsequent
+        code.
+        """
+        if self._capture is not None:
+            return
+
+        raise PlayerNotAvailableError('Media player not initialized.')
+
+    @property
+    def isReady(self):
+        """`True` if the video and audio capture devices are in a ready state
+        (`bool`).
+
+        When this is `True`, the audio and video streams are properly started.
+
+        """
+        return self._audioReady and self._videoReady
+
+    def open(self):
+        """Open the camera stream and begin decoding frames (if available).
+
+        This function returns when the camera is ready to start getting
+        frames.
+
+        Call `record()` to start recording frames to memory. Captured frames
+        can be saved to disk using `save()`.
+
+        """
+        if self._hasPlayer:
+            raise RuntimeError('Cannot open `MediaPlayer`, already opened.')
+
+        # Camera interface to use, these are hard coded but support for each is
+        # provided by an extension.
+        # desc = self._cameraInfo.description()
+
+        self._capture.open()
+
+        if self.win is not None:
+            # if we have a window, setup texture buffers for displaying
+            self._setupTextureBuffers()
+
+        # open the mic when the camera opens
+        if hasattr(self.mic, "open"):
+            self.mic.open()
+
+        self._isStarted = True
+
+    def record(self, clearLastRecording=True, waitForStart=False):
+        """Start recording frames.
+
+        This function will start recording frames and audio (if available). The
+        value of `lastFrame` will be updated as new frames arrive and the
+        `frameCount` will increase. You can access image data for the most
+        recent frame to be captured using `lastFrame`.
+
+        If this is called before `open()` the camera stream will be opened
+        automatically. This is not recommended as it may incur a longer than
+        expected delay in the recording start time.
+
+        Warnings
+        --------
+        If a recording has been previously made without calling `save()` it will
+        be discarded if `record()` is called again unless
+        `clearLastRecording=False`.
+
+        Parameters
+        ----------
+        clearLastRecording : bool
+            Clear the frame buffer before starting the recording. If `True`,
+            the frame buffer will be cleared before starting the recording. If
+            `False`, the frame buffer will be kept and new frames will be added
+            to the buffer. Default is `True`. This is deprecated and will
+            eventually be removed in a future version of PsychoPy. The recording
+            is always cleared when `record()` is called, so this parameter is
+            ignored.
+        waitForStart : bool
+            Capture video only when the camera and microphone are ready. This
+            will result in a longer delay before the recording starts, but will
+            ensure the microphone is actually recording valid samples. In some
+            cases this will result in a delay of up to 1 second before the
+            recording starts.
+
+        """
+        if self.isNotStarted:
+            self.open()  # open the camera stream if we call record() first
+            logging.warning(
+                "Called `Camera.record()` before opening the camera stream, "
+                "opening now. This is not recommended as it may incur a longer "
+                "than expected delay in the recording start time."
+            )
+
+        if self._isRecording:
+            logging.warning(
+                "Called `Camera.record()` while already recording, stopping "
+                "the previous recording first."
+            )
+            self.stop()
+
+        # clear previous frames
+        if clearLastRecording:
+            self._frameStore.clear()  # clear frames from last recording
+
+        self._capture._clearFrameStore()
+
+        # reset the movie writer
+        self._openMovieFileWriter()
+
+        # reset audio flags
+        self._audioReady = self._videoReady = False
+
+        # reset the last frame
+        self._lastFrame = None
+
+        # start camera recording
+        self._absVideoRecStartTime = self._capture.record()
+
+        # start microphone recording
+        if self._usageMode == CAMERA_MODE_VIDEO:
+            if self.mic is not None:
+                audioStartTime = self.mic.start(
+                    waitForStart=int(waitForStart),  # wait until the mic is ready
+                )
+                self._absAudioRecStartTime = self._capture.streamTime
+                if waitForStart:
+                    self._absAudioActualRecStartTime = audioStartTime  # time it will be ready
+                else:
+                    self._absAudioActualRecStartTime = self._absAudioRecStartTime
+
+        self._isRecording = True  # set recording flag
+        # do an initial poll to avoid frame dropping
+        self.update()
+        # mark that there's unsaved footage
+        self._unsaved = True
+
+    def start(self, waitForStart=True):
+        """Start the camera stream.
+
+        This will start the camera stream and begin decoding frames. If the
+        camera is already started, this will do nothing. Use `record()` to start
+        recording frames to memory.
+
+        """
+        return self.record(clearLastRecording=False, waitForStart=waitForStart)
+
+    def stop(self):
+        """Stop recording frames and audio (if available).
+        """
+        # poll any remaining frames and stop
+        self.update()
+
+        # stop the camera stream
+        self._absVideoRecStopTime = self._capture.stop()
+
+        # stop audio recording if we have a microphone
+        if self.hasMic and not self.mic._stream._closed:
+            _, overflows = self.mic.poll()
+
+            if overflows > 0:
+                logging.warning(
+                    "Audio recording overflowed {} times before stopping, "
+                    "some audio samples may be lost.".format(overflows))
+            audioStopTime, _, _, _ = self.mic.stop(
+                blockUntilStopped=0)
+
+        self._audioReady = self._videoReady = False  # reset camera ready flags
+        self._isRecording = False
+
+        self._closeMovieFileWriter()
+
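The `record()`/`stop()` pair above frames the typical capture flow at the `Camera` level. A usage sketch (file name illustrative):

    cam = Camera(device=0)
    cam.open()
    cam.record(waitForStart=True)   # block until the mic is actually capturing
    # ... run the trial ...
    cam.stop()
    cam.save('trial01.mp4')         # see save() below
    cam.close()
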
2135
|
+
def close(self):
|
|
2136
|
+
"""Close the camera.
|
|
2137
|
+
|
|
2138
|
+
This will close the camera stream and free up any resources used by the
|
|
2139
|
+
device. If the camera is currently recording, this will stop the
|
|
2140
|
+
recording, but will not discard any frames. You may still call `save()`
|
|
2141
|
+
to save the frames to disk.
|
|
2142
|
+
|
|
2143
|
+
"""
|
|
2144
|
+
self._closeMovieFileWriter()
|
|
2145
|
+
|
|
2146
|
+
self._capture.close() # close the camera stream
|
|
2147
|
+
self._capture = None # clear the capture object
|
|
2148
|
+
|
|
2149
|
+
if self.mic is not None:
|
|
2150
|
+
self.mic.close()
|
|
2151
|
+
|
|
2152
|
+
self._isStarted = False
|
|
2153
|
+
|
|
2154
|
+
def _mergeAudioVideoTracks(self, videoTrackFile, audioTrackFile,
|
|
2155
|
+
filename, writerOpts=None):
|
|
2156
|
+
"""Use FFMPEG to merge audio and video tracks into a single file.
|
|
2157
|
+
|
|
2158
|
+
Parameters
|
|
2159
|
+
----------
|
|
2160
|
+
videoTrackFile : str
|
|
2161
|
+
Path to the video track file to merge.
|
|
2162
|
+
audioTrackFile : str
|
|
2163
|
+
Path to the audio track file to merge.
|
|
2164
|
+
filename : str
|
|
2165
|
+
Path to the output file to save the merged audio and video tracks.
|
|
2166
|
+
writerOpts : dict or None
|
|
2167
|
+
Options to pass to the movie writer. If `None`, default options
|
|
2168
|
+
will be used. This is useful for specifying the codec, bitrate,
|
|
2169
|
+
etc. for the output file.
|
|
2170
|
+
|
|
2171
|
+
Returns
|
|
2172
|
+
-------
|
|
2173
|
+
str
|
|
2174
|
+
Path to the output file with merged audio and video tracks.
|
|
2175
|
+
|
|
2176
|
+
"""
|
|
2177
|
+
import subprocess as sp
|
|
2178
|
+
|
|
2179
|
+
# check if the video and audio track files exist
|
|
2180
|
+
if not os.path.exists(videoTrackFile):
|
|
2181
|
+
raise FileNotFoundError(
|
|
2182
|
+
"Video track file `{}` does not exist.".format(videoTrackFile))
|
|
2183
|
+
if not os.path.exists(audioTrackFile):
|
|
2184
|
+
raise FileNotFoundError(
|
|
2185
|
+
"Audio track file `{}` does not exist.".format(audioTrackFile))
|
|
2186
|
+
|
|
2187
|
+
# check if the output file already exists
|
|
2188
|
+
if os.path.exists(filename):
|
|
2189
|
+
logging.warning(
|
|
2190
|
+
"Output file `{}` already exists, it will be overwritten.".format(filename))
|
|
2191
|
+
os.remove(filename)
|
|
2192
|
+
|
|
2193
|
+
# build the command to merge audio and video tracks
|
|
2194
|
+
cmd = [
|
|
2195
|
+
'ffmpeg',
|
|
2196
|
+
'-loglevel', 'error', # suppress output except errors
|
|
2197
|
+
'-nostdin', # do not read from stdin
|
|
2198
|
+
'-y', # overwrite output file if it exists
|
|
2199
|
+
'-i', videoTrackFile, # input video track
|
|
2200
|
+
'-i', audioTrackFile, # input audio track
|
|
2201
|
+
'-c:v', 'copy', # copy video codec
|
|
2202
|
+
'-c:a', 'aac', # use AAC for audio codec
|
|
2203
|
+
'-strict', 'experimental', # allow experimental codecs
|
|
2204
|
+
'-threads', 'auto', # use all available threads
|
|
2205
|
+
'-shortest' # stop when the shortest input ends
|
|
2206
|
+
]
|
|
2207
|
+
# add output file
|
|
2208
|
+
cmd.append(filename)
|
|
2209
|
+
|
|
2210
|
+
# apply any writer options if provided
|
|
2211
|
+
if writerOpts is not None:
|
|
2212
|
+
for key, value in writerOpts.items():
|
|
2213
|
+
if isinstance(value, str):
|
|
2214
|
+
cmd.append('-' + key)
|
|
2215
|
+
cmd.append(value)
|
|
2216
|
+
elif isinstance(value, bool) and value:
|
|
2217
|
+
cmd.append('-' + key)
|
|
2218
|
+
elif isinstance(value, (int, float)):
|
|
2219
|
+
cmd.append('-' + key)
|
|
2220
|
+
cmd.append(str(value))
|
|
2221
|
+
|
|
2222
|
+
logging.debug(
|
|
2223
|
+
"Merging audio and video tracks with command: {}".format(' '.join(cmd))
|
|
2224
|
+
)
|
|
2225
|
+
|
|
2226
|
+
# run the command to merge audio and video tracks
|
|
2227
|
+
try:
|
|
2228
|
+
proc = sp.Popen(
|
|
2229
|
+
cmd,
|
|
2230
|
+
stdout=sp.PIPE,
|
|
2231
|
+
stderr=sp.PIPE,
|
|
2232
|
+
stdin=sp.DEVNULL if hasattr(sp, 'DEVNULL') else None,
|
|
2233
|
+
universal_newlines=True, # use text mode for output
|
|
2234
|
+
text=True
|
|
2235
|
+
)
|
|
2236
|
+
proc.wait() # wait for the process to finish
|
|
2237
|
+
if proc.returncode != 0:
|
|
2238
|
+
logging.error(
|
|
2239
|
+
"FFMPEG returned non-zero exit code {} for command: {}".format(
|
|
2240
|
+
proc.returncode, cmd
|
|
2241
|
+
)
|
|
2242
|
+
)
|
|
2243
|
+
# wait for the process to finish
|
|
2244
|
+
except sp.CalledProcessError as e:
|
|
2245
|
+
logging.error(
|
|
2246
|
+
"Failed to merge audio and video tracks: {}".format(e))
|
|
2247
|
+
return None
|
|
2248
|
+
|
|
2249
|
+
logging.info(
|
|
2250
|
+
"Merged audio and video tracks into `{}`".format(filename))
|
|
2251
|
+
|
|
2252
|
+
return filename
|
|
2253
|
+
|
|
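The merge helper added above shells out to the ffmpeg CLI rather than transcoding with MoviePy (the commented-out path further below). A minimal standalone sketch of the same approach, assuming `ffmpeg` is on the PATH and using hypothetical file names; note that it is `subprocess.run(..., check=True)` that actually raises `CalledProcessError`, whereas `Popen.wait()` only sets `returncode`:

    import subprocess

    def mergeAV(videoPath, audioPath, outPath):
        # copy the video stream as-is, re-encode audio to AAC, and stop
        # at the end of the shorter input
        cmd = ['ffmpeg', '-y', '-nostdin', '-loglevel', 'error',
               '-i', videoPath, '-i', audioPath,
               '-c:v', 'copy', '-c:a', 'aac', '-shortest', outPath]
        subprocess.run(cmd, check=True)  # raises CalledProcessError on failure

    mergeAV('video.mp4', 'audio.wav', 'merged.mp4')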
2254 +     def save(self, filename, useThreads=True, mergeAudio=True, writerOpts=None):
2255 +         """Save the last recording to file.
2256 +
2257 +         This will write frames to `filename` acquired since the last call of
2258 +         `record()` and subsequent `stop()`. If `record()` is called again before
2259 +         `save()`, the previous recording will be deleted and lost.
2260 +
2261 +         This is a slow operation and will block for some time depending on the
2262 +         length of the video. This can be sped up by setting `useThreads=True` if
2263 +         supported.
2264 +
2265 +         Parameters
2266 +         ----------
2267 +         filename : str
2268 +             File to save the resulting video to, should include the extension.
2269 +         useThreads : bool
2270 +             Use threading where possible to speed up the saving process.
2271 +         mergeAudio : bool
2272 +             Merge the audio track from the microphone with the video into a
2273 +             single file if `True`. If `False`, the audio track will be saved
2274 +             to a separate file with the same name as `filename`, but with a
2275 +             `.wav` extension. This is useful if you want to process the audio
2276 +             track separately, or merge it with the video later on as the process
2277 +             is computationally expensive and memory consuming. Default is
2278 +             `True`.
2279 +         writerOpts : dict or None
2280 +             Options to pass to the movie writer. If `None`, default options
2281 +             will be used.
2282 +
2283 +         """
2284 +         # stop if still recording
2285 +         if self._isRecording:
2286 +             self.stop()
2287 +             logging.warning(
2288 +                 "Called `Camera.save()` while recording, stopping the "
2289 +                 "recording first."
2290 +             )
2291 +
2292 +         # if there's nothing unsaved, do nothing
2293 +         if not self._unsaved:
2294 +             return
2295 +
2296 +         # check if we have an active movie writer
2297 +         if self._movieWriter is not None:
2298 +             self._movieWriter.close()  # close the movie writer
2299 +
2300 +         # check if we have a temp movie file
2301 +         videoTrackFile = self._tempVideoFile
2302 +
2303 +         # write the temporary audio track to file if we have one
2304 +         tStart = time.time()  # start time for the operation
2305 +         if self.mic is not None:
2306 +             audioTrack = self.mic.getRecording()
2307 +
2308 +             if audioTrack is not None:
2309 +                 logging.debug(
2310 +                     "Saving audio track to file `{}`...".format(filename))
2311 +
2312 +                 # trim off samples before the recording started
2313 +                 audioTrack = audioTrack.trimmed(
2314 +                     direction='start',
2315 +                     duration=self._absAudioRecStartPos,
2316 +                     units='samples')
2317 +
2318 +                 if mergeAudio:
2319 +                     logging.debug("Merging audio track with video track...")
2320 +                     # save it to a temp file
2321 +                     import tempfile
2322 +                     tempAudioFile = tempfile.NamedTemporaryFile(
2323 +                         suffix='.wav', delete=False)
2324 +                     audioTrackFile = tempAudioFile.name
2325 +                     tempAudioFile.close()  # close the file so we can use it later
2326 +                     audioTrack.save(audioTrackFile)
2327 +
2328 +                     # # composite audio a video tracks using MoviePy (huge thanks to
2329 +                     # # that team)
2330 +                     # from moviepy.video.io.VideoFileClip import VideoFileClip
2331 +                     # from moviepy.audio.io.AudioFileClip import AudioFileClip
2332 +                     # from moviepy.audio.AudioClip import CompositeAudioClip
2333 +
2334 +                     # videoClip = VideoFileClip(videoTrackFile)
2335 +                     # audioClip = AudioFileClip(audioTrackFile)
2336 +                     # videoClip.audio = CompositeAudioClip([audioClip])
2337 +
2338 +                     # # default options for the writer, needed or we can crash
2339 +                     # moviePyOpts = {
2340 +                     #     'logger': None
2341 +                     # }
2342 +
2343 +                     # if writerOpts is not None:  # make empty dict if not provided
2344 +                     #     moviePyOpts.update(writerOpts)
2345 +
2346 +                     # # transcode with the format the user wants
2347 +                     # videoClip.write_videofile(
2348 +                     #     filename,
2349 +                     #     **moviePyOpts)  # expand out options
2350 +
2351 +                     # videoClip.close()  # close the video clip
2352 +                     # audioClip.close()
2353 +
2354 +                     # merge audio and video tracks using FFMPEG
2355 +                     mergedVideo = self._mergeAudioVideoTracks(
2356 +                         videoTrackFile,
2357 +                         audioTrackFile,
2358 +                         filename,
2359 +                         writerOpts=writerOpts)
2360 +
2361 +                     os.remove(audioTrackFile)  # remove the temp file
2362 +
2363 +                 else:
2364 +                     tAudioStart = time.time()  # start time for audio saving
2365 +                     # just save the audio file separately
2366 +                     # check if the filename has an extension
2367 +                     if '.' not in filename:
2368 +                         audioTrackFile = filename + '.wav'
2369 +                     else:
2370 +                         # if it has an extension, use the same name but with .wav
2371 +                         # extension
2372 +                         rootName, _ = os.path.splitext(filename)
2373 +                         audioTrackFile = rootName + '.wav'
2374 +
2375 +                     audioTrack.save(audioTrackFile)
2376 +
2377 +                     logging.info(
2378 +                         "Saved recorded audio track to `{}` (took {:.6f} seconds)".format(
2379 +                             audioTrackFile, time.time() - tAudioStart))
2380 +
2381 +                     # just copy the video from the temp file to the final file
2382 +                     import shutil
2383 +                     shutil.copyfile(videoTrackFile, filename)
2384 +
2385 +         else:
2386 +             # just copy the video file to the destination
2387 +             import shutil
2388 +             shutil.copyfile(videoTrackFile, filename)
2389 +
2390 +         os.remove(videoTrackFile)  # remove the temp file
2391 +
2392 +         logging.info(
2393 +             "Saved recorded video to `{}` (took {:.6f} seconds)".format(
2394 +                 filename, time.time() - tStart))
2395 +
2396 +         self._frameStore.clear()  # clear the frame store
2397 +         # mark that there's no longer unsaved footage
2398 +         self._unsaved = False
2399 +
2400 +         self._lastVideoFile = filename  # store the last video file saved
2401 +
2402 +         return self._lastVideoFile
2403 +
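The intended call sequence is record, then stop, then save. A short usage sketch, assuming a configured `Camera` instance named `cam` (hypothetical):

    cam.record()                # begin capturing video (and audio if a mic is attached)
    # ... run the trial ...
    cam.stop()                  # stop capture and flush the streams
    cam.save('trial01.mp4')     # single merged A/V file (the default)

    # or keep the audio out of the container; this writes trial01.mp4
    # plus a separate trial01.wav
    cam.save('trial01.mp4', mergeAudio=False)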
2404 +     def _upload(self):
2405 +         """Upload video file to an online repository. Not implemented locally,
2406 +         needed for auto translate to JS.
2407 +         """
2408 +         pass  # NOP
2409 +
2410 +     def _download(self):
2411 +         """Download video file from an online repository. Not implemented locally,
2412 +         needed for auto translate to JS.
2413 +         """
2414 +         pass  # NOP
2415 +
2416 +     @property
2417 +     def lastClip(self):
2418 +         """File path to the last recording (`str` or `None`).
2419 +
2420 +         This value is only valid if a previous recording has been saved
2421 +         successfully (`save()` was called), otherwise it will be set to `None`.
2422 +
2423 +         """
2424 +         return self.getLastClip()
2425 +
2426 +     def getLastClip(self):
2427 +         """File path to the last saved recording.
2428 +
2429 +         This value is only valid if a previous recording has been saved to disk
2430 +         (`save()` was called).
2431 +
2432 +         Returns
2433 +         -------
2434 +         str or None
2435 +             Path to the file the most recent call to `save()` created. Returns
2436 +             `None` if no file is ready.
2437 +
2438 +         """
2439 +         return self._lastVideoFile
2440 +
2441 +     @property
2442 +     def lastFrame(self):
2443 +         """Most recent frame pulled from the camera (`VideoFrame`) since the
2444 +         last call of `getVideoFrame`.
2445 +         """
2446 +         return self._lastFrame
2447 +
2448 +     @property
2449 +     def frameCount(self):
2450 +         """Total number of frames captured in the current recording (`int`).
2451 +
2452 +         This is the total number of frames captured since the last call to
2453 +         `record()`. This value is reset when `record()` is called again.
2454 +
2455 +         """
2456 +         return self._frameCount
2457 +
2458 +     @property
2459 +     def hasMic(self):
2460 +         """`True` if the camera has a microphone attached (`bool`).
2461 +
2462 +         This is `True` if the camera has a microphone attached and is ready to
2463 +         record audio. If the camera does not have a microphone, this will be
2464 +         `False`.
2465 +
2466 +         """
2467 +         return self.mic is not None
2468 +
2469 +     def _convertFrameToRGBFFPyPlayer(self, frame):
2470 +         """Convert a frame to RGB format.
2471 +
2472 +         This function converts a frame to RGB format. The frame is returned as
2473 +         a Numpy array. The resulting array will be in the correct format to
2474 +         upload to OpenGL as a texture.
2475 +
2476 +         Parameters
2477 +         ----------
2478 +         frame : FFPyPlayer frame
2479 +             The frame to convert.
2480 +
2481 +         Returns
2482 +         -------
2483 +         numpy.ndarray
2484 +             The converted frame in RGB format.
2485 +
2486 +         """
2487 +         from ffpyplayer.pic import SWScale
2488 +         if frame.get_pixel_format() == 'rgb24':  # already converted
2489 +             return frame
2490 +
2491 +         rgbImg = SWScale(
2492 +             self._metadata.size[0], self._metadata.size[1],  # width, height
2493 +             frame.get_pixel_format(),
2494 +             ofmt='rgb24').scale(frame)
2495 +
2496 +         return rgbImg
2497 +
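The `SWScale` pattern used here is FFPyPlayer's software scaler and colorspace converter. A standalone sketch of the same conversion from an `ffpyplayer.pic.Image` to a numpy array; the `img` variable is hypothetical, and the reshape assumes the default unaligned plane copy:

    import numpy as np
    from ffpyplayer.pic import SWScale

    w, h = img.get_size()
    if img.get_pixel_format() != 'rgb24':
        img = SWScale(w, h, img.get_pixel_format(), ofmt='rgb24').scale(img)

    # 'rgb24' has a single interleaved plane; view it as H x W x 3
    rgb = np.frombuffer(img.to_memoryview()[0].memview, dtype=np.uint8)
    rgb = rgb.reshape((h, w, 3))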
2498 +     def update(self):
2499 +         """Acquire the newest data from the camera and audio streams.
2500 +
2501 +         This must be called periodically to ensure that stream buffers are
2502 +         flushed before they overflow to prevent data loss. Furthermore,
2503 +         calling this too infrequently may also result in more frames
2504 +         needing to be processed at once, which may result in performance issues.
2505 +
2506 +         Returns
2507 +         -------
2508 +         int
2509 +             Number of frames captured since the last call to this method. This
2510 +             will be `0` if no new frames were captured since the last call,
2511 +             indicating that the poll function is getting called too
2512 +             frequently or that the camera is not producing new frames (i.e.
2513 +             paused or closed). If `-1` is returned, it indicates that
2514 +             either or both of the camera and microphone are not in a ready
2515 +             state, albeit both interfaces are open. This can happen if `update()` is
2516 +             called very shortly after `record()`.
2517 +
2518 +         Examples
2519 +         --------
2520 +         Capture camera frames in a loop::
2521 +
2522 +             while cam.recordingTime < 10.0:  # record for 10 seconds
2523 +                 numFrames = cam.update()  # update the camera stream
2524 +                 if numFrames > 0:
2525 +                     frame = cam.getVideoFrame()  # get the most recent frame
2526 +                     # do something with the frame, e.g. display it
2527 +                 else:
2528 +                     # return last frame or placeholder frame if nothing new
2529 +
2530 +         """
2531 +         # poll camera for new frames
2532 +         newFrames = self._capture.getFrames()  # get new frames from the camera
2533 +
2534 +         if not self._videoReady and newFrames:
2535 +             # if we have new frames, we can set the video ready flag
2536 +             self._videoReady = True
2537 +
2538 +         if self.hasMic and not self.mic._stream._closed:
2539 +             # poll the microphone for audio samples
2540 +             audioPos, overflows = self.mic.poll()
2541 +
2542 +             if (not self._audioReady) and self._videoReady:
2543 +                 nNewFrames = len(newFrames)
2544 +                 # determine which acquired video frame the audio starts at
2545 +                 keepFrames = []
2546 +                 for i, frame in enumerate(newFrames):
2547 +                     _, _, streamTime = frame
2548 +                     if streamTime >= self._absAudioActualRecStartTime:
2549 +                         keepFrames.append(frame)
2550 +
2551 +                 # If we arrived at the audio start time and there is a video
2552 +                 # frame captured after that, we can compute the exact position
2553 +                 # of the sample in the audio track that corresponds to that
2554 +                 # frame. This will allow us to align the audio and video streams
2555 +                 # when saving the video file.
2556 +                 if keepFrames:
2557 +                     _, _, streamTime = keepFrames[0]
2558 +
2559 +                     # delta between the first video frame's capture timestamp
2560 +                     # and the time the mic reported itself as ready. Used to
2561 +                     # align the audio and video streams
2562 +                     frameSyncFudge = (
2563 +                         streamTime - self._absAudioActualRecStartTime)
2564 +
2565 +                     # compute exact time the first audio sample was recorded
2566 +                     # from the audio position and actual recording start time
2567 +                     absFirstAudioSampleTime = \
2568 +                         self._absAudioActualRecStartTime - (
2569 +                             audioPos / self.mic.sampleRateHz)
2570 +
2571 +                     # compute how many samples we will discard from the audio
2572 +                     # track to align it with the video stream
2573 +                     self._absAudioRecStartPos = \
2574 +                         ((streamTime - absFirstAudioSampleTime) + \
2575 +                         frameSyncFudge + self._latencyBias) * self.mic.sampleRateHz
2576 +                     self._absAudioRecStartPos = int(self._absAudioRecStartPos)
2577 +
2578 +                     # convert to samples
2579 +                     self._audioReady = True
2580 +
2581 +                 newFrames = keepFrames  # keep only frames after the audio start time
2582 +
2583 +         else:
2584 +             self._audioReady = True  # no mic, so we just set the flag
2585 +
2586 +         if not self.isReady:
2587 +             # if the camera is not ready, return -1 to indicate that we are not
2588 +             # ready to process frames yet
2589 +             return -1
2590 +
2591 +         if not newFrames:
2592 +             # if no new frames were captured, return 0 to indicate that we have
2593 +             # no new frames to process
2594 +             return 0
2595 +
2596 +         # put last frames into the frame store
2597 +         nNewFrames = len(newFrames)
2598 +         if nNewFrames > self._frameStore.maxlen:
2599 +             logging.warning(
2600 +                 "Frame store overflowed, some frames may have been lost. "
2601 +                 "Consider increasing the `keepFrames` parameter when creating "
2602 +                 "the camera object or polling the camera more frequently."
2603 +             )
2604 +
2605 +         self._frameCount += nNewFrames  # update total frames count
2606 +         # push all frames into the frame store
2607 +         for colorData, pts, streamTime in newFrames:
2608 +             # if camera is in CV mode, convert the frame to RGB
2609 +             if self._usageMode == CAMERA_MODE_CV:
2610 +                 colorData = self._convertFrameToRGBFFPyPlayer(colorData)
2611 +             # add the frame to the frame store
2612 +             self._frameStore.append((colorData, pts, streamTime))
2613 +
2614 +         # if we have frames, update the last frame
2615 +         colorData, pts, streamTime = newFrames[-1]
2616 +         self._lastFrame = (
2617 +             self._convertFrameToRGBFFPyPlayer(colorData),  # convert to RGB, nop if already
2618 +             pts,  # presentation timestamp
2619 +             streamTime
2620 +         )
2621 +
2622 +         self._pixelTransfer()  # transfer frames to the GPU if we have a window
2623 +
2624 +         # write frames out to video file
2625 +         if self._usageMode == CAMERA_MODE_VIDEO:
2626 +             for frame in newFrames:
2627 +                 self._submitFrameToFile(frame)
2628 +         elif self._usageMode == CAMERA_MODE_CV:
2629 +             pass
2630 +
2631 +         return nNewFrames  # return number of frames we got
2632 +
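The audio/video alignment above is plain sample arithmetic; a worked sketch with hypothetical numbers (a 48 kHz mic that reported ready at stream time 10.000 s with 480 samples already buffered, first kept video frame at 10.050 s, zero latency bias):

    sampleRateHz = 48000.0
    absAudioActualRecStartTime = 10.000  # when the mic reported ready (s)
    audioPos = 480                       # samples captured by that point
    streamTime = 10.050                  # first kept video frame (s)
    latencyBias = 0.0

    # time of the very first sample sitting in the audio buffer
    absFirstAudioSampleTime = (
        absAudioActualRecStartTime - audioPos / sampleRateHz)  # 9.990 s

    frameSyncFudge = streamTime - absAudioActualRecStartTime   # 0.050 s

    # samples to trim so the audio lines up with the first video frame:
    # (0.060 + 0.050 + 0.0) * 48000 = 5280 samples
    startPos = int(((streamTime - absFirstAudioSampleTime)
                    + frameSyncFudge + latencyBias) * sampleRateHz)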
2633 +     def poll(self):
2634 +         """Poll the camera for new frames.
2635 +
2636 +         Alias for `update()`.
2637 +         """
2638 +         return self.update()
2639 +
2640 +     def getVideoFrames(self):
2641 +         """Get the most recent frames from the stream (if available).
2642 +
2643 +         Returns
2644 +         -------
2645 +         list of tuple
2646 +             List of recent video frames. This will return a list of frame images
2647 +             as numpy arrays, their presentation timestamp in the recording, and
2648 +             the absolute stream time in seconds. Frames will be converted
2649 +             to RGB format if they are not already. The number of frames returned
2650 +             will be limited by the `keepFrames` parameter set when creating the
2651 +             camera object. If no frames are available, an empty list will be
2652 +             returned.
2653 +
2654 +         """
2655 +         self.update()
2656 +
2657 +         recentFrames = [
2658 +             self._convertFrameToRGBFFPyPlayer(frame) for frame in self._frameStore]
2659 +
2660 +         return recentFrames
2661 +
2662 +     def getRecentVideoFrame(self):
2663 +         """Get the most recent video frame from the camera.
2664 +
2665 +         Returns
2666 +         -------
2667 +         VideoFrame or None
2668 +             Most recent video frame. Returns `None` if no frame was available,
2669 +             or we timed out.
2670 +
2671 +         """
2672 +         self.update()
2673 +
2674 +         return self._lastFrame[0] if self._lastFrame else None
2675 +
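Per the docstrings above, a quick retrieval sketch (again with a hypothetical `cam` instance):

    history = cam.getVideoFrames()      # recent frames, capped by `keepFrames`
    newest = cam.getRecentVideoFrame()  # just the newest image, or None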
2676 +     # --------------------------------------------------------------------------
2677 +     # Audio track
2678 +     #
2679 +
2680 +     def getAudioTrack(self):
2681 +         """Get the audio track data.
2682 +
2683 +         Returns
2684 +         -------
2685 +         AudioClip or None
2686 +             Audio track data from the microphone if available, or `None` if
2687 +             no microphone is set or no audio was recorded.
2688 +
2689 +         """
2690 +         return self.mic.getRecording() if self.mic else None
2691 +
2692 +     # --------------------------------------------------------------------------
2693 +     # Video rendering
2694 +     #
2695 +     # These methods are used to render live video frames to a window. If a
2696 +     # window is set, this class will automatically create the necessary
2697 +     # OpenGL texture buffers and transfers the most recent video frame to the
2698 +     # GPU when `update` is called. The `ImageStim` class can access these
2699 +     # buffers for rendering by setting this class as the `image`.
2700 +     #
2701 +
2702 +     @property
2703 +     def win(self):
2704 +         """Window to render the video frames to (`psychopy.visual.Window` or
2705 +         `None`).
2706 +
2707 +         If `None`, no rendering will be done and the video frames will not be
2708 +         displayed. If a window is set, the video frames will be rendered to the
2709 +         window using OpenGL textures.
2710 +
2711 +         """
2712 +         return self._win
2713 +
2714 +     @win.setter
2715 +     def win(self, value):
2716 +         """Set the window to render the video frames to.
2717 +
2718 +         This will set the window to render the video frames to. If the window
2719 +         is not `None`, it will automatically create OpenGL texture buffers for
2720 +         rendering the video frames. If the window is `None`, no rendering will
2721 +         be done and the video frames will not be displayed.
2722 +
2723 +         Parameters
2724 +         ----------
2725 +         value : psychopy.visual.Window or None
2726 +             Window to render the video frames to. If `None`, no rendering will
2727 +             be done and the video frames will not be displayed.
2728 +
2729 +         """
2730 +         self.setWin(value)
2731 +
2732 +     def setWin(self, win):
2733 +         """Set the window to render the video frames to.
2734 +
2735 +         Parameters
2736 +         ----------
2737 +         win : psychopy.visual.Window
2738 +             Window to render the video frames to. If `None`, no rendering will
2739 +             be done and the video frames will not be displayed.
2024 2740
2025 2741          """
2026 -
2027 -
2742 +         self._win = win
2743 +
2744 +         # if we have a window, setup texture buffers for displaying
2745 +         if self._win is not None:
2746 +             self._setupTextureBuffers()
2747 +             return
2028 2748
2029 -
2749 +         # if we don't have a window, free any texture buffers
2750 +         self._freeTextureBuffers()  # free any existing buffers
2030 2751
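Tying the rendering notes together: once a window is assigned, `update()` keeps the GPU texture current, and per the comment block above an `ImageStim` can use this class as its `image` source. A preview-loop sketch under those assumptions (window setup and the `cam` instance are hypothetical):

    from psychopy import visual

    win = visual.Window()
    cam.win = win  # allocates the OpenGL texture buffers via setWin()
    preview = visual.ImageStim(win, image=cam)  # camera as the image source

    while cam.isReady:
        cam.update()     # pull frames; the newest one is uploaded to the GPU
        preview.draw()
        win.flip()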
2031 2752      @property
2032 -     def
2033 -         """
2753 +     def interpolate(self):
2754 +         """Whether the video texture should be filtered using linear or nearest
2755 +         neighbor interpolation (`bool`).
2034 2756
2035 -
2757 +         If `True`, the video texture will be filtered using linear interpolation.
2758 +         If `False`, the video texture will be filtered using nearest neighbor
2759 +         interpolation (pass-through). Default is `True`.
2036 2760
2037 -
2038 -
2039 -
2761 +         """
2762 +         return self._interpolate
2763 +
2764 +     @interpolate.setter
2765 +     def interpolate(self, value):
2766 +         """Set whether the video texture should be filtered using linear or
2767 +         nearest neighbor interpolation.
2768 +
2769 +         Parameters
2770 +         ----------
2771 +         value : bool
2772 +             If `True`, the video texture will be filtered using linear
2773 +             interpolation. If `False`, the video texture will be filtered using
2774 +             nearest neighbor interpolation (pass-through). Default is `True`.
2040 2775
2041 2776          """
2042 -
2043 -         return 0.0
2777 +         self.setTextureFilter(value)
2044 2778
2045 -
2779 +     def setTextureFilter(self, smooth=True):
2780 +         """Set whether the video texture should be filtered using linear or
2781 +         nearest neighbor interpolation.
2046 2782
2047 -
2783 +         Parameters
2784 +         ----------
2785 +         smooth : bool
2786 +             If `True`, the video texture will be filtered using linear
2787 +             interpolation. If `False`, the video texture will be filtered using
2788 +             nearest neighbor interpolation (pass-through). Default is `True`.
2048 2789
2049 -     @property
2050 -     def recordingBytes(self):
2051 -         """Current size of the recording in bytes (`int`).
2052 2790          """
2053 -
2054 -
2791 +         self._interpolate = bool(smooth)
2792 +         self._texFilterNeedsUpdate = True  # flag to update texture filtering
2055 2793
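Note the setter is deferred: it only flags the change, and `_pixelTransfer()` applies the new GL filter on the next frame upload. Usage is a one-liner (hypothetical `cam`):

    cam.interpolate = False  # nearest-neighbour filtering from the next uploaded frame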
2056 -
2794 +     def _freeTextureBuffers(self):
2795 +         """Free any texture buffers used by the camera.
2057 2796
2058 -
2059 -
2060 -
2061 -         This will raise a `RuntimeError` if there is no player open. Use this
2062 -         function to ensure that a player is present before running subsequent
2063 -         code.
2797 +         This is used to free up any texture buffers used by the camera. This
2798 +         is called when the camera is closed or when the window is closed.
2064 2799          """
2065 -
2066 -         return
2800 +         import pyglet.gl as GL  # needed for OpenGL texture management
2067 2801
2068 -
2802 +         try:
2803 +             # delete buffers and textures if previously created
2804 +             if self._pixbuffId is not None and self._pixbuffId.value > 0:
2805 +                 GL.glDeleteBuffers(1, self._pixbuffId)
2806 +             # delete the old texture if present
2807 +             if self._textureId is not None and self._textureId.value > 0:
2808 +                 GL.glDeleteTextures(1, self._textureId)
2809 +         except (TypeError, AttributeError):
2810 +             pass
2811 +
2812 +         # clear the IDs
2813 +         self._pixbuffId = GL.GLuint(0)
2814 +         self._textureId = GL.GLuint(0)
2815 +
2816 +     def _setupTextureBuffers(self):
2817 +         """Setup texture buffers for the camera.
2818 +
2819 +         This allocates OpenGL texture buffers for video frames to be written
2820 +         to, which can then be rendered to the screen. This is only called if the
2821 +         camera is opened and a window is set.
2822 +
2823 +         """
2824 +         if self.win is None:
2825 +             return
2826 +
2827 +         self._freeTextureBuffers()  # free any existing buffers
2828 +
2829 +         import pyglet.gl as GL
2830 +
2831 +         # get the size of the movie frame and compute the buffer size
2832 +         vidWidth, vidHeight = self.frameSize
2833 +         nBufferBytes = self._texBufferSizeBytes = (
2834 +             vidWidth * vidHeight * 3)
2835 +
2836 +         # Create the pixel buffer object which will serve as the texture memory
2837 +         # store. Pixel data will be copied to this buffer each frame.
2838 +         GL.glGenBuffers(1, ctypes.byref(self._pixbuffId))
2839 +         GL.glBindBuffer(GL.GL_PIXEL_UNPACK_BUFFER, self._pixbuffId)
2840 +         GL.glBufferData(
2841 +             GL.GL_PIXEL_UNPACK_BUFFER,
2842 +             nBufferBytes * ctypes.sizeof(GL.GLubyte),
2843 +             None,
2844 +             GL.GL_STREAM_DRAW)  # one-way app -> GL
2845 +         GL.glBindBuffer(GL.GL_PIXEL_UNPACK_BUFFER, 0)
2846 +
2847 +         # Create a texture which will hold the data streamed to the pixel
2848 +         # buffer. Only one texture needs to be allocated.
2849 +         GL.glEnable(GL.GL_TEXTURE_2D)
2850 +         GL.glGenTextures(1, ctypes.byref(self._textureId))
2851 +         GL.glBindTexture(GL.GL_TEXTURE_2D, self._textureId)
2852 +         GL.glTexImage2D(
2853 +             GL.GL_TEXTURE_2D,
2854 +             0,
2855 +             GL.GL_RGB8,
2856 +             vidWidth, vidHeight,  # frame dims in pixels
2857 +             0,
2858 +             GL.GL_RGB,
2859 +             GL.GL_UNSIGNED_BYTE,
2860 +             None)
2861 +
2862 +         # setup texture filtering
2863 +         if self._interpolate:
2864 +             texFilter = GL.GL_LINEAR
2865 +         else:
2866 +             texFilter = GL.GL_NEAREST
2069 2867
2070 -
2071 -
2868 +         GL.glTexParameteri(
2869 +             GL.GL_TEXTURE_2D,
2870 +             GL.GL_TEXTURE_MAG_FILTER,
2871 +             texFilter)
2872 +         GL.glTexParameteri(
2873 +             GL.GL_TEXTURE_2D,
2874 +             GL.GL_TEXTURE_MIN_FILTER,
2875 +             texFilter)
2876 +         GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP)
2877 +         GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_CLAMP)
2878 +         GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
2879 +         GL.glDisable(GL.GL_TEXTURE_2D)
2072 2880
2073 -
2074 -         the buffer. The last frame in the buffer will be set as the most recent
2075 -         frame (`lastFrame`).
2881 +         GL.glFlush()  # make sure all buffers are ready
2076 2882
2077 -
2078 -
2079 -         bool
2080 -             `True` if a frame has been enqueued. Returns `False` if the camera
2081 -             is not ready or if the stream was closed.
2883 +     def _pixelTransfer(self):
2884 +         """Copy pixel data from video frame to texture.
2082 2885
2083 -
2084 -
2085 -         return False
2886 +         This is called when a new frame is available. The pixel data is copied
2887 +         from the video frame to the texture store on the GPU.
2086 2888
2087 -
2088 -         if
2089 -             return
2889 +         """
2890 +         if self.win is None:
2891 +             return  # no window to render to
2090 2892
2091 -
2092 -
2893 +         import pyglet.gl as GL
2894 +
2895 +         # get the size of the movie frame and compute the buffer size
2896 +         vidWidth, vidHeight = self.frameSize
2093 2897
2094 -         #
2095 -
2898 +         # compute the buffer size
2899 +         nBufferBytes = self._texBufferSizeBytes
2900 +
2901 +         # bind pixel unpack buffer
2902 +         GL.glBindBuffer(GL.GL_PIXEL_UNPACK_BUFFER, self._pixbuffId)
2903 +
2904 +         # Free last storage buffer before mapping and writing new frame
2905 +         # data. This allows the GPU to process the extant buffer in VRAM
2906 +         # uploaded last cycle without being stalled by the CPU accessing it.
2907 +         GL.glBufferData(
2908 +             GL.GL_PIXEL_UNPACK_BUFFER,
2909 +             nBufferBytes * ctypes.sizeof(GL.GLubyte),
2910 +             None,
2911 +             GL.GL_STREAM_DRAW)
2912 +
2913 +         # Map the buffer to client memory, `GL_WRITE_ONLY` to tell the
2914 +         # driver to optimize for a one-way write operation if it can.
2915 +         bufferPtr = GL.glMapBuffer(
2916 +             GL.GL_PIXEL_UNPACK_BUFFER,
2917 +             GL.GL_WRITE_ONLY)
2918 +
2919 +         # map the video frame to a memoryview
2920 +         # suggested by Alex Forrence (aforren1) originally in PR #6439
2921 +         videoBuffer = self._lastFrame[0].to_memoryview()[0].memview
2922 +         videoFrameArray = np.frombuffer(videoBuffer, dtype=np.uint8)
2096 2923
2097 -
2924 +         # copy the frame data to the buffer
2925 +         ctypes.memmove(bufferPtr,
2926 +                        videoFrameArray.ctypes.data,
2927 +                        nBufferBytes)
2928 +
2929 +         # Very important that we unmap the buffer data after copying, but
2930 +         # keep the buffer bound for setting the texture.
2931 +         GL.glUnmapBuffer(GL.GL_PIXEL_UNPACK_BUFFER)
2932 +
2933 +         # bind the texture in OpenGL
2934 +         GL.glEnable(GL.GL_TEXTURE_2D)
2935 +         GL.glActiveTexture(GL.GL_TEXTURE0)
2936 +         GL.glBindTexture(GL.GL_TEXTURE_2D, self._textureId)
2937 +
2938 +         # copy the PBO to the texture (blocks on AMD for some reason)
2939 +         GL.glTexSubImage2D(
2940 +             GL.GL_TEXTURE_2D, 0, 0, 0,
2941 +             vidWidth, vidHeight,
2942 +             GL.GL_RGB,
2943 +             GL.GL_UNSIGNED_BYTE,
2944 +             0)  # point to the presently bound buffer
2945 +
2946 +         # update texture filtering only if needed
2947 +         if self._texFilterNeedsUpdate:
2948 +             if self._interpolate:
2949 +                 texFilter = GL.GL_LINEAR
2950 +             else:
2951 +                 texFilter = GL.GL_NEAREST
2098 2952
2099 -
2100 -
2953 +             GL.glTexParameteri(
2954 +                 GL.GL_TEXTURE_2D,
2955 +                 GL.GL_TEXTURE_MAG_FILTER,
2956 +                 texFilter)
2957 +             GL.glTexParameteri(
2958 +                 GL.GL_TEXTURE_2D,
2959 +                 GL.GL_TEXTURE_MIN_FILTER,
2960 +                 texFilter)
2101 2961
2102 -
2103 -         frames.
2962 +             self._texFilterNeedsUpdate = False
2104 2963
2105 -
2106 -
2964 +         # important to unbind the PBO
2965 +         GL.glBindBuffer(GL.GL_PIXEL_UNPACK_BUFFER, 0)
2966 +         GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
2967 +         GL.glDisable(GL.GL_TEXTURE_2D)
2107 2968
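The upload path above is the classic PBO "orphaning" idiom: re-specifying the buffer store with `glBufferData(..., None, GL_STREAM_DRAW)` before mapping hands the previous frame's memory back to the driver, so the CPU write never stalls on the GPU's read of last cycle's data. Condensed to its skeleton as a sketch (not the class's actual code path; all parameters hypothetical):

    import ctypes
    import pyglet.gl as GL

    def upload(pbo, tex, dataPtr, nBytes, w, h):
        GL.glBindBuffer(GL.GL_PIXEL_UNPACK_BUFFER, pbo)
        GL.glBufferData(GL.GL_PIXEL_UNPACK_BUFFER, nBytes, None,
                        GL.GL_STREAM_DRAW)             # orphan the old store
        ptr = GL.glMapBuffer(GL.GL_PIXEL_UNPACK_BUFFER, GL.GL_WRITE_ONLY)
        ctypes.memmove(ptr, dataPtr, nBytes)           # CPU -> fresh store
        GL.glUnmapBuffer(GL.GL_PIXEL_UNPACK_BUFFER)
        GL.glBindTexture(GL.GL_TEXTURE_2D, tex)
        GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, w, h,
                           GL.GL_RGB, GL.GL_UNSIGNED_BYTE, 0)  # source = bound PBO
        GL.glBindBuffer(GL.GL_PIXEL_UNPACK_BUFFER, 0)
        GL.glBindTexture(GL.GL_TEXTURE_2D, 0)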
2108 -
2109 -
2110 -
2969 +     @property
2970 +     def colorTexture(self):
2971 +         """OpenGL texture ID for the most recent video frame (`int` or `None`).
2111 2972
2112 -
2113 -
2114 -
2115 -         if self.
2116 -
2117 -             "Opening camera stream using FFmpeg. (device={})".format(desc))
2118 -             self._captureThread = CameraInterfaceFFmpeg(
2119 -                 device=self._cameraInfo,
2120 -                 mic=self._mic)
2121 -         elif self._cameraLib == u'opencv':
2122 -             logging.debug(
2123 -                 "Opening camera stream using OpenCV. (device={})".format(desc))
2124 -             self._captureThread = CameraInterfaceOpenCV(
2125 -                 device=self._cameraInfo,
2126 -                 mic=self._mic)
2127 -         else:
2128 -             raise ValueError(
2129 -                 "Invalid value for parameter `cameraLib`, expected one of "
2130 -                 "`'ffpyplayer'` or `'opencv'`.")
2973 +         This is the OpenGL texture ID that can be used to render the most
2974 +         recent video frame to a window. If no window is set, this will be `None`.
2975 +         """
2976 +         if self._textureId is None or self._textureId.value <= 0:
2977 +             return None
2131 2978
2132 -         self.
2979 +         return self._textureId
2133 2980
2134 -
2135 -
2136 -
2137 -
2138 -         # pass
2981 +     @property
2982 +     def colorTextureSizeBytes(self):
2983 +         """Size of the texture buffer used for rendering video frames
2984 +         (`int` or `None`).
2139 2985
2140 -
2141 -
2986 +         This returns the size of the texture buffer in bytes used for rendering
2987 +         video frames. This is only valid if the camera is opened.
2142 2988
2143 -
2144 -
2145 -
2146 -         recent frame to be captured using `lastFrame`.
2989 +         """
2990 +         if self._cameraInfo is None:
2991 +             return None
2147 2992
2148 -
2149 -
2150 -
2993 +         return self._texBufferSizeBytes
2994 +
2995 +     # --------------------------------------------------------------------------
2996 +     # Movie writer platform-specific methods
2997 +     #
2998 +     # These are used to write frames to a movie file. We used to use the
2999 +     # `MovieFileWriter` class for this, but for now we're implementing this
3000 +     # directly in the camera class. This may change in the future.
3001 +     #
2151 3002
2152 -
2153 -
2154 -         If a recording has been previously made without calling `save()` it will
2155 -         be discarded if `record()` is called again unless
2156 -         `clearLastRecording=False`.
3003 +     def _openMovieFileWriterFFPyPlayer(self, filename, encoderOpts=None):
3004 +         """Open a movie file writer using the FFPyPlayer library.
2157 3005
2158 3006          Parameters
2159 3007          ----------
2160 -
2161 -
2162 -
2163 -
2164 -             to the
3008 +         filename : str
3009 +             File to save the resulting video to, should include the extension.
3010 +         encoderOpts : dict or None
3011 +             Options to pass to the encoder. This is a dictionary of options
3012 +             specific to the encoder library being used. See the documentation
3013 +             for `~psychopy.tools.movietools.MovieFileWriter` for more details.
2165 3014
2166 3015          """
2167 -
2168 -             self.open()  # open the camera stream if we call record() first
2169 -             logging.warning(
2170 -                 "Called `Camera.record()` before opening the camera stream, "
2171 -                 "opening now. This is not recommended as it may incur a longer "
2172 -                 "than expected delay in the recording start time."
2173 -             )
2174 -
2175 -         # clear previous frames
2176 -         if clearLastRecording:
2177 -             self._captureFrames.clear()
2178 -
2179 -         self._audioTrack = None
2180 -         self._lastFrame = None
3016 +         from ffpyplayer.writer import MediaWriter
2181 3017
2182 -
2183 -         if self._mic is not None:
2184 -             logging.debug(
2185 -                 "Microphone interface available, starting audio recording.")
2186 -         else:
2187 -             logging.debug(
2188 -                 "No microphone interface provided, not recording audio.")
3018 +         encoderOpts = encoderOpts or {}
2189 3019
2190 -
2191 -         self.
3020 +         # options to configure the writer
3021 +         frameWidth, frameHeight = self.frameSize
2192 3022
2193 -
3023 +         writerOptions = {
3024 +             'pix_fmt_in': 'yuv420p',  # default for now using mp4
3025 +             'width_in': frameWidth,
3026 +             'height_in': frameHeight,
3027 +             'codec': 'libx264',
3028 +             'frame_rate': (int(self._capture.frameRate), 1)}
2194 3029
2195 -
2196 -         """Stop recording frames and audio (if available).
2197 -         """
2198 -         if self._captureThread is None:  # do nothing if not open
2199 -             return
3030 +         self._curPTS = 0.0  # current pts for the movie writer
2200 3031
2201 -
2202 -
3032 +         self._generatePTS = False  # whether to generate PTS for the movie writer
3033 +         if filename.endswith('.mp4'):
3034 +             self._generatePTS = True  # generate PTS for mp4 files
3035 +             logging.debug(
3036 +                 "MP4 format detected, PTS will be generated for the movie " \
3037 +                 "writer.")
2203 3038
2204 -         self.
2205 -
3039 +         self._movieWriter = MediaWriter(
3040 +             filename,
3041 +             [writerOptions],
3042 +             fmt='mp4',
3043 +             overwrite=True,  # overwrite existing file
3044 +             libOpts=encoderOpts)
2206 3045
2207 -
2208 -
2209 -         self._audioTrack = self._mic.getRecording()
3046 +     def _submitFrameToFileFFPyPlayer(self, frames):
3047 +         """Submit a frame to the movie file writer thread using FFPyPlayer.
2210 3048
2211 -
3049 +         This is used to submit frames to the movie file writer thread. It is
3050 +         called by the camera interface when a new frame is captured.
2212 3051
2213 -
2214 -
3052 +         Parameters
3053 +         ----------
3054 +         frames : list of tuples
3055 +             Color data and presentation timestamps to submit to the movie file
3056 +             writer thread.
2215 3057
2216 -
2217 -
2218 -
2219 -
3058 +         Returns
3059 +         -------
3060 +         int
3061 +             Number of bytes written to the movie file.
2220 3062
2221 3063          """
2222 -         if self.
2223 -
2224 -
2225 -
2226 -             raise RuntimeError("Cannot close stream, stream is not open.")
3064 +         if self._movieWriter is None:
3065 +             raise RuntimeError(
3066 +                 "Attempting to call `_submitFrameToFileFFPyPlayer()` before "
3067 +                 "`_openMovieFileWriterFFPyPlayer()`.")
2227 3068
2228 -
2229 -
2230 -
2231 -
2232 -
3069 +         from ffpyplayer.pic import SWScale
3070 +
3071 +         if not isinstance(frames, list):
3072 +             frames = [frames]  # ensure frames is a list
3073 +
3074 +         # write frames to the movie file writer
3075 +         bytesOut = 0
3076 +         for colorData, pts, _ in frames:
3077 +             # do color conversion if needed
3078 +             frameWidth, frameHeight = colorData.get_size()
3079 +             sws = SWScale(
3080 +                 frameWidth, frameHeight,
3081 +                 colorData.get_pixel_format(),
3082 +                 ofmt='yuv420p')
3083 +
3084 +             if self._generatePTS:
3085 +                 pts = self._curPTS  # use current for PTS
3086 +                 self._curPTS += self._capture.frameInterval  # increment pts by frame interval
3087 +
3088 +             bytesOut = self._movieWriter.write_frame(
3089 +                 img=sws.scale(colorData),
3090 +                 pts=pts,
3091 +                 stream=0)
2233 3092
2234 -
2235 -         self._captureThread = None
3093 +         return bytesOut
2236 3094
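With `_generatePTS` enabled, timestamps are synthesized rather than taken from the capture: each frame's pts advances by one frame interval. At 30 fps that is 1/30 s per frame, so successive frames get pts 0.0, 0.0333..., 0.0667..., and so on. A toy sketch of that bookkeeping (all names hypothetical):

    frameRate = 30.0
    frameInterval = 1.0 / frameRate   # ~0.033 s per frame
    curPTS = 0.0
    for frame in framesToWrite:       # framesToWrite is hypothetical
        writeFrame(frame, pts=curPTS) # hypothetical writer call
        curPTS += frameInterval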
|
2237
|
-
def
|
|
2238
|
-
|
|
2239
|
-
"""Save the last recording to file.
|
|
3095
|
+
def _closeMovieFileWriterFFPyPlayer(self):
|
|
3096
|
+
"""Close the movie file writer using the FFPyPlayer library.
|
|
2240
3097
|
|
|
2241
|
-
This will
|
|
2242
|
-
|
|
2243
|
-
|
|
3098
|
+
This will close the movie file writer and free up any resources used by
|
|
3099
|
+
the writer. If the writer is not open, this will do nothing.
|
|
3100
|
+
"""
|
|
3101
|
+
if self._movieWriter is not None:
|
|
3102
|
+
logging.debug(
|
|
3103
|
+
"Closing movie file writer using FFPyPlayer...")
|
|
3104
|
+
self._movieWriter.close()
|
|
3105
|
+
else:
|
|
3106
|
+
logging.debug(
|
|
3107
|
+
"Attempting to call `_closeMovieFileWriterFFPyPlayer()` "
|
|
3108
|
+
"without an open movie file writer.")
|
|
2244
3109
|
|
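For context, a condensed standalone sketch of the FFPyPlayer writer usage mirrored above, writing one second of black 640x480 video at 30 fps (all values hypothetical; note that ffpyplayer's own keyword for library options is `lib_opts`):

    from ffpyplayer.pic import Image
    from ffpyplayer.writer import MediaWriter

    stream = {'pix_fmt_in': 'yuv420p', 'width_in': 640, 'height_in': 480,
              'codec': 'libx264', 'frame_rate': (30, 1)}
    writer = MediaWriter('out.mp4', [stream], fmt='mp4', overwrite=True)

    # yuv420p planes for 640x480: Y is 640*480, U and V are 320*240 each
    blank = Image(plane_buffers=[b'\x00' * (640 * 480),
                                 b'\x00' * (320 * 240),
                                 b'\x00' * (320 * 240)],
                  pix_fmt='yuv420p', size=(640, 480))
    for i in range(30):
        writer.write_frame(img=blank, pts=i / 30.0, stream=0)
    writer.close()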
|
2245
|
-
|
|
2246
|
-
|
|
3110
|
+
#
|
|
3111
|
+
# Movie file writer methods
|
|
3112
|
+
#
|
|
3113
|
+
# These methods are used to open and close a movie file writer to save
|
|
3114
|
+
# frames to disk. We don't expose these methods to the user directly, but
|
|
3115
|
+
# they are used internally.
|
|
3116
|
+
#
|
|
3117
|
+
|
|
3118
|
+
def _openMovieFileWriter(self, encoderLib=None, encoderOpts=None):
|
|
3119
|
+
"""Open a movie file writer to save frames to disk.
|
|
3120
|
+
|
|
3121
|
+
This will open a movie file writer to save frames to disk. The frames
|
|
3122
|
+
will be saved to a temporary file and then merged with the audio
|
|
3123
|
+
track (if available) when `save()` is called.
|
|
2247
3124
|
|
|
2248
3125
|
Parameters
|
|
2249
3126
|
----------
|
|
2250
|
-
filename : str
|
|
2251
|
-
File to save the resulting video to, should include the extension.
|
|
2252
|
-
useThreads : bool
|
|
2253
|
-
Use threading where possible to speed up the saving process. If
|
|
2254
|
-
`True`, the video will be saved and composited in a separate thread
|
|
2255
|
-
and this function will return quickly. If `False`, the video will
|
|
2256
|
-
be saved and composited in the main thread and this function will
|
|
2257
|
-
block until the video is saved. Default is `True`.
|
|
2258
|
-
mergeAudio : bool
|
|
2259
|
-
Merge the audio track from the microphone with the video. If `True`,
|
|
2260
|
-
the audio track will be merged with the video. If `False`, the
|
|
2261
|
-
audio track will be saved to a separate file. Default is `True`.
|
|
2262
3127
|
encoderLib : str or None
|
|
2263
3128
|
Encoder library to use for saving the video. This can be either
|
|
2264
3129
|
`'ffpyplayer'` or `'opencv'`. If `None`, the same library that was
|
|
2265
3130
|
used to open the camera stream. Default is `None`.
|
|
2266
|
-
encoderOpts : dict
|
|
3131
|
+
encoderOpts : dict or None
|
|
2267
3132
|
Options to pass to the encoder. This is a dictionary of options
|
|
2268
3133
|
specific to the encoder library being used. See the documentation
|
|
2269
3134
|
for `~psychopy.tools.movietools.MovieFileWriter` for more details.
|
|
2270
3135
|
|
|
2271
|
-
|
|
2272
|
-
|
|
2273
|
-
|
|
2274
|
-
|
|
2275
|
-
|
|
2276
|
-
|
|
2277
|
-
if os.path.exists(filename):
|
|
2278
|
-
msg = (
|
|
2279
|
-
"Video file '{}' already exists, overwriting.".format(filename))
|
|
2280
|
-
logging.warning(msg)
|
|
2281
|
-
os.remove(filename)
|
|
3136
|
+
Returns
|
|
3137
|
+
-------
|
|
3138
|
+
str
|
|
3139
|
+
Path to the temporary file that will be used to save the video. The
|
|
3140
|
+
file will be deleted when the movie file writer is closed or when
|
|
3141
|
+
`save()` is called.
|
|
2282
3142
|
|
|
2283
|
-
|
|
3143
|
+
"""
|
|
3144
|
+
if self._movieWriter is not None:
|
|
3145
|
+
return self._tempVideoFile # already open, return temp file
|
|
3146
|
+
|
|
2284
3147
|
if encoderLib is None:
|
|
2285
3148
|
encoderLib = self._cameraLib
|
|
2286
|
-
|
|
2287
3149
|
logging.debug(
|
|
2288
3150
|
"Using encoder library '{}' to save video.".format(encoderLib))
|
|
2289
|
-
|
|
3151
|
+
|
|
3152
|
+
# check if we have a temporary file to write to
|
|
3153
|
+
import tempfile
|
|
3154
|
+
# create a temporary file to write the video to
|
|
3155
|
+
tempVideoFile = tempfile.NamedTemporaryFile(
|
|
3156
|
+
suffix='.mp4', delete=True)
|
|
3157
|
+
self._tempVideoFile = tempVideoFile.name
|
|
3158
|
+
tempVideoFile.close()
|
|
3159
|
+
|
|
3160
|
+
logging.debug("Using temporary file '{}' for video.".format(self._tempVideoFile))
|
|
3161
|
+
|
|
2290
3162
|
# check if the encoder library name string is valid
|
|
2291
|
-
if encoderLib not in ('ffpyplayer'
|
|
3163
|
+
if encoderLib not in ('ffpyplayer'):
|
|
2292
3164
|
raise ValueError(
|
|
2293
3165
|
"Invalid value for parameter `encoderLib`, expected one of "
|
|
2294
3166
|
"`'ffpyplayer'` or `'opencv'`.")
|
|
2295
|
-
|
|
2296
|
-
# check if we have an audio track to save
|
|
2297
|
-
hasAudio = self._audioTrack is not None
|
|
2298
|
-
|
|
2299
|
-
# create a temporary file names for the video and audio
|
|
2300
|
-
if hasAudio:
|
|
2301
|
-
if mergeAudio:
|
|
2302
|
-
tempPrefix = (uuid.uuid4().hex)[:16] # 16 char prefix
|
|
2303
|
-
videoFileName = "{}_video.mp4".format(tempPrefix)
|
|
2304
|
-
audioFileName = "{}_audio.wav".format(tempPrefix)
|
|
2305
|
-
else:
|
|
2306
|
-
videoFileName = audioFileName = filename
|
|
2307
|
-
audioFileName += '.wav'
|
|
2308
|
-
else:
|
|
2309
|
-
videoFileName = filename
|
|
2310
|
-
audioFileName = None
|
|
2311
|
-
|
|
2312
|
-
# make sure filenames are absolute paths
|
|
2313
|
-
videoFileName = os.path.abspath(videoFileName)
|
|
2314
|
-
if audioFileName is not None:
|
|
2315
|
-
audioFileName = os.path.abspath(audioFileName)
|
|
2316
|
-
|
|
2317
|
-
# flush outstanding frames from the camera queue
|
|
2318
|
-
self._enqueueFrame()
|
|
2319
|
-
|
|
2320
|
-
# contain video and not audio
|
|
2321
|
-
logging.debug("Saving video to file: {}".format(videoFileName))
|
|
2322
|
-
self._movieWriter = movietools.MovieFileWriter(
|
|
2323
|
-
filename=videoFileName,
|
|
2324
|
-
size=self._cameraInfo.frameSize, # match camera params
|
|
2325
|
-
fps=self._cameraInfo.frameRate,
|
|
2326
|
-
codec=None, # mp4
|
|
2327
|
-
pixelFormat='rgb24',
|
|
2328
|
-
encoderLib=encoderLib,
|
|
2329
|
-
encoderOpts=encoderOpts)
|
|
2330
|
-
self._movieWriter.open() # blocks main thread until opened and ready
|
|
2331
|
-
|
|
2332
|
-
# flush remaining frames to the writer thread, this is really fast since
|
|
2333
|
-
# frames are not copied and don't require much conversion
|
|
2334
|
-
for frame in self._captureFrames:
|
|
2335
|
-
self._movieWriter.addFrame(frame.colorData)
|
|
2336
|
-
|
|
2337
|
-
# push all frames to the queue for the movie recorder
|
|
2338
|
-
self._movieWriter.close() # thread-safe call
|
|
2339
|
-
self._movieWriter = None
|
|
2340
|
-
|
|
2341
|
-
# save audio track if available
|
|
2342
|
-
if hasAudio:
|
|
2343
|
-
logging.debug(
|
|
2344
|
-
"Saving audio track to file: {}".format(audioFileName))
|
|
2345
|
-
self._audioTrack.save(audioFileName, 'wav')
|
|
2346
3167
|
|
|
2347
|
-
|
|
2348
|
-
|
|
2349
|
-
|
|
2350
|
-
|
|
2351
|
-
|
|
2352
|
-
|
|
2353
|
-
|
|
2354
|
-
useThreads=useThreads,
|
|
2355
|
-
removeFiles=True) # disable threading for now
|
|
2356
|
-
|
|
2357
|
-
self._lastVideoFile = filename # remember the last video we saved
|
|
3168
|
+
if encoderLib == 'ffpyplayer':
|
|
3169
|
+
self._openMovieFileWriterFFPyPlayer(
|
|
3170
|
+
self._tempVideoFile, encoderOpts=encoderOpts)
|
|
3171
|
+
else:
|
|
3172
|
+
raise ValueError(
|
|
3173
|
+
"Invalid value for parameter `encoderLib`, expected one of "
|
|
3174
|
+
"`'ffpyplayer'` or `'opencv'`.")
|
|
2358
3175
|
|
|
2359
|
-
|
|
2360
|
-
"""Upload video file to an online repository. Not implemented locally,
|
|
2361
|
-
needed for auto translate to JS.
|
|
2362
|
-
"""
|
|
2363
|
-
pass # NOP
|
|
3176
|
+
return self._tempVideoFile
|
|
2364
3177
|
|
|
2365
|
-
def
|
|
2366
|
-
"""
|
|
2367
|
-
needed for auto translate to JS.
|
|
2368
|
-
"""
|
|
2369
|
-
pass # NOP
|
|
3178
|
+
def _submitFrameToFile(self, frames, pts=None):
|
|
3179
|
+
"""Submit a frame to the movie file writer thread.
|
|
2370
3180
|
|
|
2371
|
-
|
|
2372
|
-
|
|
2373
|
-
"""File path to the last recording (`str` or `None`).
|
|
3181
|
+
This is used to submit frames to the movie file writer thread. It is
|
|
3182
|
+
called by the camera interface when a new frame is captured.
|
|
2374
3183
|
|
|
2375
|
-
|
|
2376
|
-
|
|
3184
|
+
Parameters
|
|
3185
|
+
----------
|
|
3186
|
+
frames : MovieFrame
|
|
3187
|
+
Frame to submit to the movie file writer thread.
|
|
3188
|
+
pts : float or None
|
|
3189
|
+
Presentation timestamp for the frame. If `None`, timestamps will be
|
|
3190
|
+
generated automatically by the movie file writer. This is only used
|
|
3191
|
+
if the movie file writer is configured to generate PTS values.
|
|
2377
3192
|
|
|
2378
3193
|
"""
|
|
2379
|
-
|
|
2380
|
-
|
|
2381
|
-
|
|
2382
|
-
|
|
2383
|
-
|
|
2384
|
-
This value is only valid if a previous recording has been saved to disk
|
|
2385
|
-
(`save()` was called).
|
|
2386
|
-
|
|
2387
|
-
Returns
|
|
2388
|
-
-------
|
|
2389
|
-
str or None
|
|
2390
|
-
Path to the file the most recent call to `save()` created. Returns
|
|
2391
|
-
`None` if no file is ready.
|
|
3194
|
+
if self._movieWriter is None:
|
|
3195
|
+
raise RuntimeError(
|
|
3196
|
+
"Attempting to call `_submitFrameToFile()` before "
|
|
3197
|
+
"`_openMovieFileWriter()`.")
|
|
2392
3198
|
|
|
2393
|
-
|
|
2394
|
-
|
|
3199
|
+
tStart = time.time() # start time for the operation
|
|
3200
|
+
if self._cameraLib == 'ffpyplayer':
|
|
3201
|
+
toReturn = self._submitFrameToFileFFPyPlayer(frames)
|
|
3202
|
+
else:
|
|
3203
|
+
raise ValueError(
|
|
3204
|
+
"Invalid value for parameter `encoderLib`, expected "
|
|
3205
|
+
"`'ffpyplayer'.")
|
|
3206
|
+
|
|
3207
|
+
logging.debug(
|
|
3208
|
+
"Submitted {} frames to the movie file writer (took {:.6f} seconds)".format(
|
|
3209
|
+
len(frames), time.time() - tStart))
|
|
3210
|
+
|
|
3211
|
+
return toReturn
|
|
3212
|
+
|
|
3213
|
+
def _closeMovieFileWriter(self):
|
|
3214
|
+
"""Close the movie file writer.
|
|
2395
3215
|
|
|
2396
|
-
|
|
2397
|
-
|
|
2398
|
-
"""Most recent frame pulled from the camera (`VideoFrame`) since the
|
|
2399
|
-
last call of `getVideoFrame`.
|
|
2400
|
-
"""
|
|
2401
|
-
return self._lastFrame
|
|
2402
|
-
|
|
2403
|
-
def update(self):
|
|
2404
|
-
"""Acquire the newest data from the camera stream. If the `Camera`
|
|
2405
|
-
object is not being monitored by a `ImageStim`, this must be explicitly
|
|
2406
|
-
called.
|
|
3216
|
+
This will close the movie file writer and free up any resources used by
|
|
3217
|
+
the writer. If the writer is not open, this will do nothing.
|
|
2407
3218
|
"""
|
|
2408
|
-
self.
|
|
2409
|
-
|
|
2410
|
-
|
|
2411
|
-
|
|
2412
|
-
|
|
2413
|
-
|
|
2414
|
-
|
|
2415
|
-
|
|
2416
|
-
|
|
2417
|
-
|
|
2418
|
-
|
|
3219
|
+
if self._movieWriter is None:
|
|
3220
|
+
logging.warning(
|
|
3221
|
+
"Attempting to call `_closeMovieFileWriter()` without an open "
|
|
3222
|
+
"movie file writer.")
|
|
3223
|
+
return
|
|
3224
|
+
|
|
3225
|
+
if self._cameraLib == 'ffpyplayer':
|
|
3226
|
+
self._closeMovieFileWriterFFPyPlayer()
|
|
3227
|
+
else:
|
|
3228
|
+
raise ValueError(
|
|
3229
|
+
"Invalid value for parameter `encoderLib`, expected one of "
|
|
3230
|
+
"`'ffpyplayer'` or `'opencv'`.")
|
|
2419
3231
|
|
|
2420
|
-
|
|
2421
|
-
self.update()
|
|
3232
|
+
self._movieWriter = None
|
|
2422
3233
|
|
|
2423
|
-
|
|
3234
|
+
# --------------------------------------------------------------------------
|
|
3235
|
+
# Destructor
|
|
3236
|
+
#
|
|
2424
3237
|
|
|
2425
3238
|
def __del__(self):
|
|
2426
3239
|
"""Try to cleanly close the camera and output file.
|
|
2427
3240
|
"""
|
|
2428
|
-
|
|
2429
|
-
|
|
2430
|
-
if self._captureThread is not None:
|
|
3241
|
+
if hasattr(self, '_capture'):
|
|
3242
|
+
if self._capture is not None:
|
|
2431
3243
|
try:
|
|
2432
|
-
self.
|
|
3244
|
+
self.close()
|
|
2433
3245
|
except AttributeError:
|
|
2434
3246
|
pass
|
|
2435
3247
|
|
|
2436
|
-
|
|
2437
|
-
|
|
2438
|
-
if self._mic is not None:
|
|
3248
|
+
if hasattr(self, '_movieWriter'):
|
|
3249
|
+
if self._movieWriter is not None:
|
|
2439
3250
|
try:
|
|
2440
|
-
self.
|
|
3251
|
+
self._movieWriter.close()
|
|
2441
3252
|
except AttributeError:
|
|
2442
3253
|
pass
|
|
2443
3254
|
|
|
@@ -2711,7 +3522,7 @@ def getAllCameraInterfaces():
2711 3522      # filter for classes that are camera interfaces
2712 3523      cameraInterfaces = {}
2713 3524      for name, cls in classes:
2714 -         if issubclass(cls,
3525 +         if issubclass(cls, CameraDevice):
2715 3526              cameraInterfaces[name] = cls
2716 3527
2717 3528      return cameraInterfaces
@@ -2810,5 +3621,37 @@ def renderVideo(outputFile, videoFile, audioFile=None, removeFiles=False):
2810 3621      return os.path.getsize(outputFile)
2811 3622
2812 3623
3624 + # ------------------------------------------------------------------------------
3625 + # Cleanup functions
3626 + #
3627 + # These functions are used to clean up resources when the application exits,
3628 + # usually unexpectedly. This helps to ensure hardware interfaces are closed
3629 + # and resources are freed up as best we can.
3630 + #
3631 +
3632 + import atexit
3633 +
3634 +
3635 + def _closeAllCaptureInterfaces():
3636 +     """Close all open capture interfaces.
3637 +
3638 +     This is registered with `atexit` to ensure that all open cameras are closed
3639 +     when the application exits. This is important to free up resources and
3640 +     ensure that cameras are not left open unintentionally.
3641 +
3642 +     """
3643 +     global _openCaptureInterfaces
3644 +
3645 +     for cap in _openCaptureInterfaces.copy():
3646 +         try:
3647 +             cap.close()
3648 +         except Exception as e:
3649 +             logging.error(f"Error closing camera interface {cap}: {e}")
3650 +
3651 +
3652 + # Register the function to close all cameras on exit
3653 + atexit.register(_closeAllCaptureInterfaces)
3654 +
3655 + # ------------------------------------------------------------------------------
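The registry `_openCaptureInterfaces` is referenced here but defined elsewhere in the module. A sketch of how such a registry is commonly kept (this is an assumption about the pattern, not the module's actual definition); a `WeakSet` lets interfaces register on open without the registry itself keeping them alive:

    import weakref

    _openCaptureInterfaces = weakref.WeakSet()  # hypothetical stand-in

    class CaptureInterface:  # hypothetical minimal interface
        def open(self):
            _openCaptureInterfaces.add(self)      # register on open
        def close(self):
            _openCaptureInterfaces.discard(self)  # deregister on close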
2813 3656  if __name__ == "__main__":
2814 3657      pass