@shopify/react-native-skia 0.1.192 → 0.1.193
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/android/CMakeLists.txt +1 -1
- package/android/build.gradle +1 -0
- package/android/cpp/jni/JniPlatformContext.cpp +1 -1
- package/cpp/api/JsiSkImage.h +25 -4
- package/cpp/api/JsiSkImageFactory.h +2 -2
- package/cpp/api/JsiSkPath.h +8 -6
- package/cpp/rnskia/dom/base/Declaration.h +5 -7
- package/cpp/rnskia/dom/nodes/JsiBlurMaskNode.h +1 -0
- package/cpp/rnskia/dom/nodes/JsiPathNode.h +2 -2
- package/cpp/rnskia/dom/props/BoxShadowProps.h +2 -0
- package/cpp/skia/include/android/SkAndroidFrameworkUtils.h +2 -8
- package/cpp/skia/include/android/SkImageAndroid.h +101 -0
- package/cpp/skia/include/codec/SkAndroidCodec.h +26 -8
- package/cpp/skia/include/codec/SkCodec.h +31 -17
- package/cpp/skia/include/codec/SkEncodedImageFormat.h +36 -0
- package/cpp/skia/include/codec/SkPixmapUtils.h +31 -0
- package/cpp/skia/include/config/SkUserConfig.h +61 -29
- package/cpp/skia/include/core/SkBitmap.h +25 -25
- package/cpp/skia/include/core/SkBlurTypes.h +0 -2
- package/cpp/skia/include/core/SkCanvas.h +32 -15
- package/cpp/skia/include/core/SkCapabilities.h +2 -2
- package/cpp/skia/include/core/SkColor.h +2 -0
- package/cpp/skia/include/core/SkColorPriv.h +19 -4
- package/cpp/skia/include/core/SkColorSpace.h +14 -17
- package/cpp/skia/include/core/SkColorType.h +1 -0
- package/cpp/skia/include/core/SkContourMeasure.h +1 -1
- package/cpp/skia/include/core/SkCoverageMode.h +0 -2
- package/cpp/skia/include/core/SkCubicMap.h +2 -0
- package/cpp/skia/include/core/SkData.h +5 -2
- package/cpp/skia/include/core/SkDataTable.h +6 -2
- package/cpp/skia/include/core/SkDeferredDisplayList.h +11 -10
- package/cpp/skia/include/core/SkDeferredDisplayListRecorder.h +9 -8
- package/cpp/skia/include/core/SkDrawable.h +10 -2
- package/cpp/skia/include/core/SkEncodedImageFormat.h +3 -30
- package/cpp/skia/include/core/SkFlattenable.h +4 -2
- package/cpp/skia/include/core/SkFont.h +1 -0
- package/cpp/skia/include/core/SkFontMetrics.h +1 -0
- package/cpp/skia/include/core/SkFontMgr.h +20 -29
- package/cpp/skia/include/core/SkFontStyle.h +4 -1
- package/cpp/skia/include/core/SkGraphics.h +21 -18
- package/cpp/skia/include/core/SkICC.h +3 -13
- package/cpp/skia/include/core/SkImage.h +395 -717
- package/cpp/skia/include/core/SkImageGenerator.h +19 -74
- package/cpp/skia/include/core/SkImageInfo.h +7 -5
- package/cpp/skia/include/core/SkM44.h +11 -0
- package/cpp/skia/include/core/SkMaskFilter.h +6 -3
- package/cpp/skia/include/core/SkMatrix.h +14 -4
- package/cpp/skia/include/core/SkMesh.h +52 -18
- package/cpp/skia/include/core/SkMilestone.h +1 -1
- package/cpp/skia/include/core/SkPaint.h +11 -34
- package/cpp/skia/include/core/SkPath.h +23 -4
- package/cpp/skia/include/core/SkPathBuilder.h +13 -5
- package/cpp/skia/include/core/SkPathMeasure.h +1 -1
- package/cpp/skia/include/core/SkPathTypes.h +0 -2
- package/cpp/skia/include/core/SkPathUtils.h +42 -0
- package/cpp/skia/include/core/SkPicture.h +3 -2
- package/cpp/skia/include/core/SkPictureRecorder.h +2 -0
- package/cpp/skia/include/core/SkPixelRef.h +4 -8
- package/cpp/skia/include/core/SkPixmap.h +12 -20
- package/cpp/skia/include/core/SkPoint.h +4 -2
- package/cpp/skia/include/core/SkPromiseImageTexture.h +2 -2
- package/cpp/skia/include/core/SkRRect.h +5 -1
- package/cpp/skia/include/core/SkRect.h +6 -3
- package/cpp/skia/include/core/SkRefCnt.h +9 -14
- package/cpp/skia/include/core/SkRegion.h +1 -1
- package/cpp/skia/include/core/SkScalar.h +2 -4
- package/cpp/skia/include/core/SkSerialProcs.h +18 -10
- package/cpp/skia/include/core/SkShader.h +1 -64
- package/cpp/skia/include/core/SkSize.h +2 -0
- package/cpp/skia/include/core/SkSpan.h +4 -112
- package/cpp/skia/include/core/SkStream.h +11 -12
- package/cpp/skia/include/core/SkString.h +9 -25
- package/cpp/skia/include/core/SkStrokeRec.h +1 -1
- package/cpp/skia/include/core/SkSurface.h +83 -61
- package/cpp/skia/include/core/SkSurfaceCharacterization.h +3 -3
- package/cpp/skia/include/core/SkSurfaceProps.h +9 -1
- package/cpp/skia/include/core/SkTextBlob.h +2 -2
- package/cpp/skia/include/core/SkTextureCompressionType.h +30 -0
- package/cpp/skia/include/core/SkTime.h +1 -1
- package/cpp/skia/include/core/SkTypeface.h +9 -2
- package/cpp/skia/include/core/SkTypes.h +37 -466
- package/cpp/skia/include/core/SkVertices.h +2 -0
- package/cpp/skia/include/core/SkYUVAInfo.h +4 -0
- package/cpp/skia/include/core/SkYUVAPixmaps.h +7 -1
- package/cpp/skia/include/docs/SkPDFDocument.h +12 -1
- package/cpp/skia/include/effects/SkColorMatrix.h +2 -1
- package/cpp/skia/include/effects/SkGradientShader.h +65 -14
- package/cpp/skia/include/effects/SkImageFilters.h +0 -11
- package/cpp/skia/include/effects/SkRuntimeEffect.h +41 -11
- package/cpp/skia/include/encode/SkEncoder.h +7 -3
- package/cpp/skia/include/encode/SkICC.h +36 -0
- package/cpp/skia/include/encode/SkJpegEncoder.h +102 -71
- package/cpp/skia/include/encode/SkPngEncoder.h +89 -71
- package/cpp/skia/include/encode/SkWebpEncoder.h +65 -38
- package/cpp/skia/include/gpu/GpuTypes.h +23 -1
- package/cpp/skia/include/gpu/GrBackendSurface.h +9 -7
- package/cpp/skia/include/gpu/GrContextOptions.h +28 -9
- package/cpp/skia/include/gpu/GrContextThreadSafeProxy.h +6 -4
- package/cpp/skia/include/gpu/GrDirectContext.h +84 -63
- package/cpp/skia/include/gpu/GrDriverBugWorkarounds.h +2 -1
- package/cpp/skia/include/gpu/GrRecordingContext.h +9 -5
- package/cpp/skia/include/gpu/GrTypes.h +18 -18
- package/cpp/skia/include/gpu/d3d/GrD3DTypes.h +4 -4
- package/cpp/skia/include/gpu/dawn/GrDawnTypes.h +3 -3
- package/cpp/skia/include/gpu/ganesh/GrTextureGenerator.h +77 -0
- package/cpp/skia/include/gpu/ganesh/SkImageGanesh.h +385 -0
- package/cpp/skia/include/gpu/gl/GrGLExtensions.h +3 -3
- package/cpp/skia/include/gpu/gl/GrGLFunctions.h +1 -1
- package/cpp/skia/include/gpu/gl/GrGLInterface.h +0 -3
- package/cpp/skia/include/gpu/gl/GrGLTypes.h +2 -1
- package/cpp/skia/include/gpu/graphite/BackendTexture.h +72 -3
- package/cpp/skia/include/gpu/graphite/Context.h +85 -32
- package/cpp/skia/include/gpu/graphite/ContextOptions.h +15 -11
- package/cpp/skia/include/gpu/graphite/GraphiteTypes.h +55 -5
- package/cpp/skia/include/gpu/graphite/ImageProvider.h +6 -4
- package/cpp/skia/include/gpu/graphite/Recorder.h +41 -11
- package/cpp/skia/include/gpu/graphite/Recording.h +50 -3
- package/cpp/skia/include/gpu/graphite/TextureInfo.h +47 -8
- package/cpp/skia/include/gpu/graphite/YUVABackendTextures.h +139 -0
- package/cpp/skia/include/gpu/graphite/dawn/DawnTypes.h +40 -0
- package/cpp/skia/include/gpu/graphite/dawn/DawnUtils.h +28 -0
- package/cpp/skia/include/gpu/graphite/mtl/MtlBackendContext.h +1 -1
- package/cpp/skia/include/gpu/graphite/mtl/{MtlTypes.h → MtlGraphiteTypes.h} +7 -6
- package/cpp/skia/include/gpu/graphite/mtl/MtlGraphiteUtils.h +27 -0
- package/cpp/skia/include/gpu/graphite/vk/VulkanGraphiteTypes.h +4 -9
- package/cpp/skia/include/gpu/graphite/vk/VulkanGraphiteUtils.h +28 -0
- package/cpp/skia/include/gpu/mock/GrMockTypes.h +17 -13
- package/cpp/skia/include/gpu/mtl/GrMtlTypes.h +2 -2
- package/cpp/skia/include/gpu/vk/GrVkBackendContext.h +1 -1
- package/cpp/skia/include/gpu/vk/GrVkTypes.h +3 -3
- package/cpp/skia/include/gpu/vk/VulkanExtensions.h +3 -3
- package/cpp/skia/include/gpu/vk/VulkanMemoryAllocator.h +5 -7
- package/cpp/skia/include/pathops/SkPathOps.h +3 -3
- package/cpp/skia/include/ports/SkFontMgr_data.h +22 -0
- package/cpp/skia/include/ports/SkFontMgr_indirect.h +14 -14
- package/cpp/skia/include/ports/SkRemotableFontMgr.h +2 -2
- package/cpp/skia/include/ports/SkTypeface_win.h +2 -1
- package/cpp/skia/include/private/SkChecksum.h +32 -7
- package/cpp/skia/include/private/SkColorData.h +1 -26
- package/cpp/skia/include/private/SkGainmapInfo.h +97 -0
- package/cpp/skia/include/private/SkGainmapShader.h +53 -0
- package/cpp/skia/include/private/SkIDChangeListener.h +4 -3
- package/cpp/skia/include/private/SkJpegGainmapEncoder.h +71 -0
- package/cpp/skia/include/private/SkJpegMetadataDecoder.h +61 -0
- package/cpp/skia/include/private/SkOpts_spi.h +3 -1
- package/cpp/skia/include/private/SkPathRef.h +64 -47
- package/cpp/skia/include/private/SkSLDefines.h +5 -5
- package/cpp/skia/include/private/SkSLSampleUsage.h +0 -4
- package/cpp/skia/include/private/SkSpinlock.h +1 -1
- package/cpp/skia/include/private/SkWeakRefCnt.h +3 -0
- package/cpp/skia/include/private/{SingleOwner.h → base/SingleOwner.h} +8 -5
- package/cpp/skia/include/private/base/SkAPI.h +52 -0
- package/cpp/skia/include/private/base/SkAlign.h +39 -0
- package/cpp/skia/include/private/base/SkAlignedStorage.h +32 -0
- package/cpp/skia/include/private/base/SkAssert.h +92 -0
- package/cpp/skia/include/private/base/SkAttributes.h +102 -0
- package/cpp/skia/include/private/base/SkCPUTypes.h +25 -0
- package/cpp/skia/include/private/base/SkContainers.h +46 -0
- package/cpp/skia/include/private/base/SkDebug.h +27 -0
- package/cpp/skia/include/private/{SkDeque.h → base/SkDeque.h} +3 -1
- package/cpp/skia/include/private/base/SkFeatures.h +151 -0
- package/cpp/skia/include/private/{SkFixed.h → base/SkFixed.h} +9 -7
- package/cpp/skia/include/private/{SkFloatBits.h → base/SkFloatBits.h} +2 -3
- package/cpp/skia/include/private/{SkFloatingPoint.h → base/SkFloatingPoint.h} +18 -9
- package/cpp/skia/include/private/base/SkLoadUserConfig.h +63 -0
- package/cpp/skia/include/private/{SkMacros.h → base/SkMacros.h} +17 -2
- package/cpp/skia/include/private/{SkMalloc.h → base/SkMalloc.h} +4 -7
- package/cpp/skia/include/{core → private/base}/SkMath.h +25 -2
- package/cpp/skia/include/private/{SkMutex.h → base/SkMutex.h} +5 -5
- package/cpp/skia/include/private/{SkNoncopyable.h → base/SkNoncopyable.h} +2 -2
- package/cpp/skia/include/private/{SkOnce.h → base/SkOnce.h} +3 -1
- package/cpp/skia/include/private/base/SkPathEnums.h +25 -0
- package/cpp/skia/include/private/{SkSafe32.h → base/SkSafe32.h} +16 -1
- package/cpp/skia/include/private/{SkSemaphore.h → base/SkSemaphore.h} +4 -3
- package/cpp/skia/include/private/base/SkSpan_impl.h +129 -0
- package/cpp/skia/include/private/base/SkTArray.h +694 -0
- package/cpp/skia/include/private/{SkTDArray.h → base/SkTDArray.h} +17 -54
- package/cpp/skia/include/private/{SkTFitsIn.h → base/SkTFitsIn.h} +14 -8
- package/cpp/skia/include/private/{SkTLogic.h → base/SkTLogic.h} +1 -1
- package/cpp/skia/include/private/{SkTemplates.h → base/SkTemplates.h} +63 -88
- package/cpp/skia/include/private/{SkThreadID.h → base/SkThreadID.h} +5 -2
- package/cpp/skia/include/private/{SkTo.h → base/SkTo.h} +13 -2
- package/cpp/skia/include/private/base/SkTypeTraits.h +33 -0
- package/cpp/skia/include/private/chromium/GrVkSecondaryCBDrawContext.h +130 -0
- package/cpp/skia/include/private/chromium/SkChromeRemoteGlyphCache.h +5 -9
- package/cpp/skia/include/private/chromium/SkDiscardableMemory.h +70 -0
- package/cpp/skia/include/private/chromium/Slug.h +0 -9
- package/cpp/skia/include/private/gpu/ganesh/GrContext_Base.h +2 -1
- package/cpp/skia/include/private/gpu/ganesh/GrD3DTypesMinimal.h +1 -1
- package/cpp/skia/include/private/gpu/ganesh/GrDawnTypesPriv.h +1 -1
- package/cpp/skia/include/private/gpu/ganesh/GrGLTypesPriv.h +1 -1
- package/cpp/skia/include/private/gpu/ganesh/GrImageContext.h +1 -1
- package/cpp/skia/include/private/gpu/ganesh/GrMockTypesPriv.h +3 -2
- package/cpp/skia/include/private/gpu/ganesh/GrMtlTypesPriv.h +1 -1
- package/cpp/skia/include/private/gpu/ganesh/GrTypesPriv.h +17 -23
- package/cpp/skia/include/private/gpu/ganesh/GrVkTypesPriv.h +2 -2
- package/cpp/skia/include/private/gpu/graphite/DawnTypesPriv.h +38 -0
- package/cpp/skia/include/private/gpu/graphite/{MtlTypesPriv.h → MtlGraphiteTypesPriv.h} +5 -5
- package/cpp/skia/include/private/gpu/graphite/VulkanGraphiteTypesPriv.h +1 -9
- package/cpp/skia/include/private/gpu/vk/SkiaVulkan.h +4 -0
- package/cpp/skia/include/utils/SkCamera.h +1 -1
- package/cpp/skia/include/utils/SkCustomTypeface.h +7 -1
- package/cpp/skia/include/utils/SkNWayCanvas.h +6 -6
- package/cpp/skia/include/utils/SkOrderedFontMgr.h +7 -6
- package/cpp/skia/include/utils/SkPaintFilterCanvas.h +2 -2
- package/cpp/skia/include/utils/SkParsePath.h +1 -1
- package/cpp/skia/modules/skcms/skcms.h +14 -0
- package/cpp/skia/modules/skcms/src/Transform_inl.h +19 -0
- package/cpp/skia/modules/skparagraph/include/FontCollection.h +2 -2
- package/cpp/skia/modules/skparagraph/include/Paragraph.h +72 -2
- package/cpp/skia/modules/skparagraph/include/ParagraphCache.h +1 -11
- package/cpp/skia/modules/skparagraph/include/ParagraphPainter.h +63 -0
- package/cpp/skia/modules/skparagraph/include/TextStyle.h +27 -4
- package/cpp/skia/modules/skparagraph/include/TypefaceFontProvider.h +13 -13
- package/cpp/skia/modules/skresources/include/SkResources.h +18 -4
- package/cpp/skia/modules/svg/include/SkSVGAttribute.h +1 -1
- package/cpp/skia/modules/svg/include/SkSVGAttributeParser.h +2 -2
- package/cpp/skia/modules/svg/include/SkSVGContainer.h +2 -2
- package/cpp/skia/modules/svg/include/SkSVGDOM.h +1 -1
- package/cpp/skia/modules/svg/include/SkSVGFilterContext.h +2 -2
- package/cpp/skia/modules/svg/include/SkSVGGradient.h +2 -2
- package/cpp/skia/modules/svg/include/SkSVGIDMapper.h +2 -2
- package/cpp/skia/modules/svg/include/SkSVGRenderContext.h +3 -3
- package/cpp/skia/modules/svg/include/SkSVGSVG.h +1 -1
- package/cpp/skia/modules/svg/include/SkSVGTypes.h +22 -21
- package/cpp/skia/modules/svg/include/SkSVGValue.h +1 -1
- package/cpp/skia/{include/private → src/core}/SkTHash.h +41 -37
- package/ios/RNSkia-iOS/ViewScreenshotService.mm +1 -1
- package/libs/android/arm64-v8a/libskia.a +0 -0
- package/libs/android/arm64-v8a/libskottie.a +0 -0
- package/libs/android/arm64-v8a/libsksg.a +0 -0
- package/libs/android/arm64-v8a/libskshaper.a +0 -0
- package/libs/android/arm64-v8a/libsvg.a +0 -0
- package/libs/android/armeabi-v7a/libskia.a +0 -0
- package/libs/android/armeabi-v7a/libskottie.a +0 -0
- package/libs/android/armeabi-v7a/libsksg.a +0 -0
- package/libs/android/armeabi-v7a/libskshaper.a +0 -0
- package/libs/android/armeabi-v7a/libsvg.a +0 -0
- package/libs/android/x86/libskia.a +0 -0
- package/libs/android/x86/libskottie.a +0 -0
- package/libs/android/x86/libsksg.a +0 -0
- package/libs/android/x86/libskshaper.a +0 -0
- package/libs/android/x86/libsvg.a +0 -0
- package/libs/android/x86_64/libskia.a +0 -0
- package/libs/android/x86_64/libskottie.a +0 -0
- package/libs/android/x86_64/libsksg.a +0 -0
- package/libs/android/x86_64/libskshaper.a +0 -0
- package/libs/android/x86_64/libsvg.a +0 -0
- package/libs/ios/libskia.xcframework/Info.plist +5 -5
- package/libs/ios/libskia.xcframework/ios-arm64_arm64e/libskia.a +0 -0
- package/libs/ios/libskia.xcframework/ios-arm64_arm64e_x86_64-simulator/libskia.a +0 -0
- package/libs/ios/libskottie.xcframework/ios-arm64_arm64e/libskottie.a +0 -0
- package/libs/ios/libskottie.xcframework/ios-arm64_arm64e_x86_64-simulator/libskottie.a +0 -0
- package/libs/ios/libsksg.xcframework/ios-arm64_arm64e/libsksg.a +0 -0
- package/libs/ios/libsksg.xcframework/ios-arm64_arm64e_x86_64-simulator/libsksg.a +0 -0
- package/libs/ios/libskshaper.xcframework/ios-arm64_arm64e/libskshaper.a +0 -0
- package/libs/ios/libskshaper.xcframework/ios-arm64_arm64e_x86_64-simulator/libskshaper.a +0 -0
- package/libs/ios/libsvg.xcframework/ios-arm64_arm64e/libsvg.a +0 -0
- package/libs/ios/libsvg.xcframework/ios-arm64_arm64e_x86_64-simulator/libsvg.a +0 -0
- package/package.json +1 -1
- package/react-native-skia.podspec +1 -1
- package/scripts/install-npm.js +11 -1
- package/cpp/skia/include/core/SkImageEncoder.h +0 -71
- package/cpp/skia/include/gpu/GrConfig.h +0 -53
- package/cpp/skia/include/gpu/graphite/CombinationBuilder.h +0 -195
- package/cpp/skia/include/private/SkHalf.h +0 -38
- package/cpp/skia/include/private/SkImageInfoPriv.h +0 -199
- package/cpp/skia/include/private/SkSLIRNode.h +0 -64
- package/cpp/skia/include/private/SkSLLayout.h +0 -144
- package/cpp/skia/include/private/SkSLModifiers.h +0 -178
- package/cpp/skia/include/private/SkSLProgramElement.h +0 -77
- package/cpp/skia/include/private/SkSLProgramKind.h +0 -35
- package/cpp/skia/include/private/SkSLStatement.h +0 -86
- package/cpp/skia/include/private/SkSLString.h +0 -41
- package/cpp/skia/include/private/SkSLSymbol.h +0 -94
- package/cpp/skia/include/private/SkSafe_math.h +0 -52
- package/cpp/skia/include/private/SkStringView.h +0 -51
- package/cpp/skia/include/private/SkTArray.h +0 -655
- package/cpp/skia/include/private/SkUniquePaintParamsID.h +0 -35
- package/cpp/skia/include/private/SkVx.h +0 -1026
- package/cpp/skia/include/sksl/DSL.h +0 -37
- package/cpp/skia/include/sksl/DSLBlock.h +0 -58
- package/cpp/skia/include/sksl/DSLCase.h +0 -62
- package/cpp/skia/include/sksl/DSLCore.h +0 -492
- package/cpp/skia/include/sksl/DSLExpression.h +0 -241
- package/cpp/skia/include/sksl/DSLFunction.h +0 -113
- package/cpp/skia/include/sksl/DSLLayout.h +0 -92
- package/cpp/skia/include/sksl/DSLModifiers.h +0 -69
- package/cpp/skia/include/sksl/DSLStatement.h +0 -82
- package/cpp/skia/include/sksl/DSLSymbols.h +0 -61
- package/cpp/skia/include/sksl/DSLType.h +0 -271
- package/cpp/skia/include/sksl/DSLVar.h +0 -231
- package/cpp/skia/include/sksl/SkSLErrorReporter.h +0 -65
- package/cpp/skia/include/sksl/SkSLOperator.h +0 -154
- package/cpp/skia/include/sksl/SkSLPosition.h +0 -104
- package/cpp/skia/include/utils/SkRandom.h +0 -169
- package/cpp/skia/src/core/SkLRUCache.h +0 -126
- package/cpp/skia/src/core/SkTInternalLList.h +0 -302
- /package/cpp/skia/include/{core → codec}/SkPngChunkReader.h +0 -0
- /package/cpp/skia/include/private/{SkTPin.h → base/SkTPin.h} +0 -0
- /package/cpp/skia/include/private/{SkThreadAnnotations.h → base/SkThreadAnnotations.h} +0 -0
@@ -1,1026 +0,0 @@
|
|
1
|
-
/*
|
2
|
-
* Copyright 2019 Google Inc.
|
3
|
-
*
|
4
|
-
* Use of this source code is governed by a BSD-style license that can be
|
5
|
-
* found in the LICENSE file.
|
6
|
-
*/
|
7
|
-
|
8
|
-
#ifndef SKVX_DEFINED
|
9
|
-
#define SKVX_DEFINED
|
10
|
-
|
11
|
-
// skvx::Vec<N,T> are SIMD vectors of N T's, a v1.5 successor to SkNx<N,T>.
|
12
|
-
//
|
13
|
-
// This time we're leaning a bit less on platform-specific intrinsics and a bit
|
14
|
-
// more on Clang/GCC vector extensions, but still keeping the option open to
|
15
|
-
// drop in platform-specific intrinsics, actually more easily than before.
|
16
|
-
//
|
17
|
-
// We've also fixed a few of the caveats that used to make SkNx awkward to work
|
18
|
-
// with across translation units. skvx::Vec<N,T> always has N*sizeof(T) size
|
19
|
-
// and alignment and is safe to use across translation units freely.
|
20
|
-
// (Ideally we'd only align to T, but that tanks ARMv7 NEON codegen.)
|
21
|
-
|
22
|
-
// Please try to keep this file independent of Skia headers.
|
23
|
-
#include <algorithm> // std::min, std::max
|
24
|
-
#include <cassert> // assert()
|
25
|
-
#include <cmath> // ceilf, floorf, truncf, roundf, sqrtf, etc.
|
26
|
-
#include <cstdint> // intXX_t
|
27
|
-
#include <cstring> // memcpy()
|
28
|
-
#include <initializer_list> // std::initializer_list
|
29
|
-
#include <utility> // std::index_sequence
|
30
|
-
|
31
|
-
// Users may disable SIMD with SKNX_NO_SIMD, which may be set via compiler flags.
|
32
|
-
// The gn build has no option which sets SKNX_NO_SIMD.
|
33
|
-
// Use SKVX_USE_SIMD internally to avoid confusing double negation.
|
34
|
-
// Do not use 'defined' in a macro expansion.
|
35
|
-
#if !defined(SKNX_NO_SIMD)
|
36
|
-
#define SKVX_USE_SIMD 1
|
37
|
-
#else
|
38
|
-
#define SKVX_USE_SIMD 0
|
39
|
-
#endif
|
40
|
-
|
41
|
-
#if SKVX_USE_SIMD
|
42
|
-
#if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__)
|
43
|
-
#include <immintrin.h>
|
44
|
-
#elif defined(__ARM_NEON)
|
45
|
-
#include <arm_neon.h>
|
46
|
-
#elif defined(__wasm_simd128__)
|
47
|
-
#include <wasm_simd128.h>
|
48
|
-
#endif
|
49
|
-
#endif
|
50
|
-
|
51
|
-
// To avoid ODR violations, all methods must be force-inlined...
|
52
|
-
#if defined(_MSC_VER)
|
53
|
-
#define SKVX_ALWAYS_INLINE __forceinline
|
54
|
-
#else
|
55
|
-
#define SKVX_ALWAYS_INLINE __attribute__((always_inline))
|
56
|
-
#endif
|
57
|
-
|
58
|
-
// ... and all standalone functions must be static. Please use these helpers:
|
59
|
-
#define SI static inline
|
60
|
-
#define SIT template < typename T> SI
|
61
|
-
#define SIN template <int N > SI
|
62
|
-
#define SINT template <int N, typename T> SI
|
63
|
-
#define SINTU template <int N, typename T, typename U, \
|
64
|
-
typename=std::enable_if_t<std::is_convertible<U,T>::value>> SI
|
65
|
-
|
66
|
-
namespace skvx {
|
67
|
-
|
68
|
-
template <int N, typename T>
|
69
|
-
struct alignas(N*sizeof(T)) Vec;
|
70
|
-
|
71
|
-
template <int... Ix, int N, typename T>
|
72
|
-
SI Vec<sizeof...(Ix),T> shuffle(const Vec<N,T>&);
|
73
|
-
|
74
|
-
template <typename D, typename S>
|
75
|
-
SI D bit_pun(const S&);
|
76
|
-
|
77
|
-
// All Vec have the same simple memory layout, the same as `T vec[N]`.
|
78
|
-
template <int N, typename T>
|
79
|
-
struct alignas(N*sizeof(T)) VecStorage {
|
80
|
-
SKVX_ALWAYS_INLINE VecStorage() = default;
|
81
|
-
SKVX_ALWAYS_INLINE VecStorage(T s) : lo(s), hi(s) {}
|
82
|
-
|
83
|
-
Vec<N/2,T> lo, hi;
|
84
|
-
};
|
85
|
-
|
86
|
-
template <typename T>
|
87
|
-
struct VecStorage<4,T> {
|
88
|
-
SKVX_ALWAYS_INLINE VecStorage() = default;
|
89
|
-
SKVX_ALWAYS_INLINE VecStorage(T s) : lo(s), hi(s) {}
|
90
|
-
SKVX_ALWAYS_INLINE VecStorage(T x, T y, T z, T w) : lo(x,y), hi(z, w) {}
|
91
|
-
SKVX_ALWAYS_INLINE VecStorage(Vec<2,T> xy, T z, T w) : lo(xy), hi(z,w) {}
|
92
|
-
SKVX_ALWAYS_INLINE VecStorage(T x, T y, Vec<2,T> zw) : lo(x,y), hi(zw) {}
|
93
|
-
SKVX_ALWAYS_INLINE VecStorage(Vec<2,T> xy, Vec<2,T> zw) : lo(xy), hi(zw) {}
|
94
|
-
|
95
|
-
SKVX_ALWAYS_INLINE Vec<2,T>& xy() { return lo; }
|
96
|
-
SKVX_ALWAYS_INLINE Vec<2,T>& zw() { return hi; }
|
97
|
-
SKVX_ALWAYS_INLINE T& x() { return lo.lo.val; }
|
98
|
-
SKVX_ALWAYS_INLINE T& y() { return lo.hi.val; }
|
99
|
-
SKVX_ALWAYS_INLINE T& z() { return hi.lo.val; }
|
100
|
-
SKVX_ALWAYS_INLINE T& w() { return hi.hi.val; }
|
101
|
-
|
102
|
-
SKVX_ALWAYS_INLINE Vec<2,T> xy() const { return lo; }
|
103
|
-
SKVX_ALWAYS_INLINE Vec<2,T> zw() const { return hi; }
|
104
|
-
SKVX_ALWAYS_INLINE T x() const { return lo.lo.val; }
|
105
|
-
SKVX_ALWAYS_INLINE T y() const { return lo.hi.val; }
|
106
|
-
SKVX_ALWAYS_INLINE T z() const { return hi.lo.val; }
|
107
|
-
SKVX_ALWAYS_INLINE T w() const { return hi.hi.val; }
|
108
|
-
|
109
|
-
// Exchange-based swizzles. These should take 1 cycle on NEON and 3 (pipelined) cycles on SSE.
|
110
|
-
SKVX_ALWAYS_INLINE Vec<4,T> yxwz() const { return shuffle<1,0,3,2>(bit_pun<Vec<4,T>>(*this)); }
|
111
|
-
SKVX_ALWAYS_INLINE Vec<4,T> zwxy() const { return shuffle<2,3,0,1>(bit_pun<Vec<4,T>>(*this)); }
|
112
|
-
|
113
|
-
Vec<2,T> lo, hi;
|
114
|
-
};
|
115
|
-
|
116
|
-
template <typename T>
|
117
|
-
struct VecStorage<2,T> {
|
118
|
-
SKVX_ALWAYS_INLINE VecStorage() = default;
|
119
|
-
SKVX_ALWAYS_INLINE VecStorage(T s) : lo(s), hi(s) {}
|
120
|
-
SKVX_ALWAYS_INLINE VecStorage(T x, T y) : lo(x), hi(y) {}
|
121
|
-
|
122
|
-
SKVX_ALWAYS_INLINE T& x() { return lo.val; }
|
123
|
-
SKVX_ALWAYS_INLINE T& y() { return hi.val; }
|
124
|
-
|
125
|
-
SKVX_ALWAYS_INLINE T x() const { return lo.val; }
|
126
|
-
SKVX_ALWAYS_INLINE T y() const { return hi.val; }
|
127
|
-
|
128
|
-
// This exchange-based swizzle should take 1 cycle on NEON and 3 (pipelined) cycles on SSE.
|
129
|
-
SKVX_ALWAYS_INLINE Vec<2,T> yx() const { return shuffle<1,0>(bit_pun<Vec<2,T>>(*this)); }
|
130
|
-
|
131
|
-
SKVX_ALWAYS_INLINE Vec<4,T> xyxy() const {
|
132
|
-
return Vec<4,T>(bit_pun<Vec<2,T>>(*this), bit_pun<Vec<2,T>>(*this));
|
133
|
-
}
|
134
|
-
|
135
|
-
Vec<1,T> lo, hi;
|
136
|
-
};
|
137
|
-
|
138
|
-
template <int N, typename T>
|
139
|
-
struct alignas(N*sizeof(T)) Vec : public VecStorage<N,T> {
|
140
|
-
static_assert((N & (N-1)) == 0, "N must be a power of 2.");
|
141
|
-
static_assert(sizeof(T) >= alignof(T), "What kind of unusual T is this?");
|
142
|
-
|
143
|
-
// Methods belong here in the class declaration of Vec only if:
|
144
|
-
// - they must be here, like constructors or operator[];
|
145
|
-
// - they'll definitely never want a specialized implementation.
|
146
|
-
// Other operations on Vec should be defined outside the type.
|
147
|
-
|
148
|
-
SKVX_ALWAYS_INLINE Vec() = default;
|
149
|
-
|
150
|
-
using VecStorage<N,T>::VecStorage;
|
151
|
-
|
152
|
-
// NOTE: Vec{x} produces x000..., whereas Vec(x) produces xxxx.... since this constructor fills
|
153
|
-
// unspecified lanes with 0s, whereas the single T constructor fills all lanes with the value.
|
154
|
-
SKVX_ALWAYS_INLINE Vec(std::initializer_list<T> xs) {
|
155
|
-
T vals[N] = {0};
|
156
|
-
memcpy(vals, xs.begin(), std::min(xs.size(), (size_t)N)*sizeof(T));
|
157
|
-
|
158
|
-
this->lo = Vec<N/2,T>::Load(vals + 0);
|
159
|
-
this->hi = Vec<N/2,T>::Load(vals + N/2);
|
160
|
-
}
|
161
|
-
|
162
|
-
SKVX_ALWAYS_INLINE T operator[](int i) const { return i<N/2 ? this->lo[i] : this->hi[i-N/2]; }
|
163
|
-
SKVX_ALWAYS_INLINE T& operator[](int i) { return i<N/2 ? this->lo[i] : this->hi[i-N/2]; }
|
164
|
-
|
165
|
-
SKVX_ALWAYS_INLINE static Vec Load(const void* ptr) {
|
166
|
-
Vec v;
|
167
|
-
memcpy(&v, ptr, sizeof(Vec));
|
168
|
-
return v;
|
169
|
-
}
|
170
|
-
SKVX_ALWAYS_INLINE void store(void* ptr) const {
|
171
|
-
memcpy(ptr, this, sizeof(Vec));
|
172
|
-
}
|
173
|
-
};
|
174
|
-
|
175
|
-
template <typename T>
|
176
|
-
struct Vec<1,T> {
|
177
|
-
T val;
|
178
|
-
|
179
|
-
SKVX_ALWAYS_INLINE Vec() = default;
|
180
|
-
|
181
|
-
Vec(T s) : val(s) {}
|
182
|
-
|
183
|
-
SKVX_ALWAYS_INLINE Vec(std::initializer_list<T> xs) : val(xs.size() ? *xs.begin() : 0) {}
|
184
|
-
|
185
|
-
SKVX_ALWAYS_INLINE T operator[](int) const { return val; }
|
186
|
-
SKVX_ALWAYS_INLINE T& operator[](int) { return val; }
|
187
|
-
|
188
|
-
SKVX_ALWAYS_INLINE static Vec Load(const void* ptr) {
|
189
|
-
Vec v;
|
190
|
-
memcpy(&v, ptr, sizeof(Vec));
|
191
|
-
return v;
|
192
|
-
}
|
193
|
-
SKVX_ALWAYS_INLINE void store(void* ptr) const {
|
194
|
-
memcpy(ptr, this, sizeof(Vec));
|
195
|
-
}
|
196
|
-
};
|
197
|
-
|
198
|
-
template <typename D, typename S>
|
199
|
-
SI D bit_pun(const S& s) {
|
200
|
-
static_assert(sizeof(D) == sizeof(S));
|
201
|
-
D d;
|
202
|
-
memcpy(&d, &s, sizeof(D));
|
203
|
-
return d;
|
204
|
-
}
|
205
|
-
|
206
|
-
// Translate from a value type T to its corresponding Mask, the result of a comparison.
|
207
|
-
template <typename T> struct Mask { using type = T; };
|
208
|
-
template <> struct Mask<float > { using type = int32_t; };
|
209
|
-
template <> struct Mask<double> { using type = int64_t; };
|
210
|
-
template <typename T> using M = typename Mask<T>::type;
|
211
|
-
|
212
|
-
// Join two Vec<N,T> into one Vec<2N,T>.
|
213
|
-
SINT Vec<2*N,T> join(const Vec<N,T>& lo, const Vec<N,T>& hi) {
|
214
|
-
Vec<2*N,T> v;
|
215
|
-
v.lo = lo;
|
216
|
-
v.hi = hi;
|
217
|
-
return v;
|
218
|
-
}
|
219
|
-
|
220
|
-
// We have three strategies for implementing Vec operations:
|
221
|
-
// 1) lean on Clang/GCC vector extensions when available;
|
222
|
-
// 2) use map() to apply a scalar function lane-wise;
|
223
|
-
// 3) recurse on lo/hi to scalar portable implementations.
|
224
|
-
// We can slot in platform-specific implementations as overloads for particular Vec<N,T>,
|
225
|
-
// or often integrate them directly into the recursion of style 3), allowing fine control.
|
226
|
-
|
227
|
-
#if SKVX_USE_SIMD && (defined(__clang__) || defined(__GNUC__))
|
228
|
-
|
229
|
-
// VExt<N,T> types have the same size as Vec<N,T> and support most operations directly.
|
230
|
-
#if defined(__clang__)
|
231
|
-
template <int N, typename T>
|
232
|
-
using VExt = T __attribute__((ext_vector_type(N)));
|
233
|
-
|
234
|
-
#elif defined(__GNUC__)
|
235
|
-
template <int N, typename T>
|
236
|
-
struct VExtHelper {
|
237
|
-
typedef T __attribute__((vector_size(N*sizeof(T)))) type;
|
238
|
-
};
|
239
|
-
|
240
|
-
template <int N, typename T>
|
241
|
-
using VExt = typename VExtHelper<N,T>::type;
|
242
|
-
|
243
|
-
// For some reason some (new!) versions of GCC cannot seem to deduce N in the generic
|
244
|
-
// to_vec<N,T>() below for N=4 and T=float. This workaround seems to help...
|
245
|
-
SI Vec<4,float> to_vec(VExt<4,float> v) { return bit_pun<Vec<4,float>>(v); }
|
246
|
-
#endif
|
247
|
-
|
248
|
-
SINT VExt<N,T> to_vext(const Vec<N,T>& v) { return bit_pun<VExt<N,T>>(v); }
|
249
|
-
SINT Vec <N,T> to_vec(const VExt<N,T>& v) { return bit_pun<Vec <N,T>>(v); }
|
250
|
-
|
251
|
-
SINT Vec<N,T> operator+(const Vec<N,T>& x, const Vec<N,T>& y) {
|
252
|
-
return to_vec<N,T>(to_vext(x) + to_vext(y));
|
253
|
-
}
|
254
|
-
SINT Vec<N,T> operator-(const Vec<N,T>& x, const Vec<N,T>& y) {
|
255
|
-
return to_vec<N,T>(to_vext(x) - to_vext(y));
|
256
|
-
}
|
257
|
-
SINT Vec<N,T> operator*(const Vec<N,T>& x, const Vec<N,T>& y) {
|
258
|
-
return to_vec<N,T>(to_vext(x) * to_vext(y));
|
259
|
-
}
|
260
|
-
SINT Vec<N,T> operator/(const Vec<N,T>& x, const Vec<N,T>& y) {
|
261
|
-
return to_vec<N,T>(to_vext(x) / to_vext(y));
|
262
|
-
}
|
263
|
-
|
264
|
-
SINT Vec<N,T> operator^(const Vec<N,T>& x, const Vec<N,T>& y) {
|
265
|
-
return to_vec<N,T>(to_vext(x) ^ to_vext(y));
|
266
|
-
}
|
267
|
-
SINT Vec<N,T> operator&(const Vec<N,T>& x, const Vec<N,T>& y) {
|
268
|
-
return to_vec<N,T>(to_vext(x) & to_vext(y));
|
269
|
-
}
|
270
|
-
SINT Vec<N,T> operator|(const Vec<N,T>& x, const Vec<N,T>& y) {
|
271
|
-
return to_vec<N,T>(to_vext(x) | to_vext(y));
|
272
|
-
}
|
273
|
-
|
274
|
-
SINT Vec<N,T> operator!(const Vec<N,T>& x) { return to_vec<N,T>(!to_vext(x)); }
|
275
|
-
SINT Vec<N,T> operator-(const Vec<N,T>& x) { return to_vec<N,T>(-to_vext(x)); }
|
276
|
-
SINT Vec<N,T> operator~(const Vec<N,T>& x) { return to_vec<N,T>(~to_vext(x)); }
|
277
|
-
|
278
|
-
SINT Vec<N,T> operator<<(const Vec<N,T>& x, int k) { return to_vec<N,T>(to_vext(x) << k); }
|
279
|
-
SINT Vec<N,T> operator>>(const Vec<N,T>& x, int k) { return to_vec<N,T>(to_vext(x) >> k); }
|
280
|
-
|
281
|
-
SINT Vec<N,M<T>> operator==(const Vec<N,T>& x, const Vec<N,T>& y) {
|
282
|
-
return bit_pun<Vec<N,M<T>>>(to_vext(x) == to_vext(y));
|
283
|
-
}
|
284
|
-
SINT Vec<N,M<T>> operator!=(const Vec<N,T>& x, const Vec<N,T>& y) {
|
285
|
-
return bit_pun<Vec<N,M<T>>>(to_vext(x) != to_vext(y));
|
286
|
-
}
|
287
|
-
SINT Vec<N,M<T>> operator<=(const Vec<N,T>& x, const Vec<N,T>& y) {
|
288
|
-
return bit_pun<Vec<N,M<T>>>(to_vext(x) <= to_vext(y));
|
289
|
-
}
|
290
|
-
SINT Vec<N,M<T>> operator>=(const Vec<N,T>& x, const Vec<N,T>& y) {
|
291
|
-
return bit_pun<Vec<N,M<T>>>(to_vext(x) >= to_vext(y));
|
292
|
-
}
|
293
|
-
SINT Vec<N,M<T>> operator< (const Vec<N,T>& x, const Vec<N,T>& y) {
|
294
|
-
return bit_pun<Vec<N,M<T>>>(to_vext(x) < to_vext(y));
|
295
|
-
}
|
296
|
-
SINT Vec<N,M<T>> operator> (const Vec<N,T>& x, const Vec<N,T>& y) {
|
297
|
-
return bit_pun<Vec<N,M<T>>>(to_vext(x) > to_vext(y));
|
298
|
-
}
|
299
|
-
|
300
|
-
#else
|
301
|
-
|
302
|
-
// Either SKNX_NO_SIMD is defined, or Clang/GCC vector extensions are not available.
|
303
|
-
// We'll implement things portably with N==1 scalar implementations and recursion onto them.
|
304
|
-
|
305
|
-
// N == 1 scalar implementations.
|
306
|
-
// N == 1 scalar base cases: operate directly on .val. Comparison results use
// the mask convention (~0 for true, 0 for false) so they match the SIMD lanes.
SIT Vec<1,T> operator+(const Vec<1,T>& a, const Vec<1,T>& b) { return a.val + b.val; }
SIT Vec<1,T> operator-(const Vec<1,T>& a, const Vec<1,T>& b) { return a.val - b.val; }
SIT Vec<1,T> operator*(const Vec<1,T>& a, const Vec<1,T>& b) { return a.val * b.val; }
SIT Vec<1,T> operator/(const Vec<1,T>& a, const Vec<1,T>& b) { return a.val / b.val; }

SIT Vec<1,T> operator^(const Vec<1,T>& a, const Vec<1,T>& b) { return a.val ^ b.val; }
SIT Vec<1,T> operator&(const Vec<1,T>& a, const Vec<1,T>& b) { return a.val & b.val; }
SIT Vec<1,T> operator|(const Vec<1,T>& a, const Vec<1,T>& b) { return a.val | b.val; }

SIT Vec<1,T> operator!(const Vec<1,T>& a) { return !a.val; }
SIT Vec<1,T> operator-(const Vec<1,T>& a) { return -a.val; }
SIT Vec<1,T> operator~(const Vec<1,T>& a) { return ~a.val; }

SIT Vec<1,T> operator<<(const Vec<1,T>& a, int k) { return a.val << k; }
SIT Vec<1,T> operator>>(const Vec<1,T>& a, int k) { return a.val >> k; }

SIT Vec<1,M<T>> operator==(const Vec<1,T>& a, const Vec<1,T>& b) { return a.val == b.val ? ~0 : 0; }
SIT Vec<1,M<T>> operator!=(const Vec<1,T>& a, const Vec<1,T>& b) { return a.val != b.val ? ~0 : 0; }
SIT Vec<1,M<T>> operator<=(const Vec<1,T>& a, const Vec<1,T>& b) { return a.val <= b.val ? ~0 : 0; }
SIT Vec<1,M<T>> operator>=(const Vec<1,T>& a, const Vec<1,T>& b) { return a.val >= b.val ? ~0 : 0; }
SIT Vec<1,M<T>> operator< (const Vec<1,T>& a, const Vec<1,T>& b) { return a.val <  b.val ? ~0 : 0; }
SIT Vec<1,M<T>> operator> (const Vec<1,T>& a, const Vec<1,T>& b) { return a.val >  b.val ? ~0 : 0; }
|
340
|
-
|
341
|
-
// Recurse on lo/hi down to N==1 scalar implementations.
|
342
|
-
// Portable N > 1 implementations: recurse on the lo/hi halves until we bottom
// out at the N == 1 scalar overloads above.
SINT Vec<N,T> operator+(const Vec<N,T>& a, const Vec<N,T>& b) { return join(a.lo + b.lo, a.hi + b.hi); }
SINT Vec<N,T> operator-(const Vec<N,T>& a, const Vec<N,T>& b) { return join(a.lo - b.lo, a.hi - b.hi); }
SINT Vec<N,T> operator*(const Vec<N,T>& a, const Vec<N,T>& b) { return join(a.lo * b.lo, a.hi * b.hi); }
SINT Vec<N,T> operator/(const Vec<N,T>& a, const Vec<N,T>& b) { return join(a.lo / b.lo, a.hi / b.hi); }

SINT Vec<N,T> operator^(const Vec<N,T>& a, const Vec<N,T>& b) { return join(a.lo ^ b.lo, a.hi ^ b.hi); }
SINT Vec<N,T> operator&(const Vec<N,T>& a, const Vec<N,T>& b) { return join(a.lo & b.lo, a.hi & b.hi); }
SINT Vec<N,T> operator|(const Vec<N,T>& a, const Vec<N,T>& b) { return join(a.lo | b.lo, a.hi | b.hi); }

SINT Vec<N,T> operator!(const Vec<N,T>& a) { return join(!a.lo, !a.hi); }
SINT Vec<N,T> operator-(const Vec<N,T>& a) { return join(-a.lo, -a.hi); }
SINT Vec<N,T> operator~(const Vec<N,T>& a) { return join(~a.lo, ~a.hi); }

SINT Vec<N,T> operator<<(const Vec<N,T>& a, int k) { return join(a.lo << k, a.hi << k); }
SINT Vec<N,T> operator>>(const Vec<N,T>& a, int k) { return join(a.lo >> k, a.hi >> k); }

SINT Vec<N,M<T>> operator==(const Vec<N,T>& a, const Vec<N,T>& b) { return join(a.lo == b.lo, a.hi == b.hi); }
SINT Vec<N,M<T>> operator!=(const Vec<N,T>& a, const Vec<N,T>& b) { return join(a.lo != b.lo, a.hi != b.hi); }
SINT Vec<N,M<T>> operator<=(const Vec<N,T>& a, const Vec<N,T>& b) { return join(a.lo <= b.lo, a.hi <= b.hi); }
SINT Vec<N,M<T>> operator>=(const Vec<N,T>& a, const Vec<N,T>& b) { return join(a.lo >= b.lo, a.hi >= b.hi); }
SINT Vec<N,M<T>> operator< (const Vec<N,T>& a, const Vec<N,T>& b) { return join(a.lo <  b.lo, a.hi <  b.hi); }
SINT Vec<N,M<T>> operator> (const Vec<N,T>& a, const Vec<N,T>& b) { return join(a.lo >  b.lo, a.hi >  b.hi); }
|
390
|
-
#endif
|
391
|
-
|
392
|
-
// Scalar/vector operations splat the scalar to a vector.
|
393
|
-
// Mixed scalar/vector operators: splat the scalar to a full vector, then
// forward to the vector/vector overloads above.
SINTU Vec<N,T> operator+ (U s, const Vec<N,T>& v) { return Vec<N,T>(s) +  v; }
SINTU Vec<N,T> operator- (U s, const Vec<N,T>& v) { return Vec<N,T>(s) -  v; }
SINTU Vec<N,T> operator* (U s, const Vec<N,T>& v) { return Vec<N,T>(s) *  v; }
SINTU Vec<N,T> operator/ (U s, const Vec<N,T>& v) { return Vec<N,T>(s) /  v; }
SINTU Vec<N,T> operator^ (U s, const Vec<N,T>& v) { return Vec<N,T>(s) ^  v; }
SINTU Vec<N,T> operator& (U s, const Vec<N,T>& v) { return Vec<N,T>(s) &  v; }
SINTU Vec<N,T> operator| (U s, const Vec<N,T>& v) { return Vec<N,T>(s) |  v; }
SINTU Vec<N,M<T>> operator==(U s, const Vec<N,T>& v) { return Vec<N,T>(s) == v; }
SINTU Vec<N,M<T>> operator!=(U s, const Vec<N,T>& v) { return Vec<N,T>(s) != v; }
SINTU Vec<N,M<T>> operator<=(U s, const Vec<N,T>& v) { return Vec<N,T>(s) <= v; }
SINTU Vec<N,M<T>> operator>=(U s, const Vec<N,T>& v) { return Vec<N,T>(s) >= v; }
SINTU Vec<N,M<T>> operator< (U s, const Vec<N,T>& v) { return Vec<N,T>(s) <  v; }
SINTU Vec<N,M<T>> operator> (U s, const Vec<N,T>& v) { return Vec<N,T>(s) >  v; }

SINTU Vec<N,T> operator+ (const Vec<N,T>& v, U s) { return v +  Vec<N,T>(s); }
SINTU Vec<N,T> operator- (const Vec<N,T>& v, U s) { return v -  Vec<N,T>(s); }
SINTU Vec<N,T> operator* (const Vec<N,T>& v, U s) { return v *  Vec<N,T>(s); }
SINTU Vec<N,T> operator/ (const Vec<N,T>& v, U s) { return v /  Vec<N,T>(s); }
SINTU Vec<N,T> operator^ (const Vec<N,T>& v, U s) { return v ^  Vec<N,T>(s); }
SINTU Vec<N,T> operator& (const Vec<N,T>& v, U s) { return v &  Vec<N,T>(s); }
SINTU Vec<N,T> operator| (const Vec<N,T>& v, U s) { return v |  Vec<N,T>(s); }
SINTU Vec<N,M<T>> operator==(const Vec<N,T>& v, U s) { return v == Vec<N,T>(s); }
SINTU Vec<N,M<T>> operator!=(const Vec<N,T>& v, U s) { return v != Vec<N,T>(s); }
SINTU Vec<N,M<T>> operator<=(const Vec<N,T>& v, U s) { return v <= Vec<N,T>(s); }
SINTU Vec<N,M<T>> operator>=(const Vec<N,T>& v, U s) { return v >= Vec<N,T>(s); }
SINTU Vec<N,M<T>> operator< (const Vec<N,T>& v, U s) { return v <  Vec<N,T>(s); }
SINTU Vec<N,M<T>> operator> (const Vec<N,T>& v, U s) { return v >  Vec<N,T>(s); }
|
420
|
-
|
421
|
-
SINT Vec<N,T>& operator+=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x + y); }
|
422
|
-
SINT Vec<N,T>& operator-=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x - y); }
|
423
|
-
SINT Vec<N,T>& operator*=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x * y); }
|
424
|
-
SINT Vec<N,T>& operator/=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x / y); }
|
425
|
-
SINT Vec<N,T>& operator^=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x ^ y); }
|
426
|
-
SINT Vec<N,T>& operator&=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x & y); }
|
427
|
-
SINT Vec<N,T>& operator|=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x | y); }
|
428
|
-
|
429
|
-
SINTU Vec<N,T>& operator+=(Vec<N,T>& x, U y) { return (x = x + Vec<N,T>(y)); }
|
430
|
-
SINTU Vec<N,T>& operator-=(Vec<N,T>& x, U y) { return (x = x - Vec<N,T>(y)); }
|
431
|
-
SINTU Vec<N,T>& operator*=(Vec<N,T>& x, U y) { return (x = x * Vec<N,T>(y)); }
|
432
|
-
SINTU Vec<N,T>& operator/=(Vec<N,T>& x, U y) { return (x = x / Vec<N,T>(y)); }
|
433
|
-
SINTU Vec<N,T>& operator^=(Vec<N,T>& x, U y) { return (x = x ^ Vec<N,T>(y)); }
|
434
|
-
SINTU Vec<N,T>& operator&=(Vec<N,T>& x, U y) { return (x = x & Vec<N,T>(y)); }
|
435
|
-
SINTU Vec<N,T>& operator|=(Vec<N,T>& x, U y) { return (x = x | Vec<N,T>(y)); }
|
436
|
-
|
437
|
-
SINT Vec<N,T>& operator<<=(Vec<N,T>& x, int bits) { return (x = x << bits); }
|
438
|
-
SINT Vec<N,T>& operator>>=(Vec<N,T>& x, int bits) { return (x = x >> bits); }
|
439
|
-
|
440
|
-
// Some operations we want are not expressible with Clang/GCC vector extensions.
|
441
|
-
|
442
|
-
// Clang can reason about naive_if_then_else() and optimize through it better
|
443
|
-
// than if_then_else(), so it's sometimes useful to call it directly when we
|
444
|
-
// think an entire expression should optimize away, e.g. min()/max().
|
445
|
-
// Bitwise lane select: where cond's lane is all-1s take t's bits, else e's.
// Clang reasons through this form better than if_then_else(), so call it
// directly when the whole expression should optimize away (e.g. min()/max()).
SINT Vec<N,T> naive_if_then_else(const Vec<N,M<T>>& cond, const Vec<N,T>& t, const Vec<N,T>& e) {
    auto tBits = bit_pun<Vec<N,M<T>>>(t),
         eBits = bit_pun<Vec<N,M<T>>>(e);
    return bit_pun<Vec<N,T>>((cond & tBits) | (~cond & eBits));
}
|
449
|
-
|
450
|
-
// N == 1 base case; in practice rarely hit (see the generic overload below).
SIT Vec<1,T> if_then_else(const Vec<1,M<T>>& cond, const Vec<1,T>& t, const Vec<1,T>& e) {
    auto tBits = bit_pun<Vec<1,M<T>>>(t),
         eBits = bit_pun<Vec<1,M<T>>>(e);
    return bit_pun<Vec<1,T>>((cond & tBits) | (~cond & eBits));
}
|
455
|
-
// Lane select with hardware blends where available. The specializations sit
// inline (rather than as template specializations) so a single check on byte
// width covers every element type of that size.
SINT Vec<N,T> if_then_else(const Vec<N,M<T>>& cond, const Vec<N,T>& t, const Vec<N,T>& e) {
#if SKVX_USE_SIMD && defined(__AVX2__)
    if constexpr (N*sizeof(T) == 32) {
        // blendv picks t's byte where cond's byte MSB is set; masks are all-1s/all-0s per lane.
        return bit_pun<Vec<N,T>>(_mm256_blendv_epi8(bit_pun<__m256i>(e),
                                                    bit_pun<__m256i>(t),
                                                    bit_pun<__m256i>(cond)));
    }
#endif
#if SKVX_USE_SIMD && defined(__SSE4_1__)
    if constexpr (N*sizeof(T) == 16) {
        return bit_pun<Vec<N,T>>(_mm_blendv_epi8(bit_pun<__m128i>(e),
                                                 bit_pun<__m128i>(t),
                                                 bit_pun<__m128i>(cond)));
    }
#endif
#if SKVX_USE_SIMD && defined(__ARM_NEON)
    if constexpr (N*sizeof(T) == 16) {
        return bit_pun<Vec<N,T>>(vbslq_u8(bit_pun<uint8x16_t>(cond),
                                          bit_pun<uint8x16_t>(t),
                                          bit_pun<uint8x16_t>(e)));
    }
#endif
    // Split oversized vectors so the halves can hit a specialization above.
    if constexpr (N*sizeof(T) > 16) {
        return join(if_then_else(cond.lo, t.lo, e.lo),
                    if_then_else(cond.hi, t.hi, e.hi));
    }
    // The naive bitwise form tends to codegen better than recursing to scalars.
    return naive_if_then_else(cond, t, e);
}
|
486
|
-
|
487
|
-
SIT bool any(const Vec<1,T>& x) { return x.val != 0; }
|
488
|
-
SINT bool any(const Vec<N,T>& x) {
|
489
|
-
// For any(), the _mm_testz intrinsics are correct and don't require comparing 'x' to 0, so it's
|
490
|
-
// lower latency compared to _mm_movemask + _mm_compneq on plain SSE.
|
491
|
-
#if SKVX_USE_SIMD && defined(__AVX2__)
|
492
|
-
if constexpr (N*sizeof(T) == 32) {
|
493
|
-
return !_mm256_testz_si256(bit_pun<__m256i>(x), _mm256_set1_epi32(-1));
|
494
|
-
}
|
495
|
-
#endif
|
496
|
-
#if SKVX_USE_SIMD && defined(__SSE_4_1__)
|
497
|
-
if constexpr (N*sizeof(T) == 16) {
|
498
|
-
return !_mm_testz_si128(bit_pun<__m128i>(x), _mm_set1_epi32(-1));
|
499
|
-
}
|
500
|
-
#endif
|
501
|
-
#if SKVX_USE_SIMD && defined(__SSE__)
|
502
|
-
if constexpr (N*sizeof(T) == 16) {
|
503
|
-
// On SSE, movemask checks only the MSB in each lane, which is fine if the lanes were set
|
504
|
-
// directly from a comparison op (which sets all bits to 1 when true), but skvx::Vec<>
|
505
|
-
// treats any non-zero value as true, so we have to compare 'x' to 0 before calling movemask
|
506
|
-
return _mm_movemask_ps(_mm_cmpneq_ps(bit_pun<__m128>(x), _mm_set1_ps(0))) != 0b0000;
|
507
|
-
}
|
508
|
-
#endif
|
509
|
-
#if SKVX_USE_SIMD && defined(__aarch64__)
|
510
|
-
// On 64-bit NEON, take the max across lanes, which will be non-zero if any lane was true.
|
511
|
-
// The specific lane-size doesn't really matter in this case since it's really any set bit
|
512
|
-
// that we're looking for.
|
513
|
-
if constexpr (N*sizeof(T) == 8 ) { return vmaxv_u8 (bit_pun<uint8x8_t> (x)) > 0; }
|
514
|
-
if constexpr (N*sizeof(T) == 16) { return vmaxvq_u8(bit_pun<uint8x16_t>(x)) > 0; }
|
515
|
-
#endif
|
516
|
-
#if SKVX_USE_SIMD && defined(__wasm_simd128__)
|
517
|
-
if constexpr (N == 4 && sizeof(T) == 4) {
|
518
|
-
return wasm_i32x4_any_true(bit_pun<VExt<4,int>>(x));
|
519
|
-
}
|
520
|
-
#endif
|
521
|
-
return any(x.lo)
|
522
|
-
|| any(x.hi);
|
523
|
-
}
|
524
|
-
|
525
|
-
SIT bool all(const Vec<1,T>& x) { return x.val != 0; }
|
526
|
-
SINT bool all(const Vec<N,T>& x) {
|
527
|
-
// Unlike any(), we have to respect the lane layout, or we'll miss cases where a
|
528
|
-
// true lane has a mix of 0 and 1 bits.
|
529
|
-
#if SKVX_USE_SIMD && defined(__SSE__)
|
530
|
-
// Unfortunately, the _mm_testc intrinsics don't let us avoid the comparison to 0 for all()'s
|
531
|
-
// correctness, so always just use the plain SSE version.
|
532
|
-
if constexpr (N == 4 && sizeof(T) == 4) {
|
533
|
-
return _mm_movemask_ps(_mm_cmpneq_ps(bit_pun<__m128>(x), _mm_set1_ps(0))) == 0b1111;
|
534
|
-
}
|
535
|
-
#endif
|
536
|
-
#if SKVX_USE_SIMD && defined(__aarch64__)
|
537
|
-
// On 64-bit NEON, take the min across the lanes, which will be non-zero if all lanes are != 0.
|
538
|
-
if constexpr (sizeof(T)==1 && N==8) {return vminv_u8 (bit_pun<uint8x8_t> (x)) > 0;}
|
539
|
-
if constexpr (sizeof(T)==1 && N==16) {return vminvq_u8 (bit_pun<uint8x16_t>(x)) > 0;}
|
540
|
-
if constexpr (sizeof(T)==2 && N==4) {return vminv_u16 (bit_pun<uint16x4_t>(x)) > 0;}
|
541
|
-
if constexpr (sizeof(T)==2 && N==8) {return vminvq_u16(bit_pun<uint16x8_t>(x)) > 0;}
|
542
|
-
if constexpr (sizeof(T)==4 && N==2) {return vminv_u32 (bit_pun<uint32x2_t>(x)) > 0;}
|
543
|
-
if constexpr (sizeof(T)==4 && N==4) {return vminvq_u32(bit_pun<uint32x4_t>(x)) > 0;}
|
544
|
-
#endif
|
545
|
-
#if SKVX_USE_SIMD && defined(__wasm_simd128__)
|
546
|
-
if constexpr (N == 4 && sizeof(T) == 4) {
|
547
|
-
return wasm_i32x4_all_true(bit_pun<VExt<4,int>>(x));
|
548
|
-
}
|
549
|
-
#endif
|
550
|
-
return all(x.lo)
|
551
|
-
&& all(x.hi);
|
552
|
-
}
|
553
|
-
|
554
|
-
// cast() Vec<N,S> to Vec<N,D>, as if applying a C-cast to each lane.
|
555
|
-
// TODO: implement with map()?
|
556
|
-
template <typename D, typename S>
|
557
|
-
SI Vec<1,D> cast(const Vec<1,S>& src) { return (D)src.val; }
|
558
|
-
|
559
|
-
template <typename D, int N, typename S>
|
560
|
-
SI Vec<N,D> cast(const Vec<N,S>& src) {
|
561
|
-
#if SKVX_USE_SIMD && defined(__clang__)
|
562
|
-
return to_vec(__builtin_convertvector(to_vext(src), VExt<N,D>));
|
563
|
-
#else
|
564
|
-
return join(cast<D>(src.lo), cast<D>(src.hi));
|
565
|
-
#endif
|
566
|
-
}
|
567
|
-
|
568
|
-
// min/max match logic of std::min/std::max, which is important when NaN is involved.
|
569
|
-
SIT T min(const Vec<1,T>& x) { return x.val; }
|
570
|
-
SIT T max(const Vec<1,T>& x) { return x.val; }
|
571
|
-
SINT T min(const Vec<N,T>& x) { return std::min(min(x.lo), min(x.hi)); }
|
572
|
-
SINT T max(const Vec<N,T>& x) { return std::max(max(x.lo), max(x.hi)); }
|
573
|
-
|
574
|
-
SINT Vec<N,T> min(const Vec<N,T>& x, const Vec<N,T>& y) { return naive_if_then_else(y < x, y, x); }
|
575
|
-
SINT Vec<N,T> max(const Vec<N,T>& x, const Vec<N,T>& y) { return naive_if_then_else(x < y, y, x); }
|
576
|
-
|
577
|
-
SINTU Vec<N,T> min(const Vec<N,T>& x, U y) { return min(x, Vec<N,T>(y)); }
|
578
|
-
SINTU Vec<N,T> max(const Vec<N,T>& x, U y) { return max(x, Vec<N,T>(y)); }
|
579
|
-
SINTU Vec<N,T> min(U x, const Vec<N,T>& y) { return min(Vec<N,T>(x), y); }
|
580
|
-
SINTU Vec<N,T> max(U x, const Vec<N,T>& y) { return max(Vec<N,T>(x), y); }
|
581
|
-
|
582
|
-
// pin matches the logic of SkTPin, which is important when NaN is involved. It always returns
|
583
|
-
// values in the range lo..hi, and if x is NaN, it returns lo.
|
584
|
-
SINT Vec<N,T> pin(const Vec<N,T>& x, const Vec<N,T>& lo, const Vec<N,T>& hi) {
|
585
|
-
return max(lo, min(x, hi));
|
586
|
-
}
|
587
|
-
|
588
|
-
// Shuffle values from a vector pretty arbitrarily:
|
589
|
-
// skvx::Vec<4,float> rgba = {R,G,B,A};
|
590
|
-
// shuffle<2,1,0,3> (rgba) ~> {B,G,R,A}
|
591
|
-
// shuffle<2,1> (rgba) ~> {B,G}
|
592
|
-
// shuffle<2,1,2,1,2,1,2,1>(rgba) ~> {B,G,B,G,B,G,B,G}
|
593
|
-
// shuffle<3,3,3,3> (rgba) ~> {A,A,A,A}
|
594
|
-
// The only real restriction is that the output also be a legal N=power-of-two sknx::Vec.
|
595
|
-
template <int... Ix, int N, typename T>
|
596
|
-
SI Vec<sizeof...(Ix),T> shuffle(const Vec<N,T>& x) {
|
597
|
-
#if SKVX_USE_SIMD && defined(__clang__)
|
598
|
-
// TODO: can we just always use { x[Ix]... }?
|
599
|
-
return to_vec<sizeof...(Ix),T>(__builtin_shufflevector(to_vext(x), to_vext(x), Ix...));
|
600
|
-
#else
|
601
|
-
return { x[Ix]... };
|
602
|
-
#endif
|
603
|
-
}
|
604
|
-
|
605
|
-
// Call map(fn, x) for a vector with fn() applied to each lane of x, { fn(x[0]), fn(x[1]), ... },
|
606
|
-
// or map(fn, x,y) for a vector of fn(x[i], y[i]), etc.
|
607
|
-
|
608
|
-
template <typename Fn, typename... Args, size_t... I>
|
609
|
-
SI auto map(std::index_sequence<I...>,
|
610
|
-
Fn&& fn, const Args&... args) -> skvx::Vec<sizeof...(I), decltype(fn(args[0]...))> {
|
611
|
-
auto lane = [&](size_t i)
|
612
|
-
#if defined(__clang__)
|
613
|
-
// CFI, specifically -fsanitize=cfi-icall, seems to give a false positive here,
|
614
|
-
// with errors like "control flow integrity check for type 'float (float)
|
615
|
-
// noexcept' failed during indirect function call... note: sqrtf.cfi_jt defined
|
616
|
-
// here". But we can be quite sure fn is the right type: it's all inferred!
|
617
|
-
// So, stifle CFI in this function.
|
618
|
-
__attribute__((no_sanitize("cfi")))
|
619
|
-
#endif
|
620
|
-
{ return fn(args[i]...); };
|
621
|
-
|
622
|
-
return { lane(I)... };
|
623
|
-
}
|
624
|
-
|
625
|
-
template <typename Fn, int N, typename T, typename... Rest>
|
626
|
-
auto map(Fn&& fn, const Vec<N,T>& first, const Rest&... rest) {
|
627
|
-
// Derive an {0...N-1} index_sequence from the size of the first arg: N lanes in, N lanes out.
|
628
|
-
return map(std::make_index_sequence<N>{}, fn, first,rest...);
|
629
|
-
}
|
630
|
-
|
631
|
-
// Lane-wise <cmath> wrappers.
SIN Vec<N,float> ceil (const Vec<N,float>& x) { return map( ceilf, x); }
SIN Vec<N,float> floor(const Vec<N,float>& x) { return map(floorf, x); }
SIN Vec<N,float> trunc(const Vec<N,float>& x) { return map(truncf, x); }
SIN Vec<N,float> round(const Vec<N,float>& x) { return map(roundf, x); }
SIN Vec<N,float> sqrt (const Vec<N,float>& x) { return map( sqrtf, x); }
SIN Vec<N,float> abs  (const Vec<N,float>& x) { return map( fabsf, x); }

SIN Vec<N,float> fma(const Vec<N,float>& x,
                     const Vec<N,float>& y,
                     const Vec<N,float>& z) {
    // Clang's codegen for map(fmaf, x,y,z) directly is surprisingly poor, so
    // route through a lambda instead.
    auto fusedMulAdd = [](float a, float b, float c) { return fmaf(a,b,c); };
    return map(fusedMulAdd, x,y,z);
}
|
644
|
-
|
645
|
-
SI Vec<1,int> lrint(const Vec<1,float>& x) {
|
646
|
-
return (int)lrintf(x.val);
|
647
|
-
}
|
648
|
-
SIN Vec<N,int> lrint(const Vec<N,float>& x) {
|
649
|
-
#if SKVX_USE_SIMD && defined(__AVX__)
|
650
|
-
if constexpr (N == 8) {
|
651
|
-
return bit_pun<Vec<N,int>>(_mm256_cvtps_epi32(bit_pun<__m256>(x)));
|
652
|
-
}
|
653
|
-
#endif
|
654
|
-
#if SKVX_USE_SIMD && defined(__SSE__)
|
655
|
-
if constexpr (N == 4) {
|
656
|
-
return bit_pun<Vec<N,int>>(_mm_cvtps_epi32(bit_pun<__m128>(x)));
|
657
|
-
}
|
658
|
-
#endif
|
659
|
-
return join(lrint(x.lo),
|
660
|
-
lrint(x.hi));
|
661
|
-
}
|
662
|
-
|
663
|
-
// Fractional part: always in [0,1) for finite x, since floor(x) <= x.
SIN Vec<N,float> fract(const Vec<N,float>& x) {
    return x - floor(x);
}
|
664
|
-
|
665
|
-
// Assumes inputs are finite and treat/flush denorm half floats as/to zero.
|
666
|
-
// Key constants to watch for:
|
667
|
-
// - a float is 32-bit, 1-8-23 sign-exponent-mantissa, with 127 exponent bias;
|
668
|
-
// - a half is 16-bit, 1-5-10 sign-exponent-mantissa, with 15 exponent bias.
|
669
|
-
// float <-> f16 converters that assume finite inputs and flush/treat denormal
// halfs to/as zero. Bit layouts:
//   float: 1-8-23 sign-exponent-mantissa, exponent bias 127;
//   half:  1-5-10 sign-exponent-mantissa, exponent bias 15.
SIN Vec<N,uint16_t> to_half_finite_ftz(const Vec<N,float>& x) {
    Vec<N,uint32_t> sem = bit_pun<Vec<N,uint32_t>>(x),
                    s   = sem & 0x8000'0000,      // sign bit
                    em  = sem ^ s,                // exponent+mantissa
                    // threshold halfway between the largest f16 denorm and smallest norm:
                    is_norm = em > 0x387f'd000,
                    // rebias exponent and drop 13 mantissa bits:
                    norm    = (em>>13) - ((127-15)<<10);
    return cast<uint16_t>((s>>16) | (is_norm & norm));
}
SIN Vec<N,float> from_half_finite_ftz(const Vec<N,uint16_t>& x) {
    Vec<N,uint32_t> wide = cast<uint32_t>(x),
                    s    = wide & 0x8000,         // sign bit (still in f16 position)
                    em   = wide ^ s,
                    is_norm = em > 0x3ff,         // anything above the denorm range
                    norm    = (em<<13) + ((127-15)<<23);  // widen mantissa, rebias exponent
    return bit_pun<Vec<N,float>>((s<<16) | (is_norm & norm));
}
|
685
|
-
|
686
|
-
// Like if_then_else(), these N=1 base cases won't actually be used unless explicitly called.
|
687
|
-
SI Vec<1,uint16_t> to_half(const Vec<1,float>& x) { return to_half_finite_ftz(x); }
|
688
|
-
SI Vec<1,float> from_half(const Vec<1,uint16_t>& x) { return from_half_finite_ftz(x); }
|
689
|
-
|
690
|
-
SIN Vec<N,uint16_t> to_half(const Vec<N,float>& x) {
|
691
|
-
#if SKVX_USE_SIMD && defined(__F16C__)
|
692
|
-
if constexpr (N == 8) {
|
693
|
-
return bit_pun<Vec<N,uint16_t>>(_mm256_cvtps_ph(bit_pun<__m256>(x),
|
694
|
-
_MM_FROUND_TO_NEAREST_INT));
|
695
|
-
}
|
696
|
-
#endif
|
697
|
-
#if SKVX_USE_SIMD && defined(__aarch64__)
|
698
|
-
if constexpr (N == 4) {
|
699
|
-
return bit_pun<Vec<N,uint16_t>>(vcvt_f16_f32(bit_pun<float32x4_t>(x)));
|
700
|
-
|
701
|
-
}
|
702
|
-
#endif
|
703
|
-
if constexpr (N > 4) {
|
704
|
-
return join(to_half(x.lo),
|
705
|
-
to_half(x.hi));
|
706
|
-
}
|
707
|
-
return to_half_finite_ftz(x);
|
708
|
-
}
|
709
|
-
|
710
|
-
SIN Vec<N,float> from_half(const Vec<N,uint16_t>& x) {
|
711
|
-
#if SKVX_USE_SIMD && defined(__F16C__)
|
712
|
-
if constexpr (N == 8) {
|
713
|
-
return bit_pun<Vec<N,float>>(_mm256_cvtph_ps(bit_pun<__m128i>(x)));
|
714
|
-
}
|
715
|
-
#endif
|
716
|
-
#if SKVX_USE_SIMD && defined(__aarch64__)
|
717
|
-
if constexpr (N == 4) {
|
718
|
-
return bit_pun<Vec<N,float>>(vcvt_f32_f16(bit_pun<float16x4_t>(x)));
|
719
|
-
}
|
720
|
-
#endif
|
721
|
-
if constexpr (N > 4) {
|
722
|
-
return join(from_half(x.lo),
|
723
|
-
from_half(x.hi));
|
724
|
-
}
|
725
|
-
return from_half_finite_ftz(x);
|
726
|
-
}
|
727
|
-
|
728
|
-
// div255(x) = (x + 127) / 255 is a bit-exact rounding divide-by-255, packing down to 8-bit.
|
729
|
-
// div255(x) == (x + 127) / 255: a bit-exact rounding divide-by-255 that packs
// back down to 8 bits.
SIN Vec<N,uint8_t> div255(const Vec<N,uint16_t>& x) {
    return cast<uint8_t>( (x+127)/255 );
}

// approx_scale(x,y) ~ div255(cast<uint16_t>(x)*cast<uint16_t>(y)) to within one
// bit, and exact whenever x or y is 0 or 255.
SIN Vec<N,uint8_t> approx_scale(const Vec<N,uint8_t>& x, const Vec<N,uint8_t>& y) {
    // (x*y+x)/256, (x*y+y)/256, and (x*y+255)/256 all satisfy the above;
    // (x*y+x)/256 is the historical choice.
    auto wx = cast<uint16_t>(x),
         wy = cast<uint16_t>(y);
    return cast<uint8_t>( (wx*wy+wx)/256 );
}
|
742
|
-
|
743
|
-
// saturated_add(x,y) sums values and clamps to the maximum value instead of overflowing.
|
744
|
-
// saturated_add(x,y): unsigned addition that clamps to the type's max instead
// of wrapping on overflow.
SINT std::enable_if_t<std::is_unsigned_v<T>, Vec<N,T>> saturated_add(const Vec<N,T>& x,
                                                                     const Vec<N,T>& y) {
#if SKVX_USE_SIMD && (defined(__SSE__) || defined(__ARM_NEON))
    // Both SSE and NEON provide 16-lane u8 saturated adds; widen smaller
    // vectors up and split larger ones down to land on that width.
    if constexpr (N == 16 && sizeof(T) == 1) {
    #if defined(__SSE__)
        return bit_pun<Vec<N,T>>(_mm_adds_epu8(bit_pun<__m128i>(x), bit_pun<__m128i>(y)));
    #else // __ARM_NEON
        return bit_pun<Vec<N,T>>(vqaddq_u8(bit_pun<uint8x16_t>(x), bit_pun<uint8x16_t>(y)));
    #endif
    } else if constexpr (N < 16 && sizeof(T) == 1) {
        return saturated_add(join(x,x), join(y,y)).lo;
    } else if constexpr (sizeof(T) == 1) {
        return join(saturated_add(x.lo, y.lo), saturated_add(x.hi, y.hi));
    }
#endif
    // Manual fallback: unsigned wrap-around means sum < x exactly on overflow.
    auto sum = x + y;
    return if_then_else(sum < x, Vec<N,T>(std::numeric_limits<T>::max()), sum);
}
|
765
|
-
|
766
|
-
// The ScaledDividerU32 takes a divisor > 1, and creates a function divide(numerator) that
|
767
|
-
// calculates a numerator / denominator. For this to be rounded properly, numerator should have
|
768
|
-
// half added in:
|
769
|
-
// divide(numerator + half) == floor(numerator/denominator + 1/2).
|
770
|
-
//
|
771
|
-
// This gives an answer within +/- 1 from the true value.
|
772
|
-
//
|
773
|
-
// Derivation of half:
|
774
|
-
// numerator/denominator + 1/2 = (numerator + half) / d
|
775
|
-
// numerator + denominator / 2 = numerator + half
|
776
|
-
// half = denominator / 2.
|
777
|
-
//
|
778
|
-
// Because half is divided by 2, that division must also be rounded.
|
779
|
-
// half == denominator / 2 = (denominator + 1) / 2.
|
780
|
-
//
|
781
|
-
// The divisorFactor is just a scaled value:
|
782
|
-
// divisorFactor = (1 / divisor) * 2 ^ 32.
|
783
|
-
// The maximum that can be divided and rounded is UINT_MAX - half.
|
784
|
-
class ScaledDividerU32 {
|
785
|
-
public:
|
786
|
-
explicit ScaledDividerU32(uint32_t divisor)
|
787
|
-
: fDivisorFactor{(uint32_t)(std::round((1.0 / divisor) * (1ull << 32)))}
|
788
|
-
, fHalf{(divisor + 1) >> 1} {
|
789
|
-
assert(divisor > 1);
|
790
|
-
}
|
791
|
-
|
792
|
-
Vec<4, uint32_t> divide(const Vec<4, uint32_t>& numerator) const {
|
793
|
-
#if SKVX_USE_SIMD && defined(__ARM_NEON)
|
794
|
-
uint64x2_t hi = vmull_n_u32(vget_high_u32(to_vext(numerator)), fDivisorFactor);
|
795
|
-
uint64x2_t lo = vmull_n_u32(vget_low_u32(to_vext(numerator)), fDivisorFactor);
|
796
|
-
|
797
|
-
return to_vec<4, uint32_t>(vcombine_u32(vshrn_n_u64(lo,32), vshrn_n_u64(hi,32)));
|
798
|
-
#else
|
799
|
-
return cast<uint32_t>((cast<uint64_t>(numerator) * fDivisorFactor) >> 32);
|
800
|
-
#endif
|
801
|
-
}
|
802
|
-
|
803
|
-
uint32_t half() const { return fHalf; }
|
804
|
-
|
805
|
-
private:
|
806
|
-
const uint32_t fDivisorFactor;
|
807
|
-
const uint32_t fHalf;
|
808
|
-
};
|
809
|
-
|
810
|
-
|
811
|
-
// Widening multiplies: u8*u8 -> u16 and u16*u16 -> u32, per lane.
SIN Vec<N,uint16_t> mull(const Vec<N,uint8_t>& x,
                         const Vec<N,uint8_t>& y) {
#if SKVX_USE_SIMD && defined(__ARM_NEON)
    // NEON's vmull_u8 ("mul-long") does eight u8*u8 -> u16 at once.
    if constexpr (N == 8) {
        return to_vec<8,uint16_t>(vmull_u8(to_vext(x), to_vext(y)));
    } else if constexpr (N < 8) {
        return mull(join(x,x), join(y,y)).lo;
    } else { // N > 8
        return join(mull(x.lo, y.lo), mull(x.hi, y.hi));
    }
#else
    return cast<uint16_t>(x) * cast<uint16_t>(y);
#endif
}

SIN Vec<N,uint32_t> mull(const Vec<N,uint16_t>& x,
                         const Vec<N,uint16_t>& y) {
#if SKVX_USE_SIMD && defined(__ARM_NEON)
    // NEON's vmull_u16 does four u16*u16 -> u32 at once.
    if constexpr (N == 4) {
        return to_vec<4,uint32_t>(vmull_u16(to_vext(x), to_vext(y)));
    } else if constexpr (N < 4) {
        return mull(join(x,x), join(y,y)).lo;
    } else { // N > 4
        return join(mull(x.lo, y.lo), mull(x.hi, y.hi));
    }
#else
    return cast<uint32_t>(x) * cast<uint32_t>(y);
#endif
}
|
842
|
-
|
843
|
-
// mulhi(x,y): the high 16 bits of each u16*u16 product.
SIN Vec<N,uint16_t> mulhi(const Vec<N,uint16_t>& x,
                          const Vec<N,uint16_t>& y) {
#if SKVX_USE_SIMD && defined(__SSE__)
    // _mm_mulhi_epu16 handles 8 lanes; widen or split to land on that width.
    if constexpr (N == 8) {
        return bit_pun<Vec<8,uint16_t>>(_mm_mulhi_epu16(bit_pun<__m128i>(x), bit_pun<__m128i>(y)));
    } else if constexpr (N < 8) {
        return mulhi(join(x,x), join(y,y)).lo;
    } else { // N > 8
        return join(mulhi(x.lo, y.lo), mulhi(x.hi, y.hi));
    }
#else
    return skvx::cast<uint16_t>(mull(x, y) >> 16);
#endif
}
|
858
|
-
|
859
|
-
SINT T dot(const Vec<N, T>& a, const Vec<N, T>& b) {
|
860
|
-
auto ab = a*b;
|
861
|
-
if constexpr (N == 2) {
|
862
|
-
return ab[0] + ab[1];
|
863
|
-
} else if constexpr (N == 4) {
|
864
|
-
return ab[0] + ab[1] + ab[2] + ab[3];
|
865
|
-
} else {
|
866
|
-
T sum = ab[0];
|
867
|
-
for (int i = 1; i < N; ++i) {
|
868
|
-
sum += ab[i];
|
869
|
-
}
|
870
|
-
return sum;
|
871
|
-
}
|
872
|
-
}
|
873
|
-
|
874
|
-
SI float cross(const Vec<2, float>& a, const Vec<2, float>& b) {
|
875
|
-
auto x = a * shuffle<1,0>(b);
|
876
|
-
return x[0] - x[1];
|
877
|
-
}
|
878
|
-
|
879
|
-
// De-interleaving load of 4 vectors.
|
880
|
-
//
|
881
|
-
// WARNING: These are really only supported well on NEON. Consider restructuring your data before
|
882
|
-
// resorting to these methods.
|
883
|
-
SIT void strided_load4(const T* v,
|
884
|
-
Vec<1,T>& a,
|
885
|
-
Vec<1,T>& b,
|
886
|
-
Vec<1,T>& c,
|
887
|
-
Vec<1,T>& d) {
|
888
|
-
a.val = v[0];
|
889
|
-
b.val = v[1];
|
890
|
-
c.val = v[2];
|
891
|
-
d.val = v[3];
|
892
|
-
}
|
893
|
-
SINT void strided_load4(const T* v,
|
894
|
-
Vec<N,T>& a,
|
895
|
-
Vec<N,T>& b,
|
896
|
-
Vec<N,T>& c,
|
897
|
-
Vec<N,T>& d) {
|
898
|
-
strided_load4(v, a.lo, b.lo, c.lo, d.lo);
|
899
|
-
strided_load4(v + 4*(N/2), a.hi, b.hi, c.hi, d.hi);
|
900
|
-
}
|
901
|
-
#if SKVX_USE_SIMD && defined(__ARM_NEON)
|
902
|
-
#define IMPL_LOAD4_TRANSPOSED(N, T, VLD) \
|
903
|
-
SI void strided_load4(const T* v, \
|
904
|
-
Vec<N,T>& a, \
|
905
|
-
Vec<N,T>& b, \
|
906
|
-
Vec<N,T>& c, \
|
907
|
-
Vec<N,T>& d) { \
|
908
|
-
auto mat = VLD(v); \
|
909
|
-
a = bit_pun<Vec<N,T>>(mat.val[0]); \
|
910
|
-
b = bit_pun<Vec<N,T>>(mat.val[1]); \
|
911
|
-
c = bit_pun<Vec<N,T>>(mat.val[2]); \
|
912
|
-
d = bit_pun<Vec<N,T>>(mat.val[3]); \
|
913
|
-
}
|
914
|
-
IMPL_LOAD4_TRANSPOSED(2, uint32_t, vld4_u32)
|
915
|
-
IMPL_LOAD4_TRANSPOSED(4, uint16_t, vld4_u16)
|
916
|
-
IMPL_LOAD4_TRANSPOSED(8, uint8_t, vld4_u8)
|
917
|
-
IMPL_LOAD4_TRANSPOSED(2, int32_t, vld4_s32)
|
918
|
-
IMPL_LOAD4_TRANSPOSED(4, int16_t, vld4_s16)
|
919
|
-
IMPL_LOAD4_TRANSPOSED(8, int8_t, vld4_s8)
|
920
|
-
IMPL_LOAD4_TRANSPOSED(2, float, vld4_f32)
|
921
|
-
IMPL_LOAD4_TRANSPOSED(4, uint32_t, vld4q_u32)
|
922
|
-
IMPL_LOAD4_TRANSPOSED(8, uint16_t, vld4q_u16)
|
923
|
-
IMPL_LOAD4_TRANSPOSED(16, uint8_t, vld4q_u8)
|
924
|
-
IMPL_LOAD4_TRANSPOSED(4, int32_t, vld4q_s32)
|
925
|
-
IMPL_LOAD4_TRANSPOSED(8, int16_t, vld4q_s16)
|
926
|
-
IMPL_LOAD4_TRANSPOSED(16, int8_t, vld4q_s8)
|
927
|
-
IMPL_LOAD4_TRANSPOSED(4, float, vld4q_f32)
|
928
|
-
#undef IMPL_LOAD4_TRANSPOSED
|
929
|
-
|
930
|
-
#elif SKVX_USE_SIMD && defined(__SSE__)
|
931
|
-
|
932
|
-
// SSE overload for 4x float4: load four rows, transpose in registers.
SI void strided_load4(const float* v,
                      Vec<4,float>& a,
                      Vec<4,float>& b,
                      Vec<4,float>& c,
                      Vec<4,float>& d) {
    __m128 row0 = _mm_loadu_ps(v);
    __m128 row1 = _mm_loadu_ps(v+4);
    __m128 row2 = _mm_loadu_ps(v+8);
    __m128 row3 = _mm_loadu_ps(v+12);
    _MM_TRANSPOSE4_PS(row0, row1, row2, row3);
    a = bit_pun<Vec<4,float>>(row0);
    b = bit_pun<Vec<4,float>>(row1);
    c = bit_pun<Vec<4,float>>(row2);
    d = bit_pun<Vec<4,float>>(row3);
}
|
947
|
-
#endif
|
948
|
-
|
949
|
-
// De-interleaving load of 2 vectors.
|
950
|
-
//
|
951
|
-
// WARNING: These are really only supported well on NEON. Consider restructuring your data before
|
952
|
-
// resorting to these methods.
|
953
|
-
SIT void strided_load2(const T* v, Vec<1,T>& a, Vec<1,T>& b) {
|
954
|
-
a.val = v[0];
|
955
|
-
b.val = v[1];
|
956
|
-
}
|
957
|
-
SINT void strided_load2(const T* v, Vec<N,T>& a, Vec<N,T>& b) {
|
958
|
-
strided_load2(v, a.lo, b.lo);
|
959
|
-
strided_load2(v + 2*(N/2), a.hi, b.hi);
|
960
|
-
}
|
961
|
-
#if SKVX_USE_SIMD && defined(__ARM_NEON)
|
962
|
-
#define IMPL_LOAD2_TRANSPOSED(N, T, VLD) \
|
963
|
-
SI void strided_load2(const T* v, Vec<N,T>& a, Vec<N,T>& b) { \
|
964
|
-
auto mat = VLD(v); \
|
965
|
-
a = bit_pun<Vec<N,T>>(mat.val[0]); \
|
966
|
-
b = bit_pun<Vec<N,T>>(mat.val[1]); \
|
967
|
-
}
|
968
|
-
IMPL_LOAD2_TRANSPOSED(2, uint32_t, vld2_u32)
|
969
|
-
IMPL_LOAD2_TRANSPOSED(4, uint16_t, vld2_u16)
|
970
|
-
IMPL_LOAD2_TRANSPOSED(8, uint8_t, vld2_u8)
|
971
|
-
IMPL_LOAD2_TRANSPOSED(2, int32_t, vld2_s32)
|
972
|
-
IMPL_LOAD2_TRANSPOSED(4, int16_t, vld2_s16)
|
973
|
-
IMPL_LOAD2_TRANSPOSED(8, int8_t, vld2_s8)
|
974
|
-
IMPL_LOAD2_TRANSPOSED(2, float, vld2_f32)
|
975
|
-
IMPL_LOAD2_TRANSPOSED(4, uint32_t, vld2q_u32)
|
976
|
-
IMPL_LOAD2_TRANSPOSED(8, uint16_t, vld2q_u16)
|
977
|
-
IMPL_LOAD2_TRANSPOSED(16, uint8_t, vld2q_u8)
|
978
|
-
IMPL_LOAD2_TRANSPOSED(4, int32_t, vld2q_s32)
|
979
|
-
IMPL_LOAD2_TRANSPOSED(8, int16_t, vld2q_s16)
|
980
|
-
IMPL_LOAD2_TRANSPOSED(16, int8_t, vld2q_s8)
|
981
|
-
IMPL_LOAD2_TRANSPOSED(4, float, vld2q_f32)
|
982
|
-
#undef IMPL_LOAD2_TRANSPOSED
|
983
|
-
#endif
|
984
|
-
|
985
|
-
// Define commonly used aliases
// Naming scheme is <element-type><lane-count>, e.g. float4 == Vec<4,float>.
using float2  = Vec< 2, float>;
using float4  = Vec< 4, float>;
using float8  = Vec< 8, float>;

using double2 = Vec< 2, double>;
using double4 = Vec< 4, double>;
using double8 = Vec< 8, double>;

using byte2  = Vec< 2, uint8_t>;
using byte4  = Vec< 4, uint8_t>;
using byte8  = Vec< 8, uint8_t>;
using byte16 = Vec<16, uint8_t>;

using int2 = Vec< 2, int32_t>;
using int4 = Vec< 4, int32_t>;
using int8 = Vec< 8, int32_t>;

using uint2 = Vec< 2, uint32_t>;
using uint4 = Vec< 4, uint32_t>;
using uint8 = Vec< 8, uint32_t>;

using long2 = Vec< 2, int64_t>;
using long4 = Vec< 4, int64_t>;
using long8 = Vec< 8, int64_t>;

// Use with from_half and to_half to convert between floatX, and use these for storage.
// (halfN holds raw 16-bit half-float bit patterns, not arithmetic values.)
using half2 = Vec< 2, uint16_t>;
using half4 = Vec< 4, uint16_t>;
using half8 = Vec< 8, uint16_t>;
|
1015
|
-
|
1016
|
-
} // namespace skvx
|
1017
|
-
|
1018
|
-
// Scrub all the file-local convenience macros so they don't leak into
// translation units that include this header.
#undef SINTU
#undef SINT
#undef SIN
#undef SIT
#undef SI
#undef SKVX_ALWAYS_INLINE
#undef SKVX_USE_SIMD

#endif//SKVX_DEFINED
|