
github.com/ValveSoftware/openvr.git
author     Jeremy Selan <jeremys@valvesoftware.com>   2017-06-13 22:52:38 +0300
committer  Jeremy Selan <jeremys@valvesoftware.com>   2017-06-13 22:52:38 +0300
commit     dc860fe405dd93803e55dc662cae590edf6c7cb3 (patch)
tree       681b728981d459c3c83a91fe95b3fd106b162d0d
parent     b6539291b88eaa65c58c076cafe4eacd0ae2e062 (diff)
OpenVR SDK 1.0.8:
General:
- Added VRCompositorError_InvalidBounds. This is returned when the application passes texture bounds to Submit that are outside the range of 0.0 to 1.0 or are otherwise invalid.
- Added sample programs: hellovr_vulkan and hellovr_dx12.

IVRVirtualDisplay:
- The IVRVirtualDisplay interface gives OpenVR driver authors access to the final composited backbuffer intended for the headset's display. The primary expected use case is wireless transport, though it could also be used for saving output to disk or streaming. From the perspective of the runtime, the VR compositor is interfacing with a virtual rather than an actual display. See https://github.com/ValveSoftware/virtual_display

IVRSystem:
- GetOutputDevice added so applications know which primary graphics adapter to use. This returns a LUID on Win32, a vk::PhysicalDevice for Vulkan, and an id<MTLDevice> on OSX.

IVRDriverManager:
- Public API to query installed device drivers.

Drivers:
- Drivers can now set Prop_DriverDirectModeSendsVsyncEvents_Bool to true to indicate they will call VsyncEvent on their own. This avoids the hardcoded 2.8 ms vsync offset for IVRDriverDirectModeComponent implementations.
- Drivers can specify which graphics adapter to use by setting Prop_GraphicsAdapterLuid_Uint64. This is a LUID on Win32.

MacOS/OSX:
- Includes a private framework compatible with Xcode 8 and higher. Embedding frameworks as binaries in your macOS app bundle can be error-prone; see the detailed instructions in issue #543: https://github.com/ValveSoftware/openvr/issues/543#issuecomment-307637564

CMake build system (optional):
- CMake script changes to better support Cygwin
- Supports building universal 32/64 binaries on OSX
- Supports building as a private OSX framework

[git-p4: depot-paths = "//vr/steamvr/sdk_release/": change = 4009583]
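A minimal application-side sketch of the two new entry points above (my own illustration, not part of this commit; assumes the 1.0.8 headers, a running SteamVR installation, and trimmed error handling):

// Hypothetical usage sketch of IVRSystem::GetOutputDevice and IVRDriverManager.
#include <openvr.h>
#include <cstdio>

int main()
{
    vr::EVRInitError eError = vr::VRInitError_None;
    vr::IVRSystem *pSystem = vr::VR_Init( &eError, vr::VRApplication_Scene );
    if ( eError != vr::VRInitError_None )
        return 1;

    // New in IVRSystem_016: which adapter is the compositor using?
    // For D3D this is the adapter LUID packed into a uint64_t.
    uint64_t nDevice = 0;
    pSystem->GetOutputDevice( &nDevice, vr::TextureType_DirectX );
    printf( "HMD adapter LUID: 0x%llx\n", (unsigned long long)nDevice );

    // New IVRDriverManager: enumerate installed device drivers by name.
    vr::IVRDriverManager *pDriverManager = vr::VRDriverManager();
    for ( uint32_t i = 0; i < pDriverManager->GetDriverCount(); i++ )
    {
        char szName[ 128 ];
        pDriverManager->GetDriverName( i, szName, sizeof( szName ) );
        printf( "driver %u: %s\n", i, szName );
    }

    vr::VR_Shutdown();
    return 0;
}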
-rwxr-xr-x  bin/linux32/libopenvr_api.so  bin  287376 -> 287376 bytes
-rwxr-xr-x  bin/linux32/libopenvr_api.so.dbg  bin  1969890 -> 1903926 bytes
-rwxr-xr-x  bin/linux64/libopenvr_api.so  bin  284827 -> 288923 bytes
-rwxr-xr-x  bin/linux64/libopenvr_api.so.dbg  bin  2453713 -> 2392441 bytes
-rwxr-xr-x  bin/osx32/libopenvr_api.dylib  bin  296772 -> 300868 bytes
-rw-r--r--  bin/osx32/libopenvr_api.dylib.dSYM/Contents/Resources/DWARF/libopenvr_api.dylib  bin  2009602 -> 2010719 bytes
-rw-r--r--  bin/win32/openvr_api.dll  bin  257312 -> 258336 bytes
-rw-r--r--  bin/win32/openvr_api.pdb  bin  4632576 -> 4345856 bytes
-rw-r--r--  bin/win64/openvr_api.dll  bin  302880 -> 304416 bytes
-rw-r--r--  bin/win64/openvr_api.pdb  bin  4435968 -> 4149248 bytes
-rw-r--r--  headers/openvr.h  87
-rw-r--r--  headers/openvr_api.cs  63
-rw-r--r--  headers/openvr_api.json  50
-rw-r--r--  headers/openvr_capi.h  26
-rw-r--r--  headers/openvr_driver.h  122
-rwxr-xr-x  lib/linux32/libopenvr_api.so  bin  1963478 -> 1897362 bytes
-rwxr-xr-x  lib/linux64/libopenvr_api.so  bin  2447145 -> 2385721 bytes
-rw-r--r--  lib/win32/openvr_api.lib  bin  5964 -> 5964 bytes
-rw-r--r--  lib/win64/openvr_api.lib  bin  5876 -> 5876 bytes
-rw-r--r--  samples/CMakeLists.txt  25
-rw-r--r--  samples/bin/linux64/libopenvr_api.so  bin  284827 -> 288923 bytes
-rw-r--r--  samples/bin/osx32/libopenvr_api.dylib  bin  296772 -> 300868 bytes
-rw-r--r--  samples/bin/shaders/axes.hlsl  34
-rw-r--r--  samples/bin/shaders/axes_ps.spv  bin  0 -> 1256 bytes
-rw-r--r--  samples/bin/shaders/axes_vs.spv  bin  0 -> 2128 bytes
-rwxr-xr-x  samples/bin/shaders/build_vulkan_shaders.bat  5
-rw-r--r--  samples/bin/shaders/companion.hlsl  40
-rw-r--r--  samples/bin/shaders/companion_ps.spv  bin  0 -> 1924 bytes
-rw-r--r--  samples/bin/shaders/companion_vs.spv  bin  0 -> 1892 bytes
-rw-r--r--  samples/bin/shaders/rendermodel.hlsl  40
-rw-r--r--  samples/bin/shaders/rendermodel_ps.spv  bin  0 -> 1672 bytes
-rw-r--r--  samples/bin/shaders/rendermodel_vs.spv  bin  0 -> 2480 bytes
-rw-r--r--  samples/bin/shaders/scene.hlsl  39
-rw-r--r--  samples/bin/shaders/scene_ps.spv  bin  0 -> 1672 bytes
-rw-r--r--  samples/bin/shaders/scene_vs.spv  bin  0 -> 2336 bytes
-rw-r--r--  samples/bin/win32/openvr_api.dll  bin  257312 -> 258336 bytes
-rw-r--r--  samples/bin/win64/openvr_api.dll  bin  302880 -> 304416 bytes
-rw-r--r--  samples/hellovr_dx12/d3dx12.h  2540
-rw-r--r--  samples/hellovr_dx12/hellovr_dx12.vcxproj  124
-rw-r--r--  samples/hellovr_dx12/hellovr_dx12.vcxproj.filters  77
-rw-r--r--  samples/hellovr_dx12/hellovr_dx12_main.cpp  2287
-rw-r--r--  samples/hellovr_vulkan/CMakeLists.txt  16
-rw-r--r--  samples/hellovr_vulkan/hellovr_vulkan_main.cpp  3776
-rw-r--r--  samples/shared/strtools.h  16
-rw-r--r--  samples/thirdparty/vulkan-1.0.49.0/include/shaderc/shaderc.h  470
-rw-r--r--  samples/thirdparty/vulkan-1.0.49.0/include/shaderc/shaderc.hpp  501
-rw-r--r--  samples/thirdparty/vulkan-1.0.49.0/include/vulkan/GLSL.std.450.h  131
-rw-r--r--  samples/thirdparty/vulkan-1.0.49.0/include/vulkan/spirv.h  971
-rw-r--r--  samples/thirdparty/vulkan-1.0.49.0/include/vulkan/spirv.hpp  980
-rw-r--r--  samples/thirdparty/vulkan-1.0.49.0/include/vulkan/spirv.hpp11  980
-rw-r--r--  samples/thirdparty/vulkan-1.0.49.0/include/vulkan/spirv.json  996
-rw-r--r--  samples/thirdparty/vulkan-1.0.49.0/include/vulkan/spirv.lua  927
-rwxr-xr-x  samples/thirdparty/vulkan-1.0.49.0/include/vulkan/spirv.py  927
-rw-r--r--  samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vk_icd.h  146
-rw-r--r--  samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vk_layer.h  143
-rw-r--r--  samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vk_layer_dispatch_table.h  410
-rw-r--r--  samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vk_platform.h  120
-rw-r--r--  samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vk_sdk_platform.h  46
-rw-r--r--  samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vulkan.h  5926
-rw-r--r--  samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vulkan.hpp  30540
-rw-r--r--  samples/thirdparty/vulkan-1.0.49.0/lib/linux64/libvulkan.so  bin  0 -> 353317 bytes
-rw-r--r--  samples/thirdparty/vulkan-1.0.49.0/lib/win32/vulkan-1.lib  bin  0 -> 41304 bytes
-rw-r--r--  samples/thirdparty/vulkan-1.0.49.0/lib/win64/vulkan-1.lib  bin  0 -> 38268 bytes
-rw-r--r--  src/vrcommon/hmderrors_public.cpp  18
64 files changed, 53549 insertions, 50 deletions
diff --git a/bin/linux32/libopenvr_api.so b/bin/linux32/libopenvr_api.so
index 00825b9..05a9a39 100755
--- a/bin/linux32/libopenvr_api.so
+++ b/bin/linux32/libopenvr_api.so
Binary files differ
diff --git a/bin/linux32/libopenvr_api.so.dbg b/bin/linux32/libopenvr_api.so.dbg
index 0230ff3..7ec3197 100755
--- a/bin/linux32/libopenvr_api.so.dbg
+++ b/bin/linux32/libopenvr_api.so.dbg
Binary files differ
diff --git a/bin/linux64/libopenvr_api.so b/bin/linux64/libopenvr_api.so
index 8d30531..36e0b24 100755
--- a/bin/linux64/libopenvr_api.so
+++ b/bin/linux64/libopenvr_api.so
Binary files differ
diff --git a/bin/linux64/libopenvr_api.so.dbg b/bin/linux64/libopenvr_api.so.dbg
index ae4f8c5..5511a82 100755
--- a/bin/linux64/libopenvr_api.so.dbg
+++ b/bin/linux64/libopenvr_api.so.dbg
Binary files differ
diff --git a/bin/osx32/libopenvr_api.dylib b/bin/osx32/libopenvr_api.dylib
index 5a20f3a..f0e89a8 100755
--- a/bin/osx32/libopenvr_api.dylib
+++ b/bin/osx32/libopenvr_api.dylib
Binary files differ
diff --git a/bin/osx32/libopenvr_api.dylib.dSYM/Contents/Resources/DWARF/libopenvr_api.dylib b/bin/osx32/libopenvr_api.dylib.dSYM/Contents/Resources/DWARF/libopenvr_api.dylib
index 6fcf050..1e0e61b 100644
--- a/bin/osx32/libopenvr_api.dylib.dSYM/Contents/Resources/DWARF/libopenvr_api.dylib
+++ b/bin/osx32/libopenvr_api.dylib.dSYM/Contents/Resources/DWARF/libopenvr_api.dylib
Binary files differ
diff --git a/bin/win32/openvr_api.dll b/bin/win32/openvr_api.dll
index aafef72..5c13f29 100644
--- a/bin/win32/openvr_api.dll
+++ b/bin/win32/openvr_api.dll
Binary files differ
diff --git a/bin/win32/openvr_api.pdb b/bin/win32/openvr_api.pdb
index 6c8f14f..a552527 100644
--- a/bin/win32/openvr_api.pdb
+++ b/bin/win32/openvr_api.pdb
Binary files differ
diff --git a/bin/win64/openvr_api.dll b/bin/win64/openvr_api.dll
index ddda897..3b70c22 100644
--- a/bin/win64/openvr_api.dll
+++ b/bin/win64/openvr_api.dll
Binary files differ
diff --git a/bin/win64/openvr_api.pdb b/bin/win64/openvr_api.pdb
index e5f67ed..1f794e6 100644
--- a/bin/win64/openvr_api.pdb
+++ b/bin/win64/openvr_api.pdb
Binary files differ
diff --git a/headers/openvr.h b/headers/openvr.h
index 2020e9d..f758909 100644
--- a/headers/openvr.h
+++ b/headers/openvr.h
@@ -143,6 +143,9 @@ enum ETrackingResult
TrackingResult_Running_OutOfRange = 201,
};
+typedef uint32_t DriverId_t;
+static const uint32_t k_nDriverNone = 0xFFFFFFFF;
+
static const uint32_t k_unMaxDriverDebugResponseSize = 32768;
/** Used to pass device IDs to API calls */
@@ -309,6 +312,9 @@ enum ETrackedDeviceProperty
Prop_DisplayMCImageNumChannels_Int32 = 2040,
Prop_DisplayMCImageData_Binary = 2041,
Prop_SecondsFromPhotonsToVblank_Float = 2042,
+ Prop_DriverDirectModeSendsVsyncEvents_Bool = 2043,
+ Prop_DisplayDebugMode_Bool = 2044,
+ Prop_GraphicsAdapterLuid_Uint64 = 2045,
// Properties that are unique to TrackedDeviceClass_Controller
Prop_AttachedDeviceId_String = 3000,
@@ -330,15 +336,15 @@ enum ETrackedDeviceProperty
Prop_ModeLabel_String = 4006,
// Properties that are used for user interface like icons names
- Prop_IconPathName_String = 5000, // usually a directory named "icons"
- Prop_NamedIconPathDeviceOff_String = 5001, // PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
- Prop_NamedIconPathDeviceSearching_String = 5002, // PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
- Prop_NamedIconPathDeviceSearchingAlert_String = 5003, // PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
- Prop_NamedIconPathDeviceReady_String = 5004, // PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
- Prop_NamedIconPathDeviceReadyAlert_String = 5005, // PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
- Prop_NamedIconPathDeviceNotReady_String = 5006, // PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
- Prop_NamedIconPathDeviceStandby_String = 5007, // PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
- Prop_NamedIconPathDeviceAlertLow_String = 5008, // PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
+ Prop_IconPathName_String = 5000, // DEPRECATED. Value not referenced. Now expected to be part of icon path properties.
+ Prop_NamedIconPathDeviceOff_String = 5001, // {driver}/icons/icon_filename - PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
+ Prop_NamedIconPathDeviceSearching_String = 5002, // {driver}/icons/icon_filename - PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
+ Prop_NamedIconPathDeviceSearchingAlert_String = 5003, // {driver}/icons/icon_filename - PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
+ Prop_NamedIconPathDeviceReady_String = 5004, // {driver}/icons/icon_filename - PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
+ Prop_NamedIconPathDeviceReadyAlert_String = 5005, // {driver}/icons/icon_filename - PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
+ Prop_NamedIconPathDeviceNotReady_String = 5006, // {driver}/icons/icon_filename - PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
+ Prop_NamedIconPathDeviceStandby_String = 5007, // {driver}/icons/icon_filename - PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
+ Prop_NamedIconPathDeviceAlertLow_String = 5008, // {driver}/icons/icon_filename - PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
// Properties that are used by helpers, but are opaque to applications
Prop_DisplayHiddenArea_Binary_Start = 5100,
@@ -539,6 +545,7 @@ enum EVREventType
VREvent_ModelSkinSettingsHaveChanged = 853,
VREvent_EnvironmentSettingsHaveChanged = 854,
VREvent_PowerSettingsHaveChanged = 855,
+ VREvent_EnableHomeAppSettingsHaveChanged = 856,
VREvent_StatusUpdate = 900,
@@ -1044,6 +1051,8 @@ enum EVRInitError
VRInitError_Init_WatchdogDisabledInSettings = 132,
VRInitError_Init_VRDashboardNotFound = 133,
VRInitError_Init_VRDashboardStartupFailed = 134,
+ VRInitError_Init_VRHomeNotFound = 135,
+ VRInitError_Init_VRHomeStartupFailed = 136,
VRInitError_Driver_Failed = 200,
VRInitError_Driver_Unknown = 201,
@@ -1074,6 +1083,7 @@ enum EVRInitError
VRInitError_Compositor_FirmwareRequiresUpdate = 402,
VRInitError_Compositor_OverlayInitFailed = 403,
VRInitError_Compositor_ScreenshotsInitFailed = 404,
+ VRInitError_Compositor_UnableToCreateDevice = 405,
VRInitError_VendorSpecific_UnableToConnectToOculusRuntime = 1000,
@@ -1265,6 +1275,22 @@ public:
* and swap chain in DX10 and DX11. If an error occurs the index will be set to -1.
*/
virtual void GetDXGIOutputInfo( int32_t *pnAdapterIndex ) = 0;
+
+ /**
+ * Returns platform- and texture-type specific adapter identification so that applications and the
+ * compositor are creating textures and swap chains on the same GPU. If an error occurs the device
+ * will be set to 0.
+ * [D3D10/11/12 Only (D3D9 Not Supported)]
+ * Returns the adapter LUID that identifies the GPU attached to the HMD. The user should
+ * enumerate all adapters using IDXGIFactory::EnumAdapters and IDXGIAdapter::GetDesc to find
+ * the adapter with the matching LUID, or use IDXGIFactory4::EnumAdapterByLuid.
+ * The discovered IDXGIAdapter should be used to create the device and swap chain.
+ * [Vulkan Only]
+ * Returns the vk::PhysicalDevice that should be used by the application.
+ * [macOS Only]
+ * Returns an id<MTLDevice> that should be used by the application.
+ */
+ virtual void GetOutputDevice( uint64_t *pnDevice, ETextureType textureType ) = 0;
// ------------------------------------
// Display Mode methods
@@ -1482,7 +1508,7 @@ public:
};
-static const char * const IVRSystem_Version = "IVRSystem_015";
+static const char * const IVRSystem_Version = "IVRSystem_016";
}
@@ -1770,7 +1796,7 @@ namespace vr
static const char * const k_pch_SteamVR_SpeakersForwardYawOffsetDegrees_Float = "speakersForwardYawOffsetDegrees";
static const char * const k_pch_SteamVR_BaseStationPowerManagement_Bool = "basestationPowerManagement";
static const char * const k_pch_SteamVR_NeverKillProcesses_Bool = "neverKillProcesses";
- static const char * const k_pch_SteamVR_RenderTargetMultiplier_Float = "renderTargetMultiplier";
+ static const char * const k_pch_SteamVR_SupersampleScale_Float = "supersampleScale";
static const char * const k_pch_SteamVR_AllowAsyncReprojection_Bool = "allowAsyncReprojection";
static const char * const k_pch_SteamVR_AllowReprojection_Bool = "allowInterleavedReprojection";
static const char * const k_pch_SteamVR_ForceReprojection_Bool = "forceReprojection";
@@ -1783,10 +1809,10 @@ namespace vr
static const char * const k_pch_SteamVR_StartDashboardFromAppLaunch_Bool = "startDashboardFromAppLaunch";
static const char * const k_pch_SteamVR_StartOverlayAppsFromDashboard_Bool = "startOverlayAppsFromDashboard";
static const char * const k_pch_SteamVR_EnableHomeApp = "enableHomeApp";
- static const char * const k_pch_SteamVR_SetInitialDefaultHomeApp = "setInitialDefaultHomeApp";
static const char * const k_pch_SteamVR_CycleBackgroundImageTimeSec_Int32 = "CycleBackgroundImageTimeSec";
static const char * const k_pch_SteamVR_RetailDemo_Bool = "retailDemo";
static const char * const k_pch_SteamVR_IpdOffset_Float = "ipdOffset";
+ static const char * const k_pch_SteamVR_AllowSupersampleFiltering_Bool = "allowSupersampleFiltering";
//-----------------------------------------------------------------------------
// lighthouse keys
@@ -1844,6 +1870,7 @@ namespace vr
static const char * const k_pch_Perf_AllowTimingStore_Bool = "allowTimingStore";
static const char * const k_pch_Perf_SaveTimingsOnExit_Bool = "saveTimingsOnExit";
static const char * const k_pch_Perf_TestData_Float = "perfTestData";
+ static const char * const k_pch_Perf_LinuxGPUProfiling_Bool = "linuxGPUProfiling";
//-----------------------------------------------------------------------------
// collision bounds keys
@@ -2087,6 +2114,7 @@ enum EVRCompositorError
VRCompositorError_SharedTexturesNotSupported = 106,
VRCompositorError_IndexOutOfRange = 107,
VRCompositorError_AlreadySubmitted = 108,
+ VRCompositorError_InvalidBounds = 109,
};
const uint32_t VRCompositor_ReprojectionReason_Cpu = 0x01;
@@ -3350,7 +3378,26 @@ public:
static const char * const IVRResources_Version = "IVRResources_001";
-}// End
+}
+// ivrdrivermanager.h
+namespace vr
+{
+
+class IVRDriverManager
+{
+public:
+ virtual uint32_t GetDriverCount() const = 0;
+
+ /** Returns the length of the number of bytes necessary to hold this string including the trailing null. */
+ virtual uint32_t GetDriverName( vr::DriverId_t nDriver, VR_OUT_STRING() char *pchValue, uint32_t unBufferSize ) = 0;
+};
+
+static const char * const IVRDriverManager_Version = "IVRDriverManager_001";
+
+} // namespace vr
+
+
+// End
#endif // _OPENVR_API
@@ -3571,6 +3618,17 @@ namespace vr
return m_pVRTrackedCamera;
}
+ IVRDriverManager *VRDriverManager()
+ {
+ CheckClear();
+ if ( !m_pVRDriverManager )
+ {
+ EVRInitError eError;
+ m_pVRDriverManager = ( IVRDriverManager * )VR_GetGenericInterface( IVRDriverManager_Version, &eError );
+ }
+ return m_pVRDriverManager;
+ }
+
private:
IVRSystem *m_pVRSystem;
IVRChaperone *m_pVRChaperone;
@@ -3584,6 +3642,7 @@ namespace vr
IVRApplications *m_pVRApplications;
IVRTrackedCamera *m_pVRTrackedCamera;
IVRScreenshots *m_pVRScreenshots;
+ IVRDriverManager *m_pVRDriverManager;
};
inline COpenVRContext &OpenVRInternal_ModuleContext()
@@ -3604,6 +3663,7 @@ namespace vr
inline IVRResources *VR_CALLTYPE VRResources() { return OpenVRInternal_ModuleContext().VRResources(); }
inline IVRExtendedDisplay *VR_CALLTYPE VRExtendedDisplay() { return OpenVRInternal_ModuleContext().VRExtendedDisplay(); }
inline IVRTrackedCamera *VR_CALLTYPE VRTrackedCamera() { return OpenVRInternal_ModuleContext().VRTrackedCamera(); }
+ inline IVRDriverManager *VR_CALLTYPE VRDriverManager() { return OpenVRInternal_ModuleContext().VRDriverManager(); }
inline void COpenVRContext::Clear()
{
@@ -3619,6 +3679,7 @@ namespace vr
m_pVRTrackedCamera = nullptr;
m_pVRResources = nullptr;
m_pVRScreenshots = nullptr;
+ m_pVRDriverManager = nullptr;
}
VR_INTERFACE uint32_t VR_CALLTYPE VR_InitInternal( EVRInitError *peError, EVRApplicationType eApplicationType );
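The new GetOutputDevice documentation above tells D3D applications to enumerate adapters and match the returned LUID before creating the device. A sketch of that matching step (my illustration under that assumption, not SDK code; Windows only, the caller releases the returned adapter):

#include <openvr.h>
#include <dxgi.h>
#include <cstring>

// Resolve the LUID from IVRSystem::GetOutputDevice to the matching IDXGIAdapter,
// then create the D3D11/D3D12 device and swap chain on that adapter.
IDXGIAdapter *FindHmdAdapter( vr::IVRSystem *pSystem )
{
    uint64_t nLuid = 0;
    pSystem->GetOutputDevice( &nLuid, vr::TextureType_DirectX );

    IDXGIFactory1 *pFactory = nullptr;
    if ( FAILED( CreateDXGIFactory1( __uuidof( IDXGIFactory1 ), (void **)&pFactory ) ) )
        return nullptr;

    IDXGIAdapter *pAdapter = nullptr;
    for ( UINT i = 0; pFactory->EnumAdapters( i, &pAdapter ) != DXGI_ERROR_NOT_FOUND; ++i )
    {
        DXGI_ADAPTER_DESC desc;
        pAdapter->GetDesc( &desc );
        if ( memcmp( &desc.AdapterLuid, &nLuid, sizeof( desc.AdapterLuid ) ) == 0 )
            break;              // this is the GPU the HMD is attached to
        pAdapter->Release();
        pAdapter = nullptr;
    }
    pFactory->Release();
    return pAdapter;            // nullptr if no adapter matched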
diff --git a/headers/openvr_api.cs b/headers/openvr_api.cs
index 377d91a..c21c96e 100644
--- a/headers/openvr_api.cs
+++ b/headers/openvr_api.cs
@@ -56,6 +56,11 @@ public struct IVRSystem
internal _GetDXGIOutputInfo GetDXGIOutputInfo;
[UnmanagedFunctionPointer(CallingConvention.StdCall)]
+ internal delegate void _GetOutputDevice(ref ulong pnDevice, ETextureType textureType);
+ [MarshalAs(UnmanagedType.FunctionPtr)]
+ internal _GetOutputDevice GetOutputDevice;
+
+ [UnmanagedFunctionPointer(CallingConvention.StdCall)]
internal delegate bool _IsDisplayOnDesktop();
[MarshalAs(UnmanagedType.FunctionPtr)]
internal _IsDisplayOnDesktop IsDisplayOnDesktop;
@@ -1477,6 +1482,21 @@ public struct IVRResources
}
+[StructLayout(LayoutKind.Sequential)]
+public struct IVRDriverManager
+{
+ [UnmanagedFunctionPointer(CallingConvention.StdCall)]
+ internal delegate uint _GetDriverCount();
+ [MarshalAs(UnmanagedType.FunctionPtr)]
+ internal _GetDriverCount GetDriverCount;
+
+ [UnmanagedFunctionPointer(CallingConvention.StdCall)]
+ internal delegate uint _GetDriverName(uint nDriver, System.Text.StringBuilder pchValue, uint unBufferSize);
+ [MarshalAs(UnmanagedType.FunctionPtr)]
+ internal _GetDriverName GetDriverName;
+
+}
+
public class CVRSystem
{
@@ -1531,6 +1551,11 @@ public class CVRSystem
pnAdapterIndex = 0;
FnTable.GetDXGIOutputInfo(ref pnAdapterIndex);
}
+ public void GetOutputDevice(ref ulong pnDevice,ETextureType textureType)
+ {
+ pnDevice = 0;
+ FnTable.GetOutputDevice(ref pnDevice,textureType);
+ }
public bool IsDisplayOnDesktop()
{
bool result = FnTable.IsDisplayOnDesktop();
@@ -3145,6 +3170,26 @@ public class CVRResources
}
+public class CVRDriverManager
+{
+ IVRDriverManager FnTable;
+ internal CVRDriverManager(IntPtr pInterface)
+ {
+ FnTable = (IVRDriverManager)Marshal.PtrToStructure(pInterface, typeof(IVRDriverManager));
+ }
+ public uint GetDriverCount()
+ {
+ uint result = FnTable.GetDriverCount();
+ return result;
+ }
+ public uint GetDriverName(uint nDriver,System.Text.StringBuilder pchValue,uint unBufferSize)
+ {
+ uint result = FnTable.GetDriverName(nDriver,pchValue,unBufferSize);
+ return result;
+ }
+}
+
+
public class OpenVRInterop
{
[DllImportAttribute("openvr_api", EntryPoint = "VR_InitInternal", CallingConvention = CallingConvention.Cdecl)]
@@ -3296,6 +3341,9 @@ public enum ETrackedDeviceProperty
Prop_DisplayMCImageNumChannels_Int32 = 2040,
Prop_DisplayMCImageData_Binary = 2041,
Prop_SecondsFromPhotonsToVblank_Float = 2042,
+ Prop_DriverDirectModeSendsVsyncEvents_Bool = 2043,
+ Prop_DisplayDebugMode_Bool = 2044,
+ Prop_GraphicsAdapterLuid_Uint64 = 2045,
Prop_AttachedDeviceId_String = 3000,
Prop_SupportedButtons_Uint64 = 3001,
Prop_Axis0Type_Int32 = 3002,
@@ -3449,6 +3497,7 @@ public enum EVREventType
VREvent_ModelSkinSettingsHaveChanged = 853,
VREvent_EnvironmentSettingsHaveChanged = 854,
VREvent_PowerSettingsHaveChanged = 855,
+ VREvent_EnableHomeAppSettingsHaveChanged = 856,
VREvent_StatusUpdate = 900,
VREvent_MCImageUpdated = 1000,
VREvent_FirmwareUpdateStarted = 1100,
@@ -3632,6 +3681,8 @@ public enum EVRInitError
Init_WatchdogDisabledInSettings = 132,
Init_VRDashboardNotFound = 133,
Init_VRDashboardStartupFailed = 134,
+ Init_VRHomeNotFound = 135,
+ Init_VRHomeStartupFailed = 136,
Driver_Failed = 200,
Driver_Unknown = 201,
Driver_HmdUnknown = 202,
@@ -3658,6 +3709,7 @@ public enum EVRInitError
Compositor_FirmwareRequiresUpdate = 402,
Compositor_OverlayInitFailed = 403,
Compositor_ScreenshotsInitFailed = 404,
+ Compositor_UnableToCreateDevice = 405,
VendorSpecific_UnableToConnectToOculusRuntime = 1000,
VendorSpecific_HmdFound_CantOpenDevice = 1101,
VendorSpecific_HmdFound_UnableToRequestConfigStart = 1102,
@@ -3798,6 +3850,7 @@ public enum EVRCompositorError
SharedTexturesNotSupported = 106,
IndexOutOfRange = 107,
AlreadySubmitted = 108,
+ InvalidBounds = 109,
}
public enum VROverlayInputMethod
{
@@ -4479,6 +4532,7 @@ public enum EVRScreenshotError
public IntPtr m_pVRApplications; // class vr::IVRApplications *
public IntPtr m_pVRTrackedCamera; // class vr::IVRTrackedCamera *
public IntPtr m_pVRScreenshots; // class vr::IVRScreenshots *
+ public IntPtr m_pVRDriverManager; // class vr::IVRDriverManager *
}
public class OpenVR
@@ -4524,6 +4578,7 @@ public class OpenVR
return OpenVRInterop.GetInitToken();
}
+ public const uint k_nDriverNone = 4294967295;
public const uint k_unMaxDriverDebugResponseSize = 32768;
public const uint k_unTrackedDeviceIndex_Hmd = 0;
public const uint k_unMaxTrackedDeviceCount = 16;
@@ -4547,7 +4602,7 @@ public class OpenVR
public const uint k_unControllerStateAxisCount = 5;
public const ulong k_ulOverlayHandleInvalid = 0;
public const uint k_unScreenshotHandleInvalid = 0;
- public const string IVRSystem_Version = "IVRSystem_015";
+ public const string IVRSystem_Version = "IVRSystem_016";
public const string IVRExtendedDisplay_Version = "IVRExtendedDisplay_001";
public const string IVRTrackedCamera_Version = "IVRTrackedCamera_003";
public const uint k_unMaxApplicationKeyLength = 128;
@@ -4598,7 +4653,7 @@ public class OpenVR
public const string k_pch_SteamVR_SpeakersForwardYawOffsetDegrees_Float = "speakersForwardYawOffsetDegrees";
public const string k_pch_SteamVR_BaseStationPowerManagement_Bool = "basestationPowerManagement";
public const string k_pch_SteamVR_NeverKillProcesses_Bool = "neverKillProcesses";
- public const string k_pch_SteamVR_RenderTargetMultiplier_Float = "renderTargetMultiplier";
+ public const string k_pch_SteamVR_SupersampleScale_Float = "supersampleScale";
public const string k_pch_SteamVR_AllowAsyncReprojection_Bool = "allowAsyncReprojection";
public const string k_pch_SteamVR_AllowReprojection_Bool = "allowInterleavedReprojection";
public const string k_pch_SteamVR_ForceReprojection_Bool = "forceReprojection";
@@ -4611,10 +4666,10 @@ public class OpenVR
public const string k_pch_SteamVR_StartDashboardFromAppLaunch_Bool = "startDashboardFromAppLaunch";
public const string k_pch_SteamVR_StartOverlayAppsFromDashboard_Bool = "startOverlayAppsFromDashboard";
public const string k_pch_SteamVR_EnableHomeApp = "enableHomeApp";
- public const string k_pch_SteamVR_SetInitialDefaultHomeApp = "setInitialDefaultHomeApp";
public const string k_pch_SteamVR_CycleBackgroundImageTimeSec_Int32 = "CycleBackgroundImageTimeSec";
public const string k_pch_SteamVR_RetailDemo_Bool = "retailDemo";
public const string k_pch_SteamVR_IpdOffset_Float = "ipdOffset";
+ public const string k_pch_SteamVR_AllowSupersampleFiltering_Bool = "allowSupersampleFiltering";
public const string k_pch_Lighthouse_Section = "driver_lighthouse";
public const string k_pch_Lighthouse_DisableIMU_Bool = "disableimu";
public const string k_pch_Lighthouse_UseDisambiguation_String = "usedisambiguation";
@@ -4654,6 +4709,7 @@ public class OpenVR
public const string k_pch_Perf_AllowTimingStore_Bool = "allowTimingStore";
public const string k_pch_Perf_SaveTimingsOnExit_Bool = "saveTimingsOnExit";
public const string k_pch_Perf_TestData_Float = "perfTestData";
+ public const string k_pch_Perf_LinuxGPUProfiling_Bool = "linuxGPUProfiling";
public const string k_pch_CollisionBounds_Section = "collisionBounds";
public const string k_pch_CollisionBounds_Style_Int32 = "CollisionBoundsStyle";
public const string k_pch_CollisionBounds_GroundPerimeterOn_Bool = "CollisionBoundsGroundPerimeterOn";
@@ -4694,6 +4750,7 @@ public class OpenVR
public const string k_pch_Driver_Enable_Bool = "enable";
public const string IVRScreenshots_Version = "IVRScreenshots_001";
public const string IVRResources_Version = "IVRResources_001";
+ public const string IVRDriverManager_Version = "IVRDriverManager_001";
static uint VRToken { get; set; }
diff --git a/headers/openvr_api.json b/headers/openvr_api.json
index 43c3bf9..7eb4592 100644
--- a/headers/openvr_api.json
+++ b/headers/openvr_api.json
@@ -2,6 +2,7 @@
,{"typedef": "vr::glInt_t","type": "int32_t"}
,{"typedef": "vr::glUInt_t","type": "uint32_t"}
,{"typedef": "vr::SharedTextureHandle_t","type": "uint64_t"}
+,{"typedef": "vr::DriverId_t","type": "uint32_t"}
,{"typedef": "vr::TrackedDeviceIndex_t","type": "uint32_t"}
,{"typedef": "vr::PropertyContainerHandle_t","type": "uint64_t"}
,{"typedef": "vr::PropertyTypeTag_t","type": "uint32_t"}
@@ -153,6 +154,9 @@
,{"name": "Prop_DisplayMCImageNumChannels_Int32","value": "2040"}
,{"name": "Prop_DisplayMCImageData_Binary","value": "2041"}
,{"name": "Prop_SecondsFromPhotonsToVblank_Float","value": "2042"}
+ ,{"name": "Prop_DriverDirectModeSendsVsyncEvents_Bool","value": "2043"}
+ ,{"name": "Prop_DisplayDebugMode_Bool","value": "2044"}
+ ,{"name": "Prop_GraphicsAdapterLuid_Uint64","value": "2045"}
,{"name": "Prop_AttachedDeviceId_String","value": "3000"}
,{"name": "Prop_SupportedButtons_Uint64","value": "3001"}
,{"name": "Prop_Axis0Type_Int32","value": "3002"}
@@ -302,6 +306,7 @@
,{"name": "VREvent_ModelSkinSettingsHaveChanged","value": "853"}
,{"name": "VREvent_EnvironmentSettingsHaveChanged","value": "854"}
,{"name": "VREvent_PowerSettingsHaveChanged","value": "855"}
+ ,{"name": "VREvent_EnableHomeAppSettingsHaveChanged","value": "856"}
,{"name": "VREvent_StatusUpdate","value": "900"}
,{"name": "VREvent_MCImageUpdated","value": "1000"}
,{"name": "VREvent_FirmwareUpdateStarted","value": "1100"}
@@ -473,6 +478,8 @@
,{"name": "VRInitError_Init_WatchdogDisabledInSettings","value": "132"}
,{"name": "VRInitError_Init_VRDashboardNotFound","value": "133"}
,{"name": "VRInitError_Init_VRDashboardStartupFailed","value": "134"}
+ ,{"name": "VRInitError_Init_VRHomeNotFound","value": "135"}
+ ,{"name": "VRInitError_Init_VRHomeStartupFailed","value": "136"}
,{"name": "VRInitError_Driver_Failed","value": "200"}
,{"name": "VRInitError_Driver_Unknown","value": "201"}
,{"name": "VRInitError_Driver_HmdUnknown","value": "202"}
@@ -499,6 +506,7 @@
,{"name": "VRInitError_Compositor_FirmwareRequiresUpdate","value": "402"}
,{"name": "VRInitError_Compositor_OverlayInitFailed","value": "403"}
,{"name": "VRInitError_Compositor_ScreenshotsInitFailed","value": "404"}
+ ,{"name": "VRInitError_Compositor_UnableToCreateDevice","value": "405"}
,{"name": "VRInitError_VendorSpecific_UnableToConnectToOculusRuntime","value": "1000"}
,{"name": "VRInitError_VendorSpecific_HmdFound_CantOpenDevice","value": "1101"}
,{"name": "VRInitError_VendorSpecific_HmdFound_UnableToRequestConfigStart","value": "1102"}
@@ -628,6 +636,7 @@
,{"name": "VRCompositorError_SharedTexturesNotSupported","value": "106"}
,{"name": "VRCompositorError_IndexOutOfRange","value": "107"}
,{"name": "VRCompositorError_AlreadySubmitted","value": "108"}
+ ,{"name": "VRCompositorError_InvalidBounds","value": "109"}
]}
, {"enumname": "vr::VROverlayInputMethod","values": [
{"name": "VROverlayInputMethod_None","value": "0"}
@@ -738,6 +747,8 @@
]}
],
"consts":[{
+ "constname": "k_nDriverNone","consttype": "const uint32_t", "constval": "4294967295"}
+,{
"constname": "k_unMaxDriverDebugResponseSize","consttype": "const uint32_t", "constval": "32768"}
,{
"constname": "k_unTrackedDeviceIndex_Hmd","consttype": "const uint32_t", "constval": "0"}
@@ -784,7 +795,7 @@
,{
"constname": "k_unScreenshotHandleInvalid","consttype": "const uint32_t", "constval": "0"}
,{
- "constname": "IVRSystem_Version","consttype": "const char *const", "constval": "IVRSystem_015"}
+ "constname": "IVRSystem_Version","consttype": "const char *const", "constval": "IVRSystem_016"}
,{
"constname": "IVRExtendedDisplay_Version","consttype": "const char *const", "constval": "IVRExtendedDisplay_001"}
,{
@@ -886,7 +897,7 @@
,{
"constname": "k_pch_SteamVR_NeverKillProcesses_Bool","consttype": "const char *const", "constval": "neverKillProcesses"}
,{
- "constname": "k_pch_SteamVR_RenderTargetMultiplier_Float","consttype": "const char *const", "constval": "renderTargetMultiplier"}
+ "constname": "k_pch_SteamVR_SupersampleScale_Float","consttype": "const char *const", "constval": "supersampleScale"}
,{
"constname": "k_pch_SteamVR_AllowAsyncReprojection_Bool","consttype": "const char *const", "constval": "allowAsyncReprojection"}
,{
@@ -912,14 +923,14 @@
,{
"constname": "k_pch_SteamVR_EnableHomeApp","consttype": "const char *const", "constval": "enableHomeApp"}
,{
- "constname": "k_pch_SteamVR_SetInitialDefaultHomeApp","consttype": "const char *const", "constval": "setInitialDefaultHomeApp"}
-,{
"constname": "k_pch_SteamVR_CycleBackgroundImageTimeSec_Int32","consttype": "const char *const", "constval": "CycleBackgroundImageTimeSec"}
,{
"constname": "k_pch_SteamVR_RetailDemo_Bool","consttype": "const char *const", "constval": "retailDemo"}
,{
"constname": "k_pch_SteamVR_IpdOffset_Float","consttype": "const char *const", "constval": "ipdOffset"}
,{
+ "constname": "k_pch_SteamVR_AllowSupersampleFiltering_Bool","consttype": "const char *const", "constval": "allowSupersampleFiltering"}
+,{
"constname": "k_pch_Lighthouse_Section","consttype": "const char *const", "constval": "driver_lighthouse"}
,{
"constname": "k_pch_Lighthouse_DisableIMU_Bool","consttype": "const char *const", "constval": "disableimu"}
@@ -998,6 +1009,8 @@
,{
"constname": "k_pch_Perf_TestData_Float","consttype": "const char *const", "constval": "perfTestData"}
,{
+ "constname": "k_pch_Perf_LinuxGPUProfiling_Bool","consttype": "const char *const", "constval": "linuxGPUProfiling"}
+,{
"constname": "k_pch_CollisionBounds_Section","consttype": "const char *const", "constval": "collisionBounds"}
,{
"constname": "k_pch_CollisionBounds_Style_Int32","consttype": "const char *const", "constval": "CollisionBoundsStyle"}
@@ -1077,6 +1090,8 @@
"constname": "IVRScreenshots_Version","consttype": "const char *const", "constval": "IVRScreenshots_001"}
,{
"constname": "IVRResources_Version","consttype": "const char *const", "constval": "IVRResources_001"}
+,{
+ "constname": "IVRDriverManager_Version","consttype": "const char *const", "constval": "IVRDriverManager_001"}
],
"structs":[{"struct": "vr::HmdMatrix34_t","fields": [
{ "fieldname": "m", "fieldtype": "float [3][4]"}]}
@@ -1363,7 +1378,8 @@
{ "fieldname": "m_pVRSettings", "fieldtype": "class vr::IVRSettings *"},
{ "fieldname": "m_pVRApplications", "fieldtype": "class vr::IVRApplications *"},
{ "fieldname": "m_pVRTrackedCamera", "fieldtype": "class vr::IVRTrackedCamera *"},
-{ "fieldname": "m_pVRScreenshots", "fieldtype": "class vr::IVRScreenshots *"}]}
+{ "fieldname": "m_pVRScreenshots", "fieldtype": "class vr::IVRScreenshots *"},
+{ "fieldname": "m_pVRDriverManager", "fieldtype": "class vr::IVRDriverManager *"}]}
],
"methods":[{
"classname": "vr::IVRSystem",
@@ -1439,6 +1455,15 @@
}
,{
"classname": "vr::IVRSystem",
+ "methodname": "GetOutputDevice",
+ "returntype": "void",
+ "params": [
+{ "paramname": "pnDevice" ,"paramtype": "uint64_t *"},
+{ "paramname": "textureType" ,"paramtype": "vr::ETextureType"}
+ ]
+}
+,{
+ "classname": "vr::IVRSystem",
"methodname": "IsDisplayOnDesktop",
"returntype": "bool"
}
@@ -3844,5 +3869,20 @@
{ "paramname": "unBufferLen" ,"paramtype": "uint32_t"}
]
}
+,{
+ "classname": "vr::IVRDriverManager",
+ "methodname": "GetDriverCount",
+ "returntype": "uint32_t"
+}
+,{
+ "classname": "vr::IVRDriverManager",
+ "methodname": "GetDriverName",
+ "returntype": "uint32_t",
+ "params": [
+{ "paramname": "nDriver" ,"paramtype": "vr::DriverId_t"},
+{ "paramname": "pchValue" ,"out_string": " " ,"paramtype": "char *"},
+{ "paramname": "unBufferSize" ,"paramtype": "uint32_t"}
+ ]
+}
]
}
\ No newline at end of file
diff --git a/headers/openvr_capi.h b/headers/openvr_capi.h
index f895668..471aca6 100644
--- a/headers/openvr_capi.h
+++ b/headers/openvr_capi.h
@@ -60,6 +60,7 @@ typedef uint32_t PropertyTypeTag_t;
// OpenVR Constants
+static const unsigned int k_nDriverNone = 4294967295;
static const unsigned int k_unMaxDriverDebugResponseSize = 32768;
static const unsigned int k_unTrackedDeviceIndex_Hmd = 0;
static const unsigned int k_unMaxTrackedDeviceCount = 16;
@@ -83,7 +84,7 @@ static const unsigned int k_unMaxPropertyStringSize = 32768;
static const unsigned int k_unControllerStateAxisCount = 5;
static const unsigned long k_ulOverlayHandleInvalid = 0;
static const unsigned int k_unScreenshotHandleInvalid = 0;
-static const char * IVRSystem_Version = "IVRSystem_015";
+static const char * IVRSystem_Version = "IVRSystem_016";
static const char * IVRExtendedDisplay_Version = "IVRExtendedDisplay_001";
static const char * IVRTrackedCamera_Version = "IVRTrackedCamera_003";
static const unsigned int k_unMaxApplicationKeyLength = 128;
@@ -134,7 +135,7 @@ static const char * k_pch_SteamVR_UsingSpeakers_Bool = "usingSpeakers";
static const char * k_pch_SteamVR_SpeakersForwardYawOffsetDegrees_Float = "speakersForwardYawOffsetDegrees";
static const char * k_pch_SteamVR_BaseStationPowerManagement_Bool = "basestationPowerManagement";
static const char * k_pch_SteamVR_NeverKillProcesses_Bool = "neverKillProcesses";
-static const char * k_pch_SteamVR_RenderTargetMultiplier_Float = "renderTargetMultiplier";
+static const char * k_pch_SteamVR_SupersampleScale_Float = "supersampleScale";
static const char * k_pch_SteamVR_AllowAsyncReprojection_Bool = "allowAsyncReprojection";
static const char * k_pch_SteamVR_AllowReprojection_Bool = "allowInterleavedReprojection";
static const char * k_pch_SteamVR_ForceReprojection_Bool = "forceReprojection";
@@ -147,10 +148,10 @@ static const char * k_pch_SteamVR_StartCompositorFromAppLaunch_Bool = "startComp
static const char * k_pch_SteamVR_StartDashboardFromAppLaunch_Bool = "startDashboardFromAppLaunch";
static const char * k_pch_SteamVR_StartOverlayAppsFromDashboard_Bool = "startOverlayAppsFromDashboard";
static const char * k_pch_SteamVR_EnableHomeApp = "enableHomeApp";
-static const char * k_pch_SteamVR_SetInitialDefaultHomeApp = "setInitialDefaultHomeApp";
static const char * k_pch_SteamVR_CycleBackgroundImageTimeSec_Int32 = "CycleBackgroundImageTimeSec";
static const char * k_pch_SteamVR_RetailDemo_Bool = "retailDemo";
static const char * k_pch_SteamVR_IpdOffset_Float = "ipdOffset";
+static const char * k_pch_SteamVR_AllowSupersampleFiltering_Bool = "allowSupersampleFiltering";
static const char * k_pch_Lighthouse_Section = "driver_lighthouse";
static const char * k_pch_Lighthouse_DisableIMU_Bool = "disableimu";
static const char * k_pch_Lighthouse_UseDisambiguation_String = "usedisambiguation";
@@ -190,6 +191,7 @@ static const char * k_pch_Perf_NotifyOnlyOnce_Bool = "warnOnlyOnce";
static const char * k_pch_Perf_AllowTimingStore_Bool = "allowTimingStore";
static const char * k_pch_Perf_SaveTimingsOnExit_Bool = "saveTimingsOnExit";
static const char * k_pch_Perf_TestData_Float = "perfTestData";
+static const char * k_pch_Perf_LinuxGPUProfiling_Bool = "linuxGPUProfiling";
static const char * k_pch_CollisionBounds_Section = "collisionBounds";
static const char * k_pch_CollisionBounds_Style_Int32 = "CollisionBoundsStyle";
static const char * k_pch_CollisionBounds_GroundPerimeterOn_Bool = "CollisionBoundsGroundPerimeterOn";
@@ -230,6 +232,7 @@ static const char * k_pch_modelskin_Section = "modelskins";
static const char * k_pch_Driver_Enable_Bool = "enable";
static const char * IVRScreenshots_Version = "IVRScreenshots_001";
static const char * IVRResources_Version = "IVRResources_001";
+static const char * IVRDriverManager_Version = "IVRDriverManager_001";
// OpenVR Enums
@@ -370,6 +373,9 @@ typedef enum ETrackedDeviceProperty
ETrackedDeviceProperty_Prop_DisplayMCImageNumChannels_Int32 = 2040,
ETrackedDeviceProperty_Prop_DisplayMCImageData_Binary = 2041,
ETrackedDeviceProperty_Prop_SecondsFromPhotonsToVblank_Float = 2042,
+ ETrackedDeviceProperty_Prop_DriverDirectModeSendsVsyncEvents_Bool = 2043,
+ ETrackedDeviceProperty_Prop_DisplayDebugMode_Bool = 2044,
+ ETrackedDeviceProperty_Prop_GraphicsAdapterLuid_Uint64 = 2045,
ETrackedDeviceProperty_Prop_AttachedDeviceId_String = 3000,
ETrackedDeviceProperty_Prop_SupportedButtons_Uint64 = 3001,
ETrackedDeviceProperty_Prop_Axis0Type_Int32 = 3002,
@@ -527,6 +533,7 @@ typedef enum EVREventType
EVREventType_VREvent_ModelSkinSettingsHaveChanged = 853,
EVREventType_VREvent_EnvironmentSettingsHaveChanged = 854,
EVREventType_VREvent_PowerSettingsHaveChanged = 855,
+ EVREventType_VREvent_EnableHomeAppSettingsHaveChanged = 856,
EVREventType_VREvent_StatusUpdate = 900,
EVREventType_VREvent_MCImageUpdated = 1000,
EVREventType_VREvent_FirmwareUpdateStarted = 1100,
@@ -722,6 +729,8 @@ typedef enum EVRInitError
EVRInitError_VRInitError_Init_WatchdogDisabledInSettings = 132,
EVRInitError_VRInitError_Init_VRDashboardNotFound = 133,
EVRInitError_VRInitError_Init_VRDashboardStartupFailed = 134,
+ EVRInitError_VRInitError_Init_VRHomeNotFound = 135,
+ EVRInitError_VRInitError_Init_VRHomeStartupFailed = 136,
EVRInitError_VRInitError_Driver_Failed = 200,
EVRInitError_VRInitError_Driver_Unknown = 201,
EVRInitError_VRInitError_Driver_HmdUnknown = 202,
@@ -748,6 +757,7 @@ typedef enum EVRInitError
EVRInitError_VRInitError_Compositor_FirmwareRequiresUpdate = 402,
EVRInitError_VRInitError_Compositor_OverlayInitFailed = 403,
EVRInitError_VRInitError_Compositor_ScreenshotsInitFailed = 404,
+ EVRInitError_VRInitError_Compositor_UnableToCreateDevice = 405,
EVRInitError_VRInitError_VendorSpecific_UnableToConnectToOculusRuntime = 1000,
EVRInitError_VRInitError_VendorSpecific_HmdFound_CantOpenDevice = 1101,
EVRInitError_VRInitError_VendorSpecific_HmdFound_UnableToRequestConfigStart = 1102,
@@ -899,6 +909,7 @@ typedef enum EVRCompositorError
EVRCompositorError_VRCompositorError_SharedTexturesNotSupported = 106,
EVRCompositorError_VRCompositorError_IndexOutOfRange = 107,
EVRCompositorError_VRCompositorError_AlreadySubmitted = 108,
+ EVRCompositorError_VRCompositorError_InvalidBounds = 109,
} EVRCompositorError;
typedef enum VROverlayInputMethod
@@ -1047,6 +1058,7 @@ typedef void * glSharedTextureHandle_t;
typedef int32_t glInt_t;
typedef uint32_t glUInt_t;
typedef uint64_t SharedTextureHandle_t;
+typedef uint32_t DriverId_t;
typedef uint32_t TrackedDeviceIndex_t;
typedef uint64_t PropertyContainerHandle_t;
typedef uint32_t PropertyTypeTag_t;
@@ -1503,6 +1515,7 @@ typedef struct COpenVRContext
intptr_t m_pVRApplications; // class vr::IVRApplications *
intptr_t m_pVRTrackedCamera; // class vr::IVRTrackedCamera *
intptr_t m_pVRScreenshots; // class vr::IVRScreenshots *
+ intptr_t m_pVRDriverManager; // class vr::IVRDriverManager *
} COpenVRContext;
@@ -1560,6 +1573,7 @@ struct VR_IVRSystem_FnTable
bool (OPENVR_FNTABLE_CALLTYPE *GetTimeSinceLastVsync)(float * pfSecondsSinceLastVsync, uint64_t * pulFrameCounter);
int32_t (OPENVR_FNTABLE_CALLTYPE *GetD3D9AdapterIndex)();
void (OPENVR_FNTABLE_CALLTYPE *GetDXGIOutputInfo)(int32_t * pnAdapterIndex);
+ void (OPENVR_FNTABLE_CALLTYPE *GetOutputDevice)(uint64_t * pnDevice, ETextureType textureType);
bool (OPENVR_FNTABLE_CALLTYPE *IsDisplayOnDesktop)();
bool (OPENVR_FNTABLE_CALLTYPE *SetDisplayVisibility)(bool bIsVisibleOnDesktop);
void (OPENVR_FNTABLE_CALLTYPE *GetDeviceToAbsoluteTrackingPose)(ETrackingUniverseOrigin eOrigin, float fPredictedSecondsToPhotonsFromNow, struct TrackedDevicePose_t * pTrackedDevicePoseArray, uint32_t unTrackedDevicePoseArrayCount);
@@ -1882,6 +1896,12 @@ struct VR_IVRResources_FnTable
uint32_t (OPENVR_FNTABLE_CALLTYPE *GetResourceFullPath)(char * pchResourceName, char * pchResourceTypeDirectory, char * pchPathBuffer, uint32_t unBufferLen);
};
+struct VR_IVRDriverManager_FnTable
+{
+ uint32_t (OPENVR_FNTABLE_CALLTYPE *GetDriverCount)();
+ uint32_t (OPENVR_FNTABLE_CALLTYPE *GetDriverName)(DriverId_t nDriver, char * pchValue, uint32_t unBufferSize);
+};
+
#if 0
// Global entry points
diff --git a/headers/openvr_driver.h b/headers/openvr_driver.h
index e0732a0..20614b7 100644
--- a/headers/openvr_driver.h
+++ b/headers/openvr_driver.h
@@ -143,6 +143,9 @@ enum ETrackingResult
TrackingResult_Running_OutOfRange = 201,
};
+typedef uint32_t DriverId_t;
+static const uint32_t k_nDriverNone = 0xFFFFFFFF;
+
static const uint32_t k_unMaxDriverDebugResponseSize = 32768;
/** Used to pass device IDs to API calls */
@@ -309,6 +312,9 @@ enum ETrackedDeviceProperty
Prop_DisplayMCImageNumChannels_Int32 = 2040,
Prop_DisplayMCImageData_Binary = 2041,
Prop_SecondsFromPhotonsToVblank_Float = 2042,
+ Prop_DriverDirectModeSendsVsyncEvents_Bool = 2043,
+ Prop_DisplayDebugMode_Bool = 2044,
+ Prop_GraphicsAdapterLuid_Uint64 = 2045,
// Properties that are unique to TrackedDeviceClass_Controller
Prop_AttachedDeviceId_String = 3000,
@@ -330,15 +336,15 @@ enum ETrackedDeviceProperty
Prop_ModeLabel_String = 4006,
// Properties that are used for user interface like icons names
- Prop_IconPathName_String = 5000, // usually a directory named "icons"
- Prop_NamedIconPathDeviceOff_String = 5001, // PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
- Prop_NamedIconPathDeviceSearching_String = 5002, // PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
- Prop_NamedIconPathDeviceSearchingAlert_String = 5003, // PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
- Prop_NamedIconPathDeviceReady_String = 5004, // PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
- Prop_NamedIconPathDeviceReadyAlert_String = 5005, // PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
- Prop_NamedIconPathDeviceNotReady_String = 5006, // PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
- Prop_NamedIconPathDeviceStandby_String = 5007, // PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
- Prop_NamedIconPathDeviceAlertLow_String = 5008, // PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
+ Prop_IconPathName_String = 5000, // DEPRECATED. Value not referenced. Now expected to be part of icon path properties.
+ Prop_NamedIconPathDeviceOff_String = 5001, // {driver}/icons/icon_filename - PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
+ Prop_NamedIconPathDeviceSearching_String = 5002, // {driver}/icons/icon_filename - PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
+ Prop_NamedIconPathDeviceSearchingAlert_String = 5003, // {driver}/icons/icon_filename - PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
+ Prop_NamedIconPathDeviceReady_String = 5004, // {driver}/icons/icon_filename - PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
+ Prop_NamedIconPathDeviceReadyAlert_String = 5005, // {driver}/icons/icon_filename - PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
+ Prop_NamedIconPathDeviceNotReady_String = 5006, // {driver}/icons/icon_filename - PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
+ Prop_NamedIconPathDeviceStandby_String = 5007, // {driver}/icons/icon_filename - PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
+ Prop_NamedIconPathDeviceAlertLow_String = 5008, // {driver}/icons/icon_filename - PNG for static icon, or GIF for animation, 50x32 for headsets and 32x32 for others
// Properties that are used by helpers, but are opaque to applications
Prop_DisplayHiddenArea_Binary_Start = 5100,
@@ -539,6 +545,7 @@ enum EVREventType
VREvent_ModelSkinSettingsHaveChanged = 853,
VREvent_EnvironmentSettingsHaveChanged = 854,
VREvent_PowerSettingsHaveChanged = 855,
+ VREvent_EnableHomeAppSettingsHaveChanged = 856,
VREvent_StatusUpdate = 900,
@@ -1044,6 +1051,8 @@ enum EVRInitError
VRInitError_Init_WatchdogDisabledInSettings = 132,
VRInitError_Init_VRDashboardNotFound = 133,
VRInitError_Init_VRDashboardStartupFailed = 134,
+ VRInitError_Init_VRHomeNotFound = 135,
+ VRInitError_Init_VRHomeStartupFailed = 136,
VRInitError_Driver_Failed = 200,
VRInitError_Driver_Unknown = 201,
@@ -1074,6 +1083,7 @@ enum EVRInitError
VRInitError_Compositor_FirmwareRequiresUpdate = 402,
VRInitError_Compositor_OverlayInitFailed = 403,
VRInitError_Compositor_ScreenshotsInitFailed = 404,
+ VRInitError_Compositor_UnableToCreateDevice = 405,
VRInitError_VendorSpecific_UnableToConnectToOculusRuntime = 1000,
@@ -1374,7 +1384,7 @@ namespace vr
static const char * const k_pch_SteamVR_SpeakersForwardYawOffsetDegrees_Float = "speakersForwardYawOffsetDegrees";
static const char * const k_pch_SteamVR_BaseStationPowerManagement_Bool = "basestationPowerManagement";
static const char * const k_pch_SteamVR_NeverKillProcesses_Bool = "neverKillProcesses";
- static const char * const k_pch_SteamVR_RenderTargetMultiplier_Float = "renderTargetMultiplier";
+ static const char * const k_pch_SteamVR_SupersampleScale_Float = "supersampleScale";
static const char * const k_pch_SteamVR_AllowAsyncReprojection_Bool = "allowAsyncReprojection";
static const char * const k_pch_SteamVR_AllowReprojection_Bool = "allowInterleavedReprojection";
static const char * const k_pch_SteamVR_ForceReprojection_Bool = "forceReprojection";
@@ -1387,10 +1397,10 @@ namespace vr
static const char * const k_pch_SteamVR_StartDashboardFromAppLaunch_Bool = "startDashboardFromAppLaunch";
static const char * const k_pch_SteamVR_StartOverlayAppsFromDashboard_Bool = "startOverlayAppsFromDashboard";
static const char * const k_pch_SteamVR_EnableHomeApp = "enableHomeApp";
- static const char * const k_pch_SteamVR_SetInitialDefaultHomeApp = "setInitialDefaultHomeApp";
static const char * const k_pch_SteamVR_CycleBackgroundImageTimeSec_Int32 = "CycleBackgroundImageTimeSec";
static const char * const k_pch_SteamVR_RetailDemo_Bool = "retailDemo";
static const char * const k_pch_SteamVR_IpdOffset_Float = "ipdOffset";
+ static const char * const k_pch_SteamVR_AllowSupersampleFiltering_Bool = "allowSupersampleFiltering";
//-----------------------------------------------------------------------------
// lighthouse keys
@@ -1448,6 +1458,7 @@ namespace vr
static const char * const k_pch_Perf_AllowTimingStore_Bool = "allowTimingStore";
static const char * const k_pch_Perf_SaveTimingsOnExit_Bool = "saveTimingsOnExit";
static const char * const k_pch_Perf_TestData_Float = "perfTestData";
+ static const char * const k_pch_Perf_LinuxGPUProfiling_Bool = "linuxGPUProfiling";
//-----------------------------------------------------------------------------
// collision bounds keys
@@ -2385,6 +2396,49 @@ namespace vr
}
+// ivrresources.h
+namespace vr
+{
+
+class IVRResources
+{
+public:
+
+ // ------------------------------------
+ // Shared Resource Methods
+ // ------------------------------------
+
+ /** Loads the specified resource into the provided buffer if large enough.
+ * Returns the size in bytes of the buffer required to hold the specified resource. */
+ virtual uint32_t LoadSharedResource( const char *pchResourceName, char *pchBuffer, uint32_t unBufferLen ) = 0;
+
+ /** Provides the full path to the specified resource. Resource names can include named directories for
+ * drivers and other things, and this resolves all of those and returns the actual physical path.
+ * pchResourceTypeDirectory is the subdirectory of resources to look in. */
+ virtual uint32_t GetResourceFullPath( const char *pchResourceName, const char *pchResourceTypeDirectory, char *pchPathBuffer, uint32_t unBufferLen ) = 0;
+};
+
+static const char * const IVRResources_Version = "IVRResources_001";
+
+
+}
+// ivrdrivermanager.h
+namespace vr
+{
+
+class IVRDriverManager
+{
+public:
+ virtual uint32_t GetDriverCount() const = 0;
+
+ /** Returns the length of the number of bytes necessary to hold this string including the trailing null. */
+ virtual uint32_t GetDriverName( vr::DriverId_t nDriver, VR_OUT_STRING() char *pchValue, uint32_t unBufferSize ) = 0;
+};
+
+static const char * const IVRDriverManager_Version = "IVRDriverManager_001";
+
+} // namespace vr
+
@@ -2402,6 +2456,8 @@ namespace vr
IServerTrackedDeviceProvider_Version,
IVRWatchdogProvider_Version,
IVRVirtualDisplay_Version,
+ IVRDriverManager_Version,
+ IVRResources_Version,
nullptr
};
@@ -2489,14 +2545,37 @@ namespace vr
return VRDriverContext()->GetDriverHandle();
}
+ IVRDriverManager *VRDriverManager()
+ {
+ if ( !m_pVRDriverManager )
+ {
+ EVRInitError eError;
+ m_pVRDriverManager = (IVRDriverManager *)VRDriverContext()->GetGenericInterface( IVRDriverManager_Version, &eError );
+ }
+ return m_pVRDriverManager;
+ }
+
+ IVRResources *VRResources()
+ {
+ if ( !m_pVRResources )
+ {
+ EVRInitError eError;
+ m_pVRResources = (IVRResources *)VRDriverContext()->GetGenericInterface( IVRResources_Version, &eError );
+ }
+ return m_pVRResources;
+ }
+
private:
- IVRSettings *m_pVRSettings;
- IVRProperties *m_pVRProperties;
- CVRPropertyHelpers m_propertyHelpers;
+ CVRPropertyHelpers m_propertyHelpers;
CVRHiddenAreaHelpers m_hiddenAreaHelpers;
- IVRServerDriverHost *m_pVRServerDriverHost;
- IVRWatchdogHost *m_pVRWatchdogHost;
- IVRDriverLog *m_pVRDriverLog;
+
+ IVRSettings *m_pVRSettings;
+ IVRProperties *m_pVRProperties;
+ IVRServerDriverHost *m_pVRServerDriverHost;
+ IVRWatchdogHost *m_pVRWatchdogHost;
+ IVRDriverLog *m_pVRDriverLog;
+ IVRDriverManager *m_pVRDriverManager;
+ IVRResources *m_pVRResources;
};
inline COpenVRDriverContext &OpenVRInternal_ModuleServerDriverContext()
@@ -2513,6 +2592,9 @@ namespace vr
inline IVRServerDriverHost *VR_CALLTYPE VRServerDriverHost() { return OpenVRInternal_ModuleServerDriverContext().VRServerDriverHost(); }
inline IVRWatchdogHost *VR_CALLTYPE VRWatchdogHost() { return OpenVRInternal_ModuleServerDriverContext().VRWatchdogHost(); }
inline DriverHandle_t VR_CALLTYPE VRDriverHandle() { return OpenVRInternal_ModuleServerDriverContext().VRDriverHandle(); }
+ inline IVRDriverManager *VR_CALLTYPE VRDriverManager() { return OpenVRInternal_ModuleServerDriverContext().VRDriverManager(); }
+ inline IVRResources *VR_CALLTYPE VRResources() { return OpenVRInternal_ModuleServerDriverContext().VRResources(); }
+
inline void COpenVRDriverContext::Clear()
{
m_pVRSettings = nullptr;
@@ -2520,6 +2602,8 @@ namespace vr
m_pVRServerDriverHost = nullptr;
m_pVRDriverLog = nullptr;
m_pVRWatchdogHost = nullptr;
+ m_pVRDriverManager = nullptr;
+ m_pVRResources = nullptr;
}
inline EVRInitError COpenVRDriverContext::InitServer()
@@ -2528,7 +2612,9 @@ namespace vr
if ( !VRServerDriverHost()
|| !VRSettings()
|| !VRProperties()
- || !VRDriverLog() )
+ || !VRDriverLog()
+ || !VRDriverManager()
+ || !VRResources() )
return VRInitError_Init_InterfaceNotFound;
return VRInitError_None;
}
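The openvr_driver.h changes above expose VRDriverManager()/VRResources() to drivers and add the two new driver properties from the release notes. A sketch of how a direct-mode HMD driver might set them (my illustration, not SDK code; CMyHmdDriver and m_nAdapterLuid are hypothetical names standing in for a driver's own tracked-device class and adapter lookup):

// Inside a hypothetical vr::ITrackedDeviceServerDriver implementation.
vr::EVRInitError CMyHmdDriver::Activate( vr::TrackedDeviceIndex_t unObjectId )
{
    m_unObjectId = unObjectId;
    vr::PropertyContainerHandle_t ulContainer =
        vr::VRProperties()->TrackedDeviceToPropertyContainer( m_unObjectId );

    // This driver fires VsyncEvent itself, so opt out of the hardcoded 2.8 ms offset.
    vr::VRProperties()->SetBoolProperty( ulContainer,
        vr::Prop_DriverDirectModeSendsVsyncEvents_Bool, true );

    // Tell the compositor which GPU the HMD is attached to (a LUID on Win32).
    vr::VRProperties()->SetUint64Property( ulContainer,
        vr::Prop_GraphicsAdapterLuid_Uint64, m_nAdapterLuid );

    return vr::VRInitError_None;
}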
diff --git a/lib/linux32/libopenvr_api.so b/lib/linux32/libopenvr_api.so
index ccfeaba..2ffa040 100755
--- a/lib/linux32/libopenvr_api.so
+++ b/lib/linux32/libopenvr_api.so
Binary files differ
diff --git a/lib/linux64/libopenvr_api.so b/lib/linux64/libopenvr_api.so
index d83a610..db1bc07 100755
--- a/lib/linux64/libopenvr_api.so
+++ b/lib/linux64/libopenvr_api.so
Binary files differ
diff --git a/lib/win32/openvr_api.lib b/lib/win32/openvr_api.lib
index 130bad0..0fecb4a 100644
--- a/lib/win32/openvr_api.lib
+++ b/lib/win32/openvr_api.lib
Binary files differ
diff --git a/lib/win64/openvr_api.lib b/lib/win64/openvr_api.lib
index 6d26485..862ef75 100644
--- a/lib/win64/openvr_api.lib
+++ b/lib/win64/openvr_api.lib
Binary files differ
diff --git a/samples/CMakeLists.txt b/samples/CMakeLists.txt
index 6ec4168..a573f83 100644
--- a/samples/CMakeLists.txt
+++ b/samples/CMakeLists.txt
@@ -91,7 +91,6 @@ if( (${CMAKE_CXX_COMPILER_ID} MATCHES "GNU")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -m32")
endif()
elseif(CMAKE_CXX_COMPILER_ID MATCHES "MSVC")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Za")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /W2 /DEBUG")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MP /INCREMENTAL:NO")
else()
@@ -153,6 +152,23 @@ else()
endif()
set(SDL2_INCLUDE_DIR ${THIRDPARTY_DIR}/sdl2-2.0.3/include)
+## Vulkan
+if(NOT (${CMAKE_SYSTEM_NAME} MATCHES "Darwin"))
+ find_library(VULKAN_LIBRARY
+ NAMES
+ vulkan-1
+ vulkan
+ PATHS
+ ${THIRDPARTY_DIR}/vulkan-1.0.49.0/lib
+ PATH_SUFFIXES
+ linux${PLATFORM}
+ ${WINDOWS_PATH_SUFFIXES}
+ NO_DEFAULT_PATH
+ )
+ set(VULKAN_INCLUDE_DIR ${THIRDPARTY_DIR}/vulkan-1.0.49.0/include)
+endif()
+
+
## Qt 5
## Important :
## Remember to set CMAKE_PREFIX_PATH to Qt5 cmake modules path for your targeted platform
@@ -218,11 +234,18 @@ include_directories(
${OPENVR_INCLUDE_DIR}
)
+if(NOT (${CMAKE_SYSTEM_NAME} MATCHES "Darwin"))
+ include_directories(${VULKAN_INCLUDE_DIR})
+endif()
+
# -----------------------------------------------------------------------------
## SUBDIRECTORIES ##
add_subdirectory(driver_sample)
add_subdirectory(hellovr_opengl)
+if(NOT (${CMAKE_SYSTEM_NAME} MATCHES "Darwin"))
+ add_subdirectory(hellovr_vulkan)
+endif()
add_subdirectory(helloworldoverlay)
add_subdirectory(tracked_camera_openvr_sample)
diff --git a/samples/bin/linux64/libopenvr_api.so b/samples/bin/linux64/libopenvr_api.so
index 7a6de64..36e0b24 100644
--- a/samples/bin/linux64/libopenvr_api.so
+++ b/samples/bin/linux64/libopenvr_api.so
Binary files differ
diff --git a/samples/bin/osx32/libopenvr_api.dylib b/samples/bin/osx32/libopenvr_api.dylib
index 5a20f3a..f0e89a8 100644
--- a/samples/bin/osx32/libopenvr_api.dylib
+++ b/samples/bin/osx32/libopenvr_api.dylib
Binary files differ
diff --git a/samples/bin/shaders/axes.hlsl b/samples/bin/shaders/axes.hlsl
new file mode 100644
index 0000000..da36db4
--- /dev/null
+++ b/samples/bin/shaders/axes.hlsl
@@ -0,0 +1,34 @@
+
+// Vertex Shader
+struct VS_INPUT
+{
+ float3 vPosition : POSITION;
+ float3 vColor: COLOR0;
+};
+
+struct PS_INPUT
+{
+ float4 vPosition : SV_POSITION;
+ float4 vColor : TEXCOORD0;
+};
+
+cbuffer SceneConstantBuffer : register(b0)
+{
+ float4x4 g_MVPMatrix;
+};
+
+PS_INPUT VSMain( VS_INPUT i )
+{
+ PS_INPUT o;
+ o.vPosition = mul( g_MVPMatrix, float4( i.vPosition, 1.0 ) );
+#ifdef VULKAN
+ o.vPosition.y = -o.vPosition.y;
+#endif
+ o.vColor = float4( i.vColor.rgb, 1.0 );
+ return o;
+}
+
+float4 PSMain( PS_INPUT i ) : SV_TARGET
+{
+ return i.vColor;
+}
diff --git a/samples/bin/shaders/axes_ps.spv b/samples/bin/shaders/axes_ps.spv
new file mode 100644
index 0000000..b69111a
--- /dev/null
+++ b/samples/bin/shaders/axes_ps.spv
Binary files differ
diff --git a/samples/bin/shaders/axes_vs.spv b/samples/bin/shaders/axes_vs.spv
new file mode 100644
index 0000000..f5ff1fa
--- /dev/null
+++ b/samples/bin/shaders/axes_vs.spv
Binary files differ
diff --git a/samples/bin/shaders/build_vulkan_shaders.bat b/samples/bin/shaders/build_vulkan_shaders.bat
new file mode 100755
index 0000000..70764a2
--- /dev/null
+++ b/samples/bin/shaders/build_vulkan_shaders.bat
@@ -0,0 +1,5 @@
+@ECHO OFF
+for /r %%i in ("*.hlsl") Do (
+ %VULKAN_SDK%\bin\glslangvalidator.exe -S vert -e VSMain -o %%~ni_vs.spv -V --hlsl-iomap --auto-map-bindings --shift-cbuffer-binding 0 --shift-texture-binding 1 --shift-sampler-binding 2 -D %%i
+ %VULKAN_SDK%\bin\glslangvalidator.exe -S frag -e PSMain -o %%~ni_ps.spv -V --hlsl-iomap --auto-map-bindings --shift-cbuffer-binding 0 --shift-texture-binding 1 --shift-sampler-binding 2 -D %%i
+)
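
build_vulkan_shaders.bat compiles each .hlsl file twice with glslangValidator, once per stage: VSMain into *_vs.spv and PSMain into *_ps.spv, with cbuffer/texture/sampler bindings shifted into separate descriptor slots. At runtime the precompiled SPIR-V is consumed as-is; a minimal sketch of wrapping one of these blobs in a VkShaderModule (the function name and error handling are illustrative, not taken from hellovr_vulkan) looks like:

// Sketch with assumed names: load a precompiled .spv file and create a shader module.
#include <vulkan/vulkan.h>
#include <cstdint>
#include <fstream>
#include <vector>

VkShaderModule LoadSpirvModule( VkDevice device, const char *path )
{
    std::ifstream file( path, std::ios::binary | std::ios::ate );
    if ( !file )
        return VK_NULL_HANDLE;

    const size_t size = static_cast<size_t>( file.tellg() );
    std::vector<uint32_t> code( ( size + 3 ) / 4 );   // SPIR-V is a stream of 32-bit words
    file.seekg( 0 );
    file.read( reinterpret_cast<char *>( code.data() ), size );

    VkShaderModuleCreateInfo info = {};
    info.sType    = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
    info.codeSize = size;          // size in bytes
    info.pCode    = code.data();   // pointer to the SPIR-V words

    VkShaderModule module = VK_NULL_HANDLE;
    if ( vkCreateShaderModule( device, &info, nullptr, &module ) != VK_SUCCESS )
        return VK_NULL_HANDLE;
    return module;
}
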
diff --git a/samples/bin/shaders/companion.hlsl b/samples/bin/shaders/companion.hlsl
new file mode 100644
index 0000000..0bfe72e
--- /dev/null
+++ b/samples/bin/shaders/companion.hlsl
@@ -0,0 +1,40 @@
+
+// Vertex Shader
+struct VS_INPUT
+{
+ float2 vPosition : POSITION;
+ float2 vUVCoords: TEXCOORD0;
+};
+
+struct PS_INPUT
+{
+ float4 vPosition : SV_POSITION;
+ float2 vUVCoords : TEXCOORD0;
+};
+
+Texture2DMS<float4> g_Texture : register(t0);
+
+
+PS_INPUT VSMain( VS_INPUT i )
+{
+ PS_INPUT o;
+ o.vPosition = float4( i.vPosition, 0.0, 1.0 );
+#ifdef VULKAN
+ o.vPosition.y = -o.vPosition.y;
+#endif
+ o.vUVCoords = i.vUVCoords;
+ return o;
+}
+
+float4 PSMain( PS_INPUT i ) : SV_TARGET
+{
+ // Determine texture dimensions
+ float nTextureWidth;
+ float nTextureHeight;
+ float nNumSamples;
+ g_Texture.GetDimensions( nTextureWidth, nTextureHeight, nNumSamples );
+
+ // Fetch sample 0
+ float4 vColor = g_Texture.Load( i.vUVCoords * float2( nTextureWidth, nTextureHeight ), 0 );
+ return vColor;
+}
diff --git a/samples/bin/shaders/companion_ps.spv b/samples/bin/shaders/companion_ps.spv
new file mode 100644
index 0000000..4a75979
--- /dev/null
+++ b/samples/bin/shaders/companion_ps.spv
Binary files differ
diff --git a/samples/bin/shaders/companion_vs.spv b/samples/bin/shaders/companion_vs.spv
new file mode 100644
index 0000000..1010014
--- /dev/null
+++ b/samples/bin/shaders/companion_vs.spv
Binary files differ
diff --git a/samples/bin/shaders/rendermodel.hlsl b/samples/bin/shaders/rendermodel.hlsl
new file mode 100644
index 0000000..7fdb5d8
--- /dev/null
+++ b/samples/bin/shaders/rendermodel.hlsl
@@ -0,0 +1,40 @@
+
+// Vertex Shader
+struct VS_INPUT
+{
+ float3 vPosition : POSITION;
+ float3 vNormal: TEXCOORD0;
+ float2 vUVCoords: TEXCOORD1;
+};
+
+struct PS_INPUT
+{
+ float4 vPosition : SV_POSITION;
+ float2 vUVCoords : TEXCOORD0;
+};
+
+cbuffer SceneConstantBuffer : register(b0)
+{
+ float4x4 g_MVPMatrix;
+};
+
+SamplerState g_SamplerState : register(s0);
+Texture2D g_Texture : register(t0);
+
+
+PS_INPUT VSMain( VS_INPUT i )
+{
+ PS_INPUT o;
+ o.vPosition = mul( g_MVPMatrix, float4( i.vPosition, 1.0 ) );
+#ifdef VULKAN
+ o.vPosition.y = -o.vPosition.y;
+#endif
+ o.vUVCoords = i.vUVCoords;
+ return o;
+}
+
+float4 PSMain( PS_INPUT i ) : SV_TARGET
+{
+ float4 vColor = g_Texture.Sample( g_SamplerState, i.vUVCoords );
+ return vColor;
+}
diff --git a/samples/bin/shaders/rendermodel_ps.spv b/samples/bin/shaders/rendermodel_ps.spv
new file mode 100644
index 0000000..beae523
--- /dev/null
+++ b/samples/bin/shaders/rendermodel_ps.spv
Binary files differ
diff --git a/samples/bin/shaders/rendermodel_vs.spv b/samples/bin/shaders/rendermodel_vs.spv
new file mode 100644
index 0000000..3caf361
--- /dev/null
+++ b/samples/bin/shaders/rendermodel_vs.spv
Binary files differ
diff --git a/samples/bin/shaders/scene.hlsl b/samples/bin/shaders/scene.hlsl
new file mode 100644
index 0000000..2ebf0c4
--- /dev/null
+++ b/samples/bin/shaders/scene.hlsl
@@ -0,0 +1,39 @@
+
+// Vertex Shader
+struct VS_INPUT
+{
+ float3 vPosition : POSITION;
+ float2 vUVCoords: TEXCOORD0;
+};
+
+struct PS_INPUT
+{
+ float4 vPosition : SV_POSITION;
+ float2 vUVCoords : TEXCOORD0;
+};
+
+cbuffer SceneConstantBuffer : register(b0)
+{
+ float4x4 g_MVPMatrix;
+};
+
+SamplerState g_SamplerState : register(s0);
+Texture2D g_Texture : register(t0);
+
+
+PS_INPUT VSMain( VS_INPUT i )
+{
+ PS_INPUT o;
+ o.vPosition = mul( g_MVPMatrix, float4( i.vPosition, 1.0 ) );
+#ifdef VULKAN
+ o.vPosition.y = -o.vPosition.y;
+#endif
+ o.vUVCoords = i.vUVCoords;
+ return o;
+}
+
+float4 PSMain( PS_INPUT i ) : SV_TARGET
+{
+ float4 vColor = g_Texture.Sample( g_SamplerState, i.vUVCoords );
+ return vColor;
+}
diff --git a/samples/bin/shaders/scene_ps.spv b/samples/bin/shaders/scene_ps.spv
new file mode 100644
index 0000000..beae523
--- /dev/null
+++ b/samples/bin/shaders/scene_ps.spv
Binary files differ
diff --git a/samples/bin/shaders/scene_vs.spv b/samples/bin/shaders/scene_vs.spv
new file mode 100644
index 0000000..b9f9658
--- /dev/null
+++ b/samples/bin/shaders/scene_vs.spv
Binary files differ
diff --git a/samples/bin/win32/openvr_api.dll b/samples/bin/win32/openvr_api.dll
index aafef72..5c13f29 100644
--- a/samples/bin/win32/openvr_api.dll
+++ b/samples/bin/win32/openvr_api.dll
Binary files differ
diff --git a/samples/bin/win64/openvr_api.dll b/samples/bin/win64/openvr_api.dll
index ddda897..3b70c22 100644
--- a/samples/bin/win64/openvr_api.dll
+++ b/samples/bin/win64/openvr_api.dll
Binary files differ
diff --git a/samples/hellovr_dx12/d3dx12.h b/samples/hellovr_dx12/d3dx12.h
new file mode 100644
index 0000000..d0575d2
--- /dev/null
+++ b/samples/hellovr_dx12/d3dx12.h
@@ -0,0 +1,2540 @@
+//*********************************************************
+//
+// Copyright (c) Microsoft. All rights reserved.
+// This code is licensed under the MIT License (MIT).
+// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF
+// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY
+// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR
+// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT.
+//
+//*********************************************************
+
+#ifndef __D3DX12_H__
+#define __D3DX12_H__
+
+#include "d3d12.h"
+
+#if defined( __cplusplus )
+
+struct CD3DX12_DEFAULT {};
+extern const DECLSPEC_SELECTANY CD3DX12_DEFAULT D3D12_DEFAULT;
+
+//------------------------------------------------------------------------------------------------
+inline bool operator==( const D3D12_VIEWPORT& l, const D3D12_VIEWPORT& r )
+{
+ return l.TopLeftX == r.TopLeftX && l.TopLeftY == r.TopLeftY && l.Width == r.Width &&
+ l.Height == r.Height && l.MinDepth == r.MinDepth && l.MaxDepth == r.MaxDepth;
+}
+
+//------------------------------------------------------------------------------------------------
+inline bool operator!=( const D3D12_VIEWPORT& l, const D3D12_VIEWPORT& r )
+{ return !( l == r ); }
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_RECT : public D3D12_RECT
+{
+ CD3DX12_RECT()
+ {}
+ explicit CD3DX12_RECT( const D3D12_RECT& o ) :
+ D3D12_RECT( o )
+ {}
+ explicit CD3DX12_RECT(
+ LONG Left,
+ LONG Top,
+ LONG Right,
+ LONG Bottom )
+ {
+ left = Left;
+ top = Top;
+ right = Right;
+ bottom = Bottom;
+ }
+ ~CD3DX12_RECT() {}
+ operator const D3D12_RECT&() const { return *this; }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_VIEWPORT : public D3D12_VIEWPORT
+{
+ CD3DX12_VIEWPORT()
+ {}
+ explicit CD3DX12_VIEWPORT( const D3D12_VIEWPORT& o ) :
+ D3D12_VIEWPORT( o )
+ {}
+ explicit CD3DX12_VIEWPORT(
+ FLOAT topLeftX,
+ FLOAT topLeftY,
+ FLOAT width,
+ FLOAT height,
+ FLOAT minDepth = D3D12_MIN_DEPTH,
+ FLOAT maxDepth = D3D12_MAX_DEPTH )
+ {
+ TopLeftX = topLeftX;
+ TopLeftY = topLeftY;
+ Width = width;
+ Height = height;
+ MinDepth = minDepth;
+ MaxDepth = maxDepth;
+ }
+ explicit CD3DX12_VIEWPORT(
+ _In_ ID3D12Resource* pResource,
+ UINT mipSlice = 0,
+ FLOAT topLeftX = 0.0f,
+ FLOAT topLeftY = 0.0f,
+ FLOAT minDepth = D3D12_MIN_DEPTH,
+ FLOAT maxDepth = D3D12_MAX_DEPTH )
+ {
+ D3D12_RESOURCE_DESC Desc = pResource->GetDesc();
+ const UINT64 SubresourceWidth = Desc.Width >> mipSlice;
+ const UINT64 SubresourceHeight = Desc.Height >> mipSlice;
+ switch (Desc.Dimension)
+ {
+ case D3D12_RESOURCE_DIMENSION_BUFFER:
+ TopLeftX = topLeftX;
+ TopLeftY = 0.0f;
+ Width = Desc.Width - topLeftX;
+ Height = 1.0f;
+ break;
+ case D3D12_RESOURCE_DIMENSION_TEXTURE1D:
+ TopLeftX = topLeftX;
+ TopLeftY = 0.0f;
+ Width = (SubresourceWidth ? SubresourceWidth : 1.0f) - topLeftX;
+ Height = 1.0f;
+ break;
+ case D3D12_RESOURCE_DIMENSION_TEXTURE2D:
+ case D3D12_RESOURCE_DIMENSION_TEXTURE3D:
+ TopLeftX = topLeftX;
+ TopLeftY = topLeftY;
+ Width = (SubresourceWidth ? SubresourceWidth : 1.0f) - topLeftX;
+ Height = (SubresourceHeight ? SubresourceHeight: 1.0f) - topLeftY;
+ break;
+ default: break;
+ }
+
+ MinDepth = minDepth;
+ MaxDepth = maxDepth;
+ }
+ ~CD3DX12_VIEWPORT() {}
+ operator const D3D12_VIEWPORT&() const { return *this; }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_BOX : public D3D12_BOX
+{
+ CD3DX12_BOX()
+ {}
+ explicit CD3DX12_BOX( const D3D12_BOX& o ) :
+ D3D12_BOX( o )
+ {}
+ explicit CD3DX12_BOX(
+ LONG Left,
+ LONG Right )
+ {
+ left = Left;
+ top = 0;
+ front = 0;
+ right = Right;
+ bottom = 1;
+ back = 1;
+ }
+ explicit CD3DX12_BOX(
+ LONG Left,
+ LONG Top,
+ LONG Right,
+ LONG Bottom )
+ {
+ left = Left;
+ top = Top;
+ front = 0;
+ right = Right;
+ bottom = Bottom;
+ back = 1;
+ }
+ explicit CD3DX12_BOX(
+ LONG Left,
+ LONG Top,
+ LONG Front,
+ LONG Right,
+ LONG Bottom,
+ LONG Back )
+ {
+ left = Left;
+ top = Top;
+ front = Front;
+ right = Right;
+ bottom = Bottom;
+ back = Back;
+ }
+ ~CD3DX12_BOX() {}
+ operator const D3D12_BOX&() const { return *this; }
+};
+inline bool operator==( const D3D12_BOX& l, const D3D12_BOX& r )
+{
+ return l.left == r.left && l.top == r.top && l.front == r.front &&
+ l.right == r.right && l.bottom == r.bottom && l.back == r.back;
+}
+inline bool operator!=( const D3D12_BOX& l, const D3D12_BOX& r )
+{ return !( l == r ); }
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_DEPTH_STENCIL_DESC : public D3D12_DEPTH_STENCIL_DESC
+{
+ CD3DX12_DEPTH_STENCIL_DESC()
+ {}
+ explicit CD3DX12_DEPTH_STENCIL_DESC( const D3D12_DEPTH_STENCIL_DESC& o ) :
+ D3D12_DEPTH_STENCIL_DESC( o )
+ {}
+ explicit CD3DX12_DEPTH_STENCIL_DESC( CD3DX12_DEFAULT )
+ {
+ DepthEnable = TRUE;
+ DepthWriteMask = D3D12_DEPTH_WRITE_MASK_ALL;
+ DepthFunc = D3D12_COMPARISON_FUNC_LESS;
+ StencilEnable = FALSE;
+ StencilReadMask = D3D12_DEFAULT_STENCIL_READ_MASK;
+ StencilWriteMask = D3D12_DEFAULT_STENCIL_WRITE_MASK;
+ const D3D12_DEPTH_STENCILOP_DESC defaultStencilOp =
+ { D3D12_STENCIL_OP_KEEP, D3D12_STENCIL_OP_KEEP, D3D12_STENCIL_OP_KEEP, D3D12_COMPARISON_FUNC_ALWAYS };
+ FrontFace = defaultStencilOp;
+ BackFace = defaultStencilOp;
+ }
+ explicit CD3DX12_DEPTH_STENCIL_DESC(
+ BOOL depthEnable,
+ D3D12_DEPTH_WRITE_MASK depthWriteMask,
+ D3D12_COMPARISON_FUNC depthFunc,
+ BOOL stencilEnable,
+ UINT8 stencilReadMask,
+ UINT8 stencilWriteMask,
+ D3D12_STENCIL_OP frontStencilFailOp,
+ D3D12_STENCIL_OP frontStencilDepthFailOp,
+ D3D12_STENCIL_OP frontStencilPassOp,
+ D3D12_COMPARISON_FUNC frontStencilFunc,
+ D3D12_STENCIL_OP backStencilFailOp,
+ D3D12_STENCIL_OP backStencilDepthFailOp,
+ D3D12_STENCIL_OP backStencilPassOp,
+ D3D12_COMPARISON_FUNC backStencilFunc )
+ {
+ DepthEnable = depthEnable;
+ DepthWriteMask = depthWriteMask;
+ DepthFunc = depthFunc;
+ StencilEnable = stencilEnable;
+ StencilReadMask = stencilReadMask;
+ StencilWriteMask = stencilWriteMask;
+ FrontFace.StencilFailOp = frontStencilFailOp;
+ FrontFace.StencilDepthFailOp = frontStencilDepthFailOp;
+ FrontFace.StencilPassOp = frontStencilPassOp;
+ FrontFace.StencilFunc = frontStencilFunc;
+ BackFace.StencilFailOp = backStencilFailOp;
+ BackFace.StencilDepthFailOp = backStencilDepthFailOp;
+ BackFace.StencilPassOp = backStencilPassOp;
+ BackFace.StencilFunc = backStencilFunc;
+ }
+ ~CD3DX12_DEPTH_STENCIL_DESC() {}
+ operator const D3D12_DEPTH_STENCIL_DESC&() const { return *this; }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_DEPTH_STENCIL_DESC1 : public D3D12_DEPTH_STENCIL_DESC1
+{
+ CD3DX12_DEPTH_STENCIL_DESC1()
+ {}
+ explicit CD3DX12_DEPTH_STENCIL_DESC1( const D3D12_DEPTH_STENCIL_DESC1& o ) :
+ D3D12_DEPTH_STENCIL_DESC1( o )
+ {}
+ explicit CD3DX12_DEPTH_STENCIL_DESC1( const D3D12_DEPTH_STENCIL_DESC& o )
+ {
+ DepthEnable = o.DepthEnable;
+ DepthWriteMask = o.DepthWriteMask;
+ DepthFunc = o.DepthFunc;
+ StencilEnable = o.StencilEnable;
+ StencilReadMask = o.StencilReadMask;
+ StencilWriteMask = o.StencilWriteMask;
+ FrontFace.StencilFailOp = o.FrontFace.StencilFailOp;
+ FrontFace.StencilDepthFailOp = o.FrontFace.StencilDepthFailOp;
+ FrontFace.StencilPassOp = o.FrontFace.StencilPassOp;
+ FrontFace.StencilFunc = o.FrontFace.StencilFunc;
+ BackFace.StencilFailOp = o.BackFace.StencilFailOp;
+ BackFace.StencilDepthFailOp = o.BackFace.StencilDepthFailOp;
+ BackFace.StencilPassOp = o.BackFace.StencilPassOp;
+ BackFace.StencilFunc = o.BackFace.StencilFunc;
+ DepthBoundsTestEnable = FALSE;
+ }
+ explicit CD3DX12_DEPTH_STENCIL_DESC1( CD3DX12_DEFAULT )
+ {
+ DepthEnable = TRUE;
+ DepthWriteMask = D3D12_DEPTH_WRITE_MASK_ALL;
+ DepthFunc = D3D12_COMPARISON_FUNC_LESS;
+ StencilEnable = FALSE;
+ StencilReadMask = D3D12_DEFAULT_STENCIL_READ_MASK;
+ StencilWriteMask = D3D12_DEFAULT_STENCIL_WRITE_MASK;
+ const D3D12_DEPTH_STENCILOP_DESC defaultStencilOp =
+ { D3D12_STENCIL_OP_KEEP, D3D12_STENCIL_OP_KEEP, D3D12_STENCIL_OP_KEEP, D3D12_COMPARISON_FUNC_ALWAYS };
+ FrontFace = defaultStencilOp;
+ BackFace = defaultStencilOp;
+ DepthBoundsTestEnable = FALSE;
+ }
+ explicit CD3DX12_DEPTH_STENCIL_DESC1(
+ BOOL depthEnable,
+ D3D12_DEPTH_WRITE_MASK depthWriteMask,
+ D3D12_COMPARISON_FUNC depthFunc,
+ BOOL stencilEnable,
+ UINT8 stencilReadMask,
+ UINT8 stencilWriteMask,
+ D3D12_STENCIL_OP frontStencilFailOp,
+ D3D12_STENCIL_OP frontStencilDepthFailOp,
+ D3D12_STENCIL_OP frontStencilPassOp,
+ D3D12_COMPARISON_FUNC frontStencilFunc,
+ D3D12_STENCIL_OP backStencilFailOp,
+ D3D12_STENCIL_OP backStencilDepthFailOp,
+ D3D12_STENCIL_OP backStencilPassOp,
+ D3D12_COMPARISON_FUNC backStencilFunc,
+ BOOL depthBoundsTestEnable )
+ {
+ DepthEnable = depthEnable;
+ DepthWriteMask = depthWriteMask;
+ DepthFunc = depthFunc;
+ StencilEnable = stencilEnable;
+ StencilReadMask = stencilReadMask;
+ StencilWriteMask = stencilWriteMask;
+ FrontFace.StencilFailOp = frontStencilFailOp;
+ FrontFace.StencilDepthFailOp = frontStencilDepthFailOp;
+ FrontFace.StencilPassOp = frontStencilPassOp;
+ FrontFace.StencilFunc = frontStencilFunc;
+ BackFace.StencilFailOp = backStencilFailOp;
+ BackFace.StencilDepthFailOp = backStencilDepthFailOp;
+ BackFace.StencilPassOp = backStencilPassOp;
+ BackFace.StencilFunc = backStencilFunc;
+ DepthBoundsTestEnable = depthBoundsTestEnable;
+ }
+ ~CD3DX12_DEPTH_STENCIL_DESC1() {}
+ operator const D3D12_DEPTH_STENCIL_DESC1&() const { return *this; }
+ operator const D3D12_DEPTH_STENCIL_DESC() const
+ {
+ D3D12_DEPTH_STENCIL_DESC D;
+ D.DepthEnable = DepthEnable;
+ D.DepthWriteMask = DepthWriteMask;
+ D.DepthFunc = DepthFunc;
+ D.StencilEnable = StencilEnable;
+ D.StencilReadMask = StencilReadMask;
+ D.StencilWriteMask = StencilWriteMask;
+ D.FrontFace.StencilFailOp = FrontFace.StencilFailOp;
+ D.FrontFace.StencilDepthFailOp = FrontFace.StencilDepthFailOp;
+ D.FrontFace.StencilPassOp = FrontFace.StencilPassOp;
+ D.FrontFace.StencilFunc = FrontFace.StencilFunc;
+ D.BackFace.StencilFailOp = BackFace.StencilFailOp;
+ D.BackFace.StencilDepthFailOp = BackFace.StencilDepthFailOp;
+ D.BackFace.StencilPassOp = BackFace.StencilPassOp;
+ D.BackFace.StencilFunc = BackFace.StencilFunc;
+ return D;
+ }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_BLEND_DESC : public D3D12_BLEND_DESC
+{
+ CD3DX12_BLEND_DESC()
+ {}
+ explicit CD3DX12_BLEND_DESC( const D3D12_BLEND_DESC& o ) :
+ D3D12_BLEND_DESC( o )
+ {}
+ explicit CD3DX12_BLEND_DESC( CD3DX12_DEFAULT )
+ {
+ AlphaToCoverageEnable = FALSE;
+ IndependentBlendEnable = FALSE;
+ const D3D12_RENDER_TARGET_BLEND_DESC defaultRenderTargetBlendDesc =
+ {
+ FALSE,FALSE,
+ D3D12_BLEND_ONE, D3D12_BLEND_ZERO, D3D12_BLEND_OP_ADD,
+ D3D12_BLEND_ONE, D3D12_BLEND_ZERO, D3D12_BLEND_OP_ADD,
+ D3D12_LOGIC_OP_NOOP,
+ D3D12_COLOR_WRITE_ENABLE_ALL,
+ };
+ for (UINT i = 0; i < D3D12_SIMULTANEOUS_RENDER_TARGET_COUNT; ++i)
+ RenderTarget[ i ] = defaultRenderTargetBlendDesc;
+ }
+ ~CD3DX12_BLEND_DESC() {}
+ operator const D3D12_BLEND_DESC&() const { return *this; }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_RASTERIZER_DESC : public D3D12_RASTERIZER_DESC
+{
+ CD3DX12_RASTERIZER_DESC()
+ {}
+ explicit CD3DX12_RASTERIZER_DESC( const D3D12_RASTERIZER_DESC& o ) :
+ D3D12_RASTERIZER_DESC( o )
+ {}
+ explicit CD3DX12_RASTERIZER_DESC( CD3DX12_DEFAULT )
+ {
+ FillMode = D3D12_FILL_MODE_SOLID;
+ CullMode = D3D12_CULL_MODE_BACK;
+ FrontCounterClockwise = FALSE;
+ DepthBias = D3D12_DEFAULT_DEPTH_BIAS;
+ DepthBiasClamp = D3D12_DEFAULT_DEPTH_BIAS_CLAMP;
+ SlopeScaledDepthBias = D3D12_DEFAULT_SLOPE_SCALED_DEPTH_BIAS;
+ DepthClipEnable = TRUE;
+ MultisampleEnable = FALSE;
+ AntialiasedLineEnable = FALSE;
+ ForcedSampleCount = 0;
+ ConservativeRaster = D3D12_CONSERVATIVE_RASTERIZATION_MODE_OFF;
+ }
+ explicit CD3DX12_RASTERIZER_DESC(
+ D3D12_FILL_MODE fillMode,
+ D3D12_CULL_MODE cullMode,
+ BOOL frontCounterClockwise,
+ INT depthBias,
+ FLOAT depthBiasClamp,
+ FLOAT slopeScaledDepthBias,
+ BOOL depthClipEnable,
+ BOOL multisampleEnable,
+ BOOL antialiasedLineEnable,
+ UINT forcedSampleCount,
+ D3D12_CONSERVATIVE_RASTERIZATION_MODE conservativeRaster)
+ {
+ FillMode = fillMode;
+ CullMode = cullMode;
+ FrontCounterClockwise = frontCounterClockwise;
+ DepthBias = depthBias;
+ DepthBiasClamp = depthBiasClamp;
+ SlopeScaledDepthBias = slopeScaledDepthBias;
+ DepthClipEnable = depthClipEnable;
+ MultisampleEnable = multisampleEnable;
+ AntialiasedLineEnable = antialiasedLineEnable;
+ ForcedSampleCount = forcedSampleCount;
+ ConservativeRaster = conservativeRaster;
+ }
+ ~CD3DX12_RASTERIZER_DESC() {}
+ operator const D3D12_RASTERIZER_DESC&() const { return *this; }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_RESOURCE_ALLOCATION_INFO : public D3D12_RESOURCE_ALLOCATION_INFO
+{
+ CD3DX12_RESOURCE_ALLOCATION_INFO()
+ {}
+ explicit CD3DX12_RESOURCE_ALLOCATION_INFO( const D3D12_RESOURCE_ALLOCATION_INFO& o ) :
+ D3D12_RESOURCE_ALLOCATION_INFO( o )
+ {}
+ CD3DX12_RESOURCE_ALLOCATION_INFO(
+ UINT64 size,
+ UINT64 alignment )
+ {
+ SizeInBytes = size;
+ Alignment = alignment;
+ }
+ operator const D3D12_RESOURCE_ALLOCATION_INFO&() const { return *this; }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_HEAP_PROPERTIES : public D3D12_HEAP_PROPERTIES
+{
+ CD3DX12_HEAP_PROPERTIES()
+ {}
+ explicit CD3DX12_HEAP_PROPERTIES(const D3D12_HEAP_PROPERTIES &o) :
+ D3D12_HEAP_PROPERTIES(o)
+ {}
+ CD3DX12_HEAP_PROPERTIES(
+ D3D12_CPU_PAGE_PROPERTY cpuPageProperty,
+ D3D12_MEMORY_POOL memoryPoolPreference,
+ UINT creationNodeMask = 1,
+ UINT nodeMask = 1 )
+ {
+ Type = D3D12_HEAP_TYPE_CUSTOM;
+ CPUPageProperty = cpuPageProperty;
+ MemoryPoolPreference = memoryPoolPreference;
+ CreationNodeMask = creationNodeMask;
+ VisibleNodeMask = nodeMask;
+ }
+ explicit CD3DX12_HEAP_PROPERTIES(
+ D3D12_HEAP_TYPE type,
+ UINT creationNodeMask = 1,
+ UINT nodeMask = 1 )
+ {
+ Type = type;
+ CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN;
+ MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN;
+ CreationNodeMask = creationNodeMask;
+ VisibleNodeMask = nodeMask;
+ }
+ operator const D3D12_HEAP_PROPERTIES&() const { return *this; }
+ bool IsCPUAccessible() const
+ {
+ return Type == D3D12_HEAP_TYPE_UPLOAD || Type == D3D12_HEAP_TYPE_READBACK || (Type == D3D12_HEAP_TYPE_CUSTOM &&
+ (CPUPageProperty == D3D12_CPU_PAGE_PROPERTY_WRITE_COMBINE || CPUPageProperty == D3D12_CPU_PAGE_PROPERTY_WRITE_BACK));
+ }
+};
+inline bool operator==( const D3D12_HEAP_PROPERTIES& l, const D3D12_HEAP_PROPERTIES& r )
+{
+ return l.Type == r.Type && l.CPUPageProperty == r.CPUPageProperty &&
+ l.MemoryPoolPreference == r.MemoryPoolPreference &&
+ l.CreationNodeMask == r.CreationNodeMask &&
+ l.VisibleNodeMask == r.VisibleNodeMask;
+}
+inline bool operator!=( const D3D12_HEAP_PROPERTIES& l, const D3D12_HEAP_PROPERTIES& r )
+{ return !( l == r ); }
+
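
CD3DX12_HEAP_PROPERTIES is used wherever committed resources are created; the single-argument D3D12_HEAP_TYPE constructor covers the common default/upload/readback heaps. As a sketch (the helper name and buffer size are illustrative, not from hellovr_dx12), creating a CPU-writable upload buffer with it looks like:

// Sketch only: create an upload-heap buffer, letting CD3DX12_HEAP_PROPERTIES
// fill in the heap description.
#include <d3d12.h>
#include "d3dx12.h"

HRESULT CreateUploadBuffer( ID3D12Device *pDevice, UINT64 nBytes, ID3D12Resource **ppBuffer )
{
    CD3DX12_HEAP_PROPERTIES heapProps( D3D12_HEAP_TYPE_UPLOAD );

    D3D12_RESOURCE_DESC desc = {};
    desc.Dimension        = D3D12_RESOURCE_DIMENSION_BUFFER;
    desc.Width            = nBytes;
    desc.Height           = 1;
    desc.DepthOrArraySize = 1;
    desc.MipLevels        = 1;
    desc.Format           = DXGI_FORMAT_UNKNOWN;
    desc.SampleDesc.Count = 1;
    desc.Layout           = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;

    return pDevice->CreateCommittedResource(
        &heapProps,
        D3D12_HEAP_FLAG_NONE,
        &desc,
        D3D12_RESOURCE_STATE_GENERIC_READ,   // required initial state for upload heaps
        nullptr,
        IID_PPV_ARGS( ppBuffer ) );
}
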
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_HEAP_DESC : public D3D12_HEAP_DESC
+{
+ CD3DX12_HEAP_DESC()
+ {}
+ explicit CD3DX12_HEAP_DESC(const D3D12_HEAP_DESC &o) :
+ D3D12_HEAP_DESC(o)
+ {}
+ CD3DX12_HEAP_DESC(
+ UINT64 size,
+ D3D12_HEAP_PROPERTIES properties,
+ UINT64 alignment = 0,
+ D3D12_HEAP_FLAGS flags = D3D12_HEAP_FLAG_NONE )
+ {
+ SizeInBytes = size;
+ Properties = properties;
+ Alignment = alignment;
+ Flags = flags;
+ }
+ CD3DX12_HEAP_DESC(
+ UINT64 size,
+ D3D12_HEAP_TYPE type,
+ UINT64 alignment = 0,
+ D3D12_HEAP_FLAGS flags = D3D12_HEAP_FLAG_NONE )
+ {
+ SizeInBytes = size;
+ Properties = CD3DX12_HEAP_PROPERTIES( type );
+ Alignment = alignment;
+ Flags = flags;
+ }
+ CD3DX12_HEAP_DESC(
+ UINT64 size,
+ D3D12_CPU_PAGE_PROPERTY cpuPageProperty,
+ D3D12_MEMORY_POOL memoryPoolPreference,
+ UINT64 alignment = 0,
+ D3D12_HEAP_FLAGS flags = D3D12_HEAP_FLAG_NONE )
+ {
+ SizeInBytes = size;
+ Properties = CD3DX12_HEAP_PROPERTIES( cpuPageProperty, memoryPoolPreference );
+ Alignment = alignment;
+ Flags = flags;
+ }
+ CD3DX12_HEAP_DESC(
+ const D3D12_RESOURCE_ALLOCATION_INFO& resAllocInfo,
+ D3D12_HEAP_PROPERTIES properties,
+ D3D12_HEAP_FLAGS flags = D3D12_HEAP_FLAG_NONE )
+ {
+ SizeInBytes = resAllocInfo.SizeInBytes;
+ Properties = properties;
+ Alignment = resAllocInfo.Alignment;
+ Flags = flags;
+ }
+ CD3DX12_HEAP_DESC(
+ const D3D12_RESOURCE_ALLOCATION_INFO& resAllocInfo,
+ D3D12_HEAP_TYPE type,
+ D3D12_HEAP_FLAGS flags = D3D12_HEAP_FLAG_NONE )
+ {
+ SizeInBytes = resAllocInfo.SizeInBytes;
+ Properties = CD3DX12_HEAP_PROPERTIES( type );
+ Alignment = resAllocInfo.Alignment;
+ Flags = flags;
+ }
+ CD3DX12_HEAP_DESC(
+ const D3D12_RESOURCE_ALLOCATION_INFO& resAllocInfo,
+ D3D12_CPU_PAGE_PROPERTY cpuPageProperty,
+ D3D12_MEMORY_POOL memoryPoolPreference,
+ D3D12_HEAP_FLAGS flags = D3D12_HEAP_FLAG_NONE )
+ {
+ SizeInBytes = resAllocInfo.SizeInBytes;
+ Properties = CD3DX12_HEAP_PROPERTIES( cpuPageProperty, memoryPoolPreference );
+ Alignment = resAllocInfo.Alignment;
+ Flags = flags;
+ }
+ operator const D3D12_HEAP_DESC&() const { return *this; }
+ bool IsCPUAccessible() const
+ { return static_cast< const CD3DX12_HEAP_PROPERTIES* >( &Properties )->IsCPUAccessible(); }
+};
+inline bool operator==( const D3D12_HEAP_DESC& l, const D3D12_HEAP_DESC& r )
+{
+ return l.SizeInBytes == r.SizeInBytes &&
+ l.Properties == r.Properties &&
+ l.Alignment == r.Alignment &&
+ l.Flags == r.Flags;
+}
+inline bool operator!=( const D3D12_HEAP_DESC& l, const D3D12_HEAP_DESC& r )
+{ return !( l == r ); }
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_CLEAR_VALUE : public D3D12_CLEAR_VALUE
+{
+ CD3DX12_CLEAR_VALUE()
+ {}
+ explicit CD3DX12_CLEAR_VALUE(const D3D12_CLEAR_VALUE &o) :
+ D3D12_CLEAR_VALUE(o)
+ {}
+ CD3DX12_CLEAR_VALUE(
+ DXGI_FORMAT format,
+ const FLOAT color[4] )
+ {
+ Format = format;
+ memcpy( Color, color, sizeof( Color ) );
+ }
+ CD3DX12_CLEAR_VALUE(
+ DXGI_FORMAT format,
+ FLOAT depth,
+ UINT8 stencil )
+ {
+ Format = format;
+ /* Use memcpy to preserve NAN values */
+ memcpy( &DepthStencil.Depth, &depth, sizeof( depth ) );
+ DepthStencil.Stencil = stencil;
+ }
+ operator const D3D12_CLEAR_VALUE&() const { return *this; }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_RANGE : public D3D12_RANGE
+{
+ CD3DX12_RANGE()
+ {}
+ explicit CD3DX12_RANGE(const D3D12_RANGE &o) :
+ D3D12_RANGE(o)
+ {}
+ CD3DX12_RANGE(
+ SIZE_T begin,
+ SIZE_T end )
+ {
+ Begin = begin;
+ End = end;
+ }
+ operator const D3D12_RANGE&() const { return *this; }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_RANGE_UINT64 : public D3D12_RANGE_UINT64
+{
+ CD3DX12_RANGE_UINT64()
+ {}
+ explicit CD3DX12_RANGE_UINT64(const D3D12_RANGE_UINT64 &o) :
+ D3D12_RANGE_UINT64(o)
+ {}
+ CD3DX12_RANGE_UINT64(
+ UINT64 begin,
+ UINT64 end )
+ {
+ Begin = begin;
+ End = end;
+ }
+ operator const D3D12_RANGE_UINT64&() const { return *this; }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_SUBRESOURCE_RANGE_UINT64 : public D3D12_SUBRESOURCE_RANGE_UINT64
+{
+ CD3DX12_SUBRESOURCE_RANGE_UINT64()
+ {}
+ explicit CD3DX12_SUBRESOURCE_RANGE_UINT64(const D3D12_SUBRESOURCE_RANGE_UINT64 &o) :
+ D3D12_SUBRESOURCE_RANGE_UINT64(o)
+ {}
+ CD3DX12_SUBRESOURCE_RANGE_UINT64(
+ UINT subresource,
+ const D3D12_RANGE_UINT64& range )
+ {
+ Subresource = subresource;
+ Range = range;
+ }
+ CD3DX12_SUBRESOURCE_RANGE_UINT64(
+ UINT subresource,
+ UINT64 begin,
+ UINT64 end )
+ {
+ Subresource = subresource;
+ Range.Begin = begin;
+ Range.End = end;
+ }
+ operator const D3D12_SUBRESOURCE_RANGE_UINT64&() const { return *this; }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_SHADER_BYTECODE : public D3D12_SHADER_BYTECODE
+{
+ CD3DX12_SHADER_BYTECODE()
+ {}
+ explicit CD3DX12_SHADER_BYTECODE(const D3D12_SHADER_BYTECODE &o) :
+ D3D12_SHADER_BYTECODE(o)
+ {}
+ CD3DX12_SHADER_BYTECODE(
+ _In_ ID3DBlob* pShaderBlob )
+ {
+ pShaderBytecode = pShaderBlob->GetBufferPointer();
+ BytecodeLength = pShaderBlob->GetBufferSize();
+ }
+ CD3DX12_SHADER_BYTECODE(
+ const void* _pShaderBytecode,
+ SIZE_T bytecodeLength )
+ {
+ pShaderBytecode = _pShaderBytecode;
+ BytecodeLength = bytecodeLength;
+ }
+ operator const D3D12_SHADER_BYTECODE&() const { return *this; }
+};
+
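
CD3DX12_SHADER_BYTECODE wraps either an ID3DBlob or a raw pointer/length pair, which makes it convenient for plugging compiled shaders into a pipeline state description alongside the CD3DX12_*( D3D12_DEFAULT ) helpers above. A sketch with placeholder names (root signature, blobs and formats are illustrative, not the sample's actual pipeline state):

// Sketch with placeholder inputs: wiring shader blobs and default-state helpers
// into a graphics PSO description. Input layout and other fields are omitted.
#include <d3d12.h>
#include "d3dx12.h"

D3D12_GRAPHICS_PIPELINE_STATE_DESC DescribePso( ID3D12RootSignature *pRootSignature,
                                                ID3DBlob *pVSBlob, ID3DBlob *pPSBlob )
{
    D3D12_GRAPHICS_PIPELINE_STATE_DESC desc = {};
    desc.pRootSignature        = pRootSignature;
    desc.VS                    = CD3DX12_SHADER_BYTECODE( pVSBlob );
    desc.PS                    = CD3DX12_SHADER_BYTECODE( pPSBlob );
    desc.RasterizerState       = CD3DX12_RASTERIZER_DESC( D3D12_DEFAULT );
    desc.BlendState            = CD3DX12_BLEND_DESC( D3D12_DEFAULT );
    desc.DepthStencilState     = CD3DX12_DEPTH_STENCIL_DESC( D3D12_DEFAULT );
    desc.SampleMask            = 0xFFFFFFFFu;
    desc.PrimitiveTopologyType = D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE;
    desc.NumRenderTargets      = 1;
    desc.RTVFormats[ 0 ]       = DXGI_FORMAT_R8G8B8A8_UNORM;
    desc.DSVFormat             = DXGI_FORMAT_D32_FLOAT;
    desc.SampleDesc.Count      = 1;
    return desc;
}
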
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_TILED_RESOURCE_COORDINATE : public D3D12_TILED_RESOURCE_COORDINATE
+{
+ CD3DX12_TILED_RESOURCE_COORDINATE()
+ {}
+ explicit CD3DX12_TILED_RESOURCE_COORDINATE(const D3D12_TILED_RESOURCE_COORDINATE &o) :
+ D3D12_TILED_RESOURCE_COORDINATE(o)
+ {}
+ CD3DX12_TILED_RESOURCE_COORDINATE(
+ UINT x,
+ UINT y,
+ UINT z,
+ UINT subresource )
+ {
+ X = x;
+ Y = y;
+ Z = z;
+ Subresource = subresource;
+ }
+ operator const D3D12_TILED_RESOURCE_COORDINATE&() const { return *this; }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_TILE_REGION_SIZE : public D3D12_TILE_REGION_SIZE
+{
+ CD3DX12_TILE_REGION_SIZE()
+ {}
+ explicit CD3DX12_TILE_REGION_SIZE(const D3D12_TILE_REGION_SIZE &o) :
+ D3D12_TILE_REGION_SIZE(o)
+ {}
+ CD3DX12_TILE_REGION_SIZE(
+ UINT numTiles,
+ BOOL useBox,
+ UINT width,
+ UINT16 height,
+ UINT16 depth )
+ {
+ NumTiles = numTiles;
+ UseBox = useBox;
+ Width = width;
+ Height = height;
+ Depth = depth;
+ }
+ operator const D3D12_TILE_REGION_SIZE&() const { return *this; }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_SUBRESOURCE_TILING : public D3D12_SUBRESOURCE_TILING
+{
+ CD3DX12_SUBRESOURCE_TILING()
+ {}
+ explicit CD3DX12_SUBRESOURCE_TILING(const D3D12_SUBRESOURCE_TILING &o) :
+ D3D12_SUBRESOURCE_TILING(o)
+ {}
+ CD3DX12_SUBRESOURCE_TILING(
+ UINT widthInTiles,
+ UINT16 heightInTiles,
+ UINT16 depthInTiles,
+ UINT startTileIndexInOverallResource )
+ {
+ WidthInTiles = widthInTiles;
+ HeightInTiles = heightInTiles;
+ DepthInTiles = depthInTiles;
+ StartTileIndexInOverallResource = startTileIndexInOverallResource;
+ }
+ operator const D3D12_SUBRESOURCE_TILING&() const { return *this; }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_TILE_SHAPE : public D3D12_TILE_SHAPE
+{
+ CD3DX12_TILE_SHAPE()
+ {}
+ explicit CD3DX12_TILE_SHAPE(const D3D12_TILE_SHAPE &o) :
+ D3D12_TILE_SHAPE(o)
+ {}
+ CD3DX12_TILE_SHAPE(
+ UINT widthInTexels,
+ UINT heightInTexels,
+ UINT depthInTexels )
+ {
+ WidthInTexels = widthInTexels;
+ HeightInTexels = heightInTexels;
+ DepthInTexels = depthInTexels;
+ }
+ operator const D3D12_TILE_SHAPE&() const { return *this; }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_RESOURCE_BARRIER : public D3D12_RESOURCE_BARRIER
+{
+ CD3DX12_RESOURCE_BARRIER()
+ {}
+ explicit CD3DX12_RESOURCE_BARRIER(const D3D12_RESOURCE_BARRIER &o) :
+ D3D12_RESOURCE_BARRIER(o)
+ {}
+ static inline CD3DX12_RESOURCE_BARRIER Transition(
+ _In_ ID3D12Resource* pResource,
+ D3D12_RESOURCE_STATES stateBefore,
+ D3D12_RESOURCE_STATES stateAfter,
+ UINT subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES,
+ D3D12_RESOURCE_BARRIER_FLAGS flags = D3D12_RESOURCE_BARRIER_FLAG_NONE)
+ {
+ CD3DX12_RESOURCE_BARRIER result;
+ ZeroMemory(&result, sizeof(result));
+ D3D12_RESOURCE_BARRIER &barrier = result;
+ result.Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION;
+ result.Flags = flags;
+ barrier.Transition.pResource = pResource;
+ barrier.Transition.StateBefore = stateBefore;
+ barrier.Transition.StateAfter = stateAfter;
+ barrier.Transition.Subresource = subresource;
+ return result;
+ }
+ static inline CD3DX12_RESOURCE_BARRIER Aliasing(
+ _In_ ID3D12Resource* pResourceBefore,
+ _In_ ID3D12Resource* pResourceAfter)
+ {
+ CD3DX12_RESOURCE_BARRIER result;
+ ZeroMemory(&result, sizeof(result));
+ D3D12_RESOURCE_BARRIER &barrier = result;
+ result.Type = D3D12_RESOURCE_BARRIER_TYPE_ALIASING;
+ barrier.Aliasing.pResourceBefore = pResourceBefore;
+ barrier.Aliasing.pResourceAfter = pResourceAfter;
+ return result;
+ }
+ static inline CD3DX12_RESOURCE_BARRIER UAV(
+ _In_ ID3D12Resource* pResource)
+ {
+ CD3DX12_RESOURCE_BARRIER result;
+ ZeroMemory(&result, sizeof(result));
+ D3D12_RESOURCE_BARRIER &barrier = result;
+ result.Type = D3D12_RESOURCE_BARRIER_TYPE_UAV;
+ barrier.UAV.pResource = pResource;
+ return result;
+ }
+ operator const D3D12_RESOURCE_BARRIER&() const { return *this; }
+};
+
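
The static Transition/Aliasing/UAV helpers build a fully initialized D3D12_RESOURCE_BARRIER in one expression. A minimal sketch of the typical end-of-frame transition (command list and back buffer are assumed to exist; this is not lifted from hellovr_dx12):

// Sketch: transition the current back buffer from render-target to present
// state before executing the command list and presenting.
#include <d3d12.h>
#include "d3dx12.h"

void TransitionToPresent( ID3D12GraphicsCommandList *pCmdList, ID3D12Resource *pBackBuffer )
{
    CD3DX12_RESOURCE_BARRIER barrier = CD3DX12_RESOURCE_BARRIER::Transition(
        pBackBuffer,
        D3D12_RESOURCE_STATE_RENDER_TARGET,
        D3D12_RESOURCE_STATE_PRESENT );
    pCmdList->ResourceBarrier( 1, &barrier );
}
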
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_PACKED_MIP_INFO : public D3D12_PACKED_MIP_INFO
+{
+ CD3DX12_PACKED_MIP_INFO()
+ {}
+ explicit CD3DX12_PACKED_MIP_INFO(const D3D12_PACKED_MIP_INFO &o) :
+ D3D12_PACKED_MIP_INFO(o)
+ {}
+ CD3DX12_PACKED_MIP_INFO(
+ UINT8 numStandardMips,
+ UINT8 numPackedMips,
+ UINT numTilesForPackedMips,
+ UINT startTileIndexInOverallResource )
+ {
+ NumStandardMips = numStandardMips;
+ NumPackedMips = numPackedMips;
+ NumTilesForPackedMips = numTilesForPackedMips;
+ StartTileIndexInOverallResource = startTileIndexInOverallResource;
+ }
+ operator const D3D12_PACKED_MIP_INFO&() const { return *this; }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_SUBRESOURCE_FOOTPRINT : public D3D12_SUBRESOURCE_FOOTPRINT
+{
+ CD3DX12_SUBRESOURCE_FOOTPRINT()
+ {}
+ explicit CD3DX12_SUBRESOURCE_FOOTPRINT(const D3D12_SUBRESOURCE_FOOTPRINT &o) :
+ D3D12_SUBRESOURCE_FOOTPRINT(o)
+ {}
+ CD3DX12_SUBRESOURCE_FOOTPRINT(
+ DXGI_FORMAT format,
+ UINT width,
+ UINT height,
+ UINT depth,
+ UINT rowPitch )
+ {
+ Format = format;
+ Width = width;
+ Height = height;
+ Depth = depth;
+ RowPitch = rowPitch;
+ }
+ explicit CD3DX12_SUBRESOURCE_FOOTPRINT(
+ const D3D12_RESOURCE_DESC& resDesc,
+ UINT rowPitch )
+ {
+ Format = resDesc.Format;
+ Width = UINT( resDesc.Width );
+ Height = resDesc.Height;
+ Depth = (resDesc.Dimension == D3D12_RESOURCE_DIMENSION_TEXTURE3D ? resDesc.DepthOrArraySize : 1);
+ RowPitch = rowPitch;
+ }
+ operator const D3D12_SUBRESOURCE_FOOTPRINT&() const { return *this; }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_TEXTURE_COPY_LOCATION : public D3D12_TEXTURE_COPY_LOCATION
+{
+ CD3DX12_TEXTURE_COPY_LOCATION()
+ {}
+ explicit CD3DX12_TEXTURE_COPY_LOCATION(const D3D12_TEXTURE_COPY_LOCATION &o) :
+ D3D12_TEXTURE_COPY_LOCATION(o)
+ {}
+ CD3DX12_TEXTURE_COPY_LOCATION(ID3D12Resource* pRes) { pResource = pRes; }
+ CD3DX12_TEXTURE_COPY_LOCATION(ID3D12Resource* pRes, D3D12_PLACED_SUBRESOURCE_FOOTPRINT const& Footprint)
+ {
+ pResource = pRes;
+ Type = D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT;
+ PlacedFootprint = Footprint;
+ }
+ CD3DX12_TEXTURE_COPY_LOCATION(ID3D12Resource* pRes, UINT Sub)
+ {
+ pResource = pRes;
+ Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX;
+ SubresourceIndex = Sub;
+ }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_DESCRIPTOR_RANGE : public D3D12_DESCRIPTOR_RANGE
+{
+ CD3DX12_DESCRIPTOR_RANGE() { }
+ explicit CD3DX12_DESCRIPTOR_RANGE(const D3D12_DESCRIPTOR_RANGE &o) :
+ D3D12_DESCRIPTOR_RANGE(o)
+ {}
+ CD3DX12_DESCRIPTOR_RANGE(
+ D3D12_DESCRIPTOR_RANGE_TYPE rangeType,
+ UINT numDescriptors,
+ UINT baseShaderRegister,
+ UINT registerSpace = 0,
+ UINT offsetInDescriptorsFromTableStart =
+ D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND)
+ {
+ Init(rangeType, numDescriptors, baseShaderRegister, registerSpace, offsetInDescriptorsFromTableStart);
+ }
+
+ inline void Init(
+ D3D12_DESCRIPTOR_RANGE_TYPE rangeType,
+ UINT numDescriptors,
+ UINT baseShaderRegister,
+ UINT registerSpace = 0,
+ UINT offsetInDescriptorsFromTableStart =
+ D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND)
+ {
+ Init(*this, rangeType, numDescriptors, baseShaderRegister, registerSpace, offsetInDescriptorsFromTableStart);
+ }
+
+ static inline void Init(
+ _Out_ D3D12_DESCRIPTOR_RANGE &range,
+ D3D12_DESCRIPTOR_RANGE_TYPE rangeType,
+ UINT numDescriptors,
+ UINT baseShaderRegister,
+ UINT registerSpace = 0,
+ UINT offsetInDescriptorsFromTableStart =
+ D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND)
+ {
+ range.RangeType = rangeType;
+ range.NumDescriptors = numDescriptors;
+ range.BaseShaderRegister = baseShaderRegister;
+ range.RegisterSpace = registerSpace;
+ range.OffsetInDescriptorsFromTableStart = offsetInDescriptorsFromTableStart;
+ }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_ROOT_DESCRIPTOR_TABLE : public D3D12_ROOT_DESCRIPTOR_TABLE
+{
+ CD3DX12_ROOT_DESCRIPTOR_TABLE() {}
+ explicit CD3DX12_ROOT_DESCRIPTOR_TABLE(const D3D12_ROOT_DESCRIPTOR_TABLE &o) :
+ D3D12_ROOT_DESCRIPTOR_TABLE(o)
+ {}
+ CD3DX12_ROOT_DESCRIPTOR_TABLE(
+ UINT numDescriptorRanges,
+ _In_reads_opt_(numDescriptorRanges) const D3D12_DESCRIPTOR_RANGE* _pDescriptorRanges)
+ {
+ Init(numDescriptorRanges, _pDescriptorRanges);
+ }
+
+ inline void Init(
+ UINT numDescriptorRanges,
+ _In_reads_opt_(numDescriptorRanges) const D3D12_DESCRIPTOR_RANGE* _pDescriptorRanges)
+ {
+ Init(*this, numDescriptorRanges, _pDescriptorRanges);
+ }
+
+ static inline void Init(
+ _Out_ D3D12_ROOT_DESCRIPTOR_TABLE &rootDescriptorTable,
+ UINT numDescriptorRanges,
+ _In_reads_opt_(numDescriptorRanges) const D3D12_DESCRIPTOR_RANGE* _pDescriptorRanges)
+ {
+ rootDescriptorTable.NumDescriptorRanges = numDescriptorRanges;
+ rootDescriptorTable.pDescriptorRanges = _pDescriptorRanges;
+ }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_ROOT_CONSTANTS : public D3D12_ROOT_CONSTANTS
+{
+ CD3DX12_ROOT_CONSTANTS() {}
+ explicit CD3DX12_ROOT_CONSTANTS(const D3D12_ROOT_CONSTANTS &o) :
+ D3D12_ROOT_CONSTANTS(o)
+ {}
+ CD3DX12_ROOT_CONSTANTS(
+ UINT num32BitValues,
+ UINT shaderRegister,
+ UINT registerSpace = 0)
+ {
+ Init(num32BitValues, shaderRegister, registerSpace);
+ }
+
+ inline void Init(
+ UINT num32BitValues,
+ UINT shaderRegister,
+ UINT registerSpace = 0)
+ {
+ Init(*this, num32BitValues, shaderRegister, registerSpace);
+ }
+
+ static inline void Init(
+ _Out_ D3D12_ROOT_CONSTANTS &rootConstants,
+ UINT num32BitValues,
+ UINT shaderRegister,
+ UINT registerSpace = 0)
+ {
+ rootConstants.Num32BitValues = num32BitValues;
+ rootConstants.ShaderRegister = shaderRegister;
+ rootConstants.RegisterSpace = registerSpace;
+ }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_ROOT_DESCRIPTOR : public D3D12_ROOT_DESCRIPTOR
+{
+ CD3DX12_ROOT_DESCRIPTOR() {}
+ explicit CD3DX12_ROOT_DESCRIPTOR(const D3D12_ROOT_DESCRIPTOR &o) :
+ D3D12_ROOT_DESCRIPTOR(o)
+ {}
+ CD3DX12_ROOT_DESCRIPTOR(
+ UINT shaderRegister,
+ UINT registerSpace = 0)
+ {
+ Init(shaderRegister, registerSpace);
+ }
+
+ inline void Init(
+ UINT shaderRegister,
+ UINT registerSpace = 0)
+ {
+ Init(*this, shaderRegister, registerSpace);
+ }
+
+ static inline void Init(_Out_ D3D12_ROOT_DESCRIPTOR &table, UINT shaderRegister, UINT registerSpace = 0)
+ {
+ table.ShaderRegister = shaderRegister;
+ table.RegisterSpace = registerSpace;
+ }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_ROOT_PARAMETER : public D3D12_ROOT_PARAMETER
+{
+ CD3DX12_ROOT_PARAMETER() {}
+ explicit CD3DX12_ROOT_PARAMETER(const D3D12_ROOT_PARAMETER &o) :
+ D3D12_ROOT_PARAMETER(o)
+ {}
+
+ static inline void InitAsDescriptorTable(
+ _Out_ D3D12_ROOT_PARAMETER &rootParam,
+ UINT numDescriptorRanges,
+ _In_reads_(numDescriptorRanges) const D3D12_DESCRIPTOR_RANGE* pDescriptorRanges,
+ D3D12_SHADER_VISIBILITY visibility = D3D12_SHADER_VISIBILITY_ALL)
+ {
+ rootParam.ParameterType = D3D12_ROOT_PARAMETER_TYPE_DESCRIPTOR_TABLE;
+ rootParam.ShaderVisibility = visibility;
+ CD3DX12_ROOT_DESCRIPTOR_TABLE::Init(rootParam.DescriptorTable, numDescriptorRanges, pDescriptorRanges);
+ }
+
+ static inline void InitAsConstants(
+ _Out_ D3D12_ROOT_PARAMETER &rootParam,
+ UINT num32BitValues,
+ UINT shaderRegister,
+ UINT registerSpace = 0,
+ D3D12_SHADER_VISIBILITY visibility = D3D12_SHADER_VISIBILITY_ALL)
+ {
+ rootParam.ParameterType = D3D12_ROOT_PARAMETER_TYPE_32BIT_CONSTANTS;
+ rootParam.ShaderVisibility = visibility;
+ CD3DX12_ROOT_CONSTANTS::Init(rootParam.Constants, num32BitValues, shaderRegister, registerSpace);
+ }
+
+ static inline void InitAsConstantBufferView(
+ _Out_ D3D12_ROOT_PARAMETER &rootParam,
+ UINT shaderRegister,
+ UINT registerSpace = 0,
+ D3D12_SHADER_VISIBILITY visibility = D3D12_SHADER_VISIBILITY_ALL)
+ {
+ rootParam.ParameterType = D3D12_ROOT_PARAMETER_TYPE_CBV;
+ rootParam.ShaderVisibility = visibility;
+ CD3DX12_ROOT_DESCRIPTOR::Init(rootParam.Descriptor, shaderRegister, registerSpace);
+ }
+
+ static inline void InitAsShaderResourceView(
+ _Out_ D3D12_ROOT_PARAMETER &rootParam,
+ UINT shaderRegister,
+ UINT registerSpace = 0,
+ D3D12_SHADER_VISIBILITY visibility = D3D12_SHADER_VISIBILITY_ALL)
+ {
+ rootParam.ParameterType = D3D12_ROOT_PARAMETER_TYPE_SRV;
+ rootParam.ShaderVisibility = visibility;
+ CD3DX12_ROOT_DESCRIPTOR::Init(rootParam.Descriptor, shaderRegister, registerSpace);
+ }
+
+ static inline void InitAsUnorderedAccessView(
+ _Out_ D3D12_ROOT_PARAMETER &rootParam,
+ UINT shaderRegister,
+ UINT registerSpace = 0,
+ D3D12_SHADER_VISIBILITY visibility = D3D12_SHADER_VISIBILITY_ALL)
+ {
+ rootParam.ParameterType = D3D12_ROOT_PARAMETER_TYPE_UAV;
+ rootParam.ShaderVisibility = visibility;
+ CD3DX12_ROOT_DESCRIPTOR::Init(rootParam.Descriptor, shaderRegister, registerSpace);
+ }
+
+ inline void InitAsDescriptorTable(
+ UINT numDescriptorRanges,
+ _In_reads_(numDescriptorRanges) const D3D12_DESCRIPTOR_RANGE* pDescriptorRanges,
+ D3D12_SHADER_VISIBILITY visibility = D3D12_SHADER_VISIBILITY_ALL)
+ {
+ InitAsDescriptorTable(*this, numDescriptorRanges, pDescriptorRanges, visibility);
+ }
+
+ inline void InitAsConstants(
+ UINT num32BitValues,
+ UINT shaderRegister,
+ UINT registerSpace = 0,
+ D3D12_SHADER_VISIBILITY visibility = D3D12_SHADER_VISIBILITY_ALL)
+ {
+ InitAsConstants(*this, num32BitValues, shaderRegister, registerSpace, visibility);
+ }
+
+ inline void InitAsConstantBufferView(
+ UINT shaderRegister,
+ UINT registerSpace = 0,
+ D3D12_SHADER_VISIBILITY visibility = D3D12_SHADER_VISIBILITY_ALL)
+ {
+ InitAsConstantBufferView(*this, shaderRegister, registerSpace, visibility);
+ }
+
+ inline void InitAsShaderResourceView(
+ UINT shaderRegister,
+ UINT registerSpace = 0,
+ D3D12_SHADER_VISIBILITY visibility = D3D12_SHADER_VISIBILITY_ALL)
+ {
+ InitAsShaderResourceView(*this, shaderRegister, registerSpace, visibility);
+ }
+
+ inline void InitAsUnorderedAccessView(
+ UINT shaderRegister,
+ UINT registerSpace = 0,
+ D3D12_SHADER_VISIBILITY visibility = D3D12_SHADER_VISIBILITY_ALL)
+ {
+ InitAsUnorderedAccessView(*this, shaderRegister, registerSpace, visibility);
+ }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_STATIC_SAMPLER_DESC : public D3D12_STATIC_SAMPLER_DESC
+{
+ CD3DX12_STATIC_SAMPLER_DESC() {}
+ explicit CD3DX12_STATIC_SAMPLER_DESC(const D3D12_STATIC_SAMPLER_DESC &o) :
+ D3D12_STATIC_SAMPLER_DESC(o)
+ {}
+ CD3DX12_STATIC_SAMPLER_DESC(
+ UINT shaderRegister,
+ D3D12_FILTER filter = D3D12_FILTER_ANISOTROPIC,
+ D3D12_TEXTURE_ADDRESS_MODE addressU = D3D12_TEXTURE_ADDRESS_MODE_WRAP,
+ D3D12_TEXTURE_ADDRESS_MODE addressV = D3D12_TEXTURE_ADDRESS_MODE_WRAP,
+ D3D12_TEXTURE_ADDRESS_MODE addressW = D3D12_TEXTURE_ADDRESS_MODE_WRAP,
+ FLOAT mipLODBias = 0,
+ UINT maxAnisotropy = 16,
+ D3D12_COMPARISON_FUNC comparisonFunc = D3D12_COMPARISON_FUNC_LESS_EQUAL,
+ D3D12_STATIC_BORDER_COLOR borderColor = D3D12_STATIC_BORDER_COLOR_OPAQUE_WHITE,
+ FLOAT minLOD = 0.f,
+ FLOAT maxLOD = D3D12_FLOAT32_MAX,
+ D3D12_SHADER_VISIBILITY shaderVisibility = D3D12_SHADER_VISIBILITY_ALL,
+ UINT registerSpace = 0)
+ {
+ Init(
+ shaderRegister,
+ filter,
+ addressU,
+ addressV,
+ addressW,
+ mipLODBias,
+ maxAnisotropy,
+ comparisonFunc,
+ borderColor,
+ minLOD,
+ maxLOD,
+ shaderVisibility,
+ registerSpace);
+ }
+
+ static inline void Init(
+ _Out_ D3D12_STATIC_SAMPLER_DESC &samplerDesc,
+ UINT shaderRegister,
+ D3D12_FILTER filter = D3D12_FILTER_ANISOTROPIC,
+ D3D12_TEXTURE_ADDRESS_MODE addressU = D3D12_TEXTURE_ADDRESS_MODE_WRAP,
+ D3D12_TEXTURE_ADDRESS_MODE addressV = D3D12_TEXTURE_ADDRESS_MODE_WRAP,
+ D3D12_TEXTURE_ADDRESS_MODE addressW = D3D12_TEXTURE_ADDRESS_MODE_WRAP,
+ FLOAT mipLODBias = 0,
+ UINT maxAnisotropy = 16,
+ D3D12_COMPARISON_FUNC comparisonFunc = D3D12_COMPARISON_FUNC_LESS_EQUAL,
+ D3D12_STATIC_BORDER_COLOR borderColor = D3D12_STATIC_BORDER_COLOR_OPAQUE_WHITE,
+ FLOAT minLOD = 0.f,
+ FLOAT maxLOD = D3D12_FLOAT32_MAX,
+ D3D12_SHADER_VISIBILITY shaderVisibility = D3D12_SHADER_VISIBILITY_ALL,
+ UINT registerSpace = 0)
+ {
+ samplerDesc.ShaderRegister = shaderRegister;
+ samplerDesc.Filter = filter;
+ samplerDesc.AddressU = addressU;
+ samplerDesc.AddressV = addressV;
+ samplerDesc.AddressW = addressW;
+ samplerDesc.MipLODBias = mipLODBias;
+ samplerDesc.MaxAnisotropy = maxAnisotropy;
+ samplerDesc.ComparisonFunc = comparisonFunc;
+ samplerDesc.BorderColor = borderColor;
+ samplerDesc.MinLOD = minLOD;
+ samplerDesc.MaxLOD = maxLOD;
+ samplerDesc.ShaderVisibility = shaderVisibility;
+ samplerDesc.RegisterSpace = registerSpace;
+ }
+ inline void Init(
+ UINT shaderRegister,
+ D3D12_FILTER filter = D3D12_FILTER_ANISOTROPIC,
+ D3D12_TEXTURE_ADDRESS_MODE addressU = D3D12_TEXTURE_ADDRESS_MODE_WRAP,
+ D3D12_TEXTURE_ADDRESS_MODE addressV = D3D12_TEXTURE_ADDRESS_MODE_WRAP,
+ D3D12_TEXTURE_ADDRESS_MODE addressW = D3D12_TEXTURE_ADDRESS_MODE_WRAP,
+ FLOAT mipLODBias = 0,
+ UINT maxAnisotropy = 16,
+ D3D12_COMPARISON_FUNC comparisonFunc = D3D12_COMPARISON_FUNC_LESS_EQUAL,
+ D3D12_STATIC_BORDER_COLOR borderColor = D3D12_STATIC_BORDER_COLOR_OPAQUE_WHITE,
+ FLOAT minLOD = 0.f,
+ FLOAT maxLOD = D3D12_FLOAT32_MAX,
+ D3D12_SHADER_VISIBILITY shaderVisibility = D3D12_SHADER_VISIBILITY_ALL,
+ UINT registerSpace = 0)
+ {
+ Init(
+ *this,
+ shaderRegister,
+ filter,
+ addressU,
+ addressV,
+ addressW,
+ mipLODBias,
+ maxAnisotropy,
+ comparisonFunc,
+ borderColor,
+ minLOD,
+ maxLOD,
+ shaderVisibility,
+ registerSpace);
+ }
+
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_ROOT_SIGNATURE_DESC : public D3D12_ROOT_SIGNATURE_DESC
+{
+ CD3DX12_ROOT_SIGNATURE_DESC() {}
+ explicit CD3DX12_ROOT_SIGNATURE_DESC(const D3D12_ROOT_SIGNATURE_DESC &o) :
+ D3D12_ROOT_SIGNATURE_DESC(o)
+ {}
+ CD3DX12_ROOT_SIGNATURE_DESC(
+ UINT numParameters,
+ _In_reads_opt_(numParameters) const D3D12_ROOT_PARAMETER* _pParameters,
+ UINT numStaticSamplers = 0,
+ _In_reads_opt_(numStaticSamplers) const D3D12_STATIC_SAMPLER_DESC* _pStaticSamplers = NULL,
+ D3D12_ROOT_SIGNATURE_FLAGS flags = D3D12_ROOT_SIGNATURE_FLAG_NONE)
+ {
+ Init(numParameters, _pParameters, numStaticSamplers, _pStaticSamplers, flags);
+ }
+ CD3DX12_ROOT_SIGNATURE_DESC(CD3DX12_DEFAULT)
+ {
+ Init(0, NULL, 0, NULL, D3D12_ROOT_SIGNATURE_FLAG_NONE);
+ }
+
+ inline void Init(
+ UINT numParameters,
+ _In_reads_opt_(numParameters) const D3D12_ROOT_PARAMETER* _pParameters,
+ UINT numStaticSamplers = 0,
+ _In_reads_opt_(numStaticSamplers) const D3D12_STATIC_SAMPLER_DESC* _pStaticSamplers = NULL,
+ D3D12_ROOT_SIGNATURE_FLAGS flags = D3D12_ROOT_SIGNATURE_FLAG_NONE)
+ {
+ Init(*this, numParameters, _pParameters, numStaticSamplers, _pStaticSamplers, flags);
+ }
+
+ static inline void Init(
+ _Out_ D3D12_ROOT_SIGNATURE_DESC &desc,
+ UINT numParameters,
+ _In_reads_opt_(numParameters) const D3D12_ROOT_PARAMETER* _pParameters,
+ UINT numStaticSamplers = 0,
+ _In_reads_opt_(numStaticSamplers) const D3D12_STATIC_SAMPLER_DESC* _pStaticSamplers = NULL,
+ D3D12_ROOT_SIGNATURE_FLAGS flags = D3D12_ROOT_SIGNATURE_FLAG_NONE)
+ {
+ desc.NumParameters = numParameters;
+ desc.pParameters = _pParameters;
+ desc.NumStaticSamplers = numStaticSamplers;
+ desc.pStaticSamplers = _pStaticSamplers;
+ desc.Flags = flags;
+ }
+};
+
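
CD3DX12_ROOT_SIGNATURE_DESC composes with the CD3DX12_ROOT_PARAMETER, CD3DX12_DESCRIPTOR_RANGE and CD3DX12_STATIC_SAMPLER_DESC helpers above so a root signature can be described without hand-filling every field. A sketch of serializing and creating one (the register layout is illustrative, not the sample's actual root signature):

// Sketch with illustrative bindings: one CBV root descriptor (b0), one SRV
// descriptor table (t0) and one static sampler (s0), serialized with the
// version 1.0 path and turned into an ID3D12RootSignature.
#include <d3d12.h>
#include "d3dx12.h"

HRESULT CreateExampleRootSignature( ID3D12Device *pDevice, ID3D12RootSignature **ppRootSig )
{
    CD3DX12_DESCRIPTOR_RANGE srvRange;
    srvRange.Init( D3D12_DESCRIPTOR_RANGE_TYPE_SRV, 1, 0 );   // one SRV at t0

    CD3DX12_ROOT_PARAMETER params[ 2 ];
    params[ 0 ].InitAsConstantBufferView( 0 );                 // b0
    params[ 1 ].InitAsDescriptorTable( 1, &srvRange, D3D12_SHADER_VISIBILITY_PIXEL );

    CD3DX12_STATIC_SAMPLER_DESC sampler( 0, D3D12_FILTER_MIN_MAG_MIP_LINEAR );   // s0

    CD3DX12_ROOT_SIGNATURE_DESC desc( 2, params, 1, &sampler,
        D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT );

    ID3DBlob *pBlob = nullptr, *pError = nullptr;
    HRESULT hr = D3D12SerializeRootSignature( &desc, D3D_ROOT_SIGNATURE_VERSION_1_0, &pBlob, &pError );
    if ( SUCCEEDED( hr ) )
        hr = pDevice->CreateRootSignature( 0, pBlob->GetBufferPointer(), pBlob->GetBufferSize(),
                                           IID_PPV_ARGS( ppRootSig ) );
    if ( pBlob )  pBlob->Release();
    if ( pError ) pError->Release();
    return hr;
}
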
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_DESCRIPTOR_RANGE1 : public D3D12_DESCRIPTOR_RANGE1
+{
+ CD3DX12_DESCRIPTOR_RANGE1() { }
+ explicit CD3DX12_DESCRIPTOR_RANGE1(const D3D12_DESCRIPTOR_RANGE1 &o) :
+ D3D12_DESCRIPTOR_RANGE1(o)
+ {}
+ CD3DX12_DESCRIPTOR_RANGE1(
+ D3D12_DESCRIPTOR_RANGE_TYPE rangeType,
+ UINT numDescriptors,
+ UINT baseShaderRegister,
+ UINT registerSpace = 0,
+ D3D12_DESCRIPTOR_RANGE_FLAGS flags = D3D12_DESCRIPTOR_RANGE_FLAG_NONE,
+ UINT offsetInDescriptorsFromTableStart =
+ D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND)
+ {
+ Init(rangeType, numDescriptors, baseShaderRegister, registerSpace, flags, offsetInDescriptorsFromTableStart);
+ }
+
+ inline void Init(
+ D3D12_DESCRIPTOR_RANGE_TYPE rangeType,
+ UINT numDescriptors,
+ UINT baseShaderRegister,
+ UINT registerSpace = 0,
+ D3D12_DESCRIPTOR_RANGE_FLAGS flags = D3D12_DESCRIPTOR_RANGE_FLAG_NONE,
+ UINT offsetInDescriptorsFromTableStart =
+ D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND)
+ {
+ Init(*this, rangeType, numDescriptors, baseShaderRegister, registerSpace, flags, offsetInDescriptorsFromTableStart);
+ }
+
+ static inline void Init(
+ _Out_ D3D12_DESCRIPTOR_RANGE1 &range,
+ D3D12_DESCRIPTOR_RANGE_TYPE rangeType,
+ UINT numDescriptors,
+ UINT baseShaderRegister,
+ UINT registerSpace = 0,
+ D3D12_DESCRIPTOR_RANGE_FLAGS flags = D3D12_DESCRIPTOR_RANGE_FLAG_NONE,
+ UINT offsetInDescriptorsFromTableStart =
+ D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND)
+ {
+ range.RangeType = rangeType;
+ range.NumDescriptors = numDescriptors;
+ range.BaseShaderRegister = baseShaderRegister;
+ range.RegisterSpace = registerSpace;
+ range.Flags = flags;
+ range.OffsetInDescriptorsFromTableStart = offsetInDescriptorsFromTableStart;
+ }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_ROOT_DESCRIPTOR_TABLE1 : public D3D12_ROOT_DESCRIPTOR_TABLE1
+{
+ CD3DX12_ROOT_DESCRIPTOR_TABLE1() {}
+ explicit CD3DX12_ROOT_DESCRIPTOR_TABLE1(const D3D12_ROOT_DESCRIPTOR_TABLE1 &o) :
+ D3D12_ROOT_DESCRIPTOR_TABLE1(o)
+ {}
+ CD3DX12_ROOT_DESCRIPTOR_TABLE1(
+ UINT numDescriptorRanges,
+ _In_reads_opt_(numDescriptorRanges) const D3D12_DESCRIPTOR_RANGE1* _pDescriptorRanges)
+ {
+ Init(numDescriptorRanges, _pDescriptorRanges);
+ }
+
+ inline void Init(
+ UINT numDescriptorRanges,
+ _In_reads_opt_(numDescriptorRanges) const D3D12_DESCRIPTOR_RANGE1* _pDescriptorRanges)
+ {
+ Init(*this, numDescriptorRanges, _pDescriptorRanges);
+ }
+
+ static inline void Init(
+ _Out_ D3D12_ROOT_DESCRIPTOR_TABLE1 &rootDescriptorTable,
+ UINT numDescriptorRanges,
+ _In_reads_opt_(numDescriptorRanges) const D3D12_DESCRIPTOR_RANGE1* _pDescriptorRanges)
+ {
+ rootDescriptorTable.NumDescriptorRanges = numDescriptorRanges;
+ rootDescriptorTable.pDescriptorRanges = _pDescriptorRanges;
+ }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_ROOT_DESCRIPTOR1 : public D3D12_ROOT_DESCRIPTOR1
+{
+ CD3DX12_ROOT_DESCRIPTOR1() {}
+ explicit CD3DX12_ROOT_DESCRIPTOR1(const D3D12_ROOT_DESCRIPTOR1 &o) :
+ D3D12_ROOT_DESCRIPTOR1(o)
+ {}
+ CD3DX12_ROOT_DESCRIPTOR1(
+ UINT shaderRegister,
+ UINT registerSpace = 0,
+ D3D12_ROOT_DESCRIPTOR_FLAGS flags = D3D12_ROOT_DESCRIPTOR_FLAG_NONE)
+ {
+ Init(shaderRegister, registerSpace, flags);
+ }
+
+ inline void Init(
+ UINT shaderRegister,
+ UINT registerSpace = 0,
+ D3D12_ROOT_DESCRIPTOR_FLAGS flags = D3D12_ROOT_DESCRIPTOR_FLAG_NONE)
+ {
+ Init(*this, shaderRegister, registerSpace, flags);
+ }
+
+ static inline void Init(
+ _Out_ D3D12_ROOT_DESCRIPTOR1 &table,
+ UINT shaderRegister,
+ UINT registerSpace = 0,
+ D3D12_ROOT_DESCRIPTOR_FLAGS flags = D3D12_ROOT_DESCRIPTOR_FLAG_NONE)
+ {
+ table.ShaderRegister = shaderRegister;
+ table.RegisterSpace = registerSpace;
+ table.Flags = flags;
+ }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_ROOT_PARAMETER1 : public D3D12_ROOT_PARAMETER1
+{
+ CD3DX12_ROOT_PARAMETER1() {}
+ explicit CD3DX12_ROOT_PARAMETER1(const D3D12_ROOT_PARAMETER1 &o) :
+ D3D12_ROOT_PARAMETER1(o)
+ {}
+
+ static inline void InitAsDescriptorTable(
+ _Out_ D3D12_ROOT_PARAMETER1 &rootParam,
+ UINT numDescriptorRanges,
+ _In_reads_(numDescriptorRanges) const D3D12_DESCRIPTOR_RANGE1* pDescriptorRanges,
+ D3D12_SHADER_VISIBILITY visibility = D3D12_SHADER_VISIBILITY_ALL)
+ {
+ rootParam.ParameterType = D3D12_ROOT_PARAMETER_TYPE_DESCRIPTOR_TABLE;
+ rootParam.ShaderVisibility = visibility;
+ CD3DX12_ROOT_DESCRIPTOR_TABLE1::Init(rootParam.DescriptorTable, numDescriptorRanges, pDescriptorRanges);
+ }
+
+ static inline void InitAsConstants(
+ _Out_ D3D12_ROOT_PARAMETER1 &rootParam,
+ UINT num32BitValues,
+ UINT shaderRegister,
+ UINT registerSpace = 0,
+ D3D12_SHADER_VISIBILITY visibility = D3D12_SHADER_VISIBILITY_ALL)
+ {
+ rootParam.ParameterType = D3D12_ROOT_PARAMETER_TYPE_32BIT_CONSTANTS;
+ rootParam.ShaderVisibility = visibility;
+ CD3DX12_ROOT_CONSTANTS::Init(rootParam.Constants, num32BitValues, shaderRegister, registerSpace);
+ }
+
+ static inline void InitAsConstantBufferView(
+ _Out_ D3D12_ROOT_PARAMETER1 &rootParam,
+ UINT shaderRegister,
+ UINT registerSpace = 0,
+ D3D12_ROOT_DESCRIPTOR_FLAGS flags = D3D12_ROOT_DESCRIPTOR_FLAG_NONE,
+ D3D12_SHADER_VISIBILITY visibility = D3D12_SHADER_VISIBILITY_ALL)
+ {
+ rootParam.ParameterType = D3D12_ROOT_PARAMETER_TYPE_CBV;
+ rootParam.ShaderVisibility = visibility;
+ CD3DX12_ROOT_DESCRIPTOR1::Init(rootParam.Descriptor, shaderRegister, registerSpace, flags);
+ }
+
+ static inline void InitAsShaderResourceView(
+ _Out_ D3D12_ROOT_PARAMETER1 &rootParam,
+ UINT shaderRegister,
+ UINT registerSpace = 0,
+ D3D12_ROOT_DESCRIPTOR_FLAGS flags = D3D12_ROOT_DESCRIPTOR_FLAG_NONE,
+ D3D12_SHADER_VISIBILITY visibility = D3D12_SHADER_VISIBILITY_ALL)
+ {
+ rootParam.ParameterType = D3D12_ROOT_PARAMETER_TYPE_SRV;
+ rootParam.ShaderVisibility = visibility;
+ CD3DX12_ROOT_DESCRIPTOR1::Init(rootParam.Descriptor, shaderRegister, registerSpace, flags);
+ }
+
+ static inline void InitAsUnorderedAccessView(
+ _Out_ D3D12_ROOT_PARAMETER1 &rootParam,
+ UINT shaderRegister,
+ UINT registerSpace = 0,
+ D3D12_ROOT_DESCRIPTOR_FLAGS flags = D3D12_ROOT_DESCRIPTOR_FLAG_NONE,
+ D3D12_SHADER_VISIBILITY visibility = D3D12_SHADER_VISIBILITY_ALL)
+ {
+ rootParam.ParameterType = D3D12_ROOT_PARAMETER_TYPE_UAV;
+ rootParam.ShaderVisibility = visibility;
+ CD3DX12_ROOT_DESCRIPTOR1::Init(rootParam.Descriptor, shaderRegister, registerSpace, flags);
+ }
+
+ inline void InitAsDescriptorTable(
+ UINT numDescriptorRanges,
+ _In_reads_(numDescriptorRanges) const D3D12_DESCRIPTOR_RANGE1* pDescriptorRanges,
+ D3D12_SHADER_VISIBILITY visibility = D3D12_SHADER_VISIBILITY_ALL)
+ {
+ InitAsDescriptorTable(*this, numDescriptorRanges, pDescriptorRanges, visibility);
+ }
+
+ inline void InitAsConstants(
+ UINT num32BitValues,
+ UINT shaderRegister,
+ UINT registerSpace = 0,
+ D3D12_SHADER_VISIBILITY visibility = D3D12_SHADER_VISIBILITY_ALL)
+ {
+ InitAsConstants(*this, num32BitValues, shaderRegister, registerSpace, visibility);
+ }
+
+ inline void InitAsConstantBufferView(
+ UINT shaderRegister,
+ UINT registerSpace = 0,
+ D3D12_ROOT_DESCRIPTOR_FLAGS flags = D3D12_ROOT_DESCRIPTOR_FLAG_NONE,
+ D3D12_SHADER_VISIBILITY visibility = D3D12_SHADER_VISIBILITY_ALL)
+ {
+ InitAsConstantBufferView(*this, shaderRegister, registerSpace, flags, visibility);
+ }
+
+ inline void InitAsShaderResourceView(
+ UINT shaderRegister,
+ UINT registerSpace = 0,
+ D3D12_ROOT_DESCRIPTOR_FLAGS flags = D3D12_ROOT_DESCRIPTOR_FLAG_NONE,
+ D3D12_SHADER_VISIBILITY visibility = D3D12_SHADER_VISIBILITY_ALL)
+ {
+ InitAsShaderResourceView(*this, shaderRegister, registerSpace, flags, visibility);
+ }
+
+ inline void InitAsUnorderedAccessView(
+ UINT shaderRegister,
+ UINT registerSpace = 0,
+ D3D12_ROOT_DESCRIPTOR_FLAGS flags = D3D12_ROOT_DESCRIPTOR_FLAG_NONE,
+ D3D12_SHADER_VISIBILITY visibility = D3D12_SHADER_VISIBILITY_ALL)
+ {
+ InitAsUnorderedAccessView(*this, shaderRegister, registerSpace, flags, visibility);
+ }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_VERSIONED_ROOT_SIGNATURE_DESC : public D3D12_VERSIONED_ROOT_SIGNATURE_DESC
+{
+ CD3DX12_VERSIONED_ROOT_SIGNATURE_DESC() {}
+ explicit CD3DX12_VERSIONED_ROOT_SIGNATURE_DESC(const D3D12_VERSIONED_ROOT_SIGNATURE_DESC &o) :
+ D3D12_VERSIONED_ROOT_SIGNATURE_DESC(o)
+ {}
+ explicit CD3DX12_VERSIONED_ROOT_SIGNATURE_DESC(const D3D12_ROOT_SIGNATURE_DESC &o)
+ {
+ Version = D3D_ROOT_SIGNATURE_VERSION_1_0;
+ Desc_1_0 = o;
+ }
+ explicit CD3DX12_VERSIONED_ROOT_SIGNATURE_DESC(const D3D12_ROOT_SIGNATURE_DESC1 &o)
+ {
+ Version = D3D_ROOT_SIGNATURE_VERSION_1_1;
+ Desc_1_1 = o;
+ }
+ CD3DX12_VERSIONED_ROOT_SIGNATURE_DESC(
+ UINT numParameters,
+ _In_reads_opt_(numParameters) const D3D12_ROOT_PARAMETER* _pParameters,
+ UINT numStaticSamplers = 0,
+ _In_reads_opt_(numStaticSamplers) const D3D12_STATIC_SAMPLER_DESC* _pStaticSamplers = NULL,
+ D3D12_ROOT_SIGNATURE_FLAGS flags = D3D12_ROOT_SIGNATURE_FLAG_NONE)
+ {
+ Init_1_0(numParameters, _pParameters, numStaticSamplers, _pStaticSamplers, flags);
+ }
+ CD3DX12_VERSIONED_ROOT_SIGNATURE_DESC(
+ UINT numParameters,
+ _In_reads_opt_(numParameters) const D3D12_ROOT_PARAMETER1* _pParameters,
+ UINT numStaticSamplers = 0,
+ _In_reads_opt_(numStaticSamplers) const D3D12_STATIC_SAMPLER_DESC* _pStaticSamplers = NULL,
+ D3D12_ROOT_SIGNATURE_FLAGS flags = D3D12_ROOT_SIGNATURE_FLAG_NONE)
+ {
+ Init_1_1(numParameters, _pParameters, numStaticSamplers, _pStaticSamplers, flags);
+ }
+ CD3DX12_VERSIONED_ROOT_SIGNATURE_DESC(CD3DX12_DEFAULT)
+ {
+ Init_1_1(0, NULL, 0, NULL, D3D12_ROOT_SIGNATURE_FLAG_NONE);
+ }
+
+ inline void Init_1_0(
+ UINT numParameters,
+ _In_reads_opt_(numParameters) const D3D12_ROOT_PARAMETER* _pParameters,
+ UINT numStaticSamplers = 0,
+ _In_reads_opt_(numStaticSamplers) const D3D12_STATIC_SAMPLER_DESC* _pStaticSamplers = NULL,
+ D3D12_ROOT_SIGNATURE_FLAGS flags = D3D12_ROOT_SIGNATURE_FLAG_NONE)
+ {
+ Init_1_0(*this, numParameters, _pParameters, numStaticSamplers, _pStaticSamplers, flags);
+ }
+
+ static inline void Init_1_0(
+ _Out_ D3D12_VERSIONED_ROOT_SIGNATURE_DESC &desc,
+ UINT numParameters,
+ _In_reads_opt_(numParameters) const D3D12_ROOT_PARAMETER* _pParameters,
+ UINT numStaticSamplers = 0,
+ _In_reads_opt_(numStaticSamplers) const D3D12_STATIC_SAMPLER_DESC* _pStaticSamplers = NULL,
+ D3D12_ROOT_SIGNATURE_FLAGS flags = D3D12_ROOT_SIGNATURE_FLAG_NONE)
+ {
+ desc.Version = D3D_ROOT_SIGNATURE_VERSION_1_0;
+ desc.Desc_1_0.NumParameters = numParameters;
+ desc.Desc_1_0.pParameters = _pParameters;
+ desc.Desc_1_0.NumStaticSamplers = numStaticSamplers;
+ desc.Desc_1_0.pStaticSamplers = _pStaticSamplers;
+ desc.Desc_1_0.Flags = flags;
+ }
+
+ inline void Init_1_1(
+ UINT numParameters,
+ _In_reads_opt_(numParameters) const D3D12_ROOT_PARAMETER1* _pParameters,
+ UINT numStaticSamplers = 0,
+ _In_reads_opt_(numStaticSamplers) const D3D12_STATIC_SAMPLER_DESC* _pStaticSamplers = NULL,
+ D3D12_ROOT_SIGNATURE_FLAGS flags = D3D12_ROOT_SIGNATURE_FLAG_NONE)
+ {
+ Init_1_1(*this, numParameters, _pParameters, numStaticSamplers, _pStaticSamplers, flags);
+ }
+
+ static inline void Init_1_1(
+ _Out_ D3D12_VERSIONED_ROOT_SIGNATURE_DESC &desc,
+ UINT numParameters,
+ _In_reads_opt_(numParameters) const D3D12_ROOT_PARAMETER1* _pParameters,
+ UINT numStaticSamplers = 0,
+ _In_reads_opt_(numStaticSamplers) const D3D12_STATIC_SAMPLER_DESC* _pStaticSamplers = NULL,
+ D3D12_ROOT_SIGNATURE_FLAGS flags = D3D12_ROOT_SIGNATURE_FLAG_NONE)
+ {
+ desc.Version = D3D_ROOT_SIGNATURE_VERSION_1_1;
+ desc.Desc_1_1.NumParameters = numParameters;
+ desc.Desc_1_1.pParameters = _pParameters;
+ desc.Desc_1_1.NumStaticSamplers = numStaticSamplers;
+ desc.Desc_1_1.pStaticSamplers = _pStaticSamplers;
+ desc.Desc_1_1.Flags = flags;
+ }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_CPU_DESCRIPTOR_HANDLE : public D3D12_CPU_DESCRIPTOR_HANDLE
+{
+ CD3DX12_CPU_DESCRIPTOR_HANDLE() {}
+ explicit CD3DX12_CPU_DESCRIPTOR_HANDLE(const D3D12_CPU_DESCRIPTOR_HANDLE &o) :
+ D3D12_CPU_DESCRIPTOR_HANDLE(o)
+ {}
+ CD3DX12_CPU_DESCRIPTOR_HANDLE(CD3DX12_DEFAULT) { ptr = 0; }
+ CD3DX12_CPU_DESCRIPTOR_HANDLE(_In_ const D3D12_CPU_DESCRIPTOR_HANDLE &other, INT offsetScaledByIncrementSize)
+ {
+ InitOffsetted(other, offsetScaledByIncrementSize);
+ }
+ CD3DX12_CPU_DESCRIPTOR_HANDLE(_In_ const D3D12_CPU_DESCRIPTOR_HANDLE &other, INT offsetInDescriptors, UINT descriptorIncrementSize)
+ {
+ InitOffsetted(other, offsetInDescriptors, descriptorIncrementSize);
+ }
+ CD3DX12_CPU_DESCRIPTOR_HANDLE& Offset(INT offsetInDescriptors, UINT descriptorIncrementSize)
+ {
+ ptr += offsetInDescriptors * descriptorIncrementSize;
+ return *this;
+ }
+ CD3DX12_CPU_DESCRIPTOR_HANDLE& Offset(INT offsetScaledByIncrementSize)
+ {
+ ptr += offsetScaledByIncrementSize;
+ return *this;
+ }
+ bool operator==(_In_ const D3D12_CPU_DESCRIPTOR_HANDLE& other) const
+ {
+ return (ptr == other.ptr);
+ }
+ bool operator!=(_In_ const D3D12_CPU_DESCRIPTOR_HANDLE& other) const
+ {
+ return (ptr != other.ptr);
+ }
+ CD3DX12_CPU_DESCRIPTOR_HANDLE &operator=(const D3D12_CPU_DESCRIPTOR_HANDLE &other)
+ {
+ ptr = other.ptr;
+ return *this;
+ }
+
+ inline void InitOffsetted(_In_ const D3D12_CPU_DESCRIPTOR_HANDLE &base, INT offsetScaledByIncrementSize)
+ {
+ InitOffsetted(*this, base, offsetScaledByIncrementSize);
+ }
+
+ inline void InitOffsetted(_In_ const D3D12_CPU_DESCRIPTOR_HANDLE &base, INT offsetInDescriptors, UINT descriptorIncrementSize)
+ {
+ InitOffsetted(*this, base, offsetInDescriptors, descriptorIncrementSize);
+ }
+
+ static inline void InitOffsetted(_Out_ D3D12_CPU_DESCRIPTOR_HANDLE &handle, _In_ const D3D12_CPU_DESCRIPTOR_HANDLE &base, INT offsetScaledByIncrementSize)
+ {
+ handle.ptr = base.ptr + offsetScaledByIncrementSize;
+ }
+
+ static inline void InitOffsetted(_Out_ D3D12_CPU_DESCRIPTOR_HANDLE &handle, _In_ const D3D12_CPU_DESCRIPTOR_HANDLE &base, INT offsetInDescriptors, UINT descriptorIncrementSize)
+ {
+ handle.ptr = base.ptr + offsetInDescriptors * descriptorIncrementSize;
+ }
+};
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_GPU_DESCRIPTOR_HANDLE : public D3D12_GPU_DESCRIPTOR_HANDLE
+{
+ CD3DX12_GPU_DESCRIPTOR_HANDLE() {}
+ explicit CD3DX12_GPU_DESCRIPTOR_HANDLE(const D3D12_GPU_DESCRIPTOR_HANDLE &o) :
+ D3D12_GPU_DESCRIPTOR_HANDLE(o)
+ {}
+ CD3DX12_GPU_DESCRIPTOR_HANDLE(CD3DX12_DEFAULT) { ptr = 0; }
+ CD3DX12_GPU_DESCRIPTOR_HANDLE(_In_ const D3D12_GPU_DESCRIPTOR_HANDLE &other, INT offsetScaledByIncrementSize)
+ {
+ InitOffsetted(other, offsetScaledByIncrementSize);
+ }
+ CD3DX12_GPU_DESCRIPTOR_HANDLE(_In_ const D3D12_GPU_DESCRIPTOR_HANDLE &other, INT offsetInDescriptors, UINT descriptorIncrementSize)
+ {
+ InitOffsetted(other, offsetInDescriptors, descriptorIncrementSize);
+ }
+ CD3DX12_GPU_DESCRIPTOR_HANDLE& Offset(INT offsetInDescriptors, UINT descriptorIncrementSize)
+ {
+ ptr += offsetInDescriptors * descriptorIncrementSize;
+ return *this;
+ }
+ CD3DX12_GPU_DESCRIPTOR_HANDLE& Offset(INT offsetScaledByIncrementSize)
+ {
+ ptr += offsetScaledByIncrementSize;
+ return *this;
+ }
+ inline bool operator==(_In_ const D3D12_GPU_DESCRIPTOR_HANDLE& other) const
+ {
+ return (ptr == other.ptr);
+ }
+ inline bool operator!=(_In_ const D3D12_GPU_DESCRIPTOR_HANDLE& other) const
+ {
+ return (ptr != other.ptr);
+ }
+ CD3DX12_GPU_DESCRIPTOR_HANDLE &operator=(const D3D12_GPU_DESCRIPTOR_HANDLE &other)
+ {
+ ptr = other.ptr;
+ return *this;
+ }
+
+ inline void InitOffsetted(_In_ const D3D12_GPU_DESCRIPTOR_HANDLE &base, INT offsetScaledByIncrementSize)
+ {
+ InitOffsetted(*this, base, offsetScaledByIncrementSize);
+ }
+
+ inline void InitOffsetted(_In_ const D3D12_GPU_DESCRIPTOR_HANDLE &base, INT offsetInDescriptors, UINT descriptorIncrementSize)
+ {
+ InitOffsetted(*this, base, offsetInDescriptors, descriptorIncrementSize);
+ }
+
+ static inline void InitOffsetted(_Out_ D3D12_GPU_DESCRIPTOR_HANDLE &handle, _In_ const D3D12_GPU_DESCRIPTOR_HANDLE &base, INT offsetScaledByIncrementSize)
+ {
+ handle.ptr = base.ptr + offsetScaledByIncrementSize;
+ }
+
+ static inline void InitOffsetted(_Out_ D3D12_GPU_DESCRIPTOR_HANDLE &handle, _In_ const D3D12_GPU_DESCRIPTOR_HANDLE &base, INT offsetInDescriptors, UINT descriptorIncrementSize)
+ {
+ handle.ptr = base.ptr + offsetInDescriptors * descriptorIncrementSize;
+ }
+};
+
+//------------------------------------------------------------------------------------------------
+inline UINT D3D12CalcSubresource( UINT MipSlice, UINT ArraySlice, UINT PlaneSlice, UINT MipLevels, UINT ArraySize )
+{
+ return MipSlice + ArraySlice * MipLevels + PlaneSlice * MipLevels * ArraySize;
+}
+
+//------------------------------------------------------------------------------------------------
+template <typename T, typename U, typename V>
+inline void D3D12DecomposeSubresource( UINT Subresource, UINT MipLevels, UINT ArraySize, _Out_ T& MipSlice, _Out_ U& ArraySlice, _Out_ V& PlaneSlice )
+{
+ MipSlice = static_cast<T>(Subresource % MipLevels);
+ ArraySlice = static_cast<U>((Subresource / MipLevels) % ArraySize);
+ PlaneSlice = static_cast<V>(Subresource / (MipLevels * ArraySize));
+}
+
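+// Example (sketch): for a texture with MipLevels = 3, ArraySize = 2 and a single plane the two
+// helpers above are inverses of each other:
+//
+//     UINT sub = D3D12CalcSubresource( 1, 1, 0, 3, 2 );              // 1 + 1*3 + 0*3*2 == 4
+//     UINT mip, arraySlice, plane;
+//     D3D12DecomposeSubresource( sub, 3, 2, mip, arraySlice, plane ); // mip==1, arraySlice==1, plane==0
+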
+//------------------------------------------------------------------------------------------------
+inline UINT8 D3D12GetFormatPlaneCount(
+ _In_ ID3D12Device* pDevice,
+ DXGI_FORMAT Format
+ )
+{
+ D3D12_FEATURE_DATA_FORMAT_INFO formatInfo = {Format};
+ if (FAILED(pDevice->CheckFeatureSupport(D3D12_FEATURE_FORMAT_INFO, &formatInfo, sizeof(formatInfo))))
+ {
+ return 0;
+ }
+ return formatInfo.PlaneCount;
+}
+
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_RESOURCE_DESC : public D3D12_RESOURCE_DESC
+{
+ CD3DX12_RESOURCE_DESC()
+ {}
+ explicit CD3DX12_RESOURCE_DESC( const D3D12_RESOURCE_DESC& o ) :
+ D3D12_RESOURCE_DESC( o )
+ {}
+ CD3DX12_RESOURCE_DESC(
+ D3D12_RESOURCE_DIMENSION dimension,
+ UINT64 alignment,
+ UINT64 width,
+ UINT height,
+ UINT16 depthOrArraySize,
+ UINT16 mipLevels,
+ DXGI_FORMAT format,
+ UINT sampleCount,
+ UINT sampleQuality,
+ D3D12_TEXTURE_LAYOUT layout,
+ D3D12_RESOURCE_FLAGS flags )
+ {
+ Dimension = dimension;
+ Alignment = alignment;
+ Width = width;
+ Height = height;
+ DepthOrArraySize = depthOrArraySize;
+ MipLevels = mipLevels;
+ Format = format;
+ SampleDesc.Count = sampleCount;
+ SampleDesc.Quality = sampleQuality;
+ Layout = layout;
+ Flags = flags;
+ }
+ static inline CD3DX12_RESOURCE_DESC Buffer(
+ const D3D12_RESOURCE_ALLOCATION_INFO& resAllocInfo,
+ D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE )
+ {
+ return CD3DX12_RESOURCE_DESC( D3D12_RESOURCE_DIMENSION_BUFFER, resAllocInfo.Alignment, resAllocInfo.SizeInBytes,
+ 1, 1, 1, DXGI_FORMAT_UNKNOWN, 1, 0, D3D12_TEXTURE_LAYOUT_ROW_MAJOR, flags );
+ }
+ static inline CD3DX12_RESOURCE_DESC Buffer(
+ UINT64 width,
+ D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE,
+ UINT64 alignment = 0 )
+ {
+ return CD3DX12_RESOURCE_DESC( D3D12_RESOURCE_DIMENSION_BUFFER, alignment, width, 1, 1, 1,
+ DXGI_FORMAT_UNKNOWN, 1, 0, D3D12_TEXTURE_LAYOUT_ROW_MAJOR, flags );
+ }
+ static inline CD3DX12_RESOURCE_DESC Tex1D(
+ DXGI_FORMAT format,
+ UINT64 width,
+ UINT16 arraySize = 1,
+ UINT16 mipLevels = 0,
+ D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE,
+ D3D12_TEXTURE_LAYOUT layout = D3D12_TEXTURE_LAYOUT_UNKNOWN,
+ UINT64 alignment = 0 )
+ {
+ return CD3DX12_RESOURCE_DESC( D3D12_RESOURCE_DIMENSION_TEXTURE1D, alignment, width, 1, arraySize,
+ mipLevels, format, 1, 0, layout, flags );
+ }
+ static inline CD3DX12_RESOURCE_DESC Tex2D(
+ DXGI_FORMAT format,
+ UINT64 width,
+ UINT height,
+ UINT16 arraySize = 1,
+ UINT16 mipLevels = 0,
+ UINT sampleCount = 1,
+ UINT sampleQuality = 0,
+ D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE,
+ D3D12_TEXTURE_LAYOUT layout = D3D12_TEXTURE_LAYOUT_UNKNOWN,
+ UINT64 alignment = 0 )
+ {
+ return CD3DX12_RESOURCE_DESC( D3D12_RESOURCE_DIMENSION_TEXTURE2D, alignment, width, height, arraySize,
+ mipLevels, format, sampleCount, sampleQuality, layout, flags );
+ }
+ static inline CD3DX12_RESOURCE_DESC Tex3D(
+ DXGI_FORMAT format,
+ UINT64 width,
+ UINT height,
+ UINT16 depth,
+ UINT16 mipLevels = 0,
+ D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE,
+ D3D12_TEXTURE_LAYOUT layout = D3D12_TEXTURE_LAYOUT_UNKNOWN,
+ UINT64 alignment = 0 )
+ {
+ return CD3DX12_RESOURCE_DESC( D3D12_RESOURCE_DIMENSION_TEXTURE3D, alignment, width, height, depth,
+ mipLevels, format, 1, 0, layout, flags );
+ }
+ inline UINT16 Depth() const
+ { return (Dimension == D3D12_RESOURCE_DIMENSION_TEXTURE3D ? DepthOrArraySize : 1); }
+ inline UINT16 ArraySize() const
+ { return (Dimension != D3D12_RESOURCE_DIMENSION_TEXTURE3D ? DepthOrArraySize : 1); }
+ inline UINT8 PlaneCount(_In_ ID3D12Device* pDevice) const
+ { return D3D12GetFormatPlaneCount(pDevice, Format); }
+ inline UINT Subresources(_In_ ID3D12Device* pDevice) const
+ { return MipLevels * ArraySize() * PlaneCount(pDevice); }
+ inline UINT CalcSubresource(UINT MipSlice, UINT ArraySlice, UINT PlaneSlice)
+ { return D3D12CalcSubresource(MipSlice, ArraySlice, PlaneSlice, MipLevels, ArraySize()); }
+ operator const D3D12_RESOURCE_DESC&() const { return *this; }
+};
+inline bool operator==( const D3D12_RESOURCE_DESC& l, const D3D12_RESOURCE_DESC& r )
+{
+ return l.Dimension == r.Dimension &&
+ l.Alignment == r.Alignment &&
+ l.Width == r.Width &&
+ l.Height == r.Height &&
+ l.DepthOrArraySize == r.DepthOrArraySize &&
+ l.MipLevels == r.MipLevels &&
+ l.Format == r.Format &&
+ l.SampleDesc.Count == r.SampleDesc.Count &&
+ l.SampleDesc.Quality == r.SampleDesc.Quality &&
+ l.Layout == r.Layout &&
+ l.Flags == r.Flags;
+}
+inline bool operator!=( const D3D12_RESOURCE_DESC& l, const D3D12_RESOURCE_DESC& r )
+{ return !( l == r ); }
+
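+// Example (sketch): typical descriptions built with the helpers above, assuming a valid
+// ID3D12Device* pDevice and an ID3D12Resource* pTexture output pointer (names are
+// illustrative only):
+//
+//     CD3DX12_RESOURCE_DESC texDesc = CD3DX12_RESOURCE_DESC::Tex2D( DXGI_FORMAT_R8G8B8A8_UNORM, 1024, 1024 );
+//     CD3DX12_RESOURCE_DESC vbDesc  = CD3DX12_RESOURCE_DESC::Buffer( 64 * 1024 );
+//     CD3DX12_HEAP_PROPERTIES defaultHeap( D3D12_HEAP_TYPE_DEFAULT );
+//     pDevice->CreateCommittedResource( &defaultHeap, D3D12_HEAP_FLAG_NONE, &texDesc,
+//         D3D12_RESOURCE_STATE_COPY_DEST, nullptr, IID_PPV_ARGS( &pTexture ) );
+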
+//------------------------------------------------------------------------------------------------
+// Row-by-row memcpy
+inline void MemcpySubresource(
+ _In_ const D3D12_MEMCPY_DEST* pDest,
+ _In_ const D3D12_SUBRESOURCE_DATA* pSrc,
+ SIZE_T RowSizeInBytes,
+ UINT NumRows,
+ UINT NumSlices)
+{
+ for (UINT z = 0; z < NumSlices; ++z)
+ {
+ BYTE* pDestSlice = reinterpret_cast<BYTE*>(pDest->pData) + pDest->SlicePitch * z;
+ const BYTE* pSrcSlice = reinterpret_cast<const BYTE*>(pSrc->pData) + pSrc->SlicePitch * z;
+ for (UINT y = 0; y < NumRows; ++y)
+ {
+ memcpy(pDestSlice + pDest->RowPitch * y,
+ pSrcSlice + pSrc->RowPitch * y,
+ RowSizeInBytes);
+ }
+ }
+}
+
+//------------------------------------------------------------------------------------------------
+// Returns required size of a buffer to be used for data upload
+inline UINT64 GetRequiredIntermediateSize(
+ _In_ ID3D12Resource* pDestinationResource,
+ _In_range_(0,D3D12_REQ_SUBRESOURCES) UINT FirstSubresource,
+ _In_range_(0,D3D12_REQ_SUBRESOURCES-FirstSubresource) UINT NumSubresources)
+{
+ D3D12_RESOURCE_DESC Desc = pDestinationResource->GetDesc();
+ UINT64 RequiredSize = 0;
+
+ ID3D12Device* pDevice;
+ pDestinationResource->GetDevice(__uuidof(*pDevice), reinterpret_cast<void**>(&pDevice));
+ pDevice->GetCopyableFootprints(&Desc, FirstSubresource, NumSubresources, 0, nullptr, nullptr, nullptr, &RequiredSize);
+ pDevice->Release();
+
+ return RequiredSize;
+}
+
+//------------------------------------------------------------------------------------------------
+// All arrays must be populated (e.g. by calling GetCopyableFootprints)
+inline UINT64 UpdateSubresources(
+ _In_ ID3D12GraphicsCommandList* pCmdList,
+ _In_ ID3D12Resource* pDestinationResource,
+ _In_ ID3D12Resource* pIntermediate,
+ _In_range_(0,D3D12_REQ_SUBRESOURCES) UINT FirstSubresource,
+ _In_range_(0,D3D12_REQ_SUBRESOURCES-FirstSubresource) UINT NumSubresources,
+ UINT64 RequiredSize,
+ _In_reads_(NumSubresources) const D3D12_PLACED_SUBRESOURCE_FOOTPRINT* pLayouts,
+ _In_reads_(NumSubresources) const UINT* pNumRows,
+ _In_reads_(NumSubresources) const UINT64* pRowSizesInBytes,
+ _In_reads_(NumSubresources) const D3D12_SUBRESOURCE_DATA* pSrcData)
+{
+ // Minor validation
+ D3D12_RESOURCE_DESC IntermediateDesc = pIntermediate->GetDesc();
+ D3D12_RESOURCE_DESC DestinationDesc = pDestinationResource->GetDesc();
+ if (IntermediateDesc.Dimension != D3D12_RESOURCE_DIMENSION_BUFFER ||
+ IntermediateDesc.Width < RequiredSize + pLayouts[0].Offset ||
+ RequiredSize > (SIZE_T)-1 ||
+ (DestinationDesc.Dimension == D3D12_RESOURCE_DIMENSION_BUFFER &&
+ (FirstSubresource != 0 || NumSubresources != 1)))
+ {
+ return 0;
+ }
+
+ BYTE* pData;
+ HRESULT hr = pIntermediate->Map(0, NULL, reinterpret_cast<void**>(&pData));
+ if (FAILED(hr))
+ {
+ return 0;
+ }
+
+ for (UINT i = 0; i < NumSubresources; ++i)
+ {
+ if (pRowSizesInBytes[i] > (SIZE_T)-1) return 0;
+ D3D12_MEMCPY_DEST DestData = { pData + pLayouts[i].Offset, pLayouts[i].Footprint.RowPitch, pLayouts[i].Footprint.RowPitch * pNumRows[i] };
+ MemcpySubresource(&DestData, &pSrcData[i], (SIZE_T)pRowSizesInBytes[i], pNumRows[i], pLayouts[i].Footprint.Depth);
+ }
+ pIntermediate->Unmap(0, NULL);
+
+ if (DestinationDesc.Dimension == D3D12_RESOURCE_DIMENSION_BUFFER)
+ {
+ CD3DX12_BOX SrcBox( UINT( pLayouts[0].Offset ), UINT( pLayouts[0].Offset + pLayouts[0].Footprint.Width ) );
+ pCmdList->CopyBufferRegion(
+ pDestinationResource, 0, pIntermediate, pLayouts[0].Offset, pLayouts[0].Footprint.Width);
+ }
+ else
+ {
+ for (UINT i = 0; i < NumSubresources; ++i)
+ {
+ CD3DX12_TEXTURE_COPY_LOCATION Dst(pDestinationResource, i + FirstSubresource);
+ CD3DX12_TEXTURE_COPY_LOCATION Src(pIntermediate, pLayouts[i]);
+ pCmdList->CopyTextureRegion(&Dst, 0, 0, 0, &Src, nullptr);
+ }
+ }
+ return RequiredSize;
+}
+
+//------------------------------------------------------------------------------------------------
+// Heap-allocating UpdateSubresources implementation
+inline UINT64 UpdateSubresources(
+ _In_ ID3D12GraphicsCommandList* pCmdList,
+ _In_ ID3D12Resource* pDestinationResource,
+ _In_ ID3D12Resource* pIntermediate,
+ UINT64 IntermediateOffset,
+ _In_range_(0,D3D12_REQ_SUBRESOURCES) UINT FirstSubresource,
+ _In_range_(0,D3D12_REQ_SUBRESOURCES-FirstSubresource) UINT NumSubresources,
+ _In_reads_(NumSubresources) D3D12_SUBRESOURCE_DATA* pSrcData)
+{
+ UINT64 RequiredSize = 0;
+ UINT64 MemToAlloc = static_cast<UINT64>(sizeof(D3D12_PLACED_SUBRESOURCE_FOOTPRINT) + sizeof(UINT) + sizeof(UINT64)) * NumSubresources;
+ if (MemToAlloc > SIZE_MAX)
+ {
+ return 0;
+ }
+ void* pMem = HeapAlloc(GetProcessHeap(), 0, static_cast<SIZE_T>(MemToAlloc));
+ if (pMem == NULL)
+ {
+ return 0;
+ }
+ D3D12_PLACED_SUBRESOURCE_FOOTPRINT* pLayouts = reinterpret_cast<D3D12_PLACED_SUBRESOURCE_FOOTPRINT*>(pMem);
+ UINT64* pRowSizesInBytes = reinterpret_cast<UINT64*>(pLayouts + NumSubresources);
+ UINT* pNumRows = reinterpret_cast<UINT*>(pRowSizesInBytes + NumSubresources);
+
+ D3D12_RESOURCE_DESC Desc = pDestinationResource->GetDesc();
+ ID3D12Device* pDevice;
+ pDestinationResource->GetDevice(__uuidof(*pDevice), reinterpret_cast<void**>(&pDevice));
+ pDevice->GetCopyableFootprints(&Desc, FirstSubresource, NumSubresources, IntermediateOffset, pLayouts, pNumRows, pRowSizesInBytes, &RequiredSize);
+ pDevice->Release();
+
+ UINT64 Result = UpdateSubresources(pCmdList, pDestinationResource, pIntermediate, FirstSubresource, NumSubresources, RequiredSize, pLayouts, pNumRows, pRowSizesInBytes, pSrcData);
+ HeapFree(GetProcessHeap(), 0, pMem);
+ return Result;
+}
+
+//------------------------------------------------------------------------------------------------
+// Stack-allocating UpdateSubresources implementation
+template <UINT MaxSubresources>
+inline UINT64 UpdateSubresources(
+ _In_ ID3D12GraphicsCommandList* pCmdList,
+ _In_ ID3D12Resource* pDestinationResource,
+ _In_ ID3D12Resource* pIntermediate,
+ UINT64 IntermediateOffset,
+ _In_range_(0, MaxSubresources) UINT FirstSubresource,
+ _In_range_(1, MaxSubresources - FirstSubresource) UINT NumSubresources,
+ _In_reads_(NumSubresources) D3D12_SUBRESOURCE_DATA* pSrcData)
+{
+ UINT64 RequiredSize = 0;
+ D3D12_PLACED_SUBRESOURCE_FOOTPRINT Layouts[MaxSubresources];
+ UINT NumRows[MaxSubresources];
+ UINT64 RowSizesInBytes[MaxSubresources];
+
+ D3D12_RESOURCE_DESC Desc = pDestinationResource->GetDesc();
+ ID3D12Device* pDevice;
+ pDestinationResource->GetDevice(__uuidof(*pDevice), reinterpret_cast<void**>(&pDevice));
+ pDevice->GetCopyableFootprints(&Desc, FirstSubresource, NumSubresources, IntermediateOffset, Layouts, NumRows, RowSizesInBytes, &RequiredSize);
+ pDevice->Release();
+
+ return UpdateSubresources(pCmdList, pDestinationResource, pIntermediate, FirstSubresource, NumSubresources, RequiredSize, Layouts, NumRows, RowSizesInBytes, pSrcData);
+}
+
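+// Example (sketch): a one-subresource texture upload using the helpers above, assuming the
+// caller owns pDevice, pCmdList, a texture pTexture in the COPY_DEST state and a filled-in
+// D3D12_SUBRESOURCE_DATA srcData (names are illustrative only):
+//
+//     const UINT64 uploadSize = GetRequiredIntermediateSize( pTexture, 0, 1 );
+//     CD3DX12_HEAP_PROPERTIES uploadHeap( D3D12_HEAP_TYPE_UPLOAD );
+//     CD3DX12_RESOURCE_DESC uploadDesc = CD3DX12_RESOURCE_DESC::Buffer( uploadSize );
+//     ID3D12Resource* pUploadBuffer = nullptr;
+//     pDevice->CreateCommittedResource( &uploadHeap, D3D12_HEAP_FLAG_NONE, &uploadDesc,
+//         D3D12_RESOURCE_STATE_GENERIC_READ, nullptr, IID_PPV_ARGS( &pUploadBuffer ) );
+//     UpdateSubresources< 1 >( pCmdList, pTexture, pUploadBuffer, 0, 0, 1, &srcData );
+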
+//------------------------------------------------------------------------------------------------
+inline bool D3D12IsLayoutOpaque( D3D12_TEXTURE_LAYOUT Layout )
+{ return Layout == D3D12_TEXTURE_LAYOUT_UNKNOWN || Layout == D3D12_TEXTURE_LAYOUT_64KB_UNDEFINED_SWIZZLE; }
+
+//------------------------------------------------------------------------------------------------
+template <typename t_CommandListType>
+inline ID3D12CommandList * const * CommandListCast(t_CommandListType * const * pp)
+{
+ // This cast is useful for passing strongly typed command list pointers into
+ // ExecuteCommandLists.
+ // This cast is valid as long as the const-ness is respected. D3D12 APIs do
+ // respect the const-ness of their arguments.
+ return reinterpret_cast<ID3D12CommandList * const *>(pp);
+}
+
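+// Example (sketch): typical use when submitting a strongly typed list to a command queue
+// (pCommandQueue and pGraphicsCommandList are illustrative names):
+//
+//     ID3D12GraphicsCommandList* ppLists[] = { pGraphicsCommandList };
+//     pCommandQueue->ExecuteCommandLists( _countof( ppLists ), CommandListCast( ppLists ) );
+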
+//------------------------------------------------------------------------------------------------
+// D3D12 exports a new method for serializing root signatures in the Windows 10 Anniversary Update.
+// To help enable root signature 1.1 features when they are available and not require maintaining
+// two code paths for building root signatures, this helper method reconstructs a 1.0 signature when
+// 1.1 is not supported.
+inline HRESULT D3DX12SerializeVersionedRootSignature(
+ _In_ const D3D12_VERSIONED_ROOT_SIGNATURE_DESC* pRootSignatureDesc,
+ D3D_ROOT_SIGNATURE_VERSION MaxVersion,
+ _Outptr_ ID3DBlob** ppBlob,
+ _Always_(_Outptr_opt_result_maybenull_) ID3DBlob** ppErrorBlob)
+{
+ if (ppErrorBlob != NULL)
+ {
+ *ppErrorBlob = NULL;
+ }
+
+ switch (MaxVersion)
+ {
+ case D3D_ROOT_SIGNATURE_VERSION_1_0:
+ switch (pRootSignatureDesc->Version)
+ {
+ case D3D_ROOT_SIGNATURE_VERSION_1_0:
+ return D3D12SerializeRootSignature(&pRootSignatureDesc->Desc_1_0, D3D_ROOT_SIGNATURE_VERSION_1, ppBlob, ppErrorBlob);
+
+ case D3D_ROOT_SIGNATURE_VERSION_1_1:
+ {
+ HRESULT hr = S_OK;
+ const D3D12_ROOT_SIGNATURE_DESC1& desc_1_1 = pRootSignatureDesc->Desc_1_1;
+
+ const SIZE_T ParametersSize = sizeof(D3D12_ROOT_PARAMETER) * desc_1_1.NumParameters;
+ void* pParameters = (ParametersSize > 0) ? HeapAlloc(GetProcessHeap(), 0, ParametersSize) : NULL;
+ if (ParametersSize > 0 && pParameters == NULL)
+ {
+ hr = E_OUTOFMEMORY;
+ }
+ D3D12_ROOT_PARAMETER* pParameters_1_0 = reinterpret_cast<D3D12_ROOT_PARAMETER*>(pParameters);
+
+ if (SUCCEEDED(hr))
+ {
+ for (UINT n = 0; n < desc_1_1.NumParameters; n++)
+ {
+ __analysis_assume(ParametersSize == sizeof(D3D12_ROOT_PARAMETER) * desc_1_1.NumParameters);
+ pParameters_1_0[n].ParameterType = desc_1_1.pParameters[n].ParameterType;
+ pParameters_1_0[n].ShaderVisibility = desc_1_1.pParameters[n].ShaderVisibility;
+
+ switch (desc_1_1.pParameters[n].ParameterType)
+ {
+ case D3D12_ROOT_PARAMETER_TYPE_32BIT_CONSTANTS:
+ pParameters_1_0[n].Constants.Num32BitValues = desc_1_1.pParameters[n].Constants.Num32BitValues;
+ pParameters_1_0[n].Constants.RegisterSpace = desc_1_1.pParameters[n].Constants.RegisterSpace;
+ pParameters_1_0[n].Constants.ShaderRegister = desc_1_1.pParameters[n].Constants.ShaderRegister;
+ break;
+
+ case D3D12_ROOT_PARAMETER_TYPE_CBV:
+ case D3D12_ROOT_PARAMETER_TYPE_SRV:
+ case D3D12_ROOT_PARAMETER_TYPE_UAV:
+ pParameters_1_0[n].Descriptor.RegisterSpace = desc_1_1.pParameters[n].Descriptor.RegisterSpace;
+ pParameters_1_0[n].Descriptor.ShaderRegister = desc_1_1.pParameters[n].Descriptor.ShaderRegister;
+ break;
+
+ case D3D12_ROOT_PARAMETER_TYPE_DESCRIPTOR_TABLE:
+ const D3D12_ROOT_DESCRIPTOR_TABLE1& table_1_1 = desc_1_1.pParameters[n].DescriptorTable;
+
+ const SIZE_T DescriptorRangesSize = sizeof(D3D12_DESCRIPTOR_RANGE) * table_1_1.NumDescriptorRanges;
+ void* pDescriptorRanges = (DescriptorRangesSize > 0 && SUCCEEDED(hr)) ? HeapAlloc(GetProcessHeap(), 0, DescriptorRangesSize) : NULL;
+ if (DescriptorRangesSize > 0 && pDescriptorRanges == NULL)
+ {
+ hr = E_OUTOFMEMORY;
+ }
+ D3D12_DESCRIPTOR_RANGE* pDescriptorRanges_1_0 = reinterpret_cast<D3D12_DESCRIPTOR_RANGE*>(pDescriptorRanges);
+
+ if (SUCCEEDED(hr))
+ {
+ for (UINT x = 0; x < table_1_1.NumDescriptorRanges; x++)
+ {
+ __analysis_assume(DescriptorRangesSize == sizeof(D3D12_DESCRIPTOR_RANGE) * table_1_1.NumDescriptorRanges);
+ pDescriptorRanges_1_0[x].BaseShaderRegister = table_1_1.pDescriptorRanges[x].BaseShaderRegister;
+ pDescriptorRanges_1_0[x].NumDescriptors = table_1_1.pDescriptorRanges[x].NumDescriptors;
+ pDescriptorRanges_1_0[x].OffsetInDescriptorsFromTableStart = table_1_1.pDescriptorRanges[x].OffsetInDescriptorsFromTableStart;
+ pDescriptorRanges_1_0[x].RangeType = table_1_1.pDescriptorRanges[x].RangeType;
+ pDescriptorRanges_1_0[x].RegisterSpace = table_1_1.pDescriptorRanges[x].RegisterSpace;
+ }
+ }
+
+ D3D12_ROOT_DESCRIPTOR_TABLE& table_1_0 = pParameters_1_0[n].DescriptorTable;
+ table_1_0.NumDescriptorRanges = table_1_1.NumDescriptorRanges;
+ table_1_0.pDescriptorRanges = pDescriptorRanges_1_0;
+ }
+ }
+ }
+
+ if (SUCCEEDED(hr))
+ {
+ CD3DX12_ROOT_SIGNATURE_DESC desc_1_0(desc_1_1.NumParameters, pParameters_1_0, desc_1_1.NumStaticSamplers, desc_1_1.pStaticSamplers, desc_1_1.Flags);
+ hr = D3D12SerializeRootSignature(&desc_1_0, D3D_ROOT_SIGNATURE_VERSION_1, ppBlob, ppErrorBlob);
+ }
+
+ if (pParameters)
+ {
+ for (UINT n = 0; n < desc_1_1.NumParameters; n++)
+ {
+ if (desc_1_1.pParameters[n].ParameterType == D3D12_ROOT_PARAMETER_TYPE_DESCRIPTOR_TABLE)
+ {
+ HeapFree(GetProcessHeap(), 0, reinterpret_cast<void*>(const_cast<D3D12_DESCRIPTOR_RANGE*>(pParameters_1_0[n].DescriptorTable.pDescriptorRanges)));
+ }
+ }
+ HeapFree(GetProcessHeap(), 0, pParameters);
+ }
+ return hr;
+ }
+ }
+ break;
+
+ case D3D_ROOT_SIGNATURE_VERSION_1_1:
+ return D3D12SerializeVersionedRootSignature(pRootSignatureDesc, ppBlob, ppErrorBlob);
+ }
+
+ return E_INVALIDARG;
+}
+
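+// Example (sketch): building a 1.1 description with the CD3DX12 helpers above and letting this
+// function down-convert when the device only reports D3D_ROOT_SIGNATURE_VERSION_1_0 (the
+// supported version is normally queried via CheckFeatureSupport; names are illustrative only):
+//
+//     CD3DX12_DESCRIPTOR_RANGE1 range;
+//     range.Init( D3D12_DESCRIPTOR_RANGE_TYPE_SRV, 1, 0 );
+//     CD3DX12_ROOT_PARAMETER1 param;
+//     param.InitAsDescriptorTable( 1, &range, D3D12_SHADER_VISIBILITY_PIXEL );
+//     CD3DX12_VERSIONED_ROOT_SIGNATURE_DESC rootDesc;
+//     rootDesc.Init_1_1( 1, &param, 0, nullptr, D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT );
+//     ID3DBlob *pBlob = nullptr, *pErrorBlob = nullptr;
+//     D3DX12SerializeVersionedRootSignature( &rootDesc, D3D_ROOT_SIGNATURE_VERSION_1_0, &pBlob, &pErrorBlob );
+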
+//------------------------------------------------------------------------------------------------
+struct CD3DX12_RT_FORMAT_ARRAY : public D3D12_RT_FORMAT_ARRAY
+{
+ CD3DX12_RT_FORMAT_ARRAY() {}
+ explicit CD3DX12_RT_FORMAT_ARRAY(const D3D12_RT_FORMAT_ARRAY& o)
+ : D3D12_RT_FORMAT_ARRAY(o)
+ {}
+ explicit CD3DX12_RT_FORMAT_ARRAY(const DXGI_FORMAT* pFormats, UINT NumFormats)
+ {
+ NumRenderTargets = NumFormats;
+ memcpy(RTFormats, pFormats, sizeof(RTFormats));
+ // assumes ARRAY_SIZE(pFormats) == ARRAY_SIZE(RTFormats)
+ }
+ operator const D3D12_RT_FORMAT_ARRAY&() const { return *this; }
+};
+
+//------------------------------------------------------------------------------------------------
+// Pipeline State Stream Helpers
+//------------------------------------------------------------------------------------------------
+
+//------------------------------------------------------------------------------------------------
+// Stream Subobjects, i.e. elements of a stream
+
+template <typename InnerStructType, D3D12_PIPELINE_STATE_SUBOBJECT_TYPE Type, typename DefaultArg = InnerStructType>
+class alignas(void*) CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT
+{
+private:
+ D3D12_PIPELINE_STATE_SUBOBJECT_TYPE _Type;
+ InnerStructType _Inner;
+public:
+ CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT() : _Type(Type), _Inner(DefaultArg()) {}
+ CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT(InnerStructType const& i) : _Type(Type), _Inner(i) {}
+ CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT& operator=(InnerStructType const& i) { _Inner = i; return *this; }
+ operator InnerStructType() const { return _Inner; }
+};
+typedef CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT< D3D12_PIPELINE_STATE_FLAGS, D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_FLAGS> CD3DX12_PIPELINE_STATE_STREAM_FLAGS;
+typedef CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT< UINT, D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_NODE_MASK> CD3DX12_PIPELINE_STATE_STREAM_NODE_MASK;
+typedef CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT< ID3D12RootSignature*, D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_ROOT_SIGNATURE> CD3DX12_PIPELINE_STATE_STREAM_ROOT_SIGNATURE;
+typedef CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT< D3D12_INPUT_LAYOUT_DESC, D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_INPUT_LAYOUT> CD3DX12_PIPELINE_STATE_STREAM_INPUT_LAYOUT;
+typedef CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT< D3D12_INDEX_BUFFER_STRIP_CUT_VALUE, D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_IB_STRIP_CUT_VALUE> CD3DX12_PIPELINE_STATE_STREAM_IB_STRIP_CUT_VALUE;
+typedef CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT< D3D12_PRIMITIVE_TOPOLOGY_TYPE, D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_PRIMITIVE_TOPOLOGY> CD3DX12_PIPELINE_STATE_STREAM_PRIMITIVE_TOPOLOGY;
+typedef CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT< D3D12_SHADER_BYTECODE, D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_VS> CD3DX12_PIPELINE_STATE_STREAM_VS;
+typedef CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT< D3D12_SHADER_BYTECODE, D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_GS> CD3DX12_PIPELINE_STATE_STREAM_GS;
+typedef CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT< D3D12_STREAM_OUTPUT_DESC, D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_STREAM_OUTPUT> CD3DX12_PIPELINE_STATE_STREAM_STREAM_OUTPUT;
+typedef CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT< D3D12_SHADER_BYTECODE, D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_HS> CD3DX12_PIPELINE_STATE_STREAM_HS;
+typedef CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT< D3D12_SHADER_BYTECODE, D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_DS> CD3DX12_PIPELINE_STATE_STREAM_DS;
+typedef CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT< D3D12_SHADER_BYTECODE, D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_PS> CD3DX12_PIPELINE_STATE_STREAM_PS;
+typedef CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT< D3D12_SHADER_BYTECODE, D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_CS> CD3DX12_PIPELINE_STATE_STREAM_CS;
+typedef CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT< CD3DX12_BLEND_DESC, D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_BLEND, CD3DX12_DEFAULT> CD3DX12_PIPELINE_STATE_STREAM_BLEND_DESC;
+typedef CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT< CD3DX12_DEPTH_STENCIL_DESC, D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_DEPTH_STENCIL, CD3DX12_DEFAULT> CD3DX12_PIPELINE_STATE_STREAM_DEPTH_STENCIL;
+typedef CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT< CD3DX12_DEPTH_STENCIL_DESC1, D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_DEPTH_STENCIL1, CD3DX12_DEFAULT> CD3DX12_PIPELINE_STATE_STREAM_DEPTH_STENCIL1;
+typedef CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT< DXGI_FORMAT, D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_DEPTH_STENCIL_FORMAT> CD3DX12_PIPELINE_STATE_STREAM_DEPTH_STENCIL_FORMAT;
+typedef CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT< CD3DX12_RASTERIZER_DESC, D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_RASTERIZER, CD3DX12_DEFAULT> CD3DX12_PIPELINE_STATE_STREAM_RASTERIZER;
+typedef CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT< D3D12_RT_FORMAT_ARRAY, D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_RENDER_TARGET_FORMATS> CD3DX12_PIPELINE_STATE_STREAM_RENDER_TARGET_FORMATS;
+typedef CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT< DXGI_SAMPLE_DESC, D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_SAMPLE_DESC> CD3DX12_PIPELINE_STATE_STREAM_SAMPLE_DESC;
+typedef CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT< UINT, D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_SAMPLE_MASK> CD3DX12_PIPELINE_STATE_STREAM_SAMPLE_MASK;
+typedef CD3DX12_PIPELINE_STATE_STREAM_SUBOBJECT< D3D12_CACHED_PIPELINE_STATE, D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_CACHED_PSO> CD3DX12_PIPELINE_STATE_STREAM_CACHED_PSO;
+
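+// Example (sketch): the subobject wrappers above can be composed into an arbitrary struct and
+// handed to ID3D12Device2::CreatePipelineState; subobjects left out of the struct take their
+// defaults. This is a minimal sketch (a real graphics PSO typically also sets input layout,
+// topology and render-target formats); pRootSignature, pVSBlob, pPSBlob, pDevice2 and
+// pPipelineState are illustrative names:
+//
+//     struct MyStream
+//     {
+//         CD3DX12_PIPELINE_STATE_STREAM_ROOT_SIGNATURE RootSignature;
+//         CD3DX12_PIPELINE_STATE_STREAM_VS VS;
+//         CD3DX12_PIPELINE_STATE_STREAM_PS PS;
+//     } stream;
+//     stream.RootSignature = pRootSignature;
+//     stream.VS = CD3DX12_SHADER_BYTECODE( pVSBlob );
+//     stream.PS = CD3DX12_SHADER_BYTECODE( pPSBlob );
+//     D3D12_PIPELINE_STATE_STREAM_DESC streamDesc = { sizeof( stream ), &stream };
+//     pDevice2->CreatePipelineState( &streamDesc, IID_PPV_ARGS( &pPipelineState ) );
+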
+//------------------------------------------------------------------------------------------------
+// Stream Parser Helpers
+
+struct ID3DX12PipelineParserCallbacks
+{
+ // Subobject Callbacks
+ virtual void FlagsCb(D3D12_PIPELINE_STATE_FLAGS) {}
+ virtual void NodeMaskCb(UINT) {}
+ virtual void RootSignatureCb(ID3D12RootSignature*) {}
+ virtual void InputLayoutCb(const D3D12_INPUT_LAYOUT_DESC&) {}
+ virtual void IBStripCutValueCb(D3D12_INDEX_BUFFER_STRIP_CUT_VALUE) {}
+ virtual void PrimitiveTopologyTypeCb(D3D12_PRIMITIVE_TOPOLOGY_TYPE) {}
+ virtual void VSCb(const D3D12_SHADER_BYTECODE&) {}
+ virtual void GSCb(const D3D12_SHADER_BYTECODE&) {}
+ virtual void StreamOutputCb(const D3D12_STREAM_OUTPUT_DESC&) {}
+ virtual void HSCb(const D3D12_SHADER_BYTECODE&) {}
+ virtual void DSCb(const D3D12_SHADER_BYTECODE&) {}
+ virtual void PSCb(const D3D12_SHADER_BYTECODE&) {}
+ virtual void CSCb(const D3D12_SHADER_BYTECODE&) {}
+ virtual void BlendStateCb(const D3D12_BLEND_DESC&) {}
+ virtual void DepthStencilStateCb(const D3D12_DEPTH_STENCIL_DESC&) {}
+ virtual void DepthStencilState1Cb(const D3D12_DEPTH_STENCIL_DESC1&) {}
+ virtual void DSVFormatCb(DXGI_FORMAT) {}
+ virtual void RasterizerStateCb(const D3D12_RASTERIZER_DESC&) {}
+ virtual void RTVFormatsCb(const D3D12_RT_FORMAT_ARRAY&) {}
+ virtual void SampleDescCb(const DXGI_SAMPLE_DESC&) {}
+ virtual void SampleMaskCb(UINT) {}
+ virtual void CachedPSOCb(const D3D12_CACHED_PIPELINE_STATE&) {}
+
+ // Error Callbacks
+ virtual void ErrorBadInputParameter(UINT /*ParameterIndex*/) {}
+ virtual void ErrorDuplicateSubobject(D3D12_PIPELINE_STATE_SUBOBJECT_TYPE /*DuplicateType*/) {}
+ virtual void ErrorUnknownSubobject(UINT /*UnknownTypeValue*/) {}
+
+};
+
+struct CD3DX12_PIPELINE_STATE_STREAM
+{
+ CD3DX12_PIPELINE_STATE_STREAM() {}
+ CD3DX12_PIPELINE_STATE_STREAM(const D3D12_GRAPHICS_PIPELINE_STATE_DESC& Desc)
+ : Flags(Desc.Flags)
+ , NodeMask(Desc.NodeMask)
+ , pRootSignature(Desc.pRootSignature)
+ , InputLayout(Desc.InputLayout)
+ , IBStripCutValue(Desc.IBStripCutValue)
+ , PrimitiveTopologyType(Desc.PrimitiveTopologyType)
+ , VS(Desc.VS)
+ , GS(Desc.GS)
+ , StreamOutput(Desc.StreamOutput)
+ , HS(Desc.HS)
+ , DS(Desc.DS)
+ , PS(Desc.PS)
+ , BlendState(CD3DX12_BLEND_DESC(Desc.BlendState))
+ , DepthStencilState(CD3DX12_DEPTH_STENCIL_DESC1(Desc.DepthStencilState))
+ , DSVFormat(Desc.DSVFormat)
+ , RasterizerState(CD3DX12_RASTERIZER_DESC(Desc.RasterizerState))
+ , RTVFormats(CD3DX12_RT_FORMAT_ARRAY(Desc.RTVFormats, Desc.NumRenderTargets))
+ , SampleDesc(Desc.SampleDesc)
+ , SampleMask(Desc.SampleMask)
+ , CachedPSO(Desc.CachedPSO)
+ {}
+ CD3DX12_PIPELINE_STATE_STREAM(const D3D12_COMPUTE_PIPELINE_STATE_DESC& Desc)
+ : Flags(Desc.Flags)
+ , NodeMask(Desc.NodeMask)
+ , pRootSignature(Desc.pRootSignature)
+ , CS(CD3DX12_SHADER_BYTECODE(Desc.CS))
+ , CachedPSO(Desc.CachedPSO)
+ {}
+ CD3DX12_PIPELINE_STATE_STREAM_FLAGS Flags;
+ CD3DX12_PIPELINE_STATE_STREAM_NODE_MASK NodeMask;
+ CD3DX12_PIPELINE_STATE_STREAM_ROOT_SIGNATURE pRootSignature;
+ CD3DX12_PIPELINE_STATE_STREAM_INPUT_LAYOUT InputLayout;
+ CD3DX12_PIPELINE_STATE_STREAM_IB_STRIP_CUT_VALUE IBStripCutValue;
+ CD3DX12_PIPELINE_STATE_STREAM_PRIMITIVE_TOPOLOGY PrimitiveTopologyType;
+ CD3DX12_PIPELINE_STATE_STREAM_VS VS;
+ CD3DX12_PIPELINE_STATE_STREAM_GS GS;
+ CD3DX12_PIPELINE_STATE_STREAM_STREAM_OUTPUT StreamOutput;
+ CD3DX12_PIPELINE_STATE_STREAM_HS HS;
+ CD3DX12_PIPELINE_STATE_STREAM_DS DS;
+ CD3DX12_PIPELINE_STATE_STREAM_PS PS;
+ CD3DX12_PIPELINE_STATE_STREAM_CS CS;
+ CD3DX12_PIPELINE_STATE_STREAM_BLEND_DESC BlendState;
+ CD3DX12_PIPELINE_STATE_STREAM_DEPTH_STENCIL1 DepthStencilState;
+ CD3DX12_PIPELINE_STATE_STREAM_DEPTH_STENCIL_FORMAT DSVFormat;
+ CD3DX12_PIPELINE_STATE_STREAM_RASTERIZER RasterizerState;
+ CD3DX12_PIPELINE_STATE_STREAM_RENDER_TARGET_FORMATS RTVFormats;
+ CD3DX12_PIPELINE_STATE_STREAM_SAMPLE_DESC SampleDesc;
+ CD3DX12_PIPELINE_STATE_STREAM_SAMPLE_MASK SampleMask;
+ CD3DX12_PIPELINE_STATE_STREAM_CACHED_PSO CachedPSO;
+ D3D12_GRAPHICS_PIPELINE_STATE_DESC GraphicsDescV0() const
+ {
+ D3D12_GRAPHICS_PIPELINE_STATE_DESC D;
+ D.Flags = this->Flags;
+ D.NodeMask = this->NodeMask;
+ D.pRootSignature = this->pRootSignature;
+ D.InputLayout = this->InputLayout;
+ D.IBStripCutValue = this->IBStripCutValue;
+ D.PrimitiveTopologyType = this->PrimitiveTopologyType;
+ D.VS = this->VS;
+ D.GS = this->GS;
+ D.StreamOutput = this->StreamOutput;
+ D.HS = this->HS;
+ D.DS = this->DS;
+ D.PS = this->PS;
+ D.BlendState = this->BlendState;
+ D.DepthStencilState = CD3DX12_DEPTH_STENCIL_DESC1(D3D12_DEPTH_STENCIL_DESC1(this->DepthStencilState));
+ D.DSVFormat = this->DSVFormat;
+ D.RasterizerState = this->RasterizerState;
+ D.NumRenderTargets = D3D12_RT_FORMAT_ARRAY(this->RTVFormats).NumRenderTargets;
+ memcpy(D.RTVFormats, D3D12_RT_FORMAT_ARRAY(this->RTVFormats).RTFormats, sizeof(D.RTVFormats));
+ D.SampleDesc = this->SampleDesc;
+ D.SampleMask = this->SampleMask;
+ D.CachedPSO = this->CachedPSO;
+ return D;
+ }
+ D3D12_COMPUTE_PIPELINE_STATE_DESC ComputeDescV0() const
+ {
+ D3D12_COMPUTE_PIPELINE_STATE_DESC D;
+ D.Flags = this->Flags;
+ D.NodeMask = this->NodeMask;
+ D.pRootSignature = this->pRootSignature;
+ D.CS = this->CS;
+ D.CachedPSO = this->CachedPSO;
+ return D;
+ }
+};
+
+struct CD3DX12_PIPELINE_STATE_STREAM_PARSE_HELPER : public ID3DX12PipelineParserCallbacks
+{
+ CD3DX12_PIPELINE_STATE_STREAM PipelineStream;
+
+ // ID3DX12PipelineParserCallbacks
+ void FlagsCb(D3D12_PIPELINE_STATE_FLAGS Flags) {PipelineStream.Flags = Flags;}
+ void NodeMaskCb(UINT NodeMask) {PipelineStream.NodeMask = NodeMask;}
+ void RootSignatureCb(ID3D12RootSignature* pRootSignature) {PipelineStream.pRootSignature = pRootSignature;}
+ void InputLayoutCb(const D3D12_INPUT_LAYOUT_DESC& InputLayout) {PipelineStream.InputLayout = InputLayout;}
+ void IBStripCutValueCb(D3D12_INDEX_BUFFER_STRIP_CUT_VALUE IBStripCutValue) {PipelineStream.IBStripCutValue = IBStripCutValue;}
+ void PrimitiveTopologyTypeCb(D3D12_PRIMITIVE_TOPOLOGY_TYPE PrimitiveTopologyType) {PipelineStream.PrimitiveTopologyType = PrimitiveTopologyType;}
+ void VSCb(const D3D12_SHADER_BYTECODE& VS) {PipelineStream.VS = VS;}
+ void GSCb(const D3D12_SHADER_BYTECODE& GS) {PipelineStream.GS = GS;}
+ void StreamOutputCb(const D3D12_STREAM_OUTPUT_DESC& StreamOutput) {PipelineStream.StreamOutput = StreamOutput;}
+ void HSCb(const D3D12_SHADER_BYTECODE& HS) {PipelineStream.HS = HS;}
+ void DSCb(const D3D12_SHADER_BYTECODE& DS) {PipelineStream.DS = DS;}
+ void PSCb(const D3D12_SHADER_BYTECODE& PS) {PipelineStream.PS = PS;}
+ void CSCb(const D3D12_SHADER_BYTECODE& CS) {PipelineStream.CS = CS;}
+ void BlendStateCb(const D3D12_BLEND_DESC& BlendState) {PipelineStream.BlendState = CD3DX12_BLEND_DESC(BlendState);}
+ void DepthStencilStateCb(const D3D12_DEPTH_STENCIL_DESC& DepthStencilState) {PipelineStream.DepthStencilState = CD3DX12_DEPTH_STENCIL_DESC1(DepthStencilState);}
+ void DepthStencilState1Cb(const D3D12_DEPTH_STENCIL_DESC1& DepthStencilState) {PipelineStream.DepthStencilState = CD3DX12_DEPTH_STENCIL_DESC1(DepthStencilState);}
+ void DSVFormatCb(DXGI_FORMAT DSVFormat) {PipelineStream.DSVFormat = DSVFormat;}
+ void RasterizerStateCb(const D3D12_RASTERIZER_DESC& RasterizerState) {PipelineStream.RasterizerState = CD3DX12_RASTERIZER_DESC(RasterizerState);}
+ void RTVFormatsCb(const D3D12_RT_FORMAT_ARRAY& RTVFormats) {PipelineStream.RTVFormats = RTVFormats;}
+ void SampleDescCb(const DXGI_SAMPLE_DESC& SampleDesc) {PipelineStream.SampleDesc = SampleDesc;}
+ void SampleMaskCb(UINT SampleMask) {PipelineStream.SampleMask = SampleMask;}
+ void CachedPSOCb(const D3D12_CACHED_PIPELINE_STATE& CachedPSO) {PipelineStream.CachedPSO = CachedPSO;}
+ void ErrorBadInputParameter(UINT) {}
+ void ErrorDuplicateSubobject(D3D12_PIPELINE_STATE_SUBOBJECT_TYPE) {}
+ void ErrorUnknownSubobject(UINT) {}
+};
+
+inline D3D12_PIPELINE_STATE_SUBOBJECT_TYPE D3DX12GetBaseSubobjectType(D3D12_PIPELINE_STATE_SUBOBJECT_TYPE SubobjectType)
+{
+ switch (SubobjectType)
+ {
+ case D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_DEPTH_STENCIL1:
+ return D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_DEPTH_STENCIL;
+ default:
+ return SubobjectType;
+ }
+}
+
+inline HRESULT D3DX12ParsePipelineStream(const D3D12_PIPELINE_STATE_STREAM_DESC& Desc, ID3DX12PipelineParserCallbacks* pCallbacks)
+{
+    if (pCallbacks == nullptr)
+    {
+        // Cannot report the problem through a null callback interface
+        return E_INVALIDARG;
+    }
+
+    if (Desc.SizeInBytes == 0 || Desc.pPipelineStateSubobjectStream == nullptr)
+    {
+        pCallbacks->ErrorBadInputParameter(1); // first parameter issue
+        return E_INVALIDARG;
+    }
+
+ bool SubobjectSeen[D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_MAX_VALID] = {0};
+ for (SIZE_T CurOffset = 0, SizeOfSubobject = 0; CurOffset < Desc.SizeInBytes; CurOffset += SizeOfSubobject)
+ {
+ BYTE* pStream = static_cast<BYTE*>(Desc.pPipelineStateSubobjectStream)+CurOffset;
+ auto SubobjectType = *reinterpret_cast<D3D12_PIPELINE_STATE_SUBOBJECT_TYPE*>(pStream);
+ if (SubobjectType >= D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_MAX_VALID)
+ {
+ pCallbacks->ErrorUnknownSubobject(SubobjectType);
+ return E_INVALIDARG;
+ }
+ if (SubobjectSeen[D3DX12GetBaseSubobjectType(SubobjectType)])
+ {
+ pCallbacks->ErrorDuplicateSubobject(SubobjectType);
+ return E_INVALIDARG; // disallow subobject duplicates in a stream
+ }
+        SubobjectSeen[D3DX12GetBaseSubobjectType(SubobjectType)] = true;
+ switch (SubobjectType)
+ {
+ case D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_ROOT_SIGNATURE:
+ pCallbacks->RootSignatureCb(*reinterpret_cast<decltype(CD3DX12_PIPELINE_STATE_STREAM::pRootSignature)*>(pStream));
+ SizeOfSubobject = sizeof(CD3DX12_PIPELINE_STATE_STREAM::pRootSignature);
+ break;
+ case D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_VS:
+ pCallbacks->VSCb(*reinterpret_cast<decltype(CD3DX12_PIPELINE_STATE_STREAM::VS)*>(pStream));
+ SizeOfSubobject = sizeof(CD3DX12_PIPELINE_STATE_STREAM::VS);
+ break;
+ case D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_PS:
+ pCallbacks->PSCb(*reinterpret_cast<decltype(CD3DX12_PIPELINE_STATE_STREAM::PS)*>(pStream));
+ SizeOfSubobject = sizeof(CD3DX12_PIPELINE_STATE_STREAM::PS);
+ break;
+ case D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_DS:
+ pCallbacks->DSCb(*reinterpret_cast<decltype(CD3DX12_PIPELINE_STATE_STREAM::DS)*>(pStream));
+ SizeOfSubobject = sizeof(CD3DX12_PIPELINE_STATE_STREAM::DS);
+ break;
+ case D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_HS:
+ pCallbacks->HSCb(*reinterpret_cast<decltype(CD3DX12_PIPELINE_STATE_STREAM::HS)*>(pStream));
+ SizeOfSubobject = sizeof(CD3DX12_PIPELINE_STATE_STREAM::HS);
+ break;
+ case D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_GS:
+ pCallbacks->GSCb(*reinterpret_cast<decltype(CD3DX12_PIPELINE_STATE_STREAM::GS)*>(pStream));
+ SizeOfSubobject = sizeof(CD3DX12_PIPELINE_STATE_STREAM::GS);
+ break;
+ case D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_CS:
+ pCallbacks->CSCb(*reinterpret_cast<decltype(CD3DX12_PIPELINE_STATE_STREAM::CS)*>(pStream));
+ SizeOfSubobject = sizeof(CD3DX12_PIPELINE_STATE_STREAM::CS);
+ break;
+ case D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_STREAM_OUTPUT:
+ pCallbacks->StreamOutputCb(*reinterpret_cast<decltype(CD3DX12_PIPELINE_STATE_STREAM::StreamOutput)*>(pStream));
+ SizeOfSubobject = sizeof(CD3DX12_PIPELINE_STATE_STREAM::StreamOutput);
+ break;
+ case D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_BLEND:
+ pCallbacks->BlendStateCb(*reinterpret_cast<decltype(CD3DX12_PIPELINE_STATE_STREAM::BlendState)*>(pStream));
+ SizeOfSubobject = sizeof(CD3DX12_PIPELINE_STATE_STREAM::BlendState);
+ break;
+ case D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_SAMPLE_MASK:
+ pCallbacks->SampleMaskCb(*reinterpret_cast<decltype(CD3DX12_PIPELINE_STATE_STREAM::SampleMask)*>(pStream));
+ SizeOfSubobject = sizeof(CD3DX12_PIPELINE_STATE_STREAM::SampleMask);
+ break;
+ case D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_RASTERIZER:
+ pCallbacks->RasterizerStateCb(*reinterpret_cast<decltype(CD3DX12_PIPELINE_STATE_STREAM::RasterizerState)*>(pStream));
+ SizeOfSubobject = sizeof(CD3DX12_PIPELINE_STATE_STREAM::RasterizerState);
+ break;
+ case D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_DEPTH_STENCIL:
+ pCallbacks->DepthStencilStateCb(*reinterpret_cast<CD3DX12_PIPELINE_STATE_STREAM_DEPTH_STENCIL*>(pStream));
+ SizeOfSubobject = sizeof(CD3DX12_PIPELINE_STATE_STREAM_DEPTH_STENCIL);
+ break;
+ case D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_DEPTH_STENCIL1:
+ pCallbacks->DepthStencilState1Cb(*reinterpret_cast<decltype(CD3DX12_PIPELINE_STATE_STREAM::DepthStencilState)*>(pStream));
+ SizeOfSubobject = sizeof(CD3DX12_PIPELINE_STATE_STREAM::DepthStencilState);
+ break;
+ case D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_INPUT_LAYOUT:
+ pCallbacks->InputLayoutCb(*reinterpret_cast<decltype(CD3DX12_PIPELINE_STATE_STREAM::InputLayout)*>(pStream));
+ SizeOfSubobject = sizeof(CD3DX12_PIPELINE_STATE_STREAM::InputLayout);
+ break;
+ case D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_IB_STRIP_CUT_VALUE:
+ pCallbacks->IBStripCutValueCb(*reinterpret_cast<decltype(CD3DX12_PIPELINE_STATE_STREAM::IBStripCutValue)*>(pStream));
+ SizeOfSubobject = sizeof(CD3DX12_PIPELINE_STATE_STREAM::IBStripCutValue);
+ break;
+ case D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_PRIMITIVE_TOPOLOGY:
+ pCallbacks->PrimitiveTopologyTypeCb(*reinterpret_cast<decltype(CD3DX12_PIPELINE_STATE_STREAM::PrimitiveTopologyType)*>(pStream));
+ SizeOfSubobject = sizeof(CD3DX12_PIPELINE_STATE_STREAM::PrimitiveTopologyType);
+ break;
+ case D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_RENDER_TARGET_FORMATS:
+ pCallbacks->RTVFormatsCb(*reinterpret_cast<decltype(CD3DX12_PIPELINE_STATE_STREAM::RTVFormats)*>(pStream));
+ SizeOfSubobject = sizeof(CD3DX12_PIPELINE_STATE_STREAM::RTVFormats);
+ break;
+ case D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_DEPTH_STENCIL_FORMAT:
+ pCallbacks->DSVFormatCb(*reinterpret_cast<decltype(CD3DX12_PIPELINE_STATE_STREAM::DSVFormat)*>(pStream));
+ SizeOfSubobject = sizeof(CD3DX12_PIPELINE_STATE_STREAM::DSVFormat);
+ break;
+ case D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_SAMPLE_DESC:
+ pCallbacks->SampleDescCb(*reinterpret_cast<decltype(CD3DX12_PIPELINE_STATE_STREAM::SampleDesc)*>(pStream));
+ SizeOfSubobject = sizeof(CD3DX12_PIPELINE_STATE_STREAM::SampleDesc);
+ break;
+ case D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_NODE_MASK:
+ pCallbacks->NodeMaskCb(*reinterpret_cast<decltype(CD3DX12_PIPELINE_STATE_STREAM::NodeMask)*>(pStream));
+ SizeOfSubobject = sizeof(CD3DX12_PIPELINE_STATE_STREAM::NodeMask);
+ break;
+ case D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_CACHED_PSO:
+ pCallbacks->CachedPSOCb(*reinterpret_cast<decltype(CD3DX12_PIPELINE_STATE_STREAM::CachedPSO)*>(pStream));
+ SizeOfSubobject = sizeof(CD3DX12_PIPELINE_STATE_STREAM::CachedPSO);
+ break;
+ case D3D12_PIPELINE_STATE_SUBOBJECT_TYPE_FLAGS:
+ pCallbacks->FlagsCb(*reinterpret_cast<decltype(CD3DX12_PIPELINE_STATE_STREAM::Flags)*>(pStream));
+ SizeOfSubobject = sizeof(CD3DX12_PIPELINE_STATE_STREAM::Flags);
+ break;
+ default:
+ pCallbacks->ErrorUnknownSubobject(SubobjectType);
+ return E_INVALIDARG;
+ break;
+ }
+ }
+
+ return S_OK;
+}
+
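+// Example (sketch): round-tripping a stream through the parse helper above to recover a
+// classic graphics descriptor (streamDesc stands for a valid D3D12_PIPELINE_STATE_STREAM_DESC):
+//
+//     CD3DX12_PIPELINE_STATE_STREAM_PARSE_HELPER parseHelper;
+//     if ( SUCCEEDED( D3DX12ParsePipelineStream( streamDesc, &parseHelper ) ) )
+//     {
+//         D3D12_GRAPHICS_PIPELINE_STATE_DESC desc = parseHelper.PipelineStream.GraphicsDescV0();
+//     }
+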
+
+#endif // defined( __cplusplus )
+
+#endif //__D3DX12_H__
+
+
+
diff --git a/samples/hellovr_dx12/hellovr_dx12.vcxproj b/samples/hellovr_dx12/hellovr_dx12.vcxproj
new file mode 100644
index 0000000..ca9661c
--- /dev/null
+++ b/samples/hellovr_dx12/hellovr_dx12.vcxproj
@@ -0,0 +1,124 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup Label="ProjectConfigurations">
+ <ProjectConfiguration Include="Debug|Win32">
+ <Configuration>Debug</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|Win32">
+ <Configuration>Release</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ </ItemGroup>
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{FF19F6AE-67E0-4585-9D4A-038CB6E8DD09}</ProjectGuid>
+ <Keyword>Win32Proj</Keyword>
+ <RootNamespace>hellovr_dx12</RootNamespace>
+ <WindowsTargetPlatformVersion>10.0.15063.0</WindowsTargetPlatformVersion>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>true</UseDebugLibraries>
+ <CharacterSet>Unicode</CharacterSet>
+ <PlatformToolset>v140</PlatformToolset>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>false</UseDebugLibraries>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ <CharacterSet>Unicode</CharacterSet>
+ <PlatformToolset>v140</PlatformToolset>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <LinkIncremental>true</LinkIncremental>
+ <OutDir>..\bin\win32\</OutDir>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <LinkIncremental>false</LinkIncremental>
+ <OutDir>..\bin\win32\</OutDir>
+ </PropertyGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>WIN32;_CRT_NONSTDC_NO_DEPRECATE;_DEBUG;_WINDOWS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>..;../../headers;../thirdparty/sdl2-2.0.3/include;%(AdditionalIncludeDirectories);</AdditionalIncludeDirectories>
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
+ </ClCompile>
+ <Link>
+ <SubSystem>Windows</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <AdditionalDependencies>d3d12.lib;dxgi.lib;d3dcompiler.lib;openvr_api.lib;SDL2.lib;SDL2main.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>..\thirdparty\sdl2-2.0.3\bin\win32;..\..\lib\win32;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>WIN32;_CRT_NONSTDC_NO_DEPRECATE;NDEBUG;_WINDOWS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ <AdditionalIncludeDirectories>..;../../headers;../thirdparty/sdl2-2.0.3/include;%(AdditionalIncludeDirectories);</AdditionalIncludeDirectories>
+ </ClCompile>
+ <Link>
+ <SubSystem>Windows</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ <AdditionalDependencies>d3d12.lib;dxgi.lib;d3dcompiler.lib;openvr_api.lib;SDL2.lib;SDL2main.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>..\thirdparty\sdl2-2.0.3\bin\win32;..\..\lib\win32;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClCompile Include="..\shared\lodepng.cpp" />
+ <ClCompile Include="..\shared\Matrices.cpp" />
+ <ClCompile Include="..\shared\pathtools.cpp" />
+ <ClCompile Include="..\shared\strtools.cpp" />
+ <ClCompile Include="hellovr_dx12_main.cpp" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\shared\lodepng.h" />
+ <ClInclude Include="..\shared\Matrices.h" />
+ <ClInclude Include="..\shared\pathtools.h" />
+ <ClInclude Include="..\shared\strtools.h" />
+ <ClInclude Include="..\shared\Vectors.h" />
+ </ItemGroup>
+ <ItemGroup>
+ <FxCompile Include="..\bin\shaders\axes.hlsl">
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </FxCompile>
+ <FxCompile Include="..\bin\shaders\companion.hlsl">
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </FxCompile>
+ <FxCompile Include="..\bin\shaders\rendermodel.hlsl">
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </FxCompile>
+ <FxCompile Include="..\bin\shaders\scene.hlsl">
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </FxCompile>
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
+</Project>
\ No newline at end of file
diff --git a/samples/hellovr_dx12/hellovr_dx12.vcxproj.filters b/samples/hellovr_dx12/hellovr_dx12.vcxproj.filters
new file mode 100644
index 0000000..af9cd60
--- /dev/null
+++ b/samples/hellovr_dx12/hellovr_dx12.vcxproj.filters
@@ -0,0 +1,77 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup>
+ <Filter Include="Source Files">
+ <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
+ <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
+ </Filter>
+ <Filter Include="Header Files">
+ <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
+ <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>
+ </Filter>
+ <Filter Include="Resource Files">
+ <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
+ <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
+ </Filter>
+ <Filter Include="Link Libraries">
+ <UniqueIdentifier>{23064a78-3bcb-49ae-8c90-ed419518885d}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="Shared">
+ <UniqueIdentifier>{8cca1fa3-575c-4e0f-acae-7d4d800be358}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="Assets">
+ <UniqueIdentifier>{f6e7efd6-eb98-43a4-a7e2-d9b78db9bb82}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="Assets\Shaders">
+ <UniqueIdentifier>{ebda1bf9-2fbb-43c3-b649-ab1812c81e5b}</UniqueIdentifier>
+ </Filter>
+ </ItemGroup>
+ <ItemGroup>
+ <ClCompile Include="..\shared\lodepng.cpp">
+ <Filter>Shared</Filter>
+ </ClCompile>
+ <ClCompile Include="..\shared\Matrices.cpp">
+ <Filter>Shared</Filter>
+ </ClCompile>
+ <ClCompile Include="..\shared\pathtools.cpp">
+ <Filter>Shared</Filter>
+ </ClCompile>
+ <ClCompile Include="..\shared\strtools.cpp">
+ <Filter>Shared</Filter>
+ </ClCompile>
+ <ClCompile Include="hellovr_dx12_main.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\shared\lodepng.h">
+ <Filter>Shared</Filter>
+ </ClInclude>
+ <ClInclude Include="..\shared\Matrices.h">
+ <Filter>Shared</Filter>
+ </ClInclude>
+ <ClInclude Include="..\shared\Vectors.h">
+ <Filter>Shared</Filter>
+ </ClInclude>
+ <ClInclude Include="..\shared\pathtools.h">
+ <Filter>Shared</Filter>
+ </ClInclude>
+ <ClInclude Include="..\shared\strtools.h">
+ <Filter>Shared</Filter>
+ </ClInclude>
+ </ItemGroup>
+ <ItemGroup>
+ <FxCompile Include="..\bin\shaders\scene.hlsl">
+ <Filter>Assets\Shaders</Filter>
+ </FxCompile>
+ <FxCompile Include="..\bin\shaders\companion.hlsl">
+ <Filter>Assets\Shaders</Filter>
+ </FxCompile>
+ <FxCompile Include="..\bin\shaders\axes.hlsl">
+ <Filter>Assets\Shaders</Filter>
+ </FxCompile>
+ <FxCompile Include="..\bin\shaders\rendermodel.hlsl">
+ <Filter>Assets\Shaders</Filter>
+ </FxCompile>
+ </ItemGroup>
+</Project>
\ No newline at end of file
diff --git a/samples/hellovr_dx12/hellovr_dx12_main.cpp b/samples/hellovr_dx12/hellovr_dx12_main.cpp
new file mode 100644
index 0000000..879b88c
--- /dev/null
+++ b/samples/hellovr_dx12/hellovr_dx12_main.cpp
@@ -0,0 +1,2287 @@
+//========= Copyright Valve Corporation ============//
+
+#include <windows.h>
+#include "d3dx12.h"
+#include <d3d12.h>
+#include <dxgi1_4.h>
+#include <wrl.h>
+#include <D3Dcompiler.h>
+#include <SDL.h>
+#include <SDL_syswm.h>
+#include <stdio.h>
+#include <string>
+#include <cstdlib>
+
+#include <openvr.h>
+
+#include "shared/lodepng.h"
+#include "shared/Matrices.h"
+#include "shared/pathtools.h"
+
+using Microsoft::WRL::ComPtr;
+
+void ThreadSleep( unsigned long nMilliseconds )
+{
+ ::Sleep( nMilliseconds );
+}
+
+// Slots in the RenderTargetView descriptor heap
+enum RTVIndex_t
+{
+ RTV_LEFT_EYE = 0,
+ RTV_RIGHT_EYE,
+ RTV_SWAPCHAIN0,
+ RTV_SWAPCHAIN1,
+ NUM_RTVS
+};
+
+// Slots in the ConstantBufferView/ShaderResourceView descriptor heap
+enum CBVSRVIndex_t
+{
+ CBV_LEFT_EYE = 0,
+ CBV_RIGHT_EYE,
+ SRV_LEFT_EYE,
+ SRV_RIGHT_EYE,
+ SRV_TEXTURE_MAP,
+ // Slot for texture in each possible render model
+ SRV_TEXTURE_RENDER_MODEL0,
+ SRV_TEXTURE_RENDER_MODEL1,
+ SRV_TEXTURE_RENDER_MODEL2,
+ SRV_TEXTURE_RENDER_MODEL3,
+ SRV_TEXTURE_RENDER_MODEL4,
+ SRV_TEXTURE_RENDER_MODEL5,
+ SRV_TEXTURE_RENDER_MODEL6,
+ SRV_TEXTURE_RENDER_MODEL7,
+ SRV_TEXTURE_RENDER_MODEL8,
+ SRV_TEXTURE_RENDER_MODEL9,
+ SRV_TEXTURE_RENDER_MODEL10,
+ SRV_TEXTURE_RENDER_MODEL11,
+ SRV_TEXTURE_RENDER_MODEL12,
+ SRV_TEXTURE_RENDER_MODEL13,
+ SRV_TEXTURE_RENDER_MODEL14,
+ SRV_TEXTURE_RENDER_MODEL15,
+ // Slot for transform in each possible rendermodel
+ CBV_LEFT_EYE_RENDER_MODEL0,
+ CBV_LEFT_EYE_RENDER_MODEL1,
+ CBV_LEFT_EYE_RENDER_MODEL2,
+ CBV_LEFT_EYE_RENDER_MODEL3,
+ CBV_LEFT_EYE_RENDER_MODEL4,
+ CBV_LEFT_EYE_RENDER_MODEL5,
+ CBV_LEFT_EYE_RENDER_MODEL6,
+ CBV_LEFT_EYE_RENDER_MODEL7,
+ CBV_LEFT_EYE_RENDER_MODEL8,
+ CBV_LEFT_EYE_RENDER_MODEL9,
+ CBV_LEFT_EYE_RENDER_MODEL10,
+ CBV_LEFT_EYE_RENDER_MODEL11,
+ CBV_LEFT_EYE_RENDER_MODEL12,
+ CBV_LEFT_EYE_RENDER_MODEL13,
+ CBV_LEFT_EYE_RENDER_MODEL14,
+ CBV_LEFT_EYE_RENDER_MODEL15,
+ CBV_RIGHT_EYE_RENDER_MODEL0,
+ CBV_RIGHT_EYE_RENDER_MODEL1,
+ CBV_RIGHT_EYE_RENDER_MODEL2,
+ CBV_RIGHT_EYE_RENDER_MODEL3,
+ CBV_RIGHT_EYE_RENDER_MODEL4,
+ CBV_RIGHT_EYE_RENDER_MODEL5,
+ CBV_RIGHT_EYE_RENDER_MODEL6,
+ CBV_RIGHT_EYE_RENDER_MODEL7,
+ CBV_RIGHT_EYE_RENDER_MODEL8,
+ CBV_RIGHT_EYE_RENDER_MODEL9,
+ CBV_RIGHT_EYE_RENDER_MODEL10,
+ CBV_RIGHT_EYE_RENDER_MODEL11,
+ CBV_RIGHT_EYE_RENDER_MODEL12,
+ CBV_RIGHT_EYE_RENDER_MODEL13,
+ CBV_RIGHT_EYE_RENDER_MODEL14,
+ CBV_RIGHT_EYE_RENDER_MODEL15,
+ NUM_SRV_CBVS
+};
+
+
+class DX12RenderModel
+{
+public:
+ DX12RenderModel( const std::string & sRenderModelName );
+ ~DX12RenderModel();
+
+ bool BInit( ID3D12Device *pDevice, ID3D12GraphicsCommandList *pCommandList, ID3D12DescriptorHeap *pCBVSRVHeap, vr::TrackedDeviceIndex_t unTrackedDeviceIndex, const vr::RenderModel_t & vrModel, const vr::RenderModel_TextureMap_t & vrDiffuseTexture );
+ void Cleanup();
+ void Draw( vr::EVREye nEye, ID3D12GraphicsCommandList *pCommandList, UINT nCBVSRVDescriptorSize, const Matrix4 &matMVP );
+ const std::string & GetName() const { return m_sModelName; }
+
+private:
+ ComPtr< ID3D12Resource > m_pVertexBuffer;
+ D3D12_VERTEX_BUFFER_VIEW m_vertexBufferView;
+ ComPtr< ID3D12Resource > m_pIndexBuffer;
+ D3D12_INDEX_BUFFER_VIEW m_indexBufferView;
+ ComPtr< ID3D12Resource > m_pTexture;
+ ComPtr< ID3D12Resource > m_pTextureUploadHeap;
+ ComPtr< ID3D12Resource > m_pConstantBuffer;
+ UINT8 *m_pConstantBufferData[ 2 ];
+ size_t m_unVertexCount;
+ vr::TrackedDeviceIndex_t m_unTrackedDeviceIndex;
+ ID3D12DescriptorHeap *m_pCBVSRVHeap;
+ std::string m_sModelName;
+};
+
+static bool g_bPrintf = true;
+static const int g_nFrameCount = 2; // Swapchain depth
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//------------------------------------------------------------------------------
+class CMainApplication
+{
+public:
+ CMainApplication( int argc, char *argv[] );
+ virtual ~CMainApplication();
+
+ bool BInit();
+ bool BInitD3D12();
+ bool BInitCompositor();
+
+ void SetupRenderModels();
+
+ void Shutdown();
+
+ void RunMainLoop();
+ bool HandleInput();
+ void ProcessVREvent( const vr::VREvent_t & event );
+ void RenderFrame();
+
+ bool SetupTexturemaps();
+ static void GenMipMapRGBA( const UINT8 *pSrc, UINT8 **ppDst, int nSrcWidth, int nSrcHeight, int *pDstWidthOut, int *pDstHeightOut );
+
+ void SetupScene();
+ void AddCubeToScene( Matrix4 mat, std::vector<float> &vertdata );
+ void AddCubeVertex( float fl0, float fl1, float fl2, float fl3, float fl4, std::vector<float> &vertdata );
+
+ void UpdateControllerAxes();
+
+ bool SetupStereoRenderTargets();
+ void SetupCompanionWindow();
+ void SetupCameras();
+
+ void RenderStereoTargets();
+ void RenderCompanionWindow();
+ void RenderScene( vr::Hmd_Eye nEye );
+
+ Matrix4 GetHMDMatrixProjectionEye( vr::Hmd_Eye nEye );
+ Matrix4 GetHMDMatrixPoseEye( vr::Hmd_Eye nEye );
+ Matrix4 GetCurrentViewProjectionMatrix( vr::Hmd_Eye nEye );
+ void UpdateHMDMatrixPose();
+
+ Matrix4 ConvertSteamVRMatrixToMatrix4( const vr::HmdMatrix34_t &matPose );
+
+ bool CreateAllShaders();
+
+ void SetupRenderModelForTrackedDevice( vr::TrackedDeviceIndex_t unTrackedDeviceIndex );
+ DX12RenderModel *FindOrLoadRenderModel( vr::TrackedDeviceIndex_t unTrackedDeviceIndex, const char *pchRenderModelName );
+
+private:
+ bool m_bDebugD3D12;
+ bool m_bVerbose;
+ bool m_bPerf;
+ bool m_bVblank;
+ int m_nMSAASampleCount;
+ // Optional scaling factor to render with supersampling (defaults off, use -scale)
+ float m_flSuperSampleScale;
+
+ vr::IVRSystem *m_pHMD;
+ vr::IVRRenderModels *m_pRenderModels;
+ std::string m_strDriver;
+ std::string m_strDisplay;
+ vr::TrackedDevicePose_t m_rTrackedDevicePose[ vr::k_unMaxTrackedDeviceCount ];
+ Matrix4 m_rmat4DevicePose[ vr::k_unMaxTrackedDeviceCount ];
+ bool m_rbShowTrackedDevice[ vr::k_unMaxTrackedDeviceCount ];
+
+private: // SDL bookkeeping
+ SDL_Window *m_pCompanionWindow;
+ uint32_t m_nCompanionWindowWidth;
+ uint32_t m_nCompanionWindowHeight;
+
+private:
+ int m_iTrackedControllerCount;
+ int m_iTrackedControllerCount_Last;
+ int m_iValidPoseCount;
+ int m_iValidPoseCount_Last;
+ bool m_bShowCubes;
+
+ std::string m_strPoseClasses; // what classes we saw poses for this frame
+ char m_rDevClassChar[ vr::k_unMaxTrackedDeviceCount ]; // for each device, a character representing its class
+
+ int m_iSceneVolumeWidth;
+ int m_iSceneVolumeHeight;
+ int m_iSceneVolumeDepth;
+ float m_fScaleSpacing;
+ float m_fScale;
+
+ int m_iSceneVolumeInit; // if you want something other than the default 20x20x20
+
+ float m_fNearClip;
+ float m_fFarClip;
+
+ unsigned int m_uiVertcount;
+ unsigned int m_uiCompanionWindowIndexSize;
+
+ // D3D12 members
+ UINT m_nFrameIndex;
+ HANDLE m_fenceEvent;
+ ComPtr< ID3D12Fence > m_pFence;
+ UINT64 m_nFenceValues[ g_nFrameCount ];
+ ComPtr< ID3D12Device > m_pDevice;
+ ComPtr< IDXGISwapChain3 > m_pSwapChain;
+ ComPtr< ID3D12Resource > m_pSwapChainRenderTarget[ g_nFrameCount ];
+ ComPtr< ID3D12CommandQueue > m_pCommandQueue;
+ ComPtr< ID3D12CommandAllocator > m_pCommandAllocators[ g_nFrameCount ];
+ ComPtr< ID3D12GraphicsCommandList > m_pCommandList;
+ ComPtr< ID3D12DescriptorHeap > m_pCBVSRVHeap;
+ ComPtr< ID3D12DescriptorHeap > m_pRTVHeap;
+ ComPtr< ID3D12DescriptorHeap > m_pDSVHeap;
+ ComPtr< ID3D12RootSignature > m_pRootSignature;
+ ComPtr< ID3D12PipelineState > m_pScenePipelineState;
+ ComPtr< ID3D12PipelineState > m_pCompanionPipelineState;
+ ComPtr< ID3D12PipelineState > m_pAxesPipelineState;
+ ComPtr< ID3D12PipelineState > m_pRenderModelPipelineState;
+ ComPtr< ID3D12Resource > m_pSceneConstantBuffer;
+ D3D12_CPU_DESCRIPTOR_HANDLE m_sceneConstantBufferView[ 2 ];
+ UINT8 *m_pSceneConstantBufferData[ 2 ];
+ UINT m_nRTVDescriptorSize;
+ UINT m_nDSVDescriptorSize;
+ UINT m_nCBVSRVDescriptorSize;
+
+ ComPtr< ID3D12Resource > m_pSceneVertexBuffer;
+ D3D12_VERTEX_BUFFER_VIEW m_sceneVertexBufferView;
+ ComPtr< ID3D12Resource > m_pTexture;
+ ComPtr< ID3D12Resource > m_pTextureUploadHeap;
+ D3D12_CPU_DESCRIPTOR_HANDLE m_textureShaderResourceView;
+ ComPtr< ID3D12Resource > m_pCompanionWindowVertexBuffer;
+ D3D12_VERTEX_BUFFER_VIEW m_companionWindowVertexBufferView;
+ ComPtr< ID3D12Resource > m_pCompanionWindowIndexBuffer;
+ D3D12_INDEX_BUFFER_VIEW m_companionWindowIndexBufferView;
+ ComPtr< ID3D12Resource > m_pControllerAxisVertexBuffer;
+ D3D12_VERTEX_BUFFER_VIEW m_controllerAxisVertexBufferView;
+
+
+ unsigned int m_uiControllerVertcount;
+
+ Matrix4 m_mat4HMDPose;
+ Matrix4 m_mat4eyePosLeft;
+ Matrix4 m_mat4eyePosRight;
+
+ Matrix4 m_mat4ProjectionCenter;
+ Matrix4 m_mat4ProjectionLeft;
+ Matrix4 m_mat4ProjectionRight;
+
+ struct VertexDataScene
+ {
+ Vector3 position;
+ Vector2 texCoord;
+ };
+
+ struct VertexDataWindow
+ {
+ Vector2 position;
+ Vector2 texCoord;
+
+ VertexDataWindow( const Vector2 & pos, const Vector2 tex ) : position(pos), texCoord(tex) { }
+ };
+
+ struct FramebufferDesc
+ {
+ ComPtr< ID3D12Resource > m_pTexture;
+ CD3DX12_CPU_DESCRIPTOR_HANDLE m_renderTargetViewHandle;
+ ComPtr< ID3D12Resource > m_pDepthStencil;
+ CD3DX12_CPU_DESCRIPTOR_HANDLE m_depthStencilViewHandle;
+ };
+ FramebufferDesc m_leftEyeDesc;
+ FramebufferDesc m_rightEyeDesc;
+
+ bool CreateFrameBuffer( int nWidth, int nHeight, FramebufferDesc &framebufferDesc, RTVIndex_t nRTVIndex );
+
+ uint32_t m_nRenderWidth;
+ uint32_t m_nRenderHeight;
+
+ std::vector< DX12RenderModel * > m_vecRenderModels;
+ DX12RenderModel *m_rTrackedDeviceToRenderModel[ vr::k_unMaxTrackedDeviceCount ];
+};
+
+//-----------------------------------------------------------------------------
+// Purpose: Outputs a set of optional arguments to debugging output, using
+// the printf format setting specified in fmt*.
+//-----------------------------------------------------------------------------
+void dprintf( const char *fmt, ... )
+{
+ va_list args;
+ char buffer[ 2048 ];
+
+ va_start( args, fmt );
+ vsprintf_s( buffer, fmt, args );
+ va_end( args );
+
+ if ( g_bPrintf )
+ printf( "%s", buffer );
+
+ OutputDebugStringA( buffer );
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Constructor
+//-----------------------------------------------------------------------------
+CMainApplication::CMainApplication( int argc, char *argv[] )
+ : m_pCompanionWindow(NULL)
+ , m_nCompanionWindowWidth( 640 )
+ , m_nCompanionWindowHeight( 320 )
+ , m_pHMD( NULL )
+ , m_pRenderModels( NULL )
+ , m_bDebugD3D12( false )
+ , m_bVerbose( false )
+ , m_bPerf( false )
+ , m_bVblank( false )
+ , m_nMSAASampleCount( 4 )
+ , m_flSuperSampleScale( 1.0f )
+ , m_iTrackedControllerCount( 0 )
+ , m_iTrackedControllerCount_Last( -1 )
+ , m_iValidPoseCount( 0 )
+ , m_iValidPoseCount_Last( -1 )
+ , m_iSceneVolumeInit( 20 )
+ , m_strPoseClasses("")
+ , m_bShowCubes( true )
+ , m_nFrameIndex( 0 )
+ , m_fenceEvent( NULL )
+ , m_nRTVDescriptorSize( 0 )
+ , m_nCBVSRVDescriptorSize( 0 )
+ , m_nDSVDescriptorSize( 0 )
+{
+ memset( m_pSceneConstantBufferData, 0, sizeof( m_pSceneConstantBufferData ) );
+
+ for( int i = 1; i < argc; i++ )
+ {
+ if( !stricmp( argv[i], "-dxdebug" ) )
+ {
+ m_bDebugD3D12 = true;
+ }
+ else if( !stricmp( argv[i], "-verbose" ) )
+ {
+ m_bVerbose = true;
+ }
+ else if( !stricmp( argv[i], "-novblank" ) )
+ {
+ m_bVblank = false;
+ }
+ else if ( !stricmp( argv[i], "-msaa" ) && ( argc > i + 1 ) && ( *argv[ i + 1 ] != '-' ) )
+ {
+ m_nMSAASampleCount = atoi( argv[ i + 1 ] );
+ i++;
+ }
+ else if ( !stricmp( argv[i], "-supersample" ) && ( argc > i + 1 ) && ( *argv[ i + 1 ] != '-' ) )
+ {
+ m_flSuperSampleScale = ( float )atof( argv[ i + 1 ] );
+ i++;
+ }
+ else if( !stricmp( argv[i], "-noprintf" ) )
+ {
+ g_bPrintf = false;
+ }
+ else if ( !stricmp( argv[i], "-cubevolume" ) && ( argc > i + 1 ) && ( *argv[ i + 1 ] != '-' ) )
+ {
+ m_iSceneVolumeInit = atoi( argv[ i + 1 ] );
+ i++;
+ }
+ }
+ // other initialization tasks are done in BInit
+ memset( m_rDevClassChar, 0, sizeof( m_rDevClassChar ) );
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Destructor
+//-----------------------------------------------------------------------------
+CMainApplication::~CMainApplication()
+{
+ // work is done in Shutdown
+ dprintf( "Shutdown" );
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Helper to get a string from a tracked device property and turn it
+// into a std::string
+//-----------------------------------------------------------------------------
+std::string GetTrackedDeviceString( vr::IVRSystem *pHmd, vr::TrackedDeviceIndex_t unDevice, vr::TrackedDeviceProperty prop, vr::TrackedPropertyError *peError = NULL )
+{
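+	// Query with a NULL buffer first to learn the required length, then allocate and fetch the string.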
+ uint32_t unRequiredBufferLen = pHmd->GetStringTrackedDeviceProperty( unDevice, prop, NULL, 0, peError );
+ if( unRequiredBufferLen == 0 )
+ return "";
+
+ char *pchBuffer = new char[ unRequiredBufferLen ];
+ unRequiredBufferLen = pHmd->GetStringTrackedDeviceProperty( unDevice, prop, pchBuffer, unRequiredBufferLen, peError );
+ std::string sResult = pchBuffer;
+ delete [] pchBuffer;
+ return sResult;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//-----------------------------------------------------------------------------
+bool CMainApplication::BInit()
+{
+ if ( SDL_Init( SDL_INIT_VIDEO | SDL_INIT_TIMER ) < 0 )
+ {
+ dprintf("%s - SDL could not initialize! SDL Error: %s\n", __FUNCTION__, SDL_GetError());
+ return false;
+ }
+
+ // Loading the SteamVR Runtime
+ vr::EVRInitError eError = vr::VRInitError_None;
+ m_pHMD = vr::VR_Init( &eError, vr::VRApplication_Scene );
+
+ if ( eError != vr::VRInitError_None )
+ {
+ m_pHMD = NULL;
+ char buf[1024];
+ sprintf_s( buf, sizeof( buf ), "Unable to init VR runtime: %s", vr::VR_GetVRInitErrorAsEnglishDescription( eError ) );
+ SDL_ShowSimpleMessageBox( SDL_MESSAGEBOX_ERROR, "VR_Init Failed", buf, NULL );
+ return false;
+ }
+
+
+ m_pRenderModels = (vr::IVRRenderModels *)vr::VR_GetGenericInterface( vr::IVRRenderModels_Version, &eError );
+ if( !m_pRenderModels )
+ {
+ m_pHMD = NULL;
+ vr::VR_Shutdown();
+
+ char buf[1024];
+ sprintf_s( buf, sizeof( buf ), "Unable to get render model interface: %s", vr::VR_GetVRInitErrorAsEnglishDescription( eError ) );
+ SDL_ShowSimpleMessageBox( SDL_MESSAGEBOX_ERROR, "VR_Init Failed", buf, NULL );
+ return false;
+ }
+
+ int nWindowPosX = 700;
+ int nWindowPosY = 100;
+ Uint32 unWindowFlags = SDL_WINDOW_SHOWN;
+
+ m_pCompanionWindow = SDL_CreateWindow( "hellovr [D3D12]", nWindowPosX, nWindowPosY, m_nCompanionWindowWidth, m_nCompanionWindowHeight, unWindowFlags );
+ if (m_pCompanionWindow == NULL)
+ {
+ dprintf( "%s - Window could not be created! SDL Error: %s\n", __FUNCTION__, SDL_GetError() );
+ return false;
+ }
+
+ m_strDriver = "No Driver";
+ m_strDisplay = "No Display";
+
+ m_strDriver = GetTrackedDeviceString( m_pHMD, vr::k_unTrackedDeviceIndex_Hmd, vr::Prop_TrackingSystemName_String );
+ m_strDisplay = GetTrackedDeviceString( m_pHMD, vr::k_unTrackedDeviceIndex_Hmd, vr::Prop_SerialNumber_String );
+
+ std::string strWindowTitle = "hellovr [D3D12] - " + m_strDriver + " " + m_strDisplay;
+ SDL_SetWindowTitle( m_pCompanionWindow, strWindowTitle.c_str() );
+
+ // cube array
+ m_iSceneVolumeWidth = m_iSceneVolumeInit;
+ m_iSceneVolumeHeight = m_iSceneVolumeInit;
+ m_iSceneVolumeDepth = m_iSceneVolumeInit;
+
+ m_fScale = 0.3f;
+ m_fScaleSpacing = 4.0f;
+
+ m_fNearClip = 0.1f;
+ m_fFarClip = 30.0f;
+
+ m_uiVertcount = 0;
+
+ if ( !BInitD3D12() )
+ {
+ dprintf( "%s - Unable to initialize D3D12!\n", __FUNCTION__ );
+ return false;
+ }
+
+ if ( !BInitCompositor() )
+ {
+ dprintf( "%s - Failed to initialize VR Compositor!\n", __FUNCTION__ );
+ return false;
+ }
+
+ return true;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Initialize DX12. Returns false if the DXGI factory, D3D12 device,
+//          command queue, swapchain, command allocators, or shaders could not
+//          be created; returns true otherwise.
+//-----------------------------------------------------------------------------
+bool CMainApplication::BInitD3D12()
+{
+ UINT nDXGIFactoryFlags = 0;
+
+ // Debug layers if -dxdebug is specified
+ if ( m_bDebugD3D12 )
+ {
+ ComPtr< ID3D12Debug > pDebugController;
+ if ( SUCCEEDED( D3D12GetDebugInterface( IID_PPV_ARGS( &pDebugController ) ) ) )
+ {
+ pDebugController->EnableDebugLayer();
+ nDXGIFactoryFlags |= DXGI_CREATE_FACTORY_DEBUG;
+ }
+ }
+
+ ComPtr< IDXGIFactory4 > pFactory;
+ if ( FAILED( CreateDXGIFactory2( nDXGIFactoryFlags, IID_PPV_ARGS( &pFactory ) ) ) )
+ {
+ dprintf( "CreateDXGIFactory2 failed.\n");
+ return false;
+ }
+
+ // Query OpenVR for the output adapter index
+ int32_t nAdapterIndex = 0;
+ m_pHMD->GetDXGIOutputInfo( &nAdapterIndex );
+
+ ComPtr< IDXGIAdapter1 > pAdapter;
+	if ( FAILED( pFactory->EnumAdapters1( nAdapterIndex, &pAdapter ) ) )
+	{
+		dprintf( "Error enumerating DXGI adapter.\n" );
+		return false;
+	}
+ DXGI_ADAPTER_DESC1 adapterDesc;
+ pAdapter->GetDesc1( &adapterDesc );
+
+ if ( FAILED( D3D12CreateDevice( pAdapter.Get(), D3D_FEATURE_LEVEL_11_0, IID_PPV_ARGS( &m_pDevice ) ) ) )
+ {
+ dprintf( "Failed to create D3D12 device with D3D12CreateDevice.\n" );
+ return false;
+ }
+
+ // Create the command queue
+ D3D12_COMMAND_QUEUE_DESC queueDesc = {};
+ queueDesc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
+ queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT;
+ if ( FAILED( m_pDevice->CreateCommandQueue( &queueDesc, IID_PPV_ARGS( &m_pCommandQueue ) ) ) )
+ {
+		dprintf( "Failed to create D3D12 command queue.\n" );
+ return false;
+ }
+
+ // Create the swapchain
+ DXGI_SWAP_CHAIN_DESC1 swapChainDesc = {};
+ swapChainDesc.BufferCount = g_nFrameCount;
+ swapChainDesc.Width = m_nCompanionWindowWidth;
+ swapChainDesc.Height = m_nCompanionWindowHeight;
+ swapChainDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
+ swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
+ swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD;
+ swapChainDesc.SampleDesc.Count = 1;
+
+ // Determine the HWND from SDL
+ struct SDL_SysWMinfo wmInfo;
+ SDL_VERSION( &wmInfo.version );
+ SDL_GetWindowWMInfo( m_pCompanionWindow, &wmInfo );
+ HWND hWnd = wmInfo.info.win.window;
+
+ ComPtr< IDXGISwapChain1 > pSwapChain;
+ if ( FAILED( pFactory->CreateSwapChainForHwnd( m_pCommandQueue.Get(), hWnd, &swapChainDesc, nullptr, nullptr, &pSwapChain ) ) )
+ {
+ dprintf( "Failed to create DXGI swapchain.\n" );
+ return false;
+ }
+
+ pFactory->MakeWindowAssociation( hWnd, DXGI_MWA_NO_ALT_ENTER );
+ pSwapChain.As( &m_pSwapChain );
+ m_nFrameIndex = m_pSwapChain->GetCurrentBackBufferIndex();
+
+ // Create descriptor heaps
+ {
+ m_nRTVDescriptorSize = m_pDevice->GetDescriptorHandleIncrementSize( D3D12_DESCRIPTOR_HEAP_TYPE_RTV );
+ m_nDSVDescriptorSize = m_pDevice->GetDescriptorHandleIncrementSize( D3D12_DESCRIPTOR_HEAP_TYPE_DSV );
+ m_nCBVSRVDescriptorSize = m_pDevice->GetDescriptorHandleIncrementSize( D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV );
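+		// Handle increment sizes are device-specific and are used to offset into each descriptor heap below.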
+
+ D3D12_DESCRIPTOR_HEAP_DESC rtvHeapDesc = {};
+ rtvHeapDesc.NumDescriptors = NUM_RTVS;
+ rtvHeapDesc.Type = D3D12_DESCRIPTOR_HEAP_TYPE_RTV;
+ rtvHeapDesc.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_NONE;
+ m_pDevice->CreateDescriptorHeap( &rtvHeapDesc, IID_PPV_ARGS( &m_pRTVHeap ) );
+
+		D3D12_DESCRIPTOR_HEAP_DESC dsvHeapDesc = {};
+		dsvHeapDesc.NumDescriptors = NUM_RTVS;
+		dsvHeapDesc.Type = D3D12_DESCRIPTOR_HEAP_TYPE_DSV;
+		dsvHeapDesc.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_NONE;
+		m_pDevice->CreateDescriptorHeap( &dsvHeapDesc, IID_PPV_ARGS( &m_pDSVHeap ) );
+
+ D3D12_DESCRIPTOR_HEAP_DESC cbvSrvHeapDesc = {};
+ cbvSrvHeapDesc.NumDescriptors = NUM_SRV_CBVS;
+ cbvSrvHeapDesc.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE;
+ cbvSrvHeapDesc.Type = D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV;
+ m_pDevice->CreateDescriptorHeap( &cbvSrvHeapDesc, IID_PPV_ARGS( &m_pCBVSRVHeap ) );
+ }
+
+ // Create per-frame resources
+ for ( int nFrame = 0; nFrame < g_nFrameCount; nFrame++ )
+ {
+ if ( FAILED( m_pDevice->CreateCommandAllocator( D3D12_COMMAND_LIST_TYPE_DIRECT, IID_PPV_ARGS( &m_pCommandAllocators[ nFrame ] ) ) ) )
+ {
+ dprintf( "Failed to create command allocators.\n" );
+ return false;
+ }
+
+ // Create swapchain render targets
+ m_pSwapChain->GetBuffer( nFrame, IID_PPV_ARGS( &m_pSwapChainRenderTarget[ nFrame ] ) );
+
+ // Create swapchain render target views
+ CD3DX12_CPU_DESCRIPTOR_HANDLE rtvHandle( m_pRTVHeap->GetCPUDescriptorHandleForHeapStart() );
+ rtvHandle.Offset( RTV_SWAPCHAIN0 + nFrame, m_nRTVDescriptorSize );
+ m_pDevice->CreateRenderTargetView( m_pSwapChainRenderTarget[ nFrame ].Get(), nullptr, rtvHandle );
+ }
+
+ // Create constant buffer
+ {
+ m_pDevice->CreateCommittedResource(
+ &CD3DX12_HEAP_PROPERTIES( D3D12_HEAP_TYPE_UPLOAD ),
+ D3D12_HEAP_FLAG_NONE,
+ &CD3DX12_RESOURCE_DESC::Buffer( 1024 * 64 ),
+ D3D12_RESOURCE_STATE_GENERIC_READ,
+ nullptr,
+ IID_PPV_ARGS( &m_pSceneConstantBuffer ) );
+
+ // Keep as persistently mapped buffer, store left eye in first 256 bytes, right eye in second
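+		// D3D12 requires constant buffer views to be aligned to D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT
+		// (256 bytes), hence the 256-byte stride between the two eye constants.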
+ UINT8 *pBuffer;
+ CD3DX12_RANGE readRange( 0, 0 );
+ m_pSceneConstantBuffer->Map( 0, &readRange, reinterpret_cast<void**>( &pBuffer ) );
+ // Left eye to first 256 bytes, right eye to second 256 bytes
+ m_pSceneConstantBufferData[ 0 ] = pBuffer;
+ m_pSceneConstantBufferData[ 1 ] = pBuffer + 256;
+
+ // Left eye CBV
+ CD3DX12_CPU_DESCRIPTOR_HANDLE cbvLeftEyeHandle( m_pCBVSRVHeap->GetCPUDescriptorHandleForHeapStart() );
+ cbvLeftEyeHandle.Offset( CBV_LEFT_EYE, m_nCBVSRVDescriptorSize );
+ D3D12_CONSTANT_BUFFER_VIEW_DESC cbvDesc = {};
+ cbvDesc.BufferLocation = m_pSceneConstantBuffer->GetGPUVirtualAddress();
+ cbvDesc.SizeInBytes = ( sizeof( Matrix4 ) + 255 ) & ~255; // Pad to 256 bytes
+ m_pDevice->CreateConstantBufferView( &cbvDesc, cbvLeftEyeHandle );
+ m_sceneConstantBufferView[ 0 ] = cbvLeftEyeHandle;
+
+ // Right eye CBV
+ CD3DX12_CPU_DESCRIPTOR_HANDLE cbvRightEyeHandle( m_pCBVSRVHeap->GetCPUDescriptorHandleForHeapStart() );
+ cbvRightEyeHandle.Offset( CBV_RIGHT_EYE, m_nCBVSRVDescriptorSize );
+ cbvDesc.BufferLocation += 256;
+ m_pDevice->CreateConstantBufferView( &cbvDesc, cbvRightEyeHandle );
+ m_sceneConstantBufferView[ 1 ] = cbvRightEyeHandle;
+ }
+
+ // Create fence
+ {
+ memset( m_nFenceValues, 0, sizeof( m_nFenceValues ) );
+ m_pDevice->CreateFence( m_nFenceValues[ m_nFrameIndex ], D3D12_FENCE_FLAG_NONE, IID_PPV_ARGS( &m_pFence ) );
+ m_nFenceValues[ m_nFrameIndex ]++;
+
+ m_fenceEvent = CreateEvent( nullptr, FALSE, FALSE, nullptr );
+ }
+
+ if( !CreateAllShaders() )
+ return false;
+
+ // Create command list
+ m_pDevice->CreateCommandList( 0, D3D12_COMMAND_LIST_TYPE_DIRECT, m_pCommandAllocators[ m_nFrameIndex ].Get(), m_pScenePipelineState.Get(), IID_PPV_ARGS( &m_pCommandList ) );
+
+ SetupTexturemaps();
+ SetupScene();
+ SetupCameras();
+ SetupStereoRenderTargets();
+ SetupCompanionWindow();
+ SetupRenderModels();
+
+ // Do any work that was queued up during loading
+ m_pCommandList->Close();
+ ID3D12CommandList* ppCommandLists[] = { m_pCommandList.Get() };
+ m_pCommandQueue->ExecuteCommandLists( _countof(ppCommandLists), ppCommandLists );
+
+ // Wait for it to finish
+ m_pCommandQueue->Signal( m_pFence.Get(), m_nFenceValues[ m_nFrameIndex ] );
+ m_pFence->SetEventOnCompletion( m_nFenceValues[ m_nFrameIndex], m_fenceEvent );
+ WaitForSingleObjectEx( m_fenceEvent, INFINITE, FALSE );
+ m_nFenceValues[ m_nFrameIndex ]++;
+
+ return true;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Initialize Compositor. Returns true if the compositor was
+// successfully initialized, false otherwise.
+//-----------------------------------------------------------------------------
+bool CMainApplication::BInitCompositor()
+{
+
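+	// vr::VRCompositor() initializes the compositor interface on first use; a NULL return means it could not be initialized.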
+ if ( !vr::VRCompositor() )
+ {
+ dprintf( "Compositor initialization failed. See log file for details\n" );
+ return false;
+ }
+
+ return true;
+}
+
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//-----------------------------------------------------------------------------
+void CMainApplication::Shutdown()
+{
+ if( m_pHMD )
+ {
+ vr::VR_Shutdown();
+ m_pHMD = NULL;
+ }
+
+ for( std::vector< DX12RenderModel * >::iterator i = m_vecRenderModels.begin(); i != m_vecRenderModels.end(); i++ )
+ {
+ delete (*i);
+ }
+ m_vecRenderModels.clear();
+
+ if( m_pCompanionWindow )
+ {
+ SDL_DestroyWindow(m_pCompanionWindow);
+ m_pCompanionWindow = NULL;
+ }
+
+ SDL_Quit();
+}
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//-----------------------------------------------------------------------------
+bool CMainApplication::HandleInput()
+{
+ SDL_Event sdlEvent;
+ bool bRet = false;
+
+ while ( SDL_PollEvent( &sdlEvent ) != 0 )
+ {
+ if ( sdlEvent.type == SDL_QUIT )
+ {
+ bRet = true;
+ }
+ else if ( sdlEvent.type == SDL_KEYDOWN )
+ {
+ if ( sdlEvent.key.keysym.sym == SDLK_ESCAPE
+ || sdlEvent.key.keysym.sym == SDLK_q )
+ {
+ bRet = true;
+ }
+ if( sdlEvent.key.keysym.sym == SDLK_c )
+ {
+ m_bShowCubes = !m_bShowCubes;
+ }
+ }
+ }
+
+ // Process SteamVR events
+ vr::VREvent_t event;
+ while( m_pHMD->PollNextEvent( &event, sizeof( event ) ) )
+ {
+ ProcessVREvent( event );
+ }
+
+ // Process SteamVR controller state
+ for( vr::TrackedDeviceIndex_t unDevice = 0; unDevice < vr::k_unMaxTrackedDeviceCount; unDevice++ )
+ {
+ vr::VRControllerState_t state;
+ if( m_pHMD->GetControllerState( unDevice, &state, sizeof(state) ) )
+ {
+ m_rbShowTrackedDevice[ unDevice ] = state.ulButtonPressed == 0;
+ }
+ }
+
+ return bRet;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//-----------------------------------------------------------------------------
+void CMainApplication::RunMainLoop()
+{
+ bool bQuit = false;
+
+ SDL_StartTextInput();
+ SDL_ShowCursor( SDL_DISABLE );
+
+ while ( !bQuit )
+ {
+ bQuit = HandleInput();
+
+ RenderFrame();
+ }
+
+ SDL_StopTextInput();
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Processes a single VR event
+//-----------------------------------------------------------------------------
+void CMainApplication::ProcessVREvent( const vr::VREvent_t & event )
+{
+ switch( event.eventType )
+ {
+ case vr::VREvent_TrackedDeviceActivated:
+ {
+ SetupRenderModelForTrackedDevice( event.trackedDeviceIndex );
+ dprintf( "Device %u attached. Setting up render model.\n", event.trackedDeviceIndex );
+ }
+ break;
+ case vr::VREvent_TrackedDeviceDeactivated:
+ {
+ dprintf( "Device %u detached.\n", event.trackedDeviceIndex );
+ }
+ break;
+ case vr::VREvent_TrackedDeviceUpdated:
+ {
+ dprintf( "Device %u updated.\n", event.trackedDeviceIndex );
+ }
+ break;
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//-----------------------------------------------------------------------------
+void CMainApplication::RenderFrame()
+{
+ if ( m_pHMD )
+ {
+ m_pCommandAllocators[ m_nFrameIndex ]->Reset();
+
+ m_pCommandList->Reset( m_pCommandAllocators[ m_nFrameIndex ].Get(), m_pScenePipelineState.Get() );
+ m_pCommandList->SetGraphicsRootSignature( m_pRootSignature.Get() );
+
+ ID3D12DescriptorHeap *ppHeaps[] = { m_pCBVSRVHeap.Get() };
+ m_pCommandList->SetDescriptorHeaps( _countof( ppHeaps ), ppHeaps );
+
+ UpdateControllerAxes();
+ RenderStereoTargets();
+ RenderCompanionWindow();
+
+ m_pCommandList->Close();
+
+ // Execute the command list.
+ ID3D12CommandList* ppCommandLists[] = { m_pCommandList.Get() };
+ m_pCommandQueue->ExecuteCommandLists( _countof( ppCommandLists ), ppCommandLists );
+
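+		// Submit the rendered eye textures to the SteamVR compositor. For D3D12 the texture handle is a
+		// D3D12TextureData_t bundling the resource, the command queue that rendered it, and a GPU node mask (0 here).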
+ vr::VRTextureBounds_t bounds;
+ bounds.uMin = 0.0f;
+ bounds.uMax = 1.0f;
+ bounds.vMin = 0.0f;
+ bounds.vMax = 1.0f;
+
+ vr::D3D12TextureData_t d3d12LeftEyeTexture = { m_leftEyeDesc.m_pTexture.Get(), m_pCommandQueue.Get(), 0 };
+ vr::Texture_t leftEyeTexture = { ( void * ) &d3d12LeftEyeTexture, vr::TextureType_DirectX12, vr::ColorSpace_Gamma };
+ vr::VRCompositor()->Submit( vr::Eye_Left, &leftEyeTexture, &bounds, vr::Submit_Default );
+
+ vr::D3D12TextureData_t d3d12RightEyeTexture = { m_rightEyeDesc.m_pTexture.Get(), m_pCommandQueue.Get(), 0 };
+ vr::Texture_t rightEyeTexture = { ( void * ) &d3d12RightEyeTexture, vr::TextureType_DirectX12, vr::ColorSpace_Gamma };
+ vr::VRCompositor()->Submit( vr::Eye_Right, &rightEyeTexture, &bounds, vr::Submit_Default );
+ }
+
+ // Present
+ m_pSwapChain->Present( 0, 0 );
+
+ // Wait for completion
+ {
+ const UINT64 nCurrentFenceValue = m_nFenceValues[ m_nFrameIndex ];
+ m_pCommandQueue->Signal( m_pFence.Get(), nCurrentFenceValue );
+
+ m_nFrameIndex = m_pSwapChain->GetCurrentBackBufferIndex();
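+		// Only block if the GPU has not yet finished the frame that last used this back buffer.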
+ if ( m_pFence->GetCompletedValue() < m_nFenceValues[ m_nFrameIndex ] )
+ {
+ m_pFence->SetEventOnCompletion( m_nFenceValues[ m_nFrameIndex ], m_fenceEvent );
+ WaitForSingleObjectEx( m_fenceEvent, INFINITE, FALSE );
+ }
+
+ m_nFenceValues[ m_nFrameIndex ] = nCurrentFenceValue + 1;
+ }
+
+ // Spew out the controller and pose count whenever they change.
+ if ( m_iTrackedControllerCount != m_iTrackedControllerCount_Last || m_iValidPoseCount != m_iValidPoseCount_Last )
+ {
+ m_iValidPoseCount_Last = m_iValidPoseCount;
+ m_iTrackedControllerCount_Last = m_iTrackedControllerCount;
+
+ dprintf( "PoseCount:%d(%s) Controllers:%d\n", m_iValidPoseCount, m_strPoseClasses.c_str(), m_iTrackedControllerCount );
+ }
+
+ UpdateHMDMatrixPose();
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Creates all the shaders used by HelloVR DX12
+//-----------------------------------------------------------------------------
+bool CMainApplication::CreateAllShaders()
+{
+	std::string sExecutableDirectory = Path_StripFilename( Path_GetExecutablePath() );
+
+ // Root signature
+ {
+ D3D12_FEATURE_DATA_ROOT_SIGNATURE featureData = {};
+ featureData.HighestVersion = D3D_ROOT_SIGNATURE_VERSION_1_1;
+ if ( FAILED( m_pDevice->CheckFeatureSupport( D3D12_FEATURE_ROOT_SIGNATURE, &featureData, sizeof( featureData ) ) ) )
+ {
+ featureData.HighestVersion = D3D_ROOT_SIGNATURE_VERSION_1_0;
+ }
+
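+		// Two root parameters: a CBV table (per-eye transform) visible to the vertex shader
+		// and an SRV table (texture) visible to the pixel shader.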
+ CD3DX12_DESCRIPTOR_RANGE1 ranges[2];
+ CD3DX12_ROOT_PARAMETER1 rootParameters[2];
+
+ ranges[0].Init( D3D12_DESCRIPTOR_RANGE_TYPE_CBV, 1, 0, 0, D3D12_DESCRIPTOR_RANGE_FLAG_DATA_STATIC );
+ ranges[1].Init( D3D12_DESCRIPTOR_RANGE_TYPE_SRV, 1, 0, 0 );
+ rootParameters[0].InitAsDescriptorTable( 1, &ranges[0], D3D12_SHADER_VISIBILITY_VERTEX );
+ rootParameters[1].InitAsDescriptorTable( 1, &ranges[1], D3D12_SHADER_VISIBILITY_PIXEL );
+
+ D3D12_ROOT_SIGNATURE_FLAGS rootSignatureFlags =
+ D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT |
+ D3D12_ROOT_SIGNATURE_FLAG_DENY_HULL_SHADER_ROOT_ACCESS |
+ D3D12_ROOT_SIGNATURE_FLAG_DENY_DOMAIN_SHADER_ROOT_ACCESS |
+ D3D12_ROOT_SIGNATURE_FLAG_DENY_GEOMETRY_SHADER_ROOT_ACCESS;
+
+ D3D12_STATIC_SAMPLER_DESC sampler = {};
+ sampler.Filter = D3D12_FILTER_MIN_MAG_MIP_POINT;
+ sampler.AddressU = D3D12_TEXTURE_ADDRESS_MODE_CLAMP;
+ sampler.AddressV = D3D12_TEXTURE_ADDRESS_MODE_CLAMP;
+ sampler.AddressW = D3D12_TEXTURE_ADDRESS_MODE_CLAMP;
+ sampler.ComparisonFunc = D3D12_COMPARISON_FUNC_NEVER;
+ sampler.MaxLOD = D3D12_FLOAT32_MAX;
+ sampler.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
+
+ CD3DX12_VERSIONED_ROOT_SIGNATURE_DESC rootSignatureDesc;
+ rootSignatureDesc.Init_1_1( _countof(rootParameters), rootParameters, 1, &sampler, rootSignatureFlags );
+ ComPtr< ID3DBlob > signature;
+ ComPtr< ID3DBlob > error;
+ D3DX12SerializeVersionedRootSignature( &rootSignatureDesc, featureData.HighestVersion, &signature, &error );
+ m_pDevice->CreateRootSignature( 0, signature->GetBufferPointer(), signature->GetBufferSize(), IID_PPV_ARGS( &m_pRootSignature ) );
+ }
+
+ // Scene shader
+ {
+ ComPtr<ID3DBlob> vertexShader;
+ ComPtr<ID3DBlob> pixelShader;
+ UINT compileFlags = 0;
+
+ std::string shaderPath = Path_MakeAbsolute( "../shaders/scene.hlsl", sExecutableDirectory );
+ std::wstring shaderPathW = std::wstring( shaderPath.begin(), shaderPath.end() );
+ ComPtr< ID3DBlob > error;
+ if ( FAILED( D3DCompileFromFile( shaderPathW.c_str(), nullptr, nullptr, "VSMain", "vs_5_0", compileFlags, 0, &vertexShader, &error ) ) )
+ {
+ dprintf( "Failed compiling vertex shader '%s':\n%s\n", shaderPath.c_str(), ( char* )error->GetBufferPointer() );
+ return false;
+ }
+ if ( FAILED( D3DCompileFromFile( shaderPathW.c_str(), nullptr, nullptr, "PSMain", "ps_5_0", compileFlags, 0, &pixelShader, &error ) ) )
+ {
+ dprintf( "Failed compiling pixel shader '%s':\n%s\n", shaderPath.c_str(), ( char* )error->GetBufferPointer() );
+ return false;
+ }
+
+ // Define the vertex input layout.
+ D3D12_INPUT_ELEMENT_DESC inputElementDescs[] =
+ {
+ { "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0 },
+ { "TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 12, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0 },
+ };
+
+ // Describe and create the graphics pipeline state object (PSO).
+ D3D12_GRAPHICS_PIPELINE_STATE_DESC psoDesc = {};
+ psoDesc.InputLayout = { inputElementDescs, _countof( inputElementDescs ) };
+ psoDesc.pRootSignature = m_pRootSignature.Get();
+ psoDesc.VS = CD3DX12_SHADER_BYTECODE( vertexShader.Get() );
+ psoDesc.PS = CD3DX12_SHADER_BYTECODE( pixelShader.Get() );
+ psoDesc.RasterizerState = CD3DX12_RASTERIZER_DESC( D3D12_DEFAULT );
+ psoDesc.RasterizerState.FrontCounterClockwise = TRUE;
+ psoDesc.RasterizerState.MultisampleEnable = TRUE;
+ psoDesc.BlendState = CD3DX12_BLEND_DESC( D3D12_DEFAULT );
+ psoDesc.DepthStencilState = CD3DX12_DEPTH_STENCIL_DESC( D3D12_DEFAULT );
+ psoDesc.SampleMask = UINT_MAX;
+ psoDesc.PrimitiveTopologyType = D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE;
+ psoDesc.NumRenderTargets = 1;
+ psoDesc.RTVFormats[0] = DXGI_FORMAT_R8G8B8A8_UNORM_SRGB;
+ psoDesc.DSVFormat = DXGI_FORMAT_D32_FLOAT;
+ psoDesc.SampleDesc.Count = m_nMSAASampleCount;
+ psoDesc.SampleDesc.Quality = 0;
+ if ( FAILED( m_pDevice->CreateGraphicsPipelineState( &psoDesc, IID_PPV_ARGS( &m_pScenePipelineState ) ) ) )
+ {
+ dprintf( "Error creating D3D12 pipeline state.\n" );
+ return false;
+ }
+ }
+
+ // Companion shader
+ {
+ ComPtr<ID3DBlob> vertexShader;
+ ComPtr<ID3DBlob> pixelShader;
+ UINT compileFlags = 0;
+
+ std::string shaderPath = Path_MakeAbsolute( "../shaders/companion.hlsl", sExecutableDirectory );
+ std::wstring shaderPathW = std::wstring( shaderPath.begin(), shaderPath.end() );
+ ComPtr< ID3DBlob > error;
+ if ( FAILED( D3DCompileFromFile( shaderPathW.c_str(), nullptr, nullptr, "VSMain", "vs_5_0", compileFlags, 0, &vertexShader, &error ) ) )
+ {
+ dprintf( "Failed compiling vertex shader '%s':\n%s\n", shaderPath.c_str(), ( char* )error->GetBufferPointer() );
+ return false;
+ }
+ if ( FAILED( D3DCompileFromFile( shaderPathW.c_str(), nullptr, nullptr, "PSMain", "ps_5_0", compileFlags, 0, &pixelShader, &error ) ) )
+ {
+ dprintf( "Failed compiling pixel shader '%s':\n%s\n", shaderPath.c_str(), ( char* )error->GetBufferPointer() );
+ return false;
+ }
+
+ // Define the vertex input layout.
+ D3D12_INPUT_ELEMENT_DESC inputElementDescs[] =
+ {
+ { "POSITION", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 0, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0 },
+ { "TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 8, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0 },
+ };
+
+ // Describe and create the graphics pipeline state object (PSO).
+ D3D12_GRAPHICS_PIPELINE_STATE_DESC psoDesc = {};
+ psoDesc.InputLayout = { inputElementDescs, _countof( inputElementDescs ) };
+ psoDesc.pRootSignature = m_pRootSignature.Get();
+ psoDesc.VS = CD3DX12_SHADER_BYTECODE( vertexShader.Get() );
+ psoDesc.PS = CD3DX12_SHADER_BYTECODE( pixelShader.Get() );
+ psoDesc.RasterizerState = CD3DX12_RASTERIZER_DESC( D3D12_DEFAULT );
+ psoDesc.RasterizerState.FrontCounterClockwise = TRUE;
+ psoDesc.BlendState = CD3DX12_BLEND_DESC( D3D12_DEFAULT );
+ psoDesc.DepthStencilState = CD3DX12_DEPTH_STENCIL_DESC( D3D12_DEFAULT );
+ psoDesc.DepthStencilState.DepthEnable = FALSE;
+ psoDesc.DepthStencilState.StencilEnable = FALSE;
+ psoDesc.SampleMask = UINT_MAX;
+ psoDesc.PrimitiveTopologyType = D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE;
+ psoDesc.NumRenderTargets = 1;
+ psoDesc.RTVFormats[0] = DXGI_FORMAT_R8G8B8A8_UNORM;
+ psoDesc.SampleDesc.Count = 1;
+ if ( FAILED( m_pDevice->CreateGraphicsPipelineState( &psoDesc, IID_PPV_ARGS( &m_pCompanionPipelineState ) ) ) )
+ {
+ dprintf( "Error creating D3D12 pipeline state.\n" );
+ return false;
+ }
+ }
+
+ // Axes shader
+ {
+ ComPtr<ID3DBlob> vertexShader;
+ ComPtr<ID3DBlob> pixelShader;
+ UINT compileFlags = 0;
+
+ std::string shaderPath = Path_MakeAbsolute( "../shaders/axes.hlsl", sExecutableDirectory );
+ std::wstring shaderPathW = std::wstring( shaderPath.begin(), shaderPath.end() );
+ ComPtr< ID3DBlob > error;
+ if ( FAILED( D3DCompileFromFile( shaderPathW.c_str(), nullptr, nullptr, "VSMain", "vs_5_0", compileFlags, 0, &vertexShader, &error ) ) )
+ {
+ dprintf( "Failed compiling vertex shader '%s':\n%s\n", shaderPath.c_str(), ( char* )error->GetBufferPointer() );
+ return false;
+ }
+ if ( FAILED( D3DCompileFromFile( shaderPathW.c_str(), nullptr, nullptr, "PSMain", "ps_5_0", compileFlags, 0, &pixelShader, &error ) ) )
+ {
+ dprintf( "Failed compiling pixel shader '%s':\n%s\n", shaderPath.c_str(), ( char* )error->GetBufferPointer() );
+ return false;
+ }
+
+ // Define the vertex input layout.
+ D3D12_INPUT_ELEMENT_DESC inputElementDescs[] =
+ {
+ { "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0 },
+ { "COLOR", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 12, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0 },
+ };
+
+ // Describe and create the graphics pipeline state object (PSO).
+ D3D12_GRAPHICS_PIPELINE_STATE_DESC psoDesc = {};
+ psoDesc.InputLayout = { inputElementDescs, _countof( inputElementDescs ) };
+ psoDesc.pRootSignature = m_pRootSignature.Get();
+ psoDesc.VS = CD3DX12_SHADER_BYTECODE( vertexShader.Get() );
+ psoDesc.PS = CD3DX12_SHADER_BYTECODE( pixelShader.Get() );
+ psoDesc.RasterizerState = CD3DX12_RASTERIZER_DESC( D3D12_DEFAULT );
+ psoDesc.RasterizerState.FrontCounterClockwise = TRUE;
+ psoDesc.RasterizerState.MultisampleEnable = TRUE;
+ psoDesc.BlendState = CD3DX12_BLEND_DESC( D3D12_DEFAULT );
+ psoDesc.DepthStencilState = CD3DX12_DEPTH_STENCIL_DESC( D3D12_DEFAULT );
+ psoDesc.SampleMask = UINT_MAX;
+ psoDesc.PrimitiveTopologyType = D3D12_PRIMITIVE_TOPOLOGY_TYPE_LINE;
+ psoDesc.NumRenderTargets = 1;
+ psoDesc.RTVFormats[0] = DXGI_FORMAT_R8G8B8A8_UNORM_SRGB;
+ psoDesc.DSVFormat = DXGI_FORMAT_D32_FLOAT;
+ psoDesc.SampleDesc.Count = m_nMSAASampleCount;
+ psoDesc.SampleDesc.Quality = 0;
+ if ( FAILED( m_pDevice->CreateGraphicsPipelineState( &psoDesc, IID_PPV_ARGS( &m_pAxesPipelineState ) ) ) )
+ {
+ dprintf( "Error creating D3D12 pipeline state.\n" );
+ return false;
+ }
+ }
+
+ // Render Model shader
+ {
+ ComPtr<ID3DBlob> vertexShader;
+ ComPtr<ID3DBlob> pixelShader;
+ UINT compileFlags = 0;
+
+ std::string shaderPath = Path_MakeAbsolute( "../shaders/rendermodel.hlsl", sExecutableDirectory );
+ std::wstring shaderPathW = std::wstring( shaderPath.begin(), shaderPath.end() );
+ ComPtr< ID3DBlob > error;
+ if ( FAILED( D3DCompileFromFile( shaderPathW.c_str(), nullptr, nullptr, "VSMain", "vs_5_0", compileFlags, 0, &vertexShader, &error ) ) )
+ {
+ dprintf( "Failed compiling vertex shader '%s':\n%s\n", shaderPath.c_str(), ( char* )error->GetBufferPointer() );
+ return false;
+ }
+ if ( FAILED( D3DCompileFromFile( shaderPathW.c_str(), nullptr, nullptr, "PSMain", "ps_5_0", compileFlags, 0, &pixelShader, &error ) ) )
+ {
+ dprintf( "Failed compiling pixel shader '%s':\n%s\n", shaderPath.c_str(), ( char* )error->GetBufferPointer() );
+ return false;
+ }
+
+ // Define the vertex input layout.
+ D3D12_INPUT_ELEMENT_DESC inputElementDescs[] =
+ {
+ { "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0 },
+ { "TEXCOORD", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 12, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0 },
+ { "TEXCOORD", 1, DXGI_FORMAT_R32G32_FLOAT, 0, 24, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0 },
+ };
+
+ // Describe and create the graphics pipeline state object (PSO).
+ D3D12_GRAPHICS_PIPELINE_STATE_DESC psoDesc = {};
+ psoDesc.InputLayout = { inputElementDescs, _countof( inputElementDescs ) };
+ psoDesc.pRootSignature = m_pRootSignature.Get();
+ psoDesc.VS = CD3DX12_SHADER_BYTECODE( vertexShader.Get() );
+ psoDesc.PS = CD3DX12_SHADER_BYTECODE( pixelShader.Get() );
+ psoDesc.RasterizerState = CD3DX12_RASTERIZER_DESC( D3D12_DEFAULT );
+ psoDesc.RasterizerState.FrontCounterClockwise = TRUE;
+ psoDesc.RasterizerState.MultisampleEnable = TRUE;
+ psoDesc.BlendState = CD3DX12_BLEND_DESC( D3D12_DEFAULT );
+ psoDesc.DepthStencilState = CD3DX12_DEPTH_STENCIL_DESC( D3D12_DEFAULT );
+ psoDesc.SampleMask = UINT_MAX;
+ psoDesc.PrimitiveTopologyType = D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE;
+ psoDesc.NumRenderTargets = 1;
+ psoDesc.RTVFormats[0] = DXGI_FORMAT_R8G8B8A8_UNORM_SRGB;
+ psoDesc.DSVFormat = DXGI_FORMAT_D32_FLOAT;
+ psoDesc.SampleDesc.Count = m_nMSAASampleCount;
+ psoDesc.SampleDesc.Quality = 0;
+ if ( FAILED( m_pDevice->CreateGraphicsPipelineState( &psoDesc, IID_PPV_ARGS( &m_pRenderModelPipelineState ) ) ) )
+ {
+ dprintf( "Error creating D3D12 pipeline state.\n" );
+ return false;
+ }
+ }
+
+ return true;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//-----------------------------------------------------------------------------
+bool CMainApplication::SetupTexturemaps()
+{
+ std::string sExecutableDirectory = Path_StripFilename( Path_GetExecutablePath() );
+ std::string strFullPath = Path_MakeAbsolute( "../cube_texture.png", sExecutableDirectory );
+
+ std::vector< unsigned char > imageRGBA;
+ unsigned nImageWidth, nImageHeight;
+ unsigned nError = lodepng::decode( imageRGBA, nImageWidth, nImageHeight, strFullPath.c_str() );
+
+ if ( nError != 0 )
+ return false;
+
+ // Store level 0
+ std::vector< D3D12_SUBRESOURCE_DATA > mipLevelData;
+ UINT8 *pBaseData = new UINT8[ nImageWidth * nImageHeight * 4 ];
+ memcpy( pBaseData, &imageRGBA[0], sizeof( UINT8 ) * nImageWidth * nImageHeight * 4 );
+
+ D3D12_SUBRESOURCE_DATA textureData = {};
+ textureData.pData = &pBaseData[ 0 ];
+ textureData.RowPitch = nImageWidth * 4;
+ textureData.SlicePitch = textureData.RowPitch * nImageHeight;
+ mipLevelData.push_back( textureData );
+
+ // Generate mipmaps for the image
+ int nPrevImageIndex = 0;
+ int nMipWidth = nImageWidth;
+ int nMipHeight = nImageHeight;
+
+ while( nMipWidth > 1 && nMipHeight > 1 )
+ {
+ UINT8 *pNewImage;
+ GenMipMapRGBA( ( UINT8* )mipLevelData[ nPrevImageIndex ].pData, &pNewImage, nMipWidth, nMipHeight, &nMipWidth, &nMipHeight );
+
+ D3D12_SUBRESOURCE_DATA mipData = {};
+ mipData.pData = pNewImage;
+ mipData.RowPitch = nMipWidth * 4;
+		mipData.SlicePitch = mipData.RowPitch * nMipHeight;
+ mipLevelData.push_back( mipData );
+
+ nPrevImageIndex++;
+ }
+
+ D3D12_RESOURCE_DESC textureDesc = {};
+ textureDesc.MipLevels = ( UINT16 ) mipLevelData.size();
+ textureDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
+ textureDesc.Width = nImageWidth;
+ textureDesc.Height = nImageHeight;
+ textureDesc.Flags = D3D12_RESOURCE_FLAG_NONE;
+ textureDesc.DepthOrArraySize = 1;
+ textureDesc.SampleDesc.Count = 1;
+ textureDesc.SampleDesc.Quality = 0;
+ textureDesc.Dimension = D3D12_RESOURCE_DIMENSION_TEXTURE2D;
+
+ m_pDevice->CreateCommittedResource( &CD3DX12_HEAP_PROPERTIES( D3D12_HEAP_TYPE_DEFAULT ),
+ D3D12_HEAP_FLAG_NONE,
+ &textureDesc,
+ D3D12_RESOURCE_STATE_COPY_DEST,
+ nullptr,
+ IID_PPV_ARGS( &m_pTexture ) );
+
+ // Create shader resource view
+ CD3DX12_CPU_DESCRIPTOR_HANDLE srvHandle( m_pCBVSRVHeap->GetCPUDescriptorHandleForHeapStart() );
+ srvHandle.Offset( SRV_TEXTURE_MAP, m_nCBVSRVDescriptorSize );
+ m_pDevice->CreateShaderResourceView( m_pTexture.Get(), nullptr, srvHandle );
+ m_textureShaderResourceView = srvHandle;
+
+ const UINT64 nUploadBufferSize = GetRequiredIntermediateSize( m_pTexture.Get(), 0, textureDesc.MipLevels );
+
+ // Create the GPU upload buffer.
+ m_pDevice->CreateCommittedResource(
+ &CD3DX12_HEAP_PROPERTIES( D3D12_HEAP_TYPE_UPLOAD ),
+ D3D12_HEAP_FLAG_NONE,
+ &CD3DX12_RESOURCE_DESC::Buffer( nUploadBufferSize ),
+ D3D12_RESOURCE_STATE_GENERIC_READ,
+ nullptr,
+ IID_PPV_ARGS( &m_pTextureUploadHeap ) );
+
+ UpdateSubresources( m_pCommandList.Get(), m_pTexture.Get(), m_pTextureUploadHeap.Get(), 0, 0, mipLevelData.size(), &mipLevelData[0] );
+ m_pCommandList->ResourceBarrier( 1, &CD3DX12_RESOURCE_BARRIER::Transition( m_pTexture.Get(), D3D12_RESOURCE_STATE_COPY_DEST, D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE ) );
+
+ // Free mip pointers
+ for ( size_t nMip = 0; nMip < mipLevelData.size(); nMip++ )
+ {
+		delete [] ( UINT8 * ) mipLevelData[ nMip ].pData;
+ }
+ return true;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: generate next level mipmap for an RGBA image
+//-----------------------------------------------------------------------------
+void CMainApplication::GenMipMapRGBA( const UINT8 *pSrc, UINT8 **ppDst, int nSrcWidth, int nSrcHeight, int *pDstWidthOut, int *pDstHeightOut )
+{
+ *pDstWidthOut = nSrcWidth / 2;
+ if ( *pDstWidthOut <= 0 )
+ {
+ *pDstWidthOut = 1;
+ }
+ *pDstHeightOut = nSrcHeight / 2;
+ if ( *pDstHeightOut <= 0 )
+ {
+ *pDstHeightOut = 1;
+ }
+
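+	// Box filter: each destination pixel is the average of the corresponding 2x2 block of source pixels.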
+ *ppDst = new UINT8[ 4 * ( *pDstWidthOut ) * ( *pDstHeightOut ) ];
+ for ( int y = 0; y < *pDstHeightOut; y++ )
+ {
+ for ( int x = 0; x < *pDstWidthOut; x++ )
+ {
+ int nSrcIndex[4];
+ float r = 0.0f;
+ float g = 0.0f;
+ float b = 0.0f;
+ float a = 0.0f;
+
+ nSrcIndex[0] = ( ( ( y * 2 ) * nSrcWidth ) + ( x * 2 ) ) * 4;
+ nSrcIndex[1] = ( ( ( y * 2 ) * nSrcWidth ) + ( x * 2 + 1 ) ) * 4;
+ nSrcIndex[2] = ( ( ( ( y * 2 ) + 1 ) * nSrcWidth ) + ( x * 2 ) ) * 4;
+ nSrcIndex[3] = ( ( ( ( y * 2 ) + 1 ) * nSrcWidth ) + ( x * 2 + 1 ) ) * 4;
+
+ // Sum all pixels
+ for ( int nSample = 0; nSample < 4; nSample++ )
+ {
+ r += pSrc[ nSrcIndex[ nSample ] ];
+ g += pSrc[ nSrcIndex[ nSample ] + 1 ];
+ b += pSrc[ nSrcIndex[ nSample ] + 2 ];
+ a += pSrc[ nSrcIndex[ nSample ] + 3 ];
+ }
+
+ // Average results
+ r /= 4.0;
+ g /= 4.0;
+ b /= 4.0;
+ a /= 4.0;
+
+ // Store resulting pixels
+ ( *ppDst ) [ ( y * ( *pDstWidthOut ) + x ) * 4 ] = ( UINT8 ) ( r );
+ ( *ppDst ) [ ( y * ( *pDstWidthOut ) + x ) * 4 + 1] = ( UINT8 ) ( g );
+ ( *ppDst ) [ ( y * ( *pDstWidthOut ) + x ) * 4 + 2] = ( UINT8 ) ( b );
+ ( *ppDst ) [ ( y * ( *pDstWidthOut ) + x ) * 4 + 3] = ( UINT8 ) ( a );
+ }
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: create a sea of cubes
+//-----------------------------------------------------------------------------
+void CMainApplication::SetupScene()
+{
+ if ( !m_pHMD )
+ return;
+
+ std::vector<float> vertdataarray;
+
+ Matrix4 matScale;
+ matScale.scale( m_fScale, m_fScale, m_fScale );
+ Matrix4 matTransform;
+ matTransform.translate(
+ -( (float)m_iSceneVolumeWidth * m_fScaleSpacing ) / 2.f,
+ -( (float)m_iSceneVolumeHeight * m_fScaleSpacing ) / 2.f,
+ -( (float)m_iSceneVolumeDepth * m_fScaleSpacing ) / 2.f);
+
+ Matrix4 mat = matScale * matTransform;
+
+ for( int z = 0; z< m_iSceneVolumeDepth; z++ )
+ {
+ for( int y = 0; y< m_iSceneVolumeHeight; y++ )
+ {
+ for( int x = 0; x< m_iSceneVolumeWidth; x++ )
+ {
+ AddCubeToScene( mat, vertdataarray );
+ mat = mat * Matrix4().translate( m_fScaleSpacing, 0, 0 );
+ }
+ mat = mat * Matrix4().translate( -((float)m_iSceneVolumeWidth) * m_fScaleSpacing, m_fScaleSpacing, 0 );
+ }
+ mat = mat * Matrix4().translate( 0, -((float)m_iSceneVolumeHeight) * m_fScaleSpacing, m_fScaleSpacing );
+ }
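+	// Each vertex is five floats: xyz position followed by uv texture coordinates.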
+ m_uiVertcount = vertdataarray.size()/5;
+
+ m_pDevice->CreateCommittedResource( &CD3DX12_HEAP_PROPERTIES(D3D12_HEAP_TYPE_UPLOAD),
+ D3D12_HEAP_FLAG_NONE,
+ &CD3DX12_RESOURCE_DESC::Buffer( sizeof(float) * vertdataarray.size() ),
+ D3D12_RESOURCE_STATE_GENERIC_READ,
+ nullptr,
+ IID_PPV_ARGS( &m_pSceneVertexBuffer ) );
+
+ UINT8 *pMappedBuffer;
+ CD3DX12_RANGE readRange( 0, 0 );
+ m_pSceneVertexBuffer->Map( 0, &readRange, reinterpret_cast< void** >( &pMappedBuffer ) );
+ memcpy( pMappedBuffer, &vertdataarray[0], sizeof( float ) * vertdataarray.size() );
+ m_pSceneVertexBuffer->Unmap( 0, nullptr );
+
+ m_sceneVertexBufferView.BufferLocation = m_pSceneVertexBuffer->GetGPUVirtualAddress();
+ m_sceneVertexBufferView.StrideInBytes = sizeof( VertexDataScene );
+ m_sceneVertexBufferView.SizeInBytes = sizeof( float ) * vertdataarray.size();
+}
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//-----------------------------------------------------------------------------
+void CMainApplication::AddCubeVertex( float fl0, float fl1, float fl2, float fl3, float fl4, std::vector<float> &vertdata )
+{
+ vertdata.push_back( fl0 );
+ vertdata.push_back( fl1 );
+ vertdata.push_back( fl2 );
+ vertdata.push_back( fl3 );
+ vertdata.push_back( fl4 );
+}
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//-----------------------------------------------------------------------------
+void CMainApplication::AddCubeToScene( Matrix4 mat, std::vector<float> &vertdata )
+{
+ // Matrix4 mat( outermat.data() );
+
+ Vector4 A = mat * Vector4( 0, 0, 0, 1 );
+ Vector4 B = mat * Vector4( 1, 0, 0, 1 );
+ Vector4 C = mat * Vector4( 1, 1, 0, 1 );
+ Vector4 D = mat * Vector4( 0, 1, 0, 1 );
+ Vector4 E = mat * Vector4( 0, 0, 1, 1 );
+ Vector4 F = mat * Vector4( 1, 0, 1, 1 );
+ Vector4 G = mat * Vector4( 1, 1, 1, 1 );
+ Vector4 H = mat * Vector4( 0, 1, 1, 1 );
+
+ // triangles instead of quads
+ AddCubeVertex( E.x, E.y, E.z, 0, 1, vertdata ); //Front
+ AddCubeVertex( F.x, F.y, F.z, 1, 1, vertdata );
+ AddCubeVertex( G.x, G.y, G.z, 1, 0, vertdata );
+ AddCubeVertex( G.x, G.y, G.z, 1, 0, vertdata );
+ AddCubeVertex( H.x, H.y, H.z, 0, 0, vertdata );
+ AddCubeVertex( E.x, E.y, E.z, 0, 1, vertdata );
+
+ AddCubeVertex( B.x, B.y, B.z, 0, 1, vertdata ); //Back
+ AddCubeVertex( A.x, A.y, A.z, 1, 1, vertdata );
+ AddCubeVertex( D.x, D.y, D.z, 1, 0, vertdata );
+ AddCubeVertex( D.x, D.y, D.z, 1, 0, vertdata );
+ AddCubeVertex( C.x, C.y, C.z, 0, 0, vertdata );
+ AddCubeVertex( B.x, B.y, B.z, 0, 1, vertdata );
+
+ AddCubeVertex( H.x, H.y, H.z, 0, 1, vertdata ); //Top
+ AddCubeVertex( G.x, G.y, G.z, 1, 1, vertdata );
+ AddCubeVertex( C.x, C.y, C.z, 1, 0, vertdata );
+ AddCubeVertex( C.x, C.y, C.z, 1, 0, vertdata );
+ AddCubeVertex( D.x, D.y, D.z, 0, 0, vertdata );
+ AddCubeVertex( H.x, H.y, H.z, 0, 1, vertdata );
+
+ AddCubeVertex( A.x, A.y, A.z, 0, 1, vertdata ); //Bottom
+ AddCubeVertex( B.x, B.y, B.z, 1, 1, vertdata );
+ AddCubeVertex( F.x, F.y, F.z, 1, 0, vertdata );
+ AddCubeVertex( F.x, F.y, F.z, 1, 0, vertdata );
+ AddCubeVertex( E.x, E.y, E.z, 0, 0, vertdata );
+ AddCubeVertex( A.x, A.y, A.z, 0, 1, vertdata );
+
+ AddCubeVertex( A.x, A.y, A.z, 0, 1, vertdata ); //Left
+ AddCubeVertex( E.x, E.y, E.z, 1, 1, vertdata );
+ AddCubeVertex( H.x, H.y, H.z, 1, 0, vertdata );
+ AddCubeVertex( H.x, H.y, H.z, 1, 0, vertdata );
+ AddCubeVertex( D.x, D.y, D.z, 0, 0, vertdata );
+ AddCubeVertex( A.x, A.y, A.z, 0, 1, vertdata );
+
+ AddCubeVertex( F.x, F.y, F.z, 0, 1, vertdata ); //Right
+ AddCubeVertex( B.x, B.y, B.z, 1, 1, vertdata );
+ AddCubeVertex( C.x, C.y, C.z, 1, 0, vertdata );
+ AddCubeVertex( C.x, C.y, C.z, 1, 0, vertdata );
+ AddCubeVertex( G.x, G.y, G.z, 0, 0, vertdata );
+ AddCubeVertex( F.x, F.y, F.z, 0, 1, vertdata );
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Update the vertex data for the controllers as X/Y/Z lines
+//-----------------------------------------------------------------------------
+void CMainApplication::UpdateControllerAxes()
+{
+ // don't draw controllers if somebody else has input focus
+ if( m_pHMD->IsInputFocusCapturedByAnotherProcess() )
+ return;
+
+ std::vector<float> vertdataarray;
+
+ m_uiControllerVertcount = 0;
+ m_iTrackedControllerCount = 0;
+
+ for ( vr::TrackedDeviceIndex_t unTrackedDevice = vr::k_unTrackedDeviceIndex_Hmd + 1; unTrackedDevice < vr::k_unMaxTrackedDeviceCount; ++unTrackedDevice )
+ {
+ if ( !m_pHMD->IsTrackedDeviceConnected( unTrackedDevice ) )
+ continue;
+
+ if( m_pHMD->GetTrackedDeviceClass( unTrackedDevice ) != vr::TrackedDeviceClass_Controller )
+ continue;
+
+ m_iTrackedControllerCount += 1;
+
+ if( !m_rTrackedDevicePose[ unTrackedDevice ].bPoseIsValid )
+ continue;
+
+ const Matrix4 & mat = m_rmat4DevicePose[unTrackedDevice];
+
+ Vector4 center = mat * Vector4( 0, 0, 0, 1 );
+
+ for ( int i = 0; i < 3; ++i )
+ {
+ Vector3 color( 0, 0, 0 );
+ Vector4 point( 0, 0, 0, 1 );
+ point[i] += 0.05f; // offset in X, Y, Z
+ color[i] = 1.0; // R, G, B
+ point = mat * point;
+ vertdataarray.push_back( center.x );
+ vertdataarray.push_back( center.y );
+ vertdataarray.push_back( center.z );
+
+ vertdataarray.push_back( color.x );
+ vertdataarray.push_back( color.y );
+ vertdataarray.push_back( color.z );
+
+ vertdataarray.push_back( point.x );
+ vertdataarray.push_back( point.y );
+ vertdataarray.push_back( point.z );
+
+ vertdataarray.push_back( color.x );
+ vertdataarray.push_back( color.y );
+ vertdataarray.push_back( color.z );
+
+ m_uiControllerVertcount += 2;
+ }
+
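+		// Add a pointing ray extending from just in front of the controller out along its -Z (forward) axis.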
+ Vector4 start = mat * Vector4( 0, 0, -0.02f, 1 );
+ Vector4 end = mat * Vector4( 0, 0, -39.f, 1 );
+ Vector3 color( .92f, .92f, .71f );
+
+ vertdataarray.push_back( start.x );vertdataarray.push_back( start.y );vertdataarray.push_back( start.z );
+ vertdataarray.push_back( color.x );vertdataarray.push_back( color.y );vertdataarray.push_back( color.z );
+
+ vertdataarray.push_back( end.x );vertdataarray.push_back( end.y );vertdataarray.push_back( end.z );
+ vertdataarray.push_back( color.x );vertdataarray.push_back( color.y );vertdataarray.push_back( color.z );
+ m_uiControllerVertcount += 2;
+ }
+
+ // Setup the VB the first time through.
+ if ( m_pControllerAxisVertexBuffer == nullptr && vertdataarray.size() > 0 )
+ {
+ // Make big enough to hold up to the max number
+ size_t nSize = sizeof(float) * vertdataarray.size();
+ nSize *= vr::k_unMaxTrackedDeviceCount;
+
+ m_pDevice->CreateCommittedResource(
+ &CD3DX12_HEAP_PROPERTIES( D3D12_HEAP_TYPE_UPLOAD ),
+ D3D12_HEAP_FLAG_NONE,
+ &CD3DX12_RESOURCE_DESC::Buffer( nSize ),
+ D3D12_RESOURCE_STATE_GENERIC_READ,
+ nullptr,
+ IID_PPV_ARGS( &m_pControllerAxisVertexBuffer ) );
+
+ m_controllerAxisVertexBufferView.BufferLocation = m_pControllerAxisVertexBuffer->GetGPUVirtualAddress();
+ m_controllerAxisVertexBufferView.StrideInBytes = sizeof( float ) * 6;
+ m_controllerAxisVertexBufferView.SizeInBytes = sizeof( float ) * vertdataarray.size();
+ }
+
+ // Update the VB data
+ if ( m_pControllerAxisVertexBuffer )
+ {
+ UINT8 *pMappedBuffer;
+ CD3DX12_RANGE readRange( 0, 0 );
+ m_pControllerAxisVertexBuffer->Map( 0, &readRange, reinterpret_cast< void** >( &pMappedBuffer ) );
+ memcpy( pMappedBuffer, &vertdataarray[0], sizeof( float ) * vertdataarray.size() );
+ m_pControllerAxisVertexBuffer->Unmap( 0, nullptr );
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//-----------------------------------------------------------------------------
+void CMainApplication::SetupCameras()
+{
+ m_mat4ProjectionLeft = GetHMDMatrixProjectionEye( vr::Eye_Left );
+ m_mat4ProjectionRight = GetHMDMatrixProjectionEye( vr::Eye_Right );
+ m_mat4eyePosLeft = GetHMDMatrixPoseEye( vr::Eye_Left );
+ m_mat4eyePosRight = GetHMDMatrixPoseEye( vr::Eye_Right );
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Creates a frame buffer. Returns true if the buffer was set up.
+// Returns false if the setup failed.
+//-----------------------------------------------------------------------------
+bool CMainApplication::CreateFrameBuffer( int nWidth, int nHeight, FramebufferDesc &framebufferDesc, RTVIndex_t nRTVIndex )
+{
+ D3D12_RESOURCE_DESC textureDesc = {};
+ textureDesc.MipLevels = 1;
+ textureDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM_SRGB;
+ textureDesc.Width = nWidth;
+ textureDesc.Height = nHeight;
+ textureDesc.Flags = D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET;
+ textureDesc.DepthOrArraySize = 1;
+ textureDesc.SampleDesc.Count = m_nMSAASampleCount;
+ textureDesc.SampleDesc.Quality = 0;
+ textureDesc.Dimension = D3D12_RESOURCE_DIMENSION_TEXTURE2D;
+
+ const float clearColor[] = { 0.0f, 0.0f, 0.0f, 1.0f };
+
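+	// The eye texture starts in the PIXEL_SHADER_RESOURCE state; RenderStereoTargets transitions it
+	// to RENDER_TARGET before rendering into it each frame.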
+ // Create color target
+ m_pDevice->CreateCommittedResource( &CD3DX12_HEAP_PROPERTIES( D3D12_HEAP_TYPE_DEFAULT ),
+ D3D12_HEAP_FLAG_NONE,
+ &textureDesc,
+ D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE,
+ &CD3DX12_CLEAR_VALUE( DXGI_FORMAT_R8G8B8A8_UNORM_SRGB, clearColor ),
+ IID_PPV_ARGS( &framebufferDesc.m_pTexture ) );
+
+ CD3DX12_CPU_DESCRIPTOR_HANDLE rtvHandle( m_pRTVHeap->GetCPUDescriptorHandleForHeapStart() );
+ rtvHandle.Offset( nRTVIndex, m_nRTVDescriptorSize );
+ m_pDevice->CreateRenderTargetView( framebufferDesc.m_pTexture.Get(), nullptr, rtvHandle );
+ framebufferDesc.m_renderTargetViewHandle = rtvHandle;
+
+ // Create shader resource view
+ CD3DX12_CPU_DESCRIPTOR_HANDLE srvHandle( m_pCBVSRVHeap->GetCPUDescriptorHandleForHeapStart() );
+ srvHandle.Offset( SRV_LEFT_EYE + nRTVIndex, m_nCBVSRVDescriptorSize );
+ m_pDevice->CreateShaderResourceView( framebufferDesc.m_pTexture.Get(), nullptr, srvHandle );
+
+ // Create depth
+ D3D12_RESOURCE_DESC depthDesc = textureDesc;
+ depthDesc.Format = DXGI_FORMAT_D32_FLOAT;
+ depthDesc.Flags = D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL;
+ m_pDevice->CreateCommittedResource( &CD3DX12_HEAP_PROPERTIES( D3D12_HEAP_TYPE_DEFAULT ),
+ D3D12_HEAP_FLAG_NONE,
+ &depthDesc,
+ D3D12_RESOURCE_STATE_DEPTH_WRITE,
+ &CD3DX12_CLEAR_VALUE( DXGI_FORMAT_D32_FLOAT, 1.0f, 0 ),
+ IID_PPV_ARGS( &framebufferDesc.m_pDepthStencil ) );
+
+ CD3DX12_CPU_DESCRIPTOR_HANDLE dsvHandle( m_pDSVHeap->GetCPUDescriptorHandleForHeapStart() );
+ dsvHandle.Offset( nRTVIndex, m_nDSVDescriptorSize );
+ m_pDevice->CreateDepthStencilView( framebufferDesc.m_pDepthStencil.Get(), nullptr, dsvHandle );
+ framebufferDesc.m_depthStencilViewHandle = dsvHandle;
+ return true;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//-----------------------------------------------------------------------------
+bool CMainApplication::SetupStereoRenderTargets()
+{
+ if ( !m_pHMD )
+ return false;
+
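+	// Scale the compositor-recommended render target size by the optional supersampling factor (-supersample).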
+ m_pHMD->GetRecommendedRenderTargetSize( &m_nRenderWidth, &m_nRenderHeight );
+ m_nRenderWidth = ( uint32_t )( m_flSuperSampleScale * ( float ) m_nRenderWidth );
+ m_nRenderHeight = ( uint32_t )( m_flSuperSampleScale * ( float ) m_nRenderHeight );
+
+ CreateFrameBuffer( m_nRenderWidth, m_nRenderHeight, m_leftEyeDesc, RTV_LEFT_EYE );
+ CreateFrameBuffer( m_nRenderWidth, m_nRenderHeight, m_rightEyeDesc, RTV_RIGHT_EYE );
+ return true;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//-----------------------------------------------------------------------------
+void CMainApplication::SetupCompanionWindow()
+{
+ if ( !m_pHMD )
+ return;
+
+ std::vector<VertexDataWindow> vVerts;
+
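+ // Each eye is drawn as a full-height quad in normalized device coordinates: the left eye
+ // covers x in [-1, 0], the right eye covers x in [0, 1]. The second Vector2 of each vertex
+ // is the texture coordinate used to sample that eye's render target.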
+ // left eye verts
+ vVerts.push_back( VertexDataWindow( Vector2(-1, -1), Vector2(0, 1)) );
+ vVerts.push_back( VertexDataWindow( Vector2(0, -1), Vector2(1, 1)) );
+ vVerts.push_back( VertexDataWindow( Vector2(-1, 1), Vector2(0, 0)) );
+ vVerts.push_back( VertexDataWindow( Vector2(0, 1), Vector2(1, 0)) );
+
+ // right eye verts
+ vVerts.push_back( VertexDataWindow( Vector2(0, -1), Vector2(0, 1)) );
+ vVerts.push_back( VertexDataWindow( Vector2(1, -1), Vector2(1, 1)) );
+ vVerts.push_back( VertexDataWindow( Vector2(0, 1), Vector2(0, 0)) );
+ vVerts.push_back( VertexDataWindow( Vector2(1, 1), Vector2(1, 0)) );
+
+ m_pDevice->CreateCommittedResource( &CD3DX12_HEAP_PROPERTIES( D3D12_HEAP_TYPE_UPLOAD ),
+ D3D12_HEAP_FLAG_NONE,
+ &CD3DX12_RESOURCE_DESC::Buffer( sizeof( VertexDataWindow ) * vVerts.size() ),
+ D3D12_RESOURCE_STATE_GENERIC_READ,
+ nullptr,
+ IID_PPV_ARGS( &m_pCompanionWindowVertexBuffer ) );
+
+ UINT8 *pMappedBuffer;
+ CD3DX12_RANGE readRange( 0, 0 );
+ m_pCompanionWindowVertexBuffer->Map( 0, &readRange, reinterpret_cast< void** >( &pMappedBuffer ) );
+ memcpy( pMappedBuffer, &vVerts[0], sizeof( VertexDataWindow ) * vVerts.size() );
+ m_pCompanionWindowVertexBuffer->Unmap( 0, nullptr );
+
+ m_companionWindowVertexBufferView.BufferLocation = m_pCompanionWindowVertexBuffer->GetGPUVirtualAddress();
+ m_companionWindowVertexBufferView.StrideInBytes = sizeof( VertexDataWindow );
+ m_companionWindowVertexBufferView.SizeInBytes = sizeof( VertexDataWindow ) * vVerts.size();
+
+
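+ // Both quads share one index buffer: indices 0-5 draw the left-eye quad and indices 6-11 the
+ // right-eye quad; RenderCompanionWindow draws each half with a separate DrawIndexedInstanced call.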
+ UINT16 vIndices[] = { 0, 1, 3, 0, 3, 2, 4, 5, 7, 4, 7, 6};
+ m_uiCompanionWindowIndexSize = _countof(vIndices);
+
+ m_pDevice->CreateCommittedResource( &CD3DX12_HEAP_PROPERTIES( D3D12_HEAP_TYPE_UPLOAD ),
+ D3D12_HEAP_FLAG_NONE,
+ &CD3DX12_RESOURCE_DESC::Buffer( sizeof( vIndices ) ),
+ D3D12_RESOURCE_STATE_GENERIC_READ,
+ nullptr,
+ IID_PPV_ARGS( &m_pCompanionWindowIndexBuffer ) );
+
+ m_pCompanionWindowIndexBuffer->Map( 0, &readRange, reinterpret_cast< void** >( &pMappedBuffer ) );
+ memcpy( pMappedBuffer, &vIndices[0], sizeof( vIndices ) );
+ m_pCompanionWindowIndexBuffer->Unmap( 0, nullptr );
+
+ m_companionWindowIndexBufferView.BufferLocation = m_pCompanionWindowIndexBuffer->GetGPUVirtualAddress();
+ m_companionWindowIndexBufferView.Format = DXGI_FORMAT_R16_UINT;
+ m_companionWindowIndexBufferView.SizeInBytes = sizeof( vIndices );
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Renders the scene into the left and right eye render targets.
+//-----------------------------------------------------------------------------
+void CMainApplication::RenderStereoTargets()
+{
+ D3D12_VIEWPORT viewport = { 0.0f, 0.0f, ( FLOAT ) m_nRenderWidth, ( FLOAT ) m_nRenderHeight, 0.0f, 1.0f };
+ D3D12_RECT scissor = { 0, 0, ( LONG ) m_nRenderWidth, ( LONG )m_nRenderHeight };
+
+ m_pCommandList->RSSetViewports( 1, &viewport );
+ m_pCommandList->RSSetScissorRects( 1, &scissor );
+
+ //----------//
+ // Left Eye //
+ //----------//
+ // Transition to RENDER_TARGET
+ m_pCommandList->ResourceBarrier( 1, &CD3DX12_RESOURCE_BARRIER::Transition( m_leftEyeDesc.m_pTexture.Get(), D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE, D3D12_RESOURCE_STATE_RENDER_TARGET ) );
+ m_pCommandList->OMSetRenderTargets( 1, &m_leftEyeDesc.m_renderTargetViewHandle, FALSE, &m_leftEyeDesc.m_depthStencilViewHandle );
+
+ const float clearColor[] = { 0.0f, 0.0f, 0.0f, 1.0f };
+ m_pCommandList->ClearRenderTargetView( m_leftEyeDesc.m_renderTargetViewHandle, clearColor, 0, nullptr );
+ m_pCommandList->ClearDepthStencilView( m_leftEyeDesc.m_depthStencilViewHandle, D3D12_CLEAR_FLAG_DEPTH, 1.0, 0, 0, nullptr );
+
+ RenderScene( vr::Eye_Left );
+
+ // Transition to SHADER_RESOURCE to submit to SteamVR
+ m_pCommandList->ResourceBarrier( 1, &CD3DX12_RESOURCE_BARRIER::Transition( m_leftEyeDesc.m_pTexture.Get(), D3D12_RESOURCE_STATE_RENDER_TARGET, D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE ) );
+
+
+ //-----------//
+ // Right Eye //
+ //-----------//
+ // Transition to RENDER_TARGET
+ m_pCommandList->ResourceBarrier( 1, &CD3DX12_RESOURCE_BARRIER::Transition( m_rightEyeDesc.m_pTexture.Get(), D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE, D3D12_RESOURCE_STATE_RENDER_TARGET ) );
+ m_pCommandList->OMSetRenderTargets( 1, &m_rightEyeDesc.m_renderTargetViewHandle, FALSE, &m_rightEyeDesc.m_depthStencilViewHandle );
+
+ m_pCommandList->ClearRenderTargetView( m_rightEyeDesc.m_renderTargetViewHandle, clearColor, 0, nullptr );
+ m_pCommandList->ClearDepthStencilView( m_rightEyeDesc.m_depthStencilViewHandle, D3D12_CLEAR_FLAG_DEPTH, 1.0, 0, 0, nullptr );
+
+ RenderScene( vr::Eye_Right );
+
+ // Transition to SHADER_RESOURCE to submit to SteamVR
+ m_pCommandList->ResourceBarrier( 1, &CD3DX12_RESOURCE_BARRIER::Transition( m_rightEyeDesc.m_pTexture.Get(), D3D12_RESOURCE_STATE_RENDER_TARGET, D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE ) );
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Renders a scene with respect to nEye.
+//-----------------------------------------------------------------------------
+void CMainApplication::RenderScene( vr::Hmd_Eye nEye )
+{
+ if( m_bShowCubes )
+ {
+ m_pCommandList->SetPipelineState( m_pScenePipelineState.Get() );
+
+ // Select the CBV (left or right eye)
+ CD3DX12_GPU_DESCRIPTOR_HANDLE cbvHandle( m_pCBVSRVHeap->GetGPUDescriptorHandleForHeapStart() );
+ cbvHandle.Offset( nEye, m_nCBVSRVDescriptorSize );
+ m_pCommandList->SetGraphicsRootDescriptorTable( 0, cbvHandle );
+
+ // SRV is just after the left eye
+ CD3DX12_GPU_DESCRIPTOR_HANDLE srvHandle( m_pCBVSRVHeap->GetGPUDescriptorHandleForHeapStart() );
+ srvHandle.Offset( SRV_TEXTURE_MAP, m_nCBVSRVDescriptorSize );
+ m_pCommandList->SetGraphicsRootDescriptorTable( 1, srvHandle );
+
+ // Update the persistently mapped pointer to the CB data with the latest matrix
+ memcpy( m_pSceneConstantBufferData[ nEye ], GetCurrentViewProjectionMatrix( nEye ).get(), sizeof( Matrix4 ) );
+
+ // Draw
+ m_pCommandList->IASetPrimitiveTopology( D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST );
+ m_pCommandList->IASetVertexBuffers( 0, 1, &m_sceneVertexBufferView );
+ m_pCommandList->DrawInstanced( m_uiVertcount, 1, 0, 0 );
+ }
+
+ bool bIsInputCapturedByAnotherProcess = m_pHMD->IsInputFocusCapturedByAnotherProcess();
+
+ if( !bIsInputCapturedByAnotherProcess && m_pControllerAxisVertexBuffer )
+ {
+ // draw the controller axis lines
+ m_pCommandList->SetPipelineState( m_pAxesPipelineState.Get() );
+
+ m_pCommandList->IASetPrimitiveTopology( D3D_PRIMITIVE_TOPOLOGY_LINELIST );
+ m_pCommandList->IASetVertexBuffers( 0, 1, &m_controllerAxisVertexBufferView );
+ m_pCommandList->DrawInstanced( m_uiControllerVertcount, 1, 0, 0 );
+ }
+
+ // ----- Render Model rendering -----
+ m_pCommandList->SetPipelineState( m_pRenderModelPipelineState.Get() );
+ for( uint32_t unTrackedDevice = 0; unTrackedDevice < vr::k_unMaxTrackedDeviceCount; unTrackedDevice++ )
+ {
+ if( !m_rTrackedDeviceToRenderModel[ unTrackedDevice ] || !m_rbShowTrackedDevice[ unTrackedDevice ] )
+ continue;
+
+ const vr::TrackedDevicePose_t & pose = m_rTrackedDevicePose[ unTrackedDevice ];
+ if( !pose.bPoseIsValid )
+ continue;
+
+ if( bIsInputCapturedByAnotherProcess && m_pHMD->GetTrackedDeviceClass( unTrackedDevice ) == vr::TrackedDeviceClass_Controller )
+ continue;
+
+ const Matrix4 & matDeviceToTracking = m_rmat4DevicePose[ unTrackedDevice ];
+ Matrix4 matMVP = GetCurrentViewProjectionMatrix( nEye ) * matDeviceToTracking;
+
+ m_rTrackedDeviceToRenderModel[ unTrackedDevice ]->Draw( nEye, m_pCommandList.Get(), m_nCBVSRVDescriptorSize, matMVP );
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Draws the left and right eye textures side by side into the companion window's swapchain image.
+//-----------------------------------------------------------------------------
+void CMainApplication::RenderCompanionWindow()
+{
+ m_pCommandList->SetPipelineState( m_pCompanionPipelineState.Get() );
+
+ // Transition swapchain image to RENDER_TARGET
+ m_pCommandList->ResourceBarrier(1, &CD3DX12_RESOURCE_BARRIER::Transition( m_pSwapChainRenderTarget[ m_nFrameIndex ].Get(), D3D12_RESOURCE_STATE_PRESENT, D3D12_RESOURCE_STATE_RENDER_TARGET ) );
+
+ // Bind current swapchain image
+ CD3DX12_CPU_DESCRIPTOR_HANDLE rtvHandle( m_pRTVHeap->GetCPUDescriptorHandleForHeapStart() );
+ rtvHandle.Offset( RTV_SWAPCHAIN0 + m_nFrameIndex, m_nRTVDescriptorSize );
+ m_pCommandList->OMSetRenderTargets( 1, &rtvHandle, 0, nullptr );
+
+ D3D12_VIEWPORT viewport = { 0.0f, 0.0f, ( FLOAT ) m_nCompanionWindowWidth, ( FLOAT ) m_nCompanionWindowHeight, 0.0f, 1.0f };
+ D3D12_RECT scissor = { 0, 0, ( LONG ) m_nCompanionWindowWidth, ( LONG )m_nCompanionWindowHeight };
+
+ m_pCommandList->RSSetViewports( 1, &viewport );
+ m_pCommandList->RSSetScissorRects( 1, &scissor );
+
+
+ // render left eye (first half of index array)
+ CD3DX12_GPU_DESCRIPTOR_HANDLE srvHandleLeftEye( m_pCBVSRVHeap->GetGPUDescriptorHandleForHeapStart() );
+ srvHandleLeftEye.Offset( SRV_LEFT_EYE, m_nCBVSRVDescriptorSize );
+ m_pCommandList->SetGraphicsRootDescriptorTable( 1, srvHandleLeftEye );
+
+ m_pCommandList->IASetPrimitiveTopology( D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST );
+ m_pCommandList->IASetVertexBuffers( 0, 1, &m_companionWindowVertexBufferView );
+ m_pCommandList->IASetIndexBuffer( &m_companionWindowIndexBufferView );
+ m_pCommandList->DrawIndexedInstanced( m_uiCompanionWindowIndexSize / 2, 1, 0, 0, 0 );
+
+ // render right eye (second half of index array)
+ CD3DX12_GPU_DESCRIPTOR_HANDLE srvHandleRightEye( m_pCBVSRVHeap->GetGPUDescriptorHandleForHeapStart() );
+ srvHandleRightEye.Offset( SRV_RIGHT_EYE, m_nCBVSRVDescriptorSize );
+ m_pCommandList->SetGraphicsRootDescriptorTable( 1, srvHandleRightEye );
+ m_pCommandList->DrawIndexedInstanced( m_uiCompanionWindowIndexSize / 2, 1, ( m_uiCompanionWindowIndexSize / 2 ), 0, 0 );
+
+ // Transition swapchain image to PRESENT
+ m_pCommandList->ResourceBarrier(1, &CD3DX12_RESOURCE_BARRIER::Transition( m_pSwapChainRenderTarget[ m_nFrameIndex ].Get(), D3D12_RESOURCE_STATE_RENDER_TARGET, D3D12_RESOURCE_STATE_PRESENT ) );
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Gets the projection matrix for the specified eye.
+//-----------------------------------------------------------------------------
+Matrix4 CMainApplication::GetHMDMatrixProjectionEye( vr::Hmd_Eye nEye )
+{
+ if ( !m_pHMD )
+ return Matrix4();
+
+ vr::HmdMatrix44_t mat = m_pHMD->GetProjectionMatrix( nEye, m_fNearClip, m_fFarClip );
+
+ return Matrix4(
+ mat.m[0][0], mat.m[1][0], mat.m[2][0], mat.m[3][0],
+ mat.m[0][1], mat.m[1][1], mat.m[2][1], mat.m[3][1],
+ mat.m[0][2], mat.m[1][2], mat.m[2][2], mat.m[3][2],
+ mat.m[0][3], mat.m[1][3], mat.m[2][3], mat.m[3][3]
+ );
+}
+
+
+//-----------------------------------------------------------------------------
+// Purpose: Gets the inverse eye-to-head transform for the specified eye.
+//-----------------------------------------------------------------------------
+Matrix4 CMainApplication::GetHMDMatrixPoseEye( vr::Hmd_Eye nEye )
+{
+ if ( !m_pHMD )
+ return Matrix4();
+
+ vr::HmdMatrix34_t matEyeRight = m_pHMD->GetEyeToHeadTransform( nEye );
+ Matrix4 matrixObj(
+ matEyeRight.m[0][0], matEyeRight.m[1][0], matEyeRight.m[2][0], 0.0,
+ matEyeRight.m[0][1], matEyeRight.m[1][1], matEyeRight.m[2][1], 0.0,
+ matEyeRight.m[0][2], matEyeRight.m[1][2], matEyeRight.m[2][2], 0.0,
+ matEyeRight.m[0][3], matEyeRight.m[1][3], matEyeRight.m[2][3], 1.0f
+ );
+
+ return matrixObj.invert();
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Gets a Current View Projection Matrix with respect to nEye,
+// which may be an Eye_Left or an Eye_Right.
+//-----------------------------------------------------------------------------
+Matrix4 CMainApplication::GetCurrentViewProjectionMatrix( vr::Hmd_Eye nEye )
+{
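+	// The matrices composed here already contain the required inverses: m_mat4eyePos* holds the
+	// inverted eye-to-head transform and m_mat4HMDPose the inverted HMD device pose, so the
+	// product below maps tracking space -> head space -> eye space -> clip space.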
+ Matrix4 matMVP;
+ if( nEye == vr::Eye_Left )
+ {
+ matMVP = m_mat4ProjectionLeft * m_mat4eyePosLeft * m_mat4HMDPose;
+ }
+ else if( nEye == vr::Eye_Right )
+ {
+ matMVP = m_mat4ProjectionRight * m_mat4eyePosRight * m_mat4HMDPose;
+ }
+
+ return matMVP;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Waits on WaitGetPoses and updates the per-device pose matrices and the inverse HMD pose.
+//-----------------------------------------------------------------------------
+void CMainApplication::UpdateHMDMatrixPose()
+{
+ if ( !m_pHMD )
+ return;
+
+ vr::VRCompositor()->WaitGetPoses(m_rTrackedDevicePose, vr::k_unMaxTrackedDeviceCount, NULL, 0 );
+
+ m_iValidPoseCount = 0;
+ m_strPoseClasses = "";
+ for ( int nDevice = 0; nDevice < vr::k_unMaxTrackedDeviceCount; ++nDevice )
+ {
+ if ( m_rTrackedDevicePose[nDevice].bPoseIsValid )
+ {
+ m_iValidPoseCount++;
+ m_rmat4DevicePose[nDevice] = ConvertSteamVRMatrixToMatrix4( m_rTrackedDevicePose[nDevice].mDeviceToAbsoluteTracking );
+ if (m_rDevClassChar[nDevice]==0)
+ {
+ switch (m_pHMD->GetTrackedDeviceClass(nDevice))
+ {
+ case vr::TrackedDeviceClass_Controller: m_rDevClassChar[nDevice] = 'C'; break;
+ case vr::TrackedDeviceClass_HMD: m_rDevClassChar[nDevice] = 'H'; break;
+ case vr::TrackedDeviceClass_Invalid: m_rDevClassChar[nDevice] = 'I'; break;
+ case vr::TrackedDeviceClass_GenericTracker: m_rDevClassChar[nDevice] = 'G'; break;
+ case vr::TrackedDeviceClass_TrackingReference: m_rDevClassChar[nDevice] = 'T'; break;
+ default: m_rDevClassChar[nDevice] = '?'; break;
+ }
+ }
+ m_strPoseClasses += m_rDevClassChar[nDevice];
+ }
+ }
+
+ if ( m_rTrackedDevicePose[vr::k_unTrackedDeviceIndex_Hmd].bPoseIsValid )
+ {
+ m_mat4HMDPose = m_rmat4DevicePose[vr::k_unTrackedDeviceIndex_Hmd];
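+		// The device pose maps HMD space into tracking space; invert it to get the view
+		// transform from tracking space into HMD space.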
+ m_mat4HMDPose.invert();
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Finds a render model we've already loaded or loads a new one
+//-----------------------------------------------------------------------------
+DX12RenderModel *CMainApplication::FindOrLoadRenderModel( vr::TrackedDeviceIndex_t unTrackedDeviceIndex, const char *pchRenderModelName )
+{
+ DX12RenderModel *pRenderModel = NULL;
+ // To simplify the D3D12 rendering code, create an instance of the model for each model name. This is less efficient
+ // memory wise, but simplifies the rendering code so we can store the transform in a constant buffer associated with
+ // the model itself. You would not want to do this in a production application.
+ //for( std::vector< DX12RenderModel * >::iterator i = m_vecRenderModels.begin(); i != m_vecRenderModels.end(); i++ )
+ //{
+ //if( !stricmp( (*i)->GetName().c_str(), pchRenderModelName ) )
+ //{
+ // pRenderModel = *i;
+ // break;
+ //}
+ //}
+
+ // load the model if we didn't find one
+ if( !pRenderModel )
+ {
+ vr::RenderModel_t *pModel;
+ vr::EVRRenderModelError error;
+ while ( 1 )
+ {
+ error = vr::VRRenderModels()->LoadRenderModel_Async( pchRenderModelName, &pModel );
+ if ( error != vr::VRRenderModelError_Loading )
+ break;
+
+ ThreadSleep( 1 );
+ }
+
+ if ( error != vr::VRRenderModelError_None )
+ {
+ dprintf( "Unable to load render model %s - %s\n", pchRenderModelName, vr::VRRenderModels()->GetRenderModelErrorNameFromEnum( error ) );
+ return NULL; // move on to the next tracked device
+ }
+
+ vr::RenderModel_TextureMap_t *pTexture;
+ while ( 1 )
+ {
+ error = vr::VRRenderModels()->LoadTexture_Async( pModel->diffuseTextureId, &pTexture );
+ if ( error != vr::VRRenderModelError_Loading )
+ break;
+
+ ThreadSleep( 1 );
+ }
+
+ if ( error != vr::VRRenderModelError_None )
+ {
+ dprintf( "Unable to load render texture id:%d for render model %s\n", pModel->diffuseTextureId, pchRenderModelName );
+ vr::VRRenderModels()->FreeRenderModel( pModel );
+ return NULL; // move on to the next tracked device
+ }
+
+ pRenderModel = new DX12RenderModel( pchRenderModelName );
+ if ( !pRenderModel->BInit( m_pDevice.Get(), m_pCommandList.Get(), m_pCBVSRVHeap.Get(), unTrackedDeviceIndex, *pModel, *pTexture ) )
+ {
+ dprintf( "Unable to create D3D12 model from render model %s\n", pchRenderModelName );
+ delete pRenderModel;
+ pRenderModel = NULL;
+ }
+ else
+ {
+ m_vecRenderModels.push_back( pRenderModel );
+ }
+ vr::VRRenderModels()->FreeRenderModel( pModel );
+ vr::VRRenderModels()->FreeTexture( pTexture );
+ }
+
+ return pRenderModel;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Creates a D3D12 render model for a single tracked device
+//-----------------------------------------------------------------------------
+void CMainApplication::SetupRenderModelForTrackedDevice( vr::TrackedDeviceIndex_t unTrackedDeviceIndex )
+{
+ if( unTrackedDeviceIndex >= vr::k_unMaxTrackedDeviceCount )
+ return;
+
+ // try to find a model we've already set up
+ std::string sRenderModelName = GetTrackedDeviceString( m_pHMD, unTrackedDeviceIndex, vr::Prop_RenderModelName_String );
+ DX12RenderModel *pRenderModel = FindOrLoadRenderModel( unTrackedDeviceIndex, sRenderModelName.c_str() );
+ if( !pRenderModel )
+ {
+ std::string sTrackingSystemName = GetTrackedDeviceString( m_pHMD, unTrackedDeviceIndex, vr::Prop_TrackingSystemName_String );
+ dprintf( "Unable to load render model for tracked device %d (%s.%s)", unTrackedDeviceIndex, sTrackingSystemName.c_str(), sRenderModelName.c_str() );
+ }
+ else
+ {
+ m_rTrackedDeviceToRenderModel[ unTrackedDeviceIndex ] = pRenderModel;
+ m_rbShowTrackedDevice[ unTrackedDeviceIndex ] = true;
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Creates D3D12 render models for all connected tracked devices
+//-----------------------------------------------------------------------------
+void CMainApplication::SetupRenderModels()
+{
+ memset( m_rTrackedDeviceToRenderModel, 0, sizeof( m_rTrackedDeviceToRenderModel ) );
+
+ if( !m_pHMD )
+ return;
+
+ for( uint32_t unTrackedDevice = vr::k_unTrackedDeviceIndex_Hmd + 1; unTrackedDevice < vr::k_unMaxTrackedDeviceCount; unTrackedDevice++ )
+ {
+ if( !m_pHMD->IsTrackedDeviceConnected( unTrackedDevice ) )
+ continue;
+
+ SetupRenderModelForTrackedDevice( unTrackedDevice );
+ }
+
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Converts a SteamVR matrix to our local matrix class
+//-----------------------------------------------------------------------------
+Matrix4 CMainApplication::ConvertSteamVRMatrixToMatrix4( const vr::HmdMatrix34_t &matPose )
+{
+ Matrix4 matrixObj(
+ matPose.m[0][0], matPose.m[1][0], matPose.m[2][0], 0.0,
+ matPose.m[0][1], matPose.m[1][1], matPose.m[2][1], 0.0,
+ matPose.m[0][2], matPose.m[1][2], matPose.m[2][2], 0.0,
+ matPose.m[0][3], matPose.m[1][3], matPose.m[2][3], 1.0f
+ );
+ return matrixObj;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: DX12RenderModel constructor and destructor
+//-----------------------------------------------------------------------------
+DX12RenderModel::DX12RenderModel( const std::string & sRenderModelName )
+ : m_sModelName( sRenderModelName )
+{
+ memset( m_pConstantBufferData, 0, sizeof( m_pConstantBufferData ) );
+}
+
+DX12RenderModel::~DX12RenderModel()
+{
+ Cleanup();
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Allocates and populates the D3D12 resources for a render model
+//-----------------------------------------------------------------------------
+bool DX12RenderModel::BInit( ID3D12Device *pDevice, ID3D12GraphicsCommandList *pCommandList, ID3D12DescriptorHeap *pCBVSRVHeap, vr::TrackedDeviceIndex_t unTrackedDeviceIndex, const vr::RenderModel_t & vrModel, const vr::RenderModel_TextureMap_t & vrDiffuseTexture )
+{
+ m_unTrackedDeviceIndex = unTrackedDeviceIndex;
+ m_pCBVSRVHeap = pCBVSRVHeap;
+
+ // Create and populate the vertex buffer
+ {
+ pDevice->CreateCommittedResource( &CD3DX12_HEAP_PROPERTIES(D3D12_HEAP_TYPE_UPLOAD),
+ D3D12_HEAP_FLAG_NONE,
+ &CD3DX12_RESOURCE_DESC::Buffer( sizeof( vr::RenderModel_Vertex_t ) * vrModel.unVertexCount ),
+ D3D12_RESOURCE_STATE_GENERIC_READ,
+ nullptr,
+ IID_PPV_ARGS( &m_pVertexBuffer ) );
+
+ UINT8 *pMappedBuffer;
+ CD3DX12_RANGE readRange( 0, 0 );
+ m_pVertexBuffer->Map( 0, &readRange, reinterpret_cast< void** >( &pMappedBuffer ) );
+ memcpy( pMappedBuffer, vrModel.rVertexData, sizeof( vr::RenderModel_Vertex_t ) * vrModel.unVertexCount );
+ m_pVertexBuffer->Unmap( 0, nullptr );
+
+ m_vertexBufferView.BufferLocation = m_pVertexBuffer->GetGPUVirtualAddress();
+ m_vertexBufferView.StrideInBytes = sizeof( vr::RenderModel_Vertex_t );
+ m_vertexBufferView.SizeInBytes = sizeof( vr::RenderModel_Vertex_t ) * vrModel.unVertexCount;
+ }
+
+ // Create and populate the index buffer
+ {
+ pDevice->CreateCommittedResource( &CD3DX12_HEAP_PROPERTIES( D3D12_HEAP_TYPE_UPLOAD ),
+ D3D12_HEAP_FLAG_NONE,
+ &CD3DX12_RESOURCE_DESC::Buffer( sizeof( uint16_t ) * vrModel.unTriangleCount * 3 ),
+ D3D12_RESOURCE_STATE_GENERIC_READ,
+ nullptr,
+ IID_PPV_ARGS( &m_pIndexBuffer ) );
+
+ UINT8 *pMappedBuffer;
+ CD3DX12_RANGE readRange( 0, 0 );
+ m_pIndexBuffer->Map( 0, &readRange, reinterpret_cast< void** >( &pMappedBuffer ) );
+ memcpy( pMappedBuffer, vrModel.rIndexData, sizeof( uint16_t ) * vrModel.unTriangleCount * 3 );
+ m_pIndexBuffer->Unmap( 0, nullptr );
+
+ m_indexBufferView.BufferLocation = m_pIndexBuffer->GetGPUVirtualAddress();
+ m_indexBufferView.Format = DXGI_FORMAT_R16_UINT;
+ m_indexBufferView.SizeInBytes = sizeof( uint16_t ) * vrModel.unTriangleCount * 3;
+ }
+
+ // create and populate the texture
+ {
+ int nImageWidth = vrDiffuseTexture.unWidth;
+ int nImageHeight = vrDiffuseTexture.unHeight;
+ std::vector< D3D12_SUBRESOURCE_DATA > mipLevelData;
+
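+		// Copy the base level out of the OpenVR-owned texture data so the whole mip chain can be
+		// freed uniformly once the upload has been recorded.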
+ UINT8 *pBaseData = new UINT8[ nImageWidth * nImageHeight * 4 ];
+ memcpy( pBaseData,vrDiffuseTexture.rubTextureMapData, sizeof( UINT8 ) * nImageWidth * nImageHeight * 4 );
+ D3D12_SUBRESOURCE_DATA textureData = {};
+ textureData.pData = &pBaseData[ 0 ];
+ textureData.RowPitch = nImageWidth * 4;
+ textureData.SlicePitch = textureData.RowPitch * nImageHeight;
+ mipLevelData.push_back( textureData );
+
+ // Generate mipmaps for the image
+ int nPrevImageIndex = 0;
+ int nMipWidth = nImageWidth;
+ int nMipHeight = nImageHeight;
+
+ while( nMipWidth > 1 && nMipHeight > 1 )
+ {
+ UINT8 *pNewImage;
+ CMainApplication::GenMipMapRGBA( ( UINT8* )mipLevelData[ nPrevImageIndex ].pData, &pNewImage, nMipWidth, nMipHeight, &nMipWidth, &nMipHeight );
+
+ D3D12_SUBRESOURCE_DATA mipData = {};
+ mipData.pData = pNewImage;
+ mipData.RowPitch = nMipWidth * 4;
+ mipData.SlicePitch = textureData.RowPitch * nMipHeight;
+ mipLevelData.push_back( mipData );
+
+ nPrevImageIndex++;
+ }
+
+ D3D12_RESOURCE_DESC textureDesc = {};
+ textureDesc.MipLevels = ( UINT16 ) mipLevelData.size();
+ textureDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
+ textureDesc.Width = nImageWidth;
+ textureDesc.Height = nImageHeight;
+ textureDesc.Flags = D3D12_RESOURCE_FLAG_NONE;
+ textureDesc.DepthOrArraySize = 1;
+ textureDesc.SampleDesc.Count = 1;
+ textureDesc.SampleDesc.Quality = 0;
+ textureDesc.Dimension = D3D12_RESOURCE_DIMENSION_TEXTURE2D;
+
+ pDevice->CreateCommittedResource( &CD3DX12_HEAP_PROPERTIES( D3D12_HEAP_TYPE_DEFAULT ),
+ D3D12_HEAP_FLAG_NONE,
+ &textureDesc,
+ D3D12_RESOURCE_STATE_COPY_DEST,
+ nullptr,
+ IID_PPV_ARGS( &m_pTexture ) );
+
+ // Create shader resource view
+ CD3DX12_CPU_DESCRIPTOR_HANDLE srvHandle( pCBVSRVHeap->GetCPUDescriptorHandleForHeapStart() );
+ srvHandle.Offset( SRV_TEXTURE_RENDER_MODEL0 + unTrackedDeviceIndex, pDevice->GetDescriptorHandleIncrementSize( D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV ) );
+ pDevice->CreateShaderResourceView( m_pTexture.Get(), nullptr, srvHandle );
+
+ const UINT64 nUploadBufferSize = GetRequiredIntermediateSize( m_pTexture.Get(), 0, textureDesc.MipLevels );
+
+ // Create the GPU upload buffer.
+ pDevice->CreateCommittedResource(
+ &CD3DX12_HEAP_PROPERTIES( D3D12_HEAP_TYPE_UPLOAD ),
+ D3D12_HEAP_FLAG_NONE,
+ &CD3DX12_RESOURCE_DESC::Buffer( nUploadBufferSize ),
+ D3D12_RESOURCE_STATE_GENERIC_READ,
+ nullptr,
+ IID_PPV_ARGS( &m_pTextureUploadHeap ) );
+
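+		// UpdateSubresources (a d3dx12.h helper) stages every mip level through the upload heap
+		// and records the copies into the default-heap texture on the command list.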
+ UpdateSubresources( pCommandList, m_pTexture.Get(), m_pTextureUploadHeap.Get(), 0, 0, mipLevelData.size(), &mipLevelData[0] );
+ pCommandList->ResourceBarrier( 1, &CD3DX12_RESOURCE_BARRIER::Transition( m_pTexture.Get(), D3D12_RESOURCE_STATE_COPY_DEST, D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE ) );
+
+ // Free mip pointers
+ for ( size_t nMip = 0; nMip < mipLevelData.size(); nMip++ )
+ {
+ delete [] mipLevelData[ nMip ].pData;
+ }
+ }
+
+ // Create a constant buffer to hold the transform (one for each eye)
+ {
+ pDevice->CreateCommittedResource(
+ &CD3DX12_HEAP_PROPERTIES( D3D12_HEAP_TYPE_UPLOAD ),
+ D3D12_HEAP_FLAG_NONE,
+ &CD3DX12_RESOURCE_DESC::Buffer( 1024 * 64 ),
+ D3D12_RESOURCE_STATE_GENERIC_READ,
+ nullptr,
+ IID_PPV_ARGS( &m_pConstantBuffer ) );
+
+ // Keep as persistently mapped buffer, store left eye in first 256 bytes, right eye in second
+ UINT8 *pBuffer;
+ CD3DX12_RANGE readRange( 0, 0 );
+ m_pConstantBuffer->Map( 0, &readRange, reinterpret_cast<void**>( &pBuffer ) );
+ // Left eye to first 256 bytes, right eye to second 256 bytes
+ m_pConstantBufferData[ 0 ] = pBuffer;
+ m_pConstantBufferData[ 1 ] = pBuffer + 256;
+
+ // Left eye CBV
+ CD3DX12_CPU_DESCRIPTOR_HANDLE cbvLeftEyeHandle( m_pCBVSRVHeap->GetCPUDescriptorHandleForHeapStart() );
+ cbvLeftEyeHandle.Offset( CBV_LEFT_EYE_RENDER_MODEL0 + m_unTrackedDeviceIndex, pDevice->GetDescriptorHandleIncrementSize( D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV ) );
+ D3D12_CONSTANT_BUFFER_VIEW_DESC cbvDesc = {};
+ cbvDesc.BufferLocation = m_pConstantBuffer->GetGPUVirtualAddress();
+ cbvDesc.SizeInBytes = ( sizeof( Matrix4 ) + 255 ) & ~255; // Pad to 256 bytes
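+		// D3D12 requires constant buffer views to be sized and placed on 256-byte boundaries
+		// (D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT), so the 64-byte Matrix4 rounds up to 256.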
+ pDevice->CreateConstantBufferView( &cbvDesc, cbvLeftEyeHandle );
+
+ // Right eye CBV
+ CD3DX12_CPU_DESCRIPTOR_HANDLE cbvRightEyeHandle( m_pCBVSRVHeap->GetCPUDescriptorHandleForHeapStart() );
+ cbvRightEyeHandle.Offset( CBV_RIGHT_EYE_RENDER_MODEL0 + m_unTrackedDeviceIndex, pDevice->GetDescriptorHandleIncrementSize( D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV ) );
+ cbvDesc.BufferLocation += 256;
+ pDevice->CreateConstantBufferView( &cbvDesc, cbvRightEyeHandle );
+ }
+
+ m_unVertexCount = vrModel.unTriangleCount * 3;
+
+ return true;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Frees the D3D12 resources for a render model
+//-----------------------------------------------------------------------------
+void DX12RenderModel::Cleanup()
+{
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Draws the render model
+//-----------------------------------------------------------------------------
+void DX12RenderModel::Draw( vr::EVREye nEye, ID3D12GraphicsCommandList *pCommandList, UINT nCBVSRVDescriptorSize, const Matrix4 &matMVP )
+{
+ // Update the CB with the transform
+ memcpy( m_pConstantBufferData[ nEye ], &matMVP, sizeof( matMVP ) );
+
+ // Bind the CB
+ int nStartOffset = ( nEye == vr::Eye_Left ) ? CBV_LEFT_EYE_RENDER_MODEL0 : CBV_RIGHT_EYE_RENDER_MODEL0;
+ CD3DX12_GPU_DESCRIPTOR_HANDLE cbvHandle( m_pCBVSRVHeap->GetGPUDescriptorHandleForHeapStart() );
+ cbvHandle.Offset( nStartOffset + m_unTrackedDeviceIndex, nCBVSRVDescriptorSize );
+ pCommandList->SetGraphicsRootDescriptorTable( 0, cbvHandle );
+
+ // Bind the texture
+ CD3DX12_GPU_DESCRIPTOR_HANDLE srvHandle( m_pCBVSRVHeap->GetGPUDescriptorHandleForHeapStart() );
+ srvHandle.Offset( SRV_TEXTURE_RENDER_MODEL0 + m_unTrackedDeviceIndex, nCBVSRVDescriptorSize );
+ pCommandList->SetGraphicsRootDescriptorTable( 1, srvHandle );
+
+ // Bind the VB/IB and draw
+ pCommandList->IASetPrimitiveTopology( D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST );
+ pCommandList->IASetVertexBuffers( 0, 1, &m_vertexBufferView );
+ pCommandList->IASetIndexBuffer( &m_indexBufferView );
+ pCommandList->DrawIndexedInstanced( m_unVertexCount, 1, 0, 0, 0 );
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Program entry point
+//-----------------------------------------------------------------------------
+int main(int argc, char *argv[])
+{
+ CMainApplication *pMainApplication = new CMainApplication( argc, argv );
+
+ if ( !pMainApplication->BInit() )
+ {
+ pMainApplication->Shutdown();
+ return 1;
+ }
+
+ pMainApplication->RunMainLoop();
+
+ pMainApplication->Shutdown();
+
+ return 0;
+}
diff --git a/samples/hellovr_vulkan/CMakeLists.txt b/samples/hellovr_vulkan/CMakeLists.txt
new file mode 100644
index 0000000..ebbfcbe
--- /dev/null
+++ b/samples/hellovr_vulkan/CMakeLists.txt
@@ -0,0 +1,16 @@
+set(TARGET_NAME hellovr_vulkan)
+
+add_executable(${TARGET_NAME}
+ ${SHARED_SRC_FILES}
+ hellovr_vulkan_main.cpp
+)
+
+target_link_libraries(${TARGET_NAME}
+ ${SDL2_LIBRARIES}
+ ${VULKAN_LIBRARY}
+ ${OPENVR_LIBRARIES}
+ ${CMAKE_DL_LIBS}
+ ${EXTRA_LIBS}
+)
+
+setTargetOutputDirectory(${TARGET_NAME})
diff --git a/samples/hellovr_vulkan/hellovr_vulkan_main.cpp b/samples/hellovr_vulkan/hellovr_vulkan_main.cpp
new file mode 100644
index 0000000..884962b
--- /dev/null
+++ b/samples/hellovr_vulkan/hellovr_vulkan_main.cpp
@@ -0,0 +1,3776 @@
+//========= Copyright Valve Corporation ============//
+
+#if defined( _WIN32 )
+ #define VK_USE_PLATFORM_WIN32_KHR
+#else
+ #define SDL_VIDEO_DRIVER_X11
+ #define VK_USE_PLATFORM_XLIB_KHR
+#endif
+#include <vulkan/vulkan.h>
+#include <SDL.h>
+#include <SDL_syswm.h>
+#include <stdio.h>
+#include <string>
+#include <cstdlib>
+#include <inttypes.h>
+#include <openvr.h>
+#include <deque>
+
+#include "shared/lodepng.h"
+#include "shared/Matrices.h"
+#include "shared/pathtools.h"
+
+#if defined(POSIX)
+#include "unistd.h"
+#endif
+
+#ifndef _countof
+#define _countof(x) (sizeof(x)/sizeof((x)[0]))
+#endif
+
+void ThreadSleep( unsigned long nMilliseconds )
+{
+#if defined(_WIN32)
+ ::Sleep( nMilliseconds );
+#elif defined(POSIX)
+ usleep( nMilliseconds * 1000 );
+#endif
+}
+
+// Pipeline state objects
+enum PipelineStateObjectEnum_t
+{
+ PSO_SCENE = 0,
+ PSO_AXES,
+ PSO_RENDERMODEL,
+ PSO_COMPANION,
+ PSO_COUNT
+};
+
+// Indices of descriptor sets for rendering
+enum DescriptorSetIndex_t
+{
+ DESCRIPTOR_SET_LEFT_EYE_SCENE = 0,
+ DESCRIPTOR_SET_RIGHT_EYE_SCENE,
+ DESCRIPTOR_SET_COMPANION_LEFT_TEXTURE,
+ DESCRIPTOR_SET_COMPANION_RIGHT_TEXTURE,
+ DESCRIPTOR_SET_LEFT_EYE_RENDER_MODEL0,
+ DESCRIPTOR_SET_LEFT_EYE_RENDER_MODEL1,
+ DESCRIPTOR_SET_LEFT_EYE_RENDER_MODEL2,
+ DESCRIPTOR_SET_LEFT_EYE_RENDER_MODEL3,
+ DESCRIPTOR_SET_LEFT_EYE_RENDER_MODEL4,
+ DESCRIPTOR_SET_LEFT_EYE_RENDER_MODEL5,
+ DESCRIPTOR_SET_LEFT_EYE_RENDER_MODEL6,
+ DESCRIPTOR_SET_LEFT_EYE_RENDER_MODEL7,
+ DESCRIPTOR_SET_LEFT_EYE_RENDER_MODEL8,
+ DESCRIPTOR_SET_LEFT_EYE_RENDER_MODEL9,
+ DESCRIPTOR_SET_LEFT_EYE_RENDER_MODEL10,
+ DESCRIPTOR_SET_LEFT_EYE_RENDER_MODEL11,
+ DESCRIPTOR_SET_LEFT_EYE_RENDER_MODEL12,
+ DESCRIPTOR_SET_LEFT_EYE_RENDER_MODEL13,
+ DESCRIPTOR_SET_LEFT_EYE_RENDER_MODEL14,
+ DESCRIPTOR_SET_LEFT_EYE_RENDER_MODEL15,
+ DESCRIPTOR_SET_RIGHT_EYE_RENDER_MODEL0,
+ DESCRIPTOR_SET_RIGHT_EYE_RENDER_MODEL1,
+ DESCRIPTOR_SET_RIGHT_EYE_RENDER_MODEL2,
+ DESCRIPTOR_SET_RIGHT_EYE_RENDER_MODEL3,
+ DESCRIPTOR_SET_RIGHT_EYE_RENDER_MODEL4,
+ DESCRIPTOR_SET_RIGHT_EYE_RENDER_MODEL5,
+ DESCRIPTOR_SET_RIGHT_EYE_RENDER_MODEL6,
+ DESCRIPTOR_SET_RIGHT_EYE_RENDER_MODEL7,
+ DESCRIPTOR_SET_RIGHT_EYE_RENDER_MODEL8,
+ DESCRIPTOR_SET_RIGHT_EYE_RENDER_MODEL9,
+ DESCRIPTOR_SET_RIGHT_EYE_RENDER_MODEL10,
+ DESCRIPTOR_SET_RIGHT_EYE_RENDER_MODEL11,
+ DESCRIPTOR_SET_RIGHT_EYE_RENDER_MODEL12,
+ DESCRIPTOR_SET_RIGHT_EYE_RENDER_MODEL13,
+ DESCRIPTOR_SET_RIGHT_EYE_RENDER_MODEL14,
+ DESCRIPTOR_SET_RIGHT_EYE_RENDER_MODEL15,
+ NUM_DESCRIPTOR_SETS
+};
+
+class VulkanRenderModel
+{
+public:
+ VulkanRenderModel( const std::string & sRenderModelName );
+ ~VulkanRenderModel();
+
+ bool BInit( VkDevice pDevice, const VkPhysicalDeviceMemoryProperties &memoryProperties, VkCommandBuffer pCommandBuffer, vr::TrackedDeviceIndex_t unTrackedDeviceIndex, VkDescriptorSet pDescriptorSets[ 2 ], const vr::RenderModel_t & vrModel, const vr::RenderModel_TextureMap_t & vrDiffuseTexture );
+ void Cleanup();
+ void Draw( vr::EVREye nEye, VkCommandBuffer pCommandBuffer, VkPipelineLayout pPipelineLayout, const Matrix4 &matMVP );
+ const std::string & GetName() const { return m_sModelName; }
+
+private:
+ VkDevice m_pDevice;
+ VkPhysicalDeviceMemoryProperties m_physicalDeviceMemoryProperties;
+ VkBuffer m_pVertexBuffer;
+ VkDeviceMemory m_pVertexBufferMemory;
+ VkBuffer m_pIndexBuffer;
+ VkDeviceMemory m_pIndexBufferMemory;
+ VkImage m_pImage;
+ VkDeviceMemory m_pImageMemory;
+ VkImageView m_pImageView;
+ VkBuffer m_pImageStagingBuffer;
+ VkDeviceMemory m_pImageStagingBufferMemory;
+ VkBuffer m_pConstantBuffer[ 2 ];
+ VkDeviceMemory m_pConstantBufferMemory[ 2 ];
+ void *m_pConstantBufferData[ 2 ];
+ VkDescriptorSet m_pDescriptorSets[ 2 ];
+ VkSampler m_pSampler;
+
+ size_t m_unVertexCount;
+ vr::TrackedDeviceIndex_t m_unTrackedDeviceIndex;
+ std::string m_sModelName;
+};
+
+static bool g_bPrintf = true;
+
+// Vulkan extension entrypoints
+static PFN_vkCreateDebugReportCallbackEXT g_pVkCreateDebugReportCallbackEXT = nullptr;
+static PFN_vkDestroyDebugReportCallbackEXT g_pVkDestroyDebugReportCallbackEXT = nullptr;
+
+//-----------------------------------------------------------------------------
+// Purpose: Main application class for the hellovr_vulkan sample
+//------------------------------------------------------------------------------
+class CMainApplication
+{
+public:
+ CMainApplication( int argc, char *argv[] );
+ virtual ~CMainApplication();
+
+ bool BInit();
+ bool BInitVulkan();
+ bool BInitVulkanInstance();
+ bool BInitVulkanDevice();
+ bool BInitVulkanSwapchain();
+ bool BInitCompositor();
+ bool GetVulkanInstanceExtensionsRequired( std::vector< std::string > &outInstanceExtensionList );
+ bool GetVulkanDeviceExtensionsRequired( VkPhysicalDevice pPhysicalDevice, std::vector< std::string > &outDeviceExtensionList );
+
+ void SetupRenderModels();
+
+ void Shutdown();
+
+ void RunMainLoop();
+ bool HandleInput();
+ void ProcessVREvent( const vr::VREvent_t & event );
+ void RenderFrame();
+
+ bool SetupTexturemaps();
+	static void GenMipMapRGBA( const uint8_t *pSrc, uint8_t **ppDst, int nSrcWidth, int nSrcHeight, int *pDstWidthOut, int *pDstHeightOut );
+
+ void SetupScene();
+ void AddCubeToScene( Matrix4 mat, std::vector<float> &vertdata );
+ void AddCubeVertex( float fl0, float fl1, float fl2, float fl3, float fl4, std::vector<float> &vertdata );
+
+ void UpdateControllerAxes();
+
+ bool SetupStereoRenderTargets();
+ void SetupCompanionWindow();
+ void SetupCameras();
+
+ void RenderStereoTargets();
+ void RenderCompanionWindow();
+ void RenderScene( vr::Hmd_Eye nEye );
+
+ Matrix4 GetHMDMatrixProjectionEye( vr::Hmd_Eye nEye );
+ Matrix4 GetHMDMatrixPoseEye( vr::Hmd_Eye nEye );
+ Matrix4 GetCurrentViewProjectionMatrix( vr::Hmd_Eye nEye );
+ void UpdateHMDMatrixPose();
+
+ Matrix4 ConvertSteamVRMatrixToMatrix4( const vr::HmdMatrix34_t &matPose );
+
+ bool CreateAllShaders();
+ void CreateAllDescriptorSets();
+
+ void SetupRenderModelForTrackedDevice( vr::TrackedDeviceIndex_t unTrackedDeviceIndex );
+ VulkanRenderModel *FindOrLoadRenderModel( vr::TrackedDeviceIndex_t unTrackedDeviceIndex, const char *pchRenderModelName );
+
+private:
+ bool m_bDebugVulkan;
+ bool m_bVerbose;
+ bool m_bPerf;
+ bool m_bVblank;
+ int m_nMSAASampleCount;
+	// Optional scaling factor to render with supersampling (defaults to 1.0, use -supersample)
+ float m_flSuperSampleScale;
+
+ vr::IVRSystem *m_pHMD;
+ vr::IVRRenderModels *m_pRenderModels;
+ std::string m_strDriver;
+ std::string m_strDisplay;
+ vr::TrackedDevicePose_t m_rTrackedDevicePose[ vr::k_unMaxTrackedDeviceCount ];
+ Matrix4 m_rmat4DevicePose[ vr::k_unMaxTrackedDeviceCount ];
+ bool m_rbShowTrackedDevice[ vr::k_unMaxTrackedDeviceCount ];
+
+private: // SDL bookkeeping
+ SDL_Window *m_pCompanionWindow;
+ uint32_t m_nCompanionWindowWidth;
+ uint32_t m_nCompanionWindowHeight;
+
+private:
+ int m_iTrackedControllerCount;
+ int m_iTrackedControllerCount_Last;
+ int m_iValidPoseCount;
+ int m_iValidPoseCount_Last;
+ bool m_bShowCubes;
+
+ std::string m_strPoseClasses; // what classes we saw poses for this frame
+ char m_rDevClassChar[ vr::k_unMaxTrackedDeviceCount ]; // for each device, a character representing its class
+
+ int m_iSceneVolumeWidth;
+ int m_iSceneVolumeHeight;
+ int m_iSceneVolumeDepth;
+ float m_fScaleSpacing;
+ float m_fScale;
+
+ int m_iSceneVolumeInit; // if you want something other than the default 20x20x20
+
+ float m_fNearClip;
+ float m_fFarClip;
+
+ unsigned int m_uiVertcount;
+ unsigned int m_uiCompanionWindowIndexSize;
+
+ VkInstance m_pInstance;
+ VkDevice m_pDevice;
+ VkPhysicalDevice m_pPhysicalDevice;
+ VkQueue m_pQueue;
+ VkSurfaceKHR m_pSurface;
+ VkSwapchainKHR m_pSwapchain;
+ VkPhysicalDeviceProperties m_physicalDeviceProperties;
+ VkPhysicalDeviceMemoryProperties m_physicalDeviceMemoryProperties;
+ VkPhysicalDeviceFeatures m_physicalDeviceFeatures;
+ uint32_t m_nQueueFamilyIndex;
+ VkDebugReportCallbackEXT m_pDebugReportCallback;
+ uint32_t m_nSwapQueueImageCount;
+ uint32_t m_nFrameIndex;
+ uint32_t m_nCurrentSwapchainImage;
+ std::vector< VkImage > m_swapchainImages;
+ std::vector< VkImageView > m_pSwapchainImageViews;
+ std::vector< VkFramebuffer > m_pSwapchainFramebuffers;
+ std::vector< VkSemaphore > m_pSwapchainSemaphores;
+ VkRenderPass m_pSwapchainRenderPass;
+
+
+ VkCommandPool m_pCommandPool;
+ VkDescriptorPool m_pDescriptorPool;
+ VkDescriptorSet m_pDescriptorSets[ NUM_DESCRIPTOR_SETS ];
+
+ struct VulkanCommandBuffer_t
+ {
+ VkCommandBuffer m_pCommandBuffer;
+ VkFence m_pFence;
+ };
+ std::deque< VulkanCommandBuffer_t > m_commandBuffers;
+ VulkanCommandBuffer_t m_currentCommandBuffer;
+
+ VulkanCommandBuffer_t GetCommandBuffer();
+
+ // Scene resources
+ VkBuffer m_pSceneVertexBuffer;
+ VkDeviceMemory m_pSceneVertexBufferMemory;
+ VkBufferView m_pSceneVertexBufferView;
+ VkBuffer m_pSceneConstantBuffer[ 2 ];
+ VkDeviceMemory m_pSceneConstantBufferMemory[ 2 ];
+ void *m_pSceneConstantBufferData[ 2 ];
+ VkImage m_pSceneImage;
+ VkDeviceMemory m_pSceneImageMemory;
+ VkImageView m_pSceneImageView;
+ VkBuffer m_pSceneStagingBuffer;
+ VkDeviceMemory m_pSceneStagingBufferMemory;
+ VkSampler m_pSceneSampler;
+
+ // Storage for VS and PS for each PSO
+ VkShaderModule m_pShaderModules[ PSO_COUNT * 2 ];
+ VkPipeline m_pPipelines[ PSO_COUNT ];
+ VkDescriptorSetLayout m_pDescriptorSetLayout;
+ VkPipelineLayout m_pPipelineLayout;
+ VkPipelineCache m_pPipelineCache;
+
+ // Companion window resources
+ VkBuffer m_pCompanionWindowVertexBuffer;
+ VkDeviceMemory m_pCompanionWindowVertexBufferMemory;
+ VkBuffer m_pCompanionWindowIndexBuffer;
+ VkDeviceMemory m_pCompanionWindowIndexBufferMemory;
+
+ // Controller axes resources
+ VkBuffer m_pControllerAxesVertexBuffer;
+ VkDeviceMemory m_pControllerAxesVertexBufferMemory;
+
+ unsigned int m_uiControllerVertcount;
+
+ Matrix4 m_mat4HMDPose;
+ Matrix4 m_mat4eyePosLeft;
+ Matrix4 m_mat4eyePosRight;
+
+ Matrix4 m_mat4ProjectionCenter;
+ Matrix4 m_mat4ProjectionLeft;
+ Matrix4 m_mat4ProjectionRight;
+
+ struct VertexDataScene
+ {
+ Vector3 position;
+ Vector2 texCoord;
+ };
+
+ struct VertexDataWindow
+ {
+ Vector2 position;
+ Vector2 texCoord;
+
+ VertexDataWindow( const Vector2 & pos, const Vector2 tex ) : position(pos), texCoord(tex) { }
+ };
+
+ struct FramebufferDesc
+ {
+ VkImage m_pImage;
+ VkImageLayout m_nImageLayout;
+ VkDeviceMemory m_pDeviceMemory;
+ VkImageView m_pImageView;
+ VkImage m_pDepthStencilImage;
+ VkImageLayout m_nDepthStencilImageLayout;
+ VkDeviceMemory m_pDepthStencilDeviceMemory;
+ VkImageView m_pDepthStencilImageView;
+ VkRenderPass m_pRenderPass;
+ VkFramebuffer m_pFramebuffer;
+ };
+ FramebufferDesc m_leftEyeDesc;
+ FramebufferDesc m_rightEyeDesc;
+
+ bool CreateFrameBuffer( int nWidth, int nHeight, FramebufferDesc &framebufferDesc );
+
+ uint32_t m_nRenderWidth;
+ uint32_t m_nRenderHeight;
+
+ std::vector< VulkanRenderModel * > m_vecRenderModels;
+ VulkanRenderModel *m_rTrackedDeviceToRenderModel[ vr::k_unMaxTrackedDeviceCount ];
+};
+
+//-----------------------------------------------------------------------------
+// Purpose: Outputs a printf-style formatted message to stdout and to the
+//		debugger output.
+//-----------------------------------------------------------------------------
+void dprintf( const char *fmt, ... )
+{
+ va_list args;
+ char buffer[ 2048 ];
+
+ va_start( args, fmt );
+ vsprintf_s( buffer, fmt, args );
+ va_end( args );
+
+ if ( g_bPrintf )
+ printf( "%s", buffer );
+
+ OutputDebugStringA( buffer );
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: VK_EXT_debug_report callback
+//-----------------------------------------------------------------------------
+static VkBool32 VKAPI_PTR VKDebugMessageCallback( VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objectType, uint64_t object,
+ size_t location, int32_t messageCode, const char* pLayerPrefix, const char *pMessage, void *pUserData )
+{
+ char buf[4096] = { 0 };
+ switch ( flags )
+ {
+ case VK_DEBUG_REPORT_ERROR_BIT_EXT:
+ sprintf( buf, "VK ERROR %s %" PRIu64 ":%d: %s\n", pLayerPrefix, uint64_t( location ), messageCode, pMessage );
+ break;
+ case VK_DEBUG_REPORT_WARNING_BIT_EXT:
+ sprintf( buf, "VK WARNING %s %" PRIu64 ":%d: %s\n", pLayerPrefix, uint64_t( location ), messageCode, pMessage );
+ break;
+ case VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT:
+ sprintf( buf, "VK PERF %s %" PRIu64 ":%d: %s\n", pLayerPrefix, uint64_t( location ), messageCode, pMessage );
+ break;
+ case VK_DEBUG_REPORT_INFORMATION_BIT_EXT:
+ sprintf( buf, "VK INFO %s %" PRIu64 ":%d: %s\n", pLayerPrefix, uint64_t( location ), messageCode, pMessage );
+ break;
+ case VK_DEBUG_REPORT_DEBUG_BIT_EXT:
+ sprintf( buf, "VK DEBUG %s %" PRIu64 ":%d: %s\n", pLayerPrefix, uint64_t( location ), messageCode, pMessage );
+ break;
+ default:
+ break;
+ }
+
+ dprintf( "%s\n", buf );
+
+ return VK_FALSE;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Determine the memory type index from the memory requirements
+// and type bits
+//-----------------------------------------------------------------------------
+static bool MemoryTypeFromProperties( const VkPhysicalDeviceMemoryProperties &memoryProperties, uint32_t nMemoryTypeBits, VkMemoryPropertyFlags nMemoryProperties, uint32_t *pTypeIndexOut )
+{
+ for ( uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; i++ )
+ {
+ if ( ( nMemoryTypeBits & 1 ) == 1)
+ {
+ // Type is available, does it match user properties?
+ if ( ( memoryProperties.memoryTypes[i].propertyFlags & nMemoryProperties ) == nMemoryProperties )
+ {
+ *pTypeIndexOut = i;
+ return true;
+ }
+ }
+ nMemoryTypeBits >>= 1;
+ }
+
+ // No memory types matched, return failure
+ return false;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Helper function to create Vulkan static VB/IBs
+//-----------------------------------------------------------------------------
+static bool CreateVulkanBuffer( VkDevice pDevice, const VkPhysicalDeviceMemoryProperties &memoryProperties, const void *pBufferData, VkDeviceSize nSize, VkBufferUsageFlags nUsage, VkBuffer *ppBufferOut, VkDeviceMemory *ppDeviceMemoryOut )
+{
+	// Create the buffer and fill it with the supplied data, if any
+ VkBufferCreateInfo bufferCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+ bufferCreateInfo.size = nSize;
+ bufferCreateInfo.usage = nUsage;
+ VkResult nResult = vkCreateBuffer( pDevice, &bufferCreateInfo, nullptr, ppBufferOut );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "%s - vkCreateBuffer failed with error %d\n", __FUNCTION__, nResult );
+ return false;
+ }
+
+ VkMemoryRequirements memoryRequirements = {};
+ vkGetBufferMemoryRequirements( pDevice, *ppBufferOut, &memoryRequirements );
+
+ VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+ if ( !MemoryTypeFromProperties( memoryProperties, memoryRequirements.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &allocInfo.memoryTypeIndex ) )
+ {
+ dprintf( "%s - failed to find matching memoryTypeIndex for buffer\n", __FUNCTION__ );
+ return false;
+ }
+ allocInfo.allocationSize = memoryRequirements.size;
+
+ nResult = vkAllocateMemory( pDevice, &allocInfo, nullptr, ppDeviceMemoryOut );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "%s - vkCreateBuffer failed with error %d\n", __FUNCTION__, nResult );
+ return false;
+ }
+
+ nResult = vkBindBufferMemory( pDevice, *ppBufferOut, *ppDeviceMemoryOut, 0 );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "%s vkBindBufferMemory failed with error %d\n", __FUNCTION__, nResult );
+ return false;
+ }
+
+ if ( pBufferData != nullptr )
+ {
+ void *pData;
+ nResult = vkMapMemory( pDevice, *ppDeviceMemoryOut, 0, VK_WHOLE_SIZE, 0, &pData );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "%s - vkMapMemory returned error %d\n", __FUNCTION__, nResult );
+ return false;
+ }
+ memcpy( pData, pBufferData, nSize );
+ vkUnmapMemory( pDevice, *ppDeviceMemoryOut );
+
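+		// The memory type was only required to be HOST_VISIBLE, so explicitly flush the mapped
+		// range in case it is not HOST_COHERENT.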
+ VkMappedMemoryRange memoryRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
+ memoryRange.memory = *ppDeviceMemoryOut;
+ memoryRange.size = VK_WHOLE_SIZE;
+ vkFlushMappedMemoryRanges( pDevice, 1, &memoryRange );
+
+ }
+ return true;
+}
+
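+// Illustrative (hypothetical) use of the helper above: a static vertex buffer can be created
+// and filled in a single call, e.g.
+//   CreateVulkanBuffer( m_pDevice, m_physicalDeviceMemoryProperties, vertdata.data(),
+//                       vertdata.size() * sizeof( float ), VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
+//                       &m_pSceneVertexBuffer, &m_pSceneVertexBufferMemory );
+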
+
+//-----------------------------------------------------------------------------
+// Purpose: Constructor
+//-----------------------------------------------------------------------------
+CMainApplication::CMainApplication( int argc, char *argv[] )
+ : m_pCompanionWindow(NULL)
+ , m_nCompanionWindowWidth( 640 )
+ , m_nCompanionWindowHeight( 320 )
+ , m_pHMD( NULL )
+ , m_pRenderModels( NULL )
+ , m_bDebugVulkan( false )
+ , m_bVerbose( false )
+ , m_bPerf( false )
+ , m_bVblank( false )
+ , m_nMSAASampleCount( 4 )
+ , m_flSuperSampleScale( 1.0f )
+ , m_iTrackedControllerCount( 0 )
+ , m_iTrackedControllerCount_Last( -1 )
+ , m_iValidPoseCount( 0 )
+ , m_iValidPoseCount_Last( -1 )
+ , m_iSceneVolumeInit( 20 )
+ , m_strPoseClasses("")
+ , m_bShowCubes( true )
+ , m_pInstance( VK_NULL_HANDLE )
+ , m_pDevice( VK_NULL_HANDLE )
+ , m_pPhysicalDevice( VK_NULL_HANDLE )
+ , m_pQueue( VK_NULL_HANDLE )
+ , m_pSurface( VK_NULL_HANDLE )
+ , m_pSwapchain( VK_NULL_HANDLE )
+ , m_pDebugReportCallback( VK_NULL_HANDLE )
+ , m_pCommandPool( VK_NULL_HANDLE )
+ , m_pDescriptorPool( VK_NULL_HANDLE )
+ , m_nSwapQueueImageCount( 0 )
+ , m_nFrameIndex( 0 )
+ , m_nCurrentSwapchainImage( 0 )
+ , m_pSceneVertexBuffer( VK_NULL_HANDLE )
+ , m_pSceneVertexBufferMemory( VK_NULL_HANDLE )
+ , m_pSceneVertexBufferView( VK_NULL_HANDLE )
+ , m_pSceneImage( VK_NULL_HANDLE )
+ , m_pSceneImageMemory( VK_NULL_HANDLE )
+ , m_pSceneImageView( VK_NULL_HANDLE )
+ , m_pSceneStagingBuffer( VK_NULL_HANDLE )
+ , m_pSceneStagingBufferMemory( VK_NULL_HANDLE )
+ , m_pSceneSampler( VK_NULL_HANDLE )
+ , m_pDescriptorSetLayout( VK_NULL_HANDLE )
+ , m_pPipelineLayout( VK_NULL_HANDLE )
+ , m_pPipelineCache( VK_NULL_HANDLE )
+ , m_pCompanionWindowVertexBuffer( VK_NULL_HANDLE )
+ , m_pCompanionWindowVertexBufferMemory( VK_NULL_HANDLE )
+ , m_pCompanionWindowIndexBuffer( VK_NULL_HANDLE )
+ , m_pCompanionWindowIndexBufferMemory( VK_NULL_HANDLE )
+ , m_pControllerAxesVertexBuffer( VK_NULL_HANDLE )
+ , m_pControllerAxesVertexBufferMemory( VK_NULL_HANDLE )
+{
+ memset( &m_leftEyeDesc, 0, sizeof( m_leftEyeDesc ) );
+ memset( &m_rightEyeDesc, 0, sizeof( m_rightEyeDesc ) );
+ memset( &m_pShaderModules[ 0 ], 0, sizeof( m_pShaderModules ) );
+ memset( &m_pPipelines[ 0 ], 0, sizeof( m_pPipelines ) );
+ memset( m_pSceneConstantBufferData, 0, sizeof( m_pSceneConstantBufferData ) );
+ memset( m_pDescriptorSets, 0, sizeof( m_pDescriptorSets ) );
+
+ for( int i = 1; i < argc; i++ )
+ {
+ if( !stricmp( argv[i], "-vulkandebug" ) )
+ {
+ m_bDebugVulkan = true;
+ }
+ else if( !stricmp( argv[i], "-verbose" ) )
+ {
+ m_bVerbose = true;
+ }
+ else if( !stricmp( argv[i], "-novblank" ) )
+ {
+ m_bVblank = false;
+ }
+ else if ( !stricmp( argv[i], "-msaa" ) && ( argc > i + 1 ) && ( *argv[ i + 1 ] != '-' ) )
+ {
+ m_nMSAASampleCount = atoi( argv[ i + 1 ] );
+ i++;
+ }
+ else if ( !stricmp( argv[i], "-supersample" ) && ( argc > i + 1 ) && ( *argv[ i + 1 ] != '-' ) )
+ {
+ m_flSuperSampleScale = ( float )atof( argv[ i + 1 ] );
+ i++;
+ }
+ else if( !stricmp( argv[i], "-noprintf" ) )
+ {
+ g_bPrintf = false;
+ }
+ else if ( !stricmp( argv[i], "-cubevolume" ) && ( argc > i + 1 ) && ( *argv[ i + 1 ] != '-' ) )
+ {
+ m_iSceneVolumeInit = atoi( argv[ i + 1 ] );
+ i++;
+ }
+ }
+ // other initialization tasks are done in BInit
+ memset( m_rDevClassChar, 0, sizeof( m_rDevClassChar ) );
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Destructor
+//-----------------------------------------------------------------------------
+CMainApplication::~CMainApplication()
+{
+ // work is done in Shutdown
+ dprintf( "Shutdown" );
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Helper to get a string from a tracked device property and turn it
+// into a std::string
+//-----------------------------------------------------------------------------
+std::string GetTrackedDeviceString( vr::IVRSystem *pHmd, vr::TrackedDeviceIndex_t unDevice, vr::TrackedDeviceProperty prop, vr::TrackedPropertyError *peError = NULL )
+{
+ uint32_t unRequiredBufferLen = pHmd->GetStringTrackedDeviceProperty( unDevice, prop, NULL, 0, peError );
+ if( unRequiredBufferLen == 0 )
+ return "";
+
+ char *pchBuffer = new char[ unRequiredBufferLen ];
+ unRequiredBufferLen = pHmd->GetStringTrackedDeviceProperty( unDevice, prop, pchBuffer, unRequiredBufferLen, peError );
+ std::string sResult = pchBuffer;
+ delete [] pchBuffer;
+ return sResult;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Initializes SDL, the SteamVR runtime, the companion window, Vulkan, and the VR compositor.
+//-----------------------------------------------------------------------------
+bool CMainApplication::BInit()
+{
+ if ( SDL_Init( SDL_INIT_VIDEO | SDL_INIT_TIMER ) < 0 )
+ {
+ dprintf("%s - SDL could not initialize! SDL Error: %s\n", __FUNCTION__, SDL_GetError());
+ return false;
+ }
+
+ // Loading the SteamVR Runtime
+ vr::EVRInitError eError = vr::VRInitError_None;
+ m_pHMD = vr::VR_Init( &eError, vr::VRApplication_Scene );
+
+ if ( eError != vr::VRInitError_None )
+ {
+ m_pHMD = NULL;
+ char buf[1024];
+ sprintf_s( buf, sizeof( buf ), "Unable to init VR runtime: %s", vr::VR_GetVRInitErrorAsEnglishDescription( eError ) );
+ SDL_ShowSimpleMessageBox( SDL_MESSAGEBOX_ERROR, "VR_Init Failed", buf, NULL );
+ return false;
+ }
+
+ m_pRenderModels = (vr::IVRRenderModels *)vr::VR_GetGenericInterface( vr::IVRRenderModels_Version, &eError );
+ if( !m_pRenderModels )
+ {
+ m_pHMD = NULL;
+ vr::VR_Shutdown();
+
+ char buf[1024];
+ sprintf_s( buf, sizeof( buf ), "Unable to get render model interface: %s", vr::VR_GetVRInitErrorAsEnglishDescription( eError ) );
+ SDL_ShowSimpleMessageBox( SDL_MESSAGEBOX_ERROR, "VR_Init Failed", buf, NULL );
+ return false;
+ }
+
+ int nWindowPosX = 700;
+ int nWindowPosY = 100;
+ Uint32 unWindowFlags = SDL_WINDOW_SHOWN;
+
+ m_pCompanionWindow = SDL_CreateWindow( "hellovr [Vulkan]", nWindowPosX, nWindowPosY, m_nCompanionWindowWidth, m_nCompanionWindowHeight, unWindowFlags );
+ if (m_pCompanionWindow == NULL)
+ {
+ dprintf( "%s - Window could not be created! SDL Error: %s\n", __FUNCTION__, SDL_GetError() );
+ return false;
+ }
+
+ m_strDriver = "No Driver";
+ m_strDisplay = "No Display";
+
+ m_strDriver = GetTrackedDeviceString( m_pHMD, vr::k_unTrackedDeviceIndex_Hmd, vr::Prop_TrackingSystemName_String );
+ m_strDisplay = GetTrackedDeviceString( m_pHMD, vr::k_unTrackedDeviceIndex_Hmd, vr::Prop_SerialNumber_String );
+
+ std::string strWindowTitle = "hellovr [Vulkan] - " + m_strDriver + " " + m_strDisplay;
+ SDL_SetWindowTitle( m_pCompanionWindow, strWindowTitle.c_str() );
+
+ // cube array
+ m_iSceneVolumeWidth = m_iSceneVolumeInit;
+ m_iSceneVolumeHeight = m_iSceneVolumeInit;
+ m_iSceneVolumeDepth = m_iSceneVolumeInit;
+
+ m_fScale = 0.3f;
+ m_fScaleSpacing = 4.0f;
+
+ m_fNearClip = 0.1f;
+ m_fFarClip = 30.0f;
+
+ m_uiVertcount = 0;
+ m_uiCompanionWindowIndexSize = 0;
+
+ if ( !BInitVulkan() )
+ {
+ dprintf( "%s - Unable to initialize Vulkan!\n", __FUNCTION__ );
+ return false;
+ }
+
+ if ( !BInitCompositor() )
+ {
+ dprintf( "%s - Failed to initialize VR Compositor!\n", __FUNCTION__ );
+ return false;
+ }
+
+ return true;
+}
+
+//--------------------------------------------------------------------------------------
+// Ask OpenVR for the list of instance extensions required
+//--------------------------------------------------------------------------------------
+bool CMainApplication::GetVulkanInstanceExtensionsRequired( std::vector< std::string > &outInstanceExtensionList )
+{
+ if ( !vr::VRCompositor() )
+ {
+ return false;
+ }
+
+ outInstanceExtensionList.clear();
+ uint32_t nBufferSize = vr::VRCompositor()->GetVulkanInstanceExtensionsRequired( nullptr, 0 );
+ if ( nBufferSize > 0 )
+ {
+ // Allocate memory for the space separated list and query for it
+ char *pExtensionStr = new char[ nBufferSize ];
+ pExtensionStr[0] = 0;
+ vr::VRCompositor()->GetVulkanInstanceExtensionsRequired( pExtensionStr, nBufferSize );
+
+		// Break up the space-separated list into entries in the output vector
+ std::string curExtStr;
+ uint32_t nIndex = 0;
+ while ( pExtensionStr[ nIndex ] != 0 && ( nIndex < nBufferSize ) )
+ {
+ if ( pExtensionStr[ nIndex ] == ' ' )
+ {
+ outInstanceExtensionList.push_back( curExtStr );
+ curExtStr.clear();
+ }
+ else
+ {
+ curExtStr += pExtensionStr[ nIndex ];
+ }
+ nIndex++;
+ }
+ if ( curExtStr.size() > 0 )
+ {
+ outInstanceExtensionList.push_back( curExtStr );
+ }
+
+ delete [] pExtensionStr;
+ }
+
+ return true;
+}
+
+//--------------------------------------------------------------------------------------
+// Ask OpenVR for the list of device extensions required
+//--------------------------------------------------------------------------------------
+bool CMainApplication::GetVulkanDeviceExtensionsRequired( VkPhysicalDevice pPhysicalDevice, std::vector< std::string > &outDeviceExtensionList )
+{
+ if ( !vr::VRCompositor() )
+ {
+ return false;
+ }
+
+ outDeviceExtensionList.clear();
+ uint32_t nBufferSize = vr::VRCompositor()->GetVulkanDeviceExtensionsRequired( ( VkPhysicalDevice_T * ) pPhysicalDevice, nullptr, 0 );
+ if ( nBufferSize > 0 )
+ {
+ // Allocate memory for the space separated list and query for it
+ char *pExtensionStr = new char[ nBufferSize ];
+ pExtensionStr[0] = 0;
+ vr::VRCompositor()->GetVulkanDeviceExtensionsRequired( ( VkPhysicalDevice_T * ) pPhysicalDevice, pExtensionStr, nBufferSize );
+
+		// Break up the space-separated list into entries in the output vector
+ std::string curExtStr;
+ uint32_t nIndex = 0;
+ while ( pExtensionStr[ nIndex ] != 0 && ( nIndex < nBufferSize ) )
+ {
+ if ( pExtensionStr[ nIndex ] == ' ' )
+ {
+ outDeviceExtensionList.push_back( curExtStr );
+ curExtStr.clear();
+ }
+ else
+ {
+ curExtStr += pExtensionStr[ nIndex ];
+ }
+ nIndex++;
+ }
+ if ( curExtStr.size() > 0 )
+ {
+ outDeviceExtensionList.push_back( curExtStr );
+ }
+
+ delete [] pExtensionStr;
+ }
+
+ return true;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Initialize Vulkan VkInstance
+//-----------------------------------------------------------------------------
+bool CMainApplication::BInitVulkanInstance()
+{
+ VkResult nResult;
+
+ //----------------------//
+ // VkInstance creation //
+ //----------------------//
+ // Query OpenVR to determine which instance extensions need to be enabled before creating the instance
+ std::vector< std::string > requiredInstanceExtensions;
+ if ( !GetVulkanInstanceExtensionsRequired( requiredInstanceExtensions ) )
+ {
+ dprintf( "Could not determine OpenVR Vulkan instance extensions.\n" );
+ return false;
+ }
+
+ // Additional required instance extensions
+ requiredInstanceExtensions.push_back( VK_KHR_SURFACE_EXTENSION_NAME );
+#if defined ( _WIN32 )
+ requiredInstanceExtensions.push_back( VK_KHR_WIN32_SURFACE_EXTENSION_NAME );
+#else
+ requiredInstanceExtensions.push_back( VK_KHR_XLIB_SURFACE_EXTENSION_NAME );
+#endif
+
+ uint32_t nEnabledLayerCount = 0;
+ VkLayerProperties *pLayerProperties = nullptr;
+ char **ppEnabledLayerNames = nullptr;
+
+ // Enable validation layers
+ if ( m_bDebugVulkan )
+ {
+ // OpenVR: no unique_objects when using validation with SteamVR
+ char const *pInstanceValidationLayers[] =
+ {
+ "VK_LAYER_GOOGLE_threading",
+ "VK_LAYER_LUNARG_parameter_validation",
+ "VK_LAYER_LUNARG_object_tracker",
+ "VK_LAYER_LUNARG_image",
+ "VK_LAYER_LUNARG_core_validation",
+ "VK_LAYER_LUNARG_swapchain"
+ };
+
+ uint32_t nInstanceLayerCount = 0;
+ VkResult nResult = vkEnumerateInstanceLayerProperties( &nInstanceLayerCount, nullptr );
+ if ( nResult == VK_SUCCESS && nInstanceLayerCount > 0 )
+ {
+ pLayerProperties = new VkLayerProperties[ nInstanceLayerCount ];
+ ppEnabledLayerNames = new char*[ nInstanceLayerCount ];
+ nResult = vkEnumerateInstanceLayerProperties( &nInstanceLayerCount, pLayerProperties );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "Error vkEnumerateInstanceLayerProperties in %d\n", nResult );
+ return false;
+ }
+
+ uint32_t nLayerIndex = 0;
+ for ( nLayerIndex = 0; nLayerIndex < nInstanceLayerCount; nLayerIndex++ )
+ {
+ for ( uint32_t nLayer = 0; nLayer < _countof( pInstanceValidationLayers ); nLayer++ )
+ {
+ if ( strstr( pLayerProperties[ nLayerIndex ].layerName, pInstanceValidationLayers[ nLayer ] ) != NULL )
+ {
+ ppEnabledLayerNames[ nEnabledLayerCount++ ] = pLayerProperties[ nLayerIndex ].layerName;
+ }
+ }
+ }
+ requiredInstanceExtensions.push_back( VK_EXT_DEBUG_REPORT_EXTENSION_NAME );
+ }
+ }
+
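+ // Enumerate the instance extensions the Vulkan loader reports so every required extension can be verified before creating the instance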
+ uint32_t nInstanceExtensionCount = 0;
+ nResult = vkEnumerateInstanceExtensionProperties( NULL, &nInstanceExtensionCount, NULL );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkEnumerateInstanceExtensionProperties failed with error %d\n", nResult );
+ return false;
+ }
+
+ // Allocate enough ExtensionProperties to support all extensions being enabled
+ char** ppEnableInstanceExtensionNames = new char*[ requiredInstanceExtensions.size() ];
+ int32_t nEnableInstanceExtensionNamesCount = 0;
+ VkExtensionProperties *pExtensionProperties = new VkExtensionProperties[ nInstanceExtensionCount ];
+ if ( nInstanceExtensionCount > 0 )
+ {
+ nResult = vkEnumerateInstanceExtensionProperties( NULL, &nInstanceExtensionCount, pExtensionProperties );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkEnumerateInstanceExtensionProperties failed with error %d\n", nResult );
+ return false;
+ }
+
+ for ( size_t nExt = 0; nExt < requiredInstanceExtensions.size(); nExt++ )
+ {
+ bool bFound = false;
+ uint32_t nExtIndex = 0;
+ for ( nExtIndex = 0; nExtIndex < nInstanceExtensionCount; nExtIndex++ )
+ {
+ if ( strcmp( requiredInstanceExtensions[ nExt ].c_str(), pExtensionProperties[ nExtIndex ].extensionName ) == 0 )
+ {
+ bFound = true;
+ ppEnableInstanceExtensionNames[ nEnableInstanceExtensionNamesCount++ ] = pExtensionProperties[ nExtIndex ].extensionName;
+ break;
+ }
+ }
+
+ if ( !bFound )
+ {
+ dprintf( "Vulkan missing requested extension '%s'.\n", requiredInstanceExtensions[ nExt ] );
+ }
+ }
+
+ if ( nEnableInstanceExtensionNamesCount != ( int32_t ) requiredInstanceExtensions.size() )
+ {
+ return false;
+ }
+ }
+
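+ // Describe the application; this sample targets the Vulkan 1.0 core API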
+ VkApplicationInfo appInfo = { VK_STRUCTURE_TYPE_APPLICATION_INFO };
+ appInfo.pApplicationName = "hellovr_vulkan";
+ appInfo.applicationVersion = 1;
+ appInfo.pEngineName = nullptr;
+ appInfo.engineVersion = 1;
+ appInfo.apiVersion = VK_MAKE_VERSION( 1, 0, 0 );
+
+ // Create the instance
+ VkInstanceCreateInfo instanceCreateInfo = {};
+ instanceCreateInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
+ instanceCreateInfo.pNext = NULL;
+ instanceCreateInfo.pApplicationInfo = &appInfo;
+ instanceCreateInfo.enabledExtensionCount = nEnableInstanceExtensionNamesCount;
+ instanceCreateInfo.ppEnabledExtensionNames = ppEnableInstanceExtensionNames;
+ instanceCreateInfo.enabledLayerCount = nEnabledLayerCount;
+ instanceCreateInfo.ppEnabledLayerNames = ppEnabledLayerNames;
+
+ nResult = vkCreateInstance( &instanceCreateInfo, nullptr, &m_pInstance );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkCreateInstance failed with error %d\n", nResult );
+ return false;
+ }
+
+ // Enable debug report extension
+ if ( m_bDebugVulkan )
+ {
+ g_pVkCreateDebugReportCallbackEXT = ( PFN_vkCreateDebugReportCallbackEXT ) vkGetInstanceProcAddr( m_pInstance, "vkCreateDebugReportCallbackEXT" );
+ g_pVkDestroyDebugReportCallbackEXT = ( PFN_vkDestroyDebugReportCallbackEXT ) vkGetInstanceProcAddr( m_pInstance, "vkDestroyDebugReportCallbackEXT" );
+
+ VkDebugReportCallbackCreateInfoEXT debugReportCreateInfo = {};
+ debugReportCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
+ debugReportCreateInfo.pNext = NULL;
+ debugReportCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT;
+ debugReportCreateInfo.pfnCallback = VKDebugMessageCallback;
+ debugReportCreateInfo.pUserData = NULL;
+ g_pVkCreateDebugReportCallbackEXT( m_pInstance, &debugReportCreateInfo, NULL, &m_pDebugReportCallback );
+ }
+
+ delete [] ppEnableInstanceExtensionNames;
+ delete [] ppEnabledLayerNames;
+ delete [] pLayerProperties;
+ delete [] pExtensionProperties;
+
+ return true;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Initialize Vulkan VkDevice
+//-----------------------------------------------------------------------------
+bool CMainApplication::BInitVulkanDevice()
+{
+ uint32_t nDeviceCount = 0;
+ VkResult nResult = vkEnumeratePhysicalDevices( m_pInstance, &nDeviceCount, NULL );
+ if ( nResult != VK_SUCCESS || nDeviceCount == 0 )
+ {
+ dprintf( "vkEnumeratePhysicalDevices failed, unable to init and enumerate GPUs with Vulkan.\n" );
+ return false;
+ }
+
+ VkPhysicalDevice *pPhysicalDevices = new VkPhysicalDevice[ nDeviceCount ];
+ nResult = vkEnumeratePhysicalDevices( m_pInstance, &nDeviceCount, pPhysicalDevices );
+ if ( nResult != VK_SUCCESS || nDeviceCount == 0 )
+ {
+ dprintf( "vkEnumeratePhysicalDevices failed, unable to init and enumerate GPUs with Vulkan.\n" );
+ return false;
+ }
+
+ // Grab the first physical device
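+ // Note: a more robust application would select the adapter the runtime recommends (e.g. via IVRSystem::GetOutputDevice) rather than always using index 0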
+ m_pPhysicalDevice = pPhysicalDevices[ 0 ];
+ delete [] pPhysicalDevices;
+
+ vkGetPhysicalDeviceProperties( m_pPhysicalDevice, &m_physicalDeviceProperties );
+ vkGetPhysicalDeviceMemoryProperties( m_pPhysicalDevice, &m_physicalDeviceMemoryProperties );
+ vkGetPhysicalDeviceFeatures( m_pPhysicalDevice, &m_physicalDeviceFeatures );
+
+ //--------------------//
+ // VkDevice creation //
+ //--------------------//
+ // Query OpenVR for the required device extensions for this physical device
+ std::vector< std::string > requiredDeviceExtensions;
+ GetVulkanDeviceExtensionsRequired( m_pPhysicalDevice, requiredDeviceExtensions );
+ // Add additional required extensions
+ requiredDeviceExtensions.push_back( VK_KHR_SWAPCHAIN_EXTENSION_NAME );
+
+ // Find the first queue family that supports graphics
+ uint32_t nQueueCount = 0;
+ vkGetPhysicalDeviceQueueFamilyProperties( m_pPhysicalDevice, &nQueueCount, NULL );
+ if ( nQueueCount == 0 )
+ {
+ dprintf( "Failed to get queue properties.\n" );
+ return false;
+ }
+ VkQueueFamilyProperties *pQueueFamilyProperties = new VkQueueFamilyProperties[ nQueueCount ];
+ vkGetPhysicalDeviceQueueFamilyProperties( m_pPhysicalDevice, &nQueueCount, pQueueFamilyProperties );
+ uint32_t nGraphicsQueueIndex = 0;
+ for ( nGraphicsQueueIndex = 0; nGraphicsQueueIndex < nQueueCount; nGraphicsQueueIndex++ )
+ {
+ if ( pQueueFamilyProperties[ nGraphicsQueueIndex].queueFlags & VK_QUEUE_GRAPHICS_BIT )
+ {
+ break;
+ }
+ }
+ if ( nGraphicsQueueIndex >= nQueueCount )
+ {
+ dprintf( "No graphics queue found\n" );
+ delete [] pQueueFamilyProperties;
+ return false;
+ }
+ m_nQueueFamilyIndex = nGraphicsQueueIndex;
+ delete [] pQueueFamilyProperties;
+
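+ // Enumerate the extensions this physical device actually exposes so the required list can be matched against it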
+ uint32_t nDeviceExtensionCount = 0;
+ nResult = vkEnumerateDeviceExtensionProperties( m_pPhysicalDevice, NULL, &nDeviceExtensionCount, NULL );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkEnumerateDeviceExtensionProperties failed with error %d\n", nResult );
+ return false;
+ }
+
+ // Allocate enough ExtensionProperties to support all extensions being enabled
+ const char** ppDeviceExtensionNames = new const char* [ nDeviceExtensionCount ];
+ uint32_t nEnabledDeviceExtensionCount = 0;
+
+ // Enable required device extensions
+ VkExtensionProperties *pDeviceExtProperties = new VkExtensionProperties[ nDeviceExtensionCount ];
+ memset( pDeviceExtProperties, 0, sizeof( VkExtensionProperties ) * nDeviceExtensionCount );
+ if ( nDeviceExtensionCount > 0 )
+ {
+ nResult = vkEnumerateDeviceExtensionProperties( m_pPhysicalDevice, NULL, &nDeviceExtensionCount, pDeviceExtProperties );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkEnumerateDeviceExtensionProperties failed with error %d\n", nResult );
+ return false;
+ }
+
+ for ( size_t nRequiredDeviceExt = 0; nRequiredDeviceExt < requiredDeviceExtensions.size(); nRequiredDeviceExt++ )
+ {
+ bool bExtFound = false;
+ for ( uint32_t nDeviceExt = 0; nDeviceExt < nDeviceExtensionCount; nDeviceExt++ )
+ {
+ if ( stricmp( requiredDeviceExtensions[ nRequiredDeviceExt ].c_str(), pDeviceExtProperties[ nDeviceExt ].extensionName ) == 0 )
+ {
+ bExtFound = true;
+ break;
+ }
+ }
+
+ if ( bExtFound )
+ {
+ ppDeviceExtensionNames[ nEnabledDeviceExtensionCount ] = requiredDeviceExtensions[ nRequiredDeviceExt ].c_str();
+ nEnabledDeviceExtensionCount++;
+ }
+ }
+ }
+
+ // Create the device
+ VkDeviceQueueCreateInfo deviceQueueCreateInfo = { VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO };
+ deviceQueueCreateInfo.queueFamilyIndex = m_nQueueFamilyIndex;
+ deviceQueueCreateInfo.queueCount = 1;
+ float fQueuePriority = 1.0f;
+ deviceQueueCreateInfo.pQueuePriorities = &fQueuePriority;
+
+ VkDeviceCreateInfo deviceCreateInfo = { VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO };
+ deviceCreateInfo.queueCreateInfoCount = 1;
+ deviceCreateInfo.pQueueCreateInfos = &deviceQueueCreateInfo;
+ deviceCreateInfo.enabledExtensionCount = nEnabledDeviceExtensionCount;
+ deviceCreateInfo.ppEnabledExtensionNames = ppDeviceExtensionNames;
+ deviceCreateInfo.pEnabledFeatures = &m_physicalDeviceFeatures;
+
+ nResult = vkCreateDevice( m_pPhysicalDevice, &deviceCreateInfo, nullptr, &m_pDevice );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkCreateDevice failed with error %d\n", nResult );
+ return false;
+ }
+
+ // Get the device queue
+ vkGetDeviceQueue( m_pDevice, m_nQueueFamilyIndex, 0, &m_pQueue );
+ return true;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Initialize Vulkan swapchain and associated images
+//-----------------------------------------------------------------------------
+bool CMainApplication::BInitVulkanSwapchain()
+{
+ //----------------------//
+ // Swapchain creation //
+ //----------------------//
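+ // Ask SDL for the native window handle so a platform-specific VkSurfaceKHR can be created for the companion window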
+ SDL_SysWMinfo wmInfo;
+ SDL_VERSION( &wmInfo.version );
+ SDL_GetWindowWMInfo( m_pCompanionWindow, &wmInfo );
+ VkResult nResult;
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ VkWin32SurfaceCreateInfoKHR win32SurfaceCreateInfo = {};
+ win32SurfaceCreateInfo.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
+ win32SurfaceCreateInfo.pNext = NULL;
+ win32SurfaceCreateInfo.flags = 0;
+ win32SurfaceCreateInfo.hinstance = GetModuleHandle( NULL );
+ win32SurfaceCreateInfo.hwnd = ( HWND ) wmInfo.info.win.window;
+ nResult = vkCreateWin32SurfaceKHR( m_pInstance, &win32SurfaceCreateInfo, nullptr, &m_pSurface );
+#else
+ VkXlibSurfaceCreateInfoKHR xlibSurfaceCreateInfo = { VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR };
+ xlibSurfaceCreateInfo.flags = 0;
+ xlibSurfaceCreateInfo.dpy = wmInfo.info.x11.display;
+ xlibSurfaceCreateInfo.window = wmInfo.info.x11.window;
+ nResult = vkCreateXlibSurfaceKHR( m_pInstance, &xlibSurfaceCreateInfo, nullptr, &m_pSurface );
+#endif
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "Failed to create VkSurfaceKHR error %d.\n", nResult );
+ return false;
+ }
+
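+ // Confirm the graphics queue family chosen earlier can also present to this surface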
+ VkBool32 bSupportsPresent = VK_FALSE;
+ nResult = vkGetPhysicalDeviceSurfaceSupportKHR( m_pPhysicalDevice, m_nQueueFamilyIndex, m_pSurface, &bSupportsPresent );
+ if ( nResult != VK_SUCCESS || bSupportsPresent == VK_FALSE )
+ {
+ dprintf( "vkGetPhysicalDeviceSurfaceSupportKHR present not supported.\n" );
+ return false;
+ }
+
+ // Query supported swapchain formats
+ VkFormat nSwapChainFormat;
+ uint32_t nFormatIndex = 0;
+ uint32_t nNumSupportedSwapChainFormats = 0;
+ VkColorSpaceKHR nColorSpace;
+ if ( vkGetPhysicalDeviceSurfaceFormatsKHR( m_pPhysicalDevice, m_pSurface, &nNumSupportedSwapChainFormats, NULL ) != VK_SUCCESS )
+ {
+ dprintf( "Unable to query size of supported swapchain formats.\n" );
+ return false;
+ }
+ VkSurfaceFormatKHR *pSupportedSurfaceFormats = new VkSurfaceFormatKHR[ nNumSupportedSwapChainFormats ];
+ if ( vkGetPhysicalDeviceSurfaceFormatsKHR( m_pPhysicalDevice, m_pSurface, &nNumSupportedSwapChainFormats, pSupportedSurfaceFormats ) != VK_SUCCESS )
+ {
+ dprintf( "Unable to query supported swapchain formats.\n" );
+ return false;
+ }
+ if ( nNumSupportedSwapChainFormats == 1 && pSupportedSurfaceFormats[0].format == VK_FORMAT_UNDEFINED )
+ {
+ nSwapChainFormat = VK_FORMAT_B8G8R8A8_UNORM;
+ }
+ else
+ {
+ // Favor sRGB if it's available
+ for ( nFormatIndex = 0; nFormatIndex < nNumSupportedSwapChainFormats; nFormatIndex++ )
+ {
+ if ( pSupportedSurfaceFormats[ nFormatIndex ].format == VK_FORMAT_B8G8R8A8_SRGB ||
+ pSupportedSurfaceFormats[ nFormatIndex ].format == VK_FORMAT_R8G8B8A8_SRGB )
+ {
+ break;
+ }
+ }
+ if ( nFormatIndex == nNumSupportedSwapChainFormats )
+ {
+ // Default to the first one if no sRGB
+ nFormatIndex = 0;
+ }
+ nSwapChainFormat = pSupportedSurfaceFormats[ nFormatIndex ].format;
+ }
+ nColorSpace = pSupportedSurfaceFormats[ nFormatIndex ].colorSpace;
+ delete [] pSupportedSurfaceFormats;
+
+ // Check the surface properties and formats
+ VkSurfaceCapabilitiesKHR surfaceCaps = {};
+ nResult = vkGetPhysicalDeviceSurfaceCapabilitiesKHR( m_pPhysicalDevice, m_pSurface, &surfaceCaps );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkGetPhysicalDeviceSurfaceCapabilitiesKHR failed with error %d\n", nResult );
+ return false;
+ }
+
+ uint32_t nPresentModeCount = 0;
+ nResult = vkGetPhysicalDeviceSurfacePresentModesKHR( m_pPhysicalDevice, m_pSurface, &nPresentModeCount, NULL );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkGetPhysicalDeviceSurfacePresentModesKHR failed with error %d\n", nResult );
+ return false;
+ }
+ VkPresentModeKHR *pPresentModes = new VkPresentModeKHR[ nPresentModeCount ];
+ nResult = vkGetPhysicalDeviceSurfacePresentModesKHR( m_pPhysicalDevice, m_pSurface, &nPresentModeCount, pPresentModes );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkGetPhysicalDeviceSurfacePresentModesKHR failed with error %d\n", nResult );
+ return false;
+ }
+
+ // width and height are either both 0xFFFFFFFF (surface size undefined), or both valid.
+ VkExtent2D swapChainExtent;
+ if ( surfaceCaps.currentExtent.width == 0xFFFFFFFF )
+ {
+ // If the surface size is undefined, the size is set to the size of the images requested.
+ swapChainExtent.width = m_nCompanionWindowWidth;
+ swapChainExtent.height = m_nCompanionWindowHeight;
+ }
+ else
+ {
+ // If the surface size is defined, the swap chain size must match
+ swapChainExtent = surfaceCaps.currentExtent;
+ }
+
+ // VK_PRESENT_MODE_FIFO_KHR - equivalent of eglSwapInterval(1). The presentation engine waits for the next vertical blanking period to update
+ // the current image. Tearing cannot be observed. This mode must be supported by all implementations.
+ VkPresentModeKHR swapChainPresentMode = VK_PRESENT_MODE_FIFO_KHR;
+ for ( uint32_t i = 0; i < nPresentModeCount; i++ )
+ {
+ // Order of preference for no vsync:
+ // 1. VK_PRESENT_MODE_IMMEDIATE_KHR - The presentation engine does not wait for a vertical blanking period to update the current image,
+ // meaning this mode may result in visible tearing
+ // 2. VK_PRESENT_MODE_MAILBOX_KHR - The presentation engine waits for the next vertical blanking period to update the current image. Tearing cannot be observed.
+ // An internal single-entry queue is used to hold pending presentation requests.
+ // 3. VK_PRESENT_MODE_FIFO_RELAXED_KHR - equivalent of eglSwapInterval(-1).
+ if ( pPresentModes[i] == VK_PRESENT_MODE_IMMEDIATE_KHR )
+ {
+ // The presentation engine does not wait for a vertical blanking period to update the
+ // current image, meaning this mode may result in visible tearing
+ swapChainPresentMode = VK_PRESENT_MODE_IMMEDIATE_KHR;
+ break;
+ }
+ else if ( pPresentModes[i] == VK_PRESENT_MODE_MAILBOX_KHR )
+ {
+ swapChainPresentMode = VK_PRESENT_MODE_MAILBOX_KHR;
+ }
+ else if ( ( swapChainPresentMode != VK_PRESENT_MODE_MAILBOX_KHR ) &&
+ ( pPresentModes[i] == VK_PRESENT_MODE_FIFO_RELAXED_KHR ) )
+ {
+ // VK_PRESENT_MODE_FIFO_RELAXED_KHR - equivalent of eglSwapInterval(-1)
+ swapChainPresentMode = VK_PRESENT_MODE_FIFO_RELAXED_KHR;
+ }
+ }
+
+ // Request at least two swapchain images (but not fewer than the surface's minimum)
+ m_nSwapQueueImageCount = surfaceCaps.minImageCount;
+ if ( m_nSwapQueueImageCount < 2 )
+ {
+ m_nSwapQueueImageCount = 2;
+ }
+ if ( ( surfaceCaps.maxImageCount > 0 ) && ( m_nSwapQueueImageCount > surfaceCaps.maxImageCount ) )
+ {
+ // Application must settle for fewer images than desired:
+ m_nSwapQueueImageCount = surfaceCaps.maxImageCount;
+ }
+
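+ // Prefer the identity transform when the surface supports it; otherwise fall back to the surface's current transform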
+ VkSurfaceTransformFlagsKHR preTransform;
+ if ( surfaceCaps.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR )
+ {
+ preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
+ }
+ else
+ {
+ preTransform = surfaceCaps.currentTransform;
+ }
+
+ VkImageUsageFlags nImageUsageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ if ( ( surfaceCaps.supportedUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT ) )
+ {
+ nImageUsageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ }
+ else
+ {
+ dprintf( "Vulkan swapchain does not support VK_IMAGE_USAGE_TRANSFER_DST_BIT. Some operations may not be supported.\n" );
+ }
+
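+ // Fill out the swapchain create info using the image count, format, extent and present mode selected above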
+ VkSwapchainCreateInfoKHR swapChainCreateInfo = {};
+ swapChainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
+ swapChainCreateInfo.pNext = NULL;
+ swapChainCreateInfo.surface = m_pSurface;
+ swapChainCreateInfo.minImageCount = m_nSwapQueueImageCount;
+ swapChainCreateInfo.imageFormat = nSwapChainFormat;
+ swapChainCreateInfo.imageColorSpace = nColorSpace;
+ swapChainCreateInfo.imageExtent = swapChainExtent;
+ swapChainCreateInfo.imageUsage = nImageUsageFlags;
+ swapChainCreateInfo.preTransform = ( VkSurfaceTransformFlagBitsKHR ) preTransform;
+ swapChainCreateInfo.imageArrayLayers = 1;
+ swapChainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ swapChainCreateInfo.queueFamilyIndexCount = 0;
+ swapChainCreateInfo.pQueueFamilyIndices = NULL;
+ swapChainCreateInfo.presentMode = swapChainPresentMode;
+ swapChainCreateInfo.clipped = VK_TRUE;
+ if ( ( surfaceCaps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR ) != 0 )
+ {
+ swapChainCreateInfo.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
+ }
+ else if ( ( surfaceCaps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR ) != 0 )
+ {
+ swapChainCreateInfo.compositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR;
+ }
+ else
+ {
+ dprintf( "Unexpected value for VkSurfaceCapabilitiesKHR.compositeAlpha: %x\n", surfaceCaps.supportedCompositeAlpha );
+ }
+
+ nResult = vkCreateSwapchainKHR( m_pDevice, &swapChainCreateInfo, NULL, &m_pSwapchain );
+ if( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkCreateSwapchainKHR returned an error %d.\n", nResult );
+ return false;
+ }
+
+ nResult = vkGetSwapchainImagesKHR( m_pDevice, m_pSwapchain, &m_nSwapQueueImageCount, NULL );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkGetSwapchainImagesKHR failed with error %d\n", nResult );
+ return false;
+ }
+ m_swapchainImages.resize( m_nSwapQueueImageCount );
+ nResult = vkGetSwapchainImagesKHR( m_pDevice, m_pSwapchain, &m_nSwapQueueImageCount, &m_swapchainImages[ 0 ] );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkGetSwapchainImagesKHR failed with error %d\n", nResult );
+ return false;
+ }
+
+ // Create a renderpass
+ uint32_t nTotalAttachments = 1;
+ VkAttachmentDescription attachmentDesc;
+ VkAttachmentReference attachmentReference;
+ attachmentReference.attachment = 0;
+ attachmentReference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+
+ attachmentDesc.format = nSwapChainFormat;
+ attachmentDesc.samples = VK_SAMPLE_COUNT_1_BIT;
+ attachmentDesc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+ attachmentDesc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ attachmentDesc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ attachmentDesc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ attachmentDesc.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ attachmentDesc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ attachmentDesc.flags = 0;
+
+ VkSubpassDescription subPassCreateInfo = { };
+ subPassCreateInfo.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+ subPassCreateInfo.flags = 0;
+ subPassCreateInfo.inputAttachmentCount = 0;
+ subPassCreateInfo.pInputAttachments = NULL;
+ subPassCreateInfo.colorAttachmentCount = 1;
+ subPassCreateInfo.pColorAttachments = &attachmentReference;
+ subPassCreateInfo.pResolveAttachments = NULL;
+ subPassCreateInfo.pDepthStencilAttachment = NULL;
+ subPassCreateInfo.preserveAttachmentCount = 0;
+ subPassCreateInfo.pPreserveAttachments = NULL;
+
+ VkRenderPassCreateInfo renderPassCreateInfo = { };
+ renderPassCreateInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+ renderPassCreateInfo.flags = 0;
+ renderPassCreateInfo.attachmentCount = 1;
+ renderPassCreateInfo.pAttachments = &attachmentDesc;
+ renderPassCreateInfo.subpassCount = 1;
+ renderPassCreateInfo.pSubpasses = &subPassCreateInfo;
+ renderPassCreateInfo.dependencyCount = 0;
+ renderPassCreateInfo.pDependencies = NULL;
+
+ nResult = vkCreateRenderPass( m_pDevice, &renderPassCreateInfo, NULL, &m_pSwapchainRenderPass );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkCreateRenderPass failed with error %d\n", nResult );
+ return false;
+ }
+
+ // Create image views and framebuffers for each swapchain image
+ for ( size_t nImage = 0; nImage < m_swapchainImages.size(); nImage++ )
+ {
+ VkImageViewCreateInfo imageViewCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO };
+ imageViewCreateInfo.flags = 0;
+ imageViewCreateInfo.image = m_swapchainImages[ nImage ];
+ imageViewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
+ imageViewCreateInfo.format = nSwapChainFormat;
+ imageViewCreateInfo.components = { VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY };
+ imageViewCreateInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ imageViewCreateInfo.subresourceRange.baseMipLevel = 0;
+ imageViewCreateInfo.subresourceRange.levelCount = 1;
+ imageViewCreateInfo.subresourceRange.baseArrayLayer = 0;
+ imageViewCreateInfo.subresourceRange.layerCount = 1;
+ VkImageView pImageView = VK_NULL_HANDLE;
+ vkCreateImageView( m_pDevice, &imageViewCreateInfo, nullptr, &pImageView );
+ m_pSwapchainImageViews.push_back( pImageView );
+
+ VkImageView attachments[ 1 ] = { pImageView };
+ VkFramebufferCreateInfo framebufferCreateInfo = { VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO };
+ framebufferCreateInfo.renderPass = m_pSwapchainRenderPass;
+ framebufferCreateInfo.attachmentCount = 1;
+ framebufferCreateInfo.pAttachments = &attachments[ 0 ];
+ framebufferCreateInfo.width = m_nCompanionWindowWidth;
+ framebufferCreateInfo.height = m_nCompanionWindowHeight;
+ framebufferCreateInfo.layers = 1;
+ VkFramebuffer pFramebuffer;
+ nResult = vkCreateFramebuffer( m_pDevice, &framebufferCreateInfo, NULL, &pFramebuffer );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkCreateFramebuffer failed with error %d.\n", nResult );
+ return false;
+ }
+ m_pSwapchainFramebuffers.push_back( pFramebuffer );
+
+ VkSemaphoreCreateInfo semaphoreCreateInfo = { VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO };
+ VkSemaphore pSemaphore = VK_NULL_HANDLE;
+ vkCreateSemaphore( m_pDevice, &semaphoreCreateInfo, nullptr, &pSemaphore );
+ m_pSwapchainSemaphores.push_back( pSemaphore );
+ }
+
+ delete [] pPresentModes;
+ return true;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Initialize Vulkan. Returns true if Vulkan has been successfully
+// initialized, false if the instance, device, swapchain, resources or
+// shaders could not be created.
+//-----------------------------------------------------------------------------
+bool CMainApplication::BInitVulkan()
+{
+ if ( !BInitVulkanInstance() )
+ return false;
+
+ if ( !BInitVulkanDevice() )
+ return false;
+
+ if ( !BInitVulkanSwapchain() )
+ return false;
+
+ VkResult nResult;
+
+ // Create the command pool
+ {
+ VkCommandPoolCreateInfo commandPoolCreateInfo = { VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO };
+ commandPoolCreateInfo.queueFamilyIndex = m_nQueueFamilyIndex;
+ commandPoolCreateInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
+ nResult = vkCreateCommandPool( m_pDevice, &commandPoolCreateInfo, nullptr, &m_pCommandPool );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkCreateCommandPool returned error %d.", nResult );
+ return false;
+ }
+ }
+
+ // Command buffer used during resource loading
+ m_currentCommandBuffer = GetCommandBuffer();
+ VkCommandBufferBeginInfo commandBufferBeginInfo = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO };
+ commandBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ vkBeginCommandBuffer( m_currentCommandBuffer.m_pCommandBuffer, &commandBufferBeginInfo );
+
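+ // The Setup* calls below record their resource uploads (texture copies, buffer fills, layout transitions) into this loading command buffer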
+ SetupTexturemaps();
+ SetupScene();
+ SetupCameras();
+ SetupStereoRenderTargets();
+ SetupCompanionWindow();
+
+ if( !CreateAllShaders() )
+ return false;
+
+ CreateAllDescriptorSets();
+ SetupRenderModels();
+
+ // Submit the command buffer used during loading
+ vkEndCommandBuffer( m_currentCommandBuffer.m_pCommandBuffer );
+ VkSubmitInfo submitInfo = { VK_STRUCTURE_TYPE_SUBMIT_INFO };
+ submitInfo.commandBufferCount = 1;
+ submitInfo.pCommandBuffers = &m_currentCommandBuffer.m_pCommandBuffer;
+ vkQueueSubmit( m_pQueue, 1, &submitInfo, m_currentCommandBuffer.m_pFence );
+ m_commandBuffers.push_front( m_currentCommandBuffer );
+
+ // Wait for the GPU before proceeding
+ vkQueueWaitIdle( m_pQueue );
+
+ return true;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Initialize Compositor. Returns true if the compositor was
+// successfully initialized, false otherwise.
+//-----------------------------------------------------------------------------
+bool CMainApplication::BInitCompositor()
+{
+ vr::EVRInitError peError = vr::VRInitError_None;
+
+ if ( !vr::VRCompositor() )
+ {
+ dprintf( "Compositor initialization failed. See log file for details\n" );
+ return false;
+ }
+
+ return true;
+}
+
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//-----------------------------------------------------------------------------
+void CMainApplication::Shutdown()
+{
+ if ( m_pDevice != VK_NULL_HANDLE )
+ {
+ // Idle the device to make sure no work is outstanding
+ vkDeviceWaitIdle( m_pDevice );
+ }
+
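+ // Shut down the OpenVR runtime before tearing down the Vulkan objects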
+ if( m_pHMD )
+ {
+ vr::VR_Shutdown();
+ m_pHMD = NULL;
+ }
+
+ for( std::vector< VulkanRenderModel * >::iterator i = m_vecRenderModels.begin(); i != m_vecRenderModels.end(); i++ )
+ {
+ delete (*i);
+ }
+ m_vecRenderModels.clear();
+
+ if ( m_pDevice != VK_NULL_HANDLE )
+ {
+ for( std::deque< VulkanCommandBuffer_t >::iterator i = m_commandBuffers.begin(); i != m_commandBuffers.end(); i++ )
+ {
+ vkFreeCommandBuffers( m_pDevice, m_pCommandPool, 1, &i->m_pCommandBuffer );
+ vkDestroyFence( m_pDevice, i->m_pFence, nullptr );
+ }
+
+ vkDestroyCommandPool( m_pDevice, m_pCommandPool, nullptr );
+ vkDestroyDescriptorPool( m_pDevice, m_pDescriptorPool, nullptr );
+
+ FramebufferDesc *pFramebufferDescs[2] = { &m_leftEyeDesc, &m_rightEyeDesc };
+ for ( int32_t nFramebuffer = 0; nFramebuffer < 2; nFramebuffer++ )
+ {
+ if ( pFramebufferDescs[ nFramebuffer ]->m_pImageView != VK_NULL_HANDLE )
+ {
+ vkDestroyImageView( m_pDevice, pFramebufferDescs[ nFramebuffer ]->m_pImageView, nullptr );
+ vkDestroyImage( m_pDevice, pFramebufferDescs[ nFramebuffer ]->m_pImage, nullptr );
+ vkFreeMemory( m_pDevice, pFramebufferDescs[ nFramebuffer ]->m_pDeviceMemory, nullptr );
+ vkDestroyImageView( m_pDevice, pFramebufferDescs[ nFramebuffer ]->m_pDepthStencilImageView, nullptr );
+ vkDestroyImage( m_pDevice, pFramebufferDescs[ nFramebuffer ]->m_pDepthStencilImage, nullptr );
+ vkFreeMemory( m_pDevice, pFramebufferDescs[ nFramebuffer ]->m_pDepthStencilDeviceMemory, nullptr );
+ vkDestroyRenderPass( m_pDevice, pFramebufferDescs[ nFramebuffer ]->m_pRenderPass, nullptr );
+ vkDestroyFramebuffer( m_pDevice, pFramebufferDescs[ nFramebuffer ]->m_pFramebuffer, nullptr );
+ }
+ }
+
+ vkDestroyImageView( m_pDevice, m_pSceneImageView, nullptr );
+ vkDestroyImage( m_pDevice, m_pSceneImage, nullptr );
+ vkFreeMemory( m_pDevice, m_pSceneImageMemory, nullptr );
+ vkDestroyBuffer( m_pDevice, m_pSceneStagingBuffer, nullptr );
+ vkFreeMemory( m_pDevice, m_pSceneStagingBufferMemory, nullptr );
+ vkDestroySampler( m_pDevice, m_pSceneSampler, nullptr );
+ vkDestroyBuffer( m_pDevice, m_pSceneVertexBuffer, nullptr );
+ vkFreeMemory( m_pDevice, m_pSceneVertexBufferMemory, nullptr );
+ for ( uint32_t nEye = 0; nEye < _countof( m_pSceneConstantBuffer); nEye++ )
+ {
+ vkDestroyBuffer( m_pDevice, m_pSceneConstantBuffer[ nEye ], nullptr );
+ vkFreeMemory( m_pDevice, m_pSceneConstantBufferMemory[ nEye ], nullptr );
+ }
+
+ vkDestroyBuffer( m_pDevice, m_pCompanionWindowVertexBuffer, nullptr );
+ vkFreeMemory( m_pDevice, m_pCompanionWindowVertexBufferMemory, nullptr );
+ vkDestroyBuffer( m_pDevice, m_pCompanionWindowIndexBuffer, nullptr );
+ vkFreeMemory( m_pDevice, m_pCompanionWindowIndexBufferMemory, nullptr );
+
+ vkDestroyBuffer( m_pDevice, m_pControllerAxesVertexBuffer, nullptr );
+ vkFreeMemory( m_pDevice, m_pControllerAxesVertexBufferMemory, nullptr );
+
+ vkDestroyPipelineLayout( m_pDevice, m_pPipelineLayout, nullptr );
+ vkDestroyDescriptorSetLayout( m_pDevice, m_pDescriptorSetLayout, nullptr );
+ for ( uint32_t nPSO = 0; nPSO < PSO_COUNT; nPSO++ )
+ {
+ vkDestroyPipeline( m_pDevice, m_pPipelines[ nPSO ], nullptr );
+ }
+ for ( uint32_t nShader = 0; nShader < _countof( m_pShaderModules); nShader++ )
+ {
+ vkDestroyShaderModule( m_pDevice, m_pShaderModules[ nShader ], nullptr );
+ }
+ vkDestroyPipelineCache( m_pDevice, m_pPipelineCache, nullptr );
+
+ if ( m_pDebugReportCallback != VK_NULL_HANDLE )
+ {
+ g_pVkDestroyDebugReportCallbackEXT( m_pInstance, m_pDebugReportCallback, nullptr );
+ }
+
+ for ( size_t nSwapchainIndex = 0; nSwapchainIndex < m_pSwapchainFramebuffers.size(); nSwapchainIndex++ )
+ {
+ vkDestroyFramebuffer( m_pDevice, m_pSwapchainFramebuffers[ nSwapchainIndex ], nullptr );
+ vkDestroyImageView( m_pDevice, m_pSwapchainImageViews[ nSwapchainIndex ], nullptr );
+ vkDestroySemaphore( m_pDevice, m_pSwapchainSemaphores[ nSwapchainIndex ], nullptr );
+ }
+ vkDestroyRenderPass( m_pDevice, m_pSwapchainRenderPass, nullptr );
+
+ vkDestroySwapchainKHR( m_pDevice, m_pSwapchain, nullptr );
+ vkDestroySurfaceKHR( m_pInstance, m_pSurface, nullptr );
+ vkDestroyDevice( m_pDevice, nullptr );
+ vkDestroyInstance( m_pInstance, nullptr );
+ }
+ if( m_pCompanionWindow )
+ {
+ SDL_DestroyWindow(m_pCompanionWindow);
+ }
+
+ SDL_Quit();
+}
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//-----------------------------------------------------------------------------
+bool CMainApplication::HandleInput()
+{
+ SDL_Event sdlEvent;
+ bool bRet = false;
+
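+ // Drain pending SDL events: Escape or 'q' quits, 'c' toggles rendering of the cube scene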
+ while ( SDL_PollEvent( &sdlEvent ) != 0 )
+ {
+ if ( sdlEvent.type == SDL_QUIT )
+ {
+ bRet = true;
+ }
+ else if ( sdlEvent.type == SDL_KEYDOWN )
+ {
+ if ( sdlEvent.key.keysym.sym == SDLK_ESCAPE
+ || sdlEvent.key.keysym.sym == SDLK_q )
+ {
+ bRet = true;
+ }
+ if( sdlEvent.key.keysym.sym == SDLK_c )
+ {
+ m_bShowCubes = !m_bShowCubes;
+ }
+ }
+ }
+
+ // Process SteamVR events
+ vr::VREvent_t event;
+ while( m_pHMD->PollNextEvent( &event, sizeof( event ) ) )
+ {
+ ProcessVREvent( event );
+ }
+
+ // Process SteamVR controller state
+ for( vr::TrackedDeviceIndex_t unDevice = 0; unDevice < vr::k_unMaxTrackedDeviceCount; unDevice++ )
+ {
+ vr::VRControllerState_t state;
+ if( m_pHMD->GetControllerState( unDevice, &state, sizeof(state) ) )
+ {
+ m_rbShowTrackedDevice[ unDevice ] = state.ulButtonPressed == 0;
+ }
+ }
+
+ return bRet;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//-----------------------------------------------------------------------------
+void CMainApplication::RunMainLoop()
+{
+ bool bQuit = false;
+
+ SDL_StartTextInput();
+ SDL_ShowCursor( SDL_DISABLE );
+
+ while ( !bQuit )
+ {
+ bQuit = HandleInput();
+
+ RenderFrame();
+ }
+
+ SDL_StopTextInput();
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Processes a single VR event
+//-----------------------------------------------------------------------------
+void CMainApplication::ProcessVREvent( const vr::VREvent_t & event )
+{
+ switch( event.eventType )
+ {
+ case vr::VREvent_TrackedDeviceActivated:
+ {
+ SetupRenderModelForTrackedDevice( event.trackedDeviceIndex );
+ dprintf( "Device %u attached. Setting up render model.\n", event.trackedDeviceIndex );
+ }
+ break;
+ case vr::VREvent_TrackedDeviceDeactivated:
+ {
+ dprintf( "Device %u detached.\n", event.trackedDeviceIndex );
+ }
+ break;
+ case vr::VREvent_TrackedDeviceUpdated:
+ {
+ dprintf( "Device %u updated.\n", event.trackedDeviceIndex );
+ }
+ break;
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//-----------------------------------------------------------------------------
+void CMainApplication::RenderFrame()
+{
+ if ( m_pHMD )
+ {
+ m_currentCommandBuffer = GetCommandBuffer();
+
+ // Start the command buffer
+ VkCommandBufferBeginInfo commandBufferBeginInfo = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO };
+ commandBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ vkBeginCommandBuffer( m_currentCommandBuffer.m_pCommandBuffer, &commandBufferBeginInfo );
+
+ UpdateControllerAxes();
+ RenderStereoTargets();
+ RenderCompanionWindow();
+
+ // End the command buffer
+ vkEndCommandBuffer( m_currentCommandBuffer.m_pCommandBuffer );
+
+ // Submit the command buffer
+ VkPipelineStageFlags nWaitDstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+ VkSubmitInfo submitInfo = { VK_STRUCTURE_TYPE_SUBMIT_INFO };
+ submitInfo.commandBufferCount = 1;
+ submitInfo.pCommandBuffers = &m_currentCommandBuffer.m_pCommandBuffer;
+ submitInfo.waitSemaphoreCount = 1;
+ submitInfo.pWaitSemaphores = &m_pSwapchainSemaphores[ m_nFrameIndex ];
+ submitInfo.pWaitDstStageMask = &nWaitDstStageMask;
+ vkQueueSubmit( m_pQueue, 1, &submitInfo, m_currentCommandBuffer.m_pFence );
+
+ // Add the command buffer back for later recycling
+ m_commandBuffers.push_front( m_currentCommandBuffer );
+
+ // Submit to SteamVR
+ vr::VRTextureBounds_t bounds;
+ bounds.uMin = 0.0f;
+ bounds.uMax = 1.0f;
+ bounds.vMin = 0.0f;
+ bounds.vMax = 1.0f;
+
+ vr::VRVulkanTextureData_t vulkanData;
+ vulkanData.m_nImage = ( uint64_t ) m_leftEyeDesc.m_pImage;
+ vulkanData.m_pDevice = ( VkDevice_T * ) m_pDevice;
+ vulkanData.m_pPhysicalDevice = ( VkPhysicalDevice_T * ) m_pPhysicalDevice;
+ vulkanData.m_pInstance = ( VkInstance_T *) m_pInstance;
+ vulkanData.m_pQueue = ( VkQueue_T * ) m_pQueue;
+ vulkanData.m_nQueueFamilyIndex = m_nQueueFamilyIndex;
+
+ vulkanData.m_nWidth = m_nRenderWidth;
+ vulkanData.m_nHeight = m_nRenderHeight;
+ vulkanData.m_nFormat = VK_FORMAT_R8G8B8A8_SRGB;
+ vulkanData.m_nSampleCount = m_nMSAASampleCount;
+
+ vr::Texture_t texture = { &vulkanData, vr::TextureType_Vulkan, vr::ColorSpace_Auto };
+ vr::VRCompositor()->Submit( vr::Eye_Left, &texture, &bounds );
+
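+ // The right eye reuses the same texture descriptor, only swapping in the right-eye image handle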
+ vulkanData.m_nImage = ( uint64_t ) m_rightEyeDesc.m_pImage;
+ vr::VRCompositor()->Submit( vr::Eye_Right, &texture, &bounds );
+ }
+
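+ // Present the companion window's swapchain image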
+ VkPresentInfoKHR presentInfo = { VK_STRUCTURE_TYPE_PRESENT_INFO_KHR };
+ presentInfo.pNext = NULL;
+ presentInfo.swapchainCount = 1;
+ presentInfo.pSwapchains = &m_pSwapchain;
+ presentInfo.pImageIndices = &m_nCurrentSwapchainImage;
+ vkQueuePresentKHR( m_pQueue, &presentInfo );
+
+ // Spew out the controller and pose count whenever they change.
+ if ( m_iTrackedControllerCount != m_iTrackedControllerCount_Last || m_iValidPoseCount != m_iValidPoseCount_Last )
+ {
+ m_iValidPoseCount_Last = m_iValidPoseCount;
+ m_iTrackedControllerCount_Last = m_iTrackedControllerCount;
+
+ dprintf( "PoseCount:%d(%s) Controllers:%d\n", m_iValidPoseCount, m_strPoseClasses.c_str(), m_iTrackedControllerCount );
+ }
+
+ UpdateHMDMatrixPose();
+
+ m_nFrameIndex = ( m_nFrameIndex + 1 ) % m_swapchainImages.size();
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Creates all the shaders used by HelloVR Vulkan
+//-----------------------------------------------------------------------------
+bool CMainApplication::CreateAllShaders()
+{
+ VkResult nResult;
+ std::string sExecutableDirectory = Path_StripFilename( Path_GetExecutablePath() );
+
+ const char *pShaderNames[ PSO_COUNT ] =
+ {
+ "scene",
+ "axes",
+ "rendermodel",
+ "companion"
+ };
+ const char *pStageNames[ 2 ] =
+ {
+ "vs",
+ "ps"
+ };
+
+ // Load the SPIR-V into shader modules
+ for ( int32_t nShader = 0; nShader < PSO_COUNT; nShader++ )
+ {
+ for ( int32_t nStage = 0; nStage <= 1; nStage++ )
+ {
+ char shaderFileName[ 1024 ];
+ sprintf( shaderFileName, "../shaders/%s_%s.spv", pShaderNames[ nShader ], pStageNames[ nStage ] );
+ std::string shaderPath = Path_MakeAbsolute( shaderFileName, sExecutableDirectory );
+
+ FILE *fp = fopen( shaderPath.c_str(), "rb" );
+ if ( fp == NULL )
+ {
+ dprintf( "Error opening SPIR-V file: %s\n", shaderPath.c_str() );
+ return false;
+ }
+ fseek( fp, 0, SEEK_END );
+ size_t nSize = ftell( fp );
+ fseek( fp, 0, SEEK_SET );
+
+ char *pBuffer = new char[ nSize ];
+ if ( fread( pBuffer, 1, nSize, fp ) != nSize )
+ {
+ dprintf( "Error reading SPIR-V file: %s\n", shaderPath.c_str() );
+ fclose( fp );
+ delete [] pBuffer;
+ return false;
+ }
+ fclose( fp );
+
+ // Create the shader module
+ VkShaderModuleCreateInfo shaderModuleCreateInfo = { VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO };
+ shaderModuleCreateInfo.codeSize = nSize;
+ shaderModuleCreateInfo.pCode = ( const uint32_t *) pBuffer;
+ nResult = vkCreateShaderModule( m_pDevice, &shaderModuleCreateInfo, nullptr, &m_pShaderModules[ nShader * 2 + nStage ] );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "Error creating shader module for %s, error %d\n", shaderPath.c_str(), nResult );
+ return false;
+ }
+
+ delete [] pBuffer;
+ }
+ }
+
+ // Create a descriptor set layout/pipeline layout compatible with all of our shaders. See bin/shaders/build_vulkan_shaders.bat for
+ // how the HLSL is compiled with glslangValidator and binding numbers are generated
+ VkDescriptorSetLayoutBinding layoutBindings[3] = {};
+ layoutBindings[0].binding = 0;
+ layoutBindings[0].descriptorCount = 1;
+ layoutBindings[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ layoutBindings[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
+
+ layoutBindings[1].binding = 1;
+ layoutBindings[1].descriptorCount = 1;
+ layoutBindings[1].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+ layoutBindings[1].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
+
+ layoutBindings[2].binding = 2;
+ layoutBindings[2].descriptorCount = 1;
+ layoutBindings[2].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
+ layoutBindings[2].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
+
+ VkDescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo = { VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO };
+ descriptorSetLayoutCreateInfo.bindingCount = 3;
+ descriptorSetLayoutCreateInfo.pBindings = &layoutBindings[ 0 ];
+ nResult = vkCreateDescriptorSetLayout( m_pDevice, &descriptorSetLayoutCreateInfo, nullptr, &m_pDescriptorSetLayout );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkCreateDescriptorSetLayout failed with error %d\n", nResult );
+ return false;
+ }
+
+ VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo = { VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO };
+ pipelineLayoutCreateInfo.pNext = NULL;
+ pipelineLayoutCreateInfo.setLayoutCount = 1;
+ pipelineLayoutCreateInfo.pSetLayouts = &m_pDescriptorSetLayout;
+ pipelineLayoutCreateInfo.pushConstantRangeCount = 0;
+ pipelineLayoutCreateInfo.pPushConstantRanges = NULL;
+ nResult = vkCreatePipelineLayout( m_pDevice, &pipelineLayoutCreateInfo, nullptr, &m_pPipelineLayout );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkCreatePipelineLayout failed with error %d\n", nResult );
+ return false;
+ }
+
+ // Create pipeline cache
+ VkPipelineCacheCreateInfo pipelineCacheCreateInfo = { VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO };
+ vkCreatePipelineCache( m_pDevice, &pipelineCacheCreateInfo, NULL, &m_pPipelineCache );
+
+ // Renderpass for each PSO that is compatible with what it will render to
+ VkRenderPass pRenderPasses[ PSO_COUNT ] =
+ {
+ m_leftEyeDesc.m_pRenderPass,
+ m_leftEyeDesc.m_pRenderPass,
+ m_leftEyeDesc.m_pRenderPass,
+ m_pSwapchainRenderPass
+ };
+
+ size_t nStrides[ PSO_COUNT ] =
+ {
+ sizeof( VertexDataScene ), // PSO_SCENE
+ sizeof( float ) * 6, // PSO_AXES
+ sizeof( vr::RenderModel_Vertex_t ), // PSO_RENDERMODEL
+ sizeof( VertexDataWindow ) // PSO_COMPANION
+ };
+
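+ // Up to three vertex attributes per PSO; a VK_FORMAT_UNDEFINED entry marks an unused attribute slot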
+ VkVertexInputAttributeDescription attributeDescriptions[ PSO_COUNT * 3 ]
+ {
+ // PSO_SCENE
+ { 0, 0, VK_FORMAT_R32G32B32_SFLOAT, 0 },
+ { 1, 0, VK_FORMAT_R32G32_SFLOAT, offsetof( VertexDataScene, texCoord ) },
+ { 0, 0, VK_FORMAT_UNDEFINED, 0 },
+ // PSO_AXES
+ { 0, 0, VK_FORMAT_R32G32B32_SFLOAT, 0 },
+ { 1, 0, VK_FORMAT_R32G32B32_SFLOAT, sizeof( float ) * 3 },
+ { 0, 0, VK_FORMAT_UNDEFINED, 0 },
+ // PSO_RENDERMODEL
+ { 0, 0, VK_FORMAT_R32G32B32_SFLOAT, 0 },
+ { 1, 0, VK_FORMAT_R32G32B32_SFLOAT, offsetof( vr::RenderModel_Vertex_t, vNormal ) },
+ { 2, 0, VK_FORMAT_R32G32_SFLOAT, offsetof( vr::RenderModel_Vertex_t, rfTextureCoord ) },
+ // PSO_COMPANION
+ { 0, 0, VK_FORMAT_R32G32_SFLOAT, 0 },
+ { 1, 0, VK_FORMAT_R32G32_SFLOAT, sizeof( float ) * 2 },
+ { 0, 0, VK_FORMAT_UNDEFINED, 0 },
+ };
+
+ // Create the PSOs
+ for ( uint32_t nPSO = 0; nPSO < PSO_COUNT; nPSO++ )
+ {
+ VkGraphicsPipelineCreateInfo pipelineCreateInfo = { VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO };
+
+ // VkPipelineVertexInputStateCreateInfo
+ VkVertexInputBindingDescription bindingDescription;
+ bindingDescription.binding = 0;
+ bindingDescription.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
+ bindingDescription.stride = nStrides[ nPSO ];
+
+ VkPipelineVertexInputStateCreateInfo vertexInputCreateInfo = { VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO };
+ for ( uint32_t nAttr = 0; nAttr < 3; nAttr++ )
+ {
+ if ( attributeDescriptions[ nPSO * 3 + nAttr ].format != VK_FORMAT_UNDEFINED )
+ {
+ vertexInputCreateInfo.vertexAttributeDescriptionCount++;
+ }
+ }
+ vertexInputCreateInfo.pVertexAttributeDescriptions = &attributeDescriptions[ nPSO * 3 ];
+ vertexInputCreateInfo.vertexBindingDescriptionCount = 1;
+ vertexInputCreateInfo.pVertexBindingDescriptions = &bindingDescription;
+
+ // VkPipelineDepthStencilStateCreateInfo
+ VkPipelineDepthStencilStateCreateInfo dsState = { VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO };
+ dsState.depthTestEnable = ( nPSO != PSO_COMPANION ) ? VK_TRUE : VK_FALSE;
+ dsState.depthWriteEnable = ( nPSO != PSO_COMPANION ) ? VK_TRUE : VK_FALSE;
+ dsState.depthCompareOp = VK_COMPARE_OP_LESS_OR_EQUAL;
+ dsState.depthBoundsTestEnable = VK_FALSE;
+ dsState.stencilTestEnable = VK_FALSE;
+ dsState.minDepthBounds = 0.0f;
+ dsState.maxDepthBounds = 0.0f;
+
+ // VkPipelineColorBlendStateCreateInfo
+ VkPipelineColorBlendStateCreateInfo cbState = { VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO };
+ cbState.logicOpEnable = VK_FALSE;
+ cbState.logicOp = VK_LOGIC_OP_COPY;
+ VkPipelineColorBlendAttachmentState cbAttachmentState = {};
+ cbAttachmentState.blendEnable = VK_FALSE;
+ cbAttachmentState.colorWriteMask = 0xf;
+ cbState.attachmentCount = 1;
+ cbState.pAttachments = &cbAttachmentState;
+
+ // VkPipelineRasterizationStateCreateInfo
+ VkPipelineRasterizationStateCreateInfo rsState = { VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO };
+ rsState.polygonMode = VK_POLYGON_MODE_FILL;
+ rsState.cullMode = VK_CULL_MODE_BACK_BIT;
+ rsState.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
+ rsState.lineWidth = 1.0f;
+
+ // VkPipelineInputAssemblyStateCreateInfo
+ VkPipelineInputAssemblyStateCreateInfo iaState = { VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO };
+ iaState.topology = ( nPSO == PSO_AXES ) ? VK_PRIMITIVE_TOPOLOGY_LINE_LIST : VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
+ iaState.primitiveRestartEnable = VK_FALSE;
+
+ // VkPipelineMultisampleStateCreateInfo
+ VkPipelineMultisampleStateCreateInfo msState = { VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO };
+ msState.rasterizationSamples = ( nPSO == PSO_COMPANION ) ? VK_SAMPLE_COUNT_1_BIT : ( VkSampleCountFlagBits ) m_nMSAASampleCount;
+ msState.minSampleShading = 0.0f;
+ uint32_t nSampleMask = 0xFFFFFFFF;
+ msState.pSampleMask = &nSampleMask;
+
+ // VkPipelineViewportStateCreateInfo
+ VkPipelineViewportStateCreateInfo vpState = { VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO };
+ vpState.viewportCount = 1;
+ vpState.scissorCount = 1;
+
+ VkPipelineShaderStageCreateInfo shaderStages[ 2 ] = { };
+ shaderStages[ 0 ].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+ shaderStages[ 0 ].stage = VK_SHADER_STAGE_VERTEX_BIT;
+ shaderStages[ 0 ].module = m_pShaderModules[ nPSO * 2 + 0 ];
+ shaderStages[ 0 ].pName = "VSMain";
+
+ shaderStages[ 1 ].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+ shaderStages[ 1 ].stage = VK_SHADER_STAGE_FRAGMENT_BIT;
+ shaderStages[ 1 ].module = m_pShaderModules[ nPSO * 2 + 1 ];
+ shaderStages[ 1 ].pName = "PSMain";
+
+ pipelineCreateInfo.layout = m_pPipelineLayout;
+
+ // Set pipeline states
+ pipelineCreateInfo.pVertexInputState = &vertexInputCreateInfo;
+ pipelineCreateInfo.pInputAssemblyState = &iaState;
+ pipelineCreateInfo.pViewportState = &vpState;
+ pipelineCreateInfo.pRasterizationState = &rsState;
+ pipelineCreateInfo.pMultisampleState = &msState;
+ pipelineCreateInfo.pDepthStencilState = &dsState;
+ pipelineCreateInfo.pColorBlendState = &cbState;
+ pipelineCreateInfo.stageCount = 2;
+ pipelineCreateInfo.pStages = &shaderStages[ 0 ];
+ pipelineCreateInfo.renderPass = pRenderPasses[ nPSO ];
+
+ static VkDynamicState dynamicStates[] =
+ {
+ VK_DYNAMIC_STATE_VIEWPORT,
+ VK_DYNAMIC_STATE_SCISSOR,
+ };
+
+ static VkPipelineDynamicStateCreateInfo dynamicStateCreateInfo = {};
+ dynamicStateCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
+ dynamicStateCreateInfo.pNext = NULL;
+ dynamicStateCreateInfo.dynamicStateCount = _countof( dynamicStates );
+ dynamicStateCreateInfo.pDynamicStates = &dynamicStates[ 0 ];
+ pipelineCreateInfo.pDynamicState = &dynamicStateCreateInfo;
+
+
+ // Create the pipeline
+ nResult = vkCreateGraphicsPipelines( m_pDevice, m_pPipelineCache, 1, &pipelineCreateInfo, NULL, &m_pPipelines[ nPSO ] );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkCreateGraphicsPipelines failed with error %d\n", nResult );
+ return false;
+ }
+ }
+
+ return true;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Creates all the descriptor sets
+//-----------------------------------------------------------------------------
+void CMainApplication::CreateAllDescriptorSets()
+{
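+ // The pool provides one uniform buffer, one sampled image and one sampler per set, matching the descriptor set layout created in CreateAllShaders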
+ VkDescriptorPoolSize poolSizes[ 3 ];
+ poolSizes[ 0 ].descriptorCount = NUM_DESCRIPTOR_SETS;
+ poolSizes[ 0 ].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ poolSizes[ 1 ].descriptorCount = NUM_DESCRIPTOR_SETS;
+ poolSizes[ 1 ].type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+ poolSizes[ 2 ].descriptorCount = NUM_DESCRIPTOR_SETS;
+ poolSizes[ 2 ].type = VK_DESCRIPTOR_TYPE_SAMPLER;
+
+ VkDescriptorPoolCreateInfo descriptorPoolCreateInfo = { VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO };
+ descriptorPoolCreateInfo.flags = 0;
+ descriptorPoolCreateInfo.maxSets = NUM_DESCRIPTOR_SETS;
+ descriptorPoolCreateInfo.poolSizeCount = _countof( poolSizes );
+ descriptorPoolCreateInfo.pPoolSizes = &poolSizes[ 0 ];
+
+ vkCreateDescriptorPool( m_pDevice, &descriptorPoolCreateInfo, nullptr, &m_pDescriptorPool );
+
+ for ( int nDescriptorSet = 0; nDescriptorSet < NUM_DESCRIPTOR_SETS; nDescriptorSet++ )
+ {
+ VkDescriptorSetAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO };
+ allocInfo.descriptorPool = m_pDescriptorPool;
+ allocInfo.descriptorSetCount = 1;
+ allocInfo.pSetLayouts = &m_pDescriptorSetLayout;
+ vkAllocateDescriptorSets( m_pDevice, &allocInfo, &m_pDescriptorSets[ nDescriptorSet ] );
+ }
+
+ // Scene descriptor sets
+ for ( uint32_t nEye = 0; nEye < 2; nEye++ )
+ {
+ VkDescriptorBufferInfo bufferInfo = {};
+ bufferInfo.buffer = m_pSceneConstantBuffer[ nEye ];
+ bufferInfo.offset = 0;
+ bufferInfo.range = VK_WHOLE_SIZE;
+
+ VkDescriptorImageInfo imageInfo = {};
+ imageInfo.imageView = m_pSceneImageView;
+ imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+
+ VkDescriptorImageInfo samplerInfo = {};
+ samplerInfo.sampler = m_pSceneSampler;
+
+ VkWriteDescriptorSet writeDescriptorSets[ 3 ] = { };
+ writeDescriptorSets[ 0 ].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ writeDescriptorSets[ 0 ].dstSet = m_pDescriptorSets[ DESCRIPTOR_SET_LEFT_EYE_SCENE + nEye ];
+ writeDescriptorSets[ 0 ].dstBinding = 0;
+ writeDescriptorSets[ 0 ].descriptorCount = 1;
+ writeDescriptorSets[ 0 ].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ writeDescriptorSets[ 0 ].pBufferInfo = &bufferInfo;
+ writeDescriptorSets[ 1 ].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ writeDescriptorSets[ 1 ].dstSet = m_pDescriptorSets[ DESCRIPTOR_SET_LEFT_EYE_SCENE + nEye ];
+ writeDescriptorSets[ 1 ].dstBinding = 1;
+ writeDescriptorSets[ 1 ].descriptorCount = 1;
+ writeDescriptorSets[ 1 ].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+ writeDescriptorSets[ 1 ].pImageInfo = &imageInfo;
+ writeDescriptorSets[ 2 ].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ writeDescriptorSets[ 2 ].dstSet = m_pDescriptorSets[ DESCRIPTOR_SET_LEFT_EYE_SCENE + nEye ];
+ writeDescriptorSets[ 2 ].dstBinding = 2;
+ writeDescriptorSets[ 2 ].descriptorCount = 1;
+ writeDescriptorSets[ 2 ].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
+ writeDescriptorSets[ 2 ].pImageInfo = &samplerInfo;
+
+ vkUpdateDescriptorSets( m_pDevice, _countof( writeDescriptorSets ), writeDescriptorSets, 0, nullptr );
+ }
+
+ // Companion window descriptor sets
+ {
+ VkDescriptorImageInfo imageInfo = {};
+ imageInfo.imageView = m_leftEyeDesc.m_pImageView;
+ imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+
+ VkWriteDescriptorSet writeDescriptorSets[ 1 ] = { };
+ writeDescriptorSets[ 0 ].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ writeDescriptorSets[ 0 ].dstSet = m_pDescriptorSets[ DESCRIPTOR_SET_COMPANION_LEFT_TEXTURE ];
+ writeDescriptorSets[ 0 ].dstBinding = 1;
+ writeDescriptorSets[ 0 ].descriptorCount = 1;
+ writeDescriptorSets[ 0 ].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+ writeDescriptorSets[ 0 ].pImageInfo = &imageInfo;
+ vkUpdateDescriptorSets( m_pDevice, _countof( writeDescriptorSets ), writeDescriptorSets, 0, nullptr );
+
+ imageInfo.imageView = m_rightEyeDesc.m_pImageView;
+ writeDescriptorSets[ 0 ].dstSet = m_pDescriptorSets[ DESCRIPTOR_SET_COMPANION_RIGHT_TEXTURE ];
+ vkUpdateDescriptorSets( m_pDevice, _countof( writeDescriptorSets ), writeDescriptorSets, 0, nullptr );
+ }
+
+}
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//-----------------------------------------------------------------------------
+bool CMainApplication::SetupTexturemaps()
+{
+ std::string sExecutableDirectory = Path_StripFilename( Path_GetExecutablePath() );
+ std::string strFullPath = Path_MakeAbsolute( "../cube_texture.png", sExecutableDirectory );
+
+ std::vector< unsigned char > imageRGBA;
+ unsigned nImageWidth, nImageHeight;
+ unsigned nError = lodepng::decode( imageRGBA, nImageWidth, nImageHeight, strFullPath.c_str() );
+
+ if ( nError != 0 )
+ return false;
+
+ // Copy the base level into a buffer and reserve space for the mips (over-reserve a bit to avoid calculating the exact mip chain size ahead of time)
+ VkDeviceSize nBufferSize = 0;
+ uint8_t *pBuffer = new uint8_t[ nImageWidth * nImageHeight * 4 * 2 ];
+ uint8_t *pPrevBuffer = pBuffer;
+ uint8_t *pCurBuffer = pBuffer;
+ memcpy( pCurBuffer, &imageRGBA[0], sizeof( uint8_t ) * nImageWidth * nImageHeight * 4 );
+ pCurBuffer += sizeof( uint8_t ) * nImageWidth * nImageHeight * 4;
+
+ std::vector< VkBufferImageCopy > bufferImageCopies;
+ VkBufferImageCopy bufferImageCopy = {};
+ bufferImageCopy.bufferOffset = 0;
+ bufferImageCopy.bufferRowLength = 0;
+ bufferImageCopy.bufferImageHeight = 0;
+ bufferImageCopy.imageSubresource.baseArrayLayer = 0;
+ bufferImageCopy.imageSubresource.layerCount = 1;
+ bufferImageCopy.imageSubresource.mipLevel = 0;
+ bufferImageCopy.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ bufferImageCopy.imageOffset.x = 0;
+ bufferImageCopy.imageOffset.y = 0;
+ bufferImageCopy.imageOffset.z = 0;
+ bufferImageCopy.imageExtent.width = nImageWidth;
+ bufferImageCopy.imageExtent.height = nImageHeight;
+ bufferImageCopy.imageExtent.depth = 1;
+ bufferImageCopies.push_back( bufferImageCopy );
+
+ int nMipWidth = nImageWidth;
+ int nMipHeight = nImageHeight;
+
+ while( nMipWidth > 1 && nMipHeight > 1 )
+ {
+ GenMipMapRGBA( pPrevBuffer, pCurBuffer, nMipWidth, nMipHeight, &nMipWidth, &nMipHeight );
+ bufferImageCopy.bufferOffset = pCurBuffer - pBuffer;
+ bufferImageCopy.imageSubresource.mipLevel++;
+ bufferImageCopy.imageExtent.width = nMipWidth;
+ bufferImageCopy.imageExtent.height = nMipHeight;
+ bufferImageCopies.push_back( bufferImageCopy );
+ pPrevBuffer = pCurBuffer;
+ pCurBuffer += ( nMipWidth * nMipHeight * 4 * sizeof( uint8_t ) );
+ }
+ nBufferSize = pCurBuffer - pBuffer;
+
+ // Create the image
+ VkImageCreateInfo imageCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+ imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
+ imageCreateInfo.extent.width = nImageWidth;
+ imageCreateInfo.extent.height = nImageHeight;
+ imageCreateInfo.extent.depth = 1;
+ imageCreateInfo.mipLevels = ( uint32_t ) bufferImageCopies.size();
+ imageCreateInfo.arrayLayers = 1;
+ imageCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
+ imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+ imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+ imageCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ imageCreateInfo.flags = 0;
+ vkCreateImage( m_pDevice, &imageCreateInfo, nullptr, &m_pSceneImage );
+
+ VkMemoryRequirements memoryRequirements = {};
+ vkGetImageMemoryRequirements( m_pDevice, m_pSceneImage, &memoryRequirements );
+
+ VkMemoryAllocateInfo memoryAllocateInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+ memoryAllocateInfo.allocationSize = memoryRequirements.size;
+ MemoryTypeFromProperties( m_physicalDeviceMemoryProperties, memoryRequirements.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &memoryAllocateInfo.memoryTypeIndex );
+ vkAllocateMemory( m_pDevice, &memoryAllocateInfo, nullptr, &m_pSceneImageMemory );
+ vkBindImageMemory( m_pDevice, m_pSceneImage, m_pSceneImageMemory, 0 );
+
+ VkImageViewCreateInfo imageViewCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO };
+ imageViewCreateInfo.flags = 0;
+ imageViewCreateInfo.image = m_pSceneImage;
+ imageViewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
+ imageViewCreateInfo.format = imageCreateInfo.format;
+ imageViewCreateInfo.components = { VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY };
+ imageViewCreateInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ imageViewCreateInfo.subresourceRange.baseMipLevel = 0;
+ imageViewCreateInfo.subresourceRange.levelCount = imageCreateInfo.mipLevels;
+ imageViewCreateInfo.subresourceRange.baseArrayLayer = 0;
+ imageViewCreateInfo.subresourceRange.layerCount = 1;
+ vkCreateImageView( m_pDevice, &imageViewCreateInfo, nullptr, &m_pSceneImageView );
+
+ // Create a staging buffer
+ if ( !CreateVulkanBuffer( m_pDevice, m_physicalDeviceMemoryProperties, pBuffer, nBufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, &m_pSceneStagingBuffer, &m_pSceneStagingBufferMemory ) )
+ {
+ return false;
+ }
+
+ // Transition the image to TRANSFER_DST to receive image
+ VkImageMemoryBarrier imageMemoryBarrier = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER };
+ imageMemoryBarrier.srcAccessMask = 0;
+ imageMemoryBarrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ imageMemoryBarrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+ imageMemoryBarrier.image = m_pSceneImage;
+ imageMemoryBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ imageMemoryBarrier.subresourceRange.baseMipLevel = 0;
+ imageMemoryBarrier.subresourceRange.levelCount = imageCreateInfo.mipLevels;
+ imageMemoryBarrier.subresourceRange.baseArrayLayer = 0;
+ imageMemoryBarrier.subresourceRange.layerCount = 1;
+ imageMemoryBarrier.srcQueueFamilyIndex = m_nQueueFamilyIndex;
+ imageMemoryBarrier.dstQueueFamilyIndex = m_nQueueFamilyIndex;
+ vkCmdPipelineBarrier( m_currentCommandBuffer.m_pCommandBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0, 0, NULL, 0, NULL, 1, &imageMemoryBarrier );
+
+ // Issue the copy to fill the image data
+ vkCmdCopyBufferToImage( m_currentCommandBuffer.m_pCommandBuffer, m_pSceneStagingBuffer, m_pSceneImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, ( uint32_t ) bufferImageCopies.size(), &bufferImageCopies[ 0 ] );
+
+	// Transition the image to SHADER_READ_ONLY_OPTIMAL so it can be sampled by shaders
+ imageMemoryBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ imageMemoryBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ imageMemoryBarrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+ imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+ vkCmdPipelineBarrier( m_currentCommandBuffer.m_pCommandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, NULL, 0, NULL, 1, &imageMemoryBarrier );
+
+ // Create the sampler
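+	// Trilinear filtering with 16x anisotropy; maxLod covers the full mip chain created above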
+ VkSamplerCreateInfo samplerCreateInfo = { VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO };
+ samplerCreateInfo.magFilter = VK_FILTER_LINEAR;
+ samplerCreateInfo.minFilter = VK_FILTER_LINEAR;
+ samplerCreateInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
+ samplerCreateInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
+ samplerCreateInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
+ samplerCreateInfo.anisotropyEnable = VK_TRUE;
+ samplerCreateInfo.maxAnisotropy = 16.0f;
+ samplerCreateInfo.minLod = 0.0f;
+ samplerCreateInfo.maxLod = ( float ) imageCreateInfo.mipLevels;
+ vkCreateSampler( m_pDevice, &samplerCreateInfo, nullptr, &m_pSceneSampler );
+
+ delete [] pBuffer;
+ return true;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: generate the next mip level for an RGBA image
+//-----------------------------------------------------------------------------
+void CMainApplication::GenMipMapRGBA( const uint8_t *pSrc, uint8_t *pDst, int nSrcWidth, int nSrcHeight, int *pDstWidthOut, int *pDstHeightOut )
+{
+ *pDstWidthOut = nSrcWidth / 2;
+ if ( *pDstWidthOut <= 0 )
+ {
+ *pDstWidthOut = 1;
+ }
+ *pDstHeightOut = nSrcHeight / 2;
+ if ( *pDstHeightOut <= 0 )
+ {
+ *pDstHeightOut = 1;
+ }
+
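+	// Box filter: average each 2x2 block of source pixels into one destination pixel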
+ for ( int y = 0; y < *pDstHeightOut; y++ )
+ {
+ for ( int x = 0; x < *pDstWidthOut; x++ )
+ {
+ int nSrcIndex[4];
+ float r = 0.0f;
+ float g = 0.0f;
+ float b = 0.0f;
+ float a = 0.0f;
+
+ nSrcIndex[0] = ( ( ( y * 2 ) * nSrcWidth ) + ( x * 2 ) ) * 4;
+ nSrcIndex[1] = ( ( ( y * 2 ) * nSrcWidth ) + ( x * 2 + 1 ) ) * 4;
+ nSrcIndex[2] = ( ( ( ( y * 2 ) + 1 ) * nSrcWidth ) + ( x * 2 ) ) * 4;
+ nSrcIndex[3] = ( ( ( ( y * 2 ) + 1 ) * nSrcWidth ) + ( x * 2 + 1 ) ) * 4;
+
+ // Sum all pixels
+ for ( int nSample = 0; nSample < 4; nSample++ )
+ {
+ r += pSrc[ nSrcIndex[ nSample ] ];
+ g += pSrc[ nSrcIndex[ nSample ] + 1 ];
+ b += pSrc[ nSrcIndex[ nSample ] + 2 ];
+ a += pSrc[ nSrcIndex[ nSample ] + 3 ];
+ }
+
+ // Average results
+ r /= 4.0;
+ g /= 4.0;
+ b /= 4.0;
+ a /= 4.0;
+
+ // Store resulting pixels
+ pDst[ ( y * ( *pDstWidthOut ) + x ) * 4 ] = ( uint8_t ) ( r );
+ pDst[ ( y * ( *pDstWidthOut ) + x ) * 4 + 1] = ( uint8_t ) ( g );
+ pDst[ ( y * ( *pDstWidthOut ) + x ) * 4 + 2] = ( uint8_t ) ( b );
+ pDst[ ( y * ( *pDstWidthOut ) + x ) * 4 + 3] = ( uint8_t ) ( a );
+ }
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: create a sea of cubes
+//-----------------------------------------------------------------------------
+void CMainApplication::SetupScene()
+{
+ if ( !m_pHMD )
+ return;
+
+ std::vector<float> vertdataarray;
+
+ Matrix4 matScale;
+ matScale.scale( m_fScale, m_fScale, m_fScale );
+ Matrix4 matTransform;
+ matTransform.translate(
+ -( (float)m_iSceneVolumeWidth * m_fScaleSpacing ) / 2.f,
+ -( (float)m_iSceneVolumeHeight * m_fScaleSpacing ) / 2.f,
+ -( (float)m_iSceneVolumeDepth * m_fScaleSpacing ) / 2.f);
+
+ Matrix4 mat = matScale * matTransform;
+
+ for( int z = 0; z< m_iSceneVolumeDepth; z++ )
+ {
+ for( int y = 0; y< m_iSceneVolumeHeight; y++ )
+ {
+ for( int x = 0; x< m_iSceneVolumeWidth; x++ )
+ {
+ AddCubeToScene( mat, vertdataarray );
+ mat = mat * Matrix4().translate( m_fScaleSpacing, 0, 0 );
+ }
+ mat = mat * Matrix4().translate( -((float)m_iSceneVolumeWidth) * m_fScaleSpacing, m_fScaleSpacing, 0 );
+ }
+ mat = mat * Matrix4().translate( 0, -((float)m_iSceneVolumeHeight) * m_fScaleSpacing, m_fScaleSpacing );
+ }
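+	// Each cube vertex is 5 floats: position (x, y, z) followed by texture coordinates (u, v)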
+ m_uiVertcount = vertdataarray.size()/5;
+
+ // Create the vertex buffer and fill with data
+ if ( !CreateVulkanBuffer( m_pDevice, m_physicalDeviceMemoryProperties, &vertdataarray[ 0 ], vertdataarray.size() * sizeof( float ),
+ VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, &m_pSceneVertexBuffer, &m_pSceneVertexBufferMemory ) )
+ {
+ return;
+ }
+
+ // Create constant buffer to hold the per-eye CB data
+ for ( uint32_t nEye = 0; nEye < 2; nEye++ )
+ {
+ VkBufferCreateInfo bufferCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+ bufferCreateInfo.size = sizeof( Matrix4 );
+ bufferCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+ vkCreateBuffer( m_pDevice, &bufferCreateInfo, nullptr, &m_pSceneConstantBuffer[ nEye ] );
+
+ VkMemoryRequirements memoryRequirements = { };
+ vkGetBufferMemoryRequirements( m_pDevice, m_pSceneConstantBuffer[ nEye ], &memoryRequirements );
+
+ VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
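+		// Host-visible, coherent (and cached) memory lets the CPU update the matrix each frame without explicit flushes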
+ MemoryTypeFromProperties( m_physicalDeviceMemoryProperties, memoryRequirements.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT, &allocInfo.memoryTypeIndex );
+ allocInfo.allocationSize = memoryRequirements.size;
+
+ vkAllocateMemory( m_pDevice, &allocInfo, nullptr, &m_pSceneConstantBufferMemory[ nEye ] );
+ vkBindBufferMemory( m_pDevice, m_pSceneConstantBuffer[ nEye ], m_pSceneConstantBufferMemory[ nEye ], 0 );
+
+ // Map and keep mapped persistently
+ vkMapMemory( m_pDevice, m_pSceneConstantBufferMemory[ nEye ], 0, VK_WHOLE_SIZE, 0, &m_pSceneConstantBufferData[ nEye ] );
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//-----------------------------------------------------------------------------
+void CMainApplication::AddCubeVertex( float fl0, float fl1, float fl2, float fl3, float fl4, std::vector<float> &vertdata )
+{
+ vertdata.push_back( fl0 );
+ vertdata.push_back( fl1 );
+ vertdata.push_back( fl2 );
+ vertdata.push_back( fl3 );
+ vertdata.push_back( fl4 );
+}
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//-----------------------------------------------------------------------------
+void CMainApplication::AddCubeToScene( Matrix4 mat, std::vector<float> &vertdata )
+{
+
+ Vector4 A = mat * Vector4( 0, 0, 0, 1 );
+ Vector4 B = mat * Vector4( 1, 0, 0, 1 );
+ Vector4 C = mat * Vector4( 1, 1, 0, 1 );
+ Vector4 D = mat * Vector4( 0, 1, 0, 1 );
+ Vector4 E = mat * Vector4( 0, 0, 1, 1 );
+ Vector4 F = mat * Vector4( 1, 0, 1, 1 );
+ Vector4 G = mat * Vector4( 1, 1, 1, 1 );
+ Vector4 H = mat * Vector4( 0, 1, 1, 1 );
+
+ // triangles instead of quads
+ AddCubeVertex( E.x, E.y, E.z, 0, 1, vertdata ); //Front
+ AddCubeVertex( F.x, F.y, F.z, 1, 1, vertdata );
+ AddCubeVertex( G.x, G.y, G.z, 1, 0, vertdata );
+ AddCubeVertex( G.x, G.y, G.z, 1, 0, vertdata );
+ AddCubeVertex( H.x, H.y, H.z, 0, 0, vertdata );
+ AddCubeVertex( E.x, E.y, E.z, 0, 1, vertdata );
+
+ AddCubeVertex( B.x, B.y, B.z, 0, 1, vertdata ); //Back
+ AddCubeVertex( A.x, A.y, A.z, 1, 1, vertdata );
+ AddCubeVertex( D.x, D.y, D.z, 1, 0, vertdata );
+ AddCubeVertex( D.x, D.y, D.z, 1, 0, vertdata );
+ AddCubeVertex( C.x, C.y, C.z, 0, 0, vertdata );
+ AddCubeVertex( B.x, B.y, B.z, 0, 1, vertdata );
+
+ AddCubeVertex( H.x, H.y, H.z, 0, 1, vertdata ); //Top
+ AddCubeVertex( G.x, G.y, G.z, 1, 1, vertdata );
+ AddCubeVertex( C.x, C.y, C.z, 1, 0, vertdata );
+ AddCubeVertex( C.x, C.y, C.z, 1, 0, vertdata );
+ AddCubeVertex( D.x, D.y, D.z, 0, 0, vertdata );
+ AddCubeVertex( H.x, H.y, H.z, 0, 1, vertdata );
+
+ AddCubeVertex( A.x, A.y, A.z, 0, 1, vertdata ); //Bottom
+ AddCubeVertex( B.x, B.y, B.z, 1, 1, vertdata );
+ AddCubeVertex( F.x, F.y, F.z, 1, 0, vertdata );
+ AddCubeVertex( F.x, F.y, F.z, 1, 0, vertdata );
+ AddCubeVertex( E.x, E.y, E.z, 0, 0, vertdata );
+ AddCubeVertex( A.x, A.y, A.z, 0, 1, vertdata );
+
+ AddCubeVertex( A.x, A.y, A.z, 0, 1, vertdata ); //Left
+ AddCubeVertex( E.x, E.y, E.z, 1, 1, vertdata );
+ AddCubeVertex( H.x, H.y, H.z, 1, 0, vertdata );
+ AddCubeVertex( H.x, H.y, H.z, 1, 0, vertdata );
+ AddCubeVertex( D.x, D.y, D.z, 0, 0, vertdata );
+ AddCubeVertex( A.x, A.y, A.z, 0, 1, vertdata );
+
+ AddCubeVertex( F.x, F.y, F.z, 0, 1, vertdata ); //Right
+ AddCubeVertex( B.x, B.y, B.z, 1, 1, vertdata );
+ AddCubeVertex( C.x, C.y, C.z, 1, 0, vertdata );
+ AddCubeVertex( C.x, C.y, C.z, 1, 0, vertdata );
+ AddCubeVertex( G.x, G.y, G.z, 0, 0, vertdata );
+ AddCubeVertex( F.x, F.y, F.z, 0, 1, vertdata );
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Update the vertex data for the controllers as X/Y/Z lines
+//-----------------------------------------------------------------------------
+void CMainApplication::UpdateControllerAxes()
+{
+ // don't draw controllers if somebody else has input focus
+ if( m_pHMD->IsInputFocusCapturedByAnotherProcess() )
+ return;
+
+ std::vector<float> vertdataarray;
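+	// Each line vertex is 6 floats: position (x, y, z) followed by color (r, g, b)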
+
+ m_uiControllerVertcount = 0;
+ m_iTrackedControllerCount = 0;
+
+ for ( vr::TrackedDeviceIndex_t unTrackedDevice = vr::k_unTrackedDeviceIndex_Hmd + 1; unTrackedDevice < vr::k_unMaxTrackedDeviceCount; ++unTrackedDevice )
+ {
+ if ( !m_pHMD->IsTrackedDeviceConnected( unTrackedDevice ) )
+ continue;
+
+ if( m_pHMD->GetTrackedDeviceClass( unTrackedDevice ) != vr::TrackedDeviceClass_Controller )
+ continue;
+
+ m_iTrackedControllerCount += 1;
+
+ if( !m_rTrackedDevicePose[ unTrackedDevice ].bPoseIsValid )
+ continue;
+
+ const Matrix4 & mat = m_rmat4DevicePose[unTrackedDevice];
+
+ Vector4 center = mat * Vector4( 0, 0, 0, 1 );
+
+ for ( int i = 0; i < 3; ++i )
+ {
+ Vector3 color( 0, 0, 0 );
+ Vector4 point( 0, 0, 0, 1 );
+ point[i] += 0.05f; // offset in X, Y, Z
+ color[i] = 1.0; // R, G, B
+ point = mat * point;
+ vertdataarray.push_back( center.x );
+ vertdataarray.push_back( center.y );
+ vertdataarray.push_back( center.z );
+
+ vertdataarray.push_back( color.x );
+ vertdataarray.push_back( color.y );
+ vertdataarray.push_back( color.z );
+
+ vertdataarray.push_back( point.x );
+ vertdataarray.push_back( point.y );
+ vertdataarray.push_back( point.z );
+
+ vertdataarray.push_back( color.x );
+ vertdataarray.push_back( color.y );
+ vertdataarray.push_back( color.z );
+
+ m_uiControllerVertcount += 2;
+ }
+
+ Vector4 start = mat * Vector4( 0, 0, -0.02f, 1 );
+ Vector4 end = mat * Vector4( 0, 0, -39.f, 1 );
+ Vector3 color( .92f, .92f, .71f );
+
+		vertdataarray.push_back( start.x ); vertdataarray.push_back( start.y ); vertdataarray.push_back( start.z );
+		vertdataarray.push_back( color.x ); vertdataarray.push_back( color.y ); vertdataarray.push_back( color.z );
+
+		vertdataarray.push_back( end.x ); vertdataarray.push_back( end.y ); vertdataarray.push_back( end.z );
+		vertdataarray.push_back( color.x ); vertdataarray.push_back( color.y ); vertdataarray.push_back( color.z );
+ m_uiControllerVertcount += 2;
+ }
+
+ // Setup the VB the first time through.
+ if ( m_pControllerAxesVertexBuffer == VK_NULL_HANDLE && vertdataarray.size() > 0 )
+ {
+		// Size the buffer to hold axis lines for the maximum number of tracked devices
+ VkDeviceSize nSize = sizeof( float ) * vertdataarray.size();
+ nSize *= vr::k_unMaxTrackedDeviceCount;
+
+ if ( !CreateVulkanBuffer( m_pDevice, m_physicalDeviceMemoryProperties, nullptr, nSize,
+ VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, &m_pControllerAxesVertexBuffer, &m_pControllerAxesVertexBufferMemory ) )
+ {
+ return;
+ }
+ }
+
+ // Update the VB data
+ if ( m_pControllerAxesVertexBuffer != VK_NULL_HANDLE && vertdataarray.size() > 0 )
+ {
+ void *pData;
+ VkResult nResult = vkMapMemory( m_pDevice, m_pControllerAxesVertexBufferMemory, 0, VK_WHOLE_SIZE, 0, &pData );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkMapMemory returned error %d\n", nResult );
+ return;
+ }
+ memcpy( pData, &vertdataarray[ 0 ], vertdataarray.size() * sizeof( float ) );
+ vkUnmapMemory( m_pDevice, m_pControllerAxesVertexBufferMemory );
+
+ VkMappedMemoryRange memoryRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
+ memoryRange.memory = m_pControllerAxesVertexBufferMemory;
+ memoryRange.size = VK_WHOLE_SIZE;
+ vkFlushMappedMemoryRanges( m_pDevice, 1, &memoryRange );
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//-----------------------------------------------------------------------------
+void CMainApplication::SetupCameras()
+{
+ m_mat4ProjectionLeft = GetHMDMatrixProjectionEye( vr::Eye_Left );
+ m_mat4ProjectionRight = GetHMDMatrixProjectionEye( vr::Eye_Right );
+ m_mat4eyePosLeft = GetHMDMatrixPoseEye( vr::Eye_Left );
+ m_mat4eyePosRight = GetHMDMatrixPoseEye( vr::Eye_Right );
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Creates a frame buffer. Returns true if the buffer was set up.
+// Returns false if the setup failed.
+//-----------------------------------------------------------------------------
+bool CMainApplication::CreateFrameBuffer( int nWidth, int nHeight, FramebufferDesc &framebufferDesc )
+{
+ //---------------------------//
+ // Create color target //
+ //---------------------------//
+ VkImageCreateInfo imageCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+ imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
+ imageCreateInfo.extent.width = nWidth;
+ imageCreateInfo.extent.height = nHeight;
+ imageCreateInfo.extent.depth = 1;
+ imageCreateInfo.mipLevels = 1;
+ imageCreateInfo.arrayLayers = 1;
+ imageCreateInfo.format = VK_FORMAT_R8G8B8A8_SRGB;
+ imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+ imageCreateInfo.samples = ( VkSampleCountFlagBits ) m_nMSAASampleCount;
+ imageCreateInfo.usage = ( VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT );
+ imageCreateInfo.flags = 0;
+
+ VkResult nResult;
+ nResult = vkCreateImage( m_pDevice, &imageCreateInfo, nullptr, &framebufferDesc.m_pImage );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkCreateImage failed for eye image with error %d\n", nResult );
+ return false;
+ }
+ VkMemoryRequirements memoryRequirements = {};
+ vkGetImageMemoryRequirements( m_pDevice, framebufferDesc.m_pImage, &memoryRequirements );
+
+ VkMemoryAllocateInfo memoryAllocateInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+ memoryAllocateInfo.allocationSize = memoryRequirements.size;
+ if ( !MemoryTypeFromProperties( m_physicalDeviceMemoryProperties, memoryRequirements.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &memoryAllocateInfo.memoryTypeIndex ) )
+ {
+ dprintf( "Failed to find memory type matching requirements.\n" );
+ return false;
+ }
+
+ nResult = vkAllocateMemory( m_pDevice, &memoryAllocateInfo, nullptr, &framebufferDesc.m_pDeviceMemory );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "Failed to find memory for image.\n" );
+ return false;
+ }
+
+ nResult = vkBindImageMemory( m_pDevice, framebufferDesc.m_pImage, framebufferDesc.m_pDeviceMemory, 0 );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "Failed to bind memory for image.\n" );
+ return false;
+ }
+
+ VkImageViewCreateInfo imageViewCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO };
+ imageViewCreateInfo.flags = 0;
+ imageViewCreateInfo.image = framebufferDesc.m_pImage;
+ imageViewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
+ imageViewCreateInfo.format = imageCreateInfo.format;
+ imageViewCreateInfo.components = { VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY };
+ imageViewCreateInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ imageViewCreateInfo.subresourceRange.baseMipLevel = 0;
+ imageViewCreateInfo.subresourceRange.levelCount = 1;
+ imageViewCreateInfo.subresourceRange.baseArrayLayer = 0;
+ imageViewCreateInfo.subresourceRange.layerCount = 1;
+ nResult = vkCreateImageView( m_pDevice, &imageViewCreateInfo, nullptr, &framebufferDesc.m_pImageView );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkCreateImageView failed with error %d\n", nResult );
+ return false;
+ }
+
+ //-----------------------------------//
+ // Create depth/stencil target //
+ //-----------------------------------//
+ imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
+ imageCreateInfo.format = VK_FORMAT_D32_SFLOAT;
+ imageCreateInfo.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+ nResult = vkCreateImage( m_pDevice, &imageCreateInfo, nullptr, &framebufferDesc.m_pDepthStencilImage );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkCreateImage failed for eye depth buffer with error %d\n", nResult );
+ return false;
+ }
+ vkGetImageMemoryRequirements( m_pDevice, framebufferDesc.m_pDepthStencilImage, &memoryRequirements );
+
+ memoryAllocateInfo.allocationSize = memoryRequirements.size;
+ if ( !MemoryTypeFromProperties( m_physicalDeviceMemoryProperties, memoryRequirements.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &memoryAllocateInfo.memoryTypeIndex ) )
+ {
+ dprintf( "Failed to find memory type matching requirements.\n" );
+ return false;
+ }
+
+ nResult = vkAllocateMemory( m_pDevice, &memoryAllocateInfo, nullptr, &framebufferDesc.m_pDepthStencilDeviceMemory );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "Failed to find memory for image.\n" );
+ return false;
+ }
+
+ nResult = vkBindImageMemory( m_pDevice, framebufferDesc.m_pDepthStencilImage, framebufferDesc.m_pDepthStencilDeviceMemory, 0 );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "Failed to bind memory for image.\n" );
+ return false;
+ }
+
+ imageViewCreateInfo.image = framebufferDesc.m_pDepthStencilImage;
+ imageViewCreateInfo.format = imageCreateInfo.format;
+ imageViewCreateInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
+ nResult = vkCreateImageView( m_pDevice, &imageViewCreateInfo, nullptr, &framebufferDesc.m_pDepthStencilImageView );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkCreateImageView failed with error %d\n", nResult );
+ return false;
+ }
+
+ // Create a renderpass
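+	// One subpass with a single color attachment and a single depth attachment; both stay in attachment-optimal layouts for the whole pass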
+ uint32_t nTotalAttachments = 2;
+ VkAttachmentDescription attachmentDescs[ 2 ];
+ VkAttachmentReference attachmentReferences[ 2 ];
+ attachmentReferences[ 0 ].attachment = 0;
+ attachmentReferences[ 0 ].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ attachmentReferences[ 1 ].attachment = 1;
+ attachmentReferences[ 1 ].layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+
+ attachmentDescs[ 0 ].format = VK_FORMAT_R8G8B8A8_SRGB;
+ attachmentDescs[ 0 ].samples = imageCreateInfo.samples;
+ attachmentDescs[ 0 ].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+ attachmentDescs[ 0 ].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ attachmentDescs[ 0 ].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ attachmentDescs[ 0 ].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ attachmentDescs[ 0 ].initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ attachmentDescs[ 0 ].finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ attachmentDescs[ 0 ].flags = 0;
+
+ attachmentDescs[ 1 ].format = VK_FORMAT_D32_SFLOAT;
+ attachmentDescs[ 1 ].samples = imageCreateInfo.samples;
+ attachmentDescs[ 1 ].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+ attachmentDescs[ 1 ].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ attachmentDescs[ 1 ].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ attachmentDescs[ 1 ].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ attachmentDescs[ 1 ].initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ attachmentDescs[ 1 ].finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ attachmentDescs[ 1 ].flags = 0;
+
+ VkSubpassDescription subPassCreateInfo = { };
+ subPassCreateInfo.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+ subPassCreateInfo.flags = 0;
+ subPassCreateInfo.inputAttachmentCount = 0;
+ subPassCreateInfo.pInputAttachments = NULL;
+ subPassCreateInfo.colorAttachmentCount = 1;
+ subPassCreateInfo.pColorAttachments = &attachmentReferences[ 0 ];
+ subPassCreateInfo.pResolveAttachments = NULL;
+ subPassCreateInfo.pDepthStencilAttachment = &attachmentReferences[ 1 ];
+ subPassCreateInfo.preserveAttachmentCount = 0;
+ subPassCreateInfo.pPreserveAttachments = NULL;
+
+ VkRenderPassCreateInfo renderPassCreateInfo = { };
+ renderPassCreateInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+ renderPassCreateInfo.flags = 0;
+	renderPassCreateInfo.attachmentCount = nTotalAttachments;
+ renderPassCreateInfo.pAttachments = &attachmentDescs[ 0 ];
+ renderPassCreateInfo.subpassCount = 1;
+ renderPassCreateInfo.pSubpasses = &subPassCreateInfo;
+ renderPassCreateInfo.dependencyCount = 0;
+ renderPassCreateInfo.pDependencies = NULL;
+
+ nResult = vkCreateRenderPass( m_pDevice, &renderPassCreateInfo, NULL, &framebufferDesc.m_pRenderPass );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkCreateRenderPass failed with error %d.\n", nResult );
+ return false;
+ }
+
+ // Create the framebuffer
+ VkImageView attachments[ 2 ] = { framebufferDesc.m_pImageView, framebufferDesc.m_pDepthStencilImageView };
+ VkFramebufferCreateInfo framebufferCreateInfo = { VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO };
+ framebufferCreateInfo.renderPass = framebufferDesc.m_pRenderPass;
+ framebufferCreateInfo.attachmentCount = 2;
+ framebufferCreateInfo.pAttachments = &attachments[ 0 ];
+ framebufferCreateInfo.width = nWidth;
+ framebufferCreateInfo.height = nHeight;
+ framebufferCreateInfo.layers = 1;
+ nResult = vkCreateFramebuffer( m_pDevice, &framebufferCreateInfo, NULL, &framebufferDesc.m_pFramebuffer );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkCreateFramebuffer failed with error %d.\n", nResult );
+ return false;
+ }
+
+ framebufferDesc.m_nImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ framebufferDesc.m_nDepthStencilImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ return true;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Get an available command buffer or create a new one if none is
+// available. Associate a fence with the command buffer.
+//-----------------------------------------------------------------------------
+CMainApplication::VulkanCommandBuffer_t CMainApplication::GetCommandBuffer()
+{
+ VulkanCommandBuffer_t commandBuffer;
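+	// Reuse the command buffer at the back of the list if the GPU has finished with it (its fence has signaled); otherwise allocate a new one below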
+ if ( m_commandBuffers.size() > 0 )
+ {
+ // If the fence associated with the command buffer has finished, reset it and return it
+ if ( vkGetFenceStatus( m_pDevice, m_commandBuffers.back().m_pFence ) == VK_SUCCESS )
+ {
+ VulkanCommandBuffer_t *pCmdBuffer = &m_commandBuffers.back();
+ commandBuffer.m_pCommandBuffer = pCmdBuffer->m_pCommandBuffer;
+ commandBuffer.m_pFence = pCmdBuffer->m_pFence;
+
+ vkResetCommandBuffer( commandBuffer.m_pCommandBuffer, VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT );
+ vkResetFences( m_pDevice, 1, &commandBuffer.m_pFence );
+ m_commandBuffers.pop_back();
+ return commandBuffer;
+ }
+ }
+
+ // Create a new command buffer and associated fence
+ VkCommandBufferAllocateInfo commandBufferAllocateInfo = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO };
+ commandBufferAllocateInfo.commandBufferCount = 1;
+ commandBufferAllocateInfo.commandPool = m_pCommandPool;
+ commandBufferAllocateInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
+ vkAllocateCommandBuffers( m_pDevice, &commandBufferAllocateInfo, &commandBuffer.m_pCommandBuffer );
+
+ VkFenceCreateInfo fenceCreateInfo = { VK_STRUCTURE_TYPE_FENCE_CREATE_INFO };
+ vkCreateFence( m_pDevice, &fenceCreateInfo, nullptr, &commandBuffer.m_pFence );
+ return commandBuffer;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//-----------------------------------------------------------------------------
+bool CMainApplication::SetupStereoRenderTargets()
+{
+ if ( !m_pHMD )
+ return false;
+
+ m_pHMD->GetRecommendedRenderTargetSize( &m_nRenderWidth, &m_nRenderHeight );
+ m_nRenderWidth = ( uint32_t )( m_flSuperSampleScale * ( float ) m_nRenderWidth );
+ m_nRenderHeight = ( uint32_t )( m_flSuperSampleScale * ( float ) m_nRenderHeight );
+
+ CreateFrameBuffer( m_nRenderWidth, m_nRenderHeight, m_leftEyeDesc );
+ CreateFrameBuffer( m_nRenderWidth, m_nRenderHeight, m_rightEyeDesc );
+ return true;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//-----------------------------------------------------------------------------
+void CMainApplication::SetupCompanionWindow()
+{
+ if ( !m_pHMD )
+ return;
+
+ std::vector<VertexDataWindow> vVerts;
+
+ // left eye verts
+ vVerts.push_back( VertexDataWindow( Vector2(-1, -1), Vector2(0, 1)) );
+ vVerts.push_back( VertexDataWindow( Vector2(0, -1), Vector2(1, 1)) );
+ vVerts.push_back( VertexDataWindow( Vector2(-1, 1), Vector2(0, 0)) );
+ vVerts.push_back( VertexDataWindow( Vector2(0, 1), Vector2(1, 0)) );
+
+ // right eye verts
+ vVerts.push_back( VertexDataWindow( Vector2(0, -1), Vector2(0, 1)) );
+ vVerts.push_back( VertexDataWindow( Vector2(1, -1), Vector2(1, 1)) );
+ vVerts.push_back( VertexDataWindow( Vector2(0, 1), Vector2(0, 0)) );
+ vVerts.push_back( VertexDataWindow( Vector2(1, 1), Vector2(1, 0)) );
+
+ // Create the vertex buffer and fill with data
+ if ( !CreateVulkanBuffer( m_pDevice, m_physicalDeviceMemoryProperties, &vVerts[ 0 ], sizeof( VertexDataWindow ) * vVerts.size(),
+ VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, &m_pCompanionWindowVertexBuffer, &m_pCompanionWindowVertexBufferMemory ) )
+ {
+ return;
+ }
+
+ // Create index buffer
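+	// Two triangles per eye quad: the first six indices reference the left eye vertices (0-3), the last six the right eye vertices (4-7)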
+ uint16_t vIndices[] = { 0, 1, 3, 0, 3, 2, 4, 5, 7, 4, 7, 6};
+ m_uiCompanionWindowIndexSize = _countof( vIndices );
+ if ( !CreateVulkanBuffer( m_pDevice, m_physicalDeviceMemoryProperties, &vIndices[ 0 ], sizeof( vIndices ),
+ VK_BUFFER_USAGE_INDEX_BUFFER_BIT, &m_pCompanionWindowIndexBuffer, &m_pCompanionWindowIndexBufferMemory ) )
+ {
+ return;
+ }
+
+ // Transition all of the swapchain images to PRESENT_SRC so they are ready for presentation
+ for ( size_t nSwapchainImage = 0; nSwapchainImage < m_swapchainImages.size(); nSwapchainImage++ )
+ {
+ VkImageMemoryBarrier imageMemoryBarrier = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER };
+ imageMemoryBarrier.srcAccessMask = 0;
+ imageMemoryBarrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ imageMemoryBarrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
+ imageMemoryBarrier.image = m_swapchainImages[ nSwapchainImage ];
+ imageMemoryBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ imageMemoryBarrier.subresourceRange.baseMipLevel = 0;
+ imageMemoryBarrier.subresourceRange.levelCount = 1;
+ imageMemoryBarrier.subresourceRange.baseArrayLayer = 0;
+ imageMemoryBarrier.subresourceRange.layerCount = 1;
+ imageMemoryBarrier.srcQueueFamilyIndex = m_nQueueFamilyIndex;
+ imageMemoryBarrier.dstQueueFamilyIndex = m_nQueueFamilyIndex;
+ vkCmdPipelineBarrier( m_currentCommandBuffer.m_pCommandBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0, 0, NULL, 0, NULL, 1, &imageMemoryBarrier );
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//-----------------------------------------------------------------------------
+void CMainApplication::RenderStereoTargets()
+{
+
+ // Set viewport and scissor
+	VkViewport viewport = { 0.0f, 0.0f, ( float ) m_nRenderWidth, ( float ) m_nRenderHeight, 0.0f, 1.0f };
+ vkCmdSetViewport( m_currentCommandBuffer.m_pCommandBuffer, 0, 1, &viewport );
+ VkRect2D scissor = { 0, 0, m_nRenderWidth, m_nRenderHeight };
+ vkCmdSetScissor( m_currentCommandBuffer.m_pCommandBuffer, 0, 1, &scissor );
+
+ //----------//
+ // Left Eye //
+ //----------//
+ // Transition eye image to VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
+ VkImageMemoryBarrier imageMemoryBarrier = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER };
+ imageMemoryBarrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT;
+ imageMemoryBarrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ imageMemoryBarrier.oldLayout = m_leftEyeDesc.m_nImageLayout;
+ imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ imageMemoryBarrier.image = m_leftEyeDesc.m_pImage;
+ imageMemoryBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ imageMemoryBarrier.subresourceRange.baseMipLevel = 0;
+ imageMemoryBarrier.subresourceRange.levelCount = 1;
+ imageMemoryBarrier.subresourceRange.baseArrayLayer = 0;
+ imageMemoryBarrier.subresourceRange.layerCount = 1;
+ imageMemoryBarrier.srcQueueFamilyIndex = m_nQueueFamilyIndex;
+ imageMemoryBarrier.dstQueueFamilyIndex = m_nQueueFamilyIndex;
+ vkCmdPipelineBarrier( m_currentCommandBuffer.m_pCommandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0, 0, NULL, 0, NULL, 1, &imageMemoryBarrier );
+ m_leftEyeDesc.m_nImageLayout = imageMemoryBarrier.newLayout;
+
+ // Transition the depth buffer to VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL on first use
+ if ( m_leftEyeDesc.m_nDepthStencilImageLayout == VK_IMAGE_LAYOUT_UNDEFINED )
+ {
+ imageMemoryBarrier.image = m_leftEyeDesc.m_pDepthStencilImage;
+ imageMemoryBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
+ imageMemoryBarrier.srcAccessMask = 0;
+ imageMemoryBarrier.dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+ imageMemoryBarrier.oldLayout = m_leftEyeDesc.m_nDepthStencilImageLayout;
+ imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ vkCmdPipelineBarrier( m_currentCommandBuffer.m_pCommandBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &imageMemoryBarrier );
+ m_leftEyeDesc.m_nDepthStencilImageLayout = imageMemoryBarrier.newLayout;
+ }
+
+ // Start the renderpass
+ VkRenderPassBeginInfo renderPassBeginInfo = { VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO };
+ renderPassBeginInfo.renderPass = m_leftEyeDesc.m_pRenderPass;
+ renderPassBeginInfo.framebuffer = m_leftEyeDesc.m_pFramebuffer;
+ renderPassBeginInfo.renderArea.offset.x = 0;
+ renderPassBeginInfo.renderArea.offset.y = 0;
+ renderPassBeginInfo.renderArea.extent.width = m_nRenderWidth;
+ renderPassBeginInfo.renderArea.extent.height = m_nRenderHeight;
+ renderPassBeginInfo.clearValueCount = 2;
+ VkClearValue clearValues[ 2 ];
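+	// Clear color to opaque black and depth to 1.0 (the far plane)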
+ clearValues[ 0 ].color.float32[ 0 ] = 0.0f;
+ clearValues[ 0 ].color.float32[ 1 ] = 0.0f;
+ clearValues[ 0 ].color.float32[ 2 ] = 0.0f;
+ clearValues[ 0 ].color.float32[ 3 ] = 1.0f;
+ clearValues[ 1 ].depthStencil.depth = 1.0f;
+ clearValues[ 1 ].depthStencil.stencil = 0;
+ renderPassBeginInfo.pClearValues = &clearValues[ 0 ];
+ vkCmdBeginRenderPass( m_currentCommandBuffer.m_pCommandBuffer, &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE );
+
+ RenderScene( vr::Eye_Left );
+
+ vkCmdEndRenderPass( m_currentCommandBuffer.m_pCommandBuffer );
+
+ // Transition eye image to VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL for display on the companion window
+ imageMemoryBarrier.image = m_leftEyeDesc.m_pImage;
+ imageMemoryBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ imageMemoryBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ imageMemoryBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ imageMemoryBarrier.oldLayout = m_leftEyeDesc.m_nImageLayout;
+ imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+ vkCmdPipelineBarrier( m_currentCommandBuffer.m_pCommandBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, NULL, 0, NULL, 1, &imageMemoryBarrier );
+ m_leftEyeDesc.m_nImageLayout = imageMemoryBarrier.newLayout;
+
+ //-----------//
+ // Right Eye //
+ //-----------//
+ // Transition to VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
+ imageMemoryBarrier.image = m_rightEyeDesc.m_pImage;
+ imageMemoryBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ imageMemoryBarrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT;
+ imageMemoryBarrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ imageMemoryBarrier.oldLayout = m_rightEyeDesc.m_nImageLayout;
+ imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ vkCmdPipelineBarrier( m_currentCommandBuffer.m_pCommandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0, 0, NULL, 0, NULL, 1, &imageMemoryBarrier );
+ m_rightEyeDesc.m_nImageLayout = imageMemoryBarrier.newLayout;
+
+ // Transition the depth buffer to VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL on first use
+ if ( m_rightEyeDesc.m_nDepthStencilImageLayout == VK_IMAGE_LAYOUT_UNDEFINED )
+ {
+ imageMemoryBarrier.image = m_rightEyeDesc.m_pDepthStencilImage;
+ imageMemoryBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
+ imageMemoryBarrier.srcAccessMask = 0;
+ imageMemoryBarrier.dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+ imageMemoryBarrier.oldLayout = m_rightEyeDesc.m_nDepthStencilImageLayout;
+ imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ vkCmdPipelineBarrier( m_currentCommandBuffer.m_pCommandBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &imageMemoryBarrier );
+ m_rightEyeDesc.m_nDepthStencilImageLayout = imageMemoryBarrier.newLayout;
+ }
+
+ // Start the renderpass
+ renderPassBeginInfo.renderPass = m_rightEyeDesc.m_pRenderPass;
+ renderPassBeginInfo.framebuffer = m_rightEyeDesc.m_pFramebuffer;
+ renderPassBeginInfo.pClearValues = &clearValues[ 0 ];
+ vkCmdBeginRenderPass( m_currentCommandBuffer.m_pCommandBuffer, &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE );
+
+ RenderScene( vr::Eye_Right );
+
+ vkCmdEndRenderPass( m_currentCommandBuffer.m_pCommandBuffer );
+
+ // Transition eye image to VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL for display on the companion window
+ imageMemoryBarrier.image = m_rightEyeDesc.m_pImage;
+ imageMemoryBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ imageMemoryBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ imageMemoryBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ imageMemoryBarrier.oldLayout = m_rightEyeDesc.m_nImageLayout;
+ imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+ vkCmdPipelineBarrier( m_currentCommandBuffer.m_pCommandBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, NULL, 0, NULL, 1, &imageMemoryBarrier );
+ m_rightEyeDesc.m_nImageLayout = imageMemoryBarrier.newLayout;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Renders the scene for the specified eye (nEye).
+//-----------------------------------------------------------------------------
+void CMainApplication::RenderScene( vr::Hmd_Eye nEye )
+{
+ if( m_bShowCubes )
+ {
+ vkCmdBindPipeline( m_currentCommandBuffer.m_pCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, m_pPipelines[ PSO_SCENE ] );
+
+ // Update the persistently mapped pointer to the CB data with the latest matrix
+ memcpy( m_pSceneConstantBufferData[ nEye ], GetCurrentViewProjectionMatrix( nEye ).get(), sizeof( Matrix4 ) );
+
+ vkCmdBindDescriptorSets( m_currentCommandBuffer.m_pCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, m_pPipelineLayout, 0, 1, &m_pDescriptorSets[ DESCRIPTOR_SET_LEFT_EYE_SCENE + nEye ], 0, nullptr );
+
+ // Draw
+ VkDeviceSize nOffsets[ 1 ] = { 0 };
+ vkCmdBindVertexBuffers( m_currentCommandBuffer.m_pCommandBuffer, 0, 1, &m_pSceneVertexBuffer, &nOffsets[ 0 ] );
+ vkCmdDraw( m_currentCommandBuffer.m_pCommandBuffer, m_uiVertcount, 1, 0, 0 );
+ }
+
+ bool bIsInputCapturedByAnotherProcess = m_pHMD->IsInputFocusCapturedByAnotherProcess();
+ if( !bIsInputCapturedByAnotherProcess && m_pControllerAxesVertexBuffer != VK_NULL_HANDLE )
+ {
+ // draw the controller axis lines
+ vkCmdBindPipeline( m_currentCommandBuffer.m_pCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, m_pPipelines[ PSO_AXES ] );
+
+ VkDeviceSize nOffsets[ 1 ] = { 0 };
+ vkCmdBindVertexBuffers( m_currentCommandBuffer.m_pCommandBuffer, 0, 1, &m_pControllerAxesVertexBuffer, &nOffsets[ 0 ] );
+ vkCmdDraw( m_currentCommandBuffer.m_pCommandBuffer, m_uiControllerVertcount, 1, 0, 0 );
+ }
+
+ // ----- Render Model rendering -----
+ vkCmdBindPipeline( m_currentCommandBuffer.m_pCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, m_pPipelines[ PSO_RENDERMODEL ] );
+ for( uint32_t unTrackedDevice = 0; unTrackedDevice < vr::k_unMaxTrackedDeviceCount; unTrackedDevice++ )
+ {
+ if( !m_rTrackedDeviceToRenderModel[ unTrackedDevice ] || !m_rbShowTrackedDevice[ unTrackedDevice ] )
+ continue;
+
+ const vr::TrackedDevicePose_t & pose = m_rTrackedDevicePose[ unTrackedDevice ];
+ if( !pose.bPoseIsValid )
+ continue;
+
+ if( bIsInputCapturedByAnotherProcess && m_pHMD->GetTrackedDeviceClass( unTrackedDevice ) == vr::TrackedDeviceClass_Controller )
+ continue;
+
+ const Matrix4 & matDeviceToTracking = m_rmat4DevicePose[ unTrackedDevice ];
+ Matrix4 matMVP = GetCurrentViewProjectionMatrix( nEye ) * matDeviceToTracking;
+
+ m_rTrackedDeviceToRenderModel[ unTrackedDevice ]->Draw( nEye, m_currentCommandBuffer.m_pCommandBuffer, m_pPipelineLayout, matMVP );
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//-----------------------------------------------------------------------------
+void CMainApplication::RenderCompanionWindow()
+{
+ // Get the next swapchain image
+ VkResult nResult = vkAcquireNextImageKHR( m_pDevice, m_pSwapchain, UINT64_MAX, m_pSwapchainSemaphores[ m_nFrameIndex ], VK_NULL_HANDLE, &m_nCurrentSwapchainImage );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "Skipping companion window rendering, vkAcquireNextImageKHR returned %d\n", nResult );
+ return;
+ }
+
+ // Transition the swapchain image to COLOR_ATTACHMENT_OPTIMAL for rendering
+ VkImageMemoryBarrier imageMemoryBarrier = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER };
+ imageMemoryBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
+ imageMemoryBarrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ imageMemoryBarrier.oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
+ imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ imageMemoryBarrier.image = m_swapchainImages[ m_nCurrentSwapchainImage ];
+ imageMemoryBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ imageMemoryBarrier.subresourceRange.baseMipLevel = 0;
+ imageMemoryBarrier.subresourceRange.levelCount = 1;
+ imageMemoryBarrier.subresourceRange.baseArrayLayer = 0;
+ imageMemoryBarrier.subresourceRange.layerCount = 1;
+ imageMemoryBarrier.srcQueueFamilyIndex = m_nQueueFamilyIndex;
+ imageMemoryBarrier.dstQueueFamilyIndex = m_nQueueFamilyIndex;
+ vkCmdPipelineBarrier( m_currentCommandBuffer.m_pCommandBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0, 0, NULL, 0, NULL, 1, &imageMemoryBarrier );
+
+ // Start the renderpass
+ VkRenderPassBeginInfo renderPassBeginInfo = { VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO };
+ renderPassBeginInfo.renderPass = m_pSwapchainRenderPass;
+ renderPassBeginInfo.framebuffer = m_pSwapchainFramebuffers[ m_nCurrentSwapchainImage ];
+ renderPassBeginInfo.renderArea.offset.x = 0;
+ renderPassBeginInfo.renderArea.offset.y = 0;
+ renderPassBeginInfo.renderArea.extent.width = m_nCompanionWindowWidth;
+ renderPassBeginInfo.renderArea.extent.height = m_nCompanionWindowHeight;
+ VkClearValue clearValues[ 1 ];
+ clearValues[ 0 ].color.float32[ 0 ] = 0.0f;
+ clearValues[ 0 ].color.float32[ 1 ] = 0.0f;
+ clearValues[ 0 ].color.float32[ 2 ] = 0.0f;
+ clearValues[ 0 ].color.float32[ 3 ] = 1.0f;
+ renderPassBeginInfo.clearValueCount = _countof( clearValues );
+ renderPassBeginInfo.pClearValues = &clearValues[ 0 ];
+ vkCmdBeginRenderPass( m_currentCommandBuffer.m_pCommandBuffer, &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE );
+
+ // Set viewport/scissor
+	VkViewport viewport = { 0.0f, 0.0f, ( float ) m_nCompanionWindowWidth, ( float ) m_nCompanionWindowHeight, 0.0f, 1.0f };
+ vkCmdSetViewport( m_currentCommandBuffer.m_pCommandBuffer, 0, 1, &viewport );
+ VkRect2D scissor = { 0, 0, m_nCompanionWindowWidth, m_nCompanionWindowHeight };
+ vkCmdSetScissor( m_currentCommandBuffer.m_pCommandBuffer, 0, 1, &scissor );
+
+ // Bind the pipeline and descriptor set
+ vkCmdBindPipeline( m_currentCommandBuffer.m_pCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, m_pPipelines[ PSO_COMPANION ] );
+ vkCmdBindDescriptorSets( m_currentCommandBuffer.m_pCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, m_pPipelineLayout, 0, 1, &m_pDescriptorSets[ DESCRIPTOR_SET_COMPANION_LEFT_TEXTURE ], 0, nullptr );
+
+ // Draw left eye texture to companion window
+ VkDeviceSize nOffsets[ 1 ] = { 0 };
+ vkCmdBindVertexBuffers( m_currentCommandBuffer.m_pCommandBuffer, 0, 1, &m_pCompanionWindowVertexBuffer, &nOffsets[ 0 ] );
+ vkCmdBindIndexBuffer( m_currentCommandBuffer.m_pCommandBuffer, m_pCompanionWindowIndexBuffer, 0, VK_INDEX_TYPE_UINT16 );
+ vkCmdDrawIndexed( m_currentCommandBuffer.m_pCommandBuffer, m_uiCompanionWindowIndexSize / 2, 1, 0, 0, 0 );
+
+ // Draw right eye texture to companion window
+ vkCmdBindDescriptorSets( m_currentCommandBuffer.m_pCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, m_pPipelineLayout, 0, 1, &m_pDescriptorSets[ DESCRIPTOR_SET_COMPANION_RIGHT_TEXTURE ], 0, nullptr );
+ vkCmdDrawIndexed( m_currentCommandBuffer.m_pCommandBuffer, m_uiCompanionWindowIndexSize / 2, 1, ( m_uiCompanionWindowIndexSize / 2 ), 0, 0 );
+
+ // End the renderpass
+ vkCmdEndRenderPass( m_currentCommandBuffer.m_pCommandBuffer );
+
+ // Transition the swapchain image to PRESENT_SRC for presentation
+ imageMemoryBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ imageMemoryBarrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ imageMemoryBarrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
+ vkCmdPipelineBarrier( m_currentCommandBuffer.m_pCommandBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0, 0, NULL, 0, NULL, 1, &imageMemoryBarrier );
+
+	// Transition both eye textures to VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, the layout SteamVR requires for Submit
+ imageMemoryBarrier.image = m_leftEyeDesc.m_pImage;
+ imageMemoryBarrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ imageMemoryBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
+ imageMemoryBarrier.oldLayout = m_leftEyeDesc.m_nImageLayout;
+ imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
+ vkCmdPipelineBarrier( m_currentCommandBuffer.m_pCommandBuffer, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL, 1, &imageMemoryBarrier );
+ m_leftEyeDesc.m_nImageLayout = imageMemoryBarrier.newLayout;
+
+ imageMemoryBarrier.image = m_rightEyeDesc.m_pImage;
+ vkCmdPipelineBarrier( m_currentCommandBuffer.m_pCommandBuffer, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL, 1, &imageMemoryBarrier );
+ m_rightEyeDesc.m_nImageLayout = imageMemoryBarrier.newLayout;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Gets the projection matrix for the specified eye (nEye).
+//-----------------------------------------------------------------------------
+Matrix4 CMainApplication::GetHMDMatrixProjectionEye( vr::Hmd_Eye nEye )
+{
+ if ( !m_pHMD )
+ return Matrix4();
+
+ vr::HmdMatrix44_t mat = m_pHMD->GetProjectionMatrix( nEye, m_fNearClip, m_fFarClip );
+
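+	// Note: the elements are transposed here to match the storage convention of the sample's Matrix4 class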
+ return Matrix4(
+ mat.m[0][0], mat.m[1][0], mat.m[2][0], mat.m[3][0],
+ mat.m[0][1], mat.m[1][1], mat.m[2][1], mat.m[3][1],
+ mat.m[0][2], mat.m[1][2], mat.m[2][2], mat.m[3][2],
+ mat.m[0][3], mat.m[1][3], mat.m[2][3], mat.m[3][3]
+ );
+}
+
+
+//-----------------------------------------------------------------------------
+// Purpose: Gets the head-to-eye transform for the specified eye (nEye).
+//-----------------------------------------------------------------------------
+Matrix4 CMainApplication::GetHMDMatrixPoseEye( vr::Hmd_Eye nEye )
+{
+ if ( !m_pHMD )
+ return Matrix4();
+
+ vr::HmdMatrix34_t matEyeRight = m_pHMD->GetEyeToHeadTransform( nEye );
+ Matrix4 matrixObj(
+ matEyeRight.m[0][0], matEyeRight.m[1][0], matEyeRight.m[2][0], 0.0,
+ matEyeRight.m[0][1], matEyeRight.m[1][1], matEyeRight.m[2][1], 0.0,
+ matEyeRight.m[0][2], matEyeRight.m[1][2], matEyeRight.m[2][2], 0.0,
+ matEyeRight.m[0][3], matEyeRight.m[1][3], matEyeRight.m[2][3], 1.0f
+ );
+
+ return matrixObj.invert();
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Gets the current view-projection matrix for the specified eye
+// (Eye_Left or Eye_Right).
+//-----------------------------------------------------------------------------
+Matrix4 CMainApplication::GetCurrentViewProjectionMatrix( vr::Hmd_Eye nEye )
+{
+ Matrix4 matMVP;
+ if( nEye == vr::Eye_Left )
+ {
+ matMVP = m_mat4ProjectionLeft * m_mat4eyePosLeft * m_mat4HMDPose;
+ }
+ else if( nEye == vr::Eye_Right )
+ {
+ matMVP = m_mat4ProjectionRight * m_mat4eyePosRight * m_mat4HMDPose;
+ }
+
+ return matMVP;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//-----------------------------------------------------------------------------
+void CMainApplication::UpdateHMDMatrixPose()
+{
+ if ( !m_pHMD )
+ return;
+
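+	// WaitGetPoses blocks until the compositor signals that it is time to begin the next frame, then returns updated poses for all tracked devices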
+ vr::VRCompositor()->WaitGetPoses(m_rTrackedDevicePose, vr::k_unMaxTrackedDeviceCount, NULL, 0 );
+
+ m_iValidPoseCount = 0;
+ m_strPoseClasses = "";
+ for ( int nDevice = 0; nDevice < vr::k_unMaxTrackedDeviceCount; ++nDevice )
+ {
+ if ( m_rTrackedDevicePose[nDevice].bPoseIsValid )
+ {
+ m_iValidPoseCount++;
+ m_rmat4DevicePose[nDevice] = ConvertSteamVRMatrixToMatrix4( m_rTrackedDevicePose[nDevice].mDeviceToAbsoluteTracking );
+ if (m_rDevClassChar[nDevice]==0)
+ {
+ switch (m_pHMD->GetTrackedDeviceClass(nDevice))
+ {
+ case vr::TrackedDeviceClass_Controller: m_rDevClassChar[nDevice] = 'C'; break;
+ case vr::TrackedDeviceClass_HMD: m_rDevClassChar[nDevice] = 'H'; break;
+ case vr::TrackedDeviceClass_Invalid: m_rDevClassChar[nDevice] = 'I'; break;
+ case vr::TrackedDeviceClass_GenericTracker: m_rDevClassChar[nDevice] = 'G'; break;
+ case vr::TrackedDeviceClass_TrackingReference: m_rDevClassChar[nDevice] = 'T'; break;
+ default: m_rDevClassChar[nDevice] = '?'; break;
+ }
+ }
+ m_strPoseClasses += m_rDevClassChar[nDevice];
+ }
+ }
+
+ if ( m_rTrackedDevicePose[vr::k_unTrackedDeviceIndex_Hmd].bPoseIsValid )
+ {
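+		// The HMD pose maps device space to tracking space; invert it to get the view transform used for rendering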
+ m_mat4HMDPose = m_rmat4DevicePose[vr::k_unTrackedDeviceIndex_Hmd];
+ m_mat4HMDPose.invert();
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Finds a render model we've already loaded or loads a new one
+//-----------------------------------------------------------------------------
+VulkanRenderModel *CMainApplication::FindOrLoadRenderModel( vr::TrackedDeviceIndex_t unTrackedDeviceIndex, const char *pchRenderModelName )
+{
+ VulkanRenderModel *pRenderModel = NULL;
+	// To simplify the Vulkan rendering code, create a separate instance of the model for each tracked device rather
+	// than sharing instances by model name. This uses more memory, but it lets the per-device transform live in a
+	// constant buffer associated with the model itself. You would not want to do this in a production application.
+ //for( std::vector< VulkanRenderModel * >::iterator i = m_vecRenderModels.begin(); i != m_vecRenderModels.end(); i++ )
+ //{
+ //if( !stricmp( (*i)->GetName().c_str(), pchRenderModelName ) )
+ //{
+ //pRenderModel = *i;
+ //break;
+ //}
+ //}
+
+ // load the model if we didn't find one
+ if( !pRenderModel )
+ {
+ vr::RenderModel_t *pModel;
+ vr::EVRRenderModelError error;
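+		// LoadRenderModel_Async returns VRRenderModelError_Loading while the model is still loading, so poll and sleep until it finishes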
+ while ( 1 )
+ {
+ error = vr::VRRenderModels()->LoadRenderModel_Async( pchRenderModelName, &pModel );
+ if ( error != vr::VRRenderModelError_Loading )
+ break;
+
+ ThreadSleep( 1 );
+ }
+
+ if ( error != vr::VRRenderModelError_None )
+ {
+ dprintf( "Unable to load render model %s - %s\n", pchRenderModelName, vr::VRRenderModels()->GetRenderModelErrorNameFromEnum( error ) );
+ return NULL; // move on to the next tracked device
+ }
+
+ vr::RenderModel_TextureMap_t *pTexture;
+ while ( 1 )
+ {
+ error = vr::VRRenderModels()->LoadTexture_Async( pModel->diffuseTextureId, &pTexture );
+ if ( error != vr::VRRenderModelError_Loading )
+ break;
+
+ ThreadSleep( 1 );
+ }
+
+ if ( error != vr::VRRenderModelError_None )
+ {
+ dprintf( "Unable to load render texture id:%d for render model %s\n", pModel->diffuseTextureId, pchRenderModelName );
+ vr::VRRenderModels()->FreeRenderModel( pModel );
+ return NULL; // move on to the next tracked device
+ }
+
+ pRenderModel = new VulkanRenderModel( pchRenderModelName );
+ VkDescriptorSet pDescriptorSets[ 2 ] =
+ {
+ m_pDescriptorSets[ DESCRIPTOR_SET_LEFT_EYE_RENDER_MODEL0 + unTrackedDeviceIndex ],
+ m_pDescriptorSets[ DESCRIPTOR_SET_RIGHT_EYE_RENDER_MODEL0 + unTrackedDeviceIndex ],
+ };
+ if ( !pRenderModel->BInit( m_pDevice, m_physicalDeviceMemoryProperties, m_currentCommandBuffer.m_pCommandBuffer, unTrackedDeviceIndex, pDescriptorSets, *pModel, *pTexture ) )
+ {
+ dprintf( "Unable to create Vulkan model from render model %s\n", pchRenderModelName );
+ delete pRenderModel;
+ pRenderModel = NULL;
+ }
+ else
+ {
+ m_vecRenderModels.push_back( pRenderModel );
+ }
+ vr::VRRenderModels()->FreeRenderModel( pModel );
+ vr::VRRenderModels()->FreeTexture( pTexture );
+ }
+
+ return pRenderModel;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Create/destroy a Vulkan render model for a single tracked device
+//-----------------------------------------------------------------------------
+void CMainApplication::SetupRenderModelForTrackedDevice( vr::TrackedDeviceIndex_t unTrackedDeviceIndex )
+{
+ if( unTrackedDeviceIndex >= vr::k_unMaxTrackedDeviceCount )
+ return;
+
+ // try to find a model we've already set up
+ std::string sRenderModelName = GetTrackedDeviceString( m_pHMD, unTrackedDeviceIndex, vr::Prop_RenderModelName_String );
+ VulkanRenderModel *pRenderModel = FindOrLoadRenderModel( unTrackedDeviceIndex, sRenderModelName.c_str() );
+ if( !pRenderModel )
+ {
+ std::string sTrackingSystemName = GetTrackedDeviceString( m_pHMD, unTrackedDeviceIndex, vr::Prop_TrackingSystemName_String );
+		dprintf( "Unable to load render model for tracked device %d (%s.%s)\n", unTrackedDeviceIndex, sTrackingSystemName.c_str(), sRenderModelName.c_str() );
+ }
+ else
+ {
+ m_rTrackedDeviceToRenderModel[ unTrackedDeviceIndex ] = pRenderModel;
+ m_rbShowTrackedDevice[ unTrackedDeviceIndex ] = true;
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Create/destroy Vulkan Render Models
+//-----------------------------------------------------------------------------
+void CMainApplication::SetupRenderModels()
+{
+ memset( m_rTrackedDeviceToRenderModel, 0, sizeof( m_rTrackedDeviceToRenderModel ) );
+
+ if( !m_pHMD )
+ return;
+
+ for( uint32_t unTrackedDevice = vr::k_unTrackedDeviceIndex_Hmd + 1; unTrackedDevice < vr::k_unMaxTrackedDeviceCount; unTrackedDevice++ )
+ {
+ if( !m_pHMD->IsTrackedDeviceConnected( unTrackedDevice ) )
+ continue;
+
+ SetupRenderModelForTrackedDevice( unTrackedDevice );
+ }
+
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Converts a SteamVR matrix to our local matrix class
+//-----------------------------------------------------------------------------
+Matrix4 CMainApplication::ConvertSteamVRMatrixToMatrix4( const vr::HmdMatrix34_t &matPose )
+{
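+	// Convert the 3x4 SteamVR pose (rotation + translation) into the sample's 4x4 Matrix4, appending the implicit homogeneous ( 0, 0, 0, 1 ) row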
+ Matrix4 matrixObj(
+ matPose.m[0][0], matPose.m[1][0], matPose.m[2][0], 0.0,
+ matPose.m[0][1], matPose.m[1][1], matPose.m[2][1], 0.0,
+ matPose.m[0][2], matPose.m[1][2], matPose.m[2][2], 0.0,
+ matPose.m[0][3], matPose.m[1][3], matPose.m[2][3], 1.0f
+ );
+ return matrixObj;
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Constructor/destructor for VulkanRenderModel
+//-----------------------------------------------------------------------------
+VulkanRenderModel::VulkanRenderModel( const std::string & sRenderModelName )
+ : m_sModelName( sRenderModelName )
+ , m_pDevice( VK_NULL_HANDLE )
+ , m_pVertexBuffer( VK_NULL_HANDLE )
+ , m_pVertexBufferMemory( VK_NULL_HANDLE )
+ , m_pIndexBuffer( VK_NULL_HANDLE )
+ , m_pIndexBufferMemory( VK_NULL_HANDLE )
+ , m_pImage( VK_NULL_HANDLE )
+ , m_pImageMemory( VK_NULL_HANDLE )
+ , m_pImageView( VK_NULL_HANDLE )
+ , m_pImageStagingBuffer( VK_NULL_HANDLE )
+ , m_pImageStagingBufferMemory( VK_NULL_HANDLE )
+ , m_pSampler( VK_NULL_HANDLE )
+{
+ memset( m_pConstantBuffer, 0, sizeof( m_pConstantBuffer ) );
+ memset( m_pConstantBufferMemory, 0, sizeof( m_pConstantBufferMemory ) );
+ memset( m_pConstantBufferData, 0, sizeof( m_pConstantBufferData ) );
+ memset( m_pDescriptorSets, 0, sizeof( m_pDescriptorSets ) );
+}
+
+VulkanRenderModel::~VulkanRenderModel()
+{
+ Cleanup();
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Allocates and populates the Vulkan resources for a render model
+//-----------------------------------------------------------------------------
+bool VulkanRenderModel::BInit( VkDevice pDevice, const VkPhysicalDeviceMemoryProperties &memoryProperties, VkCommandBuffer pCommandBuffer, vr::TrackedDeviceIndex_t unTrackedDeviceIndex, VkDescriptorSet pDescriptorSets[ 2 ], const vr::RenderModel_t & vrModel, const vr::RenderModel_TextureMap_t & vrDiffuseTexture )
+{
+ m_pDevice = pDevice;
+ m_physicalDeviceMemoryProperties = memoryProperties;
+ m_unTrackedDeviceIndex = unTrackedDeviceIndex;
+ m_pDescriptorSets[ 0 ] = pDescriptorSets[ 0 ];
+ m_pDescriptorSets[ 1 ] = pDescriptorSets[ 1 ];
+
+ // Create and populate the vertex buffer
+ {
+ if ( !CreateVulkanBuffer( m_pDevice, m_physicalDeviceMemoryProperties, vrModel.rVertexData, sizeof( vr::RenderModel_Vertex_t ) * vrModel.unVertexCount,
+ VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, &m_pVertexBuffer, &m_pVertexBufferMemory ) )
+ {
+ return false;
+ }
+ }
+
+ // Create and populate the index buffer
+ {
+ if ( !CreateVulkanBuffer( m_pDevice, m_physicalDeviceMemoryProperties, vrModel.rIndexData, sizeof( uint16_t ) * vrModel.unTriangleCount * 3,
+ VK_BUFFER_USAGE_INDEX_BUFFER_BIT, &m_pIndexBuffer, &m_pIndexBufferMemory ) )
+ {
+ return false;
+ }
+ }
+
+ // create and populate the texture
+ {
+ int nImageWidth = vrDiffuseTexture.unWidth;
+ int nImageHeight = vrDiffuseTexture.unHeight;
+
+		// Copy the base level to a buffer and reserve space for the mips (over-reserve slightly to avoid computing the exact mip chain size up front)
+ VkDeviceSize nBufferSize = 0;
+ uint8_t *pBuffer = new uint8_t[ nImageWidth * nImageHeight * 4 * 2 ];
+ uint8_t *pPrevBuffer = pBuffer;
+ uint8_t *pCurBuffer = pBuffer;
+ memcpy( pCurBuffer, vrDiffuseTexture.rubTextureMapData, sizeof( uint8_t ) * nImageWidth * nImageHeight * 4 );
+ pCurBuffer += sizeof( uint8_t ) * nImageWidth * nImageHeight * 4;
+
+ std::vector< VkBufferImageCopy > bufferImageCopies;
+ VkBufferImageCopy bufferImageCopy = {};
+ bufferImageCopy.bufferOffset = 0;
+ bufferImageCopy.bufferRowLength = 0;
+ bufferImageCopy.bufferImageHeight = 0;
+ bufferImageCopy.imageSubresource.baseArrayLayer = 0;
+ bufferImageCopy.imageSubresource.layerCount = 1;
+ bufferImageCopy.imageSubresource.mipLevel = 0;
+ bufferImageCopy.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ bufferImageCopy.imageOffset.x = 0;
+ bufferImageCopy.imageOffset.y = 0;
+ bufferImageCopy.imageOffset.z = 0;
+ bufferImageCopy.imageExtent.width = nImageWidth;
+ bufferImageCopy.imageExtent.height = nImageHeight;
+ bufferImageCopy.imageExtent.depth = 1;
+ bufferImageCopies.push_back( bufferImageCopy );
+
+ int nMipWidth = nImageWidth;
+ int nMipHeight = nImageHeight;
+
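+		// Generate each successive mip level into the CPU-side buffer and record a VkBufferImageCopy region for it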
+ while( nMipWidth > 1 && nMipHeight > 1 )
+ {
+ CMainApplication::GenMipMapRGBA( pPrevBuffer, pCurBuffer, nMipWidth, nMipHeight, &nMipWidth, &nMipHeight );
+ bufferImageCopy.bufferOffset = pCurBuffer - pBuffer;
+ bufferImageCopy.imageSubresource.mipLevel++;
+ bufferImageCopy.imageExtent.width = nMipWidth;
+ bufferImageCopy.imageExtent.height = nMipHeight;
+ bufferImageCopies.push_back( bufferImageCopy );
+ pPrevBuffer = pCurBuffer;
+ pCurBuffer += ( nMipWidth * nMipHeight * 4 * sizeof( uint8_t ) );
+ }
+ nBufferSize = pCurBuffer - pBuffer;
+
+ // Create the image
+ VkImageCreateInfo imageCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+ imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
+ imageCreateInfo.extent.width = nImageWidth;
+ imageCreateInfo.extent.height = nImageHeight;
+ imageCreateInfo.extent.depth = 1;
+ imageCreateInfo.mipLevels = ( uint32_t ) bufferImageCopies.size();
+ imageCreateInfo.arrayLayers = 1;
+ imageCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
+ imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+ imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+ imageCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ imageCreateInfo.flags = 0;
+ vkCreateImage( m_pDevice, &imageCreateInfo, nullptr, &m_pImage );
+
+ VkMemoryRequirements memoryRequirements = {};
+ vkGetImageMemoryRequirements( m_pDevice, m_pImage, &memoryRequirements );
+
+ VkMemoryAllocateInfo memoryAllocateInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+ memoryAllocateInfo.allocationSize = memoryRequirements.size;
+ MemoryTypeFromProperties( m_physicalDeviceMemoryProperties, memoryRequirements.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &memoryAllocateInfo.memoryTypeIndex );
+ vkAllocateMemory( m_pDevice, &memoryAllocateInfo, nullptr, &m_pImageMemory );
+ vkBindImageMemory( m_pDevice, m_pImage, m_pImageMemory, 0 );
+
+ VkImageViewCreateInfo imageViewCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO };
+ imageViewCreateInfo.flags = 0;
+ imageViewCreateInfo.image = m_pImage;
+ imageViewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
+ imageViewCreateInfo.format = imageCreateInfo.format;
+ imageViewCreateInfo.components = { VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY };
+ imageViewCreateInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ imageViewCreateInfo.subresourceRange.baseMipLevel = 0;
+ imageViewCreateInfo.subresourceRange.levelCount = imageCreateInfo.mipLevels;
+ imageViewCreateInfo.subresourceRange.baseArrayLayer = 0;
+ imageViewCreateInfo.subresourceRange.layerCount = 1;
+ vkCreateImageView( m_pDevice, &imageViewCreateInfo, nullptr, &m_pImageView );
+
+ // Create a staging buffer
+ VkBufferCreateInfo bufferCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+ bufferCreateInfo.size = nBufferSize;
+ bufferCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+ vkCreateBuffer( m_pDevice, &bufferCreateInfo, nullptr, &m_pImageStagingBuffer );
+ vkGetBufferMemoryRequirements( m_pDevice, m_pImageStagingBuffer, &memoryRequirements );
+
+ VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+ MemoryTypeFromProperties( m_physicalDeviceMemoryProperties, memoryRequirements.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &allocInfo.memoryTypeIndex );
+ allocInfo.allocationSize = memoryRequirements.size;
+
+ vkAllocateMemory( m_pDevice, &allocInfo, nullptr, &m_pImageStagingBufferMemory );
+ vkBindBufferMemory( m_pDevice, m_pImageStagingBuffer, m_pImageStagingBufferMemory, 0 );
+
+ // Copy memory to the staging buffer
+ void *pData;
+ VkResult nResult = vkMapMemory( m_pDevice, m_pImageStagingBufferMemory, 0, VK_WHOLE_SIZE, 0, &pData );
+ if ( nResult != VK_SUCCESS )
+ {
+ dprintf( "vkMapMemory returned error %d\n", nResult );
+ return false;
+ }
+ memcpy( pData, pBuffer, nBufferSize );
+ vkUnmapMemory( m_pDevice, m_pImageStagingBufferMemory );
+
+ VkMappedMemoryRange memoryRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
+ memoryRange.memory = m_pImageStagingBufferMemory;
+ memoryRange.size = VK_WHOLE_SIZE;
+		vkFlushMappedMemoryRanges( m_pDevice, 1, &memoryRange );
+
+		// The CPU-side copy of the base level and mips now lives in the staging buffer, so free it
+		delete [] pBuffer;
+
+		// Transition the image to TRANSFER_DST so it can receive the copied texture data
+ VkImageMemoryBarrier imageMemoryBarrier = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER };
+ imageMemoryBarrier.srcAccessMask = 0;
+ imageMemoryBarrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ imageMemoryBarrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+ imageMemoryBarrier.image = m_pImage;
+ imageMemoryBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ imageMemoryBarrier.subresourceRange.baseMipLevel = 0;
+ imageMemoryBarrier.subresourceRange.levelCount = imageCreateInfo.mipLevels;
+ imageMemoryBarrier.subresourceRange.baseArrayLayer = 0;
+ imageMemoryBarrier.subresourceRange.layerCount = 1;
+		vkCmdPipelineBarrier( pCommandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL, 1, &imageMemoryBarrier );
+
+ // Issue the copy to fill the image data
+ vkCmdCopyBufferToImage( pCommandBuffer, m_pImageStagingBuffer, m_pImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, ( uint32_t ) bufferImageCopies.size(), &bufferImageCopies[ 0 ] );
+
+ // Transition the image to SHADER_READ_OPTIMAL for reading
+ imageMemoryBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ imageMemoryBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ imageMemoryBarrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+ imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+ vkCmdPipelineBarrier( pCommandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, NULL, 0, NULL, 1, &imageMemoryBarrier );
+
+ // Create a sampler
+ VkSamplerCreateInfo samplerCreateInfo = { VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO };
+ samplerCreateInfo.magFilter = VK_FILTER_LINEAR;
+ samplerCreateInfo.minFilter = VK_FILTER_LINEAR;
+ samplerCreateInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
+ samplerCreateInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
+ samplerCreateInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
+ samplerCreateInfo.anisotropyEnable = VK_TRUE;
+ samplerCreateInfo.maxAnisotropy = 16.0f;
+ samplerCreateInfo.minLod = 0.0f;
+ samplerCreateInfo.maxLod = ( float ) imageCreateInfo.mipLevels;
+ vkCreateSampler( m_pDevice, &samplerCreateInfo, nullptr, &m_pSampler );
+ }
+
+ // Create a constant buffer to hold the transform (one for each eye)
+ for ( uint32_t nEye = 0; nEye < 2; nEye++ )
+ {
+ VkBufferCreateInfo bufferCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+ bufferCreateInfo.size = sizeof( Matrix4 );
+ bufferCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+ vkCreateBuffer( m_pDevice, &bufferCreateInfo, nullptr, &m_pConstantBuffer[ nEye ] );
+
+ VkMemoryRequirements memoryRequirements = {};
+ vkGetBufferMemoryRequirements( m_pDevice, m_pConstantBuffer[ nEye ], &memoryRequirements );
+ VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+ MemoryTypeFromProperties( m_physicalDeviceMemoryProperties, memoryRequirements.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT, &allocInfo.memoryTypeIndex );
+ allocInfo.allocationSize = memoryRequirements.size;
+
+ vkAllocateMemory( m_pDevice, &allocInfo, nullptr, &m_pConstantBufferMemory[ nEye ] );
+ vkBindBufferMemory( m_pDevice, m_pConstantBuffer[ nEye ], m_pConstantBufferMemory[ nEye ], 0 );
+
+ // Map and keep mapped persistently
+ vkMapMemory( m_pDevice, m_pConstantBufferMemory[ nEye ], 0, VK_WHOLE_SIZE, 0, &m_pConstantBufferData[ nEye ] );
+
+ // Bake the descriptor set
+ VkDescriptorBufferInfo bufferInfo = {};
+ bufferInfo.buffer = m_pConstantBuffer[ nEye ];
+ bufferInfo.offset = 0;
+ bufferInfo.range = VK_WHOLE_SIZE;
+
+ VkDescriptorImageInfo imageInfo = {};
+ imageInfo.imageView = m_pImageView;
+ imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+
+ VkDescriptorImageInfo samplerInfo = {};
+ samplerInfo.sampler = m_pSampler;
+
+ VkWriteDescriptorSet writeDescriptorSets[ 3 ] = { };
+ writeDescriptorSets[ 0 ].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ writeDescriptorSets[ 0 ].dstSet = m_pDescriptorSets[ nEye ];
+ writeDescriptorSets[ 0 ].dstBinding = 0;
+ writeDescriptorSets[ 0 ].descriptorCount = 1;
+ writeDescriptorSets[ 0 ].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ writeDescriptorSets[ 0 ].pBufferInfo = &bufferInfo;
+ writeDescriptorSets[ 1 ].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ writeDescriptorSets[ 1 ].dstSet = m_pDescriptorSets[ nEye ];
+ writeDescriptorSets[ 1 ].dstBinding = 1;
+ writeDescriptorSets[ 1 ].descriptorCount = 1;
+ writeDescriptorSets[ 1 ].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+ writeDescriptorSets[ 1 ].pImageInfo = &imageInfo;
+ writeDescriptorSets[ 2 ].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ writeDescriptorSets[ 2 ].dstSet = m_pDescriptorSets[ nEye ];
+ writeDescriptorSets[ 2 ].dstBinding = 2;
+ writeDescriptorSets[ 2 ].descriptorCount = 1;
+ writeDescriptorSets[ 2 ].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
+ writeDescriptorSets[ 2 ].pImageInfo = &samplerInfo;
+
+ vkUpdateDescriptorSets( m_pDevice, _countof( writeDescriptorSets ), writeDescriptorSets, 0, nullptr );
+ }
+
+ m_unVertexCount = vrModel.unTriangleCount * 3;
+
+ return true;
+}
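+
+//-----------------------------------------------------------------------------
+// Illustrative sketch only (not part of this sample): one way a caller might
+// obtain the vr::RenderModel_t and vr::RenderModel_TextureMap_t that BInit
+// expects, using the IVRRenderModels async loading API. The VkDevice, memory
+// properties, command buffer (in the recording state), descriptor sets, the
+// VulkanRenderModel constructor and the sleep helper shown here are assumed
+// to exist in the surrounding application code.
+//
+//	vr::RenderModel_t *pModel = nullptr;
+//	while ( vr::VRRenderModels()->LoadRenderModel_Async( pchRenderModelName, &pModel ) == vr::VRRenderModelError_Loading )
+//	{
+//		ThreadSleep( 1 ); // poll until the async load completes
+//	}
+//
+//	vr::RenderModel_TextureMap_t *pTexture = nullptr;
+//	while ( pModel && vr::VRRenderModels()->LoadTexture_Async( pModel->diffuseTextureId, &pTexture ) == vr::VRRenderModelError_Loading )
+//	{
+//		ThreadSleep( 1 );
+//	}
+//
+//	VulkanRenderModel *pRenderModel = new VulkanRenderModel( pchRenderModelName );
+//	pRenderModel->BInit( m_pDevice, m_physicalDeviceMemoryProperties, m_pCommandBuffer, unTrackedDeviceIndex, pDescriptorSets, *pModel, *pTexture );
+//
+//	vr::VRRenderModels()->FreeRenderModel( pModel );
+//	vr::VRRenderModels()->FreeTexture( pTexture );
+//-----------------------------------------------------------------------------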
+
+//-----------------------------------------------------------------------------
+// Purpose: Frees the Vulkan resources for a render model
+//-----------------------------------------------------------------------------
+void VulkanRenderModel::Cleanup()
+{
+ if ( m_pVertexBuffer != VK_NULL_HANDLE )
+ {
+ vkDestroyBuffer( m_pDevice, m_pVertexBuffer, nullptr );
+ m_pVertexBuffer = VK_NULL_HANDLE;
+ }
+
+ if ( m_pVertexBufferMemory != VK_NULL_HANDLE )
+ {
+ vkFreeMemory( m_pDevice, m_pVertexBufferMemory, nullptr );
+ m_pVertexBufferMemory = VK_NULL_HANDLE;
+ }
+
+ if ( m_pIndexBuffer != VK_NULL_HANDLE )
+ {
+ vkDestroyBuffer( m_pDevice, m_pIndexBuffer, nullptr );
+ m_pIndexBuffer = VK_NULL_HANDLE;
+ }
+
+ if ( m_pIndexBufferMemory != VK_NULL_HANDLE )
+ {
+ vkFreeMemory( m_pDevice, m_pIndexBufferMemory, nullptr );
+ m_pIndexBufferMemory = VK_NULL_HANDLE;
+ }
+
+ if ( m_pImage != VK_NULL_HANDLE )
+ {
+ vkDestroyImage( m_pDevice, m_pImage, nullptr );
+ m_pImage = VK_NULL_HANDLE;
+ }
+
+ if ( m_pImageMemory != VK_NULL_HANDLE )
+ {
+ vkFreeMemory( m_pDevice, m_pImageMemory, nullptr );
+ m_pImageMemory = VK_NULL_HANDLE;
+ }
+
+ if ( m_pImageView != VK_NULL_HANDLE )
+ {
+ vkDestroyImageView( m_pDevice, m_pImageView, nullptr );
+ m_pImageView = VK_NULL_HANDLE;
+ }
+
+ if ( m_pImageStagingBuffer != VK_NULL_HANDLE )
+ {
+ vkDestroyBuffer( m_pDevice, m_pImageStagingBuffer, nullptr );
+ m_pImageStagingBuffer = VK_NULL_HANDLE;
+ }
+
+ if ( m_pImageStagingBufferMemory != VK_NULL_HANDLE )
+ {
+ vkFreeMemory( m_pDevice, m_pImageStagingBufferMemory, nullptr );
+ m_pImageStagingBufferMemory = VK_NULL_HANDLE;
+ }
+
+ for ( uint32_t nEye = 0; nEye < 2; nEye++ )
+ {
+ if ( m_pConstantBuffer[ nEye ] != VK_NULL_HANDLE )
+ {
+ vkDestroyBuffer( m_pDevice, m_pConstantBuffer[ nEye ], nullptr );
+ m_pConstantBuffer[ nEye ] = VK_NULL_HANDLE;
+ }
+
+		if ( m_pConstantBufferMemory[ nEye ] != VK_NULL_HANDLE )
+ {
+ vkFreeMemory( m_pDevice, m_pConstantBufferMemory[ nEye ], nullptr );
+ m_pConstantBufferMemory[ nEye ] = VK_NULL_HANDLE;
+ }
+ }
+
+ if ( m_pSampler != VK_NULL_HANDLE )
+ {
+ vkDestroySampler( m_pDevice, m_pSampler, nullptr );
+ m_pSampler = VK_NULL_HANDLE;
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Purpose: Draws the render model
+//-----------------------------------------------------------------------------
+void VulkanRenderModel::Draw( vr::EVREye nEye, VkCommandBuffer pCommandBuffer, VkPipelineLayout pPipelineLayout, const Matrix4 &matMVP )
+{
+ // Update the CB with the transform
+ memcpy( m_pConstantBufferData[ nEye ], &matMVP, sizeof( matMVP ) );
+
+ // Bind the descriptor set
+ vkCmdBindDescriptorSets( pCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pPipelineLayout, 0, 1, &m_pDescriptorSets[ nEye ], 0, nullptr );
+
+ // Bind the VB/IB and draw
+ VkDeviceSize nOffsets[ 1 ] = { 0 };
+ vkCmdBindVertexBuffers( pCommandBuffer, 0, 1, &m_pVertexBuffer, &nOffsets[ 0 ] );
+ vkCmdBindIndexBuffer( pCommandBuffer, m_pIndexBuffer, 0, VK_INDEX_TYPE_UINT16 );
+ vkCmdDrawIndexed( pCommandBuffer, m_unVertexCount, 1, 0, 0, 0 );
+}
+
+//-----------------------------------------------------------------------------
+// Purpose:
+//-----------------------------------------------------------------------------
+int main(int argc, char *argv[])
+{
+ CMainApplication *pMainApplication = new CMainApplication( argc, argv );
+
+ if ( !pMainApplication->BInit() )
+ {
+ pMainApplication->Shutdown();
+ return 1;
+ }
+
+ pMainApplication->RunMainLoop();
+
+ pMainApplication->Shutdown();
+
+ return 0;
+}
diff --git a/samples/shared/strtools.h b/samples/shared/strtools.h
index b69ef15..57e6271 100644
--- a/samples/shared/strtools.h
+++ b/samples/shared/strtools.h
@@ -37,13 +37,15 @@ std::string StringToLower( const std::string & sString );
// we stricmp (from WIN) but it isn't POSIX - OSX/LINUX have strcasecmp so just inline bridge to it
#if defined( OSX ) || defined( LINUX )
-#include <strings.h>
-inline int stricmp(const char *pStr1, const char *pStr2) { return strcasecmp(pStr1,pStr2); }
-#ifndef _stricmp
-#define _stricmp stricmp
+#ifndef __THROW // If __THROW is defined, these will clash with throw() versions on gcc
+ #include <strings.h>
+ inline int stricmp(const char *pStr1, const char *pStr2) { return strcasecmp(pStr1,pStr2); }
+ #ifndef _stricmp
+ #define _stricmp stricmp
+ #endif
+ inline int strnicmp( const char *pStr1, const char *pStr2, size_t unBufferLen ) { return strncasecmp( pStr1,pStr2, unBufferLen ); }
+ #define _strnicmp strnicmp
#endif
-inline int strnicmp( const char *pStr1, const char *pStr2, size_t unBufferLen ) { return strncasecmp( pStr1,pStr2, unBufferLen ); }
-#define _strnicmp strnicmp
#ifndef _vsnprintf_s
#define _vsnprintf_s vsnprintf
@@ -51,7 +53,7 @@ inline int strnicmp( const char *pStr1, const char *pStr2, size_t unBufferLen )
#define _TRUNCATE ((size_t)-1)
-#endif
+#endif // defined( OSX ) || defined( LINUX )
#if defined( OSX )
// behaviors ensure NULL-termination at least as well as _TRUNCATE does, but
diff --git a/samples/thirdparty/vulkan-1.0.49.0/include/shaderc/shaderc.h b/samples/thirdparty/vulkan-1.0.49.0/include/shaderc/shaderc.h
new file mode 100644
index 0000000..8d75c49
--- /dev/null
+++ b/samples/thirdparty/vulkan-1.0.49.0/include/shaderc/shaderc.h
@@ -0,0 +1,470 @@
+// Copyright 2015 The Shaderc Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SHADERC_SHADERC_H_
+#define SHADERC_SHADERC_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+// Source language kind.
+typedef enum {
+ shaderc_source_language_glsl,
+ shaderc_source_language_hlsl,
+} shaderc_source_language;
+
+typedef enum {
+ // Forced shader kinds. These shader kinds force the compiler to compile the
+ // source code as the specified kind of shader.
+ shaderc_glsl_vertex_shader,
+ shaderc_glsl_fragment_shader,
+ shaderc_glsl_compute_shader,
+ shaderc_glsl_geometry_shader,
+ shaderc_glsl_tess_control_shader,
+ shaderc_glsl_tess_evaluation_shader,
+ // Deduce the shader kind from #pragma annotation in the source code. Compiler
+ // will emit error if #pragma annotation is not found.
+ shaderc_glsl_infer_from_source,
+ // Default shader kinds. Compiler will fall back to compile the source code as
+ // the specified kind of shader when #pragma annotation is not found in the
+ // source code.
+ shaderc_glsl_default_vertex_shader,
+ shaderc_glsl_default_fragment_shader,
+ shaderc_glsl_default_compute_shader,
+ shaderc_glsl_default_geometry_shader,
+ shaderc_glsl_default_tess_control_shader,
+ shaderc_glsl_default_tess_evaluation_shader,
+ shaderc_spirv_assembly,
+} shaderc_shader_kind;
+
+typedef enum {
+ shaderc_target_env_vulkan, // create SPIR-V under Vulkan semantics
+ shaderc_target_env_opengl, // create SPIR-V under OpenGL semantics
+ shaderc_target_env_opengl_compat, // create SPIR-V under OpenGL semantics,
+ // including compatibility profile
+ // functions
+ shaderc_target_env_default = shaderc_target_env_vulkan
+} shaderc_target_env;
+
+typedef enum {
+ shaderc_profile_none, // Used if and only if GLSL version did not specify
+ // profiles.
+ shaderc_profile_core,
+ shaderc_profile_compatibility,
+ shaderc_profile_es,
+} shaderc_profile;
+
+// Indicate the status of a compilation.
+typedef enum {
+ shaderc_compilation_status_success = 0,
+ shaderc_compilation_status_invalid_stage, // error stage deduction
+ shaderc_compilation_status_compilation_error,
+ shaderc_compilation_status_internal_error, // unexpected failure
+ shaderc_compilation_status_null_result_object,
+ shaderc_compilation_status_invalid_assembly,
+} shaderc_compilation_status;
+
+// Optimization level.
+typedef enum {
+ shaderc_optimization_level_zero, // no optimization
+ shaderc_optimization_level_size, // optimize towards reducing code size
+} shaderc_optimization_level;
+
+// Resource limits.
+typedef enum {
+ shaderc_limit_max_lights,
+ shaderc_limit_max_clip_planes,
+ shaderc_limit_max_texture_units,
+ shaderc_limit_max_texture_coords,
+ shaderc_limit_max_vertex_attribs,
+ shaderc_limit_max_vertex_uniform_components,
+ shaderc_limit_max_varying_floats,
+ shaderc_limit_max_vertex_texture_image_units,
+ shaderc_limit_max_combined_texture_image_units,
+ shaderc_limit_max_texture_image_units,
+ shaderc_limit_max_fragment_uniform_components,
+ shaderc_limit_max_draw_buffers,
+ shaderc_limit_max_vertex_uniform_vectors,
+ shaderc_limit_max_varying_vectors,
+ shaderc_limit_max_fragment_uniform_vectors,
+ shaderc_limit_max_vertex_output_vectors,
+ shaderc_limit_max_fragment_input_vectors,
+ shaderc_limit_min_program_texel_offset,
+ shaderc_limit_max_program_texel_offset,
+ shaderc_limit_max_clip_distances,
+ shaderc_limit_max_compute_work_group_count_x,
+ shaderc_limit_max_compute_work_group_count_y,
+ shaderc_limit_max_compute_work_group_count_z,
+ shaderc_limit_max_compute_work_group_size_x,
+ shaderc_limit_max_compute_work_group_size_y,
+ shaderc_limit_max_compute_work_group_size_z,
+ shaderc_limit_max_compute_uniform_components,
+ shaderc_limit_max_compute_texture_image_units,
+ shaderc_limit_max_compute_image_uniforms,
+ shaderc_limit_max_compute_atomic_counters,
+ shaderc_limit_max_compute_atomic_counter_buffers,
+ shaderc_limit_max_varying_components,
+ shaderc_limit_max_vertex_output_components,
+ shaderc_limit_max_geometry_input_components,
+ shaderc_limit_max_geometry_output_components,
+ shaderc_limit_max_fragment_input_components,
+ shaderc_limit_max_image_units,
+ shaderc_limit_max_combined_image_units_and_fragment_outputs,
+ shaderc_limit_max_combined_shader_output_resources,
+ shaderc_limit_max_image_samples,
+ shaderc_limit_max_vertex_image_uniforms,
+ shaderc_limit_max_tess_control_image_uniforms,
+ shaderc_limit_max_tess_evaluation_image_uniforms,
+ shaderc_limit_max_geometry_image_uniforms,
+ shaderc_limit_max_fragment_image_uniforms,
+ shaderc_limit_max_combined_image_uniforms,
+ shaderc_limit_max_geometry_texture_image_units,
+ shaderc_limit_max_geometry_output_vertices,
+ shaderc_limit_max_geometry_total_output_components,
+ shaderc_limit_max_geometry_uniform_components,
+ shaderc_limit_max_geometry_varying_components,
+ shaderc_limit_max_tess_control_input_components,
+ shaderc_limit_max_tess_control_output_components,
+ shaderc_limit_max_tess_control_texture_image_units,
+ shaderc_limit_max_tess_control_uniform_components,
+ shaderc_limit_max_tess_control_total_output_components,
+ shaderc_limit_max_tess_evaluation_input_components,
+ shaderc_limit_max_tess_evaluation_output_components,
+ shaderc_limit_max_tess_evaluation_texture_image_units,
+ shaderc_limit_max_tess_evaluation_uniform_components,
+ shaderc_limit_max_tess_patch_components,
+ shaderc_limit_max_patch_vertices,
+ shaderc_limit_max_tess_gen_level,
+ shaderc_limit_max_viewports,
+ shaderc_limit_max_vertex_atomic_counters,
+ shaderc_limit_max_tess_control_atomic_counters,
+ shaderc_limit_max_tess_evaluation_atomic_counters,
+ shaderc_limit_max_geometry_atomic_counters,
+ shaderc_limit_max_fragment_atomic_counters,
+ shaderc_limit_max_combined_atomic_counters,
+ shaderc_limit_max_atomic_counter_bindings,
+ shaderc_limit_max_vertex_atomic_counter_buffers,
+ shaderc_limit_max_tess_control_atomic_counter_buffers,
+ shaderc_limit_max_tess_evaluation_atomic_counter_buffers,
+ shaderc_limit_max_geometry_atomic_counter_buffers,
+ shaderc_limit_max_fragment_atomic_counter_buffers,
+ shaderc_limit_max_combined_atomic_counter_buffers,
+ shaderc_limit_max_atomic_counter_buffer_size,
+ shaderc_limit_max_transform_feedback_buffers,
+ shaderc_limit_max_transform_feedback_interleaved_components,
+ shaderc_limit_max_cull_distances,
+ shaderc_limit_max_combined_clip_and_cull_distances,
+ shaderc_limit_max_samples,
+} shaderc_limit;
+
+// Usage examples:
+//
+// Aggressively release compiler resources, but spend time in initialization
+// for each new use.
+// shaderc_compiler_t compiler = shaderc_compiler_initialize();
+// shaderc_compilation_result_t result = shaderc_compile_into_spv(
+// compiler, "#version 450\nvoid main() {}", 27,
+// shaderc_glsl_vertex_shader, "main.vert", "main", nullptr);
+// // Do stuff with compilation results.
+// shaderc_result_release(result);
+// shaderc_compiler_release(compiler);
+//
+// Keep the compiler object around for a long time, but pay for extra space
+// occupied.
+// shaderc_compiler_t compiler = shaderc_compiler_initialize();
+// // On the same, other or multiple simultaneous threads.
+// shaderc_compilation_result_t result = shaderc_compile_into_spv(
+// compiler, "#version 450\nvoid main() {}", 27,
+// shaderc_glsl_vertex_shader, "main.vert", "main", nullptr);
+// // Do stuff with compilation results.
+// shaderc_result_release(result);
+// // Once no more compilations are to happen.
+// shaderc_compiler_release(compiler);
+
+// An opaque handle to an object that manages all compiler state.
+typedef struct shaderc_compiler* shaderc_compiler_t;
+
+// Returns a shaderc_compiler_t that can be used to compile modules.
+// A return of NULL indicates that there was an error initializing the compiler.
+// Any function operating on shaderc_compiler_t must offer the basic
+// thread-safety guarantee.
+// [http://herbsutter.com/2014/01/13/gotw-95-solution-thread-safety-and-synchronization/]
+// That is: concurrent invocation of these functions on DIFFERENT objects needs
+// no synchronization; concurrent invocation of these functions on the SAME
+// object requires synchronization IF AND ONLY IF some of them take a non-const
+// argument.
+shaderc_compiler_t shaderc_compiler_initialize(void);
+
+// Releases the resources held by the shaderc_compiler_t.
+// After this call it is invalid to make any future calls to functions
+// involving this shaderc_compiler_t.
+void shaderc_compiler_release(shaderc_compiler_t);
+
+// An opaque handle to an object that manages options to a single compilation
+// result.
+typedef struct shaderc_compile_options* shaderc_compile_options_t;
+
+// Returns a default-initialized shaderc_compile_options_t that can be used
+// to modify the functionality of a compiled module.
+// A return of NULL indicates that there was an error initializing the options.
+// Any function operating on shaderc_compile_options_t must offer the
+// basic thread-safety guarantee.
+shaderc_compile_options_t shaderc_compile_options_initialize(void);
+
+// Returns a copy of the given shaderc_compile_options_t.
+// If NULL is passed as the parameter the call is the same as
+// shaderc_compile_options_init.
+shaderc_compile_options_t shaderc_compile_options_clone(
+ const shaderc_compile_options_t options);
+
+// Releases the compilation options. It is invalid to use the given
+// shaderc_compile_options_t object in any future calls. It is safe to pass
+// NULL to this function, and doing such will have no effect.
+void shaderc_compile_options_release(shaderc_compile_options_t options);
+
+// Adds a predefined macro to the compilation options. This has the same
+// effect as passing -Dname=value to the command-line compiler. If value
+// is NULL, it has the same effect as passing -Dname to the command-line
+// compiler. If a macro definition with the same name has previously been
+// added, the value is replaced with the new value. The macro name and
+// value are passed in with char pointers, which point to their data, and
+// the lengths of their data. The strings that the name and value pointers
+// point to must remain valid for the duration of the call, but can be
+// modified or deleted after this function has returned. In case of adding
+// a valueless macro, the value argument should be a null pointer or the
+// value_length should be 0u.
+void shaderc_compile_options_add_macro_definition(
+ shaderc_compile_options_t options, const char* name, size_t name_length,
+ const char* value, size_t value_length);
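+
+// For example (a minimal sketch; "options" is assumed to come from
+// shaderc_compile_options_initialize above):
+//   shaderc_compile_options_add_macro_definition(options, "MY_DEFINE", 9, "1", 1);
+//   // A valueless macro, equivalent to -DMY_FLAG on the command line:
+//   shaderc_compile_options_add_macro_definition(options, "MY_FLAG", 7, NULL, 0u);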
+
+// Sets the source language. The default is GLSL.
+void shaderc_compile_options_set_source_language(
+ shaderc_compile_options_t options, shaderc_source_language lang);
+
+// Sets the compiler mode to generate debug information in the output.
+void shaderc_compile_options_set_generate_debug_info(
+ shaderc_compile_options_t options);
+
+// Sets the compiler optimization level to the given level. Only the last one
+// takes effect if multiple calls of this function exist.
+void shaderc_compile_options_set_optimization_level(
+ shaderc_compile_options_t options, shaderc_optimization_level level);
+
+// Forces the GLSL language version and profile to a given pair. The version
+// number is the same as would appear in the #version annotation in the source.
+// Version and profile specified here overrides the #version annotation in the
+// source. Use profile: 'shaderc_profile_none' for GLSL versions that do not
+// define profiles, e.g. versions below 150.
+void shaderc_compile_options_set_forced_version_profile(
+ shaderc_compile_options_t options, int version, shaderc_profile profile);
+
+// Source text inclusion via #include is supported with a pair of callbacks
+// to an "includer" on the client side. The first callback processes an
+// inclusion request, and returns an include result. The includer owns
+// the contents of the result, and those contents must remain valid until the
+// second callback is invoked to release the result. Both callbacks take a
+// user_data argument to specify the client context.
+// To return an error, set the source_name to an empty string and put your
+// error message in content.
+
+// An include result.
+typedef struct shaderc_include_result {
+ // The name of the source file. The name should be fully resolved
+ // in the sense that it should be a unique name in the context of the
+ // includer. For example, if the includer maps source names to files in
+ // a filesystem, then this name should be the absolute path of the file.
+ // For a failed inclusion, this string is empty.
+ const char* source_name;
+ size_t source_name_length;
+ // The text contents of the source file in the normal case.
+ // For a failed inclusion, this contains the error message.
+ const char* content;
+ size_t content_length;
+ // User data to be passed along with this request.
+ void* user_data;
+} shaderc_include_result;
+
+// The kinds of include requests.
+enum shaderc_include_type {
+ shaderc_include_type_relative, // E.g. #include "source"
+ shaderc_include_type_standard // E.g. #include <source>
+};
+
+// An includer callback type for mapping an #include request to an include
+// result. The user_data parameter specifies the client context. The
+// requested_source parameter specifies the name of the source being requested.
+// The type parameter specifies the kind of inclusion request being made.
+// The requesting_source parameter specifies the name of the source containing
+// the #include request. The includer owns the result object and its contents,
+// and both must remain valid until the release callback is called on the result
+// object.
+typedef shaderc_include_result* (*shaderc_include_resolve_fn)(
+ void* user_data, const char* requested_source, int type,
+ const char* requesting_source, size_t include_depth);
+
+// An includer callback type for destroying an include result.
+typedef void (*shaderc_include_result_release_fn)(
+ void* user_data, shaderc_include_result* include_result);
+
+// Sets includer callback functions.
+void shaderc_compile_options_set_include_callbacks(
+ shaderc_compile_options_t options, shaderc_include_resolve_fn resolver,
+ shaderc_include_result_release_fn result_releaser, void* user_data);
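+
+// A minimal sketch of an includer that rejects every #include request, using
+// the error convention described above (empty source_name, message in content).
+// The function names are illustrative and not part of this API; calloc, free
+// and strlen come from <stdlib.h> and <string.h>.
+//
+//   shaderc_include_result* my_resolver(void* user_data,
+//                                       const char* requested_source, int type,
+//                                       const char* requesting_source,
+//                                       size_t include_depth) {
+//     shaderc_include_result* result = calloc(1, sizeof(*result));
+//     result->source_name = "";
+//     result->content = "#include is not supported";
+//     result->content_length = strlen(result->content);
+//     return result;
+//   }
+//
+//   void my_releaser(void* user_data, shaderc_include_result* include_result) {
+//     free(include_result);
+//   }
+//
+//   shaderc_compile_options_set_include_callbacks(options, my_resolver,
+//                                                 my_releaser, NULL);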
+
+// Sets the compiler mode to suppress warnings, overriding warnings-as-errors
+// mode. When both suppress-warnings and warnings-as-errors modes are
+// turned on, warning messages will be inhibited, and will not be emitted
+// as error messages.
+void shaderc_compile_options_set_suppress_warnings(
+ shaderc_compile_options_t options);
+
+// Sets the target shader environment, affecting which warnings or errors will
+// be issued. The version will be for distinguishing between different versions
+// of the target environment. "0" is the only supported version at this point
+void shaderc_compile_options_set_target_env(shaderc_compile_options_t options,
+ shaderc_target_env target,
+ uint32_t version);
+
+// Sets the compiler mode to treat all warnings as errors. Note the
+// suppress-warnings mode overrides this option, i.e. if both
+// warning-as-errors and suppress-warnings modes are set, warnings will not
+// be emitted as error messages.
+void shaderc_compile_options_set_warnings_as_errors(
+ shaderc_compile_options_t options);
+
+// Sets a resource limit.
+void shaderc_compile_options_set_limit(
+ shaderc_compile_options_t options, shaderc_limit limit, int value);
+
+// Sets whether the compiler should automatically assign bindings to uniforms
+// that aren't already explicitly bound in the shader source.
+void shaderc_compile_options_set_auto_bind_uniforms(
+ shaderc_compile_options_t options, bool auto_bind);
+
+// An opaque handle to the results of a call to any shaderc_compile_into_*()
+// function.
+typedef struct shaderc_compilation_result* shaderc_compilation_result_t;
+
+// Takes a GLSL source string and the associated shader kind, input file
+// name, compiles it according to the given additional_options. If the shader
+// kind is not set to a specified kind, but shaderc_glsl_infer_from_source,
+// the compiler will try to deduce the shader kind from the source
+// string and a failure in deducing will generate an error. Currently only
+// #pragma annotation is supported. If the shader kind is set to one of the
+// default shader kinds, the compiler will fall back to the default shader
+// kind in case it failed to deduce the shader kind from source string.
+// The input_file_name is a null-terminated string. It is used as a tag to
+// identify the source string in cases like emitting error messages. It
+// doesn't have to be a 'file name'.
+// The source string will be compiled into SPIR-V binary and a
+// shaderc_compilation_result will be returned to hold the results.
+// The entry_point_name null-terminated string defines the name of the entry
+// point to associate with this GLSL source. If the additional_options
+// parameter is not null, then the compilation is modified by any options
+// present. May be safely called from multiple threads without explicit
+// synchronization. If there was failure in allocating the compiler object,
+// null will be returned.
+shaderc_compilation_result_t shaderc_compile_into_spv(
+ const shaderc_compiler_t compiler, const char* source_text,
+ size_t source_text_size, shaderc_shader_kind shader_kind,
+ const char* input_file_name, const char* entry_point_name,
+ const shaderc_compile_options_t additional_options);
+
+// Like shaderc_compile_into_spv, but the result contains SPIR-V assembly text
+// instead of a SPIR-V binary module. The SPIR-V assembly syntax is as defined
+// by the SPIRV-Tools open source project.
+shaderc_compilation_result_t shaderc_compile_into_spv_assembly(
+ const shaderc_compiler_t compiler, const char* source_text,
+ size_t source_text_size, shaderc_shader_kind shader_kind,
+ const char* input_file_name, const char* entry_point_name,
+ const shaderc_compile_options_t additional_options);
+
+// Like shaderc_compile_into_spv, but the result contains preprocessed source
+// code instead of a SPIR-V binary module
+shaderc_compilation_result_t shaderc_compile_into_preprocessed_text(
+ const shaderc_compiler_t compiler, const char* source_text,
+ size_t source_text_size, shaderc_shader_kind shader_kind,
+ const char* input_file_name, const char* entry_point_name,
+ const shaderc_compile_options_t additional_options);
+
+// Takes an assembly string of the format defined in the SPIRV-Tools project
+// (https://github.com/KhronosGroup/SPIRV-Tools/blob/master/syntax.md),
+// assembles it into SPIR-V binary and a shaderc_compilation_result will be
+// returned to hold the results.
+// The assembling will pick options suitable for assembling specified in the
+// additional_options parameter.
+// May be safely called from multiple threads without explicit synchronization.
+// If there was failure in allocating the compiler object, null will be
+// returned.
+shaderc_compilation_result_t shaderc_assemble_into_spv(
+ const shaderc_compiler_t compiler, const char* source_assembly,
+ size_t source_assembly_size,
+ const shaderc_compile_options_t additional_options);
+
+// The following functions, operating on shaderc_compilation_result_t objects,
+// offer only the basic thread-safety guarantee.
+
+// Releases the resources held by the result object. It is invalid to use the
+// result object for any further operations.
+void shaderc_result_release(shaderc_compilation_result_t result);
+
+// Returns the number of bytes of the compilation output data in a result
+// object.
+size_t shaderc_result_get_length(const shaderc_compilation_result_t result);
+
+// Returns the number of warnings generated during the compilation.
+size_t shaderc_result_get_num_warnings(
+ const shaderc_compilation_result_t result);
+
+// Returns the number of errors generated during the compilation.
+size_t shaderc_result_get_num_errors(const shaderc_compilation_result_t result);
+
+// Returns the compilation status, indicating whether the compilation succeeded,
+// or failed due to some reasons, like invalid shader stage or compilation
+// errors.
+shaderc_compilation_status shaderc_result_get_compilation_status(
+ const shaderc_compilation_result_t);
+
+// Returns a pointer to the start of the compilation output data bytes, either
+// SPIR-V binary or char string. When the source string is compiled into SPIR-V
+// binary, this is guaranteed to be castable to a uint32_t*. If the result
+// contains assembly text or preprocessed source text, the pointer will point to
+// the resulting array of characters.
+const char* shaderc_result_get_bytes(const shaderc_compilation_result_t result);
+
+// Returns a null-terminated string that contains any error messages generated
+// during the compilation.
+const char* shaderc_result_get_error_message(
+ const shaderc_compilation_result_t result);
+
+// Provides the version & revision of the SPIR-V which will be produced
+void shaderc_get_spv_version(unsigned int* version, unsigned int* revision);
+
+// Parses the version and profile from a given null-terminated string
+// containing both version and profile, like: '450core'. Returns false if
+// the string can not be parsed. Returns true when the parsing succeeds. The
+// parsed version and profile are returned through arguments.
+bool shaderc_parse_version_profile(const char* str, int* version,
+ shaderc_profile* profile);
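+
+// For example (illustrative): shaderc_parse_version_profile("450core", &version,
+// &profile) returns true with version == 450 and profile == shaderc_profile_core.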
+
+#ifdef __cplusplus
+}
+#endif // __cplusplus
+
+#endif // SHADERC_SHADERC_H_
diff --git a/samples/thirdparty/vulkan-1.0.49.0/include/shaderc/shaderc.hpp b/samples/thirdparty/vulkan-1.0.49.0/include/shaderc/shaderc.hpp
new file mode 100644
index 0000000..9c5663e
--- /dev/null
+++ b/samples/thirdparty/vulkan-1.0.49.0/include/shaderc/shaderc.hpp
@@ -0,0 +1,501 @@
+// Copyright 2015 The Shaderc Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SHADERC_SHADERC_HPP_
+#define SHADERC_SHADERC_HPP_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "shaderc.h"
+
+namespace shaderc {
+// A CompilationResult contains the compiler output, compilation status,
+// and messages.
+//
+// The compiler output is stored as an array of elements and accessed
+// via random access iterators provided by cbegin() and cend(). The iterators
+// are contiguous in the sense of "Contiguous Iterators: A Refinement of
+// Random Access Iterators", Nevin Liber, C++ Library Evolution Working
+// Group Working Paper N3884.
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2014/n3884.pdf
+//
+// Methods begin() and end() are also provided to enable range-based for.
+// They are synonyms to cbegin() and cend(), respectively.
+template <typename OutputElementType>
+class CompilationResult {
+ public:
+ typedef OutputElementType element_type;
+ // The type used to describe the begin and end iterators on the
+ // compiler output.
+ typedef const OutputElementType* const_iterator;
+
+ // Upon creation, the CompilationResult takes ownership of the
+ // shaderc_compilation_result instance. During destruction of the
+ // CompilationResult, the shaderc_compilation_result will be released.
+ explicit CompilationResult(shaderc_compilation_result_t compilation_result)
+ : compilation_result_(compilation_result) {}
+ ~CompilationResult() { shaderc_result_release(compilation_result_); }
+
+ CompilationResult(CompilationResult&& other) {
+ compilation_result_ = other.compilation_result_;
+ other.compilation_result_ = nullptr;
+ }
+
+ // Returns any error message found during compilation.
+ std::string GetErrorMessage() const {
+ if (!compilation_result_) {
+ return "";
+ }
+ return shaderc_result_get_error_message(compilation_result_);
+ }
+
+ // Returns the compilation status, indicating whether the compilation
+ // succeeded, or failed due to some reasons, like invalid shader stage or
+ // compilation errors.
+ shaderc_compilation_status GetCompilationStatus() const {
+ if (!compilation_result_) {
+ return shaderc_compilation_status_null_result_object;
+ }
+ return shaderc_result_get_compilation_status(compilation_result_);
+ }
+
+ // Returns a random access (contiguous) iterator pointing to the start
+ // of the compilation output. It is valid for the lifetime of this object.
+ // If there is no compilation result, then returns nullptr.
+ const_iterator cbegin() const {
+ if (!compilation_result_) return nullptr;
+ return reinterpret_cast<const_iterator>(
+ shaderc_result_get_bytes(compilation_result_));
+ }
+
+ // Returns a random access (contiguous) iterator pointing to the end of
+ // the compilation output. It is valid for the lifetime of this object.
+ // If there is no compilation result, then returns nullptr.
+ const_iterator cend() const {
+ if (!compilation_result_) return nullptr;
+ return cbegin() +
+ shaderc_result_get_length(compilation_result_) /
+ sizeof(OutputElementType);
+ }
+
+ // Returns the same iterator as cbegin().
+ const_iterator begin() const { return cbegin(); }
+ // Returns the same iterator as cend().
+ const_iterator end() const { return cend(); }
+
+ // Returns the number of warnings generated during the compilation.
+ size_t GetNumWarnings() const {
+ if (!compilation_result_) {
+ return 0;
+ }
+ return shaderc_result_get_num_warnings(compilation_result_);
+ }
+
+ // Returns the number of errors generated during the compilation.
+ size_t GetNumErrors() const {
+ if (!compilation_result_) {
+ return 0;
+ }
+ return shaderc_result_get_num_errors(compilation_result_);
+ }
+
+ private:
+ CompilationResult(const CompilationResult& other) = delete;
+ CompilationResult& operator=(const CompilationResult& other) = delete;
+
+ shaderc_compilation_result_t compilation_result_;
+};
+
+// A compilation result for a SPIR-V binary module, which is an array
+// of uint32_t words.
+using SpvCompilationResult = CompilationResult<uint32_t>;
+// A compilation result in SPIR-V assembly syntax.
+using AssemblyCompilationResult = CompilationResult<char>;
+// Preprocessed source text.
+using PreprocessedSourceCompilationResult = CompilationResult<char>;
+
+// Contains any options that can have default values for a compilation.
+class CompileOptions {
+ public:
+ CompileOptions() { options_ = shaderc_compile_options_initialize(); }
+ ~CompileOptions() { shaderc_compile_options_release(options_); }
+ CompileOptions(const CompileOptions& other) {
+ options_ = shaderc_compile_options_clone(other.options_);
+ }
+ CompileOptions(CompileOptions&& other) {
+ options_ = other.options_;
+ other.options_ = nullptr;
+ }
+
+ // Adds a predefined macro to the compilation options. It behaves the same as
+ // shaderc_compile_options_add_macro_definition in shaderc.h.
+ void AddMacroDefinition(const char* name, size_t name_length,
+ const char* value, size_t value_length) {
+ shaderc_compile_options_add_macro_definition(options_, name, name_length,
+ value, value_length);
+ }
+
+ // Adds a valueless predefined macro to the compilation options.
+ void AddMacroDefinition(const std::string& name) {
+ AddMacroDefinition(name.c_str(), name.size(), nullptr, 0u);
+ }
+
+ // Adds a predefined macro to the compilation options.
+ void AddMacroDefinition(const std::string& name, const std::string& value) {
+ AddMacroDefinition(name.c_str(), name.size(), value.c_str(), value.size());
+ }
+
+ // Sets the compiler mode to generate debug information in the output.
+ void SetGenerateDebugInfo() {
+ shaderc_compile_options_set_generate_debug_info(options_);
+ }
+
+ // Sets the compiler optimization level to the given level. Only the last one
+ // takes effect if multiple calls of this function exist.
+ void SetOptimizationLevel(shaderc_optimization_level level) {
+ shaderc_compile_options_set_optimization_level(options_, level);
+ }
+
+ // A C++ version of the libshaderc includer interface.
+ class IncluderInterface {
+ public:
+ // Handles shaderc_include_resolver_fn callbacks.
+ virtual shaderc_include_result* GetInclude(const char* requested_source,
+ shaderc_include_type type,
+ const char* requesting_source,
+ size_t include_depth) = 0;
+
+ // Handles shaderc_include_result_release_fn callbacks.
+ virtual void ReleaseInclude(shaderc_include_result* data) = 0;
+ };
+
+ // Sets the includer instance for libshaderc to call during compilation, as
+ // described in shaderc_compile_options_set_include_callbacks(). Callbacks
+ // are routed to this includer's methods.
+ void SetIncluder(std::unique_ptr<IncluderInterface>&& includer) {
+ includer_ = std::move(includer);
+ shaderc_compile_options_set_include_callbacks(
+ options_,
+ [](void* user_data, const char* requested_source, int type,
+ const char* requesting_source, size_t include_depth) {
+ auto* includer = static_cast<IncluderInterface*>(user_data);
+ return includer->GetInclude(requested_source,
+ (shaderc_include_type)type,
+ requesting_source, include_depth);
+ },
+ [](void* user_data, shaderc_include_result* include_result) {
+ auto* includer = static_cast<IncluderInterface*>(user_data);
+ return includer->ReleaseInclude(include_result);
+ },
+ includer_.get());
+ }
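+
+  // A minimal sketch of a custom includer (illustrative only, not part of this
+  // header): it rejects every #include request using the error convention from
+  // shaderc.h, i.e. an empty source name and a message in the content field.
+  //
+  //   class NullIncluder : public CompileOptions::IncluderInterface {
+  //    public:
+  //     shaderc_include_result* GetInclude(const char* requested_source,
+  //                                        shaderc_include_type type,
+  //                                        const char* requesting_source,
+  //                                        size_t include_depth) override {
+  //       auto* result = new shaderc_include_result{};
+  //       result->source_name = "";
+  //       result->content = "#include is not supported";
+  //       result->content_length = 25;
+  //       return result;
+  //     }
+  //     void ReleaseInclude(shaderc_include_result* data) override { delete data; }
+  //   };
+  //
+  //   options.SetIncluder(std::unique_ptr<CompileOptions::IncluderInterface>(
+  //       new NullIncluder()));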
+
+ // Forces the GLSL language version and profile to a given pair. The version
+ // number is the same as would appear in the #version annotation in the
+ // source. Version and profile specified here overrides the #version
+ // annotation in the source. Use profile: 'shaderc_profile_none' for GLSL
+ // versions that do not define profiles, e.g. versions below 150.
+ void SetForcedVersionProfile(int version, shaderc_profile profile) {
+ shaderc_compile_options_set_forced_version_profile(options_, version,
+ profile);
+ }
+
+ // Sets the compiler mode to suppress warnings. Note this option overrides
+ // warnings-as-errors mode. When both suppress-warnings and warnings-as-errors
+ // modes are turned on, warning messages will be inhibited, and will not be
+  // emitted as error messages.
+ void SetSuppressWarnings() {
+ shaderc_compile_options_set_suppress_warnings(options_);
+ }
+
+ // Sets the source language. The default is GLSL.
+ void SetSourceLanguage(shaderc_source_language lang) {
+ shaderc_compile_options_set_source_language(options_, lang);
+ }
+
+ // Sets the target shader environment, affecting which warnings or errors will
+ // be issued.
+ // The version will be for distinguishing between different versions of the
+ // target environment.
+ // "0" is the only supported version at this point
+ void SetTargetEnvironment(shaderc_target_env target, uint32_t version) {
+ shaderc_compile_options_set_target_env(options_, target, version);
+ }
+
+ // Sets the compiler mode to make all warnings into errors. Note the
+ // suppress-warnings mode overrides this option, i.e. if both
+  // warnings-as-errors and suppress-warnings modes are set, warnings will not
+  // be emitted as error messages.
+ void SetWarningsAsErrors() {
+ shaderc_compile_options_set_warnings_as_errors(options_);
+ }
+
+ // Sets a resource limit.
+ void SetLimit(shaderc_limit limit, int value) {
+ shaderc_compile_options_set_limit(options_, limit, value);
+ }
+
+ // Sets whether the compiler should automatically assign bindings to uniforms
+ // that aren't already explicitly bound in the shader source.
+ void SetAutoBindUniforms(bool auto_bind) {
+ shaderc_compile_options_set_auto_bind_uniforms(options_, auto_bind);
+ }
+
+ private:
+ CompileOptions& operator=(const CompileOptions& other) = delete;
+ shaderc_compile_options_t options_;
+ std::unique_ptr<IncluderInterface> includer_;
+
+ friend class Compiler;
+};
+
+// The compilation context for compiling source to SPIR-V.
+class Compiler {
+ public:
+ Compiler() : compiler_(shaderc_compiler_initialize()) {}
+ ~Compiler() { shaderc_compiler_release(compiler_); }
+
+ Compiler(Compiler&& other) {
+ compiler_ = other.compiler_;
+ other.compiler_ = nullptr;
+ }
+
+ bool IsValid() const { return compiler_ != nullptr; }
+
+ // Compiles the given source GLSL and returns a SPIR-V binary module
+ // compilation result.
+ // The source_text parameter must be a valid pointer.
+ // The source_text_size parameter must be the length of the source text.
+ // The shader_kind parameter either forces the compilation to be done with a
+  // specified shader kind, or hints the compiler how to determine the exact
+  // shader kind. If the shader kind is set to shaderc_glsl_infer_from_source,
+  // the compiler will try to deduce the shader kind from the source string and
+  // a failure in this process will generate an error. Currently only #pragma
+ // annotation is supported. If the shader kind is set to one of the default
+ // shader kinds, the compiler will fall back to the specified default shader
+ // kind in case it failed to deduce the shader kind from the source string.
+  // The input_file_name is a null-terminated string. It is used as a tag to
+ // identify the source string in cases like emitting error messages. It
+ // doesn't have to be a 'file name'.
+ // The entry_point_name parameter is a null-terminated string specifying
+ // the entry point name for HLSL compilation. For GLSL compilation, the
+ // entry point name is assumed to be "main".
+ // The compilation is passed any options specified in the CompileOptions
+ // parameter.
+ // It is valid for the returned CompilationResult object to outlive this
+ // compiler object.
+ // Note when the options_ has disassembly mode or preprocessing only mode set
+ // on, the returned CompilationResult will hold a text string, instead of a
+ // SPIR-V binary generated with default options.
+ SpvCompilationResult CompileGlslToSpv(const char* source_text,
+ size_t source_text_size,
+ shaderc_shader_kind shader_kind,
+ const char* input_file_name,
+ const char* entry_point_name,
+ const CompileOptions& options) const {
+ shaderc_compilation_result_t compilation_result = shaderc_compile_into_spv(
+ compiler_, source_text, source_text_size, shader_kind, input_file_name,
+ entry_point_name, options.options_);
+ return SpvCompilationResult(compilation_result);
+ }
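+
+  // A minimal usage sketch (illustrative; the shader source, file name, and the
+  // use of <vector>/<iostream> are placeholders, and the std::string overload
+  // declared below is used for brevity):
+  //   shaderc::Compiler compiler;
+  //   shaderc::CompileOptions options;
+  //   shaderc::SpvCompilationResult result = compiler.CompileGlslToSpv(
+  //       "#version 450\nvoid main() {}", shaderc_glsl_vertex_shader,
+  //       "shader.vert", options);
+  //   if (result.GetCompilationStatus() != shaderc_compilation_status_success) {
+  //     std::cerr << result.GetErrorMessage();
+  //   }
+  //   std::vector<uint32_t> spirv(result.cbegin(), result.cend());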
+
+ // Compiles the given source shader and returns a SPIR-V binary module
+ // compilation result.
+ // Like the first CompileGlslToSpv method but assumes the entry point name
+ // is "main".
+ SpvCompilationResult CompileGlslToSpv(const char* source_text,
+ size_t source_text_size,
+ shaderc_shader_kind shader_kind,
+ const char* input_file_name,
+ const CompileOptions& options) const {
+ return CompileGlslToSpv(source_text, source_text_size, shader_kind,
+ input_file_name, "main", options);
+ }
+
+ // Compiles the given source GLSL and returns a SPIR-V binary module
+ // compilation result.
+ // Like the previous CompileGlslToSpv method but uses default options.
+ SpvCompilationResult CompileGlslToSpv(const char* source_text,
+ size_t source_text_size,
+ shaderc_shader_kind shader_kind,
+ const char* input_file_name) const {
+ shaderc_compilation_result_t compilation_result =
+ shaderc_compile_into_spv(compiler_, source_text, source_text_size,
+ shader_kind, input_file_name, "main", nullptr);
+ return SpvCompilationResult(compilation_result);
+ }
+
+ // Compiles the given source shader and returns a SPIR-V binary module
+ // compilation result.
+ // Like the first CompileGlslToSpv method but the source is provided as
+ // a std::string, and we assume the entry point is "main".
+ SpvCompilationResult CompileGlslToSpv(const std::string& source_text,
+ shaderc_shader_kind shader_kind,
+ const char* input_file_name,
+ const CompileOptions& options) const {
+ return CompileGlslToSpv(source_text.data(), source_text.size(), shader_kind,
+ input_file_name, options);
+ }
+
+ // Compiles the given source shader and returns a SPIR-V binary module
+ // compilation result.
+ // Like the first CompileGlslToSpv method but the source is provided as
+ // a std::string.
+ SpvCompilationResult CompileGlslToSpv(const std::string& source_text,
+ shaderc_shader_kind shader_kind,
+ const char* input_file_name,
+ const char* entry_point_name,
+ const CompileOptions& options) const {
+ return CompileGlslToSpv(source_text.data(), source_text.size(), shader_kind,
+ input_file_name, entry_point_name, options);
+ }
+
+ // Compiles the given source GLSL and returns a SPIR-V binary module
+ // compilation result.
+ // Like the previous CompileGlslToSpv method but assumes the entry point
+ // name is "main".
+ SpvCompilationResult CompileGlslToSpv(const std::string& source_text,
+ shaderc_shader_kind shader_kind,
+ const char* input_file_name) const {
+ return CompileGlslToSpv(source_text.data(), source_text.size(), shader_kind,
+ input_file_name);
+ }
+
+ // Assembles the given SPIR-V assembly and returns a SPIR-V binary module
+ // compilation result.
+ // The assembly should follow the syntax defined in the SPIRV-Tools project
+ // (https://github.com/KhronosGroup/SPIRV-Tools/blob/master/syntax.md).
+ // It is valid for the returned CompilationResult object to outlive this
+ // compiler object.
+ // The assembling will pick options suitable for assembling specified in the
+ // CompileOptions parameter.
+ SpvCompilationResult AssembleToSpv(const char* source_assembly,
+ size_t source_assembly_size,
+ const CompileOptions& options) const {
+ return SpvCompilationResult(shaderc_assemble_into_spv(
+ compiler_, source_assembly, source_assembly_size, options.options_));
+ }
+
+ // Assembles the given SPIR-V assembly and returns a SPIR-V binary module
+ // compilation result.
+ // Like the first AssembleToSpv method but uses the default compiler options.
+ SpvCompilationResult AssembleToSpv(const char* source_assembly,
+ size_t source_assembly_size) const {
+ return SpvCompilationResult(shaderc_assemble_into_spv(
+ compiler_, source_assembly, source_assembly_size, nullptr));
+ }
+
+ // Assembles the given SPIR-V assembly and returns a SPIR-V binary module
+ // compilation result.
+ // Like the first AssembleToSpv method but the source is provided as a
+ // std::string.
+ SpvCompilationResult AssembleToSpv(const std::string& source_assembly,
+ const CompileOptions& options) const {
+ return SpvCompilationResult(
+ shaderc_assemble_into_spv(compiler_, source_assembly.data(),
+ source_assembly.size(), options.options_));
+ }
+
+ // Assembles the given SPIR-V assembly and returns a SPIR-V binary module
+ // compilation result.
+ // Like the first AssembleToSpv method but the source is provided as a
+ // std::string and also uses default compiler options.
+ SpvCompilationResult AssembleToSpv(const std::string& source_assembly) const {
+ return SpvCompilationResult(shaderc_assemble_into_spv(
+ compiler_, source_assembly.data(), source_assembly.size(), nullptr));
+ }
+
+ // Compiles the given source GLSL and returns the SPIR-V assembly text
+ // compilation result.
+ // Options are similar to the first CompileToSpv method.
+ AssemblyCompilationResult CompileGlslToSpvAssembly(
+ const char* source_text, size_t source_text_size,
+ shaderc_shader_kind shader_kind, const char* input_file_name,
+ const char* entry_point_name, const CompileOptions& options) const {
+ shaderc_compilation_result_t compilation_result =
+ shaderc_compile_into_spv_assembly(
+ compiler_, source_text, source_text_size, shader_kind,
+ input_file_name, entry_point_name, options.options_);
+ return AssemblyCompilationResult(compilation_result);
+ }
+
+ // Compiles the given source GLSL and returns the SPIR-V assembly text
+ // compilation result.
+  // Similar to the previous method, but assumes the entry point name is "main".
+ AssemblyCompilationResult CompileGlslToSpvAssembly(
+ const char* source_text, size_t source_text_size,
+ shaderc_shader_kind shader_kind, const char* input_file_name,
+ const CompileOptions& options) const {
+ return CompileGlslToSpvAssembly(source_text, source_text_size, shader_kind,
+ input_file_name, "main", options);
+ }
+
+ // Compiles the given source GLSL and returns the SPIR-V assembly text
+ // result. Like the first CompileGlslToSpvAssembly method but the source
+ // is provided as a std::string. Options are otherwise similar to
+ // the first CompileToSpv method.
+ AssemblyCompilationResult CompileGlslToSpvAssembly(
+ const std::string& source_text, shaderc_shader_kind shader_kind,
+ const char* input_file_name, const char* entry_point_name,
+ const CompileOptions& options) const {
+ return CompileGlslToSpvAssembly(source_text.data(), source_text.size(),
+ shader_kind, input_file_name,
+ entry_point_name, options);
+ }
+
+ // Compiles the given source GLSL and returns the SPIR-V assembly text
+ // result. Like the previous CompileGlslToSpvAssembly method but assumes
+ // the entry point name is "main".
+ AssemblyCompilationResult CompileGlslToSpvAssembly(
+ const std::string& source_text, shaderc_shader_kind shader_kind,
+ const char* input_file_name, const CompileOptions& options) const {
+ return CompileGlslToSpvAssembly(source_text, shader_kind, input_file_name,
+ "main", options);
+ }
+
+ // Preprocesses the given source GLSL and returns the preprocessed
+ // source text as a compilation result.
+ // Options are similar to the first CompileToSpv method.
+ PreprocessedSourceCompilationResult PreprocessGlsl(
+ const char* source_text, size_t source_text_size,
+ shaderc_shader_kind shader_kind, const char* input_file_name,
+ const CompileOptions& options) const {
+ shaderc_compilation_result_t compilation_result =
+ shaderc_compile_into_preprocessed_text(
+ compiler_, source_text, source_text_size, shader_kind,
+ input_file_name, "main", options.options_);
+ return PreprocessedSourceCompilationResult(compilation_result);
+ }
+
+ // Preprocesses the given source GLSL and returns text result. Like the first
+ // PreprocessGlsl method but the source is provided as a std::string.
+ // Options are otherwise similar to the first CompileToSpv method.
+ PreprocessedSourceCompilationResult PreprocessGlsl(
+ const std::string& source_text, shaderc_shader_kind shader_kind,
+ const char* input_file_name, const CompileOptions& options) const {
+ return PreprocessGlsl(source_text.data(), source_text.size(), shader_kind,
+ input_file_name, options);
+ }
+
+ private:
+ Compiler(const Compiler&) = delete;
+ Compiler& operator=(const Compiler& other) = delete;
+
+ shaderc_compiler_t compiler_;
+};
+} // namespace shaderc
+
+#endif // SHADERC_SHADERC_HPP_
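
For readers skimming the new shaderc C++ wrapper above, here is a minimal, hypothetical usage sketch of the Compiler methods documented in that header (CompileGlslToSpvAssembly with the default "main" entry point, plus the result accessors GetCompilationStatus/GetErrorMessage and the character iterators declared earlier in shaderc.hpp). The include path and shader source are assumptions for illustration; this snippet is not part of the SDK diff.

// Sketch: compile a trivial GLSL fragment shader to SPIR-V assembly text
// using the shaderc::Compiler wrapper added above (illustrative only).
#include <iostream>
#include <string>
#include "shaderc/shaderc.hpp"

int main() {
  const std::string source =
      "#version 450\n"
      "layout(location = 0) out vec4 color;\n"
      "void main() { color = vec4(1.0); }\n";

  shaderc::Compiler compiler;
  shaderc::CompileOptions options;

  // This overload assumes the entry point is "main" (see the header comments).
  shaderc::AssemblyCompilationResult result = compiler.CompileGlslToSpvAssembly(
      source, shaderc_glsl_fragment_shader, "shader.frag", options);

  if (result.GetCompilationStatus() != shaderc_compilation_status_success) {
    std::cerr << result.GetErrorMessage();
    return 1;
  }

  // The assembly result iterates over characters of the SPIR-V assembly text.
  std::string assembly(result.cbegin(), result.cend());
  std::cout << assembly;
  return 0;
}

PreprocessGlsl follows the same pattern, returning a PreprocessedSourceCompilationResult whose contents are the preprocessed GLSL text rather than SPIR-V assembly.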
diff --git a/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/GLSL.std.450.h b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/GLSL.std.450.h
new file mode 100644
index 0000000..54cc00e
--- /dev/null
+++ b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/GLSL.std.450.h
@@ -0,0 +1,131 @@
+/*
+** Copyright (c) 2014-2016 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+#ifndef GLSLstd450_H
+#define GLSLstd450_H
+
+static const int GLSLstd450Version = 100;
+static const int GLSLstd450Revision = 3;
+
+enum GLSLstd450 {
+ GLSLstd450Bad = 0, // Don't use
+
+ GLSLstd450Round = 1,
+ GLSLstd450RoundEven = 2,
+ GLSLstd450Trunc = 3,
+ GLSLstd450FAbs = 4,
+ GLSLstd450SAbs = 5,
+ GLSLstd450FSign = 6,
+ GLSLstd450SSign = 7,
+ GLSLstd450Floor = 8,
+ GLSLstd450Ceil = 9,
+ GLSLstd450Fract = 10,
+
+ GLSLstd450Radians = 11,
+ GLSLstd450Degrees = 12,
+ GLSLstd450Sin = 13,
+ GLSLstd450Cos = 14,
+ GLSLstd450Tan = 15,
+ GLSLstd450Asin = 16,
+ GLSLstd450Acos = 17,
+ GLSLstd450Atan = 18,
+ GLSLstd450Sinh = 19,
+ GLSLstd450Cosh = 20,
+ GLSLstd450Tanh = 21,
+ GLSLstd450Asinh = 22,
+ GLSLstd450Acosh = 23,
+ GLSLstd450Atanh = 24,
+ GLSLstd450Atan2 = 25,
+
+ GLSLstd450Pow = 26,
+ GLSLstd450Exp = 27,
+ GLSLstd450Log = 28,
+ GLSLstd450Exp2 = 29,
+ GLSLstd450Log2 = 30,
+ GLSLstd450Sqrt = 31,
+ GLSLstd450InverseSqrt = 32,
+
+ GLSLstd450Determinant = 33,
+ GLSLstd450MatrixInverse = 34,
+
+ GLSLstd450Modf = 35, // second operand needs an OpVariable to write to
+ GLSLstd450ModfStruct = 36, // no OpVariable operand
+ GLSLstd450FMin = 37,
+ GLSLstd450UMin = 38,
+ GLSLstd450SMin = 39,
+ GLSLstd450FMax = 40,
+ GLSLstd450UMax = 41,
+ GLSLstd450SMax = 42,
+ GLSLstd450FClamp = 43,
+ GLSLstd450UClamp = 44,
+ GLSLstd450SClamp = 45,
+ GLSLstd450FMix = 46,
+ GLSLstd450IMix = 47, // Reserved
+ GLSLstd450Step = 48,
+ GLSLstd450SmoothStep = 49,
+
+ GLSLstd450Fma = 50,
+ GLSLstd450Frexp = 51, // second operand needs an OpVariable to write to
+ GLSLstd450FrexpStruct = 52, // no OpVariable operand
+ GLSLstd450Ldexp = 53,
+
+ GLSLstd450PackSnorm4x8 = 54,
+ GLSLstd450PackUnorm4x8 = 55,
+ GLSLstd450PackSnorm2x16 = 56,
+ GLSLstd450PackUnorm2x16 = 57,
+ GLSLstd450PackHalf2x16 = 58,
+ GLSLstd450PackDouble2x32 = 59,
+ GLSLstd450UnpackSnorm2x16 = 60,
+ GLSLstd450UnpackUnorm2x16 = 61,
+ GLSLstd450UnpackHalf2x16 = 62,
+ GLSLstd450UnpackSnorm4x8 = 63,
+ GLSLstd450UnpackUnorm4x8 = 64,
+ GLSLstd450UnpackDouble2x32 = 65,
+
+ GLSLstd450Length = 66,
+ GLSLstd450Distance = 67,
+ GLSLstd450Cross = 68,
+ GLSLstd450Normalize = 69,
+ GLSLstd450FaceForward = 70,
+ GLSLstd450Reflect = 71,
+ GLSLstd450Refract = 72,
+
+ GLSLstd450FindILsb = 73,
+ GLSLstd450FindSMsb = 74,
+ GLSLstd450FindUMsb = 75,
+
+ GLSLstd450InterpolateAtCentroid = 76,
+ GLSLstd450InterpolateAtSample = 77,
+ GLSLstd450InterpolateAtOffset = 78,
+
+ GLSLstd450NMin = 79,
+ GLSLstd450NMax = 80,
+ GLSLstd450NClamp = 81,
+
+ GLSLstd450Count
+};
+
+#endif // #ifndef GLSLstd450_H
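
The GLSLstd450 enum above supplies the literal instruction numbers used by OpExtInst when a module imports the "GLSL.std.450" extended instruction set. As a hypothetical sketch (the result/type/operand ids are placeholders, not taken from the diff; include paths assume the layout added by this change), a fused multiply-add could be encoded as raw instruction words like this:

// Sketch: encoding "%r = OpExtInst %f32 %glsl450 Fma %a %b %c" as raw words,
// using GLSLstd450Fma from the header above (ids are hypothetical placeholders).
#include <cstdint>
#include <vector>
#include "vulkan/GLSL.std.450.h"
#include "vulkan/spirv.h"

std::vector<uint32_t> EncodeFma(uint32_t f32_type, uint32_t result,
                                uint32_t glsl450_set, uint32_t a, uint32_t b,
                                uint32_t c) {
  const uint32_t word_count = 8;  // opcode word + 7 operand words
  return {
      (word_count << SpvWordCountShift) | SpvOpExtInst,  // first word
      f32_type, result, glsl450_set,
      static_cast<uint32_t>(GLSLstd450Fma),  // literal extended instruction
      a, b, c};
}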
diff --git a/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/spirv.h b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/spirv.h
new file mode 100644
index 0000000..65a50ef
--- /dev/null
+++ b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/spirv.h
@@ -0,0 +1,971 @@
+/*
+** Copyright (c) 2014-2017 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+/*
+** This header is automatically generated by the same tool that creates
+** the Binary Section of the SPIR-V specification.
+*/
+
+/*
+** Enumeration tokens for SPIR-V, in various styles:
+** C, C++, C++11, JSON, Lua, Python
+**
+** - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL
+** - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL
+** - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL
+** - Lua will use tables, e.g.: spv.SourceLanguage.GLSL
+** - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL']
+**
+** Some tokens act like mask values, which can be OR'd together,
+** while others are mutually exclusive. The mask-like ones have
+** "Mask" in their name, and a parallel enum that has the shift
+** amount (1 << x) for each corresponding enumerant.
+*/
+
+#ifndef spirv_H
+#define spirv_H
+
+typedef unsigned int SpvId;
+
+#define SPV_VERSION 0x10100
+#define SPV_REVISION 6
+
+static const unsigned int SpvMagicNumber = 0x07230203;
+static const unsigned int SpvVersion = 0x00010100;
+static const unsigned int SpvRevision = 6;
+static const unsigned int SpvOpCodeMask = 0xffff;
+static const unsigned int SpvWordCountShift = 16;
+
+typedef enum SpvSourceLanguage_ {
+ SpvSourceLanguageUnknown = 0,
+ SpvSourceLanguageESSL = 1,
+ SpvSourceLanguageGLSL = 2,
+ SpvSourceLanguageOpenCL_C = 3,
+ SpvSourceLanguageOpenCL_CPP = 4,
+ SpvSourceLanguageHLSL = 5,
+ SpvSourceLanguageMax = 0x7fffffff,
+} SpvSourceLanguage;
+
+typedef enum SpvExecutionModel_ {
+ SpvExecutionModelVertex = 0,
+ SpvExecutionModelTessellationControl = 1,
+ SpvExecutionModelTessellationEvaluation = 2,
+ SpvExecutionModelGeometry = 3,
+ SpvExecutionModelFragment = 4,
+ SpvExecutionModelGLCompute = 5,
+ SpvExecutionModelKernel = 6,
+ SpvExecutionModelMax = 0x7fffffff,
+} SpvExecutionModel;
+
+typedef enum SpvAddressingModel_ {
+ SpvAddressingModelLogical = 0,
+ SpvAddressingModelPhysical32 = 1,
+ SpvAddressingModelPhysical64 = 2,
+ SpvAddressingModelMax = 0x7fffffff,
+} SpvAddressingModel;
+
+typedef enum SpvMemoryModel_ {
+ SpvMemoryModelSimple = 0,
+ SpvMemoryModelGLSL450 = 1,
+ SpvMemoryModelOpenCL = 2,
+ SpvMemoryModelMax = 0x7fffffff,
+} SpvMemoryModel;
+
+typedef enum SpvExecutionMode_ {
+ SpvExecutionModeInvocations = 0,
+ SpvExecutionModeSpacingEqual = 1,
+ SpvExecutionModeSpacingFractionalEven = 2,
+ SpvExecutionModeSpacingFractionalOdd = 3,
+ SpvExecutionModeVertexOrderCw = 4,
+ SpvExecutionModeVertexOrderCcw = 5,
+ SpvExecutionModePixelCenterInteger = 6,
+ SpvExecutionModeOriginUpperLeft = 7,
+ SpvExecutionModeOriginLowerLeft = 8,
+ SpvExecutionModeEarlyFragmentTests = 9,
+ SpvExecutionModePointMode = 10,
+ SpvExecutionModeXfb = 11,
+ SpvExecutionModeDepthReplacing = 12,
+ SpvExecutionModeDepthGreater = 14,
+ SpvExecutionModeDepthLess = 15,
+ SpvExecutionModeDepthUnchanged = 16,
+ SpvExecutionModeLocalSize = 17,
+ SpvExecutionModeLocalSizeHint = 18,
+ SpvExecutionModeInputPoints = 19,
+ SpvExecutionModeInputLines = 20,
+ SpvExecutionModeInputLinesAdjacency = 21,
+ SpvExecutionModeTriangles = 22,
+ SpvExecutionModeInputTrianglesAdjacency = 23,
+ SpvExecutionModeQuads = 24,
+ SpvExecutionModeIsolines = 25,
+ SpvExecutionModeOutputVertices = 26,
+ SpvExecutionModeOutputPoints = 27,
+ SpvExecutionModeOutputLineStrip = 28,
+ SpvExecutionModeOutputTriangleStrip = 29,
+ SpvExecutionModeVecTypeHint = 30,
+ SpvExecutionModeContractionOff = 31,
+ SpvExecutionModeInitializer = 33,
+ SpvExecutionModeFinalizer = 34,
+ SpvExecutionModeSubgroupSize = 35,
+ SpvExecutionModeSubgroupsPerWorkgroup = 36,
+ SpvExecutionModeMax = 0x7fffffff,
+} SpvExecutionMode;
+
+typedef enum SpvStorageClass_ {
+ SpvStorageClassUniformConstant = 0,
+ SpvStorageClassInput = 1,
+ SpvStorageClassUniform = 2,
+ SpvStorageClassOutput = 3,
+ SpvStorageClassWorkgroup = 4,
+ SpvStorageClassCrossWorkgroup = 5,
+ SpvStorageClassPrivate = 6,
+ SpvStorageClassFunction = 7,
+ SpvStorageClassGeneric = 8,
+ SpvStorageClassPushConstant = 9,
+ SpvStorageClassAtomicCounter = 10,
+ SpvStorageClassImage = 11,
+ SpvStorageClassStorageBuffer = 12,
+ SpvStorageClassMax = 0x7fffffff,
+} SpvStorageClass;
+
+typedef enum SpvDim_ {
+ SpvDim1D = 0,
+ SpvDim2D = 1,
+ SpvDim3D = 2,
+ SpvDimCube = 3,
+ SpvDimRect = 4,
+ SpvDimBuffer = 5,
+ SpvDimSubpassData = 6,
+ SpvDimMax = 0x7fffffff,
+} SpvDim;
+
+typedef enum SpvSamplerAddressingMode_ {
+ SpvSamplerAddressingModeNone = 0,
+ SpvSamplerAddressingModeClampToEdge = 1,
+ SpvSamplerAddressingModeClamp = 2,
+ SpvSamplerAddressingModeRepeat = 3,
+ SpvSamplerAddressingModeRepeatMirrored = 4,
+ SpvSamplerAddressingModeMax = 0x7fffffff,
+} SpvSamplerAddressingMode;
+
+typedef enum SpvSamplerFilterMode_ {
+ SpvSamplerFilterModeNearest = 0,
+ SpvSamplerFilterModeLinear = 1,
+ SpvSamplerFilterModeMax = 0x7fffffff,
+} SpvSamplerFilterMode;
+
+typedef enum SpvImageFormat_ {
+ SpvImageFormatUnknown = 0,
+ SpvImageFormatRgba32f = 1,
+ SpvImageFormatRgba16f = 2,
+ SpvImageFormatR32f = 3,
+ SpvImageFormatRgba8 = 4,
+ SpvImageFormatRgba8Snorm = 5,
+ SpvImageFormatRg32f = 6,
+ SpvImageFormatRg16f = 7,
+ SpvImageFormatR11fG11fB10f = 8,
+ SpvImageFormatR16f = 9,
+ SpvImageFormatRgba16 = 10,
+ SpvImageFormatRgb10A2 = 11,
+ SpvImageFormatRg16 = 12,
+ SpvImageFormatRg8 = 13,
+ SpvImageFormatR16 = 14,
+ SpvImageFormatR8 = 15,
+ SpvImageFormatRgba16Snorm = 16,
+ SpvImageFormatRg16Snorm = 17,
+ SpvImageFormatRg8Snorm = 18,
+ SpvImageFormatR16Snorm = 19,
+ SpvImageFormatR8Snorm = 20,
+ SpvImageFormatRgba32i = 21,
+ SpvImageFormatRgba16i = 22,
+ SpvImageFormatRgba8i = 23,
+ SpvImageFormatR32i = 24,
+ SpvImageFormatRg32i = 25,
+ SpvImageFormatRg16i = 26,
+ SpvImageFormatRg8i = 27,
+ SpvImageFormatR16i = 28,
+ SpvImageFormatR8i = 29,
+ SpvImageFormatRgba32ui = 30,
+ SpvImageFormatRgba16ui = 31,
+ SpvImageFormatRgba8ui = 32,
+ SpvImageFormatR32ui = 33,
+ SpvImageFormatRgb10a2ui = 34,
+ SpvImageFormatRg32ui = 35,
+ SpvImageFormatRg16ui = 36,
+ SpvImageFormatRg8ui = 37,
+ SpvImageFormatR16ui = 38,
+ SpvImageFormatR8ui = 39,
+ SpvImageFormatMax = 0x7fffffff,
+} SpvImageFormat;
+
+typedef enum SpvImageChannelOrder_ {
+ SpvImageChannelOrderR = 0,
+ SpvImageChannelOrderA = 1,
+ SpvImageChannelOrderRG = 2,
+ SpvImageChannelOrderRA = 3,
+ SpvImageChannelOrderRGB = 4,
+ SpvImageChannelOrderRGBA = 5,
+ SpvImageChannelOrderBGRA = 6,
+ SpvImageChannelOrderARGB = 7,
+ SpvImageChannelOrderIntensity = 8,
+ SpvImageChannelOrderLuminance = 9,
+ SpvImageChannelOrderRx = 10,
+ SpvImageChannelOrderRGx = 11,
+ SpvImageChannelOrderRGBx = 12,
+ SpvImageChannelOrderDepth = 13,
+ SpvImageChannelOrderDepthStencil = 14,
+ SpvImageChannelOrdersRGB = 15,
+ SpvImageChannelOrdersRGBx = 16,
+ SpvImageChannelOrdersRGBA = 17,
+ SpvImageChannelOrdersBGRA = 18,
+ SpvImageChannelOrderABGR = 19,
+ SpvImageChannelOrderMax = 0x7fffffff,
+} SpvImageChannelOrder;
+
+typedef enum SpvImageChannelDataType_ {
+ SpvImageChannelDataTypeSnormInt8 = 0,
+ SpvImageChannelDataTypeSnormInt16 = 1,
+ SpvImageChannelDataTypeUnormInt8 = 2,
+ SpvImageChannelDataTypeUnormInt16 = 3,
+ SpvImageChannelDataTypeUnormShort565 = 4,
+ SpvImageChannelDataTypeUnormShort555 = 5,
+ SpvImageChannelDataTypeUnormInt101010 = 6,
+ SpvImageChannelDataTypeSignedInt8 = 7,
+ SpvImageChannelDataTypeSignedInt16 = 8,
+ SpvImageChannelDataTypeSignedInt32 = 9,
+ SpvImageChannelDataTypeUnsignedInt8 = 10,
+ SpvImageChannelDataTypeUnsignedInt16 = 11,
+ SpvImageChannelDataTypeUnsignedInt32 = 12,
+ SpvImageChannelDataTypeHalfFloat = 13,
+ SpvImageChannelDataTypeFloat = 14,
+ SpvImageChannelDataTypeUnormInt24 = 15,
+ SpvImageChannelDataTypeUnormInt101010_2 = 16,
+ SpvImageChannelDataTypeMax = 0x7fffffff,
+} SpvImageChannelDataType;
+
+typedef enum SpvImageOperandsShift_ {
+ SpvImageOperandsBiasShift = 0,
+ SpvImageOperandsLodShift = 1,
+ SpvImageOperandsGradShift = 2,
+ SpvImageOperandsConstOffsetShift = 3,
+ SpvImageOperandsOffsetShift = 4,
+ SpvImageOperandsConstOffsetsShift = 5,
+ SpvImageOperandsSampleShift = 6,
+ SpvImageOperandsMinLodShift = 7,
+ SpvImageOperandsMax = 0x7fffffff,
+} SpvImageOperandsShift;
+
+typedef enum SpvImageOperandsMask_ {
+ SpvImageOperandsMaskNone = 0,
+ SpvImageOperandsBiasMask = 0x00000001,
+ SpvImageOperandsLodMask = 0x00000002,
+ SpvImageOperandsGradMask = 0x00000004,
+ SpvImageOperandsConstOffsetMask = 0x00000008,
+ SpvImageOperandsOffsetMask = 0x00000010,
+ SpvImageOperandsConstOffsetsMask = 0x00000020,
+ SpvImageOperandsSampleMask = 0x00000040,
+ SpvImageOperandsMinLodMask = 0x00000080,
+} SpvImageOperandsMask;
+
+typedef enum SpvFPFastMathModeShift_ {
+ SpvFPFastMathModeNotNaNShift = 0,
+ SpvFPFastMathModeNotInfShift = 1,
+ SpvFPFastMathModeNSZShift = 2,
+ SpvFPFastMathModeAllowRecipShift = 3,
+ SpvFPFastMathModeFastShift = 4,
+ SpvFPFastMathModeMax = 0x7fffffff,
+} SpvFPFastMathModeShift;
+
+typedef enum SpvFPFastMathModeMask_ {
+ SpvFPFastMathModeMaskNone = 0,
+ SpvFPFastMathModeNotNaNMask = 0x00000001,
+ SpvFPFastMathModeNotInfMask = 0x00000002,
+ SpvFPFastMathModeNSZMask = 0x00000004,
+ SpvFPFastMathModeAllowRecipMask = 0x00000008,
+ SpvFPFastMathModeFastMask = 0x00000010,
+} SpvFPFastMathModeMask;
+
+typedef enum SpvFPRoundingMode_ {
+ SpvFPRoundingModeRTE = 0,
+ SpvFPRoundingModeRTZ = 1,
+ SpvFPRoundingModeRTP = 2,
+ SpvFPRoundingModeRTN = 3,
+ SpvFPRoundingModeMax = 0x7fffffff,
+} SpvFPRoundingMode;
+
+typedef enum SpvLinkageType_ {
+ SpvLinkageTypeExport = 0,
+ SpvLinkageTypeImport = 1,
+ SpvLinkageTypeMax = 0x7fffffff,
+} SpvLinkageType;
+
+typedef enum SpvAccessQualifier_ {
+ SpvAccessQualifierReadOnly = 0,
+ SpvAccessQualifierWriteOnly = 1,
+ SpvAccessQualifierReadWrite = 2,
+ SpvAccessQualifierMax = 0x7fffffff,
+} SpvAccessQualifier;
+
+typedef enum SpvFunctionParameterAttribute_ {
+ SpvFunctionParameterAttributeZext = 0,
+ SpvFunctionParameterAttributeSext = 1,
+ SpvFunctionParameterAttributeByVal = 2,
+ SpvFunctionParameterAttributeSret = 3,
+ SpvFunctionParameterAttributeNoAlias = 4,
+ SpvFunctionParameterAttributeNoCapture = 5,
+ SpvFunctionParameterAttributeNoWrite = 6,
+ SpvFunctionParameterAttributeNoReadWrite = 7,
+ SpvFunctionParameterAttributeMax = 0x7fffffff,
+} SpvFunctionParameterAttribute;
+
+typedef enum SpvDecoration_ {
+ SpvDecorationRelaxedPrecision = 0,
+ SpvDecorationSpecId = 1,
+ SpvDecorationBlock = 2,
+ SpvDecorationBufferBlock = 3,
+ SpvDecorationRowMajor = 4,
+ SpvDecorationColMajor = 5,
+ SpvDecorationArrayStride = 6,
+ SpvDecorationMatrixStride = 7,
+ SpvDecorationGLSLShared = 8,
+ SpvDecorationGLSLPacked = 9,
+ SpvDecorationCPacked = 10,
+ SpvDecorationBuiltIn = 11,
+ SpvDecorationNoPerspective = 13,
+ SpvDecorationFlat = 14,
+ SpvDecorationPatch = 15,
+ SpvDecorationCentroid = 16,
+ SpvDecorationSample = 17,
+ SpvDecorationInvariant = 18,
+ SpvDecorationRestrict = 19,
+ SpvDecorationAliased = 20,
+ SpvDecorationVolatile = 21,
+ SpvDecorationConstant = 22,
+ SpvDecorationCoherent = 23,
+ SpvDecorationNonWritable = 24,
+ SpvDecorationNonReadable = 25,
+ SpvDecorationUniform = 26,
+ SpvDecorationSaturatedConversion = 28,
+ SpvDecorationStream = 29,
+ SpvDecorationLocation = 30,
+ SpvDecorationComponent = 31,
+ SpvDecorationIndex = 32,
+ SpvDecorationBinding = 33,
+ SpvDecorationDescriptorSet = 34,
+ SpvDecorationOffset = 35,
+ SpvDecorationXfbBuffer = 36,
+ SpvDecorationXfbStride = 37,
+ SpvDecorationFuncParamAttr = 38,
+ SpvDecorationFPRoundingMode = 39,
+ SpvDecorationFPFastMathMode = 40,
+ SpvDecorationLinkageAttributes = 41,
+ SpvDecorationNoContraction = 42,
+ SpvDecorationInputAttachmentIndex = 43,
+ SpvDecorationAlignment = 44,
+ SpvDecorationMaxByteOffset = 45,
+ SpvDecorationOverrideCoverageNV = 5248,
+ SpvDecorationPassthroughNV = 5250,
+ SpvDecorationViewportRelativeNV = 5252,
+ SpvDecorationSecondaryViewportRelativeNV = 5256,
+ SpvDecorationMax = 0x7fffffff,
+} SpvDecoration;
+
+typedef enum SpvBuiltIn_ {
+ SpvBuiltInPosition = 0,
+ SpvBuiltInPointSize = 1,
+ SpvBuiltInClipDistance = 3,
+ SpvBuiltInCullDistance = 4,
+ SpvBuiltInVertexId = 5,
+ SpvBuiltInInstanceId = 6,
+ SpvBuiltInPrimitiveId = 7,
+ SpvBuiltInInvocationId = 8,
+ SpvBuiltInLayer = 9,
+ SpvBuiltInViewportIndex = 10,
+ SpvBuiltInTessLevelOuter = 11,
+ SpvBuiltInTessLevelInner = 12,
+ SpvBuiltInTessCoord = 13,
+ SpvBuiltInPatchVertices = 14,
+ SpvBuiltInFragCoord = 15,
+ SpvBuiltInPointCoord = 16,
+ SpvBuiltInFrontFacing = 17,
+ SpvBuiltInSampleId = 18,
+ SpvBuiltInSamplePosition = 19,
+ SpvBuiltInSampleMask = 20,
+ SpvBuiltInFragDepth = 22,
+ SpvBuiltInHelperInvocation = 23,
+ SpvBuiltInNumWorkgroups = 24,
+ SpvBuiltInWorkgroupSize = 25,
+ SpvBuiltInWorkgroupId = 26,
+ SpvBuiltInLocalInvocationId = 27,
+ SpvBuiltInGlobalInvocationId = 28,
+ SpvBuiltInLocalInvocationIndex = 29,
+ SpvBuiltInWorkDim = 30,
+ SpvBuiltInGlobalSize = 31,
+ SpvBuiltInEnqueuedWorkgroupSize = 32,
+ SpvBuiltInGlobalOffset = 33,
+ SpvBuiltInGlobalLinearId = 34,
+ SpvBuiltInSubgroupSize = 36,
+ SpvBuiltInSubgroupMaxSize = 37,
+ SpvBuiltInNumSubgroups = 38,
+ SpvBuiltInNumEnqueuedSubgroups = 39,
+ SpvBuiltInSubgroupId = 40,
+ SpvBuiltInSubgroupLocalInvocationId = 41,
+ SpvBuiltInVertexIndex = 42,
+ SpvBuiltInInstanceIndex = 43,
+ SpvBuiltInSubgroupEqMaskKHR = 4416,
+ SpvBuiltInSubgroupGeMaskKHR = 4417,
+ SpvBuiltInSubgroupGtMaskKHR = 4418,
+ SpvBuiltInSubgroupLeMaskKHR = 4419,
+ SpvBuiltInSubgroupLtMaskKHR = 4420,
+ SpvBuiltInBaseVertex = 4424,
+ SpvBuiltInBaseInstance = 4425,
+ SpvBuiltInDrawIndex = 4426,
+ SpvBuiltInDeviceIndex = 4438,
+ SpvBuiltInViewIndex = 4440,
+ SpvBuiltInViewportMaskNV = 5253,
+ SpvBuiltInSecondaryPositionNV = 5257,
+ SpvBuiltInSecondaryViewportMaskNV = 5258,
+ SpvBuiltInPositionPerViewNV = 5261,
+ SpvBuiltInViewportMaskPerViewNV = 5262,
+ SpvBuiltInMax = 0x7fffffff,
+} SpvBuiltIn;
+
+typedef enum SpvSelectionControlShift_ {
+ SpvSelectionControlFlattenShift = 0,
+ SpvSelectionControlDontFlattenShift = 1,
+ SpvSelectionControlMax = 0x7fffffff,
+} SpvSelectionControlShift;
+
+typedef enum SpvSelectionControlMask_ {
+ SpvSelectionControlMaskNone = 0,
+ SpvSelectionControlFlattenMask = 0x00000001,
+ SpvSelectionControlDontFlattenMask = 0x00000002,
+} SpvSelectionControlMask;
+
+typedef enum SpvLoopControlShift_ {
+ SpvLoopControlUnrollShift = 0,
+ SpvLoopControlDontUnrollShift = 1,
+ SpvLoopControlDependencyInfiniteShift = 2,
+ SpvLoopControlDependencyLengthShift = 3,
+ SpvLoopControlMax = 0x7fffffff,
+} SpvLoopControlShift;
+
+typedef enum SpvLoopControlMask_ {
+ SpvLoopControlMaskNone = 0,
+ SpvLoopControlUnrollMask = 0x00000001,
+ SpvLoopControlDontUnrollMask = 0x00000002,
+ SpvLoopControlDependencyInfiniteMask = 0x00000004,
+ SpvLoopControlDependencyLengthMask = 0x00000008,
+} SpvLoopControlMask;
+
+typedef enum SpvFunctionControlShift_ {
+ SpvFunctionControlInlineShift = 0,
+ SpvFunctionControlDontInlineShift = 1,
+ SpvFunctionControlPureShift = 2,
+ SpvFunctionControlConstShift = 3,
+ SpvFunctionControlMax = 0x7fffffff,
+} SpvFunctionControlShift;
+
+typedef enum SpvFunctionControlMask_ {
+ SpvFunctionControlMaskNone = 0,
+ SpvFunctionControlInlineMask = 0x00000001,
+ SpvFunctionControlDontInlineMask = 0x00000002,
+ SpvFunctionControlPureMask = 0x00000004,
+ SpvFunctionControlConstMask = 0x00000008,
+} SpvFunctionControlMask;
+
+typedef enum SpvMemorySemanticsShift_ {
+ SpvMemorySemanticsAcquireShift = 1,
+ SpvMemorySemanticsReleaseShift = 2,
+ SpvMemorySemanticsAcquireReleaseShift = 3,
+ SpvMemorySemanticsSequentiallyConsistentShift = 4,
+ SpvMemorySemanticsUniformMemoryShift = 6,
+ SpvMemorySemanticsSubgroupMemoryShift = 7,
+ SpvMemorySemanticsWorkgroupMemoryShift = 8,
+ SpvMemorySemanticsCrossWorkgroupMemoryShift = 9,
+ SpvMemorySemanticsAtomicCounterMemoryShift = 10,
+ SpvMemorySemanticsImageMemoryShift = 11,
+ SpvMemorySemanticsMax = 0x7fffffff,
+} SpvMemorySemanticsShift;
+
+typedef enum SpvMemorySemanticsMask_ {
+ SpvMemorySemanticsMaskNone = 0,
+ SpvMemorySemanticsAcquireMask = 0x00000002,
+ SpvMemorySemanticsReleaseMask = 0x00000004,
+ SpvMemorySemanticsAcquireReleaseMask = 0x00000008,
+ SpvMemorySemanticsSequentiallyConsistentMask = 0x00000010,
+ SpvMemorySemanticsUniformMemoryMask = 0x00000040,
+ SpvMemorySemanticsSubgroupMemoryMask = 0x00000080,
+ SpvMemorySemanticsWorkgroupMemoryMask = 0x00000100,
+ SpvMemorySemanticsCrossWorkgroupMemoryMask = 0x00000200,
+ SpvMemorySemanticsAtomicCounterMemoryMask = 0x00000400,
+ SpvMemorySemanticsImageMemoryMask = 0x00000800,
+} SpvMemorySemanticsMask;
+
+typedef enum SpvMemoryAccessShift_ {
+ SpvMemoryAccessVolatileShift = 0,
+ SpvMemoryAccessAlignedShift = 1,
+ SpvMemoryAccessNontemporalShift = 2,
+ SpvMemoryAccessMax = 0x7fffffff,
+} SpvMemoryAccessShift;
+
+typedef enum SpvMemoryAccessMask_ {
+ SpvMemoryAccessMaskNone = 0,
+ SpvMemoryAccessVolatileMask = 0x00000001,
+ SpvMemoryAccessAlignedMask = 0x00000002,
+ SpvMemoryAccessNontemporalMask = 0x00000004,
+} SpvMemoryAccessMask;
+
+typedef enum SpvScope_ {
+ SpvScopeCrossDevice = 0,
+ SpvScopeDevice = 1,
+ SpvScopeWorkgroup = 2,
+ SpvScopeSubgroup = 3,
+ SpvScopeInvocation = 4,
+ SpvScopeMax = 0x7fffffff,
+} SpvScope;
+
+typedef enum SpvGroupOperation_ {
+ SpvGroupOperationReduce = 0,
+ SpvGroupOperationInclusiveScan = 1,
+ SpvGroupOperationExclusiveScan = 2,
+ SpvGroupOperationMax = 0x7fffffff,
+} SpvGroupOperation;
+
+typedef enum SpvKernelEnqueueFlags_ {
+ SpvKernelEnqueueFlagsNoWait = 0,
+ SpvKernelEnqueueFlagsWaitKernel = 1,
+ SpvKernelEnqueueFlagsWaitWorkGroup = 2,
+ SpvKernelEnqueueFlagsMax = 0x7fffffff,
+} SpvKernelEnqueueFlags;
+
+typedef enum SpvKernelProfilingInfoShift_ {
+ SpvKernelProfilingInfoCmdExecTimeShift = 0,
+ SpvKernelProfilingInfoMax = 0x7fffffff,
+} SpvKernelProfilingInfoShift;
+
+typedef enum SpvKernelProfilingInfoMask_ {
+ SpvKernelProfilingInfoMaskNone = 0,
+ SpvKernelProfilingInfoCmdExecTimeMask = 0x00000001,
+} SpvKernelProfilingInfoMask;
+
+typedef enum SpvCapability_ {
+ SpvCapabilityMatrix = 0,
+ SpvCapabilityShader = 1,
+ SpvCapabilityGeometry = 2,
+ SpvCapabilityTessellation = 3,
+ SpvCapabilityAddresses = 4,
+ SpvCapabilityLinkage = 5,
+ SpvCapabilityKernel = 6,
+ SpvCapabilityVector16 = 7,
+ SpvCapabilityFloat16Buffer = 8,
+ SpvCapabilityFloat16 = 9,
+ SpvCapabilityFloat64 = 10,
+ SpvCapabilityInt64 = 11,
+ SpvCapabilityInt64Atomics = 12,
+ SpvCapabilityImageBasic = 13,
+ SpvCapabilityImageReadWrite = 14,
+ SpvCapabilityImageMipmap = 15,
+ SpvCapabilityPipes = 17,
+ SpvCapabilityGroups = 18,
+ SpvCapabilityDeviceEnqueue = 19,
+ SpvCapabilityLiteralSampler = 20,
+ SpvCapabilityAtomicStorage = 21,
+ SpvCapabilityInt16 = 22,
+ SpvCapabilityTessellationPointSize = 23,
+ SpvCapabilityGeometryPointSize = 24,
+ SpvCapabilityImageGatherExtended = 25,
+ SpvCapabilityStorageImageMultisample = 27,
+ SpvCapabilityUniformBufferArrayDynamicIndexing = 28,
+ SpvCapabilitySampledImageArrayDynamicIndexing = 29,
+ SpvCapabilityStorageBufferArrayDynamicIndexing = 30,
+ SpvCapabilityStorageImageArrayDynamicIndexing = 31,
+ SpvCapabilityClipDistance = 32,
+ SpvCapabilityCullDistance = 33,
+ SpvCapabilityImageCubeArray = 34,
+ SpvCapabilitySampleRateShading = 35,
+ SpvCapabilityImageRect = 36,
+ SpvCapabilitySampledRect = 37,
+ SpvCapabilityGenericPointer = 38,
+ SpvCapabilityInt8 = 39,
+ SpvCapabilityInputAttachment = 40,
+ SpvCapabilitySparseResidency = 41,
+ SpvCapabilityMinLod = 42,
+ SpvCapabilitySampled1D = 43,
+ SpvCapabilityImage1D = 44,
+ SpvCapabilitySampledCubeArray = 45,
+ SpvCapabilitySampledBuffer = 46,
+ SpvCapabilityImageBuffer = 47,
+ SpvCapabilityImageMSArray = 48,
+ SpvCapabilityStorageImageExtendedFormats = 49,
+ SpvCapabilityImageQuery = 50,
+ SpvCapabilityDerivativeControl = 51,
+ SpvCapabilityInterpolationFunction = 52,
+ SpvCapabilityTransformFeedback = 53,
+ SpvCapabilityGeometryStreams = 54,
+ SpvCapabilityStorageImageReadWithoutFormat = 55,
+ SpvCapabilityStorageImageWriteWithoutFormat = 56,
+ SpvCapabilityMultiViewport = 57,
+ SpvCapabilitySubgroupDispatch = 58,
+ SpvCapabilityNamedBarrier = 59,
+ SpvCapabilityPipeStorage = 60,
+ SpvCapabilitySubgroupBallotKHR = 4423,
+ SpvCapabilityDrawParameters = 4427,
+ SpvCapabilitySubgroupVoteKHR = 4431,
+ SpvCapabilityStorageBuffer16BitAccess = 4433,
+ SpvCapabilityStorageUniformBufferBlock16 = 4433,
+ SpvCapabilityStorageUniform16 = 4434,
+ SpvCapabilityUniformAndStorageBuffer16BitAccess = 4434,
+ SpvCapabilityStoragePushConstant16 = 4435,
+ SpvCapabilityStorageInputOutput16 = 4436,
+ SpvCapabilityDeviceGroup = 4437,
+ SpvCapabilityMultiView = 4439,
+ SpvCapabilityVariablePointersStorageBuffer = 4441,
+ SpvCapabilityVariablePointers = 4442,
+ SpvCapabilitySampleMaskOverrideCoverageNV = 5249,
+ SpvCapabilityGeometryShaderPassthroughNV = 5251,
+ SpvCapabilityShaderViewportIndexLayerNV = 5254,
+ SpvCapabilityShaderViewportMaskNV = 5255,
+ SpvCapabilityShaderStereoViewNV = 5259,
+ SpvCapabilityPerViewAttributesNV = 5260,
+ SpvCapabilityMax = 0x7fffffff,
+} SpvCapability;
+
+typedef enum SpvOp_ {
+ SpvOpNop = 0,
+ SpvOpUndef = 1,
+ SpvOpSourceContinued = 2,
+ SpvOpSource = 3,
+ SpvOpSourceExtension = 4,
+ SpvOpName = 5,
+ SpvOpMemberName = 6,
+ SpvOpString = 7,
+ SpvOpLine = 8,
+ SpvOpExtension = 10,
+ SpvOpExtInstImport = 11,
+ SpvOpExtInst = 12,
+ SpvOpMemoryModel = 14,
+ SpvOpEntryPoint = 15,
+ SpvOpExecutionMode = 16,
+ SpvOpCapability = 17,
+ SpvOpTypeVoid = 19,
+ SpvOpTypeBool = 20,
+ SpvOpTypeInt = 21,
+ SpvOpTypeFloat = 22,
+ SpvOpTypeVector = 23,
+ SpvOpTypeMatrix = 24,
+ SpvOpTypeImage = 25,
+ SpvOpTypeSampler = 26,
+ SpvOpTypeSampledImage = 27,
+ SpvOpTypeArray = 28,
+ SpvOpTypeRuntimeArray = 29,
+ SpvOpTypeStruct = 30,
+ SpvOpTypeOpaque = 31,
+ SpvOpTypePointer = 32,
+ SpvOpTypeFunction = 33,
+ SpvOpTypeEvent = 34,
+ SpvOpTypeDeviceEvent = 35,
+ SpvOpTypeReserveId = 36,
+ SpvOpTypeQueue = 37,
+ SpvOpTypePipe = 38,
+ SpvOpTypeForwardPointer = 39,
+ SpvOpConstantTrue = 41,
+ SpvOpConstantFalse = 42,
+ SpvOpConstant = 43,
+ SpvOpConstantComposite = 44,
+ SpvOpConstantSampler = 45,
+ SpvOpConstantNull = 46,
+ SpvOpSpecConstantTrue = 48,
+ SpvOpSpecConstantFalse = 49,
+ SpvOpSpecConstant = 50,
+ SpvOpSpecConstantComposite = 51,
+ SpvOpSpecConstantOp = 52,
+ SpvOpFunction = 54,
+ SpvOpFunctionParameter = 55,
+ SpvOpFunctionEnd = 56,
+ SpvOpFunctionCall = 57,
+ SpvOpVariable = 59,
+ SpvOpImageTexelPointer = 60,
+ SpvOpLoad = 61,
+ SpvOpStore = 62,
+ SpvOpCopyMemory = 63,
+ SpvOpCopyMemorySized = 64,
+ SpvOpAccessChain = 65,
+ SpvOpInBoundsAccessChain = 66,
+ SpvOpPtrAccessChain = 67,
+ SpvOpArrayLength = 68,
+ SpvOpGenericPtrMemSemantics = 69,
+ SpvOpInBoundsPtrAccessChain = 70,
+ SpvOpDecorate = 71,
+ SpvOpMemberDecorate = 72,
+ SpvOpDecorationGroup = 73,
+ SpvOpGroupDecorate = 74,
+ SpvOpGroupMemberDecorate = 75,
+ SpvOpVectorExtractDynamic = 77,
+ SpvOpVectorInsertDynamic = 78,
+ SpvOpVectorShuffle = 79,
+ SpvOpCompositeConstruct = 80,
+ SpvOpCompositeExtract = 81,
+ SpvOpCompositeInsert = 82,
+ SpvOpCopyObject = 83,
+ SpvOpTranspose = 84,
+ SpvOpSampledImage = 86,
+ SpvOpImageSampleImplicitLod = 87,
+ SpvOpImageSampleExplicitLod = 88,
+ SpvOpImageSampleDrefImplicitLod = 89,
+ SpvOpImageSampleDrefExplicitLod = 90,
+ SpvOpImageSampleProjImplicitLod = 91,
+ SpvOpImageSampleProjExplicitLod = 92,
+ SpvOpImageSampleProjDrefImplicitLod = 93,
+ SpvOpImageSampleProjDrefExplicitLod = 94,
+ SpvOpImageFetch = 95,
+ SpvOpImageGather = 96,
+ SpvOpImageDrefGather = 97,
+ SpvOpImageRead = 98,
+ SpvOpImageWrite = 99,
+ SpvOpImage = 100,
+ SpvOpImageQueryFormat = 101,
+ SpvOpImageQueryOrder = 102,
+ SpvOpImageQuerySizeLod = 103,
+ SpvOpImageQuerySize = 104,
+ SpvOpImageQueryLod = 105,
+ SpvOpImageQueryLevels = 106,
+ SpvOpImageQuerySamples = 107,
+ SpvOpConvertFToU = 109,
+ SpvOpConvertFToS = 110,
+ SpvOpConvertSToF = 111,
+ SpvOpConvertUToF = 112,
+ SpvOpUConvert = 113,
+ SpvOpSConvert = 114,
+ SpvOpFConvert = 115,
+ SpvOpQuantizeToF16 = 116,
+ SpvOpConvertPtrToU = 117,
+ SpvOpSatConvertSToU = 118,
+ SpvOpSatConvertUToS = 119,
+ SpvOpConvertUToPtr = 120,
+ SpvOpPtrCastToGeneric = 121,
+ SpvOpGenericCastToPtr = 122,
+ SpvOpGenericCastToPtrExplicit = 123,
+ SpvOpBitcast = 124,
+ SpvOpSNegate = 126,
+ SpvOpFNegate = 127,
+ SpvOpIAdd = 128,
+ SpvOpFAdd = 129,
+ SpvOpISub = 130,
+ SpvOpFSub = 131,
+ SpvOpIMul = 132,
+ SpvOpFMul = 133,
+ SpvOpUDiv = 134,
+ SpvOpSDiv = 135,
+ SpvOpFDiv = 136,
+ SpvOpUMod = 137,
+ SpvOpSRem = 138,
+ SpvOpSMod = 139,
+ SpvOpFRem = 140,
+ SpvOpFMod = 141,
+ SpvOpVectorTimesScalar = 142,
+ SpvOpMatrixTimesScalar = 143,
+ SpvOpVectorTimesMatrix = 144,
+ SpvOpMatrixTimesVector = 145,
+ SpvOpMatrixTimesMatrix = 146,
+ SpvOpOuterProduct = 147,
+ SpvOpDot = 148,
+ SpvOpIAddCarry = 149,
+ SpvOpISubBorrow = 150,
+ SpvOpUMulExtended = 151,
+ SpvOpSMulExtended = 152,
+ SpvOpAny = 154,
+ SpvOpAll = 155,
+ SpvOpIsNan = 156,
+ SpvOpIsInf = 157,
+ SpvOpIsFinite = 158,
+ SpvOpIsNormal = 159,
+ SpvOpSignBitSet = 160,
+ SpvOpLessOrGreater = 161,
+ SpvOpOrdered = 162,
+ SpvOpUnordered = 163,
+ SpvOpLogicalEqual = 164,
+ SpvOpLogicalNotEqual = 165,
+ SpvOpLogicalOr = 166,
+ SpvOpLogicalAnd = 167,
+ SpvOpLogicalNot = 168,
+ SpvOpSelect = 169,
+ SpvOpIEqual = 170,
+ SpvOpINotEqual = 171,
+ SpvOpUGreaterThan = 172,
+ SpvOpSGreaterThan = 173,
+ SpvOpUGreaterThanEqual = 174,
+ SpvOpSGreaterThanEqual = 175,
+ SpvOpULessThan = 176,
+ SpvOpSLessThan = 177,
+ SpvOpULessThanEqual = 178,
+ SpvOpSLessThanEqual = 179,
+ SpvOpFOrdEqual = 180,
+ SpvOpFUnordEqual = 181,
+ SpvOpFOrdNotEqual = 182,
+ SpvOpFUnordNotEqual = 183,
+ SpvOpFOrdLessThan = 184,
+ SpvOpFUnordLessThan = 185,
+ SpvOpFOrdGreaterThan = 186,
+ SpvOpFUnordGreaterThan = 187,
+ SpvOpFOrdLessThanEqual = 188,
+ SpvOpFUnordLessThanEqual = 189,
+ SpvOpFOrdGreaterThanEqual = 190,
+ SpvOpFUnordGreaterThanEqual = 191,
+ SpvOpShiftRightLogical = 194,
+ SpvOpShiftRightArithmetic = 195,
+ SpvOpShiftLeftLogical = 196,
+ SpvOpBitwiseOr = 197,
+ SpvOpBitwiseXor = 198,
+ SpvOpBitwiseAnd = 199,
+ SpvOpNot = 200,
+ SpvOpBitFieldInsert = 201,
+ SpvOpBitFieldSExtract = 202,
+ SpvOpBitFieldUExtract = 203,
+ SpvOpBitReverse = 204,
+ SpvOpBitCount = 205,
+ SpvOpDPdx = 207,
+ SpvOpDPdy = 208,
+ SpvOpFwidth = 209,
+ SpvOpDPdxFine = 210,
+ SpvOpDPdyFine = 211,
+ SpvOpFwidthFine = 212,
+ SpvOpDPdxCoarse = 213,
+ SpvOpDPdyCoarse = 214,
+ SpvOpFwidthCoarse = 215,
+ SpvOpEmitVertex = 218,
+ SpvOpEndPrimitive = 219,
+ SpvOpEmitStreamVertex = 220,
+ SpvOpEndStreamPrimitive = 221,
+ SpvOpControlBarrier = 224,
+ SpvOpMemoryBarrier = 225,
+ SpvOpAtomicLoad = 227,
+ SpvOpAtomicStore = 228,
+ SpvOpAtomicExchange = 229,
+ SpvOpAtomicCompareExchange = 230,
+ SpvOpAtomicCompareExchangeWeak = 231,
+ SpvOpAtomicIIncrement = 232,
+ SpvOpAtomicIDecrement = 233,
+ SpvOpAtomicIAdd = 234,
+ SpvOpAtomicISub = 235,
+ SpvOpAtomicSMin = 236,
+ SpvOpAtomicUMin = 237,
+ SpvOpAtomicSMax = 238,
+ SpvOpAtomicUMax = 239,
+ SpvOpAtomicAnd = 240,
+ SpvOpAtomicOr = 241,
+ SpvOpAtomicXor = 242,
+ SpvOpPhi = 245,
+ SpvOpLoopMerge = 246,
+ SpvOpSelectionMerge = 247,
+ SpvOpLabel = 248,
+ SpvOpBranch = 249,
+ SpvOpBranchConditional = 250,
+ SpvOpSwitch = 251,
+ SpvOpKill = 252,
+ SpvOpReturn = 253,
+ SpvOpReturnValue = 254,
+ SpvOpUnreachable = 255,
+ SpvOpLifetimeStart = 256,
+ SpvOpLifetimeStop = 257,
+ SpvOpGroupAsyncCopy = 259,
+ SpvOpGroupWaitEvents = 260,
+ SpvOpGroupAll = 261,
+ SpvOpGroupAny = 262,
+ SpvOpGroupBroadcast = 263,
+ SpvOpGroupIAdd = 264,
+ SpvOpGroupFAdd = 265,
+ SpvOpGroupFMin = 266,
+ SpvOpGroupUMin = 267,
+ SpvOpGroupSMin = 268,
+ SpvOpGroupFMax = 269,
+ SpvOpGroupUMax = 270,
+ SpvOpGroupSMax = 271,
+ SpvOpReadPipe = 274,
+ SpvOpWritePipe = 275,
+ SpvOpReservedReadPipe = 276,
+ SpvOpReservedWritePipe = 277,
+ SpvOpReserveReadPipePackets = 278,
+ SpvOpReserveWritePipePackets = 279,
+ SpvOpCommitReadPipe = 280,
+ SpvOpCommitWritePipe = 281,
+ SpvOpIsValidReserveId = 282,
+ SpvOpGetNumPipePackets = 283,
+ SpvOpGetMaxPipePackets = 284,
+ SpvOpGroupReserveReadPipePackets = 285,
+ SpvOpGroupReserveWritePipePackets = 286,
+ SpvOpGroupCommitReadPipe = 287,
+ SpvOpGroupCommitWritePipe = 288,
+ SpvOpEnqueueMarker = 291,
+ SpvOpEnqueueKernel = 292,
+ SpvOpGetKernelNDrangeSubGroupCount = 293,
+ SpvOpGetKernelNDrangeMaxSubGroupSize = 294,
+ SpvOpGetKernelWorkGroupSize = 295,
+ SpvOpGetKernelPreferredWorkGroupSizeMultiple = 296,
+ SpvOpRetainEvent = 297,
+ SpvOpReleaseEvent = 298,
+ SpvOpCreateUserEvent = 299,
+ SpvOpIsValidEvent = 300,
+ SpvOpSetUserEventStatus = 301,
+ SpvOpCaptureEventProfilingInfo = 302,
+ SpvOpGetDefaultQueue = 303,
+ SpvOpBuildNDRange = 304,
+ SpvOpImageSparseSampleImplicitLod = 305,
+ SpvOpImageSparseSampleExplicitLod = 306,
+ SpvOpImageSparseSampleDrefImplicitLod = 307,
+ SpvOpImageSparseSampleDrefExplicitLod = 308,
+ SpvOpImageSparseSampleProjImplicitLod = 309,
+ SpvOpImageSparseSampleProjExplicitLod = 310,
+ SpvOpImageSparseSampleProjDrefImplicitLod = 311,
+ SpvOpImageSparseSampleProjDrefExplicitLod = 312,
+ SpvOpImageSparseFetch = 313,
+ SpvOpImageSparseGather = 314,
+ SpvOpImageSparseDrefGather = 315,
+ SpvOpImageSparseTexelsResident = 316,
+ SpvOpNoLine = 317,
+ SpvOpAtomicFlagTestAndSet = 318,
+ SpvOpAtomicFlagClear = 319,
+ SpvOpImageSparseRead = 320,
+ SpvOpSizeOf = 321,
+ SpvOpTypePipeStorage = 322,
+ SpvOpConstantPipeStorage = 323,
+ SpvOpCreatePipeFromPipeStorage = 324,
+ SpvOpGetKernelLocalSizeForSubgroupCount = 325,
+ SpvOpGetKernelMaxNumSubgroups = 326,
+ SpvOpTypeNamedBarrier = 327,
+ SpvOpNamedBarrierInitialize = 328,
+ SpvOpMemoryNamedBarrier = 329,
+ SpvOpModuleProcessed = 330,
+ SpvOpSubgroupBallotKHR = 4421,
+ SpvOpSubgroupFirstInvocationKHR = 4422,
+ SpvOpSubgroupAllKHR = 4428,
+ SpvOpSubgroupAnyKHR = 4429,
+ SpvOpSubgroupAllEqualKHR = 4430,
+ SpvOpSubgroupReadInvocationKHR = 4432,
+ SpvOpMax = 0x7fffffff,
+} SpvOp;
+
+#endif // #ifndef spirv_H
+
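
As the header comment in spirv.h notes, mask-style enums can be OR'd together, and each has a parallel *Shift enum giving the bit position (1 << x) of the corresponding mask. A minimal sketch of how these tokens and the module-header constants are typically used (the validation helper and include path are assumptions for illustration, not part of the diff):

// Sketch: mask/shift relationship and a basic SPIR-V module-header check
// using the constants from spirv.h (illustrative only).
#include <cassert>
#include <cstdint>
#include <vector>
#include "vulkan/spirv.h"

bool LooksLikeSpirv(const std::vector<uint32_t>& words) {
  // Word 0 is the magic number; words 1-4 are version, generator, bound, schema.
  return words.size() >= 5 && words[0] == SpvMagicNumber;
}

int main() {
  // Mask enumerants can be OR'd together; each equals 1 << its Shift twin.
  uint32_t image_operands =
      SpvImageOperandsLodMask | SpvImageOperandsConstOffsetMask;
  assert(SpvImageOperandsLodMask == (1u << SpvImageOperandsLodShift));
  (void)image_operands;
  return 0;
}

The spirv.hpp header that follows exposes the same tokens without the "Spv" prefix, in the spv namespace, for C++ consumers.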
diff --git a/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/spirv.hpp b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/spirv.hpp
new file mode 100644
index 0000000..a70c595
--- /dev/null
+++ b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/spirv.hpp
@@ -0,0 +1,980 @@
+// Copyright (c) 2014-2017 The Khronos Group Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and/or associated documentation files (the "Materials"),
+// to deal in the Materials without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Materials, and to permit persons to whom the
+// Materials are furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Materials.
+//
+// MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+// STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+// HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+//
+// THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+// IN THE MATERIALS.
+
+// This header is automatically generated by the same tool that creates
+// the Binary Section of the SPIR-V specification.
+
+// Enumeration tokens for SPIR-V, in various styles:
+// C, C++, C++11, JSON, Lua, Python
+//
+// - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL
+// - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL
+// - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL
+// - Lua will use tables, e.g.: spv.SourceLanguage.GLSL
+// - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL']
+//
+// Some tokens act like mask values, which can be OR'd together,
+// while others are mutually exclusive. The mask-like ones have
+// "Mask" in their name, and a parallel enum that has the shift
+// amount (1 << x) for each corresponding enumerant.
+
+#ifndef spirv_HPP
+#define spirv_HPP
+
+namespace spv {
+
+typedef unsigned int Id;
+
+#define SPV_VERSION 0x10100
+#define SPV_REVISION 6
+
+static const unsigned int MagicNumber = 0x07230203;
+static const unsigned int Version = 0x00010100;
+static const unsigned int Revision = 6;
+static const unsigned int OpCodeMask = 0xffff;
+static const unsigned int WordCountShift = 16;
+
+enum SourceLanguage {
+ SourceLanguageUnknown = 0,
+ SourceLanguageESSL = 1,
+ SourceLanguageGLSL = 2,
+ SourceLanguageOpenCL_C = 3,
+ SourceLanguageOpenCL_CPP = 4,
+ SourceLanguageHLSL = 5,
+ SourceLanguageMax = 0x7fffffff,
+};
+
+enum ExecutionModel {
+ ExecutionModelVertex = 0,
+ ExecutionModelTessellationControl = 1,
+ ExecutionModelTessellationEvaluation = 2,
+ ExecutionModelGeometry = 3,
+ ExecutionModelFragment = 4,
+ ExecutionModelGLCompute = 5,
+ ExecutionModelKernel = 6,
+ ExecutionModelMax = 0x7fffffff,
+};
+
+enum AddressingModel {
+ AddressingModelLogical = 0,
+ AddressingModelPhysical32 = 1,
+ AddressingModelPhysical64 = 2,
+ AddressingModelMax = 0x7fffffff,
+};
+
+enum MemoryModel {
+ MemoryModelSimple = 0,
+ MemoryModelGLSL450 = 1,
+ MemoryModelOpenCL = 2,
+ MemoryModelMax = 0x7fffffff,
+};
+
+enum ExecutionMode {
+ ExecutionModeInvocations = 0,
+ ExecutionModeSpacingEqual = 1,
+ ExecutionModeSpacingFractionalEven = 2,
+ ExecutionModeSpacingFractionalOdd = 3,
+ ExecutionModeVertexOrderCw = 4,
+ ExecutionModeVertexOrderCcw = 5,
+ ExecutionModePixelCenterInteger = 6,
+ ExecutionModeOriginUpperLeft = 7,
+ ExecutionModeOriginLowerLeft = 8,
+ ExecutionModeEarlyFragmentTests = 9,
+ ExecutionModePointMode = 10,
+ ExecutionModeXfb = 11,
+ ExecutionModeDepthReplacing = 12,
+ ExecutionModeDepthGreater = 14,
+ ExecutionModeDepthLess = 15,
+ ExecutionModeDepthUnchanged = 16,
+ ExecutionModeLocalSize = 17,
+ ExecutionModeLocalSizeHint = 18,
+ ExecutionModeInputPoints = 19,
+ ExecutionModeInputLines = 20,
+ ExecutionModeInputLinesAdjacency = 21,
+ ExecutionModeTriangles = 22,
+ ExecutionModeInputTrianglesAdjacency = 23,
+ ExecutionModeQuads = 24,
+ ExecutionModeIsolines = 25,
+ ExecutionModeOutputVertices = 26,
+ ExecutionModeOutputPoints = 27,
+ ExecutionModeOutputLineStrip = 28,
+ ExecutionModeOutputTriangleStrip = 29,
+ ExecutionModeVecTypeHint = 30,
+ ExecutionModeContractionOff = 31,
+ ExecutionModeInitializer = 33,
+ ExecutionModeFinalizer = 34,
+ ExecutionModeSubgroupSize = 35,
+ ExecutionModeSubgroupsPerWorkgroup = 36,
+ ExecutionModeMax = 0x7fffffff,
+};
+
+enum StorageClass {
+ StorageClassUniformConstant = 0,
+ StorageClassInput = 1,
+ StorageClassUniform = 2,
+ StorageClassOutput = 3,
+ StorageClassWorkgroup = 4,
+ StorageClassCrossWorkgroup = 5,
+ StorageClassPrivate = 6,
+ StorageClassFunction = 7,
+ StorageClassGeneric = 8,
+ StorageClassPushConstant = 9,
+ StorageClassAtomicCounter = 10,
+ StorageClassImage = 11,
+ StorageClassStorageBuffer = 12,
+ StorageClassMax = 0x7fffffff,
+};
+
+enum Dim {
+ Dim1D = 0,
+ Dim2D = 1,
+ Dim3D = 2,
+ DimCube = 3,
+ DimRect = 4,
+ DimBuffer = 5,
+ DimSubpassData = 6,
+ DimMax = 0x7fffffff,
+};
+
+enum SamplerAddressingMode {
+ SamplerAddressingModeNone = 0,
+ SamplerAddressingModeClampToEdge = 1,
+ SamplerAddressingModeClamp = 2,
+ SamplerAddressingModeRepeat = 3,
+ SamplerAddressingModeRepeatMirrored = 4,
+ SamplerAddressingModeMax = 0x7fffffff,
+};
+
+enum SamplerFilterMode {
+ SamplerFilterModeNearest = 0,
+ SamplerFilterModeLinear = 1,
+ SamplerFilterModeMax = 0x7fffffff,
+};
+
+enum ImageFormat {
+ ImageFormatUnknown = 0,
+ ImageFormatRgba32f = 1,
+ ImageFormatRgba16f = 2,
+ ImageFormatR32f = 3,
+ ImageFormatRgba8 = 4,
+ ImageFormatRgba8Snorm = 5,
+ ImageFormatRg32f = 6,
+ ImageFormatRg16f = 7,
+ ImageFormatR11fG11fB10f = 8,
+ ImageFormatR16f = 9,
+ ImageFormatRgba16 = 10,
+ ImageFormatRgb10A2 = 11,
+ ImageFormatRg16 = 12,
+ ImageFormatRg8 = 13,
+ ImageFormatR16 = 14,
+ ImageFormatR8 = 15,
+ ImageFormatRgba16Snorm = 16,
+ ImageFormatRg16Snorm = 17,
+ ImageFormatRg8Snorm = 18,
+ ImageFormatR16Snorm = 19,
+ ImageFormatR8Snorm = 20,
+ ImageFormatRgba32i = 21,
+ ImageFormatRgba16i = 22,
+ ImageFormatRgba8i = 23,
+ ImageFormatR32i = 24,
+ ImageFormatRg32i = 25,
+ ImageFormatRg16i = 26,
+ ImageFormatRg8i = 27,
+ ImageFormatR16i = 28,
+ ImageFormatR8i = 29,
+ ImageFormatRgba32ui = 30,
+ ImageFormatRgba16ui = 31,
+ ImageFormatRgba8ui = 32,
+ ImageFormatR32ui = 33,
+ ImageFormatRgb10a2ui = 34,
+ ImageFormatRg32ui = 35,
+ ImageFormatRg16ui = 36,
+ ImageFormatRg8ui = 37,
+ ImageFormatR16ui = 38,
+ ImageFormatR8ui = 39,
+ ImageFormatMax = 0x7fffffff,
+};
+
+enum ImageChannelOrder {
+ ImageChannelOrderR = 0,
+ ImageChannelOrderA = 1,
+ ImageChannelOrderRG = 2,
+ ImageChannelOrderRA = 3,
+ ImageChannelOrderRGB = 4,
+ ImageChannelOrderRGBA = 5,
+ ImageChannelOrderBGRA = 6,
+ ImageChannelOrderARGB = 7,
+ ImageChannelOrderIntensity = 8,
+ ImageChannelOrderLuminance = 9,
+ ImageChannelOrderRx = 10,
+ ImageChannelOrderRGx = 11,
+ ImageChannelOrderRGBx = 12,
+ ImageChannelOrderDepth = 13,
+ ImageChannelOrderDepthStencil = 14,
+ ImageChannelOrdersRGB = 15,
+ ImageChannelOrdersRGBx = 16,
+ ImageChannelOrdersRGBA = 17,
+ ImageChannelOrdersBGRA = 18,
+ ImageChannelOrderABGR = 19,
+ ImageChannelOrderMax = 0x7fffffff,
+};
+
+enum ImageChannelDataType {
+ ImageChannelDataTypeSnormInt8 = 0,
+ ImageChannelDataTypeSnormInt16 = 1,
+ ImageChannelDataTypeUnormInt8 = 2,
+ ImageChannelDataTypeUnormInt16 = 3,
+ ImageChannelDataTypeUnormShort565 = 4,
+ ImageChannelDataTypeUnormShort555 = 5,
+ ImageChannelDataTypeUnormInt101010 = 6,
+ ImageChannelDataTypeSignedInt8 = 7,
+ ImageChannelDataTypeSignedInt16 = 8,
+ ImageChannelDataTypeSignedInt32 = 9,
+ ImageChannelDataTypeUnsignedInt8 = 10,
+ ImageChannelDataTypeUnsignedInt16 = 11,
+ ImageChannelDataTypeUnsignedInt32 = 12,
+ ImageChannelDataTypeHalfFloat = 13,
+ ImageChannelDataTypeFloat = 14,
+ ImageChannelDataTypeUnormInt24 = 15,
+ ImageChannelDataTypeUnormInt101010_2 = 16,
+ ImageChannelDataTypeMax = 0x7fffffff,
+};
+
+enum ImageOperandsShift {
+ ImageOperandsBiasShift = 0,
+ ImageOperandsLodShift = 1,
+ ImageOperandsGradShift = 2,
+ ImageOperandsConstOffsetShift = 3,
+ ImageOperandsOffsetShift = 4,
+ ImageOperandsConstOffsetsShift = 5,
+ ImageOperandsSampleShift = 6,
+ ImageOperandsMinLodShift = 7,
+ ImageOperandsMax = 0x7fffffff,
+};
+
+enum ImageOperandsMask {
+ ImageOperandsMaskNone = 0,
+ ImageOperandsBiasMask = 0x00000001,
+ ImageOperandsLodMask = 0x00000002,
+ ImageOperandsGradMask = 0x00000004,
+ ImageOperandsConstOffsetMask = 0x00000008,
+ ImageOperandsOffsetMask = 0x00000010,
+ ImageOperandsConstOffsetsMask = 0x00000020,
+ ImageOperandsSampleMask = 0x00000040,
+ ImageOperandsMinLodMask = 0x00000080,
+};
+
+enum FPFastMathModeShift {
+ FPFastMathModeNotNaNShift = 0,
+ FPFastMathModeNotInfShift = 1,
+ FPFastMathModeNSZShift = 2,
+ FPFastMathModeAllowRecipShift = 3,
+ FPFastMathModeFastShift = 4,
+ FPFastMathModeMax = 0x7fffffff,
+};
+
+enum FPFastMathModeMask {
+ FPFastMathModeMaskNone = 0,
+ FPFastMathModeNotNaNMask = 0x00000001,
+ FPFastMathModeNotInfMask = 0x00000002,
+ FPFastMathModeNSZMask = 0x00000004,
+ FPFastMathModeAllowRecipMask = 0x00000008,
+ FPFastMathModeFastMask = 0x00000010,
+};
+
+enum FPRoundingMode {
+ FPRoundingModeRTE = 0,
+ FPRoundingModeRTZ = 1,
+ FPRoundingModeRTP = 2,
+ FPRoundingModeRTN = 3,
+ FPRoundingModeMax = 0x7fffffff,
+};
+
+enum LinkageType {
+ LinkageTypeExport = 0,
+ LinkageTypeImport = 1,
+ LinkageTypeMax = 0x7fffffff,
+};
+
+enum AccessQualifier {
+ AccessQualifierReadOnly = 0,
+ AccessQualifierWriteOnly = 1,
+ AccessQualifierReadWrite = 2,
+ AccessQualifierMax = 0x7fffffff,
+};
+
+enum FunctionParameterAttribute {
+ FunctionParameterAttributeZext = 0,
+ FunctionParameterAttributeSext = 1,
+ FunctionParameterAttributeByVal = 2,
+ FunctionParameterAttributeSret = 3,
+ FunctionParameterAttributeNoAlias = 4,
+ FunctionParameterAttributeNoCapture = 5,
+ FunctionParameterAttributeNoWrite = 6,
+ FunctionParameterAttributeNoReadWrite = 7,
+ FunctionParameterAttributeMax = 0x7fffffff,
+};
+
+enum Decoration {
+ DecorationRelaxedPrecision = 0,
+ DecorationSpecId = 1,
+ DecorationBlock = 2,
+ DecorationBufferBlock = 3,
+ DecorationRowMajor = 4,
+ DecorationColMajor = 5,
+ DecorationArrayStride = 6,
+ DecorationMatrixStride = 7,
+ DecorationGLSLShared = 8,
+ DecorationGLSLPacked = 9,
+ DecorationCPacked = 10,
+ DecorationBuiltIn = 11,
+ DecorationNoPerspective = 13,
+ DecorationFlat = 14,
+ DecorationPatch = 15,
+ DecorationCentroid = 16,
+ DecorationSample = 17,
+ DecorationInvariant = 18,
+ DecorationRestrict = 19,
+ DecorationAliased = 20,
+ DecorationVolatile = 21,
+ DecorationConstant = 22,
+ DecorationCoherent = 23,
+ DecorationNonWritable = 24,
+ DecorationNonReadable = 25,
+ DecorationUniform = 26,
+ DecorationSaturatedConversion = 28,
+ DecorationStream = 29,
+ DecorationLocation = 30,
+ DecorationComponent = 31,
+ DecorationIndex = 32,
+ DecorationBinding = 33,
+ DecorationDescriptorSet = 34,
+ DecorationOffset = 35,
+ DecorationXfbBuffer = 36,
+ DecorationXfbStride = 37,
+ DecorationFuncParamAttr = 38,
+ DecorationFPRoundingMode = 39,
+ DecorationFPFastMathMode = 40,
+ DecorationLinkageAttributes = 41,
+ DecorationNoContraction = 42,
+ DecorationInputAttachmentIndex = 43,
+ DecorationAlignment = 44,
+ DecorationMaxByteOffset = 45,
+ DecorationOverrideCoverageNV = 5248,
+ DecorationPassthroughNV = 5250,
+ DecorationViewportRelativeNV = 5252,
+ DecorationSecondaryViewportRelativeNV = 5256,
+ DecorationMax = 0x7fffffff,
+};
+
+enum BuiltIn {
+ BuiltInPosition = 0,
+ BuiltInPointSize = 1,
+ BuiltInClipDistance = 3,
+ BuiltInCullDistance = 4,
+ BuiltInVertexId = 5,
+ BuiltInInstanceId = 6,
+ BuiltInPrimitiveId = 7,
+ BuiltInInvocationId = 8,
+ BuiltInLayer = 9,
+ BuiltInViewportIndex = 10,
+ BuiltInTessLevelOuter = 11,
+ BuiltInTessLevelInner = 12,
+ BuiltInTessCoord = 13,
+ BuiltInPatchVertices = 14,
+ BuiltInFragCoord = 15,
+ BuiltInPointCoord = 16,
+ BuiltInFrontFacing = 17,
+ BuiltInSampleId = 18,
+ BuiltInSamplePosition = 19,
+ BuiltInSampleMask = 20,
+ BuiltInFragDepth = 22,
+ BuiltInHelperInvocation = 23,
+ BuiltInNumWorkgroups = 24,
+ BuiltInWorkgroupSize = 25,
+ BuiltInWorkgroupId = 26,
+ BuiltInLocalInvocationId = 27,
+ BuiltInGlobalInvocationId = 28,
+ BuiltInLocalInvocationIndex = 29,
+ BuiltInWorkDim = 30,
+ BuiltInGlobalSize = 31,
+ BuiltInEnqueuedWorkgroupSize = 32,
+ BuiltInGlobalOffset = 33,
+ BuiltInGlobalLinearId = 34,
+ BuiltInSubgroupSize = 36,
+ BuiltInSubgroupMaxSize = 37,
+ BuiltInNumSubgroups = 38,
+ BuiltInNumEnqueuedSubgroups = 39,
+ BuiltInSubgroupId = 40,
+ BuiltInSubgroupLocalInvocationId = 41,
+ BuiltInVertexIndex = 42,
+ BuiltInInstanceIndex = 43,
+ BuiltInSubgroupEqMaskKHR = 4416,
+ BuiltInSubgroupGeMaskKHR = 4417,
+ BuiltInSubgroupGtMaskKHR = 4418,
+ BuiltInSubgroupLeMaskKHR = 4419,
+ BuiltInSubgroupLtMaskKHR = 4420,
+ BuiltInBaseVertex = 4424,
+ BuiltInBaseInstance = 4425,
+ BuiltInDrawIndex = 4426,
+ BuiltInDeviceIndex = 4438,
+ BuiltInViewIndex = 4440,
+ BuiltInViewportMaskNV = 5253,
+ BuiltInSecondaryPositionNV = 5257,
+ BuiltInSecondaryViewportMaskNV = 5258,
+ BuiltInPositionPerViewNV = 5261,
+ BuiltInViewportMaskPerViewNV = 5262,
+ BuiltInMax = 0x7fffffff,
+};
+
+enum SelectionControlShift {
+ SelectionControlFlattenShift = 0,
+ SelectionControlDontFlattenShift = 1,
+ SelectionControlMax = 0x7fffffff,
+};
+
+enum SelectionControlMask {
+ SelectionControlMaskNone = 0,
+ SelectionControlFlattenMask = 0x00000001,
+ SelectionControlDontFlattenMask = 0x00000002,
+};
+
+enum LoopControlShift {
+ LoopControlUnrollShift = 0,
+ LoopControlDontUnrollShift = 1,
+ LoopControlDependencyInfiniteShift = 2,
+ LoopControlDependencyLengthShift = 3,
+ LoopControlMax = 0x7fffffff,
+};
+
+enum LoopControlMask {
+ LoopControlMaskNone = 0,
+ LoopControlUnrollMask = 0x00000001,
+ LoopControlDontUnrollMask = 0x00000002,
+ LoopControlDependencyInfiniteMask = 0x00000004,
+ LoopControlDependencyLengthMask = 0x00000008,
+};
+
+enum FunctionControlShift {
+ FunctionControlInlineShift = 0,
+ FunctionControlDontInlineShift = 1,
+ FunctionControlPureShift = 2,
+ FunctionControlConstShift = 3,
+ FunctionControlMax = 0x7fffffff,
+};
+
+enum FunctionControlMask {
+ FunctionControlMaskNone = 0,
+ FunctionControlInlineMask = 0x00000001,
+ FunctionControlDontInlineMask = 0x00000002,
+ FunctionControlPureMask = 0x00000004,
+ FunctionControlConstMask = 0x00000008,
+};
+
+enum MemorySemanticsShift {
+ MemorySemanticsAcquireShift = 1,
+ MemorySemanticsReleaseShift = 2,
+ MemorySemanticsAcquireReleaseShift = 3,
+ MemorySemanticsSequentiallyConsistentShift = 4,
+ MemorySemanticsUniformMemoryShift = 6,
+ MemorySemanticsSubgroupMemoryShift = 7,
+ MemorySemanticsWorkgroupMemoryShift = 8,
+ MemorySemanticsCrossWorkgroupMemoryShift = 9,
+ MemorySemanticsAtomicCounterMemoryShift = 10,
+ MemorySemanticsImageMemoryShift = 11,
+ MemorySemanticsMax = 0x7fffffff,
+};
+
+enum MemorySemanticsMask {
+ MemorySemanticsMaskNone = 0,
+ MemorySemanticsAcquireMask = 0x00000002,
+ MemorySemanticsReleaseMask = 0x00000004,
+ MemorySemanticsAcquireReleaseMask = 0x00000008,
+ MemorySemanticsSequentiallyConsistentMask = 0x00000010,
+ MemorySemanticsUniformMemoryMask = 0x00000040,
+ MemorySemanticsSubgroupMemoryMask = 0x00000080,
+ MemorySemanticsWorkgroupMemoryMask = 0x00000100,
+ MemorySemanticsCrossWorkgroupMemoryMask = 0x00000200,
+ MemorySemanticsAtomicCounterMemoryMask = 0x00000400,
+ MemorySemanticsImageMemoryMask = 0x00000800,
+};
+
+enum MemoryAccessShift {
+ MemoryAccessVolatileShift = 0,
+ MemoryAccessAlignedShift = 1,
+ MemoryAccessNontemporalShift = 2,
+ MemoryAccessMax = 0x7fffffff,
+};
+
+enum MemoryAccessMask {
+ MemoryAccessMaskNone = 0,
+ MemoryAccessVolatileMask = 0x00000001,
+ MemoryAccessAlignedMask = 0x00000002,
+ MemoryAccessNontemporalMask = 0x00000004,
+};
+
+enum Scope {
+ ScopeCrossDevice = 0,
+ ScopeDevice = 1,
+ ScopeWorkgroup = 2,
+ ScopeSubgroup = 3,
+ ScopeInvocation = 4,
+ ScopeMax = 0x7fffffff,
+};
+
+enum GroupOperation {
+ GroupOperationReduce = 0,
+ GroupOperationInclusiveScan = 1,
+ GroupOperationExclusiveScan = 2,
+ GroupOperationMax = 0x7fffffff,
+};
+
+enum KernelEnqueueFlags {
+ KernelEnqueueFlagsNoWait = 0,
+ KernelEnqueueFlagsWaitKernel = 1,
+ KernelEnqueueFlagsWaitWorkGroup = 2,
+ KernelEnqueueFlagsMax = 0x7fffffff,
+};
+
+enum KernelProfilingInfoShift {
+ KernelProfilingInfoCmdExecTimeShift = 0,
+ KernelProfilingInfoMax = 0x7fffffff,
+};
+
+enum KernelProfilingInfoMask {
+ KernelProfilingInfoMaskNone = 0,
+ KernelProfilingInfoCmdExecTimeMask = 0x00000001,
+};
+
+enum Capability {
+ CapabilityMatrix = 0,
+ CapabilityShader = 1,
+ CapabilityGeometry = 2,
+ CapabilityTessellation = 3,
+ CapabilityAddresses = 4,
+ CapabilityLinkage = 5,
+ CapabilityKernel = 6,
+ CapabilityVector16 = 7,
+ CapabilityFloat16Buffer = 8,
+ CapabilityFloat16 = 9,
+ CapabilityFloat64 = 10,
+ CapabilityInt64 = 11,
+ CapabilityInt64Atomics = 12,
+ CapabilityImageBasic = 13,
+ CapabilityImageReadWrite = 14,
+ CapabilityImageMipmap = 15,
+ CapabilityPipes = 17,
+ CapabilityGroups = 18,
+ CapabilityDeviceEnqueue = 19,
+ CapabilityLiteralSampler = 20,
+ CapabilityAtomicStorage = 21,
+ CapabilityInt16 = 22,
+ CapabilityTessellationPointSize = 23,
+ CapabilityGeometryPointSize = 24,
+ CapabilityImageGatherExtended = 25,
+ CapabilityStorageImageMultisample = 27,
+ CapabilityUniformBufferArrayDynamicIndexing = 28,
+ CapabilitySampledImageArrayDynamicIndexing = 29,
+ CapabilityStorageBufferArrayDynamicIndexing = 30,
+ CapabilityStorageImageArrayDynamicIndexing = 31,
+ CapabilityClipDistance = 32,
+ CapabilityCullDistance = 33,
+ CapabilityImageCubeArray = 34,
+ CapabilitySampleRateShading = 35,
+ CapabilityImageRect = 36,
+ CapabilitySampledRect = 37,
+ CapabilityGenericPointer = 38,
+ CapabilityInt8 = 39,
+ CapabilityInputAttachment = 40,
+ CapabilitySparseResidency = 41,
+ CapabilityMinLod = 42,
+ CapabilitySampled1D = 43,
+ CapabilityImage1D = 44,
+ CapabilitySampledCubeArray = 45,
+ CapabilitySampledBuffer = 46,
+ CapabilityImageBuffer = 47,
+ CapabilityImageMSArray = 48,
+ CapabilityStorageImageExtendedFormats = 49,
+ CapabilityImageQuery = 50,
+ CapabilityDerivativeControl = 51,
+ CapabilityInterpolationFunction = 52,
+ CapabilityTransformFeedback = 53,
+ CapabilityGeometryStreams = 54,
+ CapabilityStorageImageReadWithoutFormat = 55,
+ CapabilityStorageImageWriteWithoutFormat = 56,
+ CapabilityMultiViewport = 57,
+ CapabilitySubgroupDispatch = 58,
+ CapabilityNamedBarrier = 59,
+ CapabilityPipeStorage = 60,
+ CapabilitySubgroupBallotKHR = 4423,
+ CapabilityDrawParameters = 4427,
+ CapabilitySubgroupVoteKHR = 4431,
+ CapabilityStorageBuffer16BitAccess = 4433,
+ CapabilityStorageUniformBufferBlock16 = 4433,
+ CapabilityStorageUniform16 = 4434,
+ CapabilityUniformAndStorageBuffer16BitAccess = 4434,
+ CapabilityStoragePushConstant16 = 4435,
+ CapabilityStorageInputOutput16 = 4436,
+ CapabilityDeviceGroup = 4437,
+ CapabilityMultiView = 4439,
+ CapabilityVariablePointersStorageBuffer = 4441,
+ CapabilityVariablePointers = 4442,
+ CapabilitySampleMaskOverrideCoverageNV = 5249,
+ CapabilityGeometryShaderPassthroughNV = 5251,
+ CapabilityShaderViewportIndexLayerNV = 5254,
+ CapabilityShaderViewportMaskNV = 5255,
+ CapabilityShaderStereoViewNV = 5259,
+ CapabilityPerViewAttributesNV = 5260,
+ CapabilityMax = 0x7fffffff,
+};
+
+enum Op {
+ OpNop = 0,
+ OpUndef = 1,
+ OpSourceContinued = 2,
+ OpSource = 3,
+ OpSourceExtension = 4,
+ OpName = 5,
+ OpMemberName = 6,
+ OpString = 7,
+ OpLine = 8,
+ OpExtension = 10,
+ OpExtInstImport = 11,
+ OpExtInst = 12,
+ OpMemoryModel = 14,
+ OpEntryPoint = 15,
+ OpExecutionMode = 16,
+ OpCapability = 17,
+ OpTypeVoid = 19,
+ OpTypeBool = 20,
+ OpTypeInt = 21,
+ OpTypeFloat = 22,
+ OpTypeVector = 23,
+ OpTypeMatrix = 24,
+ OpTypeImage = 25,
+ OpTypeSampler = 26,
+ OpTypeSampledImage = 27,
+ OpTypeArray = 28,
+ OpTypeRuntimeArray = 29,
+ OpTypeStruct = 30,
+ OpTypeOpaque = 31,
+ OpTypePointer = 32,
+ OpTypeFunction = 33,
+ OpTypeEvent = 34,
+ OpTypeDeviceEvent = 35,
+ OpTypeReserveId = 36,
+ OpTypeQueue = 37,
+ OpTypePipe = 38,
+ OpTypeForwardPointer = 39,
+ OpConstantTrue = 41,
+ OpConstantFalse = 42,
+ OpConstant = 43,
+ OpConstantComposite = 44,
+ OpConstantSampler = 45,
+ OpConstantNull = 46,
+ OpSpecConstantTrue = 48,
+ OpSpecConstantFalse = 49,
+ OpSpecConstant = 50,
+ OpSpecConstantComposite = 51,
+ OpSpecConstantOp = 52,
+ OpFunction = 54,
+ OpFunctionParameter = 55,
+ OpFunctionEnd = 56,
+ OpFunctionCall = 57,
+ OpVariable = 59,
+ OpImageTexelPointer = 60,
+ OpLoad = 61,
+ OpStore = 62,
+ OpCopyMemory = 63,
+ OpCopyMemorySized = 64,
+ OpAccessChain = 65,
+ OpInBoundsAccessChain = 66,
+ OpPtrAccessChain = 67,
+ OpArrayLength = 68,
+ OpGenericPtrMemSemantics = 69,
+ OpInBoundsPtrAccessChain = 70,
+ OpDecorate = 71,
+ OpMemberDecorate = 72,
+ OpDecorationGroup = 73,
+ OpGroupDecorate = 74,
+ OpGroupMemberDecorate = 75,
+ OpVectorExtractDynamic = 77,
+ OpVectorInsertDynamic = 78,
+ OpVectorShuffle = 79,
+ OpCompositeConstruct = 80,
+ OpCompositeExtract = 81,
+ OpCompositeInsert = 82,
+ OpCopyObject = 83,
+ OpTranspose = 84,
+ OpSampledImage = 86,
+ OpImageSampleImplicitLod = 87,
+ OpImageSampleExplicitLod = 88,
+ OpImageSampleDrefImplicitLod = 89,
+ OpImageSampleDrefExplicitLod = 90,
+ OpImageSampleProjImplicitLod = 91,
+ OpImageSampleProjExplicitLod = 92,
+ OpImageSampleProjDrefImplicitLod = 93,
+ OpImageSampleProjDrefExplicitLod = 94,
+ OpImageFetch = 95,
+ OpImageGather = 96,
+ OpImageDrefGather = 97,
+ OpImageRead = 98,
+ OpImageWrite = 99,
+ OpImage = 100,
+ OpImageQueryFormat = 101,
+ OpImageQueryOrder = 102,
+ OpImageQuerySizeLod = 103,
+ OpImageQuerySize = 104,
+ OpImageQueryLod = 105,
+ OpImageQueryLevels = 106,
+ OpImageQuerySamples = 107,
+ OpConvertFToU = 109,
+ OpConvertFToS = 110,
+ OpConvertSToF = 111,
+ OpConvertUToF = 112,
+ OpUConvert = 113,
+ OpSConvert = 114,
+ OpFConvert = 115,
+ OpQuantizeToF16 = 116,
+ OpConvertPtrToU = 117,
+ OpSatConvertSToU = 118,
+ OpSatConvertUToS = 119,
+ OpConvertUToPtr = 120,
+ OpPtrCastToGeneric = 121,
+ OpGenericCastToPtr = 122,
+ OpGenericCastToPtrExplicit = 123,
+ OpBitcast = 124,
+ OpSNegate = 126,
+ OpFNegate = 127,
+ OpIAdd = 128,
+ OpFAdd = 129,
+ OpISub = 130,
+ OpFSub = 131,
+ OpIMul = 132,
+ OpFMul = 133,
+ OpUDiv = 134,
+ OpSDiv = 135,
+ OpFDiv = 136,
+ OpUMod = 137,
+ OpSRem = 138,
+ OpSMod = 139,
+ OpFRem = 140,
+ OpFMod = 141,
+ OpVectorTimesScalar = 142,
+ OpMatrixTimesScalar = 143,
+ OpVectorTimesMatrix = 144,
+ OpMatrixTimesVector = 145,
+ OpMatrixTimesMatrix = 146,
+ OpOuterProduct = 147,
+ OpDot = 148,
+ OpIAddCarry = 149,
+ OpISubBorrow = 150,
+ OpUMulExtended = 151,
+ OpSMulExtended = 152,
+ OpAny = 154,
+ OpAll = 155,
+ OpIsNan = 156,
+ OpIsInf = 157,
+ OpIsFinite = 158,
+ OpIsNormal = 159,
+ OpSignBitSet = 160,
+ OpLessOrGreater = 161,
+ OpOrdered = 162,
+ OpUnordered = 163,
+ OpLogicalEqual = 164,
+ OpLogicalNotEqual = 165,
+ OpLogicalOr = 166,
+ OpLogicalAnd = 167,
+ OpLogicalNot = 168,
+ OpSelect = 169,
+ OpIEqual = 170,
+ OpINotEqual = 171,
+ OpUGreaterThan = 172,
+ OpSGreaterThan = 173,
+ OpUGreaterThanEqual = 174,
+ OpSGreaterThanEqual = 175,
+ OpULessThan = 176,
+ OpSLessThan = 177,
+ OpULessThanEqual = 178,
+ OpSLessThanEqual = 179,
+ OpFOrdEqual = 180,
+ OpFUnordEqual = 181,
+ OpFOrdNotEqual = 182,
+ OpFUnordNotEqual = 183,
+ OpFOrdLessThan = 184,
+ OpFUnordLessThan = 185,
+ OpFOrdGreaterThan = 186,
+ OpFUnordGreaterThan = 187,
+ OpFOrdLessThanEqual = 188,
+ OpFUnordLessThanEqual = 189,
+ OpFOrdGreaterThanEqual = 190,
+ OpFUnordGreaterThanEqual = 191,
+ OpShiftRightLogical = 194,
+ OpShiftRightArithmetic = 195,
+ OpShiftLeftLogical = 196,
+ OpBitwiseOr = 197,
+ OpBitwiseXor = 198,
+ OpBitwiseAnd = 199,
+ OpNot = 200,
+ OpBitFieldInsert = 201,
+ OpBitFieldSExtract = 202,
+ OpBitFieldUExtract = 203,
+ OpBitReverse = 204,
+ OpBitCount = 205,
+ OpDPdx = 207,
+ OpDPdy = 208,
+ OpFwidth = 209,
+ OpDPdxFine = 210,
+ OpDPdyFine = 211,
+ OpFwidthFine = 212,
+ OpDPdxCoarse = 213,
+ OpDPdyCoarse = 214,
+ OpFwidthCoarse = 215,
+ OpEmitVertex = 218,
+ OpEndPrimitive = 219,
+ OpEmitStreamVertex = 220,
+ OpEndStreamPrimitive = 221,
+ OpControlBarrier = 224,
+ OpMemoryBarrier = 225,
+ OpAtomicLoad = 227,
+ OpAtomicStore = 228,
+ OpAtomicExchange = 229,
+ OpAtomicCompareExchange = 230,
+ OpAtomicCompareExchangeWeak = 231,
+ OpAtomicIIncrement = 232,
+ OpAtomicIDecrement = 233,
+ OpAtomicIAdd = 234,
+ OpAtomicISub = 235,
+ OpAtomicSMin = 236,
+ OpAtomicUMin = 237,
+ OpAtomicSMax = 238,
+ OpAtomicUMax = 239,
+ OpAtomicAnd = 240,
+ OpAtomicOr = 241,
+ OpAtomicXor = 242,
+ OpPhi = 245,
+ OpLoopMerge = 246,
+ OpSelectionMerge = 247,
+ OpLabel = 248,
+ OpBranch = 249,
+ OpBranchConditional = 250,
+ OpSwitch = 251,
+ OpKill = 252,
+ OpReturn = 253,
+ OpReturnValue = 254,
+ OpUnreachable = 255,
+ OpLifetimeStart = 256,
+ OpLifetimeStop = 257,
+ OpGroupAsyncCopy = 259,
+ OpGroupWaitEvents = 260,
+ OpGroupAll = 261,
+ OpGroupAny = 262,
+ OpGroupBroadcast = 263,
+ OpGroupIAdd = 264,
+ OpGroupFAdd = 265,
+ OpGroupFMin = 266,
+ OpGroupUMin = 267,
+ OpGroupSMin = 268,
+ OpGroupFMax = 269,
+ OpGroupUMax = 270,
+ OpGroupSMax = 271,
+ OpReadPipe = 274,
+ OpWritePipe = 275,
+ OpReservedReadPipe = 276,
+ OpReservedWritePipe = 277,
+ OpReserveReadPipePackets = 278,
+ OpReserveWritePipePackets = 279,
+ OpCommitReadPipe = 280,
+ OpCommitWritePipe = 281,
+ OpIsValidReserveId = 282,
+ OpGetNumPipePackets = 283,
+ OpGetMaxPipePackets = 284,
+ OpGroupReserveReadPipePackets = 285,
+ OpGroupReserveWritePipePackets = 286,
+ OpGroupCommitReadPipe = 287,
+ OpGroupCommitWritePipe = 288,
+ OpEnqueueMarker = 291,
+ OpEnqueueKernel = 292,
+ OpGetKernelNDrangeSubGroupCount = 293,
+ OpGetKernelNDrangeMaxSubGroupSize = 294,
+ OpGetKernelWorkGroupSize = 295,
+ OpGetKernelPreferredWorkGroupSizeMultiple = 296,
+ OpRetainEvent = 297,
+ OpReleaseEvent = 298,
+ OpCreateUserEvent = 299,
+ OpIsValidEvent = 300,
+ OpSetUserEventStatus = 301,
+ OpCaptureEventProfilingInfo = 302,
+ OpGetDefaultQueue = 303,
+ OpBuildNDRange = 304,
+ OpImageSparseSampleImplicitLod = 305,
+ OpImageSparseSampleExplicitLod = 306,
+ OpImageSparseSampleDrefImplicitLod = 307,
+ OpImageSparseSampleDrefExplicitLod = 308,
+ OpImageSparseSampleProjImplicitLod = 309,
+ OpImageSparseSampleProjExplicitLod = 310,
+ OpImageSparseSampleProjDrefImplicitLod = 311,
+ OpImageSparseSampleProjDrefExplicitLod = 312,
+ OpImageSparseFetch = 313,
+ OpImageSparseGather = 314,
+ OpImageSparseDrefGather = 315,
+ OpImageSparseTexelsResident = 316,
+ OpNoLine = 317,
+ OpAtomicFlagTestAndSet = 318,
+ OpAtomicFlagClear = 319,
+ OpImageSparseRead = 320,
+ OpSizeOf = 321,
+ OpTypePipeStorage = 322,
+ OpConstantPipeStorage = 323,
+ OpCreatePipeFromPipeStorage = 324,
+ OpGetKernelLocalSizeForSubgroupCount = 325,
+ OpGetKernelMaxNumSubgroups = 326,
+ OpTypeNamedBarrier = 327,
+ OpNamedBarrierInitialize = 328,
+ OpMemoryNamedBarrier = 329,
+ OpModuleProcessed = 330,
+ OpSubgroupBallotKHR = 4421,
+ OpSubgroupFirstInvocationKHR = 4422,
+ OpSubgroupAllKHR = 4428,
+ OpSubgroupAnyKHR = 4429,
+ OpSubgroupAllEqualKHR = 4430,
+ OpSubgroupReadInvocationKHR = 4432,
+ OpMax = 0x7fffffff,
+};
+
+// Overload operator| for mask bit combining
+
+inline ImageOperandsMask operator|(ImageOperandsMask a, ImageOperandsMask b) { return ImageOperandsMask(unsigned(a) | unsigned(b)); }
+inline FPFastMathModeMask operator|(FPFastMathModeMask a, FPFastMathModeMask b) { return FPFastMathModeMask(unsigned(a) | unsigned(b)); }
+inline SelectionControlMask operator|(SelectionControlMask a, SelectionControlMask b) { return SelectionControlMask(unsigned(a) | unsigned(b)); }
+inline LoopControlMask operator|(LoopControlMask a, LoopControlMask b) { return LoopControlMask(unsigned(a) | unsigned(b)); }
+inline FunctionControlMask operator|(FunctionControlMask a, FunctionControlMask b) { return FunctionControlMask(unsigned(a) | unsigned(b)); }
+inline MemorySemanticsMask operator|(MemorySemanticsMask a, MemorySemanticsMask b) { return MemorySemanticsMask(unsigned(a) | unsigned(b)); }
+inline MemoryAccessMask operator|(MemoryAccessMask a, MemoryAccessMask b) { return MemoryAccessMask(unsigned(a) | unsigned(b)); }
+inline KernelProfilingInfoMask operator|(KernelProfilingInfoMask a, KernelProfilingInfoMask b) { return KernelProfilingInfoMask(unsigned(a) | unsigned(b)); }
+
+} // end namespace spv
+
+#endif // #ifndef spirv_HPP
+
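Editor's note (not part of the patch): the plain-enum header whose tail appears above exposes every SPIR-V token as an unscoped constant in the spv namespace (CapabilityMultiViewport, OpCapability, and so on), and the generated headers describe each instruction's first word as packing the word count into the high 16 bits and the opcode into the low 16 bits (WordCountShift = 16, OpCodeMask = 0xffff in the sibling spirv.hpp11/json/lua files below). A minimal sketch of emitting and re-reading an OpCapability instruction with these tokens follows; the include path is illustrative, and the 16/0xffff literals are taken from those sibling files rather than from the hunk shown here.

// Editorial sketch: encode/decode a two-word OpCapability instruction
// using the plain-enum tokens from the header above.
#include <cstdint>
#include <cassert>
#include "vulkan/spirv.hpp"   // hypothetical include path for the file above

int main() {
    // First word of every instruction: word count in the high 16 bits,
    // opcode in the low 16 bits.
    const uint32_t words[2] = {
        (2u << 16) | uint32_t(spv::OpCapability),
        uint32_t(spv::CapabilityMultiViewport),
    };

    // Decode it again.
    unsigned wordCount = words[0] >> 16;
    spv::Op opcode = spv::Op(words[0] & 0xffffu);
    assert(wordCount == 2 && opcode == spv::OpCapability);
    assert(spv::Capability(words[1]) == spv::CapabilityMultiViewport);
    return 0;
}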
diff --git a/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/spirv.hpp11 b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/spirv.hpp11
new file mode 100644
index 0000000..cb089a7
--- /dev/null
+++ b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/spirv.hpp11
@@ -0,0 +1,980 @@
+// Copyright (c) 2014-2017 The Khronos Group Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and/or associated documentation files (the "Materials"),
+// to deal in the Materials without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Materials, and to permit persons to whom the
+// Materials are furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Materials.
+//
+// MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+// STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+// HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+//
+// THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+// IN THE MATERIALS.
+
+// This header is automatically generated by the same tool that creates
+// the Binary Section of the SPIR-V specification.
+
+// Enumeration tokens for SPIR-V, in various styles:
+// C, C++, C++11, JSON, Lua, Python
+//
+// - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL
+// - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL
+// - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL
+// - Lua will use tables, e.g.: spv.SourceLanguage.GLSL
+// - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL']
+//
+// Some tokens act like mask values, which can be OR'd together,
+// while others are mutually exclusive. The mask-like ones have
+// "Mask" in their name, and a parallel enum that has the shift
+// amount (1 << x) for each corresponding enumerant.
+
+#ifndef spirv_HPP
+#define spirv_HPP
+
+namespace spv {
+
+typedef unsigned int Id;
+
+#define SPV_VERSION 0x10100
+#define SPV_REVISION 6
+
+static const unsigned int MagicNumber = 0x07230203;
+static const unsigned int Version = 0x00010100;
+static const unsigned int Revision = 6;
+static const unsigned int OpCodeMask = 0xffff;
+static const unsigned int WordCountShift = 16;
+
+enum class SourceLanguage : unsigned {
+ Unknown = 0,
+ ESSL = 1,
+ GLSL = 2,
+ OpenCL_C = 3,
+ OpenCL_CPP = 4,
+ HLSL = 5,
+ Max = 0x7fffffff,
+};
+
+enum class ExecutionModel : unsigned {
+ Vertex = 0,
+ TessellationControl = 1,
+ TessellationEvaluation = 2,
+ Geometry = 3,
+ Fragment = 4,
+ GLCompute = 5,
+ Kernel = 6,
+ Max = 0x7fffffff,
+};
+
+enum class AddressingModel : unsigned {
+ Logical = 0,
+ Physical32 = 1,
+ Physical64 = 2,
+ Max = 0x7fffffff,
+};
+
+enum class MemoryModel : unsigned {
+ Simple = 0,
+ GLSL450 = 1,
+ OpenCL = 2,
+ Max = 0x7fffffff,
+};
+
+enum class ExecutionMode : unsigned {
+ Invocations = 0,
+ SpacingEqual = 1,
+ SpacingFractionalEven = 2,
+ SpacingFractionalOdd = 3,
+ VertexOrderCw = 4,
+ VertexOrderCcw = 5,
+ PixelCenterInteger = 6,
+ OriginUpperLeft = 7,
+ OriginLowerLeft = 8,
+ EarlyFragmentTests = 9,
+ PointMode = 10,
+ Xfb = 11,
+ DepthReplacing = 12,
+ DepthGreater = 14,
+ DepthLess = 15,
+ DepthUnchanged = 16,
+ LocalSize = 17,
+ LocalSizeHint = 18,
+ InputPoints = 19,
+ InputLines = 20,
+ InputLinesAdjacency = 21,
+ Triangles = 22,
+ InputTrianglesAdjacency = 23,
+ Quads = 24,
+ Isolines = 25,
+ OutputVertices = 26,
+ OutputPoints = 27,
+ OutputLineStrip = 28,
+ OutputTriangleStrip = 29,
+ VecTypeHint = 30,
+ ContractionOff = 31,
+ Initializer = 33,
+ Finalizer = 34,
+ SubgroupSize = 35,
+ SubgroupsPerWorkgroup = 36,
+ Max = 0x7fffffff,
+};
+
+enum class StorageClass : unsigned {
+ UniformConstant = 0,
+ Input = 1,
+ Uniform = 2,
+ Output = 3,
+ Workgroup = 4,
+ CrossWorkgroup = 5,
+ Private = 6,
+ Function = 7,
+ Generic = 8,
+ PushConstant = 9,
+ AtomicCounter = 10,
+ Image = 11,
+ StorageBuffer = 12,
+ Max = 0x7fffffff,
+};
+
+enum class Dim : unsigned {
+ Dim1D = 0,
+ Dim2D = 1,
+ Dim3D = 2,
+ Cube = 3,
+ Rect = 4,
+ Buffer = 5,
+ SubpassData = 6,
+ Max = 0x7fffffff,
+};
+
+enum class SamplerAddressingMode : unsigned {
+ None = 0,
+ ClampToEdge = 1,
+ Clamp = 2,
+ Repeat = 3,
+ RepeatMirrored = 4,
+ Max = 0x7fffffff,
+};
+
+enum class SamplerFilterMode : unsigned {
+ Nearest = 0,
+ Linear = 1,
+ Max = 0x7fffffff,
+};
+
+enum class ImageFormat : unsigned {
+ Unknown = 0,
+ Rgba32f = 1,
+ Rgba16f = 2,
+ R32f = 3,
+ Rgba8 = 4,
+ Rgba8Snorm = 5,
+ Rg32f = 6,
+ Rg16f = 7,
+ R11fG11fB10f = 8,
+ R16f = 9,
+ Rgba16 = 10,
+ Rgb10A2 = 11,
+ Rg16 = 12,
+ Rg8 = 13,
+ R16 = 14,
+ R8 = 15,
+ Rgba16Snorm = 16,
+ Rg16Snorm = 17,
+ Rg8Snorm = 18,
+ R16Snorm = 19,
+ R8Snorm = 20,
+ Rgba32i = 21,
+ Rgba16i = 22,
+ Rgba8i = 23,
+ R32i = 24,
+ Rg32i = 25,
+ Rg16i = 26,
+ Rg8i = 27,
+ R16i = 28,
+ R8i = 29,
+ Rgba32ui = 30,
+ Rgba16ui = 31,
+ Rgba8ui = 32,
+ R32ui = 33,
+ Rgb10a2ui = 34,
+ Rg32ui = 35,
+ Rg16ui = 36,
+ Rg8ui = 37,
+ R16ui = 38,
+ R8ui = 39,
+ Max = 0x7fffffff,
+};
+
+enum class ImageChannelOrder : unsigned {
+ R = 0,
+ A = 1,
+ RG = 2,
+ RA = 3,
+ RGB = 4,
+ RGBA = 5,
+ BGRA = 6,
+ ARGB = 7,
+ Intensity = 8,
+ Luminance = 9,
+ Rx = 10,
+ RGx = 11,
+ RGBx = 12,
+ Depth = 13,
+ DepthStencil = 14,
+ sRGB = 15,
+ sRGBx = 16,
+ sRGBA = 17,
+ sBGRA = 18,
+ ABGR = 19,
+ Max = 0x7fffffff,
+};
+
+enum class ImageChannelDataType : unsigned {
+ SnormInt8 = 0,
+ SnormInt16 = 1,
+ UnormInt8 = 2,
+ UnormInt16 = 3,
+ UnormShort565 = 4,
+ UnormShort555 = 5,
+ UnormInt101010 = 6,
+ SignedInt8 = 7,
+ SignedInt16 = 8,
+ SignedInt32 = 9,
+ UnsignedInt8 = 10,
+ UnsignedInt16 = 11,
+ UnsignedInt32 = 12,
+ HalfFloat = 13,
+ Float = 14,
+ UnormInt24 = 15,
+ UnormInt101010_2 = 16,
+ Max = 0x7fffffff,
+};
+
+enum class ImageOperandsShift : unsigned {
+ Bias = 0,
+ Lod = 1,
+ Grad = 2,
+ ConstOffset = 3,
+ Offset = 4,
+ ConstOffsets = 5,
+ Sample = 6,
+ MinLod = 7,
+ Max = 0x7fffffff,
+};
+
+enum class ImageOperandsMask : unsigned {
+ MaskNone = 0,
+ Bias = 0x00000001,
+ Lod = 0x00000002,
+ Grad = 0x00000004,
+ ConstOffset = 0x00000008,
+ Offset = 0x00000010,
+ ConstOffsets = 0x00000020,
+ Sample = 0x00000040,
+ MinLod = 0x00000080,
+};
+
+enum class FPFastMathModeShift : unsigned {
+ NotNaN = 0,
+ NotInf = 1,
+ NSZ = 2,
+ AllowRecip = 3,
+ Fast = 4,
+ Max = 0x7fffffff,
+};
+
+enum class FPFastMathModeMask : unsigned {
+ MaskNone = 0,
+ NotNaN = 0x00000001,
+ NotInf = 0x00000002,
+ NSZ = 0x00000004,
+ AllowRecip = 0x00000008,
+ Fast = 0x00000010,
+};
+
+enum class FPRoundingMode : unsigned {
+ RTE = 0,
+ RTZ = 1,
+ RTP = 2,
+ RTN = 3,
+ Max = 0x7fffffff,
+};
+
+enum class LinkageType : unsigned {
+ Export = 0,
+ Import = 1,
+ Max = 0x7fffffff,
+};
+
+enum class AccessQualifier : unsigned {
+ ReadOnly = 0,
+ WriteOnly = 1,
+ ReadWrite = 2,
+ Max = 0x7fffffff,
+};
+
+enum class FunctionParameterAttribute : unsigned {
+ Zext = 0,
+ Sext = 1,
+ ByVal = 2,
+ Sret = 3,
+ NoAlias = 4,
+ NoCapture = 5,
+ NoWrite = 6,
+ NoReadWrite = 7,
+ Max = 0x7fffffff,
+};
+
+enum class Decoration : unsigned {
+ RelaxedPrecision = 0,
+ SpecId = 1,
+ Block = 2,
+ BufferBlock = 3,
+ RowMajor = 4,
+ ColMajor = 5,
+ ArrayStride = 6,
+ MatrixStride = 7,
+ GLSLShared = 8,
+ GLSLPacked = 9,
+ CPacked = 10,
+ BuiltIn = 11,
+ NoPerspective = 13,
+ Flat = 14,
+ Patch = 15,
+ Centroid = 16,
+ Sample = 17,
+ Invariant = 18,
+ Restrict = 19,
+ Aliased = 20,
+ Volatile = 21,
+ Constant = 22,
+ Coherent = 23,
+ NonWritable = 24,
+ NonReadable = 25,
+ Uniform = 26,
+ SaturatedConversion = 28,
+ Stream = 29,
+ Location = 30,
+ Component = 31,
+ Index = 32,
+ Binding = 33,
+ DescriptorSet = 34,
+ Offset = 35,
+ XfbBuffer = 36,
+ XfbStride = 37,
+ FuncParamAttr = 38,
+ FPRoundingMode = 39,
+ FPFastMathMode = 40,
+ LinkageAttributes = 41,
+ NoContraction = 42,
+ InputAttachmentIndex = 43,
+ Alignment = 44,
+ MaxByteOffset = 45,
+ OverrideCoverageNV = 5248,
+ PassthroughNV = 5250,
+ ViewportRelativeNV = 5252,
+ SecondaryViewportRelativeNV = 5256,
+ Max = 0x7fffffff,
+};
+
+enum class BuiltIn : unsigned {
+ Position = 0,
+ PointSize = 1,
+ ClipDistance = 3,
+ CullDistance = 4,
+ VertexId = 5,
+ InstanceId = 6,
+ PrimitiveId = 7,
+ InvocationId = 8,
+ Layer = 9,
+ ViewportIndex = 10,
+ TessLevelOuter = 11,
+ TessLevelInner = 12,
+ TessCoord = 13,
+ PatchVertices = 14,
+ FragCoord = 15,
+ PointCoord = 16,
+ FrontFacing = 17,
+ SampleId = 18,
+ SamplePosition = 19,
+ SampleMask = 20,
+ FragDepth = 22,
+ HelperInvocation = 23,
+ NumWorkgroups = 24,
+ WorkgroupSize = 25,
+ WorkgroupId = 26,
+ LocalInvocationId = 27,
+ GlobalInvocationId = 28,
+ LocalInvocationIndex = 29,
+ WorkDim = 30,
+ GlobalSize = 31,
+ EnqueuedWorkgroupSize = 32,
+ GlobalOffset = 33,
+ GlobalLinearId = 34,
+ SubgroupSize = 36,
+ SubgroupMaxSize = 37,
+ NumSubgroups = 38,
+ NumEnqueuedSubgroups = 39,
+ SubgroupId = 40,
+ SubgroupLocalInvocationId = 41,
+ VertexIndex = 42,
+ InstanceIndex = 43,
+ SubgroupEqMaskKHR = 4416,
+ SubgroupGeMaskKHR = 4417,
+ SubgroupGtMaskKHR = 4418,
+ SubgroupLeMaskKHR = 4419,
+ SubgroupLtMaskKHR = 4420,
+ BaseVertex = 4424,
+ BaseInstance = 4425,
+ DrawIndex = 4426,
+ DeviceIndex = 4438,
+ ViewIndex = 4440,
+ ViewportMaskNV = 5253,
+ SecondaryPositionNV = 5257,
+ SecondaryViewportMaskNV = 5258,
+ PositionPerViewNV = 5261,
+ ViewportMaskPerViewNV = 5262,
+ Max = 0x7fffffff,
+};
+
+enum class SelectionControlShift : unsigned {
+ Flatten = 0,
+ DontFlatten = 1,
+ Max = 0x7fffffff,
+};
+
+enum class SelectionControlMask : unsigned {
+ MaskNone = 0,
+ Flatten = 0x00000001,
+ DontFlatten = 0x00000002,
+};
+
+enum class LoopControlShift : unsigned {
+ Unroll = 0,
+ DontUnroll = 1,
+ DependencyInfinite = 2,
+ DependencyLength = 3,
+ Max = 0x7fffffff,
+};
+
+enum class LoopControlMask : unsigned {
+ MaskNone = 0,
+ Unroll = 0x00000001,
+ DontUnroll = 0x00000002,
+ DependencyInfinite = 0x00000004,
+ DependencyLength = 0x00000008,
+};
+
+enum class FunctionControlShift : unsigned {
+ Inline = 0,
+ DontInline = 1,
+ Pure = 2,
+ Const = 3,
+ Max = 0x7fffffff,
+};
+
+enum class FunctionControlMask : unsigned {
+ MaskNone = 0,
+ Inline = 0x00000001,
+ DontInline = 0x00000002,
+ Pure = 0x00000004,
+ Const = 0x00000008,
+};
+
+enum class MemorySemanticsShift : unsigned {
+ Acquire = 1,
+ Release = 2,
+ AcquireRelease = 3,
+ SequentiallyConsistent = 4,
+ UniformMemory = 6,
+ SubgroupMemory = 7,
+ WorkgroupMemory = 8,
+ CrossWorkgroupMemory = 9,
+ AtomicCounterMemory = 10,
+ ImageMemory = 11,
+ Max = 0x7fffffff,
+};
+
+enum class MemorySemanticsMask : unsigned {
+ MaskNone = 0,
+ Acquire = 0x00000002,
+ Release = 0x00000004,
+ AcquireRelease = 0x00000008,
+ SequentiallyConsistent = 0x00000010,
+ UniformMemory = 0x00000040,
+ SubgroupMemory = 0x00000080,
+ WorkgroupMemory = 0x00000100,
+ CrossWorkgroupMemory = 0x00000200,
+ AtomicCounterMemory = 0x00000400,
+ ImageMemory = 0x00000800,
+};
+
+enum class MemoryAccessShift : unsigned {
+ Volatile = 0,
+ Aligned = 1,
+ Nontemporal = 2,
+ Max = 0x7fffffff,
+};
+
+enum class MemoryAccessMask : unsigned {
+ MaskNone = 0,
+ Volatile = 0x00000001,
+ Aligned = 0x00000002,
+ Nontemporal = 0x00000004,
+};
+
+enum class Scope : unsigned {
+ CrossDevice = 0,
+ Device = 1,
+ Workgroup = 2,
+ Subgroup = 3,
+ Invocation = 4,
+ Max = 0x7fffffff,
+};
+
+enum class GroupOperation : unsigned {
+ Reduce = 0,
+ InclusiveScan = 1,
+ ExclusiveScan = 2,
+ Max = 0x7fffffff,
+};
+
+enum class KernelEnqueueFlags : unsigned {
+ NoWait = 0,
+ WaitKernel = 1,
+ WaitWorkGroup = 2,
+ Max = 0x7fffffff,
+};
+
+enum class KernelProfilingInfoShift : unsigned {
+ CmdExecTime = 0,
+ Max = 0x7fffffff,
+};
+
+enum class KernelProfilingInfoMask : unsigned {
+ MaskNone = 0,
+ CmdExecTime = 0x00000001,
+};
+
+enum class Capability : unsigned {
+ Matrix = 0,
+ Shader = 1,
+ Geometry = 2,
+ Tessellation = 3,
+ Addresses = 4,
+ Linkage = 5,
+ Kernel = 6,
+ Vector16 = 7,
+ Float16Buffer = 8,
+ Float16 = 9,
+ Float64 = 10,
+ Int64 = 11,
+ Int64Atomics = 12,
+ ImageBasic = 13,
+ ImageReadWrite = 14,
+ ImageMipmap = 15,
+ Pipes = 17,
+ Groups = 18,
+ DeviceEnqueue = 19,
+ LiteralSampler = 20,
+ AtomicStorage = 21,
+ Int16 = 22,
+ TessellationPointSize = 23,
+ GeometryPointSize = 24,
+ ImageGatherExtended = 25,
+ StorageImageMultisample = 27,
+ UniformBufferArrayDynamicIndexing = 28,
+ SampledImageArrayDynamicIndexing = 29,
+ StorageBufferArrayDynamicIndexing = 30,
+ StorageImageArrayDynamicIndexing = 31,
+ ClipDistance = 32,
+ CullDistance = 33,
+ ImageCubeArray = 34,
+ SampleRateShading = 35,
+ ImageRect = 36,
+ SampledRect = 37,
+ GenericPointer = 38,
+ Int8 = 39,
+ InputAttachment = 40,
+ SparseResidency = 41,
+ MinLod = 42,
+ Sampled1D = 43,
+ Image1D = 44,
+ SampledCubeArray = 45,
+ SampledBuffer = 46,
+ ImageBuffer = 47,
+ ImageMSArray = 48,
+ StorageImageExtendedFormats = 49,
+ ImageQuery = 50,
+ DerivativeControl = 51,
+ InterpolationFunction = 52,
+ TransformFeedback = 53,
+ GeometryStreams = 54,
+ StorageImageReadWithoutFormat = 55,
+ StorageImageWriteWithoutFormat = 56,
+ MultiViewport = 57,
+ SubgroupDispatch = 58,
+ NamedBarrier = 59,
+ PipeStorage = 60,
+ SubgroupBallotKHR = 4423,
+ DrawParameters = 4427,
+ SubgroupVoteKHR = 4431,
+ StorageBuffer16BitAccess = 4433,
+ StorageUniformBufferBlock16 = 4433,
+ StorageUniform16 = 4434,
+ UniformAndStorageBuffer16BitAccess = 4434,
+ StoragePushConstant16 = 4435,
+ StorageInputOutput16 = 4436,
+ DeviceGroup = 4437,
+ MultiView = 4439,
+ VariablePointersStorageBuffer = 4441,
+ VariablePointers = 4442,
+ SampleMaskOverrideCoverageNV = 5249,
+ GeometryShaderPassthroughNV = 5251,
+ ShaderViewportIndexLayerNV = 5254,
+ ShaderViewportMaskNV = 5255,
+ ShaderStereoViewNV = 5259,
+ PerViewAttributesNV = 5260,
+ Max = 0x7fffffff,
+};
+
+enum class Op : unsigned {
+ OpNop = 0,
+ OpUndef = 1,
+ OpSourceContinued = 2,
+ OpSource = 3,
+ OpSourceExtension = 4,
+ OpName = 5,
+ OpMemberName = 6,
+ OpString = 7,
+ OpLine = 8,
+ OpExtension = 10,
+ OpExtInstImport = 11,
+ OpExtInst = 12,
+ OpMemoryModel = 14,
+ OpEntryPoint = 15,
+ OpExecutionMode = 16,
+ OpCapability = 17,
+ OpTypeVoid = 19,
+ OpTypeBool = 20,
+ OpTypeInt = 21,
+ OpTypeFloat = 22,
+ OpTypeVector = 23,
+ OpTypeMatrix = 24,
+ OpTypeImage = 25,
+ OpTypeSampler = 26,
+ OpTypeSampledImage = 27,
+ OpTypeArray = 28,
+ OpTypeRuntimeArray = 29,
+ OpTypeStruct = 30,
+ OpTypeOpaque = 31,
+ OpTypePointer = 32,
+ OpTypeFunction = 33,
+ OpTypeEvent = 34,
+ OpTypeDeviceEvent = 35,
+ OpTypeReserveId = 36,
+ OpTypeQueue = 37,
+ OpTypePipe = 38,
+ OpTypeForwardPointer = 39,
+ OpConstantTrue = 41,
+ OpConstantFalse = 42,
+ OpConstant = 43,
+ OpConstantComposite = 44,
+ OpConstantSampler = 45,
+ OpConstantNull = 46,
+ OpSpecConstantTrue = 48,
+ OpSpecConstantFalse = 49,
+ OpSpecConstant = 50,
+ OpSpecConstantComposite = 51,
+ OpSpecConstantOp = 52,
+ OpFunction = 54,
+ OpFunctionParameter = 55,
+ OpFunctionEnd = 56,
+ OpFunctionCall = 57,
+ OpVariable = 59,
+ OpImageTexelPointer = 60,
+ OpLoad = 61,
+ OpStore = 62,
+ OpCopyMemory = 63,
+ OpCopyMemorySized = 64,
+ OpAccessChain = 65,
+ OpInBoundsAccessChain = 66,
+ OpPtrAccessChain = 67,
+ OpArrayLength = 68,
+ OpGenericPtrMemSemantics = 69,
+ OpInBoundsPtrAccessChain = 70,
+ OpDecorate = 71,
+ OpMemberDecorate = 72,
+ OpDecorationGroup = 73,
+ OpGroupDecorate = 74,
+ OpGroupMemberDecorate = 75,
+ OpVectorExtractDynamic = 77,
+ OpVectorInsertDynamic = 78,
+ OpVectorShuffle = 79,
+ OpCompositeConstruct = 80,
+ OpCompositeExtract = 81,
+ OpCompositeInsert = 82,
+ OpCopyObject = 83,
+ OpTranspose = 84,
+ OpSampledImage = 86,
+ OpImageSampleImplicitLod = 87,
+ OpImageSampleExplicitLod = 88,
+ OpImageSampleDrefImplicitLod = 89,
+ OpImageSampleDrefExplicitLod = 90,
+ OpImageSampleProjImplicitLod = 91,
+ OpImageSampleProjExplicitLod = 92,
+ OpImageSampleProjDrefImplicitLod = 93,
+ OpImageSampleProjDrefExplicitLod = 94,
+ OpImageFetch = 95,
+ OpImageGather = 96,
+ OpImageDrefGather = 97,
+ OpImageRead = 98,
+ OpImageWrite = 99,
+ OpImage = 100,
+ OpImageQueryFormat = 101,
+ OpImageQueryOrder = 102,
+ OpImageQuerySizeLod = 103,
+ OpImageQuerySize = 104,
+ OpImageQueryLod = 105,
+ OpImageQueryLevels = 106,
+ OpImageQuerySamples = 107,
+ OpConvertFToU = 109,
+ OpConvertFToS = 110,
+ OpConvertSToF = 111,
+ OpConvertUToF = 112,
+ OpUConvert = 113,
+ OpSConvert = 114,
+ OpFConvert = 115,
+ OpQuantizeToF16 = 116,
+ OpConvertPtrToU = 117,
+ OpSatConvertSToU = 118,
+ OpSatConvertUToS = 119,
+ OpConvertUToPtr = 120,
+ OpPtrCastToGeneric = 121,
+ OpGenericCastToPtr = 122,
+ OpGenericCastToPtrExplicit = 123,
+ OpBitcast = 124,
+ OpSNegate = 126,
+ OpFNegate = 127,
+ OpIAdd = 128,
+ OpFAdd = 129,
+ OpISub = 130,
+ OpFSub = 131,
+ OpIMul = 132,
+ OpFMul = 133,
+ OpUDiv = 134,
+ OpSDiv = 135,
+ OpFDiv = 136,
+ OpUMod = 137,
+ OpSRem = 138,
+ OpSMod = 139,
+ OpFRem = 140,
+ OpFMod = 141,
+ OpVectorTimesScalar = 142,
+ OpMatrixTimesScalar = 143,
+ OpVectorTimesMatrix = 144,
+ OpMatrixTimesVector = 145,
+ OpMatrixTimesMatrix = 146,
+ OpOuterProduct = 147,
+ OpDot = 148,
+ OpIAddCarry = 149,
+ OpISubBorrow = 150,
+ OpUMulExtended = 151,
+ OpSMulExtended = 152,
+ OpAny = 154,
+ OpAll = 155,
+ OpIsNan = 156,
+ OpIsInf = 157,
+ OpIsFinite = 158,
+ OpIsNormal = 159,
+ OpSignBitSet = 160,
+ OpLessOrGreater = 161,
+ OpOrdered = 162,
+ OpUnordered = 163,
+ OpLogicalEqual = 164,
+ OpLogicalNotEqual = 165,
+ OpLogicalOr = 166,
+ OpLogicalAnd = 167,
+ OpLogicalNot = 168,
+ OpSelect = 169,
+ OpIEqual = 170,
+ OpINotEqual = 171,
+ OpUGreaterThan = 172,
+ OpSGreaterThan = 173,
+ OpUGreaterThanEqual = 174,
+ OpSGreaterThanEqual = 175,
+ OpULessThan = 176,
+ OpSLessThan = 177,
+ OpULessThanEqual = 178,
+ OpSLessThanEqual = 179,
+ OpFOrdEqual = 180,
+ OpFUnordEqual = 181,
+ OpFOrdNotEqual = 182,
+ OpFUnordNotEqual = 183,
+ OpFOrdLessThan = 184,
+ OpFUnordLessThan = 185,
+ OpFOrdGreaterThan = 186,
+ OpFUnordGreaterThan = 187,
+ OpFOrdLessThanEqual = 188,
+ OpFUnordLessThanEqual = 189,
+ OpFOrdGreaterThanEqual = 190,
+ OpFUnordGreaterThanEqual = 191,
+ OpShiftRightLogical = 194,
+ OpShiftRightArithmetic = 195,
+ OpShiftLeftLogical = 196,
+ OpBitwiseOr = 197,
+ OpBitwiseXor = 198,
+ OpBitwiseAnd = 199,
+ OpNot = 200,
+ OpBitFieldInsert = 201,
+ OpBitFieldSExtract = 202,
+ OpBitFieldUExtract = 203,
+ OpBitReverse = 204,
+ OpBitCount = 205,
+ OpDPdx = 207,
+ OpDPdy = 208,
+ OpFwidth = 209,
+ OpDPdxFine = 210,
+ OpDPdyFine = 211,
+ OpFwidthFine = 212,
+ OpDPdxCoarse = 213,
+ OpDPdyCoarse = 214,
+ OpFwidthCoarse = 215,
+ OpEmitVertex = 218,
+ OpEndPrimitive = 219,
+ OpEmitStreamVertex = 220,
+ OpEndStreamPrimitive = 221,
+ OpControlBarrier = 224,
+ OpMemoryBarrier = 225,
+ OpAtomicLoad = 227,
+ OpAtomicStore = 228,
+ OpAtomicExchange = 229,
+ OpAtomicCompareExchange = 230,
+ OpAtomicCompareExchangeWeak = 231,
+ OpAtomicIIncrement = 232,
+ OpAtomicIDecrement = 233,
+ OpAtomicIAdd = 234,
+ OpAtomicISub = 235,
+ OpAtomicSMin = 236,
+ OpAtomicUMin = 237,
+ OpAtomicSMax = 238,
+ OpAtomicUMax = 239,
+ OpAtomicAnd = 240,
+ OpAtomicOr = 241,
+ OpAtomicXor = 242,
+ OpPhi = 245,
+ OpLoopMerge = 246,
+ OpSelectionMerge = 247,
+ OpLabel = 248,
+ OpBranch = 249,
+ OpBranchConditional = 250,
+ OpSwitch = 251,
+ OpKill = 252,
+ OpReturn = 253,
+ OpReturnValue = 254,
+ OpUnreachable = 255,
+ OpLifetimeStart = 256,
+ OpLifetimeStop = 257,
+ OpGroupAsyncCopy = 259,
+ OpGroupWaitEvents = 260,
+ OpGroupAll = 261,
+ OpGroupAny = 262,
+ OpGroupBroadcast = 263,
+ OpGroupIAdd = 264,
+ OpGroupFAdd = 265,
+ OpGroupFMin = 266,
+ OpGroupUMin = 267,
+ OpGroupSMin = 268,
+ OpGroupFMax = 269,
+ OpGroupUMax = 270,
+ OpGroupSMax = 271,
+ OpReadPipe = 274,
+ OpWritePipe = 275,
+ OpReservedReadPipe = 276,
+ OpReservedWritePipe = 277,
+ OpReserveReadPipePackets = 278,
+ OpReserveWritePipePackets = 279,
+ OpCommitReadPipe = 280,
+ OpCommitWritePipe = 281,
+ OpIsValidReserveId = 282,
+ OpGetNumPipePackets = 283,
+ OpGetMaxPipePackets = 284,
+ OpGroupReserveReadPipePackets = 285,
+ OpGroupReserveWritePipePackets = 286,
+ OpGroupCommitReadPipe = 287,
+ OpGroupCommitWritePipe = 288,
+ OpEnqueueMarker = 291,
+ OpEnqueueKernel = 292,
+ OpGetKernelNDrangeSubGroupCount = 293,
+ OpGetKernelNDrangeMaxSubGroupSize = 294,
+ OpGetKernelWorkGroupSize = 295,
+ OpGetKernelPreferredWorkGroupSizeMultiple = 296,
+ OpRetainEvent = 297,
+ OpReleaseEvent = 298,
+ OpCreateUserEvent = 299,
+ OpIsValidEvent = 300,
+ OpSetUserEventStatus = 301,
+ OpCaptureEventProfilingInfo = 302,
+ OpGetDefaultQueue = 303,
+ OpBuildNDRange = 304,
+ OpImageSparseSampleImplicitLod = 305,
+ OpImageSparseSampleExplicitLod = 306,
+ OpImageSparseSampleDrefImplicitLod = 307,
+ OpImageSparseSampleDrefExplicitLod = 308,
+ OpImageSparseSampleProjImplicitLod = 309,
+ OpImageSparseSampleProjExplicitLod = 310,
+ OpImageSparseSampleProjDrefImplicitLod = 311,
+ OpImageSparseSampleProjDrefExplicitLod = 312,
+ OpImageSparseFetch = 313,
+ OpImageSparseGather = 314,
+ OpImageSparseDrefGather = 315,
+ OpImageSparseTexelsResident = 316,
+ OpNoLine = 317,
+ OpAtomicFlagTestAndSet = 318,
+ OpAtomicFlagClear = 319,
+ OpImageSparseRead = 320,
+ OpSizeOf = 321,
+ OpTypePipeStorage = 322,
+ OpConstantPipeStorage = 323,
+ OpCreatePipeFromPipeStorage = 324,
+ OpGetKernelLocalSizeForSubgroupCount = 325,
+ OpGetKernelMaxNumSubgroups = 326,
+ OpTypeNamedBarrier = 327,
+ OpNamedBarrierInitialize = 328,
+ OpMemoryNamedBarrier = 329,
+ OpModuleProcessed = 330,
+ OpSubgroupBallotKHR = 4421,
+ OpSubgroupFirstInvocationKHR = 4422,
+ OpSubgroupAllKHR = 4428,
+ OpSubgroupAnyKHR = 4429,
+ OpSubgroupAllEqualKHR = 4430,
+ OpSubgroupReadInvocationKHR = 4432,
+ Max = 0x7fffffff,
+};
+
+// Overload operator| for mask bit combining
+
+inline ImageOperandsMask operator|(ImageOperandsMask a, ImageOperandsMask b) { return ImageOperandsMask(unsigned(a) | unsigned(b)); }
+inline FPFastMathModeMask operator|(FPFastMathModeMask a, FPFastMathModeMask b) { return FPFastMathModeMask(unsigned(a) | unsigned(b)); }
+inline SelectionControlMask operator|(SelectionControlMask a, SelectionControlMask b) { return SelectionControlMask(unsigned(a) | unsigned(b)); }
+inline LoopControlMask operator|(LoopControlMask a, LoopControlMask b) { return LoopControlMask(unsigned(a) | unsigned(b)); }
+inline FunctionControlMask operator|(FunctionControlMask a, FunctionControlMask b) { return FunctionControlMask(unsigned(a) | unsigned(b)); }
+inline MemorySemanticsMask operator|(MemorySemanticsMask a, MemorySemanticsMask b) { return MemorySemanticsMask(unsigned(a) | unsigned(b)); }
+inline MemoryAccessMask operator|(MemoryAccessMask a, MemoryAccessMask b) { return MemoryAccessMask(unsigned(a) | unsigned(b)); }
+inline KernelProfilingInfoMask operator|(KernelProfilingInfoMask a, KernelProfilingInfoMask b) { return KernelProfilingInfoMask(unsigned(a) | unsigned(b)); }
+
+} // end namespace spv
+
+#endif // #ifndef spirv_HPP
+
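Editor's note (not part of the patch): the C++11 variant above uses scoped enum classes, so the header overloads operator| for each *Mask type (the block just before the closing #endif) and pairs every Mask enum with a parallel *Shift enum holding the bit position. A small sketch of combining mask bits and decoding an instruction word with the MagicNumber/OpCodeMask/WordCountShift constants defined above; it assumes the spirv.hpp11 file added by this patch is on the include path.

// Editorial sketch: using the C++11-style scoped tokens.
#include <cstdint>
#include <cassert>
#include "vulkan/spirv.hpp11"   // hypothetical include path for the file above

int main() {
    // Mask enumerants combine through the overloaded operator| defined above.
    spv::MemoryAccessMask access =
        spv::MemoryAccessMask::Volatile | spv::MemoryAccessMask::Aligned;
    assert(unsigned(access) & unsigned(spv::MemoryAccessMask::Volatile));

    // Each Mask enum has a parallel Shift enum: mask bit == 1u << shift.
    assert(unsigned(spv::MemoryAccessMask::Aligned) ==
           1u << unsigned(spv::MemoryAccessShift::Aligned));

    // Decoding the first word of an instruction with the module constants.
    uint32_t firstWord = (2u << spv::WordCountShift) | unsigned(spv::Op::OpCapability);
    assert((firstWord & spv::OpCodeMask) == unsigned(spv::Op::OpCapability));
    assert(spv::MagicNumber == 0x07230203u);   // module header magic
    return 0;
}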
diff --git a/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/spirv.json b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/spirv.json
new file mode 100644
index 0000000..b5a77d0
--- /dev/null
+++ b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/spirv.json
@@ -0,0 +1,996 @@
+{
+ "spv":
+ {
+ "meta":
+ {
+ "Comment":
+ [
+ [
+ "Copyright (c) 2014-2017 The Khronos Group Inc.",
+ "",
+ "Permission is hereby granted, free of charge, to any person obtaining a copy",
+ "of this software and/or associated documentation files (the \"Materials\"),",
+ "to deal in the Materials without restriction, including without limitation",
+ "the rights to use, copy, modify, merge, publish, distribute, sublicense,",
+ "and/or sell copies of the Materials, and to permit persons to whom the",
+ "Materials are furnished to do so, subject to the following conditions:",
+ "",
+ "The above copyright notice and this permission notice shall be included in",
+ "all copies or substantial portions of the Materials.",
+ "",
+ "MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS",
+ "STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND",
+ "HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ ",
+ "",
+ "THE MATERIALS ARE PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS",
+ "OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,",
+ "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL",
+ "THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER",
+ "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING",
+ "FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS",
+ "IN THE MATERIALS."
+ ],
+ [
+ "This header is automatically generated by the same tool that creates",
+ "the Binary Section of the SPIR-V specification."
+ ],
+ [
+ "Enumeration tokens for SPIR-V, in various styles:",
+ " C, C++, C++11, JSON, Lua, Python",
+ "",
+ "- C will have tokens with a \"Spv\" prefix, e.g.: SpvSourceLanguageGLSL",
+ "- C++ will have tokens in the \"spv\" name space, e.g.: spv::SourceLanguageGLSL",
+ "- C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL",
+ "- Lua will use tables, e.g.: spv.SourceLanguage.GLSL",
+ "- Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL']",
+ "",
+ "Some tokens act like mask values, which can be OR'd together,",
+ "while others are mutually exclusive. The mask-like ones have",
+ "\"Mask\" in their name, and a parallel enum that has the shift",
+ "amount (1 << x) for each corresponding enumerant."
+ ]
+ ],
+ "MagicNumber": 119734787,
+ "Version": 65792,
+ "Revision": 6,
+ "OpCodeMask": 65535,
+ "WordCountShift": 16
+ },
+ "enum":
+ [
+ {
+ "Name": "SourceLanguage",
+ "Type": "Value",
+ "Values":
+ {
+ "Unknown": 0,
+ "ESSL": 1,
+ "GLSL": 2,
+ "OpenCL_C": 3,
+ "OpenCL_CPP": 4,
+ "HLSL": 5
+ }
+ },
+ {
+ "Name": "ExecutionModel",
+ "Type": "Value",
+ "Values":
+ {
+ "Vertex": 0,
+ "TessellationControl": 1,
+ "TessellationEvaluation": 2,
+ "Geometry": 3,
+ "Fragment": 4,
+ "GLCompute": 5,
+ "Kernel": 6
+ }
+ },
+ {
+ "Name": "AddressingModel",
+ "Type": "Value",
+ "Values":
+ {
+ "Logical": 0,
+ "Physical32": 1,
+ "Physical64": 2
+ }
+ },
+ {
+ "Name": "MemoryModel",
+ "Type": "Value",
+ "Values":
+ {
+ "Simple": 0,
+ "GLSL450": 1,
+ "OpenCL": 2
+ }
+ },
+ {
+ "Name": "ExecutionMode",
+ "Type": "Value",
+ "Values":
+ {
+ "Invocations": 0,
+ "SpacingEqual": 1,
+ "SpacingFractionalEven": 2,
+ "SpacingFractionalOdd": 3,
+ "VertexOrderCw": 4,
+ "VertexOrderCcw": 5,
+ "PixelCenterInteger": 6,
+ "OriginUpperLeft": 7,
+ "OriginLowerLeft": 8,
+ "EarlyFragmentTests": 9,
+ "PointMode": 10,
+ "Xfb": 11,
+ "DepthReplacing": 12,
+ "DepthGreater": 14,
+ "DepthLess": 15,
+ "DepthUnchanged": 16,
+ "LocalSize": 17,
+ "LocalSizeHint": 18,
+ "InputPoints": 19,
+ "InputLines": 20,
+ "InputLinesAdjacency": 21,
+ "Triangles": 22,
+ "InputTrianglesAdjacency": 23,
+ "Quads": 24,
+ "Isolines": 25,
+ "OutputVertices": 26,
+ "OutputPoints": 27,
+ "OutputLineStrip": 28,
+ "OutputTriangleStrip": 29,
+ "VecTypeHint": 30,
+ "ContractionOff": 31,
+ "Initializer": 33,
+ "Finalizer": 34,
+ "SubgroupSize": 35,
+ "SubgroupsPerWorkgroup": 36
+ }
+ },
+ {
+ "Name": "StorageClass",
+ "Type": "Value",
+ "Values":
+ {
+ "UniformConstant": 0,
+ "Input": 1,
+ "Uniform": 2,
+ "Output": 3,
+ "Workgroup": 4,
+ "CrossWorkgroup": 5,
+ "Private": 6,
+ "Function": 7,
+ "Generic": 8,
+ "PushConstant": 9,
+ "AtomicCounter": 10,
+ "Image": 11,
+ "StorageBuffer": 12
+ }
+ },
+ {
+ "Name": "Dim",
+ "Type": "Value",
+ "Values":
+ {
+ "Dim1D": 0,
+ "Dim2D": 1,
+ "Dim3D": 2,
+ "Cube": 3,
+ "Rect": 4,
+ "Buffer": 5,
+ "SubpassData": 6
+ }
+ },
+ {
+ "Name": "SamplerAddressingMode",
+ "Type": "Value",
+ "Values":
+ {
+ "None": 0,
+ "ClampToEdge": 1,
+ "Clamp": 2,
+ "Repeat": 3,
+ "RepeatMirrored": 4
+ }
+ },
+ {
+ "Name": "SamplerFilterMode",
+ "Type": "Value",
+ "Values":
+ {
+ "Nearest": 0,
+ "Linear": 1
+ }
+ },
+ {
+ "Name": "ImageFormat",
+ "Type": "Value",
+ "Values":
+ {
+ "Unknown": 0,
+ "Rgba32f": 1,
+ "Rgba16f": 2,
+ "R32f": 3,
+ "Rgba8": 4,
+ "Rgba8Snorm": 5,
+ "Rg32f": 6,
+ "Rg16f": 7,
+ "R11fG11fB10f": 8,
+ "R16f": 9,
+ "Rgba16": 10,
+ "Rgb10A2": 11,
+ "Rg16": 12,
+ "Rg8": 13,
+ "R16": 14,
+ "R8": 15,
+ "Rgba16Snorm": 16,
+ "Rg16Snorm": 17,
+ "Rg8Snorm": 18,
+ "R16Snorm": 19,
+ "R8Snorm": 20,
+ "Rgba32i": 21,
+ "Rgba16i": 22,
+ "Rgba8i": 23,
+ "R32i": 24,
+ "Rg32i": 25,
+ "Rg16i": 26,
+ "Rg8i": 27,
+ "R16i": 28,
+ "R8i": 29,
+ "Rgba32ui": 30,
+ "Rgba16ui": 31,
+ "Rgba8ui": 32,
+ "R32ui": 33,
+ "Rgb10a2ui": 34,
+ "Rg32ui": 35,
+ "Rg16ui": 36,
+ "Rg8ui": 37,
+ "R16ui": 38,
+ "R8ui": 39
+ }
+ },
+ {
+ "Name": "ImageChannelOrder",
+ "Type": "Value",
+ "Values":
+ {
+ "R": 0,
+ "A": 1,
+ "RG": 2,
+ "RA": 3,
+ "RGB": 4,
+ "RGBA": 5,
+ "BGRA": 6,
+ "ARGB": 7,
+ "Intensity": 8,
+ "Luminance": 9,
+ "Rx": 10,
+ "RGx": 11,
+ "RGBx": 12,
+ "Depth": 13,
+ "DepthStencil": 14,
+ "sRGB": 15,
+ "sRGBx": 16,
+ "sRGBA": 17,
+ "sBGRA": 18,
+ "ABGR": 19
+ }
+ },
+ {
+ "Name": "ImageChannelDataType",
+ "Type": "Value",
+ "Values":
+ {
+ "SnormInt8": 0,
+ "SnormInt16": 1,
+ "UnormInt8": 2,
+ "UnormInt16": 3,
+ "UnormShort565": 4,
+ "UnormShort555": 5,
+ "UnormInt101010": 6,
+ "SignedInt8": 7,
+ "SignedInt16": 8,
+ "SignedInt32": 9,
+ "UnsignedInt8": 10,
+ "UnsignedInt16": 11,
+ "UnsignedInt32": 12,
+ "HalfFloat": 13,
+ "Float": 14,
+ "UnormInt24": 15,
+ "UnormInt101010_2": 16
+ }
+ },
+ {
+ "Name": "ImageOperands",
+ "Type": "Bit",
+ "Values":
+ {
+ "Bias": 0,
+ "Lod": 1,
+ "Grad": 2,
+ "ConstOffset": 3,
+ "Offset": 4,
+ "ConstOffsets": 5,
+ "Sample": 6,
+ "MinLod": 7
+ }
+ },
+ {
+ "Name": "FPFastMathMode",
+ "Type": "Bit",
+ "Values":
+ {
+ "NotNaN": 0,
+ "NotInf": 1,
+ "NSZ": 2,
+ "AllowRecip": 3,
+ "Fast": 4
+ }
+ },
+ {
+ "Name": "FPRoundingMode",
+ "Type": "Value",
+ "Values":
+ {
+ "RTE": 0,
+ "RTZ": 1,
+ "RTP": 2,
+ "RTN": 3
+ }
+ },
+ {
+ "Name": "LinkageType",
+ "Type": "Value",
+ "Values":
+ {
+ "Export": 0,
+ "Import": 1
+ }
+ },
+ {
+ "Name": "AccessQualifier",
+ "Type": "Value",
+ "Values":
+ {
+ "ReadOnly": 0,
+ "WriteOnly": 1,
+ "ReadWrite": 2
+ }
+ },
+ {
+ "Name": "FunctionParameterAttribute",
+ "Type": "Value",
+ "Values":
+ {
+ "Zext": 0,
+ "Sext": 1,
+ "ByVal": 2,
+ "Sret": 3,
+ "NoAlias": 4,
+ "NoCapture": 5,
+ "NoWrite": 6,
+ "NoReadWrite": 7
+ }
+ },
+ {
+ "Name": "Decoration",
+ "Type": "Value",
+ "Values":
+ {
+ "RelaxedPrecision": 0,
+ "SpecId": 1,
+ "Block": 2,
+ "BufferBlock": 3,
+ "RowMajor": 4,
+ "ColMajor": 5,
+ "ArrayStride": 6,
+ "MatrixStride": 7,
+ "GLSLShared": 8,
+ "GLSLPacked": 9,
+ "CPacked": 10,
+ "BuiltIn": 11,
+ "NoPerspective": 13,
+ "Flat": 14,
+ "Patch": 15,
+ "Centroid": 16,
+ "Sample": 17,
+ "Invariant": 18,
+ "Restrict": 19,
+ "Aliased": 20,
+ "Volatile": 21,
+ "Constant": 22,
+ "Coherent": 23,
+ "NonWritable": 24,
+ "NonReadable": 25,
+ "Uniform": 26,
+ "SaturatedConversion": 28,
+ "Stream": 29,
+ "Location": 30,
+ "Component": 31,
+ "Index": 32,
+ "Binding": 33,
+ "DescriptorSet": 34,
+ "Offset": 35,
+ "XfbBuffer": 36,
+ "XfbStride": 37,
+ "FuncParamAttr": 38,
+ "FPRoundingMode": 39,
+ "FPFastMathMode": 40,
+ "LinkageAttributes": 41,
+ "NoContraction": 42,
+ "InputAttachmentIndex": 43,
+ "Alignment": 44,
+ "MaxByteOffset": 45,
+ "OverrideCoverageNV": 5248,
+ "PassthroughNV": 5250,
+ "ViewportRelativeNV": 5252,
+ "SecondaryViewportRelativeNV": 5256
+ }
+ },
+ {
+ "Name": "BuiltIn",
+ "Type": "Value",
+ "Values":
+ {
+ "Position": 0,
+ "PointSize": 1,
+ "ClipDistance": 3,
+ "CullDistance": 4,
+ "VertexId": 5,
+ "InstanceId": 6,
+ "PrimitiveId": 7,
+ "InvocationId": 8,
+ "Layer": 9,
+ "ViewportIndex": 10,
+ "TessLevelOuter": 11,
+ "TessLevelInner": 12,
+ "TessCoord": 13,
+ "PatchVertices": 14,
+ "FragCoord": 15,
+ "PointCoord": 16,
+ "FrontFacing": 17,
+ "SampleId": 18,
+ "SamplePosition": 19,
+ "SampleMask": 20,
+ "FragDepth": 22,
+ "HelperInvocation": 23,
+ "NumWorkgroups": 24,
+ "WorkgroupSize": 25,
+ "WorkgroupId": 26,
+ "LocalInvocationId": 27,
+ "GlobalInvocationId": 28,
+ "LocalInvocationIndex": 29,
+ "WorkDim": 30,
+ "GlobalSize": 31,
+ "EnqueuedWorkgroupSize": 32,
+ "GlobalOffset": 33,
+ "GlobalLinearId": 34,
+ "SubgroupSize": 36,
+ "SubgroupMaxSize": 37,
+ "NumSubgroups": 38,
+ "NumEnqueuedSubgroups": 39,
+ "SubgroupId": 40,
+ "SubgroupLocalInvocationId": 41,
+ "VertexIndex": 42,
+ "InstanceIndex": 43,
+ "SubgroupEqMaskKHR": 4416,
+ "SubgroupGeMaskKHR": 4417,
+ "SubgroupGtMaskKHR": 4418,
+ "SubgroupLeMaskKHR": 4419,
+ "SubgroupLtMaskKHR": 4420,
+ "BaseVertex": 4424,
+ "BaseInstance": 4425,
+ "DrawIndex": 4426,
+ "DeviceIndex": 4438,
+ "ViewIndex": 4440,
+ "ViewportMaskNV": 5253,
+ "SecondaryPositionNV": 5257,
+ "SecondaryViewportMaskNV": 5258,
+ "PositionPerViewNV": 5261,
+ "ViewportMaskPerViewNV": 5262
+ }
+ },
+ {
+ "Name": "SelectionControl",
+ "Type": "Bit",
+ "Values":
+ {
+ "Flatten": 0,
+ "DontFlatten": 1
+ }
+ },
+ {
+ "Name": "LoopControl",
+ "Type": "Bit",
+ "Values":
+ {
+ "Unroll": 0,
+ "DontUnroll": 1,
+ "DependencyInfinite": 2,
+ "DependencyLength": 3
+ }
+ },
+ {
+ "Name": "FunctionControl",
+ "Type": "Bit",
+ "Values":
+ {
+ "Inline": 0,
+ "DontInline": 1,
+ "Pure": 2,
+ "Const": 3
+ }
+ },
+ {
+ "Name": "MemorySemantics",
+ "Type": "Bit",
+ "Values":
+ {
+ "Acquire": 1,
+ "Release": 2,
+ "AcquireRelease": 3,
+ "SequentiallyConsistent": 4,
+ "UniformMemory": 6,
+ "SubgroupMemory": 7,
+ "WorkgroupMemory": 8,
+ "CrossWorkgroupMemory": 9,
+ "AtomicCounterMemory": 10,
+ "ImageMemory": 11
+ }
+ },
+ {
+ "Name": "MemoryAccess",
+ "Type": "Bit",
+ "Values":
+ {
+ "Volatile": 0,
+ "Aligned": 1,
+ "Nontemporal": 2
+ }
+ },
+ {
+ "Name": "Scope",
+ "Type": "Value",
+ "Values":
+ {
+ "CrossDevice": 0,
+ "Device": 1,
+ "Workgroup": 2,
+ "Subgroup": 3,
+ "Invocation": 4
+ }
+ },
+ {
+ "Name": "GroupOperation",
+ "Type": "Value",
+ "Values":
+ {
+ "Reduce": 0,
+ "InclusiveScan": 1,
+ "ExclusiveScan": 2
+ }
+ },
+ {
+ "Name": "KernelEnqueueFlags",
+ "Type": "Value",
+ "Values":
+ {
+ "NoWait": 0,
+ "WaitKernel": 1,
+ "WaitWorkGroup": 2
+ }
+ },
+ {
+ "Name": "KernelProfilingInfo",
+ "Type": "Bit",
+ "Values":
+ {
+ "CmdExecTime": 0
+ }
+ },
+ {
+ "Name": "Capability",
+ "Type": "Value",
+ "Values":
+ {
+ "Matrix": 0,
+ "Shader": 1,
+ "Geometry": 2,
+ "Tessellation": 3,
+ "Addresses": 4,
+ "Linkage": 5,
+ "Kernel": 6,
+ "Vector16": 7,
+ "Float16Buffer": 8,
+ "Float16": 9,
+ "Float64": 10,
+ "Int64": 11,
+ "Int64Atomics": 12,
+ "ImageBasic": 13,
+ "ImageReadWrite": 14,
+ "ImageMipmap": 15,
+ "Pipes": 17,
+ "Groups": 18,
+ "DeviceEnqueue": 19,
+ "LiteralSampler": 20,
+ "AtomicStorage": 21,
+ "Int16": 22,
+ "TessellationPointSize": 23,
+ "GeometryPointSize": 24,
+ "ImageGatherExtended": 25,
+ "StorageImageMultisample": 27,
+ "UniformBufferArrayDynamicIndexing": 28,
+ "SampledImageArrayDynamicIndexing": 29,
+ "StorageBufferArrayDynamicIndexing": 30,
+ "StorageImageArrayDynamicIndexing": 31,
+ "ClipDistance": 32,
+ "CullDistance": 33,
+ "ImageCubeArray": 34,
+ "SampleRateShading": 35,
+ "ImageRect": 36,
+ "SampledRect": 37,
+ "GenericPointer": 38,
+ "Int8": 39,
+ "InputAttachment": 40,
+ "SparseResidency": 41,
+ "MinLod": 42,
+ "Sampled1D": 43,
+ "Image1D": 44,
+ "SampledCubeArray": 45,
+ "SampledBuffer": 46,
+ "ImageBuffer": 47,
+ "ImageMSArray": 48,
+ "StorageImageExtendedFormats": 49,
+ "ImageQuery": 50,
+ "DerivativeControl": 51,
+ "InterpolationFunction": 52,
+ "TransformFeedback": 53,
+ "GeometryStreams": 54,
+ "StorageImageReadWithoutFormat": 55,
+ "StorageImageWriteWithoutFormat": 56,
+ "MultiViewport": 57,
+ "SubgroupDispatch": 58,
+ "NamedBarrier": 59,
+ "PipeStorage": 60,
+ "SubgroupBallotKHR": 4423,
+ "DrawParameters": 4427,
+ "SubgroupVoteKHR": 4431,
+ "StorageBuffer16BitAccess": 4433,
+ "StorageUniformBufferBlock16": 4433,
+ "StorageUniform16": 4434,
+ "UniformAndStorageBuffer16BitAccess": 4434,
+ "StoragePushConstant16": 4435,
+ "StorageInputOutput16": 4436,
+ "DeviceGroup": 4437,
+ "MultiView": 4439,
+ "VariablePointersStorageBuffer": 4441,
+ "VariablePointers": 4442,
+ "SampleMaskOverrideCoverageNV": 5249,
+ "GeometryShaderPassthroughNV": 5251,
+ "ShaderViewportIndexLayerNV": 5254,
+ "ShaderViewportMaskNV": 5255,
+ "ShaderStereoViewNV": 5259,
+ "PerViewAttributesNV": 5260
+ }
+ },
+ {
+ "Name": "Op",
+ "Type": "Value",
+ "Values":
+ {
+ "OpNop": 0,
+ "OpUndef": 1,
+ "OpSourceContinued": 2,
+ "OpSource": 3,
+ "OpSourceExtension": 4,
+ "OpName": 5,
+ "OpMemberName": 6,
+ "OpString": 7,
+ "OpLine": 8,
+ "OpExtension": 10,
+ "OpExtInstImport": 11,
+ "OpExtInst": 12,
+ "OpMemoryModel": 14,
+ "OpEntryPoint": 15,
+ "OpExecutionMode": 16,
+ "OpCapability": 17,
+ "OpTypeVoid": 19,
+ "OpTypeBool": 20,
+ "OpTypeInt": 21,
+ "OpTypeFloat": 22,
+ "OpTypeVector": 23,
+ "OpTypeMatrix": 24,
+ "OpTypeImage": 25,
+ "OpTypeSampler": 26,
+ "OpTypeSampledImage": 27,
+ "OpTypeArray": 28,
+ "OpTypeRuntimeArray": 29,
+ "OpTypeStruct": 30,
+ "OpTypeOpaque": 31,
+ "OpTypePointer": 32,
+ "OpTypeFunction": 33,
+ "OpTypeEvent": 34,
+ "OpTypeDeviceEvent": 35,
+ "OpTypeReserveId": 36,
+ "OpTypeQueue": 37,
+ "OpTypePipe": 38,
+ "OpTypeForwardPointer": 39,
+ "OpConstantTrue": 41,
+ "OpConstantFalse": 42,
+ "OpConstant": 43,
+ "OpConstantComposite": 44,
+ "OpConstantSampler": 45,
+ "OpConstantNull": 46,
+ "OpSpecConstantTrue": 48,
+ "OpSpecConstantFalse": 49,
+ "OpSpecConstant": 50,
+ "OpSpecConstantComposite": 51,
+ "OpSpecConstantOp": 52,
+ "OpFunction": 54,
+ "OpFunctionParameter": 55,
+ "OpFunctionEnd": 56,
+ "OpFunctionCall": 57,
+ "OpVariable": 59,
+ "OpImageTexelPointer": 60,
+ "OpLoad": 61,
+ "OpStore": 62,
+ "OpCopyMemory": 63,
+ "OpCopyMemorySized": 64,
+ "OpAccessChain": 65,
+ "OpInBoundsAccessChain": 66,
+ "OpPtrAccessChain": 67,
+ "OpArrayLength": 68,
+ "OpGenericPtrMemSemantics": 69,
+ "OpInBoundsPtrAccessChain": 70,
+ "OpDecorate": 71,
+ "OpMemberDecorate": 72,
+ "OpDecorationGroup": 73,
+ "OpGroupDecorate": 74,
+ "OpGroupMemberDecorate": 75,
+ "OpVectorExtractDynamic": 77,
+ "OpVectorInsertDynamic": 78,
+ "OpVectorShuffle": 79,
+ "OpCompositeConstruct": 80,
+ "OpCompositeExtract": 81,
+ "OpCompositeInsert": 82,
+ "OpCopyObject": 83,
+ "OpTranspose": 84,
+ "OpSampledImage": 86,
+ "OpImageSampleImplicitLod": 87,
+ "OpImageSampleExplicitLod": 88,
+ "OpImageSampleDrefImplicitLod": 89,
+ "OpImageSampleDrefExplicitLod": 90,
+ "OpImageSampleProjImplicitLod": 91,
+ "OpImageSampleProjExplicitLod": 92,
+ "OpImageSampleProjDrefImplicitLod": 93,
+ "OpImageSampleProjDrefExplicitLod": 94,
+ "OpImageFetch": 95,
+ "OpImageGather": 96,
+ "OpImageDrefGather": 97,
+ "OpImageRead": 98,
+ "OpImageWrite": 99,
+ "OpImage": 100,
+ "OpImageQueryFormat": 101,
+ "OpImageQueryOrder": 102,
+ "OpImageQuerySizeLod": 103,
+ "OpImageQuerySize": 104,
+ "OpImageQueryLod": 105,
+ "OpImageQueryLevels": 106,
+ "OpImageQuerySamples": 107,
+ "OpConvertFToU": 109,
+ "OpConvertFToS": 110,
+ "OpConvertSToF": 111,
+ "OpConvertUToF": 112,
+ "OpUConvert": 113,
+ "OpSConvert": 114,
+ "OpFConvert": 115,
+ "OpQuantizeToF16": 116,
+ "OpConvertPtrToU": 117,
+ "OpSatConvertSToU": 118,
+ "OpSatConvertUToS": 119,
+ "OpConvertUToPtr": 120,
+ "OpPtrCastToGeneric": 121,
+ "OpGenericCastToPtr": 122,
+ "OpGenericCastToPtrExplicit": 123,
+ "OpBitcast": 124,
+ "OpSNegate": 126,
+ "OpFNegate": 127,
+ "OpIAdd": 128,
+ "OpFAdd": 129,
+ "OpISub": 130,
+ "OpFSub": 131,
+ "OpIMul": 132,
+ "OpFMul": 133,
+ "OpUDiv": 134,
+ "OpSDiv": 135,
+ "OpFDiv": 136,
+ "OpUMod": 137,
+ "OpSRem": 138,
+ "OpSMod": 139,
+ "OpFRem": 140,
+ "OpFMod": 141,
+ "OpVectorTimesScalar": 142,
+ "OpMatrixTimesScalar": 143,
+ "OpVectorTimesMatrix": 144,
+ "OpMatrixTimesVector": 145,
+ "OpMatrixTimesMatrix": 146,
+ "OpOuterProduct": 147,
+ "OpDot": 148,
+ "OpIAddCarry": 149,
+ "OpISubBorrow": 150,
+ "OpUMulExtended": 151,
+ "OpSMulExtended": 152,
+ "OpAny": 154,
+ "OpAll": 155,
+ "OpIsNan": 156,
+ "OpIsInf": 157,
+ "OpIsFinite": 158,
+ "OpIsNormal": 159,
+ "OpSignBitSet": 160,
+ "OpLessOrGreater": 161,
+ "OpOrdered": 162,
+ "OpUnordered": 163,
+ "OpLogicalEqual": 164,
+ "OpLogicalNotEqual": 165,
+ "OpLogicalOr": 166,
+ "OpLogicalAnd": 167,
+ "OpLogicalNot": 168,
+ "OpSelect": 169,
+ "OpIEqual": 170,
+ "OpINotEqual": 171,
+ "OpUGreaterThan": 172,
+ "OpSGreaterThan": 173,
+ "OpUGreaterThanEqual": 174,
+ "OpSGreaterThanEqual": 175,
+ "OpULessThan": 176,
+ "OpSLessThan": 177,
+ "OpULessThanEqual": 178,
+ "OpSLessThanEqual": 179,
+ "OpFOrdEqual": 180,
+ "OpFUnordEqual": 181,
+ "OpFOrdNotEqual": 182,
+ "OpFUnordNotEqual": 183,
+ "OpFOrdLessThan": 184,
+ "OpFUnordLessThan": 185,
+ "OpFOrdGreaterThan": 186,
+ "OpFUnordGreaterThan": 187,
+ "OpFOrdLessThanEqual": 188,
+ "OpFUnordLessThanEqual": 189,
+ "OpFOrdGreaterThanEqual": 190,
+ "OpFUnordGreaterThanEqual": 191,
+ "OpShiftRightLogical": 194,
+ "OpShiftRightArithmetic": 195,
+ "OpShiftLeftLogical": 196,
+ "OpBitwiseOr": 197,
+ "OpBitwiseXor": 198,
+ "OpBitwiseAnd": 199,
+ "OpNot": 200,
+ "OpBitFieldInsert": 201,
+ "OpBitFieldSExtract": 202,
+ "OpBitFieldUExtract": 203,
+ "OpBitReverse": 204,
+ "OpBitCount": 205,
+ "OpDPdx": 207,
+ "OpDPdy": 208,
+ "OpFwidth": 209,
+ "OpDPdxFine": 210,
+ "OpDPdyFine": 211,
+ "OpFwidthFine": 212,
+ "OpDPdxCoarse": 213,
+ "OpDPdyCoarse": 214,
+ "OpFwidthCoarse": 215,
+ "OpEmitVertex": 218,
+ "OpEndPrimitive": 219,
+ "OpEmitStreamVertex": 220,
+ "OpEndStreamPrimitive": 221,
+ "OpControlBarrier": 224,
+ "OpMemoryBarrier": 225,
+ "OpAtomicLoad": 227,
+ "OpAtomicStore": 228,
+ "OpAtomicExchange": 229,
+ "OpAtomicCompareExchange": 230,
+ "OpAtomicCompareExchangeWeak": 231,
+ "OpAtomicIIncrement": 232,
+ "OpAtomicIDecrement": 233,
+ "OpAtomicIAdd": 234,
+ "OpAtomicISub": 235,
+ "OpAtomicSMin": 236,
+ "OpAtomicUMin": 237,
+ "OpAtomicSMax": 238,
+ "OpAtomicUMax": 239,
+ "OpAtomicAnd": 240,
+ "OpAtomicOr": 241,
+ "OpAtomicXor": 242,
+ "OpPhi": 245,
+ "OpLoopMerge": 246,
+ "OpSelectionMerge": 247,
+ "OpLabel": 248,
+ "OpBranch": 249,
+ "OpBranchConditional": 250,
+ "OpSwitch": 251,
+ "OpKill": 252,
+ "OpReturn": 253,
+ "OpReturnValue": 254,
+ "OpUnreachable": 255,
+ "OpLifetimeStart": 256,
+ "OpLifetimeStop": 257,
+ "OpGroupAsyncCopy": 259,
+ "OpGroupWaitEvents": 260,
+ "OpGroupAll": 261,
+ "OpGroupAny": 262,
+ "OpGroupBroadcast": 263,
+ "OpGroupIAdd": 264,
+ "OpGroupFAdd": 265,
+ "OpGroupFMin": 266,
+ "OpGroupUMin": 267,
+ "OpGroupSMin": 268,
+ "OpGroupFMax": 269,
+ "OpGroupUMax": 270,
+ "OpGroupSMax": 271,
+ "OpReadPipe": 274,
+ "OpWritePipe": 275,
+ "OpReservedReadPipe": 276,
+ "OpReservedWritePipe": 277,
+ "OpReserveReadPipePackets": 278,
+ "OpReserveWritePipePackets": 279,
+ "OpCommitReadPipe": 280,
+ "OpCommitWritePipe": 281,
+ "OpIsValidReserveId": 282,
+ "OpGetNumPipePackets": 283,
+ "OpGetMaxPipePackets": 284,
+ "OpGroupReserveReadPipePackets": 285,
+ "OpGroupReserveWritePipePackets": 286,
+ "OpGroupCommitReadPipe": 287,
+ "OpGroupCommitWritePipe": 288,
+ "OpEnqueueMarker": 291,
+ "OpEnqueueKernel": 292,
+ "OpGetKernelNDrangeSubGroupCount": 293,
+ "OpGetKernelNDrangeMaxSubGroupSize": 294,
+ "OpGetKernelWorkGroupSize": 295,
+ "OpGetKernelPreferredWorkGroupSizeMultiple": 296,
+ "OpRetainEvent": 297,
+ "OpReleaseEvent": 298,
+ "OpCreateUserEvent": 299,
+ "OpIsValidEvent": 300,
+ "OpSetUserEventStatus": 301,
+ "OpCaptureEventProfilingInfo": 302,
+ "OpGetDefaultQueue": 303,
+ "OpBuildNDRange": 304,
+ "OpImageSparseSampleImplicitLod": 305,
+ "OpImageSparseSampleExplicitLod": 306,
+ "OpImageSparseSampleDrefImplicitLod": 307,
+ "OpImageSparseSampleDrefExplicitLod": 308,
+ "OpImageSparseSampleProjImplicitLod": 309,
+ "OpImageSparseSampleProjExplicitLod": 310,
+ "OpImageSparseSampleProjDrefImplicitLod": 311,
+ "OpImageSparseSampleProjDrefExplicitLod": 312,
+ "OpImageSparseFetch": 313,
+ "OpImageSparseGather": 314,
+ "OpImageSparseDrefGather": 315,
+ "OpImageSparseTexelsResident": 316,
+ "OpNoLine": 317,
+ "OpAtomicFlagTestAndSet": 318,
+ "OpAtomicFlagClear": 319,
+ "OpImageSparseRead": 320,
+ "OpSizeOf": 321,
+ "OpTypePipeStorage": 322,
+ "OpConstantPipeStorage": 323,
+ "OpCreatePipeFromPipeStorage": 324,
+ "OpGetKernelLocalSizeForSubgroupCount": 325,
+ "OpGetKernelMaxNumSubgroups": 326,
+ "OpTypeNamedBarrier": 327,
+ "OpNamedBarrierInitialize": 328,
+ "OpMemoryNamedBarrier": 329,
+ "OpModuleProcessed": 330,
+ "OpSubgroupBallotKHR": 4421,
+ "OpSubgroupFirstInvocationKHR": 4422,
+ "OpSubgroupAllKHR": 4428,
+ "OpSubgroupAnyKHR": 4429,
+ "OpSubgroupAllEqualKHR": 4430,
+ "OpSubgroupReadInvocationKHR": 4432
+ }
+ }
+ ]
+ }
+}
+
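Editor's note (not part of the patch): spirv.json carries the same token tables as data, which is convenient for tooling (disassemblers, validators) that wants to map numeric values back to names without compiling against a header. A rough sketch of such a lookup over the layout shown above ("spv" -> "enum" -> entries with "Name"/"Type"/"Values"); it uses the nlohmann/json library as an assumed external dependency, and the file path is illustrative.

// Editorial sketch: resolve an opcode number to its name via spirv.json.
#include <fstream>
#include <iostream>
#include <string>
#include <nlohmann/json.hpp>   // assumed dependency, not part of this SDK

int main() {
    std::ifstream in("samples/thirdparty/vulkan-1.0.49.0/include/vulkan/spirv.json");
    nlohmann::json doc = nlohmann::json::parse(in);

    const unsigned wanted = 17;              // e.g. the value of OpCapability
    for (const auto& e : doc["spv"]["enum"]) {
        if (e["Name"] != "Op") continue;     // the "Op" table holds opcode names
        for (const auto& item : e["Values"].items()) {
            if (item.value().get<unsigned>() == wanted) {
                std::cout << "opcode " << wanted << " = " << item.key() << "\n";
            }
        }
    }
    return 0;
}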
diff --git a/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/spirv.lua b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/spirv.lua
new file mode 100644
index 0000000..743ba71
--- /dev/null
+++ b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/spirv.lua
@@ -0,0 +1,927 @@
+-- Copyright (c) 2014-2017 The Khronos Group Inc.
+--
+-- Permission is hereby granted, free of charge, to any person obtaining a copy
+-- of this software and/or associated documentation files (the "Materials"),
+-- to deal in the Materials without restriction, including without limitation
+-- the rights to use, copy, modify, merge, publish, distribute, sublicense,
+-- and/or sell copies of the Materials, and to permit persons to whom the
+-- Materials are furnished to do so, subject to the following conditions:
+--
+-- The above copyright notice and this permission notice shall be included in
+-- all copies or substantial portions of the Materials.
+--
+-- MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+-- STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+-- HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+--
+-- THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+-- OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+-- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+-- THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+-- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+-- FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+-- IN THE MATERIALS.
+
+-- This header is automatically generated by the same tool that creates
+-- the Binary Section of the SPIR-V specification.
+
+-- Enumeration tokens for SPIR-V, in various styles:
+-- C, C++, C++11, JSON, Lua, Python
+--
+-- - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL
+-- - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL
+-- - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL
+-- - Lua will use tables, e.g.: spv.SourceLanguage.GLSL
+-- - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL']
+--
+-- Some tokens act like mask values, which can be OR'd together,
+-- while others are mutually exclusive. The mask-like ones have
+-- "Mask" in their name, and a parallel enum that has the shift
+-- amount (1 << x) for each corresponding enumerant.
+
+spv = {
+ MagicNumber = 0x07230203,
+ Version = 0x00010100,
+ Revision = 6,
+ OpCodeMask = 0xffff,
+ WordCountShift = 16,
+
+ SourceLanguage = {
+ Unknown = 0,
+ ESSL = 1,
+ GLSL = 2,
+ OpenCL_C = 3,
+ OpenCL_CPP = 4,
+ HLSL = 5,
+ },
+
+ ExecutionModel = {
+ Vertex = 0,
+ TessellationControl = 1,
+ TessellationEvaluation = 2,
+ Geometry = 3,
+ Fragment = 4,
+ GLCompute = 5,
+ Kernel = 6,
+ },
+
+ AddressingModel = {
+ Logical = 0,
+ Physical32 = 1,
+ Physical64 = 2,
+ },
+
+ MemoryModel = {
+ Simple = 0,
+ GLSL450 = 1,
+ OpenCL = 2,
+ },
+
+ ExecutionMode = {
+ Invocations = 0,
+ SpacingEqual = 1,
+ SpacingFractionalEven = 2,
+ SpacingFractionalOdd = 3,
+ VertexOrderCw = 4,
+ VertexOrderCcw = 5,
+ PixelCenterInteger = 6,
+ OriginUpperLeft = 7,
+ OriginLowerLeft = 8,
+ EarlyFragmentTests = 9,
+ PointMode = 10,
+ Xfb = 11,
+ DepthReplacing = 12,
+ DepthGreater = 14,
+ DepthLess = 15,
+ DepthUnchanged = 16,
+ LocalSize = 17,
+ LocalSizeHint = 18,
+ InputPoints = 19,
+ InputLines = 20,
+ InputLinesAdjacency = 21,
+ Triangles = 22,
+ InputTrianglesAdjacency = 23,
+ Quads = 24,
+ Isolines = 25,
+ OutputVertices = 26,
+ OutputPoints = 27,
+ OutputLineStrip = 28,
+ OutputTriangleStrip = 29,
+ VecTypeHint = 30,
+ ContractionOff = 31,
+ Initializer = 33,
+ Finalizer = 34,
+ SubgroupSize = 35,
+ SubgroupsPerWorkgroup = 36,
+ },
+
+ StorageClass = {
+ UniformConstant = 0,
+ Input = 1,
+ Uniform = 2,
+ Output = 3,
+ Workgroup = 4,
+ CrossWorkgroup = 5,
+ Private = 6,
+ Function = 7,
+ Generic = 8,
+ PushConstant = 9,
+ AtomicCounter = 10,
+ Image = 11,
+ StorageBuffer = 12,
+ },
+
+ Dim = {
+ Dim1D = 0,
+ Dim2D = 1,
+ Dim3D = 2,
+ Cube = 3,
+ Rect = 4,
+ Buffer = 5,
+ SubpassData = 6,
+ },
+
+ SamplerAddressingMode = {
+ None = 0,
+ ClampToEdge = 1,
+ Clamp = 2,
+ Repeat = 3,
+ RepeatMirrored = 4,
+ },
+
+ SamplerFilterMode = {
+ Nearest = 0,
+ Linear = 1,
+ },
+
+ ImageFormat = {
+ Unknown = 0,
+ Rgba32f = 1,
+ Rgba16f = 2,
+ R32f = 3,
+ Rgba8 = 4,
+ Rgba8Snorm = 5,
+ Rg32f = 6,
+ Rg16f = 7,
+ R11fG11fB10f = 8,
+ R16f = 9,
+ Rgba16 = 10,
+ Rgb10A2 = 11,
+ Rg16 = 12,
+ Rg8 = 13,
+ R16 = 14,
+ R8 = 15,
+ Rgba16Snorm = 16,
+ Rg16Snorm = 17,
+ Rg8Snorm = 18,
+ R16Snorm = 19,
+ R8Snorm = 20,
+ Rgba32i = 21,
+ Rgba16i = 22,
+ Rgba8i = 23,
+ R32i = 24,
+ Rg32i = 25,
+ Rg16i = 26,
+ Rg8i = 27,
+ R16i = 28,
+ R8i = 29,
+ Rgba32ui = 30,
+ Rgba16ui = 31,
+ Rgba8ui = 32,
+ R32ui = 33,
+ Rgb10a2ui = 34,
+ Rg32ui = 35,
+ Rg16ui = 36,
+ Rg8ui = 37,
+ R16ui = 38,
+ R8ui = 39,
+ },
+
+ ImageChannelOrder = {
+ R = 0,
+ A = 1,
+ RG = 2,
+ RA = 3,
+ RGB = 4,
+ RGBA = 5,
+ BGRA = 6,
+ ARGB = 7,
+ Intensity = 8,
+ Luminance = 9,
+ Rx = 10,
+ RGx = 11,
+ RGBx = 12,
+ Depth = 13,
+ DepthStencil = 14,
+ sRGB = 15,
+ sRGBx = 16,
+ sRGBA = 17,
+ sBGRA = 18,
+ ABGR = 19,
+ },
+
+ ImageChannelDataType = {
+ SnormInt8 = 0,
+ SnormInt16 = 1,
+ UnormInt8 = 2,
+ UnormInt16 = 3,
+ UnormShort565 = 4,
+ UnormShort555 = 5,
+ UnormInt101010 = 6,
+ SignedInt8 = 7,
+ SignedInt16 = 8,
+ SignedInt32 = 9,
+ UnsignedInt8 = 10,
+ UnsignedInt16 = 11,
+ UnsignedInt32 = 12,
+ HalfFloat = 13,
+ Float = 14,
+ UnormInt24 = 15,
+ UnormInt101010_2 = 16,
+ },
+
+ ImageOperandsShift = {
+ Bias = 0,
+ Lod = 1,
+ Grad = 2,
+ ConstOffset = 3,
+ Offset = 4,
+ ConstOffsets = 5,
+ Sample = 6,
+ MinLod = 7,
+ },
+
+ ImageOperandsMask = {
+ MaskNone = 0,
+ Bias = 0x00000001,
+ Lod = 0x00000002,
+ Grad = 0x00000004,
+ ConstOffset = 0x00000008,
+ Offset = 0x00000010,
+ ConstOffsets = 0x00000020,
+ Sample = 0x00000040,
+ MinLod = 0x00000080,
+ },
+
+ FPFastMathModeShift = {
+ NotNaN = 0,
+ NotInf = 1,
+ NSZ = 2,
+ AllowRecip = 3,
+ Fast = 4,
+ },
+
+ FPFastMathModeMask = {
+ MaskNone = 0,
+ NotNaN = 0x00000001,
+ NotInf = 0x00000002,
+ NSZ = 0x00000004,
+ AllowRecip = 0x00000008,
+ Fast = 0x00000010,
+ },
+
+ FPRoundingMode = {
+ RTE = 0,
+ RTZ = 1,
+ RTP = 2,
+ RTN = 3,
+ },
+
+ LinkageType = {
+ Export = 0,
+ Import = 1,
+ },
+
+ AccessQualifier = {
+ ReadOnly = 0,
+ WriteOnly = 1,
+ ReadWrite = 2,
+ },
+
+ FunctionParameterAttribute = {
+ Zext = 0,
+ Sext = 1,
+ ByVal = 2,
+ Sret = 3,
+ NoAlias = 4,
+ NoCapture = 5,
+ NoWrite = 6,
+ NoReadWrite = 7,
+ },
+
+ Decoration = {
+ RelaxedPrecision = 0,
+ SpecId = 1,
+ Block = 2,
+ BufferBlock = 3,
+ RowMajor = 4,
+ ColMajor = 5,
+ ArrayStride = 6,
+ MatrixStride = 7,
+ GLSLShared = 8,
+ GLSLPacked = 9,
+ CPacked = 10,
+ BuiltIn = 11,
+ NoPerspective = 13,
+ Flat = 14,
+ Patch = 15,
+ Centroid = 16,
+ Sample = 17,
+ Invariant = 18,
+ Restrict = 19,
+ Aliased = 20,
+ Volatile = 21,
+ Constant = 22,
+ Coherent = 23,
+ NonWritable = 24,
+ NonReadable = 25,
+ Uniform = 26,
+ SaturatedConversion = 28,
+ Stream = 29,
+ Location = 30,
+ Component = 31,
+ Index = 32,
+ Binding = 33,
+ DescriptorSet = 34,
+ Offset = 35,
+ XfbBuffer = 36,
+ XfbStride = 37,
+ FuncParamAttr = 38,
+ FPRoundingMode = 39,
+ FPFastMathMode = 40,
+ LinkageAttributes = 41,
+ NoContraction = 42,
+ InputAttachmentIndex = 43,
+ Alignment = 44,
+ MaxByteOffset = 45,
+ OverrideCoverageNV = 5248,
+ PassthroughNV = 5250,
+ ViewportRelativeNV = 5252,
+ SecondaryViewportRelativeNV = 5256,
+ },
+
+ BuiltIn = {
+ Position = 0,
+ PointSize = 1,
+ ClipDistance = 3,
+ CullDistance = 4,
+ VertexId = 5,
+ InstanceId = 6,
+ PrimitiveId = 7,
+ InvocationId = 8,
+ Layer = 9,
+ ViewportIndex = 10,
+ TessLevelOuter = 11,
+ TessLevelInner = 12,
+ TessCoord = 13,
+ PatchVertices = 14,
+ FragCoord = 15,
+ PointCoord = 16,
+ FrontFacing = 17,
+ SampleId = 18,
+ SamplePosition = 19,
+ SampleMask = 20,
+ FragDepth = 22,
+ HelperInvocation = 23,
+ NumWorkgroups = 24,
+ WorkgroupSize = 25,
+ WorkgroupId = 26,
+ LocalInvocationId = 27,
+ GlobalInvocationId = 28,
+ LocalInvocationIndex = 29,
+ WorkDim = 30,
+ GlobalSize = 31,
+ EnqueuedWorkgroupSize = 32,
+ GlobalOffset = 33,
+ GlobalLinearId = 34,
+ SubgroupSize = 36,
+ SubgroupMaxSize = 37,
+ NumSubgroups = 38,
+ NumEnqueuedSubgroups = 39,
+ SubgroupId = 40,
+ SubgroupLocalInvocationId = 41,
+ VertexIndex = 42,
+ InstanceIndex = 43,
+ SubgroupEqMaskKHR = 4416,
+ SubgroupGeMaskKHR = 4417,
+ SubgroupGtMaskKHR = 4418,
+ SubgroupLeMaskKHR = 4419,
+ SubgroupLtMaskKHR = 4420,
+ BaseVertex = 4424,
+ BaseInstance = 4425,
+ DrawIndex = 4426,
+ DeviceIndex = 4438,
+ ViewIndex = 4440,
+ ViewportMaskNV = 5253,
+ SecondaryPositionNV = 5257,
+ SecondaryViewportMaskNV = 5258,
+ PositionPerViewNV = 5261,
+ ViewportMaskPerViewNV = 5262,
+ },
+
+ SelectionControlShift = {
+ Flatten = 0,
+ DontFlatten = 1,
+ },
+
+ SelectionControlMask = {
+ MaskNone = 0,
+ Flatten = 0x00000001,
+ DontFlatten = 0x00000002,
+ },
+
+ LoopControlShift = {
+ Unroll = 0,
+ DontUnroll = 1,
+ DependencyInfinite = 2,
+ DependencyLength = 3,
+ },
+
+ LoopControlMask = {
+ MaskNone = 0,
+ Unroll = 0x00000001,
+ DontUnroll = 0x00000002,
+ DependencyInfinite = 0x00000004,
+ DependencyLength = 0x00000008,
+ },
+
+ FunctionControlShift = {
+ Inline = 0,
+ DontInline = 1,
+ Pure = 2,
+ Const = 3,
+ },
+
+ FunctionControlMask = {
+ MaskNone = 0,
+ Inline = 0x00000001,
+ DontInline = 0x00000002,
+ Pure = 0x00000004,
+ Const = 0x00000008,
+ },
+
+ MemorySemanticsShift = {
+ Acquire = 1,
+ Release = 2,
+ AcquireRelease = 3,
+ SequentiallyConsistent = 4,
+ UniformMemory = 6,
+ SubgroupMemory = 7,
+ WorkgroupMemory = 8,
+ CrossWorkgroupMemory = 9,
+ AtomicCounterMemory = 10,
+ ImageMemory = 11,
+ },
+
+ MemorySemanticsMask = {
+ MaskNone = 0,
+ Acquire = 0x00000002,
+ Release = 0x00000004,
+ AcquireRelease = 0x00000008,
+ SequentiallyConsistent = 0x00000010,
+ UniformMemory = 0x00000040,
+ SubgroupMemory = 0x00000080,
+ WorkgroupMemory = 0x00000100,
+ CrossWorkgroupMemory = 0x00000200,
+ AtomicCounterMemory = 0x00000400,
+ ImageMemory = 0x00000800,
+ },
+
+ MemoryAccessShift = {
+ Volatile = 0,
+ Aligned = 1,
+ Nontemporal = 2,
+ },
+
+ MemoryAccessMask = {
+ MaskNone = 0,
+ Volatile = 0x00000001,
+ Aligned = 0x00000002,
+ Nontemporal = 0x00000004,
+ },
+
+ Scope = {
+ CrossDevice = 0,
+ Device = 1,
+ Workgroup = 2,
+ Subgroup = 3,
+ Invocation = 4,
+ },
+
+ GroupOperation = {
+ Reduce = 0,
+ InclusiveScan = 1,
+ ExclusiveScan = 2,
+ },
+
+ KernelEnqueueFlags = {
+ NoWait = 0,
+ WaitKernel = 1,
+ WaitWorkGroup = 2,
+ },
+
+ KernelProfilingInfoShift = {
+ CmdExecTime = 0,
+ },
+
+ KernelProfilingInfoMask = {
+ MaskNone = 0,
+ CmdExecTime = 0x00000001,
+ },
+
+ Capability = {
+ Matrix = 0,
+ Shader = 1,
+ Geometry = 2,
+ Tessellation = 3,
+ Addresses = 4,
+ Linkage = 5,
+ Kernel = 6,
+ Vector16 = 7,
+ Float16Buffer = 8,
+ Float16 = 9,
+ Float64 = 10,
+ Int64 = 11,
+ Int64Atomics = 12,
+ ImageBasic = 13,
+ ImageReadWrite = 14,
+ ImageMipmap = 15,
+ Pipes = 17,
+ Groups = 18,
+ DeviceEnqueue = 19,
+ LiteralSampler = 20,
+ AtomicStorage = 21,
+ Int16 = 22,
+ TessellationPointSize = 23,
+ GeometryPointSize = 24,
+ ImageGatherExtended = 25,
+ StorageImageMultisample = 27,
+ UniformBufferArrayDynamicIndexing = 28,
+ SampledImageArrayDynamicIndexing = 29,
+ StorageBufferArrayDynamicIndexing = 30,
+ StorageImageArrayDynamicIndexing = 31,
+ ClipDistance = 32,
+ CullDistance = 33,
+ ImageCubeArray = 34,
+ SampleRateShading = 35,
+ ImageRect = 36,
+ SampledRect = 37,
+ GenericPointer = 38,
+ Int8 = 39,
+ InputAttachment = 40,
+ SparseResidency = 41,
+ MinLod = 42,
+ Sampled1D = 43,
+ Image1D = 44,
+ SampledCubeArray = 45,
+ SampledBuffer = 46,
+ ImageBuffer = 47,
+ ImageMSArray = 48,
+ StorageImageExtendedFormats = 49,
+ ImageQuery = 50,
+ DerivativeControl = 51,
+ InterpolationFunction = 52,
+ TransformFeedback = 53,
+ GeometryStreams = 54,
+ StorageImageReadWithoutFormat = 55,
+ StorageImageWriteWithoutFormat = 56,
+ MultiViewport = 57,
+ SubgroupDispatch = 58,
+ NamedBarrier = 59,
+ PipeStorage = 60,
+ SubgroupBallotKHR = 4423,
+ DrawParameters = 4427,
+ SubgroupVoteKHR = 4431,
+ StorageBuffer16BitAccess = 4433,
+ StorageUniformBufferBlock16 = 4433,
+ StorageUniform16 = 4434,
+ UniformAndStorageBuffer16BitAccess = 4434,
+ StoragePushConstant16 = 4435,
+ StorageInputOutput16 = 4436,
+ DeviceGroup = 4437,
+ MultiView = 4439,
+ VariablePointersStorageBuffer = 4441,
+ VariablePointers = 4442,
+ SampleMaskOverrideCoverageNV = 5249,
+ GeometryShaderPassthroughNV = 5251,
+ ShaderViewportIndexLayerNV = 5254,
+ ShaderViewportMaskNV = 5255,
+ ShaderStereoViewNV = 5259,
+ PerViewAttributesNV = 5260,
+ },
+
+ Op = {
+ OpNop = 0,
+ OpUndef = 1,
+ OpSourceContinued = 2,
+ OpSource = 3,
+ OpSourceExtension = 4,
+ OpName = 5,
+ OpMemberName = 6,
+ OpString = 7,
+ OpLine = 8,
+ OpExtension = 10,
+ OpExtInstImport = 11,
+ OpExtInst = 12,
+ OpMemoryModel = 14,
+ OpEntryPoint = 15,
+ OpExecutionMode = 16,
+ OpCapability = 17,
+ OpTypeVoid = 19,
+ OpTypeBool = 20,
+ OpTypeInt = 21,
+ OpTypeFloat = 22,
+ OpTypeVector = 23,
+ OpTypeMatrix = 24,
+ OpTypeImage = 25,
+ OpTypeSampler = 26,
+ OpTypeSampledImage = 27,
+ OpTypeArray = 28,
+ OpTypeRuntimeArray = 29,
+ OpTypeStruct = 30,
+ OpTypeOpaque = 31,
+ OpTypePointer = 32,
+ OpTypeFunction = 33,
+ OpTypeEvent = 34,
+ OpTypeDeviceEvent = 35,
+ OpTypeReserveId = 36,
+ OpTypeQueue = 37,
+ OpTypePipe = 38,
+ OpTypeForwardPointer = 39,
+ OpConstantTrue = 41,
+ OpConstantFalse = 42,
+ OpConstant = 43,
+ OpConstantComposite = 44,
+ OpConstantSampler = 45,
+ OpConstantNull = 46,
+ OpSpecConstantTrue = 48,
+ OpSpecConstantFalse = 49,
+ OpSpecConstant = 50,
+ OpSpecConstantComposite = 51,
+ OpSpecConstantOp = 52,
+ OpFunction = 54,
+ OpFunctionParameter = 55,
+ OpFunctionEnd = 56,
+ OpFunctionCall = 57,
+ OpVariable = 59,
+ OpImageTexelPointer = 60,
+ OpLoad = 61,
+ OpStore = 62,
+ OpCopyMemory = 63,
+ OpCopyMemorySized = 64,
+ OpAccessChain = 65,
+ OpInBoundsAccessChain = 66,
+ OpPtrAccessChain = 67,
+ OpArrayLength = 68,
+ OpGenericPtrMemSemantics = 69,
+ OpInBoundsPtrAccessChain = 70,
+ OpDecorate = 71,
+ OpMemberDecorate = 72,
+ OpDecorationGroup = 73,
+ OpGroupDecorate = 74,
+ OpGroupMemberDecorate = 75,
+ OpVectorExtractDynamic = 77,
+ OpVectorInsertDynamic = 78,
+ OpVectorShuffle = 79,
+ OpCompositeConstruct = 80,
+ OpCompositeExtract = 81,
+ OpCompositeInsert = 82,
+ OpCopyObject = 83,
+ OpTranspose = 84,
+ OpSampledImage = 86,
+ OpImageSampleImplicitLod = 87,
+ OpImageSampleExplicitLod = 88,
+ OpImageSampleDrefImplicitLod = 89,
+ OpImageSampleDrefExplicitLod = 90,
+ OpImageSampleProjImplicitLod = 91,
+ OpImageSampleProjExplicitLod = 92,
+ OpImageSampleProjDrefImplicitLod = 93,
+ OpImageSampleProjDrefExplicitLod = 94,
+ OpImageFetch = 95,
+ OpImageGather = 96,
+ OpImageDrefGather = 97,
+ OpImageRead = 98,
+ OpImageWrite = 99,
+ OpImage = 100,
+ OpImageQueryFormat = 101,
+ OpImageQueryOrder = 102,
+ OpImageQuerySizeLod = 103,
+ OpImageQuerySize = 104,
+ OpImageQueryLod = 105,
+ OpImageQueryLevels = 106,
+ OpImageQuerySamples = 107,
+ OpConvertFToU = 109,
+ OpConvertFToS = 110,
+ OpConvertSToF = 111,
+ OpConvertUToF = 112,
+ OpUConvert = 113,
+ OpSConvert = 114,
+ OpFConvert = 115,
+ OpQuantizeToF16 = 116,
+ OpConvertPtrToU = 117,
+ OpSatConvertSToU = 118,
+ OpSatConvertUToS = 119,
+ OpConvertUToPtr = 120,
+ OpPtrCastToGeneric = 121,
+ OpGenericCastToPtr = 122,
+ OpGenericCastToPtrExplicit = 123,
+ OpBitcast = 124,
+ OpSNegate = 126,
+ OpFNegate = 127,
+ OpIAdd = 128,
+ OpFAdd = 129,
+ OpISub = 130,
+ OpFSub = 131,
+ OpIMul = 132,
+ OpFMul = 133,
+ OpUDiv = 134,
+ OpSDiv = 135,
+ OpFDiv = 136,
+ OpUMod = 137,
+ OpSRem = 138,
+ OpSMod = 139,
+ OpFRem = 140,
+ OpFMod = 141,
+ OpVectorTimesScalar = 142,
+ OpMatrixTimesScalar = 143,
+ OpVectorTimesMatrix = 144,
+ OpMatrixTimesVector = 145,
+ OpMatrixTimesMatrix = 146,
+ OpOuterProduct = 147,
+ OpDot = 148,
+ OpIAddCarry = 149,
+ OpISubBorrow = 150,
+ OpUMulExtended = 151,
+ OpSMulExtended = 152,
+ OpAny = 154,
+ OpAll = 155,
+ OpIsNan = 156,
+ OpIsInf = 157,
+ OpIsFinite = 158,
+ OpIsNormal = 159,
+ OpSignBitSet = 160,
+ OpLessOrGreater = 161,
+ OpOrdered = 162,
+ OpUnordered = 163,
+ OpLogicalEqual = 164,
+ OpLogicalNotEqual = 165,
+ OpLogicalOr = 166,
+ OpLogicalAnd = 167,
+ OpLogicalNot = 168,
+ OpSelect = 169,
+ OpIEqual = 170,
+ OpINotEqual = 171,
+ OpUGreaterThan = 172,
+ OpSGreaterThan = 173,
+ OpUGreaterThanEqual = 174,
+ OpSGreaterThanEqual = 175,
+ OpULessThan = 176,
+ OpSLessThan = 177,
+ OpULessThanEqual = 178,
+ OpSLessThanEqual = 179,
+ OpFOrdEqual = 180,
+ OpFUnordEqual = 181,
+ OpFOrdNotEqual = 182,
+ OpFUnordNotEqual = 183,
+ OpFOrdLessThan = 184,
+ OpFUnordLessThan = 185,
+ OpFOrdGreaterThan = 186,
+ OpFUnordGreaterThan = 187,
+ OpFOrdLessThanEqual = 188,
+ OpFUnordLessThanEqual = 189,
+ OpFOrdGreaterThanEqual = 190,
+ OpFUnordGreaterThanEqual = 191,
+ OpShiftRightLogical = 194,
+ OpShiftRightArithmetic = 195,
+ OpShiftLeftLogical = 196,
+ OpBitwiseOr = 197,
+ OpBitwiseXor = 198,
+ OpBitwiseAnd = 199,
+ OpNot = 200,
+ OpBitFieldInsert = 201,
+ OpBitFieldSExtract = 202,
+ OpBitFieldUExtract = 203,
+ OpBitReverse = 204,
+ OpBitCount = 205,
+ OpDPdx = 207,
+ OpDPdy = 208,
+ OpFwidth = 209,
+ OpDPdxFine = 210,
+ OpDPdyFine = 211,
+ OpFwidthFine = 212,
+ OpDPdxCoarse = 213,
+ OpDPdyCoarse = 214,
+ OpFwidthCoarse = 215,
+ OpEmitVertex = 218,
+ OpEndPrimitive = 219,
+ OpEmitStreamVertex = 220,
+ OpEndStreamPrimitive = 221,
+ OpControlBarrier = 224,
+ OpMemoryBarrier = 225,
+ OpAtomicLoad = 227,
+ OpAtomicStore = 228,
+ OpAtomicExchange = 229,
+ OpAtomicCompareExchange = 230,
+ OpAtomicCompareExchangeWeak = 231,
+ OpAtomicIIncrement = 232,
+ OpAtomicIDecrement = 233,
+ OpAtomicIAdd = 234,
+ OpAtomicISub = 235,
+ OpAtomicSMin = 236,
+ OpAtomicUMin = 237,
+ OpAtomicSMax = 238,
+ OpAtomicUMax = 239,
+ OpAtomicAnd = 240,
+ OpAtomicOr = 241,
+ OpAtomicXor = 242,
+ OpPhi = 245,
+ OpLoopMerge = 246,
+ OpSelectionMerge = 247,
+ OpLabel = 248,
+ OpBranch = 249,
+ OpBranchConditional = 250,
+ OpSwitch = 251,
+ OpKill = 252,
+ OpReturn = 253,
+ OpReturnValue = 254,
+ OpUnreachable = 255,
+ OpLifetimeStart = 256,
+ OpLifetimeStop = 257,
+ OpGroupAsyncCopy = 259,
+ OpGroupWaitEvents = 260,
+ OpGroupAll = 261,
+ OpGroupAny = 262,
+ OpGroupBroadcast = 263,
+ OpGroupIAdd = 264,
+ OpGroupFAdd = 265,
+ OpGroupFMin = 266,
+ OpGroupUMin = 267,
+ OpGroupSMin = 268,
+ OpGroupFMax = 269,
+ OpGroupUMax = 270,
+ OpGroupSMax = 271,
+ OpReadPipe = 274,
+ OpWritePipe = 275,
+ OpReservedReadPipe = 276,
+ OpReservedWritePipe = 277,
+ OpReserveReadPipePackets = 278,
+ OpReserveWritePipePackets = 279,
+ OpCommitReadPipe = 280,
+ OpCommitWritePipe = 281,
+ OpIsValidReserveId = 282,
+ OpGetNumPipePackets = 283,
+ OpGetMaxPipePackets = 284,
+ OpGroupReserveReadPipePackets = 285,
+ OpGroupReserveWritePipePackets = 286,
+ OpGroupCommitReadPipe = 287,
+ OpGroupCommitWritePipe = 288,
+ OpEnqueueMarker = 291,
+ OpEnqueueKernel = 292,
+ OpGetKernelNDrangeSubGroupCount = 293,
+ OpGetKernelNDrangeMaxSubGroupSize = 294,
+ OpGetKernelWorkGroupSize = 295,
+ OpGetKernelPreferredWorkGroupSizeMultiple = 296,
+ OpRetainEvent = 297,
+ OpReleaseEvent = 298,
+ OpCreateUserEvent = 299,
+ OpIsValidEvent = 300,
+ OpSetUserEventStatus = 301,
+ OpCaptureEventProfilingInfo = 302,
+ OpGetDefaultQueue = 303,
+ OpBuildNDRange = 304,
+ OpImageSparseSampleImplicitLod = 305,
+ OpImageSparseSampleExplicitLod = 306,
+ OpImageSparseSampleDrefImplicitLod = 307,
+ OpImageSparseSampleDrefExplicitLod = 308,
+ OpImageSparseSampleProjImplicitLod = 309,
+ OpImageSparseSampleProjExplicitLod = 310,
+ OpImageSparseSampleProjDrefImplicitLod = 311,
+ OpImageSparseSampleProjDrefExplicitLod = 312,
+ OpImageSparseFetch = 313,
+ OpImageSparseGather = 314,
+ OpImageSparseDrefGather = 315,
+ OpImageSparseTexelsResident = 316,
+ OpNoLine = 317,
+ OpAtomicFlagTestAndSet = 318,
+ OpAtomicFlagClear = 319,
+ OpImageSparseRead = 320,
+ OpSizeOf = 321,
+ OpTypePipeStorage = 322,
+ OpConstantPipeStorage = 323,
+ OpCreatePipeFromPipeStorage = 324,
+ OpGetKernelLocalSizeForSubgroupCount = 325,
+ OpGetKernelMaxNumSubgroups = 326,
+ OpTypeNamedBarrier = 327,
+ OpNamedBarrierInitialize = 328,
+ OpMemoryNamedBarrier = 329,
+ OpModuleProcessed = 330,
+ OpSubgroupBallotKHR = 4421,
+ OpSubgroupFirstInvocationKHR = 4422,
+ OpSubgroupAllKHR = 4428,
+ OpSubgroupAnyKHR = 4429,
+ OpSubgroupAllEqualKHR = 4430,
+ OpSubgroupReadInvocationKHR = 4432,
+ },
+
+}
+
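The generated header comment near the top of spirv.lua pairs each *Mask table with a *Shift table: every mask value is 1 shifted left by the matching shift enumerant, and mask-style tokens can be OR'd together while the other tokens are mutually exclusive. A minimal C sketch of that relationship, using the ImageOperands values listed in the tables above (the constant names here are illustrative, not taken from any shipped header):

    #include <assert.h>
    #include <stdio.h>

    /* Shift enumerants, as listed in the ImageOperandsShift table above. */
    enum { ImageOperandsBiasShift = 0, ImageOperandsLodShift = 1 };

    /* Mask enumerants, as listed in the ImageOperandsMask table above. */
    enum { ImageOperandsBiasMask = 0x00000001, ImageOperandsLodMask = 0x00000002 };

    int main(void) {
        /* Each mask is 1 << its shift counterpart. */
        assert(ImageOperandsBiasMask == (1u << ImageOperandsBiasShift));
        assert(ImageOperandsLodMask  == (1u << ImageOperandsLodShift));

        /* Mask-style tokens combine with bitwise OR. */
        unsigned operands = ImageOperandsBiasMask | ImageOperandsLodMask;
        printf("combined image operands: 0x%08x\n", operands); /* prints 0x00000003 */
        return 0;
    }

The same pairing holds for the other *Shift/*Mask tables above (FPFastMathMode, SelectionControl, LoopControl, FunctionControl, MemorySemantics, MemoryAccess, KernelProfilingInfo).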
diff --git a/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/spirv.py b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/spirv.py
new file mode 100755
index 0000000..75f42dc
--- /dev/null
+++ b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/spirv.py
@@ -0,0 +1,927 @@
+# Copyright (c) 2014-2017 The Khronos Group Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and/or associated documentation files (the "Materials"),
+# to deal in the Materials without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Materials, and to permit persons to whom the
+# Materials are furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Materials.
+#
+# MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+# STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+# HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+#
+# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+# IN THE MATERIALS.
+
+# This header is automatically generated by the same tool that creates
+# the Binary Section of the SPIR-V specification.
+
+# Enumeration tokens for SPIR-V, in various styles:
+# C, C++, C++11, JSON, Lua, Python
+#
+# - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL
+# - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL
+# - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL
+# - Lua will use tables, e.g.: spv.SourceLanguage.GLSL
+# - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL']
+#
+# Some tokens act like mask values, which can be OR'd together,
+# while others are mutually exclusive. The mask-like ones have
+# "Mask" in their name, and a parallel enum that has the shift
+# amount (1 << x) for each corresponding enumerant.
+
+spv = {
+ 'MagicNumber' : 0x07230203,
+ 'Version' : 0x00010100,
+ 'Revision' : 6,
+ 'OpCodeMask' : 0xffff,
+ 'WordCountShift' : 16,
+
+ 'SourceLanguage' : {
+ 'Unknown' : 0,
+ 'ESSL' : 1,
+ 'GLSL' : 2,
+ 'OpenCL_C' : 3,
+ 'OpenCL_CPP' : 4,
+ 'HLSL' : 5,
+ },
+
+ 'ExecutionModel' : {
+ 'Vertex' : 0,
+ 'TessellationControl' : 1,
+ 'TessellationEvaluation' : 2,
+ 'Geometry' : 3,
+ 'Fragment' : 4,
+ 'GLCompute' : 5,
+ 'Kernel' : 6,
+ },
+
+ 'AddressingModel' : {
+ 'Logical' : 0,
+ 'Physical32' : 1,
+ 'Physical64' : 2,
+ },
+
+ 'MemoryModel' : {
+ 'Simple' : 0,
+ 'GLSL450' : 1,
+ 'OpenCL' : 2,
+ },
+
+ 'ExecutionMode' : {
+ 'Invocations' : 0,
+ 'SpacingEqual' : 1,
+ 'SpacingFractionalEven' : 2,
+ 'SpacingFractionalOdd' : 3,
+ 'VertexOrderCw' : 4,
+ 'VertexOrderCcw' : 5,
+ 'PixelCenterInteger' : 6,
+ 'OriginUpperLeft' : 7,
+ 'OriginLowerLeft' : 8,
+ 'EarlyFragmentTests' : 9,
+ 'PointMode' : 10,
+ 'Xfb' : 11,
+ 'DepthReplacing' : 12,
+ 'DepthGreater' : 14,
+ 'DepthLess' : 15,
+ 'DepthUnchanged' : 16,
+ 'LocalSize' : 17,
+ 'LocalSizeHint' : 18,
+ 'InputPoints' : 19,
+ 'InputLines' : 20,
+ 'InputLinesAdjacency' : 21,
+ 'Triangles' : 22,
+ 'InputTrianglesAdjacency' : 23,
+ 'Quads' : 24,
+ 'Isolines' : 25,
+ 'OutputVertices' : 26,
+ 'OutputPoints' : 27,
+ 'OutputLineStrip' : 28,
+ 'OutputTriangleStrip' : 29,
+ 'VecTypeHint' : 30,
+ 'ContractionOff' : 31,
+ 'Initializer' : 33,
+ 'Finalizer' : 34,
+ 'SubgroupSize' : 35,
+ 'SubgroupsPerWorkgroup' : 36,
+ },
+
+ 'StorageClass' : {
+ 'UniformConstant' : 0,
+ 'Input' : 1,
+ 'Uniform' : 2,
+ 'Output' : 3,
+ 'Workgroup' : 4,
+ 'CrossWorkgroup' : 5,
+ 'Private' : 6,
+ 'Function' : 7,
+ 'Generic' : 8,
+ 'PushConstant' : 9,
+ 'AtomicCounter' : 10,
+ 'Image' : 11,
+ 'StorageBuffer' : 12,
+ },
+
+ 'Dim' : {
+ 'Dim1D' : 0,
+ 'Dim2D' : 1,
+ 'Dim3D' : 2,
+ 'Cube' : 3,
+ 'Rect' : 4,
+ 'Buffer' : 5,
+ 'SubpassData' : 6,
+ },
+
+ 'SamplerAddressingMode' : {
+ 'None' : 0,
+ 'ClampToEdge' : 1,
+ 'Clamp' : 2,
+ 'Repeat' : 3,
+ 'RepeatMirrored' : 4,
+ },
+
+ 'SamplerFilterMode' : {
+ 'Nearest' : 0,
+ 'Linear' : 1,
+ },
+
+ 'ImageFormat' : {
+ 'Unknown' : 0,
+ 'Rgba32f' : 1,
+ 'Rgba16f' : 2,
+ 'R32f' : 3,
+ 'Rgba8' : 4,
+ 'Rgba8Snorm' : 5,
+ 'Rg32f' : 6,
+ 'Rg16f' : 7,
+ 'R11fG11fB10f' : 8,
+ 'R16f' : 9,
+ 'Rgba16' : 10,
+ 'Rgb10A2' : 11,
+ 'Rg16' : 12,
+ 'Rg8' : 13,
+ 'R16' : 14,
+ 'R8' : 15,
+ 'Rgba16Snorm' : 16,
+ 'Rg16Snorm' : 17,
+ 'Rg8Snorm' : 18,
+ 'R16Snorm' : 19,
+ 'R8Snorm' : 20,
+ 'Rgba32i' : 21,
+ 'Rgba16i' : 22,
+ 'Rgba8i' : 23,
+ 'R32i' : 24,
+ 'Rg32i' : 25,
+ 'Rg16i' : 26,
+ 'Rg8i' : 27,
+ 'R16i' : 28,
+ 'R8i' : 29,
+ 'Rgba32ui' : 30,
+ 'Rgba16ui' : 31,
+ 'Rgba8ui' : 32,
+ 'R32ui' : 33,
+ 'Rgb10a2ui' : 34,
+ 'Rg32ui' : 35,
+ 'Rg16ui' : 36,
+ 'Rg8ui' : 37,
+ 'R16ui' : 38,
+ 'R8ui' : 39,
+ },
+
+ 'ImageChannelOrder' : {
+ 'R' : 0,
+ 'A' : 1,
+ 'RG' : 2,
+ 'RA' : 3,
+ 'RGB' : 4,
+ 'RGBA' : 5,
+ 'BGRA' : 6,
+ 'ARGB' : 7,
+ 'Intensity' : 8,
+ 'Luminance' : 9,
+ 'Rx' : 10,
+ 'RGx' : 11,
+ 'RGBx' : 12,
+ 'Depth' : 13,
+ 'DepthStencil' : 14,
+ 'sRGB' : 15,
+ 'sRGBx' : 16,
+ 'sRGBA' : 17,
+ 'sBGRA' : 18,
+ 'ABGR' : 19,
+ },
+
+ 'ImageChannelDataType' : {
+ 'SnormInt8' : 0,
+ 'SnormInt16' : 1,
+ 'UnormInt8' : 2,
+ 'UnormInt16' : 3,
+ 'UnormShort565' : 4,
+ 'UnormShort555' : 5,
+ 'UnormInt101010' : 6,
+ 'SignedInt8' : 7,
+ 'SignedInt16' : 8,
+ 'SignedInt32' : 9,
+ 'UnsignedInt8' : 10,
+ 'UnsignedInt16' : 11,
+ 'UnsignedInt32' : 12,
+ 'HalfFloat' : 13,
+ 'Float' : 14,
+ 'UnormInt24' : 15,
+ 'UnormInt101010_2' : 16,
+ },
+
+ 'ImageOperandsShift' : {
+ 'Bias' : 0,
+ 'Lod' : 1,
+ 'Grad' : 2,
+ 'ConstOffset' : 3,
+ 'Offset' : 4,
+ 'ConstOffsets' : 5,
+ 'Sample' : 6,
+ 'MinLod' : 7,
+ },
+
+ 'ImageOperandsMask' : {
+ 'MaskNone' : 0,
+ 'Bias' : 0x00000001,
+ 'Lod' : 0x00000002,
+ 'Grad' : 0x00000004,
+ 'ConstOffset' : 0x00000008,
+ 'Offset' : 0x00000010,
+ 'ConstOffsets' : 0x00000020,
+ 'Sample' : 0x00000040,
+ 'MinLod' : 0x00000080,
+ },
+
+ 'FPFastMathModeShift' : {
+ 'NotNaN' : 0,
+ 'NotInf' : 1,
+ 'NSZ' : 2,
+ 'AllowRecip' : 3,
+ 'Fast' : 4,
+ },
+
+ 'FPFastMathModeMask' : {
+ 'MaskNone' : 0,
+ 'NotNaN' : 0x00000001,
+ 'NotInf' : 0x00000002,
+ 'NSZ' : 0x00000004,
+ 'AllowRecip' : 0x00000008,
+ 'Fast' : 0x00000010,
+ },
+
+ 'FPRoundingMode' : {
+ 'RTE' : 0,
+ 'RTZ' : 1,
+ 'RTP' : 2,
+ 'RTN' : 3,
+ },
+
+ 'LinkageType' : {
+ 'Export' : 0,
+ 'Import' : 1,
+ },
+
+ 'AccessQualifier' : {
+ 'ReadOnly' : 0,
+ 'WriteOnly' : 1,
+ 'ReadWrite' : 2,
+ },
+
+ 'FunctionParameterAttribute' : {
+ 'Zext' : 0,
+ 'Sext' : 1,
+ 'ByVal' : 2,
+ 'Sret' : 3,
+ 'NoAlias' : 4,
+ 'NoCapture' : 5,
+ 'NoWrite' : 6,
+ 'NoReadWrite' : 7,
+ },
+
+ 'Decoration' : {
+ 'RelaxedPrecision' : 0,
+ 'SpecId' : 1,
+ 'Block' : 2,
+ 'BufferBlock' : 3,
+ 'RowMajor' : 4,
+ 'ColMajor' : 5,
+ 'ArrayStride' : 6,
+ 'MatrixStride' : 7,
+ 'GLSLShared' : 8,
+ 'GLSLPacked' : 9,
+ 'CPacked' : 10,
+ 'BuiltIn' : 11,
+ 'NoPerspective' : 13,
+ 'Flat' : 14,
+ 'Patch' : 15,
+ 'Centroid' : 16,
+ 'Sample' : 17,
+ 'Invariant' : 18,
+ 'Restrict' : 19,
+ 'Aliased' : 20,
+ 'Volatile' : 21,
+ 'Constant' : 22,
+ 'Coherent' : 23,
+ 'NonWritable' : 24,
+ 'NonReadable' : 25,
+ 'Uniform' : 26,
+ 'SaturatedConversion' : 28,
+ 'Stream' : 29,
+ 'Location' : 30,
+ 'Component' : 31,
+ 'Index' : 32,
+ 'Binding' : 33,
+ 'DescriptorSet' : 34,
+ 'Offset' : 35,
+ 'XfbBuffer' : 36,
+ 'XfbStride' : 37,
+ 'FuncParamAttr' : 38,
+ 'FPRoundingMode' : 39,
+ 'FPFastMathMode' : 40,
+ 'LinkageAttributes' : 41,
+ 'NoContraction' : 42,
+ 'InputAttachmentIndex' : 43,
+ 'Alignment' : 44,
+ 'MaxByteOffset' : 45,
+ 'OverrideCoverageNV' : 5248,
+ 'PassthroughNV' : 5250,
+ 'ViewportRelativeNV' : 5252,
+ 'SecondaryViewportRelativeNV' : 5256,
+ },
+
+ 'BuiltIn' : {
+ 'Position' : 0,
+ 'PointSize' : 1,
+ 'ClipDistance' : 3,
+ 'CullDistance' : 4,
+ 'VertexId' : 5,
+ 'InstanceId' : 6,
+ 'PrimitiveId' : 7,
+ 'InvocationId' : 8,
+ 'Layer' : 9,
+ 'ViewportIndex' : 10,
+ 'TessLevelOuter' : 11,
+ 'TessLevelInner' : 12,
+ 'TessCoord' : 13,
+ 'PatchVertices' : 14,
+ 'FragCoord' : 15,
+ 'PointCoord' : 16,
+ 'FrontFacing' : 17,
+ 'SampleId' : 18,
+ 'SamplePosition' : 19,
+ 'SampleMask' : 20,
+ 'FragDepth' : 22,
+ 'HelperInvocation' : 23,
+ 'NumWorkgroups' : 24,
+ 'WorkgroupSize' : 25,
+ 'WorkgroupId' : 26,
+ 'LocalInvocationId' : 27,
+ 'GlobalInvocationId' : 28,
+ 'LocalInvocationIndex' : 29,
+ 'WorkDim' : 30,
+ 'GlobalSize' : 31,
+ 'EnqueuedWorkgroupSize' : 32,
+ 'GlobalOffset' : 33,
+ 'GlobalLinearId' : 34,
+ 'SubgroupSize' : 36,
+ 'SubgroupMaxSize' : 37,
+ 'NumSubgroups' : 38,
+ 'NumEnqueuedSubgroups' : 39,
+ 'SubgroupId' : 40,
+ 'SubgroupLocalInvocationId' : 41,
+ 'VertexIndex' : 42,
+ 'InstanceIndex' : 43,
+ 'SubgroupEqMaskKHR' : 4416,
+ 'SubgroupGeMaskKHR' : 4417,
+ 'SubgroupGtMaskKHR' : 4418,
+ 'SubgroupLeMaskKHR' : 4419,
+ 'SubgroupLtMaskKHR' : 4420,
+ 'BaseVertex' : 4424,
+ 'BaseInstance' : 4425,
+ 'DrawIndex' : 4426,
+ 'DeviceIndex' : 4438,
+ 'ViewIndex' : 4440,
+ 'ViewportMaskNV' : 5253,
+ 'SecondaryPositionNV' : 5257,
+ 'SecondaryViewportMaskNV' : 5258,
+ 'PositionPerViewNV' : 5261,
+ 'ViewportMaskPerViewNV' : 5262,
+ },
+
+ 'SelectionControlShift' : {
+ 'Flatten' : 0,
+ 'DontFlatten' : 1,
+ },
+
+ 'SelectionControlMask' : {
+ 'MaskNone' : 0,
+ 'Flatten' : 0x00000001,
+ 'DontFlatten' : 0x00000002,
+ },
+
+ 'LoopControlShift' : {
+ 'Unroll' : 0,
+ 'DontUnroll' : 1,
+ 'DependencyInfinite' : 2,
+ 'DependencyLength' : 3,
+ },
+
+ 'LoopControlMask' : {
+ 'MaskNone' : 0,
+ 'Unroll' : 0x00000001,
+ 'DontUnroll' : 0x00000002,
+ 'DependencyInfinite' : 0x00000004,
+ 'DependencyLength' : 0x00000008,
+ },
+
+ 'FunctionControlShift' : {
+ 'Inline' : 0,
+ 'DontInline' : 1,
+ 'Pure' : 2,
+ 'Const' : 3,
+ },
+
+ 'FunctionControlMask' : {
+ 'MaskNone' : 0,
+ 'Inline' : 0x00000001,
+ 'DontInline' : 0x00000002,
+ 'Pure' : 0x00000004,
+ 'Const' : 0x00000008,
+ },
+
+ 'MemorySemanticsShift' : {
+ 'Acquire' : 1,
+ 'Release' : 2,
+ 'AcquireRelease' : 3,
+ 'SequentiallyConsistent' : 4,
+ 'UniformMemory' : 6,
+ 'SubgroupMemory' : 7,
+ 'WorkgroupMemory' : 8,
+ 'CrossWorkgroupMemory' : 9,
+ 'AtomicCounterMemory' : 10,
+ 'ImageMemory' : 11,
+ },
+
+ 'MemorySemanticsMask' : {
+ 'MaskNone' : 0,
+ 'Acquire' : 0x00000002,
+ 'Release' : 0x00000004,
+ 'AcquireRelease' : 0x00000008,
+ 'SequentiallyConsistent' : 0x00000010,
+ 'UniformMemory' : 0x00000040,
+ 'SubgroupMemory' : 0x00000080,
+ 'WorkgroupMemory' : 0x00000100,
+ 'CrossWorkgroupMemory' : 0x00000200,
+ 'AtomicCounterMemory' : 0x00000400,
+ 'ImageMemory' : 0x00000800,
+ },
+
+ 'MemoryAccessShift' : {
+ 'Volatile' : 0,
+ 'Aligned' : 1,
+ 'Nontemporal' : 2,
+ },
+
+ 'MemoryAccessMask' : {
+ 'MaskNone' : 0,
+ 'Volatile' : 0x00000001,
+ 'Aligned' : 0x00000002,
+ 'Nontemporal' : 0x00000004,
+ },
+
+ 'Scope' : {
+ 'CrossDevice' : 0,
+ 'Device' : 1,
+ 'Workgroup' : 2,
+ 'Subgroup' : 3,
+ 'Invocation' : 4,
+ },
+
+ 'GroupOperation' : {
+ 'Reduce' : 0,
+ 'InclusiveScan' : 1,
+ 'ExclusiveScan' : 2,
+ },
+
+ 'KernelEnqueueFlags' : {
+ 'NoWait' : 0,
+ 'WaitKernel' : 1,
+ 'WaitWorkGroup' : 2,
+ },
+
+ 'KernelProfilingInfoShift' : {
+ 'CmdExecTime' : 0,
+ },
+
+ 'KernelProfilingInfoMask' : {
+ 'MaskNone' : 0,
+ 'CmdExecTime' : 0x00000001,
+ },
+
+ 'Capability' : {
+ 'Matrix' : 0,
+ 'Shader' : 1,
+ 'Geometry' : 2,
+ 'Tessellation' : 3,
+ 'Addresses' : 4,
+ 'Linkage' : 5,
+ 'Kernel' : 6,
+ 'Vector16' : 7,
+ 'Float16Buffer' : 8,
+ 'Float16' : 9,
+ 'Float64' : 10,
+ 'Int64' : 11,
+ 'Int64Atomics' : 12,
+ 'ImageBasic' : 13,
+ 'ImageReadWrite' : 14,
+ 'ImageMipmap' : 15,
+ 'Pipes' : 17,
+ 'Groups' : 18,
+ 'DeviceEnqueue' : 19,
+ 'LiteralSampler' : 20,
+ 'AtomicStorage' : 21,
+ 'Int16' : 22,
+ 'TessellationPointSize' : 23,
+ 'GeometryPointSize' : 24,
+ 'ImageGatherExtended' : 25,
+ 'StorageImageMultisample' : 27,
+ 'UniformBufferArrayDynamicIndexing' : 28,
+ 'SampledImageArrayDynamicIndexing' : 29,
+ 'StorageBufferArrayDynamicIndexing' : 30,
+ 'StorageImageArrayDynamicIndexing' : 31,
+ 'ClipDistance' : 32,
+ 'CullDistance' : 33,
+ 'ImageCubeArray' : 34,
+ 'SampleRateShading' : 35,
+ 'ImageRect' : 36,
+ 'SampledRect' : 37,
+ 'GenericPointer' : 38,
+ 'Int8' : 39,
+ 'InputAttachment' : 40,
+ 'SparseResidency' : 41,
+ 'MinLod' : 42,
+ 'Sampled1D' : 43,
+ 'Image1D' : 44,
+ 'SampledCubeArray' : 45,
+ 'SampledBuffer' : 46,
+ 'ImageBuffer' : 47,
+ 'ImageMSArray' : 48,
+ 'StorageImageExtendedFormats' : 49,
+ 'ImageQuery' : 50,
+ 'DerivativeControl' : 51,
+ 'InterpolationFunction' : 52,
+ 'TransformFeedback' : 53,
+ 'GeometryStreams' : 54,
+ 'StorageImageReadWithoutFormat' : 55,
+ 'StorageImageWriteWithoutFormat' : 56,
+ 'MultiViewport' : 57,
+ 'SubgroupDispatch' : 58,
+ 'NamedBarrier' : 59,
+ 'PipeStorage' : 60,
+ 'SubgroupBallotKHR' : 4423,
+ 'DrawParameters' : 4427,
+ 'SubgroupVoteKHR' : 4431,
+ 'StorageBuffer16BitAccess' : 4433,
+ 'StorageUniformBufferBlock16' : 4433,
+ 'StorageUniform16' : 4434,
+ 'UniformAndStorageBuffer16BitAccess' : 4434,
+ 'StoragePushConstant16' : 4435,
+ 'StorageInputOutput16' : 4436,
+ 'DeviceGroup' : 4437,
+ 'MultiView' : 4439,
+ 'VariablePointersStorageBuffer' : 4441,
+ 'VariablePointers' : 4442,
+ 'SampleMaskOverrideCoverageNV' : 5249,
+ 'GeometryShaderPassthroughNV' : 5251,
+ 'ShaderViewportIndexLayerNV' : 5254,
+ 'ShaderViewportMaskNV' : 5255,
+ 'ShaderStereoViewNV' : 5259,
+ 'PerViewAttributesNV' : 5260,
+ },
+
+ 'Op' : {
+ 'OpNop' : 0,
+ 'OpUndef' : 1,
+ 'OpSourceContinued' : 2,
+ 'OpSource' : 3,
+ 'OpSourceExtension' : 4,
+ 'OpName' : 5,
+ 'OpMemberName' : 6,
+ 'OpString' : 7,
+ 'OpLine' : 8,
+ 'OpExtension' : 10,
+ 'OpExtInstImport' : 11,
+ 'OpExtInst' : 12,
+ 'OpMemoryModel' : 14,
+ 'OpEntryPoint' : 15,
+ 'OpExecutionMode' : 16,
+ 'OpCapability' : 17,
+ 'OpTypeVoid' : 19,
+ 'OpTypeBool' : 20,
+ 'OpTypeInt' : 21,
+ 'OpTypeFloat' : 22,
+ 'OpTypeVector' : 23,
+ 'OpTypeMatrix' : 24,
+ 'OpTypeImage' : 25,
+ 'OpTypeSampler' : 26,
+ 'OpTypeSampledImage' : 27,
+ 'OpTypeArray' : 28,
+ 'OpTypeRuntimeArray' : 29,
+ 'OpTypeStruct' : 30,
+ 'OpTypeOpaque' : 31,
+ 'OpTypePointer' : 32,
+ 'OpTypeFunction' : 33,
+ 'OpTypeEvent' : 34,
+ 'OpTypeDeviceEvent' : 35,
+ 'OpTypeReserveId' : 36,
+ 'OpTypeQueue' : 37,
+ 'OpTypePipe' : 38,
+ 'OpTypeForwardPointer' : 39,
+ 'OpConstantTrue' : 41,
+ 'OpConstantFalse' : 42,
+ 'OpConstant' : 43,
+ 'OpConstantComposite' : 44,
+ 'OpConstantSampler' : 45,
+ 'OpConstantNull' : 46,
+ 'OpSpecConstantTrue' : 48,
+ 'OpSpecConstantFalse' : 49,
+ 'OpSpecConstant' : 50,
+ 'OpSpecConstantComposite' : 51,
+ 'OpSpecConstantOp' : 52,
+ 'OpFunction' : 54,
+ 'OpFunctionParameter' : 55,
+ 'OpFunctionEnd' : 56,
+ 'OpFunctionCall' : 57,
+ 'OpVariable' : 59,
+ 'OpImageTexelPointer' : 60,
+ 'OpLoad' : 61,
+ 'OpStore' : 62,
+ 'OpCopyMemory' : 63,
+ 'OpCopyMemorySized' : 64,
+ 'OpAccessChain' : 65,
+ 'OpInBoundsAccessChain' : 66,
+ 'OpPtrAccessChain' : 67,
+ 'OpArrayLength' : 68,
+ 'OpGenericPtrMemSemantics' : 69,
+ 'OpInBoundsPtrAccessChain' : 70,
+ 'OpDecorate' : 71,
+ 'OpMemberDecorate' : 72,
+ 'OpDecorationGroup' : 73,
+ 'OpGroupDecorate' : 74,
+ 'OpGroupMemberDecorate' : 75,
+ 'OpVectorExtractDynamic' : 77,
+ 'OpVectorInsertDynamic' : 78,
+ 'OpVectorShuffle' : 79,
+ 'OpCompositeConstruct' : 80,
+ 'OpCompositeExtract' : 81,
+ 'OpCompositeInsert' : 82,
+ 'OpCopyObject' : 83,
+ 'OpTranspose' : 84,
+ 'OpSampledImage' : 86,
+ 'OpImageSampleImplicitLod' : 87,
+ 'OpImageSampleExplicitLod' : 88,
+ 'OpImageSampleDrefImplicitLod' : 89,
+ 'OpImageSampleDrefExplicitLod' : 90,
+ 'OpImageSampleProjImplicitLod' : 91,
+ 'OpImageSampleProjExplicitLod' : 92,
+ 'OpImageSampleProjDrefImplicitLod' : 93,
+ 'OpImageSampleProjDrefExplicitLod' : 94,
+ 'OpImageFetch' : 95,
+ 'OpImageGather' : 96,
+ 'OpImageDrefGather' : 97,
+ 'OpImageRead' : 98,
+ 'OpImageWrite' : 99,
+ 'OpImage' : 100,
+ 'OpImageQueryFormat' : 101,
+ 'OpImageQueryOrder' : 102,
+ 'OpImageQuerySizeLod' : 103,
+ 'OpImageQuerySize' : 104,
+ 'OpImageQueryLod' : 105,
+ 'OpImageQueryLevels' : 106,
+ 'OpImageQuerySamples' : 107,
+ 'OpConvertFToU' : 109,
+ 'OpConvertFToS' : 110,
+ 'OpConvertSToF' : 111,
+ 'OpConvertUToF' : 112,
+ 'OpUConvert' : 113,
+ 'OpSConvert' : 114,
+ 'OpFConvert' : 115,
+ 'OpQuantizeToF16' : 116,
+ 'OpConvertPtrToU' : 117,
+ 'OpSatConvertSToU' : 118,
+ 'OpSatConvertUToS' : 119,
+ 'OpConvertUToPtr' : 120,
+ 'OpPtrCastToGeneric' : 121,
+ 'OpGenericCastToPtr' : 122,
+ 'OpGenericCastToPtrExplicit' : 123,
+ 'OpBitcast' : 124,
+ 'OpSNegate' : 126,
+ 'OpFNegate' : 127,
+ 'OpIAdd' : 128,
+ 'OpFAdd' : 129,
+ 'OpISub' : 130,
+ 'OpFSub' : 131,
+ 'OpIMul' : 132,
+ 'OpFMul' : 133,
+ 'OpUDiv' : 134,
+ 'OpSDiv' : 135,
+ 'OpFDiv' : 136,
+ 'OpUMod' : 137,
+ 'OpSRem' : 138,
+ 'OpSMod' : 139,
+ 'OpFRem' : 140,
+ 'OpFMod' : 141,
+ 'OpVectorTimesScalar' : 142,
+ 'OpMatrixTimesScalar' : 143,
+ 'OpVectorTimesMatrix' : 144,
+ 'OpMatrixTimesVector' : 145,
+ 'OpMatrixTimesMatrix' : 146,
+ 'OpOuterProduct' : 147,
+ 'OpDot' : 148,
+ 'OpIAddCarry' : 149,
+ 'OpISubBorrow' : 150,
+ 'OpUMulExtended' : 151,
+ 'OpSMulExtended' : 152,
+ 'OpAny' : 154,
+ 'OpAll' : 155,
+ 'OpIsNan' : 156,
+ 'OpIsInf' : 157,
+ 'OpIsFinite' : 158,
+ 'OpIsNormal' : 159,
+ 'OpSignBitSet' : 160,
+ 'OpLessOrGreater' : 161,
+ 'OpOrdered' : 162,
+ 'OpUnordered' : 163,
+ 'OpLogicalEqual' : 164,
+ 'OpLogicalNotEqual' : 165,
+ 'OpLogicalOr' : 166,
+ 'OpLogicalAnd' : 167,
+ 'OpLogicalNot' : 168,
+ 'OpSelect' : 169,
+ 'OpIEqual' : 170,
+ 'OpINotEqual' : 171,
+ 'OpUGreaterThan' : 172,
+ 'OpSGreaterThan' : 173,
+ 'OpUGreaterThanEqual' : 174,
+ 'OpSGreaterThanEqual' : 175,
+ 'OpULessThan' : 176,
+ 'OpSLessThan' : 177,
+ 'OpULessThanEqual' : 178,
+ 'OpSLessThanEqual' : 179,
+ 'OpFOrdEqual' : 180,
+ 'OpFUnordEqual' : 181,
+ 'OpFOrdNotEqual' : 182,
+ 'OpFUnordNotEqual' : 183,
+ 'OpFOrdLessThan' : 184,
+ 'OpFUnordLessThan' : 185,
+ 'OpFOrdGreaterThan' : 186,
+ 'OpFUnordGreaterThan' : 187,
+ 'OpFOrdLessThanEqual' : 188,
+ 'OpFUnordLessThanEqual' : 189,
+ 'OpFOrdGreaterThanEqual' : 190,
+ 'OpFUnordGreaterThanEqual' : 191,
+ 'OpShiftRightLogical' : 194,
+ 'OpShiftRightArithmetic' : 195,
+ 'OpShiftLeftLogical' : 196,
+ 'OpBitwiseOr' : 197,
+ 'OpBitwiseXor' : 198,
+ 'OpBitwiseAnd' : 199,
+ 'OpNot' : 200,
+ 'OpBitFieldInsert' : 201,
+ 'OpBitFieldSExtract' : 202,
+ 'OpBitFieldUExtract' : 203,
+ 'OpBitReverse' : 204,
+ 'OpBitCount' : 205,
+ 'OpDPdx' : 207,
+ 'OpDPdy' : 208,
+ 'OpFwidth' : 209,
+ 'OpDPdxFine' : 210,
+ 'OpDPdyFine' : 211,
+ 'OpFwidthFine' : 212,
+ 'OpDPdxCoarse' : 213,
+ 'OpDPdyCoarse' : 214,
+ 'OpFwidthCoarse' : 215,
+ 'OpEmitVertex' : 218,
+ 'OpEndPrimitive' : 219,
+ 'OpEmitStreamVertex' : 220,
+ 'OpEndStreamPrimitive' : 221,
+ 'OpControlBarrier' : 224,
+ 'OpMemoryBarrier' : 225,
+ 'OpAtomicLoad' : 227,
+ 'OpAtomicStore' : 228,
+ 'OpAtomicExchange' : 229,
+ 'OpAtomicCompareExchange' : 230,
+ 'OpAtomicCompareExchangeWeak' : 231,
+ 'OpAtomicIIncrement' : 232,
+ 'OpAtomicIDecrement' : 233,
+ 'OpAtomicIAdd' : 234,
+ 'OpAtomicISub' : 235,
+ 'OpAtomicSMin' : 236,
+ 'OpAtomicUMin' : 237,
+ 'OpAtomicSMax' : 238,
+ 'OpAtomicUMax' : 239,
+ 'OpAtomicAnd' : 240,
+ 'OpAtomicOr' : 241,
+ 'OpAtomicXor' : 242,
+ 'OpPhi' : 245,
+ 'OpLoopMerge' : 246,
+ 'OpSelectionMerge' : 247,
+ 'OpLabel' : 248,
+ 'OpBranch' : 249,
+ 'OpBranchConditional' : 250,
+ 'OpSwitch' : 251,
+ 'OpKill' : 252,
+ 'OpReturn' : 253,
+ 'OpReturnValue' : 254,
+ 'OpUnreachable' : 255,
+ 'OpLifetimeStart' : 256,
+ 'OpLifetimeStop' : 257,
+ 'OpGroupAsyncCopy' : 259,
+ 'OpGroupWaitEvents' : 260,
+ 'OpGroupAll' : 261,
+ 'OpGroupAny' : 262,
+ 'OpGroupBroadcast' : 263,
+ 'OpGroupIAdd' : 264,
+ 'OpGroupFAdd' : 265,
+ 'OpGroupFMin' : 266,
+ 'OpGroupUMin' : 267,
+ 'OpGroupSMin' : 268,
+ 'OpGroupFMax' : 269,
+ 'OpGroupUMax' : 270,
+ 'OpGroupSMax' : 271,
+ 'OpReadPipe' : 274,
+ 'OpWritePipe' : 275,
+ 'OpReservedReadPipe' : 276,
+ 'OpReservedWritePipe' : 277,
+ 'OpReserveReadPipePackets' : 278,
+ 'OpReserveWritePipePackets' : 279,
+ 'OpCommitReadPipe' : 280,
+ 'OpCommitWritePipe' : 281,
+ 'OpIsValidReserveId' : 282,
+ 'OpGetNumPipePackets' : 283,
+ 'OpGetMaxPipePackets' : 284,
+ 'OpGroupReserveReadPipePackets' : 285,
+ 'OpGroupReserveWritePipePackets' : 286,
+ 'OpGroupCommitReadPipe' : 287,
+ 'OpGroupCommitWritePipe' : 288,
+ 'OpEnqueueMarker' : 291,
+ 'OpEnqueueKernel' : 292,
+ 'OpGetKernelNDrangeSubGroupCount' : 293,
+ 'OpGetKernelNDrangeMaxSubGroupSize' : 294,
+ 'OpGetKernelWorkGroupSize' : 295,
+ 'OpGetKernelPreferredWorkGroupSizeMultiple' : 296,
+ 'OpRetainEvent' : 297,
+ 'OpReleaseEvent' : 298,
+ 'OpCreateUserEvent' : 299,
+ 'OpIsValidEvent' : 300,
+ 'OpSetUserEventStatus' : 301,
+ 'OpCaptureEventProfilingInfo' : 302,
+ 'OpGetDefaultQueue' : 303,
+ 'OpBuildNDRange' : 304,
+ 'OpImageSparseSampleImplicitLod' : 305,
+ 'OpImageSparseSampleExplicitLod' : 306,
+ 'OpImageSparseSampleDrefImplicitLod' : 307,
+ 'OpImageSparseSampleDrefExplicitLod' : 308,
+ 'OpImageSparseSampleProjImplicitLod' : 309,
+ 'OpImageSparseSampleProjExplicitLod' : 310,
+ 'OpImageSparseSampleProjDrefImplicitLod' : 311,
+ 'OpImageSparseSampleProjDrefExplicitLod' : 312,
+ 'OpImageSparseFetch' : 313,
+ 'OpImageSparseGather' : 314,
+ 'OpImageSparseDrefGather' : 315,
+ 'OpImageSparseTexelsResident' : 316,
+ 'OpNoLine' : 317,
+ 'OpAtomicFlagTestAndSet' : 318,
+ 'OpAtomicFlagClear' : 319,
+ 'OpImageSparseRead' : 320,
+ 'OpSizeOf' : 321,
+ 'OpTypePipeStorage' : 322,
+ 'OpConstantPipeStorage' : 323,
+ 'OpCreatePipeFromPipeStorage' : 324,
+ 'OpGetKernelLocalSizeForSubgroupCount' : 325,
+ 'OpGetKernelMaxNumSubgroups' : 326,
+ 'OpTypeNamedBarrier' : 327,
+ 'OpNamedBarrierInitialize' : 328,
+ 'OpMemoryNamedBarrier' : 329,
+ 'OpModuleProcessed' : 330,
+ 'OpSubgroupBallotKHR' : 4421,
+ 'OpSubgroupFirstInvocationKHR' : 4422,
+ 'OpSubgroupAllKHR' : 4428,
+ 'OpSubgroupAnyKHR' : 4429,
+ 'OpSubgroupAllEqualKHR' : 4430,
+ 'OpSubgroupReadInvocationKHR' : 4432,
+ },
+
+}
+
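The top-level constants in spirv.py and spirv.lua (MagicNumber, OpCodeMask, WordCountShift) are what a consumer needs for a first-pass scan of a SPIR-V binary: the first of the five module header words is the magic number, and each instruction's leading word packs its word count in the upper 16 bits and its opcode in the lower 16 bits. A hedged C sketch under those assumptions; the one-instruction word stream below is hypothetical, not real compiler output:

    #include <stdint.h>
    #include <stdio.h>

    #define SPV_MAGIC_NUMBER     0x07230203u  /* spv['MagicNumber'] above */
    #define SPV_OP_CODE_MASK     0xffffu      /* spv['OpCodeMask'] above */
    #define SPV_WORD_COUNT_SHIFT 16           /* spv['WordCountShift'] above */

    int main(void) {
        /* A minimal, hypothetical module: 5 header words followed by one OpNop. */
        const uint32_t words[] = {
            SPV_MAGIC_NUMBER, 0x00010100u /* version 1.1 */, 0u, 1u, 0u,
            (1u << SPV_WORD_COUNT_SHIFT) | 0u /* word count 1, opcode 0 = OpNop */
        };
        const size_t count = sizeof(words) / sizeof(words[0]);

        if (words[0] != SPV_MAGIC_NUMBER) {
            fprintf(stderr, "not a SPIR-V module\n");
            return 1;
        }
        /* Instructions start after the 5-word module header. */
        for (size_t i = 5; i < count; ) {
            uint32_t opcode     = words[i] & SPV_OP_CODE_MASK;
            uint32_t word_count = words[i] >> SPV_WORD_COUNT_SHIFT;
            printf("opcode %u, %u word(s)\n", opcode, word_count);
            i += word_count ? word_count : 1; /* guard against a malformed 0 count */
        }
        return 0;
    }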
diff --git a/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vk_icd.h b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vk_icd.h
new file mode 100644
index 0000000..668a4d1
--- /dev/null
+++ b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vk_icd.h
@@ -0,0 +1,146 @@
+//
+// File: vk_icd.h
+//
+/*
+ * Copyright (c) 2015-2016 The Khronos Group Inc.
+ * Copyright (c) 2015-2016 Valve Corporation
+ * Copyright (c) 2015-2016 LunarG, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef VKICD_H
+#define VKICD_H
+
+#include "vulkan.h"
+#include <stdbool.h>
+
+// Loader-ICD version negotiation API. Versions add the following features:
+// Version 0 - Initial. Doesn't support vk_icdGetInstanceProcAddr
+// or vk_icdNegotiateLoaderICDInterfaceVersion.
+// Version 1 - Add support for vk_icdGetInstanceProcAddr.
+// Version 2 - Add Loader/ICD Interface version negotiation
+// via vk_icdNegotiateLoaderICDInterfaceVersion.
+// Version 3 - Add ICD creation/destruction of KHR_surface objects.
+// Version 4 - Add unknown physical device extension querying via
+// vk_icdGetPhysicalDeviceProcAddr.
+#define CURRENT_LOADER_ICD_INTERFACE_VERSION 4
+#define MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION 0
+#define MIN_PHYS_DEV_EXTENSION_ICD_INTERFACE_VERSION 4
+typedef VkResult (VKAPI_PTR *PFN_vkNegotiateLoaderICDInterfaceVersion)(uint32_t *pVersion);
+
+// This is defined in vk_layer.h which will be found by the loader, but if an ICD is building against this
+// file directly, it won't be found.
+#ifndef PFN_GetPhysicalDeviceProcAddr
+typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_GetPhysicalDeviceProcAddr)(VkInstance instance, const char* pName);
+#endif
+
+/*
+ * The ICD must reserve space for a pointer for the loader's dispatch
+ * table, at the start of <each object>.
+ * The ICD must initialize this variable using the set_loader_magic_value() helper below.
+ */
+
+#define ICD_LOADER_MAGIC 0x01CDC0DE
+
+typedef union {
+ uintptr_t loaderMagic;
+ void *loaderData;
+} VK_LOADER_DATA;
+
+static inline void set_loader_magic_value(void *pNewObject) {
+ VK_LOADER_DATA *loader_info = (VK_LOADER_DATA *)pNewObject;
+ loader_info->loaderMagic = ICD_LOADER_MAGIC;
+}
+
+static inline bool valid_loader_magic_value(void *pNewObject) {
+ const VK_LOADER_DATA *loader_info = (VK_LOADER_DATA *)pNewObject;
+ return (loader_info->loaderMagic & 0xffffffff) == ICD_LOADER_MAGIC;
+}
+
+/*
+ * Windows and Linux ICDs will treat VkSurfaceKHR as a pointer to a struct that
+ * contains the platform-specific connection and surface information.
+ */
+typedef enum {
+ VK_ICD_WSI_PLATFORM_MIR,
+ VK_ICD_WSI_PLATFORM_WAYLAND,
+ VK_ICD_WSI_PLATFORM_WIN32,
+ VK_ICD_WSI_PLATFORM_XCB,
+ VK_ICD_WSI_PLATFORM_XLIB,
+ VK_ICD_WSI_PLATFORM_DISPLAY
+} VkIcdWsiPlatform;
+
+typedef struct {
+ VkIcdWsiPlatform platform;
+} VkIcdSurfaceBase;
+
+#ifdef VK_USE_PLATFORM_MIR_KHR
+typedef struct {
+ VkIcdSurfaceBase base;
+ MirConnection *connection;
+ MirSurface *mirSurface;
+} VkIcdSurfaceMir;
+#endif // VK_USE_PLATFORM_MIR_KHR
+
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+typedef struct {
+ VkIcdSurfaceBase base;
+ struct wl_display *display;
+ struct wl_surface *surface;
+} VkIcdSurfaceWayland;
+#endif // VK_USE_PLATFORM_WAYLAND_KHR
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+typedef struct {
+ VkIcdSurfaceBase base;
+ HINSTANCE hinstance;
+ HWND hwnd;
+} VkIcdSurfaceWin32;
+#endif // VK_USE_PLATFORM_WIN32_KHR
+
+#ifdef VK_USE_PLATFORM_XCB_KHR
+typedef struct {
+ VkIcdSurfaceBase base;
+ xcb_connection_t *connection;
+ xcb_window_t window;
+} VkIcdSurfaceXcb;
+#endif // VK_USE_PLATFORM_XCB_KHR
+
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+typedef struct {
+ VkIcdSurfaceBase base;
+ Display *dpy;
+ Window window;
+} VkIcdSurfaceXlib;
+#endif // VK_USE_PLATFORM_XLIB_KHR
+
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+typedef struct {
+ ANativeWindow* window;
+} VkIcdSurfaceAndroid;
+#endif //VK_USE_PLATFORM_ANDROID_KHR
+
+typedef struct {
+ VkIcdSurfaceBase base;
+ VkDisplayModeKHR displayMode;
+ uint32_t planeIndex;
+ uint32_t planeStackIndex;
+ VkSurfaceTransformFlagBitsKHR transform;
+ float globalAlpha;
+ VkDisplayPlaneAlphaFlagBitsKHR alphaMode;
+ VkExtent2D imageExtent;
+} VkIcdSurfaceDisplay;
+
+#endif // VKICD_H
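vk_icd.h above pins down two pieces of the loader/ICD contract: interface-version negotiation through the exported vk_icdNegotiateLoaderICDInterfaceVersion entry point, and the requirement that every dispatchable object the ICD hands back begins with space for the loader's dispatch pointer, stamped via set_loader_magic_value(). A hedged C sketch of both follows, assuming vk_icd.h and the Vulkan headers it includes are on the include path; IcdDevice and icd_create_device are illustrative names, not part of any real driver:

    #include <stdlib.h>
    #include "vk_icd.h"   /* the header added above */

    /* Accept whatever interface version the loader offers, capped at the
     * version this header targets. */
    VKAPI_ATTR VkResult VKAPI_CALL
    vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pVersion)
    {
        if (*pVersion > CURRENT_LOADER_ICD_INTERFACE_VERSION)
            *pVersion = CURRENT_LOADER_ICD_INTERFACE_VERSION;
        return VK_SUCCESS;
    }

    /* Illustrative ICD-internal object: the loader's dispatch slot must be
     * the very first member of every dispatchable object. */
    typedef struct IcdDevice {
        VK_LOADER_DATA loader_data;  /* reserved for the loader's dispatch pointer */
        /* ... driver-private state would follow ... */
    } IcdDevice;

    static IcdDevice *icd_create_device(void)
    {
        IcdDevice *dev = (IcdDevice *)calloc(1, sizeof *dev);
        if (dev)
            set_loader_magic_value(dev);          /* stamps ICD_LOADER_MAGIC */
        return dev;
    }

    static bool icd_object_looks_valid(void *object)
    {
        return valid_loader_magic_value(object);  /* checks the stamp */
    }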
diff --git a/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vk_layer.h b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vk_layer.h
new file mode 100644
index 0000000..5458ff2
--- /dev/null
+++ b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vk_layer.h
@@ -0,0 +1,143 @@
+//
+// File: vk_layer.h
+//
+/*
+ * Copyright (c) 2015-2017 The Khronos Group Inc.
+ * Copyright (c) 2015-2017 Valve Corporation
+ * Copyright (c) 2015-2017 LunarG, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/* Need to define dispatch table
+ * Core struct can then have ptr to dispatch table at the top
+ * Along with object ptrs for current and next OBJ
+ */
+#pragma once
+
+#include "vulkan.h"
+#if defined(__GNUC__) && __GNUC__ >= 4
+#define VK_LAYER_EXPORT __attribute__((visibility("default")))
+#elif defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590)
+#define VK_LAYER_EXPORT __attribute__((visibility("default")))
+#else
+#define VK_LAYER_EXPORT
+#endif
+
+// Definition for VkLayerDispatchTable and VkLayerInstanceDispatchTable now appear in externally generated header
+#include "vk_layer_dispatch_table.h"
+
+#define MAX_NUM_UNKNOWN_EXTS 250
+
+ // Loader-Layer version negotiation API. Versions add the following features:
+ // Versions 0/1 - Initial. Doesn't support vk_layerGetPhysicalDeviceProcAddr
+ // or vk_icdNegotiateLoaderLayerInterfaceVersion.
+ // Version 2 - Add support for vk_layerGetPhysicalDeviceProcAddr and
+ // vk_icdNegotiateLoaderLayerInterfaceVersion.
+#define CURRENT_LOADER_LAYER_INTERFACE_VERSION 2
+#define MIN_SUPPORTED_LOADER_LAYER_INTERFACE_VERSION 1
+
+// Version negotiation values
+typedef enum VkNegotiateLayerStructType {
+ LAYER_NEGOTIATE_UNINTIALIZED = 0,
+ LAYER_NEGOTIATE_INTERFACE_STRUCT = 1,
+} VkNegotiateLayerStructType;
+
+// Version negotiation structures
+typedef struct VkNegotiateLayerInterface {
+ VkNegotiateLayerStructType sType;
+ void *pNext;
+ uint32_t loaderLayerInterfaceVersion;
+ PFN_vkGetInstanceProcAddr pfnGetInstanceProcAddr;
+ PFN_vkGetDeviceProcAddr pfnGetDeviceProcAddr;
+ PFN_GetPhysicalDeviceProcAddr pfnGetPhysicalDeviceProcAddr;
+} VkNegotiateLayerInterface;
+
+// Version negotiation functions
+typedef VkResult (VKAPI_PTR *PFN_vkNegotiateLoaderLayerInterfaceVersion)(VkNegotiateLayerInterface *pVersionStruct);
+
+// Function prototype for unknown physical device extension command
+typedef VkResult(VKAPI_PTR *PFN_PhysDevExt)(VkPhysicalDevice phys_device, ...);
+
+// ------------------------------------------------------------------------------------------------
+// CreateInstance and CreateDevice support structures
+
+/* Sub type of structure for instance and device loader ext of CreateInfo.
+ * When sType == VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO
+ * or sType == VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO
+ * then VkLayerFunction indicates struct type pointed to by pNext
+ */
+typedef enum VkLayerFunction_ {
+ VK_LAYER_LINK_INFO = 0,
+ VK_LOADER_DATA_CALLBACK = 1
+} VkLayerFunction;
+
+typedef struct VkLayerInstanceLink_ {
+ struct VkLayerInstanceLink_ *pNext;
+ PFN_vkGetInstanceProcAddr pfnNextGetInstanceProcAddr;
+ PFN_GetPhysicalDeviceProcAddr pfnNextGetPhysicalDeviceProcAddr;
+} VkLayerInstanceLink;
+
+/*
+ * When creating the device chain the loader needs to pass
+ * down information about its device structure needed at
+ * the end of the chain. Passing the data via the
+ * VkLayerDeviceInfo avoids issues with finding the
+ * exact instance being used.
+ */
+typedef struct VkLayerDeviceInfo_ {
+ void *device_info;
+ PFN_vkGetInstanceProcAddr pfnNextGetInstanceProcAddr;
+} VkLayerDeviceInfo;
+
+typedef VkResult (VKAPI_PTR *PFN_vkSetInstanceLoaderData)(VkInstance instance,
+ void *object);
+typedef VkResult (VKAPI_PTR *PFN_vkSetDeviceLoaderData)(VkDevice device,
+ void *object);
+
+typedef struct {
+ VkStructureType sType; // VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO
+ const void *pNext;
+ VkLayerFunction function;
+ union {
+ VkLayerInstanceLink *pLayerInfo;
+ PFN_vkSetInstanceLoaderData pfnSetInstanceLoaderData;
+ } u;
+} VkLayerInstanceCreateInfo;
+
+typedef struct VkLayerDeviceLink_ {
+ struct VkLayerDeviceLink_ *pNext;
+ PFN_vkGetInstanceProcAddr pfnNextGetInstanceProcAddr;
+ PFN_vkGetDeviceProcAddr pfnNextGetDeviceProcAddr;
+} VkLayerDeviceLink;
+
+typedef struct {
+ VkStructureType sType; // VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO
+ const void *pNext;
+ VkLayerFunction function;
+ union {
+ VkLayerDeviceLink *pLayerInfo;
+ PFN_vkSetDeviceLoaderData pfnSetDeviceLoaderData;
+ } u;
+} VkLayerDeviceCreateInfo;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vk_layer_dispatch_table.h b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vk_layer_dispatch_table.h
new file mode 100644
index 0000000..141ebc2
--- /dev/null
+++ b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vk_layer_dispatch_table.h
@@ -0,0 +1,410 @@
+// *** THIS FILE IS GENERATED - DO NOT EDIT ***
+// See loader_extension_generator.py for modifications
+
+/*
+ * Copyright (c) 2015-2017 The Khronos Group Inc.
+ * Copyright (c) 2015-2017 Valve Corporation
+ * Copyright (c) 2015-2017 LunarG, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Author: Mark Lobodzinski <mark@lunarg.com>
+ * Author: Mark Young <marky@lunarg.com>
+ */
+
+#pragma once
+
+typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_GetPhysicalDeviceProcAddr)(VkInstance instance, const char* pName);
+
+// Instance function pointer dispatch table
+typedef struct VkLayerInstanceDispatchTable_ {
+ // Manually add in GetPhysicalDeviceProcAddr entry
+ PFN_GetPhysicalDeviceProcAddr GetPhysicalDeviceProcAddr;
+
+ // ---- Core 1_0 commands
+ PFN_vkCreateInstance CreateInstance;
+ PFN_vkDestroyInstance DestroyInstance;
+ PFN_vkEnumeratePhysicalDevices EnumeratePhysicalDevices;
+ PFN_vkGetPhysicalDeviceFeatures GetPhysicalDeviceFeatures;
+ PFN_vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties;
+ PFN_vkGetPhysicalDeviceImageFormatProperties GetPhysicalDeviceImageFormatProperties;
+ PFN_vkGetPhysicalDeviceProperties GetPhysicalDeviceProperties;
+ PFN_vkGetPhysicalDeviceQueueFamilyProperties GetPhysicalDeviceQueueFamilyProperties;
+ PFN_vkGetPhysicalDeviceMemoryProperties GetPhysicalDeviceMemoryProperties;
+ PFN_vkGetInstanceProcAddr GetInstanceProcAddr;
+ PFN_vkCreateDevice CreateDevice;
+ PFN_vkEnumerateInstanceExtensionProperties EnumerateInstanceExtensionProperties;
+ PFN_vkEnumerateDeviceExtensionProperties EnumerateDeviceExtensionProperties;
+ PFN_vkEnumerateInstanceLayerProperties EnumerateInstanceLayerProperties;
+ PFN_vkEnumerateDeviceLayerProperties EnumerateDeviceLayerProperties;
+ PFN_vkGetPhysicalDeviceSparseImageFormatProperties GetPhysicalDeviceSparseImageFormatProperties;
+
+ // ---- VK_KHR_surface extension commands
+ PFN_vkDestroySurfaceKHR DestroySurfaceKHR;
+ PFN_vkGetPhysicalDeviceSurfaceSupportKHR GetPhysicalDeviceSurfaceSupportKHR;
+ PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR GetPhysicalDeviceSurfaceCapabilitiesKHR;
+ PFN_vkGetPhysicalDeviceSurfaceFormatsKHR GetPhysicalDeviceSurfaceFormatsKHR;
+ PFN_vkGetPhysicalDeviceSurfacePresentModesKHR GetPhysicalDeviceSurfacePresentModesKHR;
+
+ // ---- VK_KHR_display extension commands
+ PFN_vkGetPhysicalDeviceDisplayPropertiesKHR GetPhysicalDeviceDisplayPropertiesKHR;
+ PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR GetPhysicalDeviceDisplayPlanePropertiesKHR;
+ PFN_vkGetDisplayPlaneSupportedDisplaysKHR GetDisplayPlaneSupportedDisplaysKHR;
+ PFN_vkGetDisplayModePropertiesKHR GetDisplayModePropertiesKHR;
+ PFN_vkCreateDisplayModeKHR CreateDisplayModeKHR;
+ PFN_vkGetDisplayPlaneCapabilitiesKHR GetDisplayPlaneCapabilitiesKHR;
+ PFN_vkCreateDisplayPlaneSurfaceKHR CreateDisplayPlaneSurfaceKHR;
+
+ // ---- VK_KHR_xlib_surface extension commands
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+ PFN_vkCreateXlibSurfaceKHR CreateXlibSurfaceKHR;
+#endif // VK_USE_PLATFORM_XLIB_KHR
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+ PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR GetPhysicalDeviceXlibPresentationSupportKHR;
+#endif // VK_USE_PLATFORM_XLIB_KHR
+
+ // ---- VK_KHR_xcb_surface extension commands
+#ifdef VK_USE_PLATFORM_XCB_KHR
+ PFN_vkCreateXcbSurfaceKHR CreateXcbSurfaceKHR;
+#endif // VK_USE_PLATFORM_XCB_KHR
+#ifdef VK_USE_PLATFORM_XCB_KHR
+ PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR GetPhysicalDeviceXcbPresentationSupportKHR;
+#endif // VK_USE_PLATFORM_XCB_KHR
+
+ // ---- VK_KHR_wayland_surface extension commands
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+ PFN_vkCreateWaylandSurfaceKHR CreateWaylandSurfaceKHR;
+#endif // VK_USE_PLATFORM_WAYLAND_KHR
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+ PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR GetPhysicalDeviceWaylandPresentationSupportKHR;
+#endif // VK_USE_PLATFORM_WAYLAND_KHR
+
+ // ---- VK_KHR_mir_surface extension commands
+#ifdef VK_USE_PLATFORM_MIR_KHR
+ PFN_vkCreateMirSurfaceKHR CreateMirSurfaceKHR;
+#endif // VK_USE_PLATFORM_MIR_KHR
+#ifdef VK_USE_PLATFORM_MIR_KHR
+ PFN_vkGetPhysicalDeviceMirPresentationSupportKHR GetPhysicalDeviceMirPresentationSupportKHR;
+#endif // VK_USE_PLATFORM_MIR_KHR
+
+ // ---- VK_KHR_android_surface extension commands
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+ PFN_vkCreateAndroidSurfaceKHR CreateAndroidSurfaceKHR;
+#endif // VK_USE_PLATFORM_ANDROID_KHR
+
+ // ---- VK_KHR_win32_surface extension commands
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ PFN_vkCreateWin32SurfaceKHR CreateWin32SurfaceKHR;
+#endif // VK_USE_PLATFORM_WIN32_KHR
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR GetPhysicalDeviceWin32PresentationSupportKHR;
+#endif // VK_USE_PLATFORM_WIN32_KHR
+
+ // ---- VK_KHR_get_physical_device_properties2 extension commands
+ PFN_vkGetPhysicalDeviceFeatures2KHR GetPhysicalDeviceFeatures2KHR;
+ PFN_vkGetPhysicalDeviceProperties2KHR GetPhysicalDeviceProperties2KHR;
+ PFN_vkGetPhysicalDeviceFormatProperties2KHR GetPhysicalDeviceFormatProperties2KHR;
+ PFN_vkGetPhysicalDeviceImageFormatProperties2KHR GetPhysicalDeviceImageFormatProperties2KHR;
+ PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR GetPhysicalDeviceQueueFamilyProperties2KHR;
+ PFN_vkGetPhysicalDeviceMemoryProperties2KHR GetPhysicalDeviceMemoryProperties2KHR;
+ PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR GetPhysicalDeviceSparseImageFormatProperties2KHR;
+
+ // ---- VK_KHR_get_surface_capabilities2 extension commands
+ PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR GetPhysicalDeviceSurfaceCapabilities2KHR;
+ PFN_vkGetPhysicalDeviceSurfaceFormats2KHR GetPhysicalDeviceSurfaceFormats2KHR;
+
+ // ---- VK_EXT_debug_report extension commands
+ PFN_vkCreateDebugReportCallbackEXT CreateDebugReportCallbackEXT;
+ PFN_vkDestroyDebugReportCallbackEXT DestroyDebugReportCallbackEXT;
+ PFN_vkDebugReportMessageEXT DebugReportMessageEXT;
+
+ // ---- VK_NV_external_memory_capabilities extension commands
+ PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV GetPhysicalDeviceExternalImageFormatPropertiesNV;
+
+ // ---- VK_KHX_device_group extension commands
+ PFN_vkGetPhysicalDevicePresentRectanglesKHX GetPhysicalDevicePresentRectanglesKHX;
+
+ // ---- VK_NN_vi_surface extension commands
+#ifdef VK_USE_PLATFORM_VI_NN
+ PFN_vkCreateViSurfaceNN CreateViSurfaceNN;
+#endif // VK_USE_PLATFORM_VI_NN
+
+ // ---- VK_KHX_device_group_creation extension commands
+ PFN_vkEnumeratePhysicalDeviceGroupsKHX EnumeratePhysicalDeviceGroupsKHX;
+
+ // ---- VK_KHX_external_memory_capabilities extension commands
+ PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHX GetPhysicalDeviceExternalBufferPropertiesKHX;
+
+ // ---- VK_KHX_external_semaphore_capabilities extension commands
+ PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHX GetPhysicalDeviceExternalSemaphorePropertiesKHX;
+
+ // ---- VK_NVX_device_generated_commands extension commands
+ PFN_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX GetPhysicalDeviceGeneratedCommandsPropertiesNVX;
+
+ // ---- VK_EXT_direct_mode_display extension commands
+ PFN_vkReleaseDisplayEXT ReleaseDisplayEXT;
+
+ // ---- VK_EXT_acquire_xlib_display extension commands
+#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
+ PFN_vkAcquireXlibDisplayEXT AcquireXlibDisplayEXT;
+#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT
+#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
+ PFN_vkGetRandROutputDisplayEXT GetRandROutputDisplayEXT;
+#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT
+
+ // ---- VK_EXT_display_surface_counter extension commands
+ PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT GetPhysicalDeviceSurfaceCapabilities2EXT;
+
+ // ---- VK_MVK_ios_surface extension commands
+#ifdef VK_USE_PLATFORM_IOS_MVK
+ PFN_vkCreateIOSSurfaceMVK CreateIOSSurfaceMVK;
+#endif // VK_USE_PLATFORM_IOS_MVK
+
+ // ---- VK_MVK_macos_surface extension commands
+#ifdef VK_USE_PLATFORM_MACOS_MVK
+ PFN_vkCreateMacOSSurfaceMVK CreateMacOSSurfaceMVK;
+#endif // VK_USE_PLATFORM_MACOS_MVK
+} VkLayerInstanceDispatchTable;
+
+// Device function pointer dispatch table
+typedef struct VkLayerDispatchTable_ {
+
+ // ---- Core 1_0 commands
+ PFN_vkGetDeviceProcAddr GetDeviceProcAddr;
+ PFN_vkDestroyDevice DestroyDevice;
+ PFN_vkGetDeviceQueue GetDeviceQueue;
+ PFN_vkQueueSubmit QueueSubmit;
+ PFN_vkQueueWaitIdle QueueWaitIdle;
+ PFN_vkDeviceWaitIdle DeviceWaitIdle;
+ PFN_vkAllocateMemory AllocateMemory;
+ PFN_vkFreeMemory FreeMemory;
+ PFN_vkMapMemory MapMemory;
+ PFN_vkUnmapMemory UnmapMemory;
+ PFN_vkFlushMappedMemoryRanges FlushMappedMemoryRanges;
+ PFN_vkInvalidateMappedMemoryRanges InvalidateMappedMemoryRanges;
+ PFN_vkGetDeviceMemoryCommitment GetDeviceMemoryCommitment;
+ PFN_vkBindBufferMemory BindBufferMemory;
+ PFN_vkBindImageMemory BindImageMemory;
+ PFN_vkGetBufferMemoryRequirements GetBufferMemoryRequirements;
+ PFN_vkGetImageMemoryRequirements GetImageMemoryRequirements;
+ PFN_vkGetImageSparseMemoryRequirements GetImageSparseMemoryRequirements;
+ PFN_vkQueueBindSparse QueueBindSparse;
+ PFN_vkCreateFence CreateFence;
+ PFN_vkDestroyFence DestroyFence;
+ PFN_vkResetFences ResetFences;
+ PFN_vkGetFenceStatus GetFenceStatus;
+ PFN_vkWaitForFences WaitForFences;
+ PFN_vkCreateSemaphore CreateSemaphore;
+ PFN_vkDestroySemaphore DestroySemaphore;
+ PFN_vkCreateEvent CreateEvent;
+ PFN_vkDestroyEvent DestroyEvent;
+ PFN_vkGetEventStatus GetEventStatus;
+ PFN_vkSetEvent SetEvent;
+ PFN_vkResetEvent ResetEvent;
+ PFN_vkCreateQueryPool CreateQueryPool;
+ PFN_vkDestroyQueryPool DestroyQueryPool;
+ PFN_vkGetQueryPoolResults GetQueryPoolResults;
+ PFN_vkCreateBuffer CreateBuffer;
+ PFN_vkDestroyBuffer DestroyBuffer;
+ PFN_vkCreateBufferView CreateBufferView;
+ PFN_vkDestroyBufferView DestroyBufferView;
+ PFN_vkCreateImage CreateImage;
+ PFN_vkDestroyImage DestroyImage;
+ PFN_vkGetImageSubresourceLayout GetImageSubresourceLayout;
+ PFN_vkCreateImageView CreateImageView;
+ PFN_vkDestroyImageView DestroyImageView;
+ PFN_vkCreateShaderModule CreateShaderModule;
+ PFN_vkDestroyShaderModule DestroyShaderModule;
+ PFN_vkCreatePipelineCache CreatePipelineCache;
+ PFN_vkDestroyPipelineCache DestroyPipelineCache;
+ PFN_vkGetPipelineCacheData GetPipelineCacheData;
+ PFN_vkMergePipelineCaches MergePipelineCaches;
+ PFN_vkCreateGraphicsPipelines CreateGraphicsPipelines;
+ PFN_vkCreateComputePipelines CreateComputePipelines;
+ PFN_vkDestroyPipeline DestroyPipeline;
+ PFN_vkCreatePipelineLayout CreatePipelineLayout;
+ PFN_vkDestroyPipelineLayout DestroyPipelineLayout;
+ PFN_vkCreateSampler CreateSampler;
+ PFN_vkDestroySampler DestroySampler;
+ PFN_vkCreateDescriptorSetLayout CreateDescriptorSetLayout;
+ PFN_vkDestroyDescriptorSetLayout DestroyDescriptorSetLayout;
+ PFN_vkCreateDescriptorPool CreateDescriptorPool;
+ PFN_vkDestroyDescriptorPool DestroyDescriptorPool;
+ PFN_vkResetDescriptorPool ResetDescriptorPool;
+ PFN_vkAllocateDescriptorSets AllocateDescriptorSets;
+ PFN_vkFreeDescriptorSets FreeDescriptorSets;
+ PFN_vkUpdateDescriptorSets UpdateDescriptorSets;
+ PFN_vkCreateFramebuffer CreateFramebuffer;
+ PFN_vkDestroyFramebuffer DestroyFramebuffer;
+ PFN_vkCreateRenderPass CreateRenderPass;
+ PFN_vkDestroyRenderPass DestroyRenderPass;
+ PFN_vkGetRenderAreaGranularity GetRenderAreaGranularity;
+ PFN_vkCreateCommandPool CreateCommandPool;
+ PFN_vkDestroyCommandPool DestroyCommandPool;
+ PFN_vkResetCommandPool ResetCommandPool;
+ PFN_vkAllocateCommandBuffers AllocateCommandBuffers;
+ PFN_vkFreeCommandBuffers FreeCommandBuffers;
+ PFN_vkBeginCommandBuffer BeginCommandBuffer;
+ PFN_vkEndCommandBuffer EndCommandBuffer;
+ PFN_vkResetCommandBuffer ResetCommandBuffer;
+ PFN_vkCmdBindPipeline CmdBindPipeline;
+ PFN_vkCmdSetViewport CmdSetViewport;
+ PFN_vkCmdSetScissor CmdSetScissor;
+ PFN_vkCmdSetLineWidth CmdSetLineWidth;
+ PFN_vkCmdSetDepthBias CmdSetDepthBias;
+ PFN_vkCmdSetBlendConstants CmdSetBlendConstants;
+ PFN_vkCmdSetDepthBounds CmdSetDepthBounds;
+ PFN_vkCmdSetStencilCompareMask CmdSetStencilCompareMask;
+ PFN_vkCmdSetStencilWriteMask CmdSetStencilWriteMask;
+ PFN_vkCmdSetStencilReference CmdSetStencilReference;
+ PFN_vkCmdBindDescriptorSets CmdBindDescriptorSets;
+ PFN_vkCmdBindIndexBuffer CmdBindIndexBuffer;
+ PFN_vkCmdBindVertexBuffers CmdBindVertexBuffers;
+ PFN_vkCmdDraw CmdDraw;
+ PFN_vkCmdDrawIndexed CmdDrawIndexed;
+ PFN_vkCmdDrawIndirect CmdDrawIndirect;
+ PFN_vkCmdDrawIndexedIndirect CmdDrawIndexedIndirect;
+ PFN_vkCmdDispatch CmdDispatch;
+ PFN_vkCmdDispatchIndirect CmdDispatchIndirect;
+ PFN_vkCmdCopyBuffer CmdCopyBuffer;
+ PFN_vkCmdCopyImage CmdCopyImage;
+ PFN_vkCmdBlitImage CmdBlitImage;
+ PFN_vkCmdCopyBufferToImage CmdCopyBufferToImage;
+ PFN_vkCmdCopyImageToBuffer CmdCopyImageToBuffer;
+ PFN_vkCmdUpdateBuffer CmdUpdateBuffer;
+ PFN_vkCmdFillBuffer CmdFillBuffer;
+ PFN_vkCmdClearColorImage CmdClearColorImage;
+ PFN_vkCmdClearDepthStencilImage CmdClearDepthStencilImage;
+ PFN_vkCmdClearAttachments CmdClearAttachments;
+ PFN_vkCmdResolveImage CmdResolveImage;
+ PFN_vkCmdSetEvent CmdSetEvent;
+ PFN_vkCmdResetEvent CmdResetEvent;
+ PFN_vkCmdWaitEvents CmdWaitEvents;
+ PFN_vkCmdPipelineBarrier CmdPipelineBarrier;
+ PFN_vkCmdBeginQuery CmdBeginQuery;
+ PFN_vkCmdEndQuery CmdEndQuery;
+ PFN_vkCmdResetQueryPool CmdResetQueryPool;
+ PFN_vkCmdWriteTimestamp CmdWriteTimestamp;
+ PFN_vkCmdCopyQueryPoolResults CmdCopyQueryPoolResults;
+ PFN_vkCmdPushConstants CmdPushConstants;
+ PFN_vkCmdBeginRenderPass CmdBeginRenderPass;
+ PFN_vkCmdNextSubpass CmdNextSubpass;
+ PFN_vkCmdEndRenderPass CmdEndRenderPass;
+ PFN_vkCmdExecuteCommands CmdExecuteCommands;
+
+ // ---- VK_KHR_swapchain extension commands
+ PFN_vkCreateSwapchainKHR CreateSwapchainKHR;
+ PFN_vkDestroySwapchainKHR DestroySwapchainKHR;
+ PFN_vkGetSwapchainImagesKHR GetSwapchainImagesKHR;
+ PFN_vkAcquireNextImageKHR AcquireNextImageKHR;
+ PFN_vkQueuePresentKHR QueuePresentKHR;
+
+ // ---- VK_KHR_display_swapchain extension commands
+ PFN_vkCreateSharedSwapchainsKHR CreateSharedSwapchainsKHR;
+
+ // ---- VK_KHR_maintenance1 extension commands
+ PFN_vkTrimCommandPoolKHR TrimCommandPoolKHR;
+
+ // ---- VK_KHR_push_descriptor extension commands
+ PFN_vkCmdPushDescriptorSetKHR CmdPushDescriptorSetKHR;
+
+ // ---- VK_KHR_descriptor_update_template extension commands
+ PFN_vkCreateDescriptorUpdateTemplateKHR CreateDescriptorUpdateTemplateKHR;
+ PFN_vkDestroyDescriptorUpdateTemplateKHR DestroyDescriptorUpdateTemplateKHR;
+ PFN_vkUpdateDescriptorSetWithTemplateKHR UpdateDescriptorSetWithTemplateKHR;
+ PFN_vkCmdPushDescriptorSetWithTemplateKHR CmdPushDescriptorSetWithTemplateKHR;
+
+ // ---- VK_KHR_shared_presentable_image extension commands
+ PFN_vkGetSwapchainStatusKHR GetSwapchainStatusKHR;
+
+ // ---- VK_EXT_debug_marker extension commands
+ PFN_vkDebugMarkerSetObjectTagEXT DebugMarkerSetObjectTagEXT;
+ PFN_vkDebugMarkerSetObjectNameEXT DebugMarkerSetObjectNameEXT;
+ PFN_vkCmdDebugMarkerBeginEXT CmdDebugMarkerBeginEXT;
+ PFN_vkCmdDebugMarkerEndEXT CmdDebugMarkerEndEXT;
+ PFN_vkCmdDebugMarkerInsertEXT CmdDebugMarkerInsertEXT;
+
+ // ---- VK_AMD_draw_indirect_count extension commands
+ PFN_vkCmdDrawIndirectCountAMD CmdDrawIndirectCountAMD;
+ PFN_vkCmdDrawIndexedIndirectCountAMD CmdDrawIndexedIndirectCountAMD;
+
+ // ---- VK_NV_external_memory_win32 extension commands
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ PFN_vkGetMemoryWin32HandleNV GetMemoryWin32HandleNV;
+#endif // VK_USE_PLATFORM_WIN32_KHR
+
+ // ---- VK_KHX_device_group extension commands
+ PFN_vkGetDeviceGroupPeerMemoryFeaturesKHX GetDeviceGroupPeerMemoryFeaturesKHX;
+ PFN_vkBindBufferMemory2KHX BindBufferMemory2KHX;
+ PFN_vkBindImageMemory2KHX BindImageMemory2KHX;
+ PFN_vkCmdSetDeviceMaskKHX CmdSetDeviceMaskKHX;
+ PFN_vkGetDeviceGroupPresentCapabilitiesKHX GetDeviceGroupPresentCapabilitiesKHX;
+ PFN_vkGetDeviceGroupSurfacePresentModesKHX GetDeviceGroupSurfacePresentModesKHX;
+ PFN_vkAcquireNextImage2KHX AcquireNextImage2KHX;
+ PFN_vkCmdDispatchBaseKHX CmdDispatchBaseKHX;
+
+ // ---- VK_KHX_external_memory_win32 extension commands
+#ifdef VK_USE_PLATFORM_WIN32_KHX
+ PFN_vkGetMemoryWin32HandleKHX GetMemoryWin32HandleKHX;
+#endif // VK_USE_PLATFORM_WIN32_KHX
+#ifdef VK_USE_PLATFORM_WIN32_KHX
+ PFN_vkGetMemoryWin32HandlePropertiesKHX GetMemoryWin32HandlePropertiesKHX;
+#endif // VK_USE_PLATFORM_WIN32_KHX
+
+ // ---- VK_KHX_external_memory_fd extension commands
+ PFN_vkGetMemoryFdKHX GetMemoryFdKHX;
+ PFN_vkGetMemoryFdPropertiesKHX GetMemoryFdPropertiesKHX;
+
+ // ---- VK_KHX_external_semaphore_win32 extension commands
+#ifdef VK_USE_PLATFORM_WIN32_KHX
+ PFN_vkImportSemaphoreWin32HandleKHX ImportSemaphoreWin32HandleKHX;
+#endif // VK_USE_PLATFORM_WIN32_KHX
+#ifdef VK_USE_PLATFORM_WIN32_KHX
+ PFN_vkGetSemaphoreWin32HandleKHX GetSemaphoreWin32HandleKHX;
+#endif // VK_USE_PLATFORM_WIN32_KHX
+
+ // ---- VK_KHX_external_semaphore_fd extension commands
+ PFN_vkImportSemaphoreFdKHX ImportSemaphoreFdKHX;
+ PFN_vkGetSemaphoreFdKHX GetSemaphoreFdKHX;
+
+ // ---- VK_NVX_device_generated_commands extension commands
+ PFN_vkCmdProcessCommandsNVX CmdProcessCommandsNVX;
+ PFN_vkCmdReserveSpaceForCommandsNVX CmdReserveSpaceForCommandsNVX;
+ PFN_vkCreateIndirectCommandsLayoutNVX CreateIndirectCommandsLayoutNVX;
+ PFN_vkDestroyIndirectCommandsLayoutNVX DestroyIndirectCommandsLayoutNVX;
+ PFN_vkCreateObjectTableNVX CreateObjectTableNVX;
+ PFN_vkDestroyObjectTableNVX DestroyObjectTableNVX;
+ PFN_vkRegisterObjectsNVX RegisterObjectsNVX;
+ PFN_vkUnregisterObjectsNVX UnregisterObjectsNVX;
+
+ // ---- VK_NV_clip_space_w_scaling extension commands
+ PFN_vkCmdSetViewportWScalingNV CmdSetViewportWScalingNV;
+
+ // ---- VK_EXT_display_control extension commands
+ PFN_vkDisplayPowerControlEXT DisplayPowerControlEXT;
+ PFN_vkRegisterDeviceEventEXT RegisterDeviceEventEXT;
+ PFN_vkRegisterDisplayEventEXT RegisterDisplayEventEXT;
+ PFN_vkGetSwapchainCounterEXT GetSwapchainCounterEXT;
+
+ // ---- VK_GOOGLE_display_timing extension commands
+ PFN_vkGetRefreshCycleDurationGOOGLE GetRefreshCycleDurationGOOGLE;
+ PFN_vkGetPastPresentationTimingGOOGLE GetPastPresentationTimingGOOGLE;
+
+ // ---- VK_EXT_discard_rectangles extension commands
+ PFN_vkCmdSetDiscardRectangleEXT CmdSetDiscardRectangleEXT;
+
+ // ---- VK_EXT_hdr_metadata extension commands
+ PFN_vkSetHdrMetadataEXT SetHdrMetadataEXT;
+} VkLayerDispatchTable;
+
+
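Editor's sketch (not part of this commit): the device dispatch table declared above is normally filled by a loader or layer that resolves each member by name through the next vkGetDeviceProcAddr in the chain. fill_device_table is a hypothetical helper, the include name is assumed, and only a few entries are shown:

    #include <vulkan/vulkan.h>
    #include "vk_layer_dispatch_table.h"   /* assumed name/path of the dispatch-table header added above */

    static void fill_device_table(VkLayerDispatchTable *table,
                                  VkDevice device,
                                  PFN_vkGetDeviceProcAddr gdpa)
    {
        table->GetDeviceProcAddr = gdpa;
        table->DestroyDevice     = (PFN_vkDestroyDevice)gdpa(device, "vkDestroyDevice");
        table->QueueSubmit       = (PFN_vkQueueSubmit)gdpa(device, "vkQueueSubmit");
        table->QueueWaitIdle     = (PFN_vkQueueWaitIdle)gdpa(device, "vkQueueWaitIdle");
        /* ...every remaining member is resolved the same way, one gdpa() call per command. */
    }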
diff --git a/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vk_platform.h b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vk_platform.h
new file mode 100644
index 0000000..72f8049
--- /dev/null
+++ b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vk_platform.h
@@ -0,0 +1,120 @@
+//
+// File: vk_platform.h
+//
+/*
+** Copyright (c) 2014-2017 The Khronos Group Inc.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+
+#ifndef VK_PLATFORM_H_
+#define VK_PLATFORM_H_
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif // __cplusplus
+
+/*
+***************************************************************************************************
+* Platform-specific directives and type declarations
+***************************************************************************************************
+*/
+
+/* Platform-specific calling convention macros.
+ *
+ * Platforms should define these so that Vulkan clients call Vulkan commands
+ * with the same calling conventions that the Vulkan implementation expects.
+ *
+ * VKAPI_ATTR - Placed before the return type in function declarations.
+ * Useful for C++11 and GCC/Clang-style function attribute syntax.
+ * VKAPI_CALL - Placed after the return type in function declarations.
+ * Useful for MSVC-style calling convention syntax.
+ * VKAPI_PTR - Placed between the '(' and '*' in function pointer types.
+ *
+ * Function declaration: VKAPI_ATTR void VKAPI_CALL vkCommand(void);
+ * Function pointer type: typedef void (VKAPI_PTR *PFN_vkCommand)(void);
+ */
+#if defined(_WIN32)
+ // On Windows, Vulkan commands use the stdcall convention
+ #define VKAPI_ATTR
+ #define VKAPI_CALL __stdcall
+ #define VKAPI_PTR VKAPI_CALL
+#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH < 7
+ #error "Vulkan isn't supported for the 'armeabi' NDK ABI"
+#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH >= 7 && defined(__ARM_32BIT_STATE)
+ // On Android 32-bit ARM targets, Vulkan functions use the "hardfloat"
+ // calling convention, i.e. float parameters are passed in registers. This
+ // is true even if the rest of the application passes floats on the stack,
+ // as it does by default when compiling for the armeabi-v7a NDK ABI.
+ #define VKAPI_ATTR __attribute__((pcs("aapcs-vfp")))
+ #define VKAPI_CALL
+ #define VKAPI_PTR VKAPI_ATTR
+#else
+ // On other platforms, use the default calling convention
+ #define VKAPI_ATTR
+ #define VKAPI_CALL
+ #define VKAPI_PTR
+#endif
+
+#include <stddef.h>
+
+#if !defined(VK_NO_STDINT_H)
+ #if defined(_MSC_VER) && (_MSC_VER < 1600)
+ typedef signed __int8 int8_t;
+ typedef unsigned __int8 uint8_t;
+ typedef signed __int16 int16_t;
+ typedef unsigned __int16 uint16_t;
+ typedef signed __int32 int32_t;
+ typedef unsigned __int32 uint32_t;
+ typedef signed __int64 int64_t;
+ typedef unsigned __int64 uint64_t;
+ #else
+ #include <stdint.h>
+ #endif
+#endif // !defined(VK_NO_STDINT_H)
+
+#ifdef __cplusplus
+} // extern "C"
+#endif // __cplusplus
+
+// Platform-specific headers required by platform window system extensions.
+// These are enabled prior to #including "vulkan.h". The same enable then
+// controls inclusion of the extension interfaces in vulkan.h.
+
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+#include <android/native_window.h>
+#endif
+
+#ifdef VK_USE_PLATFORM_MIR_KHR
+#include <mir_toolkit/client_types.h>
+#endif
+
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+#include <wayland-client.h>
+#endif
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+#include <windows.h>
+#endif
+
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+#include <X11/Xlib.h>
+#endif
+
+#ifdef VK_USE_PLATFORM_XCB_KHR
+#include <xcb/xcb.h>
+#endif
+
+#endif
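A minimal usage sketch (not part of this commit) of the two conventions documented in the header above: a VK_USE_PLATFORM_* macro is defined before vulkan.h is included, which pulls in both the platform header and the matching surface-extension declarations, and VKAPI_ATTR/VKAPI_CALL/VKAPI_PTR are placed as described in the calling-convention comment. CreateMySurface and PFN_CreateMySurface are hypothetical names:

    #define VK_USE_PLATFORM_WIN32_KHR              /* must precede the include */
    #include <vulkan/vulkan.h>                     /* now also declares vkCreateWin32SurfaceKHR etc. */

    /* Function declaration: VKAPI_ATTR before the return type, VKAPI_CALL after it. */
    VKAPI_ATTR VkResult VKAPI_CALL CreateMySurface(VkInstance instance,
                                                   const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
                                                   VkSurfaceKHR *pSurface);

    /* Function pointer type: VKAPI_PTR between '(' and '*'. */
    typedef VkResult (VKAPI_PTR *PFN_CreateMySurface)(VkInstance,
                                                      const VkWin32SurfaceCreateInfoKHR *,
                                                      VkSurfaceKHR *);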
diff --git a/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vk_sdk_platform.h b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vk_sdk_platform.h
new file mode 100644
index 0000000..ef9a000
--- /dev/null
+++ b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vk_sdk_platform.h
@@ -0,0 +1,46 @@
+//
+// File: vk_sdk_platform.h
+//
+/*
+ * Copyright (c) 2015-2016 The Khronos Group Inc.
+ * Copyright (c) 2015-2016 Valve Corporation
+ * Copyright (c) 2015-2016 LunarG, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VK_SDK_PLATFORM_H
+#define VK_SDK_PLATFORM_H
+
+#if defined(_WIN32)
+#define NOMINMAX
+#ifndef __cplusplus
+#undef inline
+#define inline __inline
+#endif // __cplusplus
+
+#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/)
+// C99:
+// Microsoft didn't implement C99 in Visual Studio; but started adding it with
+// VS2013. However, VS2013 still didn't have snprintf(). The following is a
+// work-around (Note: The _CRT_SECURE_NO_WARNINGS macro must be set in the
+// "CMakeLists.txt" file).
+// NOTE: This is fixed in Visual Studio 2015.
+#define snprintf _snprintf
+#endif
+
+#define strdup _strdup
+
+#endif // _WIN32
+
+#endif // VK_SDK_PLATFORM_H
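In effect, with the block above a translation unit built with an MSVC toolchain older than VS2015 can keep using the standard names. A small sketch (not part of this commit; include path and function name are assumptions):

    #include <stdio.h>
    #include <string.h>
    #include "vk_sdk_platform.h"   /* maps snprintf -> _snprintf and strdup -> _strdup on pre-2015 MSVC */

    static char *make_label(int headerVersion)
    {
        char label[64];
        snprintf(label, sizeof(label), "VK_HEADER_VERSION=%d", headerVersion);
        return strdup(label);      /* caller is responsible for freeing the copy */
    }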
diff --git a/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vulkan.h b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vulkan.h
new file mode 100644
index 0000000..51e5e9e
--- /dev/null
+++ b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vulkan.h
@@ -0,0 +1,5926 @@
+#ifndef VULKAN_H_
+#define VULKAN_H_ 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+** Copyright (c) 2015-2017 The Khronos Group Inc.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#define VK_VERSION_1_0 1
+#include "vk_platform.h"
+
+#define VK_MAKE_VERSION(major, minor, patch) \
+ (((major) << 22) | ((minor) << 12) | (patch))
+
+// DEPRECATED: This define has been removed. Specific version defines (e.g. VK_API_VERSION_1_0), or the VK_MAKE_VERSION macro, should be used instead.
+//#define VK_API_VERSION VK_MAKE_VERSION(1, 0, 0)
+
+// Vulkan 1.0 version number
+#define VK_API_VERSION_1_0 VK_MAKE_VERSION(1, 0, 0)
+
+#define VK_VERSION_MAJOR(version) ((uint32_t)(version) >> 22)
+#define VK_VERSION_MINOR(version) (((uint32_t)(version) >> 12) & 0x3ff)
+#define VK_VERSION_PATCH(version) ((uint32_t)(version) & 0xfff)
+// Version of this file
+#define VK_HEADER_VERSION 49
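// Editor's illustration (not part of the header): VK_MAKE_VERSION packs the three
// fields into a single uint32_t and the macros above unpack them again, e.g.
//   VK_MAKE_VERSION(1, 0, 49)    == (1u << 22) | (0u << 12) | 49u == 0x00400031
//   VK_VERSION_MAJOR(0x00400031) == 1
//   VK_VERSION_MINOR(0x00400031) == 0
//   VK_VERSION_PATCH(0x00400031) == 49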
+
+
+#define VK_NULL_HANDLE 0
+
+
+
+#define VK_DEFINE_HANDLE(object) typedef struct object##_T* object;
+
+
+#if !defined(VK_DEFINE_NON_DISPATCHABLE_HANDLE)
+#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
+ #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef struct object##_T *object;
+#else
+ #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef uint64_t object;
+#endif
+#endif
+
+
+
+typedef uint32_t VkFlags;
+typedef uint32_t VkBool32;
+typedef uint64_t VkDeviceSize;
+typedef uint32_t VkSampleMask;
+
+VK_DEFINE_HANDLE(VkInstance)
+VK_DEFINE_HANDLE(VkPhysicalDevice)
+VK_DEFINE_HANDLE(VkDevice)
+VK_DEFINE_HANDLE(VkQueue)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSemaphore)
+VK_DEFINE_HANDLE(VkCommandBuffer)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFence)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeviceMemory)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBuffer)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImage)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkEvent)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkQueryPool)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBufferView)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImageView)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkShaderModule)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineCache)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineLayout)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkRenderPass)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipeline)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSetLayout)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSampler)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorPool)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSet)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFramebuffer)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkCommandPool)
+
+#define VK_LOD_CLAMP_NONE 1000.0f
+#define VK_REMAINING_MIP_LEVELS (~0U)
+#define VK_REMAINING_ARRAY_LAYERS (~0U)
+#define VK_WHOLE_SIZE (~0ULL)
+#define VK_ATTACHMENT_UNUSED (~0U)
+#define VK_TRUE 1
+#define VK_FALSE 0
+#define VK_QUEUE_FAMILY_IGNORED (~0U)
+#define VK_SUBPASS_EXTERNAL (~0U)
+#define VK_MAX_PHYSICAL_DEVICE_NAME_SIZE 256
+#define VK_UUID_SIZE 16
+#define VK_MAX_MEMORY_TYPES 32
+#define VK_MAX_MEMORY_HEAPS 16
+#define VK_MAX_EXTENSION_NAME_SIZE 256
+#define VK_MAX_DESCRIPTION_SIZE 256
+
+
+typedef enum VkPipelineCacheHeaderVersion {
+ VK_PIPELINE_CACHE_HEADER_VERSION_ONE = 1,
+ VK_PIPELINE_CACHE_HEADER_VERSION_BEGIN_RANGE = VK_PIPELINE_CACHE_HEADER_VERSION_ONE,
+ VK_PIPELINE_CACHE_HEADER_VERSION_END_RANGE = VK_PIPELINE_CACHE_HEADER_VERSION_ONE,
+ VK_PIPELINE_CACHE_HEADER_VERSION_RANGE_SIZE = (VK_PIPELINE_CACHE_HEADER_VERSION_ONE - VK_PIPELINE_CACHE_HEADER_VERSION_ONE + 1),
+ VK_PIPELINE_CACHE_HEADER_VERSION_MAX_ENUM = 0x7FFFFFFF
+} VkPipelineCacheHeaderVersion;
+
+typedef enum VkResult {
+ VK_SUCCESS = 0,
+ VK_NOT_READY = 1,
+ VK_TIMEOUT = 2,
+ VK_EVENT_SET = 3,
+ VK_EVENT_RESET = 4,
+ VK_INCOMPLETE = 5,
+ VK_ERROR_OUT_OF_HOST_MEMORY = -1,
+ VK_ERROR_OUT_OF_DEVICE_MEMORY = -2,
+ VK_ERROR_INITIALIZATION_FAILED = -3,
+ VK_ERROR_DEVICE_LOST = -4,
+ VK_ERROR_MEMORY_MAP_FAILED = -5,
+ VK_ERROR_LAYER_NOT_PRESENT = -6,
+ VK_ERROR_EXTENSION_NOT_PRESENT = -7,
+ VK_ERROR_FEATURE_NOT_PRESENT = -8,
+ VK_ERROR_INCOMPATIBLE_DRIVER = -9,
+ VK_ERROR_TOO_MANY_OBJECTS = -10,
+ VK_ERROR_FORMAT_NOT_SUPPORTED = -11,
+ VK_ERROR_FRAGMENTED_POOL = -12,
+ VK_ERROR_SURFACE_LOST_KHR = -1000000000,
+ VK_ERROR_NATIVE_WINDOW_IN_USE_KHR = -1000000001,
+ VK_SUBOPTIMAL_KHR = 1000001003,
+ VK_ERROR_OUT_OF_DATE_KHR = -1000001004,
+ VK_ERROR_INCOMPATIBLE_DISPLAY_KHR = -1000003001,
+ VK_ERROR_VALIDATION_FAILED_EXT = -1000011001,
+ VK_ERROR_INVALID_SHADER_NV = -1000012000,
+ VK_ERROR_OUT_OF_POOL_MEMORY_KHR = -1000069000,
+ VK_ERROR_INVALID_EXTERNAL_HANDLE_KHX = -1000072003,
+ VK_RESULT_BEGIN_RANGE = VK_ERROR_FRAGMENTED_POOL,
+ VK_RESULT_END_RANGE = VK_INCOMPLETE,
+ VK_RESULT_RANGE_SIZE = (VK_INCOMPLETE - VK_ERROR_FRAGMENTED_POOL + 1),
+ VK_RESULT_MAX_ENUM = 0x7FFFFFFF
+} VkResult;
+
+typedef enum VkStructureType {
+ VK_STRUCTURE_TYPE_APPLICATION_INFO = 0,
+ VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO = 1,
+ VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO = 2,
+ VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO = 3,
+ VK_STRUCTURE_TYPE_SUBMIT_INFO = 4,
+ VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO = 5,
+ VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE = 6,
+ VK_STRUCTURE_TYPE_BIND_SPARSE_INFO = 7,
+ VK_STRUCTURE_TYPE_FENCE_CREATE_INFO = 8,
+ VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO = 9,
+ VK_STRUCTURE_TYPE_EVENT_CREATE_INFO = 10,
+ VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO = 11,
+ VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO = 12,
+ VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO = 13,
+ VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO = 14,
+ VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO = 15,
+ VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO = 16,
+ VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO = 17,
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO = 18,
+ VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO = 19,
+ VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO = 20,
+ VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO = 21,
+ VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO = 22,
+ VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO = 23,
+ VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO = 24,
+ VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO = 25,
+ VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO = 26,
+ VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO = 27,
+ VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO = 28,
+ VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO = 29,
+ VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO = 30,
+ VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO = 31,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO = 32,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO = 33,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO = 34,
+ VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET = 35,
+ VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET = 36,
+ VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO = 37,
+ VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO = 38,
+ VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO = 39,
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO = 40,
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO = 41,
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO = 42,
+ VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO = 43,
+ VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER = 44,
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER = 45,
+ VK_STRUCTURE_TYPE_MEMORY_BARRIER = 46,
+ VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO = 47,
+ VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO = 48,
+ VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR = 1000001000,
+ VK_STRUCTURE_TYPE_PRESENT_INFO_KHR = 1000001001,
+ VK_STRUCTURE_TYPE_DISPLAY_MODE_CREATE_INFO_KHR = 1000002000,
+ VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR = 1000002001,
+ VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR = 1000003000,
+ VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR = 1000004000,
+ VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR = 1000005000,
+ VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR = 1000006000,
+ VK_STRUCTURE_TYPE_MIR_SURFACE_CREATE_INFO_KHR = 1000007000,
+ VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR = 1000008000,
+ VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR = 1000009000,
+ VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT = 1000011000,
+ VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD = 1000018000,
+ VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT = 1000022000,
+ VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_TAG_INFO_EXT = 1000022001,
+ VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT = 1000022002,
+ VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV = 1000026000,
+ VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV = 1000026001,
+ VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV = 1000026002,
+ VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO_KHX = 1000053000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHX = 1000053001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHX = 1000053002,
+ VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV = 1000056000,
+ VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV = 1000056001,
+ VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057000,
+ VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057001,
+ VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV = 1000058000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR = 1000059000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR = 1000059001,
+ VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2_KHR = 1000059002,
+ VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR = 1000059003,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR = 1000059004,
+ VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR = 1000059005,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR = 1000059006,
+ VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2_KHR = 1000059007,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2_KHR = 1000059008,
+ VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHX = 1000060000,
+ VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHX = 1000060001,
+ VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHX = 1000060002,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO_KHX = 1000060003,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO_KHX = 1000060004,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO_KHX = 1000060005,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHX = 1000060006,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHX = 1000060007,
+ VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHX = 1000060008,
+ VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHX = 1000060009,
+ VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHX = 1000060010,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHX = 1000060011,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHX = 1000060012,
+ VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT = 1000061000,
+ VK_STRUCTURE_TYPE_VI_SURFACE_CREATE_INFO_NN = 1000062000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES_KHX = 1000070000,
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO_KHX = 1000070001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHX = 1000071000,
+ VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHX = 1000071001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO_KHX = 1000071002,
+ VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES_KHX = 1000071003,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHX = 1000071004,
+ VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHX = 1000072000,
+ VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHX = 1000072001,
+ VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHX = 1000072002,
+ VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHX = 1000073000,
+ VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHX = 1000073001,
+ VK_STRUCTURE_TYPE_MEMORY_WIN32_HANDLE_PROPERTIES_KHX = 1000073002,
+ VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHX = 1000074000,
+ VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHX = 1000074001,
+ VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHX = 1000075000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHX = 1000076000,
+ VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHX = 1000076001,
+ VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHX = 1000077000,
+ VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHX = 1000078000,
+ VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHX = 1000078001,
+ VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHX = 1000078002,
+ VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHX = 1000079000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR = 1000080000,
+ VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR = 1000084000,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR = 1000085000,
+ VK_STRUCTURE_TYPE_OBJECT_TABLE_CREATE_INFO_NVX = 1000086000,
+ VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NVX = 1000086001,
+ VK_STRUCTURE_TYPE_CMD_PROCESS_COMMANDS_INFO_NVX = 1000086002,
+ VK_STRUCTURE_TYPE_CMD_RESERVE_SPACE_FOR_COMMANDS_INFO_NVX = 1000086003,
+ VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_LIMITS_NVX = 1000086004,
+ VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_FEATURES_NVX = 1000086005,
+ VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV = 1000087000,
+ VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES2_EXT = 1000090000,
+ VK_STRUCTURE_TYPE_DISPLAY_POWER_INFO_EXT = 1000091000,
+ VK_STRUCTURE_TYPE_DEVICE_EVENT_INFO_EXT = 1000091001,
+ VK_STRUCTURE_TYPE_DISPLAY_EVENT_INFO_EXT = 1000091002,
+ VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT = 1000091003,
+ VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE = 1000092000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX = 1000097000,
+ VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV = 1000098000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT = 1000099000,
+ VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT = 1000099001,
+ VK_STRUCTURE_TYPE_HDR_METADATA_EXT = 1000105000,
+ VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR = 1000111000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR = 1000119000,
+ VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR = 1000119001,
+ VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR = 1000119002,
+ VK_STRUCTURE_TYPE_IOS_SURFACE_CREATE_INFO_MVK = 1000122000,
+ VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK = 1000123000,
+ VK_STRUCTURE_TYPE_BEGIN_RANGE = VK_STRUCTURE_TYPE_APPLICATION_INFO,
+ VK_STRUCTURE_TYPE_END_RANGE = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO,
+ VK_STRUCTURE_TYPE_RANGE_SIZE = (VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO - VK_STRUCTURE_TYPE_APPLICATION_INFO + 1),
+ VK_STRUCTURE_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkStructureType;
+
+typedef enum VkSystemAllocationScope {
+ VK_SYSTEM_ALLOCATION_SCOPE_COMMAND = 0,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT = 1,
+ VK_SYSTEM_ALLOCATION_SCOPE_CACHE = 2,
+ VK_SYSTEM_ALLOCATION_SCOPE_DEVICE = 3,
+ VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE = 4,
+ VK_SYSTEM_ALLOCATION_SCOPE_BEGIN_RANGE = VK_SYSTEM_ALLOCATION_SCOPE_COMMAND,
+ VK_SYSTEM_ALLOCATION_SCOPE_END_RANGE = VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE,
+ VK_SYSTEM_ALLOCATION_SCOPE_RANGE_SIZE = (VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE - VK_SYSTEM_ALLOCATION_SCOPE_COMMAND + 1),
+ VK_SYSTEM_ALLOCATION_SCOPE_MAX_ENUM = 0x7FFFFFFF
+} VkSystemAllocationScope;
+
+typedef enum VkInternalAllocationType {
+ VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE = 0,
+ VK_INTERNAL_ALLOCATION_TYPE_BEGIN_RANGE = VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE,
+ VK_INTERNAL_ALLOCATION_TYPE_END_RANGE = VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE,
+ VK_INTERNAL_ALLOCATION_TYPE_RANGE_SIZE = (VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE - VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE + 1),
+ VK_INTERNAL_ALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkInternalAllocationType;
+
+typedef enum VkFormat {
+ VK_FORMAT_UNDEFINED = 0,
+ VK_FORMAT_R4G4_UNORM_PACK8 = 1,
+ VK_FORMAT_R4G4B4A4_UNORM_PACK16 = 2,
+ VK_FORMAT_B4G4R4A4_UNORM_PACK16 = 3,
+ VK_FORMAT_R5G6B5_UNORM_PACK16 = 4,
+ VK_FORMAT_B5G6R5_UNORM_PACK16 = 5,
+ VK_FORMAT_R5G5B5A1_UNORM_PACK16 = 6,
+ VK_FORMAT_B5G5R5A1_UNORM_PACK16 = 7,
+ VK_FORMAT_A1R5G5B5_UNORM_PACK16 = 8,
+ VK_FORMAT_R8_UNORM = 9,
+ VK_FORMAT_R8_SNORM = 10,
+ VK_FORMAT_R8_USCALED = 11,
+ VK_FORMAT_R8_SSCALED = 12,
+ VK_FORMAT_R8_UINT = 13,
+ VK_FORMAT_R8_SINT = 14,
+ VK_FORMAT_R8_SRGB = 15,
+ VK_FORMAT_R8G8_UNORM = 16,
+ VK_FORMAT_R8G8_SNORM = 17,
+ VK_FORMAT_R8G8_USCALED = 18,
+ VK_FORMAT_R8G8_SSCALED = 19,
+ VK_FORMAT_R8G8_UINT = 20,
+ VK_FORMAT_R8G8_SINT = 21,
+ VK_FORMAT_R8G8_SRGB = 22,
+ VK_FORMAT_R8G8B8_UNORM = 23,
+ VK_FORMAT_R8G8B8_SNORM = 24,
+ VK_FORMAT_R8G8B8_USCALED = 25,
+ VK_FORMAT_R8G8B8_SSCALED = 26,
+ VK_FORMAT_R8G8B8_UINT = 27,
+ VK_FORMAT_R8G8B8_SINT = 28,
+ VK_FORMAT_R8G8B8_SRGB = 29,
+ VK_FORMAT_B8G8R8_UNORM = 30,
+ VK_FORMAT_B8G8R8_SNORM = 31,
+ VK_FORMAT_B8G8R8_USCALED = 32,
+ VK_FORMAT_B8G8R8_SSCALED = 33,
+ VK_FORMAT_B8G8R8_UINT = 34,
+ VK_FORMAT_B8G8R8_SINT = 35,
+ VK_FORMAT_B8G8R8_SRGB = 36,
+ VK_FORMAT_R8G8B8A8_UNORM = 37,
+ VK_FORMAT_R8G8B8A8_SNORM = 38,
+ VK_FORMAT_R8G8B8A8_USCALED = 39,
+ VK_FORMAT_R8G8B8A8_SSCALED = 40,
+ VK_FORMAT_R8G8B8A8_UINT = 41,
+ VK_FORMAT_R8G8B8A8_SINT = 42,
+ VK_FORMAT_R8G8B8A8_SRGB = 43,
+ VK_FORMAT_B8G8R8A8_UNORM = 44,
+ VK_FORMAT_B8G8R8A8_SNORM = 45,
+ VK_FORMAT_B8G8R8A8_USCALED = 46,
+ VK_FORMAT_B8G8R8A8_SSCALED = 47,
+ VK_FORMAT_B8G8R8A8_UINT = 48,
+ VK_FORMAT_B8G8R8A8_SINT = 49,
+ VK_FORMAT_B8G8R8A8_SRGB = 50,
+ VK_FORMAT_A8B8G8R8_UNORM_PACK32 = 51,
+ VK_FORMAT_A8B8G8R8_SNORM_PACK32 = 52,
+ VK_FORMAT_A8B8G8R8_USCALED_PACK32 = 53,
+ VK_FORMAT_A8B8G8R8_SSCALED_PACK32 = 54,
+ VK_FORMAT_A8B8G8R8_UINT_PACK32 = 55,
+ VK_FORMAT_A8B8G8R8_SINT_PACK32 = 56,
+ VK_FORMAT_A8B8G8R8_SRGB_PACK32 = 57,
+ VK_FORMAT_A2R10G10B10_UNORM_PACK32 = 58,
+ VK_FORMAT_A2R10G10B10_SNORM_PACK32 = 59,
+ VK_FORMAT_A2R10G10B10_USCALED_PACK32 = 60,
+ VK_FORMAT_A2R10G10B10_SSCALED_PACK32 = 61,
+ VK_FORMAT_A2R10G10B10_UINT_PACK32 = 62,
+ VK_FORMAT_A2R10G10B10_SINT_PACK32 = 63,
+ VK_FORMAT_A2B10G10R10_UNORM_PACK32 = 64,
+ VK_FORMAT_A2B10G10R10_SNORM_PACK32 = 65,
+ VK_FORMAT_A2B10G10R10_USCALED_PACK32 = 66,
+ VK_FORMAT_A2B10G10R10_SSCALED_PACK32 = 67,
+ VK_FORMAT_A2B10G10R10_UINT_PACK32 = 68,
+ VK_FORMAT_A2B10G10R10_SINT_PACK32 = 69,
+ VK_FORMAT_R16_UNORM = 70,
+ VK_FORMAT_R16_SNORM = 71,
+ VK_FORMAT_R16_USCALED = 72,
+ VK_FORMAT_R16_SSCALED = 73,
+ VK_FORMAT_R16_UINT = 74,
+ VK_FORMAT_R16_SINT = 75,
+ VK_FORMAT_R16_SFLOAT = 76,
+ VK_FORMAT_R16G16_UNORM = 77,
+ VK_FORMAT_R16G16_SNORM = 78,
+ VK_FORMAT_R16G16_USCALED = 79,
+ VK_FORMAT_R16G16_SSCALED = 80,
+ VK_FORMAT_R16G16_UINT = 81,
+ VK_FORMAT_R16G16_SINT = 82,
+ VK_FORMAT_R16G16_SFLOAT = 83,
+ VK_FORMAT_R16G16B16_UNORM = 84,
+ VK_FORMAT_R16G16B16_SNORM = 85,
+ VK_FORMAT_R16G16B16_USCALED = 86,
+ VK_FORMAT_R16G16B16_SSCALED = 87,
+ VK_FORMAT_R16G16B16_UINT = 88,
+ VK_FORMAT_R16G16B16_SINT = 89,
+ VK_FORMAT_R16G16B16_SFLOAT = 90,
+ VK_FORMAT_R16G16B16A16_UNORM = 91,
+ VK_FORMAT_R16G16B16A16_SNORM = 92,
+ VK_FORMAT_R16G16B16A16_USCALED = 93,
+ VK_FORMAT_R16G16B16A16_SSCALED = 94,
+ VK_FORMAT_R16G16B16A16_UINT = 95,
+ VK_FORMAT_R16G16B16A16_SINT = 96,
+ VK_FORMAT_R16G16B16A16_SFLOAT = 97,
+ VK_FORMAT_R32_UINT = 98,
+ VK_FORMAT_R32_SINT = 99,
+ VK_FORMAT_R32_SFLOAT = 100,
+ VK_FORMAT_R32G32_UINT = 101,
+ VK_FORMAT_R32G32_SINT = 102,
+ VK_FORMAT_R32G32_SFLOAT = 103,
+ VK_FORMAT_R32G32B32_UINT = 104,
+ VK_FORMAT_R32G32B32_SINT = 105,
+ VK_FORMAT_R32G32B32_SFLOAT = 106,
+ VK_FORMAT_R32G32B32A32_UINT = 107,
+ VK_FORMAT_R32G32B32A32_SINT = 108,
+ VK_FORMAT_R32G32B32A32_SFLOAT = 109,
+ VK_FORMAT_R64_UINT = 110,
+ VK_FORMAT_R64_SINT = 111,
+ VK_FORMAT_R64_SFLOAT = 112,
+ VK_FORMAT_R64G64_UINT = 113,
+ VK_FORMAT_R64G64_SINT = 114,
+ VK_FORMAT_R64G64_SFLOAT = 115,
+ VK_FORMAT_R64G64B64_UINT = 116,
+ VK_FORMAT_R64G64B64_SINT = 117,
+ VK_FORMAT_R64G64B64_SFLOAT = 118,
+ VK_FORMAT_R64G64B64A64_UINT = 119,
+ VK_FORMAT_R64G64B64A64_SINT = 120,
+ VK_FORMAT_R64G64B64A64_SFLOAT = 121,
+ VK_FORMAT_B10G11R11_UFLOAT_PACK32 = 122,
+ VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 = 123,
+ VK_FORMAT_D16_UNORM = 124,
+ VK_FORMAT_X8_D24_UNORM_PACK32 = 125,
+ VK_FORMAT_D32_SFLOAT = 126,
+ VK_FORMAT_S8_UINT = 127,
+ VK_FORMAT_D16_UNORM_S8_UINT = 128,
+ VK_FORMAT_D24_UNORM_S8_UINT = 129,
+ VK_FORMAT_D32_SFLOAT_S8_UINT = 130,
+ VK_FORMAT_BC1_RGB_UNORM_BLOCK = 131,
+ VK_FORMAT_BC1_RGB_SRGB_BLOCK = 132,
+ VK_FORMAT_BC1_RGBA_UNORM_BLOCK = 133,
+ VK_FORMAT_BC1_RGBA_SRGB_BLOCK = 134,
+ VK_FORMAT_BC2_UNORM_BLOCK = 135,
+ VK_FORMAT_BC2_SRGB_BLOCK = 136,
+ VK_FORMAT_BC3_UNORM_BLOCK = 137,
+ VK_FORMAT_BC3_SRGB_BLOCK = 138,
+ VK_FORMAT_BC4_UNORM_BLOCK = 139,
+ VK_FORMAT_BC4_SNORM_BLOCK = 140,
+ VK_FORMAT_BC5_UNORM_BLOCK = 141,
+ VK_FORMAT_BC5_SNORM_BLOCK = 142,
+ VK_FORMAT_BC6H_UFLOAT_BLOCK = 143,
+ VK_FORMAT_BC6H_SFLOAT_BLOCK = 144,
+ VK_FORMAT_BC7_UNORM_BLOCK = 145,
+ VK_FORMAT_BC7_SRGB_BLOCK = 146,
+ VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK = 147,
+ VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK = 148,
+ VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK = 149,
+ VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK = 150,
+ VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK = 151,
+ VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK = 152,
+ VK_FORMAT_EAC_R11_UNORM_BLOCK = 153,
+ VK_FORMAT_EAC_R11_SNORM_BLOCK = 154,
+ VK_FORMAT_EAC_R11G11_UNORM_BLOCK = 155,
+ VK_FORMAT_EAC_R11G11_SNORM_BLOCK = 156,
+ VK_FORMAT_ASTC_4x4_UNORM_BLOCK = 157,
+ VK_FORMAT_ASTC_4x4_SRGB_BLOCK = 158,
+ VK_FORMAT_ASTC_5x4_UNORM_BLOCK = 159,
+ VK_FORMAT_ASTC_5x4_SRGB_BLOCK = 160,
+ VK_FORMAT_ASTC_5x5_UNORM_BLOCK = 161,
+ VK_FORMAT_ASTC_5x5_SRGB_BLOCK = 162,
+ VK_FORMAT_ASTC_6x5_UNORM_BLOCK = 163,
+ VK_FORMAT_ASTC_6x5_SRGB_BLOCK = 164,
+ VK_FORMAT_ASTC_6x6_UNORM_BLOCK = 165,
+ VK_FORMAT_ASTC_6x6_SRGB_BLOCK = 166,
+ VK_FORMAT_ASTC_8x5_UNORM_BLOCK = 167,
+ VK_FORMAT_ASTC_8x5_SRGB_BLOCK = 168,
+ VK_FORMAT_ASTC_8x6_UNORM_BLOCK = 169,
+ VK_FORMAT_ASTC_8x6_SRGB_BLOCK = 170,
+ VK_FORMAT_ASTC_8x8_UNORM_BLOCK = 171,
+ VK_FORMAT_ASTC_8x8_SRGB_BLOCK = 172,
+ VK_FORMAT_ASTC_10x5_UNORM_BLOCK = 173,
+ VK_FORMAT_ASTC_10x5_SRGB_BLOCK = 174,
+ VK_FORMAT_ASTC_10x6_UNORM_BLOCK = 175,
+ VK_FORMAT_ASTC_10x6_SRGB_BLOCK = 176,
+ VK_FORMAT_ASTC_10x8_UNORM_BLOCK = 177,
+ VK_FORMAT_ASTC_10x8_SRGB_BLOCK = 178,
+ VK_FORMAT_ASTC_10x10_UNORM_BLOCK = 179,
+ VK_FORMAT_ASTC_10x10_SRGB_BLOCK = 180,
+ VK_FORMAT_ASTC_12x10_UNORM_BLOCK = 181,
+ VK_FORMAT_ASTC_12x10_SRGB_BLOCK = 182,
+ VK_FORMAT_ASTC_12x12_UNORM_BLOCK = 183,
+ VK_FORMAT_ASTC_12x12_SRGB_BLOCK = 184,
+ VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG = 1000054000,
+ VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG = 1000054001,
+ VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG = 1000054002,
+ VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG = 1000054003,
+ VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG = 1000054004,
+ VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG = 1000054005,
+ VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG = 1000054006,
+ VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG = 1000054007,
+ VK_FORMAT_BEGIN_RANGE = VK_FORMAT_UNDEFINED,
+ VK_FORMAT_END_RANGE = VK_FORMAT_ASTC_12x12_SRGB_BLOCK,
+ VK_FORMAT_RANGE_SIZE = (VK_FORMAT_ASTC_12x12_SRGB_BLOCK - VK_FORMAT_UNDEFINED + 1),
+ VK_FORMAT_MAX_ENUM = 0x7FFFFFFF
+} VkFormat;
+
+typedef enum VkImageType {
+ VK_IMAGE_TYPE_1D = 0,
+ VK_IMAGE_TYPE_2D = 1,
+ VK_IMAGE_TYPE_3D = 2,
+ VK_IMAGE_TYPE_BEGIN_RANGE = VK_IMAGE_TYPE_1D,
+ VK_IMAGE_TYPE_END_RANGE = VK_IMAGE_TYPE_3D,
+ VK_IMAGE_TYPE_RANGE_SIZE = (VK_IMAGE_TYPE_3D - VK_IMAGE_TYPE_1D + 1),
+ VK_IMAGE_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkImageType;
+
+typedef enum VkImageTiling {
+ VK_IMAGE_TILING_OPTIMAL = 0,
+ VK_IMAGE_TILING_LINEAR = 1,
+ VK_IMAGE_TILING_BEGIN_RANGE = VK_IMAGE_TILING_OPTIMAL,
+ VK_IMAGE_TILING_END_RANGE = VK_IMAGE_TILING_LINEAR,
+ VK_IMAGE_TILING_RANGE_SIZE = (VK_IMAGE_TILING_LINEAR - VK_IMAGE_TILING_OPTIMAL + 1),
+ VK_IMAGE_TILING_MAX_ENUM = 0x7FFFFFFF
+} VkImageTiling;
+
+typedef enum VkPhysicalDeviceType {
+ VK_PHYSICAL_DEVICE_TYPE_OTHER = 0,
+ VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU = 1,
+ VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU = 2,
+ VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU = 3,
+ VK_PHYSICAL_DEVICE_TYPE_CPU = 4,
+ VK_PHYSICAL_DEVICE_TYPE_BEGIN_RANGE = VK_PHYSICAL_DEVICE_TYPE_OTHER,
+ VK_PHYSICAL_DEVICE_TYPE_END_RANGE = VK_PHYSICAL_DEVICE_TYPE_CPU,
+ VK_PHYSICAL_DEVICE_TYPE_RANGE_SIZE = (VK_PHYSICAL_DEVICE_TYPE_CPU - VK_PHYSICAL_DEVICE_TYPE_OTHER + 1),
+ VK_PHYSICAL_DEVICE_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkPhysicalDeviceType;
+
+typedef enum VkQueryType {
+ VK_QUERY_TYPE_OCCLUSION = 0,
+ VK_QUERY_TYPE_PIPELINE_STATISTICS = 1,
+ VK_QUERY_TYPE_TIMESTAMP = 2,
+ VK_QUERY_TYPE_BEGIN_RANGE = VK_QUERY_TYPE_OCCLUSION,
+ VK_QUERY_TYPE_END_RANGE = VK_QUERY_TYPE_TIMESTAMP,
+ VK_QUERY_TYPE_RANGE_SIZE = (VK_QUERY_TYPE_TIMESTAMP - VK_QUERY_TYPE_OCCLUSION + 1),
+ VK_QUERY_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkQueryType;
+
+typedef enum VkSharingMode {
+ VK_SHARING_MODE_EXCLUSIVE = 0,
+ VK_SHARING_MODE_CONCURRENT = 1,
+ VK_SHARING_MODE_BEGIN_RANGE = VK_SHARING_MODE_EXCLUSIVE,
+ VK_SHARING_MODE_END_RANGE = VK_SHARING_MODE_CONCURRENT,
+ VK_SHARING_MODE_RANGE_SIZE = (VK_SHARING_MODE_CONCURRENT - VK_SHARING_MODE_EXCLUSIVE + 1),
+ VK_SHARING_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkSharingMode;
+
+typedef enum VkImageLayout {
+ VK_IMAGE_LAYOUT_UNDEFINED = 0,
+ VK_IMAGE_LAYOUT_GENERAL = 1,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL = 2,
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL = 3,
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL = 4,
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL = 5,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL = 6,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL = 7,
+ VK_IMAGE_LAYOUT_PREINITIALIZED = 8,
+ VK_IMAGE_LAYOUT_PRESENT_SRC_KHR = 1000001002,
+ VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR = 1000111000,
+ VK_IMAGE_LAYOUT_BEGIN_RANGE = VK_IMAGE_LAYOUT_UNDEFINED,
+ VK_IMAGE_LAYOUT_END_RANGE = VK_IMAGE_LAYOUT_PREINITIALIZED,
+ VK_IMAGE_LAYOUT_RANGE_SIZE = (VK_IMAGE_LAYOUT_PREINITIALIZED - VK_IMAGE_LAYOUT_UNDEFINED + 1),
+ VK_IMAGE_LAYOUT_MAX_ENUM = 0x7FFFFFFF
+} VkImageLayout;
+
+typedef enum VkImageViewType {
+ VK_IMAGE_VIEW_TYPE_1D = 0,
+ VK_IMAGE_VIEW_TYPE_2D = 1,
+ VK_IMAGE_VIEW_TYPE_3D = 2,
+ VK_IMAGE_VIEW_TYPE_CUBE = 3,
+ VK_IMAGE_VIEW_TYPE_1D_ARRAY = 4,
+ VK_IMAGE_VIEW_TYPE_2D_ARRAY = 5,
+ VK_IMAGE_VIEW_TYPE_CUBE_ARRAY = 6,
+ VK_IMAGE_VIEW_TYPE_BEGIN_RANGE = VK_IMAGE_VIEW_TYPE_1D,
+ VK_IMAGE_VIEW_TYPE_END_RANGE = VK_IMAGE_VIEW_TYPE_CUBE_ARRAY,
+ VK_IMAGE_VIEW_TYPE_RANGE_SIZE = (VK_IMAGE_VIEW_TYPE_CUBE_ARRAY - VK_IMAGE_VIEW_TYPE_1D + 1),
+ VK_IMAGE_VIEW_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkImageViewType;
+
+typedef enum VkComponentSwizzle {
+ VK_COMPONENT_SWIZZLE_IDENTITY = 0,
+ VK_COMPONENT_SWIZZLE_ZERO = 1,
+ VK_COMPONENT_SWIZZLE_ONE = 2,
+ VK_COMPONENT_SWIZZLE_R = 3,
+ VK_COMPONENT_SWIZZLE_G = 4,
+ VK_COMPONENT_SWIZZLE_B = 5,
+ VK_COMPONENT_SWIZZLE_A = 6,
+ VK_COMPONENT_SWIZZLE_BEGIN_RANGE = VK_COMPONENT_SWIZZLE_IDENTITY,
+ VK_COMPONENT_SWIZZLE_END_RANGE = VK_COMPONENT_SWIZZLE_A,
+ VK_COMPONENT_SWIZZLE_RANGE_SIZE = (VK_COMPONENT_SWIZZLE_A - VK_COMPONENT_SWIZZLE_IDENTITY + 1),
+ VK_COMPONENT_SWIZZLE_MAX_ENUM = 0x7FFFFFFF
+} VkComponentSwizzle;
+
+typedef enum VkVertexInputRate {
+ VK_VERTEX_INPUT_RATE_VERTEX = 0,
+ VK_VERTEX_INPUT_RATE_INSTANCE = 1,
+ VK_VERTEX_INPUT_RATE_BEGIN_RANGE = VK_VERTEX_INPUT_RATE_VERTEX,
+ VK_VERTEX_INPUT_RATE_END_RANGE = VK_VERTEX_INPUT_RATE_INSTANCE,
+ VK_VERTEX_INPUT_RATE_RANGE_SIZE = (VK_VERTEX_INPUT_RATE_INSTANCE - VK_VERTEX_INPUT_RATE_VERTEX + 1),
+ VK_VERTEX_INPUT_RATE_MAX_ENUM = 0x7FFFFFFF
+} VkVertexInputRate;
+
+typedef enum VkPrimitiveTopology {
+ VK_PRIMITIVE_TOPOLOGY_POINT_LIST = 0,
+ VK_PRIMITIVE_TOPOLOGY_LINE_LIST = 1,
+ VK_PRIMITIVE_TOPOLOGY_LINE_STRIP = 2,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST = 3,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP = 4,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN = 5,
+ VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY = 6,
+ VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY = 7,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY = 8,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY = 9,
+ VK_PRIMITIVE_TOPOLOGY_PATCH_LIST = 10,
+ VK_PRIMITIVE_TOPOLOGY_BEGIN_RANGE = VK_PRIMITIVE_TOPOLOGY_POINT_LIST,
+ VK_PRIMITIVE_TOPOLOGY_END_RANGE = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST,
+ VK_PRIMITIVE_TOPOLOGY_RANGE_SIZE = (VK_PRIMITIVE_TOPOLOGY_PATCH_LIST - VK_PRIMITIVE_TOPOLOGY_POINT_LIST + 1),
+ VK_PRIMITIVE_TOPOLOGY_MAX_ENUM = 0x7FFFFFFF
+} VkPrimitiveTopology;
+
+typedef enum VkPolygonMode {
+ VK_POLYGON_MODE_FILL = 0,
+ VK_POLYGON_MODE_LINE = 1,
+ VK_POLYGON_MODE_POINT = 2,
+ VK_POLYGON_MODE_BEGIN_RANGE = VK_POLYGON_MODE_FILL,
+ VK_POLYGON_MODE_END_RANGE = VK_POLYGON_MODE_POINT,
+ VK_POLYGON_MODE_RANGE_SIZE = (VK_POLYGON_MODE_POINT - VK_POLYGON_MODE_FILL + 1),
+ VK_POLYGON_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkPolygonMode;
+
+typedef enum VkFrontFace {
+ VK_FRONT_FACE_COUNTER_CLOCKWISE = 0,
+ VK_FRONT_FACE_CLOCKWISE = 1,
+ VK_FRONT_FACE_BEGIN_RANGE = VK_FRONT_FACE_COUNTER_CLOCKWISE,
+ VK_FRONT_FACE_END_RANGE = VK_FRONT_FACE_CLOCKWISE,
+ VK_FRONT_FACE_RANGE_SIZE = (VK_FRONT_FACE_CLOCKWISE - VK_FRONT_FACE_COUNTER_CLOCKWISE + 1),
+ VK_FRONT_FACE_MAX_ENUM = 0x7FFFFFFF
+} VkFrontFace;
+
+typedef enum VkCompareOp {
+ VK_COMPARE_OP_NEVER = 0,
+ VK_COMPARE_OP_LESS = 1,
+ VK_COMPARE_OP_EQUAL = 2,
+ VK_COMPARE_OP_LESS_OR_EQUAL = 3,
+ VK_COMPARE_OP_GREATER = 4,
+ VK_COMPARE_OP_NOT_EQUAL = 5,
+ VK_COMPARE_OP_GREATER_OR_EQUAL = 6,
+ VK_COMPARE_OP_ALWAYS = 7,
+ VK_COMPARE_OP_BEGIN_RANGE = VK_COMPARE_OP_NEVER,
+ VK_COMPARE_OP_END_RANGE = VK_COMPARE_OP_ALWAYS,
+ VK_COMPARE_OP_RANGE_SIZE = (VK_COMPARE_OP_ALWAYS - VK_COMPARE_OP_NEVER + 1),
+ VK_COMPARE_OP_MAX_ENUM = 0x7FFFFFFF
+} VkCompareOp;
+
+typedef enum VkStencilOp {
+ VK_STENCIL_OP_KEEP = 0,
+ VK_STENCIL_OP_ZERO = 1,
+ VK_STENCIL_OP_REPLACE = 2,
+ VK_STENCIL_OP_INCREMENT_AND_CLAMP = 3,
+ VK_STENCIL_OP_DECREMENT_AND_CLAMP = 4,
+ VK_STENCIL_OP_INVERT = 5,
+ VK_STENCIL_OP_INCREMENT_AND_WRAP = 6,
+ VK_STENCIL_OP_DECREMENT_AND_WRAP = 7,
+ VK_STENCIL_OP_BEGIN_RANGE = VK_STENCIL_OP_KEEP,
+ VK_STENCIL_OP_END_RANGE = VK_STENCIL_OP_DECREMENT_AND_WRAP,
+ VK_STENCIL_OP_RANGE_SIZE = (VK_STENCIL_OP_DECREMENT_AND_WRAP - VK_STENCIL_OP_KEEP + 1),
+ VK_STENCIL_OP_MAX_ENUM = 0x7FFFFFFF
+} VkStencilOp;
+
+typedef enum VkLogicOp {
+ VK_LOGIC_OP_CLEAR = 0,
+ VK_LOGIC_OP_AND = 1,
+ VK_LOGIC_OP_AND_REVERSE = 2,
+ VK_LOGIC_OP_COPY = 3,
+ VK_LOGIC_OP_AND_INVERTED = 4,
+ VK_LOGIC_OP_NO_OP = 5,
+ VK_LOGIC_OP_XOR = 6,
+ VK_LOGIC_OP_OR = 7,
+ VK_LOGIC_OP_NOR = 8,
+ VK_LOGIC_OP_EQUIVALENT = 9,
+ VK_LOGIC_OP_INVERT = 10,
+ VK_LOGIC_OP_OR_REVERSE = 11,
+ VK_LOGIC_OP_COPY_INVERTED = 12,
+ VK_LOGIC_OP_OR_INVERTED = 13,
+ VK_LOGIC_OP_NAND = 14,
+ VK_LOGIC_OP_SET = 15,
+ VK_LOGIC_OP_BEGIN_RANGE = VK_LOGIC_OP_CLEAR,
+ VK_LOGIC_OP_END_RANGE = VK_LOGIC_OP_SET,
+ VK_LOGIC_OP_RANGE_SIZE = (VK_LOGIC_OP_SET - VK_LOGIC_OP_CLEAR + 1),
+ VK_LOGIC_OP_MAX_ENUM = 0x7FFFFFFF
+} VkLogicOp;
+
+typedef enum VkBlendFactor {
+ VK_BLEND_FACTOR_ZERO = 0,
+ VK_BLEND_FACTOR_ONE = 1,
+ VK_BLEND_FACTOR_SRC_COLOR = 2,
+ VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR = 3,
+ VK_BLEND_FACTOR_DST_COLOR = 4,
+ VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR = 5,
+ VK_BLEND_FACTOR_SRC_ALPHA = 6,
+ VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA = 7,
+ VK_BLEND_FACTOR_DST_ALPHA = 8,
+ VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA = 9,
+ VK_BLEND_FACTOR_CONSTANT_COLOR = 10,
+ VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR = 11,
+ VK_BLEND_FACTOR_CONSTANT_ALPHA = 12,
+ VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA = 13,
+ VK_BLEND_FACTOR_SRC_ALPHA_SATURATE = 14,
+ VK_BLEND_FACTOR_SRC1_COLOR = 15,
+ VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR = 16,
+ VK_BLEND_FACTOR_SRC1_ALPHA = 17,
+ VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA = 18,
+ VK_BLEND_FACTOR_BEGIN_RANGE = VK_BLEND_FACTOR_ZERO,
+ VK_BLEND_FACTOR_END_RANGE = VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA,
+ VK_BLEND_FACTOR_RANGE_SIZE = (VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA - VK_BLEND_FACTOR_ZERO + 1),
+ VK_BLEND_FACTOR_MAX_ENUM = 0x7FFFFFFF
+} VkBlendFactor;
+
+typedef enum VkBlendOp {
+ VK_BLEND_OP_ADD = 0,
+ VK_BLEND_OP_SUBTRACT = 1,
+ VK_BLEND_OP_REVERSE_SUBTRACT = 2,
+ VK_BLEND_OP_MIN = 3,
+ VK_BLEND_OP_MAX = 4,
+ VK_BLEND_OP_BEGIN_RANGE = VK_BLEND_OP_ADD,
+ VK_BLEND_OP_END_RANGE = VK_BLEND_OP_MAX,
+ VK_BLEND_OP_RANGE_SIZE = (VK_BLEND_OP_MAX - VK_BLEND_OP_ADD + 1),
+ VK_BLEND_OP_MAX_ENUM = 0x7FFFFFFF
+} VkBlendOp;
+
+typedef enum VkDynamicState {
+ VK_DYNAMIC_STATE_VIEWPORT = 0,
+ VK_DYNAMIC_STATE_SCISSOR = 1,
+ VK_DYNAMIC_STATE_LINE_WIDTH = 2,
+ VK_DYNAMIC_STATE_DEPTH_BIAS = 3,
+ VK_DYNAMIC_STATE_BLEND_CONSTANTS = 4,
+ VK_DYNAMIC_STATE_DEPTH_BOUNDS = 5,
+ VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK = 6,
+ VK_DYNAMIC_STATE_STENCIL_WRITE_MASK = 7,
+ VK_DYNAMIC_STATE_STENCIL_REFERENCE = 8,
+ VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV = 1000087000,
+ VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT = 1000099000,
+ VK_DYNAMIC_STATE_BEGIN_RANGE = VK_DYNAMIC_STATE_VIEWPORT,
+ VK_DYNAMIC_STATE_END_RANGE = VK_DYNAMIC_STATE_STENCIL_REFERENCE,
+ VK_DYNAMIC_STATE_RANGE_SIZE = (VK_DYNAMIC_STATE_STENCIL_REFERENCE - VK_DYNAMIC_STATE_VIEWPORT + 1),
+ VK_DYNAMIC_STATE_MAX_ENUM = 0x7FFFFFFF
+} VkDynamicState;
+
+typedef enum VkFilter {
+ VK_FILTER_NEAREST = 0,
+ VK_FILTER_LINEAR = 1,
+ VK_FILTER_CUBIC_IMG = 1000015000,
+ VK_FILTER_BEGIN_RANGE = VK_FILTER_NEAREST,
+ VK_FILTER_END_RANGE = VK_FILTER_LINEAR,
+ VK_FILTER_RANGE_SIZE = (VK_FILTER_LINEAR - VK_FILTER_NEAREST + 1),
+ VK_FILTER_MAX_ENUM = 0x7FFFFFFF
+} VkFilter;
+
+typedef enum VkSamplerMipmapMode {
+ VK_SAMPLER_MIPMAP_MODE_NEAREST = 0,
+ VK_SAMPLER_MIPMAP_MODE_LINEAR = 1,
+ VK_SAMPLER_MIPMAP_MODE_BEGIN_RANGE = VK_SAMPLER_MIPMAP_MODE_NEAREST,
+ VK_SAMPLER_MIPMAP_MODE_END_RANGE = VK_SAMPLER_MIPMAP_MODE_LINEAR,
+ VK_SAMPLER_MIPMAP_MODE_RANGE_SIZE = (VK_SAMPLER_MIPMAP_MODE_LINEAR - VK_SAMPLER_MIPMAP_MODE_NEAREST + 1),
+ VK_SAMPLER_MIPMAP_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkSamplerMipmapMode;
+
+typedef enum VkSamplerAddressMode {
+ VK_SAMPLER_ADDRESS_MODE_REPEAT = 0,
+ VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT = 1,
+ VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE = 2,
+ VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER = 3,
+ VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE = 4,
+ VK_SAMPLER_ADDRESS_MODE_BEGIN_RANGE = VK_SAMPLER_ADDRESS_MODE_REPEAT,
+ VK_SAMPLER_ADDRESS_MODE_END_RANGE = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,
+ VK_SAMPLER_ADDRESS_MODE_RANGE_SIZE = (VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER - VK_SAMPLER_ADDRESS_MODE_REPEAT + 1),
+ VK_SAMPLER_ADDRESS_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkSamplerAddressMode;
+
+typedef enum VkBorderColor {
+ VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK = 0,
+ VK_BORDER_COLOR_INT_TRANSPARENT_BLACK = 1,
+ VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK = 2,
+ VK_BORDER_COLOR_INT_OPAQUE_BLACK = 3,
+ VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE = 4,
+ VK_BORDER_COLOR_INT_OPAQUE_WHITE = 5,
+ VK_BORDER_COLOR_BEGIN_RANGE = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
+ VK_BORDER_COLOR_END_RANGE = VK_BORDER_COLOR_INT_OPAQUE_WHITE,
+ VK_BORDER_COLOR_RANGE_SIZE = (VK_BORDER_COLOR_INT_OPAQUE_WHITE - VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK + 1),
+ VK_BORDER_COLOR_MAX_ENUM = 0x7FFFFFFF
+} VkBorderColor;
+
+typedef enum VkDescriptorType {
+ VK_DESCRIPTOR_TYPE_SAMPLER = 0,
+ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER = 1,
+ VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE = 2,
+ VK_DESCRIPTOR_TYPE_STORAGE_IMAGE = 3,
+ VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER = 4,
+ VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER = 5,
+ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER = 6,
+ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER = 7,
+ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC = 8,
+ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC = 9,
+ VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT = 10,
+ VK_DESCRIPTOR_TYPE_BEGIN_RANGE = VK_DESCRIPTOR_TYPE_SAMPLER,
+ VK_DESCRIPTOR_TYPE_END_RANGE = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT,
+ VK_DESCRIPTOR_TYPE_RANGE_SIZE = (VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT - VK_DESCRIPTOR_TYPE_SAMPLER + 1),
+ VK_DESCRIPTOR_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkDescriptorType;
+
+typedef enum VkAttachmentLoadOp {
+ VK_ATTACHMENT_LOAD_OP_LOAD = 0,
+ VK_ATTACHMENT_LOAD_OP_CLEAR = 1,
+ VK_ATTACHMENT_LOAD_OP_DONT_CARE = 2,
+ VK_ATTACHMENT_LOAD_OP_BEGIN_RANGE = VK_ATTACHMENT_LOAD_OP_LOAD,
+ VK_ATTACHMENT_LOAD_OP_END_RANGE = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+ VK_ATTACHMENT_LOAD_OP_RANGE_SIZE = (VK_ATTACHMENT_LOAD_OP_DONT_CARE - VK_ATTACHMENT_LOAD_OP_LOAD + 1),
+ VK_ATTACHMENT_LOAD_OP_MAX_ENUM = 0x7FFFFFFF
+} VkAttachmentLoadOp;
+
+typedef enum VkAttachmentStoreOp {
+ VK_ATTACHMENT_STORE_OP_STORE = 0,
+ VK_ATTACHMENT_STORE_OP_DONT_CARE = 1,
+ VK_ATTACHMENT_STORE_OP_BEGIN_RANGE = VK_ATTACHMENT_STORE_OP_STORE,
+ VK_ATTACHMENT_STORE_OP_END_RANGE = VK_ATTACHMENT_STORE_OP_DONT_CARE,
+ VK_ATTACHMENT_STORE_OP_RANGE_SIZE = (VK_ATTACHMENT_STORE_OP_DONT_CARE - VK_ATTACHMENT_STORE_OP_STORE + 1),
+ VK_ATTACHMENT_STORE_OP_MAX_ENUM = 0x7FFFFFFF
+} VkAttachmentStoreOp;
+
+typedef enum VkPipelineBindPoint {
+ VK_PIPELINE_BIND_POINT_GRAPHICS = 0,
+ VK_PIPELINE_BIND_POINT_COMPUTE = 1,
+ VK_PIPELINE_BIND_POINT_BEGIN_RANGE = VK_PIPELINE_BIND_POINT_GRAPHICS,
+ VK_PIPELINE_BIND_POINT_END_RANGE = VK_PIPELINE_BIND_POINT_COMPUTE,
+ VK_PIPELINE_BIND_POINT_RANGE_SIZE = (VK_PIPELINE_BIND_POINT_COMPUTE - VK_PIPELINE_BIND_POINT_GRAPHICS + 1),
+ VK_PIPELINE_BIND_POINT_MAX_ENUM = 0x7FFFFFFF
+} VkPipelineBindPoint;
+
+typedef enum VkCommandBufferLevel {
+ VK_COMMAND_BUFFER_LEVEL_PRIMARY = 0,
+ VK_COMMAND_BUFFER_LEVEL_SECONDARY = 1,
+ VK_COMMAND_BUFFER_LEVEL_BEGIN_RANGE = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+ VK_COMMAND_BUFFER_LEVEL_END_RANGE = VK_COMMAND_BUFFER_LEVEL_SECONDARY,
+ VK_COMMAND_BUFFER_LEVEL_RANGE_SIZE = (VK_COMMAND_BUFFER_LEVEL_SECONDARY - VK_COMMAND_BUFFER_LEVEL_PRIMARY + 1),
+ VK_COMMAND_BUFFER_LEVEL_MAX_ENUM = 0x7FFFFFFF
+} VkCommandBufferLevel;
+
+typedef enum VkIndexType {
+ VK_INDEX_TYPE_UINT16 = 0,
+ VK_INDEX_TYPE_UINT32 = 1,
+ VK_INDEX_TYPE_BEGIN_RANGE = VK_INDEX_TYPE_UINT16,
+ VK_INDEX_TYPE_END_RANGE = VK_INDEX_TYPE_UINT32,
+ VK_INDEX_TYPE_RANGE_SIZE = (VK_INDEX_TYPE_UINT32 - VK_INDEX_TYPE_UINT16 + 1),
+ VK_INDEX_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkIndexType;
+
+typedef enum VkSubpassContents {
+ VK_SUBPASS_CONTENTS_INLINE = 0,
+ VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS = 1,
+ VK_SUBPASS_CONTENTS_BEGIN_RANGE = VK_SUBPASS_CONTENTS_INLINE,
+ VK_SUBPASS_CONTENTS_END_RANGE = VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS,
+ VK_SUBPASS_CONTENTS_RANGE_SIZE = (VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS - VK_SUBPASS_CONTENTS_INLINE + 1),
+ VK_SUBPASS_CONTENTS_MAX_ENUM = 0x7FFFFFFF
+} VkSubpassContents;
+
+typedef enum VkObjectType {
+ VK_OBJECT_TYPE_UNKNOWN = 0,
+ VK_OBJECT_TYPE_INSTANCE = 1,
+ VK_OBJECT_TYPE_PHYSICAL_DEVICE = 2,
+ VK_OBJECT_TYPE_DEVICE = 3,
+ VK_OBJECT_TYPE_QUEUE = 4,
+ VK_OBJECT_TYPE_SEMAPHORE = 5,
+ VK_OBJECT_TYPE_COMMAND_BUFFER = 6,
+ VK_OBJECT_TYPE_FENCE = 7,
+ VK_OBJECT_TYPE_DEVICE_MEMORY = 8,
+ VK_OBJECT_TYPE_BUFFER = 9,
+ VK_OBJECT_TYPE_IMAGE = 10,
+ VK_OBJECT_TYPE_EVENT = 11,
+ VK_OBJECT_TYPE_QUERY_POOL = 12,
+ VK_OBJECT_TYPE_BUFFER_VIEW = 13,
+ VK_OBJECT_TYPE_IMAGE_VIEW = 14,
+ VK_OBJECT_TYPE_SHADER_MODULE = 15,
+ VK_OBJECT_TYPE_PIPELINE_CACHE = 16,
+ VK_OBJECT_TYPE_PIPELINE_LAYOUT = 17,
+ VK_OBJECT_TYPE_RENDER_PASS = 18,
+ VK_OBJECT_TYPE_PIPELINE = 19,
+ VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT = 20,
+ VK_OBJECT_TYPE_SAMPLER = 21,
+ VK_OBJECT_TYPE_DESCRIPTOR_POOL = 22,
+ VK_OBJECT_TYPE_DESCRIPTOR_SET = 23,
+ VK_OBJECT_TYPE_FRAMEBUFFER = 24,
+ VK_OBJECT_TYPE_COMMAND_POOL = 25,
+ VK_OBJECT_TYPE_SURFACE_KHR = 1000000000,
+ VK_OBJECT_TYPE_SWAPCHAIN_KHR = 1000001000,
+ VK_OBJECT_TYPE_DISPLAY_KHR = 1000002000,
+ VK_OBJECT_TYPE_DISPLAY_MODE_KHR = 1000002001,
+ VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT = 1000011000,
+ VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR = 1000085000,
+ VK_OBJECT_TYPE_OBJECT_TABLE_NVX = 1000086000,
+ VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX = 1000086001,
+ VK_OBJECT_TYPE_BEGIN_RANGE = VK_OBJECT_TYPE_UNKNOWN,
+ VK_OBJECT_TYPE_END_RANGE = VK_OBJECT_TYPE_COMMAND_POOL,
+ VK_OBJECT_TYPE_RANGE_SIZE = (VK_OBJECT_TYPE_COMMAND_POOL - VK_OBJECT_TYPE_UNKNOWN + 1),
+ VK_OBJECT_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkObjectType;
+
+typedef VkFlags VkInstanceCreateFlags;
+
+typedef enum VkFormatFeatureFlagBits {
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT = 0x00000001,
+ VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT = 0x00000002,
+ VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT = 0x00000004,
+ VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000008,
+ VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT = 0x00000010,
+ VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = 0x00000020,
+ VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT = 0x00000040,
+ VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT = 0x00000080,
+ VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT = 0x00000100,
+ VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000200,
+ VK_FORMAT_FEATURE_BLIT_SRC_BIT = 0x00000400,
+ VK_FORMAT_FEATURE_BLIT_DST_BIT = 0x00000800,
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 0x00001000,
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG = 0x00002000,
+ VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR = 0x00004000,
+ VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR = 0x00008000,
+ VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkFormatFeatureFlagBits;
+typedef VkFlags VkFormatFeatureFlags;
+
+typedef enum VkImageUsageFlagBits {
+ VK_IMAGE_USAGE_TRANSFER_SRC_BIT = 0x00000001,
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT = 0x00000002,
+ VK_IMAGE_USAGE_SAMPLED_BIT = 0x00000004,
+ VK_IMAGE_USAGE_STORAGE_BIT = 0x00000008,
+ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT = 0x00000010,
+ VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000020,
+ VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT = 0x00000040,
+ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT = 0x00000080,
+ VK_IMAGE_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkImageUsageFlagBits;
+typedef VkFlags VkImageUsageFlags;
+
+typedef enum VkImageCreateFlagBits {
+ VK_IMAGE_CREATE_SPARSE_BINDING_BIT = 0x00000001,
+ VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002,
+ VK_IMAGE_CREATE_SPARSE_ALIASED_BIT = 0x00000004,
+ VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT = 0x00000008,
+ VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT = 0x00000010,
+ VK_IMAGE_CREATE_BIND_SFR_BIT_KHX = 0x00000040,
+ VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR = 0x00000020,
+ VK_IMAGE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkImageCreateFlagBits;
+typedef VkFlags VkImageCreateFlags;
+
+typedef enum VkSampleCountFlagBits {
+ VK_SAMPLE_COUNT_1_BIT = 0x00000001,
+ VK_SAMPLE_COUNT_2_BIT = 0x00000002,
+ VK_SAMPLE_COUNT_4_BIT = 0x00000004,
+ VK_SAMPLE_COUNT_8_BIT = 0x00000008,
+ VK_SAMPLE_COUNT_16_BIT = 0x00000010,
+ VK_SAMPLE_COUNT_32_BIT = 0x00000020,
+ VK_SAMPLE_COUNT_64_BIT = 0x00000040,
+ VK_SAMPLE_COUNT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkSampleCountFlagBits;
+typedef VkFlags VkSampleCountFlags;
+
+typedef enum VkQueueFlagBits {
+ VK_QUEUE_GRAPHICS_BIT = 0x00000001,
+ VK_QUEUE_COMPUTE_BIT = 0x00000002,
+ VK_QUEUE_TRANSFER_BIT = 0x00000004,
+ VK_QUEUE_SPARSE_BINDING_BIT = 0x00000008,
+ VK_QUEUE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkQueueFlagBits;
+typedef VkFlags VkQueueFlags;
+
+typedef enum VkMemoryPropertyFlagBits {
+ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT = 0x00000001,
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT = 0x00000002,
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT = 0x00000004,
+ VK_MEMORY_PROPERTY_HOST_CACHED_BIT = 0x00000008,
+ VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT = 0x00000010,
+ VK_MEMORY_PROPERTY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkMemoryPropertyFlagBits;
+typedef VkFlags VkMemoryPropertyFlags;
+
+typedef enum VkMemoryHeapFlagBits {
+ VK_MEMORY_HEAP_DEVICE_LOCAL_BIT = 0x00000001,
+ VK_MEMORY_HEAP_MULTI_INSTANCE_BIT_KHX = 0x00000002,
+ VK_MEMORY_HEAP_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkMemoryHeapFlagBits;
+typedef VkFlags VkMemoryHeapFlags;
+typedef VkFlags VkDeviceCreateFlags;
+typedef VkFlags VkDeviceQueueCreateFlags;
+
+typedef enum VkPipelineStageFlagBits {
+ VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT = 0x00000001,
+ VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT = 0x00000002,
+ VK_PIPELINE_STAGE_VERTEX_INPUT_BIT = 0x00000004,
+ VK_PIPELINE_STAGE_VERTEX_SHADER_BIT = 0x00000008,
+ VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT = 0x00000010,
+ VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT = 0x00000020,
+ VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT = 0x00000040,
+ VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT = 0x00000080,
+ VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT = 0x00000100,
+ VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT = 0x00000200,
+ VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT = 0x00000400,
+ VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT = 0x00000800,
+ VK_PIPELINE_STAGE_TRANSFER_BIT = 0x00001000,
+ VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT = 0x00002000,
+ VK_PIPELINE_STAGE_HOST_BIT = 0x00004000,
+ VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT = 0x00008000,
+ VK_PIPELINE_STAGE_ALL_COMMANDS_BIT = 0x00010000,
+ VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX = 0x00020000,
+ VK_PIPELINE_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkPipelineStageFlagBits;
+typedef VkFlags VkPipelineStageFlags;
+typedef VkFlags VkMemoryMapFlags;
+
+typedef enum VkImageAspectFlagBits {
+ VK_IMAGE_ASPECT_COLOR_BIT = 0x00000001,
+ VK_IMAGE_ASPECT_DEPTH_BIT = 0x00000002,
+ VK_IMAGE_ASPECT_STENCIL_BIT = 0x00000004,
+ VK_IMAGE_ASPECT_METADATA_BIT = 0x00000008,
+ VK_IMAGE_ASPECT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkImageAspectFlagBits;
+typedef VkFlags VkImageAspectFlags;
+
+typedef enum VkSparseImageFormatFlagBits {
+ VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT = 0x00000001,
+ VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT = 0x00000002,
+ VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT = 0x00000004,
+ VK_SPARSE_IMAGE_FORMAT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkSparseImageFormatFlagBits;
+typedef VkFlags VkSparseImageFormatFlags;
+
+typedef enum VkSparseMemoryBindFlagBits {
+ VK_SPARSE_MEMORY_BIND_METADATA_BIT = 0x00000001,
+ VK_SPARSE_MEMORY_BIND_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkSparseMemoryBindFlagBits;
+typedef VkFlags VkSparseMemoryBindFlags;
+
+typedef enum VkFenceCreateFlagBits {
+ VK_FENCE_CREATE_SIGNALED_BIT = 0x00000001,
+ VK_FENCE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkFenceCreateFlagBits;
+typedef VkFlags VkFenceCreateFlags;
+typedef VkFlags VkSemaphoreCreateFlags;
+typedef VkFlags VkEventCreateFlags;
+typedef VkFlags VkQueryPoolCreateFlags;
+
+typedef enum VkQueryPipelineStatisticFlagBits {
+ VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT = 0x00000001,
+ VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT = 0x00000002,
+ VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT = 0x00000004,
+ VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT = 0x00000008,
+ VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT = 0x00000010,
+ VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT = 0x00000020,
+ VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT = 0x00000040,
+ VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT = 0x00000080,
+ VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT = 0x00000100,
+ VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT = 0x00000200,
+ VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT = 0x00000400,
+ VK_QUERY_PIPELINE_STATISTIC_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkQueryPipelineStatisticFlagBits;
+typedef VkFlags VkQueryPipelineStatisticFlags;
+
+typedef enum VkQueryResultFlagBits {
+ VK_QUERY_RESULT_64_BIT = 0x00000001,
+ VK_QUERY_RESULT_WAIT_BIT = 0x00000002,
+ VK_QUERY_RESULT_WITH_AVAILABILITY_BIT = 0x00000004,
+ VK_QUERY_RESULT_PARTIAL_BIT = 0x00000008,
+ VK_QUERY_RESULT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkQueryResultFlagBits;
+typedef VkFlags VkQueryResultFlags;
+
+typedef enum VkBufferCreateFlagBits {
+ VK_BUFFER_CREATE_SPARSE_BINDING_BIT = 0x00000001,
+ VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002,
+ VK_BUFFER_CREATE_SPARSE_ALIASED_BIT = 0x00000004,
+ VK_BUFFER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkBufferCreateFlagBits;
+typedef VkFlags VkBufferCreateFlags;
+
+typedef enum VkBufferUsageFlagBits {
+ VK_BUFFER_USAGE_TRANSFER_SRC_BIT = 0x00000001,
+ VK_BUFFER_USAGE_TRANSFER_DST_BIT = 0x00000002,
+ VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000004,
+ VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT = 0x00000008,
+ VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT = 0x00000010,
+ VK_BUFFER_USAGE_STORAGE_BUFFER_BIT = 0x00000020,
+ VK_BUFFER_USAGE_INDEX_BUFFER_BIT = 0x00000040,
+ VK_BUFFER_USAGE_VERTEX_BUFFER_BIT = 0x00000080,
+ VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT = 0x00000100,
+ VK_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkBufferUsageFlagBits;
+typedef VkFlags VkBufferUsageFlags;
+typedef VkFlags VkBufferViewCreateFlags;
+typedef VkFlags VkImageViewCreateFlags;
+typedef VkFlags VkShaderModuleCreateFlags;
+typedef VkFlags VkPipelineCacheCreateFlags;
+
+typedef enum VkPipelineCreateFlagBits {
+ VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT = 0x00000001,
+ VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT = 0x00000002,
+ VK_PIPELINE_CREATE_DERIVATIVE_BIT = 0x00000004,
+ VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHX = 0x00000008,
+ VK_PIPELINE_CREATE_DISPATCH_BASE_KHX = 0x00000010,
+ VK_PIPELINE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkPipelineCreateFlagBits;
+typedef VkFlags VkPipelineCreateFlags;
+typedef VkFlags VkPipelineShaderStageCreateFlags;
+
+typedef enum VkShaderStageFlagBits {
+ VK_SHADER_STAGE_VERTEX_BIT = 0x00000001,
+ VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT = 0x00000002,
+ VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT = 0x00000004,
+ VK_SHADER_STAGE_GEOMETRY_BIT = 0x00000008,
+ VK_SHADER_STAGE_FRAGMENT_BIT = 0x00000010,
+ VK_SHADER_STAGE_COMPUTE_BIT = 0x00000020,
+ VK_SHADER_STAGE_ALL_GRAPHICS = 0x0000001F,
+ VK_SHADER_STAGE_ALL = 0x7FFFFFFF,
+ VK_SHADER_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkShaderStageFlagBits;
+typedef VkFlags VkPipelineVertexInputStateCreateFlags;
+typedef VkFlags VkPipelineInputAssemblyStateCreateFlags;
+typedef VkFlags VkPipelineTessellationStateCreateFlags;
+typedef VkFlags VkPipelineViewportStateCreateFlags;
+typedef VkFlags VkPipelineRasterizationStateCreateFlags;
+
+typedef enum VkCullModeFlagBits {
+ VK_CULL_MODE_NONE = 0,
+ VK_CULL_MODE_FRONT_BIT = 0x00000001,
+ VK_CULL_MODE_BACK_BIT = 0x00000002,
+ VK_CULL_MODE_FRONT_AND_BACK = 0x00000003,
+ VK_CULL_MODE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkCullModeFlagBits;
+typedef VkFlags VkCullModeFlags;
+typedef VkFlags VkPipelineMultisampleStateCreateFlags;
+typedef VkFlags VkPipelineDepthStencilStateCreateFlags;
+typedef VkFlags VkPipelineColorBlendStateCreateFlags;
+
+typedef enum VkColorComponentFlagBits {
+ VK_COLOR_COMPONENT_R_BIT = 0x00000001,
+ VK_COLOR_COMPONENT_G_BIT = 0x00000002,
+ VK_COLOR_COMPONENT_B_BIT = 0x00000004,
+ VK_COLOR_COMPONENT_A_BIT = 0x00000008,
+ VK_COLOR_COMPONENT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkColorComponentFlagBits;
+typedef VkFlags VkColorComponentFlags;
+typedef VkFlags VkPipelineDynamicStateCreateFlags;
+typedef VkFlags VkPipelineLayoutCreateFlags;
+typedef VkFlags VkShaderStageFlags;
+typedef VkFlags VkSamplerCreateFlags;
+
+typedef enum VkDescriptorSetLayoutCreateFlagBits {
+ VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR = 0x00000001,
+ VK_DESCRIPTOR_SET_LAYOUT_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkDescriptorSetLayoutCreateFlagBits;
+typedef VkFlags VkDescriptorSetLayoutCreateFlags;
+
+typedef enum VkDescriptorPoolCreateFlagBits {
+ VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT = 0x00000001,
+ VK_DESCRIPTOR_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkDescriptorPoolCreateFlagBits;
+typedef VkFlags VkDescriptorPoolCreateFlags;
+typedef VkFlags VkDescriptorPoolResetFlags;
+typedef VkFlags VkFramebufferCreateFlags;
+typedef VkFlags VkRenderPassCreateFlags;
+
+typedef enum VkAttachmentDescriptionFlagBits {
+ VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT = 0x00000001,
+ VK_ATTACHMENT_DESCRIPTION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkAttachmentDescriptionFlagBits;
+typedef VkFlags VkAttachmentDescriptionFlags;
+
+typedef enum VkSubpassDescriptionFlagBits {
+ VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX = 0x00000001,
+ VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX = 0x00000002,
+ VK_SUBPASS_DESCRIPTION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkSubpassDescriptionFlagBits;
+typedef VkFlags VkSubpassDescriptionFlags;
+
+typedef enum VkAccessFlagBits {
+ VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0x00000001,
+ VK_ACCESS_INDEX_READ_BIT = 0x00000002,
+ VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 0x00000004,
+ VK_ACCESS_UNIFORM_READ_BIT = 0x00000008,
+ VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 0x00000010,
+ VK_ACCESS_SHADER_READ_BIT = 0x00000020,
+ VK_ACCESS_SHADER_WRITE_BIT = 0x00000040,
+ VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 0x00000080,
+ VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 0x00000100,
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 0x00000200,
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 0x00000400,
+ VK_ACCESS_TRANSFER_READ_BIT = 0x00000800,
+ VK_ACCESS_TRANSFER_WRITE_BIT = 0x00001000,
+ VK_ACCESS_HOST_READ_BIT = 0x00002000,
+ VK_ACCESS_HOST_WRITE_BIT = 0x00004000,
+ VK_ACCESS_MEMORY_READ_BIT = 0x00008000,
+ VK_ACCESS_MEMORY_WRITE_BIT = 0x00010000,
+ VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX = 0x00020000,
+ VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX = 0x00040000,
+ VK_ACCESS_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkAccessFlagBits;
+typedef VkFlags VkAccessFlags;
+
+typedef enum VkDependencyFlagBits {
+ VK_DEPENDENCY_BY_REGION_BIT = 0x00000001,
+ VK_DEPENDENCY_VIEW_LOCAL_BIT_KHX = 0x00000002,
+ VK_DEPENDENCY_DEVICE_GROUP_BIT_KHX = 0x00000004,
+ VK_DEPENDENCY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkDependencyFlagBits;
+typedef VkFlags VkDependencyFlags;
+
+typedef enum VkCommandPoolCreateFlagBits {
+ VK_COMMAND_POOL_CREATE_TRANSIENT_BIT = 0x00000001,
+ VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT = 0x00000002,
+ VK_COMMAND_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkCommandPoolCreateFlagBits;
+typedef VkFlags VkCommandPoolCreateFlags;
+
+typedef enum VkCommandPoolResetFlagBits {
+ VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT = 0x00000001,
+ VK_COMMAND_POOL_RESET_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkCommandPoolResetFlagBits;
+typedef VkFlags VkCommandPoolResetFlags;
+
+typedef enum VkCommandBufferUsageFlagBits {
+ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT = 0x00000001,
+ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT = 0x00000002,
+ VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT = 0x00000004,
+ VK_COMMAND_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkCommandBufferUsageFlagBits;
+typedef VkFlags VkCommandBufferUsageFlags;
+
+typedef enum VkQueryControlFlagBits {
+ VK_QUERY_CONTROL_PRECISE_BIT = 0x00000001,
+ VK_QUERY_CONTROL_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkQueryControlFlagBits;
+typedef VkFlags VkQueryControlFlags;
+
+typedef enum VkCommandBufferResetFlagBits {
+ VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT = 0x00000001,
+ VK_COMMAND_BUFFER_RESET_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkCommandBufferResetFlagBits;
+typedef VkFlags VkCommandBufferResetFlags;
+
+typedef enum VkStencilFaceFlagBits {
+ VK_STENCIL_FACE_FRONT_BIT = 0x00000001,
+ VK_STENCIL_FACE_BACK_BIT = 0x00000002,
+ VK_STENCIL_FRONT_AND_BACK = 0x00000003,
+ VK_STENCIL_FACE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkStencilFaceFlagBits;
+typedef VkFlags VkStencilFaceFlags;
+
+typedef void* (VKAPI_PTR *PFN_vkAllocationFunction)(
+ void* pUserData,
+ size_t size,
+ size_t alignment,
+ VkSystemAllocationScope allocationScope);
+
+typedef void* (VKAPI_PTR *PFN_vkReallocationFunction)(
+ void* pUserData,
+ void* pOriginal,
+ size_t size,
+ size_t alignment,
+ VkSystemAllocationScope allocationScope);
+
+typedef void (VKAPI_PTR *PFN_vkFreeFunction)(
+ void* pUserData,
+ void* pMemory);
+
+typedef void (VKAPI_PTR *PFN_vkInternalAllocationNotification)(
+ void* pUserData,
+ size_t size,
+ VkInternalAllocationType allocationType,
+ VkSystemAllocationScope allocationScope);
+
+typedef void (VKAPI_PTR *PFN_vkInternalFreeNotification)(
+ void* pUserData,
+ size_t size,
+ VkInternalAllocationType allocationType,
+ VkSystemAllocationScope allocationScope);
+
+typedef void (VKAPI_PTR *PFN_vkVoidFunction)(void);
+
+typedef struct VkApplicationInfo {
+ VkStructureType sType;
+ const void* pNext;
+ const char* pApplicationName;
+ uint32_t applicationVersion;
+ const char* pEngineName;
+ uint32_t engineVersion;
+ uint32_t apiVersion;
+} VkApplicationInfo;
+
+typedef struct VkInstanceCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkInstanceCreateFlags flags;
+ const VkApplicationInfo* pApplicationInfo;
+ uint32_t enabledLayerCount;
+ const char* const* ppEnabledLayerNames;
+ uint32_t enabledExtensionCount;
+ const char* const* ppEnabledExtensionNames;
+} VkInstanceCreateInfo;
+
+typedef struct VkAllocationCallbacks {
+ void* pUserData;
+ PFN_vkAllocationFunction pfnAllocation;
+ PFN_vkReallocationFunction pfnReallocation;
+ PFN_vkFreeFunction pfnFree;
+ PFN_vkInternalAllocationNotification pfnInternalAllocation;
+ PFN_vkInternalFreeNotification pfnInternalFree;
+} VkAllocationCallbacks;
+
+typedef struct VkPhysicalDeviceFeatures {
+ VkBool32 robustBufferAccess;
+ VkBool32 fullDrawIndexUint32;
+ VkBool32 imageCubeArray;
+ VkBool32 independentBlend;
+ VkBool32 geometryShader;
+ VkBool32 tessellationShader;
+ VkBool32 sampleRateShading;
+ VkBool32 dualSrcBlend;
+ VkBool32 logicOp;
+ VkBool32 multiDrawIndirect;
+ VkBool32 drawIndirectFirstInstance;
+ VkBool32 depthClamp;
+ VkBool32 depthBiasClamp;
+ VkBool32 fillModeNonSolid;
+ VkBool32 depthBounds;
+ VkBool32 wideLines;
+ VkBool32 largePoints;
+ VkBool32 alphaToOne;
+ VkBool32 multiViewport;
+ VkBool32 samplerAnisotropy;
+ VkBool32 textureCompressionETC2;
+ VkBool32 textureCompressionASTC_LDR;
+ VkBool32 textureCompressionBC;
+ VkBool32 occlusionQueryPrecise;
+ VkBool32 pipelineStatisticsQuery;
+ VkBool32 vertexPipelineStoresAndAtomics;
+ VkBool32 fragmentStoresAndAtomics;
+ VkBool32 shaderTessellationAndGeometryPointSize;
+ VkBool32 shaderImageGatherExtended;
+ VkBool32 shaderStorageImageExtendedFormats;
+ VkBool32 shaderStorageImageMultisample;
+ VkBool32 shaderStorageImageReadWithoutFormat;
+ VkBool32 shaderStorageImageWriteWithoutFormat;
+ VkBool32 shaderUniformBufferArrayDynamicIndexing;
+ VkBool32 shaderSampledImageArrayDynamicIndexing;
+ VkBool32 shaderStorageBufferArrayDynamicIndexing;
+ VkBool32 shaderStorageImageArrayDynamicIndexing;
+ VkBool32 shaderClipDistance;
+ VkBool32 shaderCullDistance;
+ VkBool32 shaderFloat64;
+ VkBool32 shaderInt64;
+ VkBool32 shaderInt16;
+ VkBool32 shaderResourceResidency;
+ VkBool32 shaderResourceMinLod;
+ VkBool32 sparseBinding;
+ VkBool32 sparseResidencyBuffer;
+ VkBool32 sparseResidencyImage2D;
+ VkBool32 sparseResidencyImage3D;
+ VkBool32 sparseResidency2Samples;
+ VkBool32 sparseResidency4Samples;
+ VkBool32 sparseResidency8Samples;
+ VkBool32 sparseResidency16Samples;
+ VkBool32 sparseResidencyAliased;
+ VkBool32 variableMultisampleRate;
+ VkBool32 inheritedQueries;
+} VkPhysicalDeviceFeatures;
+
+typedef struct VkFormatProperties {
+ VkFormatFeatureFlags linearTilingFeatures;
+ VkFormatFeatureFlags optimalTilingFeatures;
+ VkFormatFeatureFlags bufferFeatures;
+} VkFormatProperties;
+
+typedef struct VkExtent3D {
+ uint32_t width;
+ uint32_t height;
+ uint32_t depth;
+} VkExtent3D;
+
+typedef struct VkImageFormatProperties {
+ VkExtent3D maxExtent;
+ uint32_t maxMipLevels;
+ uint32_t maxArrayLayers;
+ VkSampleCountFlags sampleCounts;
+ VkDeviceSize maxResourceSize;
+} VkImageFormatProperties;
+
+typedef struct VkPhysicalDeviceLimits {
+ uint32_t maxImageDimension1D;
+ uint32_t maxImageDimension2D;
+ uint32_t maxImageDimension3D;
+ uint32_t maxImageDimensionCube;
+ uint32_t maxImageArrayLayers;
+ uint32_t maxTexelBufferElements;
+ uint32_t maxUniformBufferRange;
+ uint32_t maxStorageBufferRange;
+ uint32_t maxPushConstantsSize;
+ uint32_t maxMemoryAllocationCount;
+ uint32_t maxSamplerAllocationCount;
+ VkDeviceSize bufferImageGranularity;
+ VkDeviceSize sparseAddressSpaceSize;
+ uint32_t maxBoundDescriptorSets;
+ uint32_t maxPerStageDescriptorSamplers;
+ uint32_t maxPerStageDescriptorUniformBuffers;
+ uint32_t maxPerStageDescriptorStorageBuffers;
+ uint32_t maxPerStageDescriptorSampledImages;
+ uint32_t maxPerStageDescriptorStorageImages;
+ uint32_t maxPerStageDescriptorInputAttachments;
+ uint32_t maxPerStageResources;
+ uint32_t maxDescriptorSetSamplers;
+ uint32_t maxDescriptorSetUniformBuffers;
+ uint32_t maxDescriptorSetUniformBuffersDynamic;
+ uint32_t maxDescriptorSetStorageBuffers;
+ uint32_t maxDescriptorSetStorageBuffersDynamic;
+ uint32_t maxDescriptorSetSampledImages;
+ uint32_t maxDescriptorSetStorageImages;
+ uint32_t maxDescriptorSetInputAttachments;
+ uint32_t maxVertexInputAttributes;
+ uint32_t maxVertexInputBindings;
+ uint32_t maxVertexInputAttributeOffset;
+ uint32_t maxVertexInputBindingStride;
+ uint32_t maxVertexOutputComponents;
+ uint32_t maxTessellationGenerationLevel;
+ uint32_t maxTessellationPatchSize;
+ uint32_t maxTessellationControlPerVertexInputComponents;
+ uint32_t maxTessellationControlPerVertexOutputComponents;
+ uint32_t maxTessellationControlPerPatchOutputComponents;
+ uint32_t maxTessellationControlTotalOutputComponents;
+ uint32_t maxTessellationEvaluationInputComponents;
+ uint32_t maxTessellationEvaluationOutputComponents;
+ uint32_t maxGeometryShaderInvocations;
+ uint32_t maxGeometryInputComponents;
+ uint32_t maxGeometryOutputComponents;
+ uint32_t maxGeometryOutputVertices;
+ uint32_t maxGeometryTotalOutputComponents;
+ uint32_t maxFragmentInputComponents;
+ uint32_t maxFragmentOutputAttachments;
+ uint32_t maxFragmentDualSrcAttachments;
+ uint32_t maxFragmentCombinedOutputResources;
+ uint32_t maxComputeSharedMemorySize;
+ uint32_t maxComputeWorkGroupCount[3];
+ uint32_t maxComputeWorkGroupInvocations;
+ uint32_t maxComputeWorkGroupSize[3];
+ uint32_t subPixelPrecisionBits;
+ uint32_t subTexelPrecisionBits;
+ uint32_t mipmapPrecisionBits;
+ uint32_t maxDrawIndexedIndexValue;
+ uint32_t maxDrawIndirectCount;
+ float maxSamplerLodBias;
+ float maxSamplerAnisotropy;
+ uint32_t maxViewports;
+ uint32_t maxViewportDimensions[2];
+ float viewportBoundsRange[2];
+ uint32_t viewportSubPixelBits;
+ size_t minMemoryMapAlignment;
+ VkDeviceSize minTexelBufferOffsetAlignment;
+ VkDeviceSize minUniformBufferOffsetAlignment;
+ VkDeviceSize minStorageBufferOffsetAlignment;
+ int32_t minTexelOffset;
+ uint32_t maxTexelOffset;
+ int32_t minTexelGatherOffset;
+ uint32_t maxTexelGatherOffset;
+ float minInterpolationOffset;
+ float maxInterpolationOffset;
+ uint32_t subPixelInterpolationOffsetBits;
+ uint32_t maxFramebufferWidth;
+ uint32_t maxFramebufferHeight;
+ uint32_t maxFramebufferLayers;
+ VkSampleCountFlags framebufferColorSampleCounts;
+ VkSampleCountFlags framebufferDepthSampleCounts;
+ VkSampleCountFlags framebufferStencilSampleCounts;
+ VkSampleCountFlags framebufferNoAttachmentsSampleCounts;
+ uint32_t maxColorAttachments;
+ VkSampleCountFlags sampledImageColorSampleCounts;
+ VkSampleCountFlags sampledImageIntegerSampleCounts;
+ VkSampleCountFlags sampledImageDepthSampleCounts;
+ VkSampleCountFlags sampledImageStencilSampleCounts;
+ VkSampleCountFlags storageImageSampleCounts;
+ uint32_t maxSampleMaskWords;
+ VkBool32 timestampComputeAndGraphics;
+ float timestampPeriod;
+ uint32_t maxClipDistances;
+ uint32_t maxCullDistances;
+ uint32_t maxCombinedClipAndCullDistances;
+ uint32_t discreteQueuePriorities;
+ float pointSizeRange[2];
+ float lineWidthRange[2];
+ float pointSizeGranularity;
+ float lineWidthGranularity;
+ VkBool32 strictLines;
+ VkBool32 standardSampleLocations;
+ VkDeviceSize optimalBufferCopyOffsetAlignment;
+ VkDeviceSize optimalBufferCopyRowPitchAlignment;
+ VkDeviceSize nonCoherentAtomSize;
+} VkPhysicalDeviceLimits;
+
+typedef struct VkPhysicalDeviceSparseProperties {
+ VkBool32 residencyStandard2DBlockShape;
+ VkBool32 residencyStandard2DMultisampleBlockShape;
+ VkBool32 residencyStandard3DBlockShape;
+ VkBool32 residencyAlignedMipSize;
+ VkBool32 residencyNonResidentStrict;
+} VkPhysicalDeviceSparseProperties;
+
+typedef struct VkPhysicalDeviceProperties {
+ uint32_t apiVersion;
+ uint32_t driverVersion;
+ uint32_t vendorID;
+ uint32_t deviceID;
+ VkPhysicalDeviceType deviceType;
+ char deviceName[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE];
+ uint8_t pipelineCacheUUID[VK_UUID_SIZE];
+ VkPhysicalDeviceLimits limits;
+ VkPhysicalDeviceSparseProperties sparseProperties;
+} VkPhysicalDeviceProperties;
+
+typedef struct VkQueueFamilyProperties {
+ VkQueueFlags queueFlags;
+ uint32_t queueCount;
+ uint32_t timestampValidBits;
+ VkExtent3D minImageTransferGranularity;
+} VkQueueFamilyProperties;
+
+typedef struct VkMemoryType {
+ VkMemoryPropertyFlags propertyFlags;
+ uint32_t heapIndex;
+} VkMemoryType;
+
+typedef struct VkMemoryHeap {
+ VkDeviceSize size;
+ VkMemoryHeapFlags flags;
+} VkMemoryHeap;
+
+typedef struct VkPhysicalDeviceMemoryProperties {
+ uint32_t memoryTypeCount;
+ VkMemoryType memoryTypes[VK_MAX_MEMORY_TYPES];
+ uint32_t memoryHeapCount;
+ VkMemoryHeap memoryHeaps[VK_MAX_MEMORY_HEAPS];
+} VkPhysicalDeviceMemoryProperties;
+
+typedef struct VkDeviceQueueCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceQueueCreateFlags flags;
+ uint32_t queueFamilyIndex;
+ uint32_t queueCount;
+ const float* pQueuePriorities;
+} VkDeviceQueueCreateInfo;
+
+typedef struct VkDeviceCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceCreateFlags flags;
+ uint32_t queueCreateInfoCount;
+ const VkDeviceQueueCreateInfo* pQueueCreateInfos;
+ uint32_t enabledLayerCount;
+ const char* const* ppEnabledLayerNames;
+ uint32_t enabledExtensionCount;
+ const char* const* ppEnabledExtensionNames;
+ const VkPhysicalDeviceFeatures* pEnabledFeatures;
+} VkDeviceCreateInfo;
+
+typedef struct VkExtensionProperties {
+ char extensionName[VK_MAX_EXTENSION_NAME_SIZE];
+ uint32_t specVersion;
+} VkExtensionProperties;
+
+typedef struct VkLayerProperties {
+ char layerName[VK_MAX_EXTENSION_NAME_SIZE];
+ uint32_t specVersion;
+ uint32_t implementationVersion;
+ char description[VK_MAX_DESCRIPTION_SIZE];
+} VkLayerProperties;
+
+typedef struct VkSubmitInfo {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t waitSemaphoreCount;
+ const VkSemaphore* pWaitSemaphores;
+ const VkPipelineStageFlags* pWaitDstStageMask;
+ uint32_t commandBufferCount;
+ const VkCommandBuffer* pCommandBuffers;
+ uint32_t signalSemaphoreCount;
+ const VkSemaphore* pSignalSemaphores;
+} VkSubmitInfo;
+
+typedef struct VkMemoryAllocateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceSize allocationSize;
+ uint32_t memoryTypeIndex;
+} VkMemoryAllocateInfo;
+
+typedef struct VkMappedMemoryRange {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceMemory memory;
+ VkDeviceSize offset;
+ VkDeviceSize size;
+} VkMappedMemoryRange;
+
+typedef struct VkMemoryRequirements {
+ VkDeviceSize size;
+ VkDeviceSize alignment;
+ uint32_t memoryTypeBits;
+} VkMemoryRequirements;
+
+typedef struct VkSparseImageFormatProperties {
+ VkImageAspectFlags aspectMask;
+ VkExtent3D imageGranularity;
+ VkSparseImageFormatFlags flags;
+} VkSparseImageFormatProperties;
+
+typedef struct VkSparseImageMemoryRequirements {
+ VkSparseImageFormatProperties formatProperties;
+ uint32_t imageMipTailFirstLod;
+ VkDeviceSize imageMipTailSize;
+ VkDeviceSize imageMipTailOffset;
+ VkDeviceSize imageMipTailStride;
+} VkSparseImageMemoryRequirements;
+
+typedef struct VkSparseMemoryBind {
+ VkDeviceSize resourceOffset;
+ VkDeviceSize size;
+ VkDeviceMemory memory;
+ VkDeviceSize memoryOffset;
+ VkSparseMemoryBindFlags flags;
+} VkSparseMemoryBind;
+
+typedef struct VkSparseBufferMemoryBindInfo {
+ VkBuffer buffer;
+ uint32_t bindCount;
+ const VkSparseMemoryBind* pBinds;
+} VkSparseBufferMemoryBindInfo;
+
+typedef struct VkSparseImageOpaqueMemoryBindInfo {
+ VkImage image;
+ uint32_t bindCount;
+ const VkSparseMemoryBind* pBinds;
+} VkSparseImageOpaqueMemoryBindInfo;
+
+typedef struct VkImageSubresource {
+ VkImageAspectFlags aspectMask;
+ uint32_t mipLevel;
+ uint32_t arrayLayer;
+} VkImageSubresource;
+
+typedef struct VkOffset3D {
+ int32_t x;
+ int32_t y;
+ int32_t z;
+} VkOffset3D;
+
+typedef struct VkSparseImageMemoryBind {
+ VkImageSubresource subresource;
+ VkOffset3D offset;
+ VkExtent3D extent;
+ VkDeviceMemory memory;
+ VkDeviceSize memoryOffset;
+ VkSparseMemoryBindFlags flags;
+} VkSparseImageMemoryBind;
+
+typedef struct VkSparseImageMemoryBindInfo {
+ VkImage image;
+ uint32_t bindCount;
+ const VkSparseImageMemoryBind* pBinds;
+} VkSparseImageMemoryBindInfo;
+
+typedef struct VkBindSparseInfo {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t waitSemaphoreCount;
+ const VkSemaphore* pWaitSemaphores;
+ uint32_t bufferBindCount;
+ const VkSparseBufferMemoryBindInfo* pBufferBinds;
+ uint32_t imageOpaqueBindCount;
+ const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
+ uint32_t imageBindCount;
+ const VkSparseImageMemoryBindInfo* pImageBinds;
+ uint32_t signalSemaphoreCount;
+ const VkSemaphore* pSignalSemaphores;
+} VkBindSparseInfo;
+
+typedef struct VkFenceCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkFenceCreateFlags flags;
+} VkFenceCreateInfo;
+
+typedef struct VkSemaphoreCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkSemaphoreCreateFlags flags;
+} VkSemaphoreCreateInfo;
+
+typedef struct VkEventCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkEventCreateFlags flags;
+} VkEventCreateInfo;
+
+typedef struct VkQueryPoolCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkQueryPoolCreateFlags flags;
+ VkQueryType queryType;
+ uint32_t queryCount;
+ VkQueryPipelineStatisticFlags pipelineStatistics;
+} VkQueryPoolCreateInfo;
+
+typedef struct VkBufferCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkBufferCreateFlags flags;
+ VkDeviceSize size;
+ VkBufferUsageFlags usage;
+ VkSharingMode sharingMode;
+ uint32_t queueFamilyIndexCount;
+ const uint32_t* pQueueFamilyIndices;
+} VkBufferCreateInfo;
+
+typedef struct VkBufferViewCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkBufferViewCreateFlags flags;
+ VkBuffer buffer;
+ VkFormat format;
+ VkDeviceSize offset;
+ VkDeviceSize range;
+} VkBufferViewCreateInfo;
+
+typedef struct VkImageCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkImageCreateFlags flags;
+ VkImageType imageType;
+ VkFormat format;
+ VkExtent3D extent;
+ uint32_t mipLevels;
+ uint32_t arrayLayers;
+ VkSampleCountFlagBits samples;
+ VkImageTiling tiling;
+ VkImageUsageFlags usage;
+ VkSharingMode sharingMode;
+ uint32_t queueFamilyIndexCount;
+ const uint32_t* pQueueFamilyIndices;
+ VkImageLayout initialLayout;
+} VkImageCreateInfo;
+
+typedef struct VkSubresourceLayout {
+ VkDeviceSize offset;
+ VkDeviceSize size;
+ VkDeviceSize rowPitch;
+ VkDeviceSize arrayPitch;
+ VkDeviceSize depthPitch;
+} VkSubresourceLayout;
+
+typedef struct VkComponentMapping {
+ VkComponentSwizzle r;
+ VkComponentSwizzle g;
+ VkComponentSwizzle b;
+ VkComponentSwizzle a;
+} VkComponentMapping;
+
+typedef struct VkImageSubresourceRange {
+ VkImageAspectFlags aspectMask;
+ uint32_t baseMipLevel;
+ uint32_t levelCount;
+ uint32_t baseArrayLayer;
+ uint32_t layerCount;
+} VkImageSubresourceRange;
+
+typedef struct VkImageViewCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkImageViewCreateFlags flags;
+ VkImage image;
+ VkImageViewType viewType;
+ VkFormat format;
+ VkComponentMapping components;
+ VkImageSubresourceRange subresourceRange;
+} VkImageViewCreateInfo;
+
+typedef struct VkShaderModuleCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkShaderModuleCreateFlags flags;
+ size_t codeSize;
+ const uint32_t* pCode;
+} VkShaderModuleCreateInfo;
+
+typedef struct VkPipelineCacheCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineCacheCreateFlags flags;
+ size_t initialDataSize;
+ const void* pInitialData;
+} VkPipelineCacheCreateInfo;
+
+typedef struct VkSpecializationMapEntry {
+ uint32_t constantID;
+ uint32_t offset;
+ size_t size;
+} VkSpecializationMapEntry;
+
+typedef struct VkSpecializationInfo {
+ uint32_t mapEntryCount;
+ const VkSpecializationMapEntry* pMapEntries;
+ size_t dataSize;
+ const void* pData;
+} VkSpecializationInfo;
+
+typedef struct VkPipelineShaderStageCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineShaderStageCreateFlags flags;
+ VkShaderStageFlagBits stage;
+ VkShaderModule module;
+ const char* pName;
+ const VkSpecializationInfo* pSpecializationInfo;
+} VkPipelineShaderStageCreateInfo;
+
+typedef struct VkVertexInputBindingDescription {
+ uint32_t binding;
+ uint32_t stride;
+ VkVertexInputRate inputRate;
+} VkVertexInputBindingDescription;
+
+typedef struct VkVertexInputAttributeDescription {
+ uint32_t location;
+ uint32_t binding;
+ VkFormat format;
+ uint32_t offset;
+} VkVertexInputAttributeDescription;
+
+typedef struct VkPipelineVertexInputStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineVertexInputStateCreateFlags flags;
+ uint32_t vertexBindingDescriptionCount;
+ const VkVertexInputBindingDescription* pVertexBindingDescriptions;
+ uint32_t vertexAttributeDescriptionCount;
+ const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
+} VkPipelineVertexInputStateCreateInfo;
+
+typedef struct VkPipelineInputAssemblyStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineInputAssemblyStateCreateFlags flags;
+ VkPrimitiveTopology topology;
+ VkBool32 primitiveRestartEnable;
+} VkPipelineInputAssemblyStateCreateInfo;
+
+typedef struct VkPipelineTessellationStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineTessellationStateCreateFlags flags;
+ uint32_t patchControlPoints;
+} VkPipelineTessellationStateCreateInfo;
+
+typedef struct VkViewport {
+ float x;
+ float y;
+ float width;
+ float height;
+ float minDepth;
+ float maxDepth;
+} VkViewport;
+
+typedef struct VkOffset2D {
+ int32_t x;
+ int32_t y;
+} VkOffset2D;
+
+typedef struct VkExtent2D {
+ uint32_t width;
+ uint32_t height;
+} VkExtent2D;
+
+typedef struct VkRect2D {
+ VkOffset2D offset;
+ VkExtent2D extent;
+} VkRect2D;
+
+typedef struct VkPipelineViewportStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineViewportStateCreateFlags flags;
+ uint32_t viewportCount;
+ const VkViewport* pViewports;
+ uint32_t scissorCount;
+ const VkRect2D* pScissors;
+} VkPipelineViewportStateCreateInfo;
+
+typedef struct VkPipelineRasterizationStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineRasterizationStateCreateFlags flags;
+ VkBool32 depthClampEnable;
+ VkBool32 rasterizerDiscardEnable;
+ VkPolygonMode polygonMode;
+ VkCullModeFlags cullMode;
+ VkFrontFace frontFace;
+ VkBool32 depthBiasEnable;
+ float depthBiasConstantFactor;
+ float depthBiasClamp;
+ float depthBiasSlopeFactor;
+ float lineWidth;
+} VkPipelineRasterizationStateCreateInfo;
+
+typedef struct VkPipelineMultisampleStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineMultisampleStateCreateFlags flags;
+ VkSampleCountFlagBits rasterizationSamples;
+ VkBool32 sampleShadingEnable;
+ float minSampleShading;
+ const VkSampleMask* pSampleMask;
+ VkBool32 alphaToCoverageEnable;
+ VkBool32 alphaToOneEnable;
+} VkPipelineMultisampleStateCreateInfo;
+
+typedef struct VkStencilOpState {
+ VkStencilOp failOp;
+ VkStencilOp passOp;
+ VkStencilOp depthFailOp;
+ VkCompareOp compareOp;
+ uint32_t compareMask;
+ uint32_t writeMask;
+ uint32_t reference;
+} VkStencilOpState;
+
+typedef struct VkPipelineDepthStencilStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineDepthStencilStateCreateFlags flags;
+ VkBool32 depthTestEnable;
+ VkBool32 depthWriteEnable;
+ VkCompareOp depthCompareOp;
+ VkBool32 depthBoundsTestEnable;
+ VkBool32 stencilTestEnable;
+ VkStencilOpState front;
+ VkStencilOpState back;
+ float minDepthBounds;
+ float maxDepthBounds;
+} VkPipelineDepthStencilStateCreateInfo;
+
+typedef struct VkPipelineColorBlendAttachmentState {
+ VkBool32 blendEnable;
+ VkBlendFactor srcColorBlendFactor;
+ VkBlendFactor dstColorBlendFactor;
+ VkBlendOp colorBlendOp;
+ VkBlendFactor srcAlphaBlendFactor;
+ VkBlendFactor dstAlphaBlendFactor;
+ VkBlendOp alphaBlendOp;
+ VkColorComponentFlags colorWriteMask;
+} VkPipelineColorBlendAttachmentState;
+
+typedef struct VkPipelineColorBlendStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineColorBlendStateCreateFlags flags;
+ VkBool32 logicOpEnable;
+ VkLogicOp logicOp;
+ uint32_t attachmentCount;
+ const VkPipelineColorBlendAttachmentState* pAttachments;
+ float blendConstants[4];
+} VkPipelineColorBlendStateCreateInfo;
+
+typedef struct VkPipelineDynamicStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineDynamicStateCreateFlags flags;
+ uint32_t dynamicStateCount;
+ const VkDynamicState* pDynamicStates;
+} VkPipelineDynamicStateCreateInfo;
+
+typedef struct VkGraphicsPipelineCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineCreateFlags flags;
+ uint32_t stageCount;
+ const VkPipelineShaderStageCreateInfo* pStages;
+ const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
+ const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
+ const VkPipelineTessellationStateCreateInfo* pTessellationState;
+ const VkPipelineViewportStateCreateInfo* pViewportState;
+ const VkPipelineRasterizationStateCreateInfo* pRasterizationState;
+ const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
+ const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
+ const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
+ const VkPipelineDynamicStateCreateInfo* pDynamicState;
+ VkPipelineLayout layout;
+ VkRenderPass renderPass;
+ uint32_t subpass;
+ VkPipeline basePipelineHandle;
+ int32_t basePipelineIndex;
+} VkGraphicsPipelineCreateInfo;
+
+typedef struct VkComputePipelineCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineCreateFlags flags;
+ VkPipelineShaderStageCreateInfo stage;
+ VkPipelineLayout layout;
+ VkPipeline basePipelineHandle;
+ int32_t basePipelineIndex;
+} VkComputePipelineCreateInfo;
+
+typedef struct VkPushConstantRange {
+ VkShaderStageFlags stageFlags;
+ uint32_t offset;
+ uint32_t size;
+} VkPushConstantRange;
+
+typedef struct VkPipelineLayoutCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineLayoutCreateFlags flags;
+ uint32_t setLayoutCount;
+ const VkDescriptorSetLayout* pSetLayouts;
+ uint32_t pushConstantRangeCount;
+ const VkPushConstantRange* pPushConstantRanges;
+} VkPipelineLayoutCreateInfo;
+
+typedef struct VkSamplerCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkSamplerCreateFlags flags;
+ VkFilter magFilter;
+ VkFilter minFilter;
+ VkSamplerMipmapMode mipmapMode;
+ VkSamplerAddressMode addressModeU;
+ VkSamplerAddressMode addressModeV;
+ VkSamplerAddressMode addressModeW;
+ float mipLodBias;
+ VkBool32 anisotropyEnable;
+ float maxAnisotropy;
+ VkBool32 compareEnable;
+ VkCompareOp compareOp;
+ float minLod;
+ float maxLod;
+ VkBorderColor borderColor;
+ VkBool32 unnormalizedCoordinates;
+} VkSamplerCreateInfo;
+
+typedef struct VkDescriptorSetLayoutBinding {
+ uint32_t binding;
+ VkDescriptorType descriptorType;
+ uint32_t descriptorCount;
+ VkShaderStageFlags stageFlags;
+ const VkSampler* pImmutableSamplers;
+} VkDescriptorSetLayoutBinding;
+
+typedef struct VkDescriptorSetLayoutCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkDescriptorSetLayoutCreateFlags flags;
+ uint32_t bindingCount;
+ const VkDescriptorSetLayoutBinding* pBindings;
+} VkDescriptorSetLayoutCreateInfo;
+
+typedef struct VkDescriptorPoolSize {
+ VkDescriptorType type;
+ uint32_t descriptorCount;
+} VkDescriptorPoolSize;
+
+typedef struct VkDescriptorPoolCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkDescriptorPoolCreateFlags flags;
+ uint32_t maxSets;
+ uint32_t poolSizeCount;
+ const VkDescriptorPoolSize* pPoolSizes;
+} VkDescriptorPoolCreateInfo;
+
+typedef struct VkDescriptorSetAllocateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkDescriptorPool descriptorPool;
+ uint32_t descriptorSetCount;
+ const VkDescriptorSetLayout* pSetLayouts;
+} VkDescriptorSetAllocateInfo;
+
+typedef struct VkDescriptorImageInfo {
+ VkSampler sampler;
+ VkImageView imageView;
+ VkImageLayout imageLayout;
+} VkDescriptorImageInfo;
+
+typedef struct VkDescriptorBufferInfo {
+ VkBuffer buffer;
+ VkDeviceSize offset;
+ VkDeviceSize range;
+} VkDescriptorBufferInfo;
+
+typedef struct VkWriteDescriptorSet {
+ VkStructureType sType;
+ const void* pNext;
+ VkDescriptorSet dstSet;
+ uint32_t dstBinding;
+ uint32_t dstArrayElement;
+ uint32_t descriptorCount;
+ VkDescriptorType descriptorType;
+ const VkDescriptorImageInfo* pImageInfo;
+ const VkDescriptorBufferInfo* pBufferInfo;
+ const VkBufferView* pTexelBufferView;
+} VkWriteDescriptorSet;
+
+typedef struct VkCopyDescriptorSet {
+ VkStructureType sType;
+ const void* pNext;
+ VkDescriptorSet srcSet;
+ uint32_t srcBinding;
+ uint32_t srcArrayElement;
+ VkDescriptorSet dstSet;
+ uint32_t dstBinding;
+ uint32_t dstArrayElement;
+ uint32_t descriptorCount;
+} VkCopyDescriptorSet;
+
+typedef struct VkFramebufferCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkFramebufferCreateFlags flags;
+ VkRenderPass renderPass;
+ uint32_t attachmentCount;
+ const VkImageView* pAttachments;
+ uint32_t width;
+ uint32_t height;
+ uint32_t layers;
+} VkFramebufferCreateInfo;
+
+typedef struct VkAttachmentDescription {
+ VkAttachmentDescriptionFlags flags;
+ VkFormat format;
+ VkSampleCountFlagBits samples;
+ VkAttachmentLoadOp loadOp;
+ VkAttachmentStoreOp storeOp;
+ VkAttachmentLoadOp stencilLoadOp;
+ VkAttachmentStoreOp stencilStoreOp;
+ VkImageLayout initialLayout;
+ VkImageLayout finalLayout;
+} VkAttachmentDescription;
+
+typedef struct VkAttachmentReference {
+ uint32_t attachment;
+ VkImageLayout layout;
+} VkAttachmentReference;
+
+typedef struct VkSubpassDescription {
+ VkSubpassDescriptionFlags flags;
+ VkPipelineBindPoint pipelineBindPoint;
+ uint32_t inputAttachmentCount;
+ const VkAttachmentReference* pInputAttachments;
+ uint32_t colorAttachmentCount;
+ const VkAttachmentReference* pColorAttachments;
+ const VkAttachmentReference* pResolveAttachments;
+ const VkAttachmentReference* pDepthStencilAttachment;
+ uint32_t preserveAttachmentCount;
+ const uint32_t* pPreserveAttachments;
+} VkSubpassDescription;
+
+typedef struct VkSubpassDependency {
+ uint32_t srcSubpass;
+ uint32_t dstSubpass;
+ VkPipelineStageFlags srcStageMask;
+ VkPipelineStageFlags dstStageMask;
+ VkAccessFlags srcAccessMask;
+ VkAccessFlags dstAccessMask;
+ VkDependencyFlags dependencyFlags;
+} VkSubpassDependency;
+
+typedef struct VkRenderPassCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkRenderPassCreateFlags flags;
+ uint32_t attachmentCount;
+ const VkAttachmentDescription* pAttachments;
+ uint32_t subpassCount;
+ const VkSubpassDescription* pSubpasses;
+ uint32_t dependencyCount;
+ const VkSubpassDependency* pDependencies;
+} VkRenderPassCreateInfo;
+
+typedef struct VkCommandPoolCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkCommandPoolCreateFlags flags;
+ uint32_t queueFamilyIndex;
+} VkCommandPoolCreateInfo;
+
+typedef struct VkCommandBufferAllocateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkCommandPool commandPool;
+ VkCommandBufferLevel level;
+ uint32_t commandBufferCount;
+} VkCommandBufferAllocateInfo;
+
+typedef struct VkCommandBufferInheritanceInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkRenderPass renderPass;
+ uint32_t subpass;
+ VkFramebuffer framebuffer;
+ VkBool32 occlusionQueryEnable;
+ VkQueryControlFlags queryFlags;
+ VkQueryPipelineStatisticFlags pipelineStatistics;
+} VkCommandBufferInheritanceInfo;
+
+typedef struct VkCommandBufferBeginInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkCommandBufferUsageFlags flags;
+ const VkCommandBufferInheritanceInfo* pInheritanceInfo;
+} VkCommandBufferBeginInfo;
+
+typedef struct VkBufferCopy {
+ VkDeviceSize srcOffset;
+ VkDeviceSize dstOffset;
+ VkDeviceSize size;
+} VkBufferCopy;
+
+typedef struct VkImageSubresourceLayers {
+ VkImageAspectFlags aspectMask;
+ uint32_t mipLevel;
+ uint32_t baseArrayLayer;
+ uint32_t layerCount;
+} VkImageSubresourceLayers;
+
+typedef struct VkImageCopy {
+ VkImageSubresourceLayers srcSubresource;
+ VkOffset3D srcOffset;
+ VkImageSubresourceLayers dstSubresource;
+ VkOffset3D dstOffset;
+ VkExtent3D extent;
+} VkImageCopy;
+
+typedef struct VkImageBlit {
+ VkImageSubresourceLayers srcSubresource;
+ VkOffset3D srcOffsets[2];
+ VkImageSubresourceLayers dstSubresource;
+ VkOffset3D dstOffsets[2];
+} VkImageBlit;
+
+typedef struct VkBufferImageCopy {
+ VkDeviceSize bufferOffset;
+ uint32_t bufferRowLength;
+ uint32_t bufferImageHeight;
+ VkImageSubresourceLayers imageSubresource;
+ VkOffset3D imageOffset;
+ VkExtent3D imageExtent;
+} VkBufferImageCopy;
+
+typedef union VkClearColorValue {
+ float float32[4];
+ int32_t int32[4];
+ uint32_t uint32[4];
+} VkClearColorValue;
+
+typedef struct VkClearDepthStencilValue {
+ float depth;
+ uint32_t stencil;
+} VkClearDepthStencilValue;
+
+typedef union VkClearValue {
+ VkClearColorValue color;
+ VkClearDepthStencilValue depthStencil;
+} VkClearValue;
+
+typedef struct VkClearAttachment {
+ VkImageAspectFlags aspectMask;
+ uint32_t colorAttachment;
+ VkClearValue clearValue;
+} VkClearAttachment;
+
+typedef struct VkClearRect {
+ VkRect2D rect;
+ uint32_t baseArrayLayer;
+ uint32_t layerCount;
+} VkClearRect;
+
+typedef struct VkImageResolve {
+ VkImageSubresourceLayers srcSubresource;
+ VkOffset3D srcOffset;
+ VkImageSubresourceLayers dstSubresource;
+ VkOffset3D dstOffset;
+ VkExtent3D extent;
+} VkImageResolve;
+
+typedef struct VkMemoryBarrier {
+ VkStructureType sType;
+ const void* pNext;
+ VkAccessFlags srcAccessMask;
+ VkAccessFlags dstAccessMask;
+} VkMemoryBarrier;
+
+typedef struct VkBufferMemoryBarrier {
+ VkStructureType sType;
+ const void* pNext;
+ VkAccessFlags srcAccessMask;
+ VkAccessFlags dstAccessMask;
+ uint32_t srcQueueFamilyIndex;
+ uint32_t dstQueueFamilyIndex;
+ VkBuffer buffer;
+ VkDeviceSize offset;
+ VkDeviceSize size;
+} VkBufferMemoryBarrier;
+
+typedef struct VkImageMemoryBarrier {
+ VkStructureType sType;
+ const void* pNext;
+ VkAccessFlags srcAccessMask;
+ VkAccessFlags dstAccessMask;
+ VkImageLayout oldLayout;
+ VkImageLayout newLayout;
+ uint32_t srcQueueFamilyIndex;
+ uint32_t dstQueueFamilyIndex;
+ VkImage image;
+ VkImageSubresourceRange subresourceRange;
+} VkImageMemoryBarrier;
+
+typedef struct VkRenderPassBeginInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkRenderPass renderPass;
+ VkFramebuffer framebuffer;
+ VkRect2D renderArea;
+ uint32_t clearValueCount;
+ const VkClearValue* pClearValues;
+} VkRenderPassBeginInfo;
+
+typedef struct VkDispatchIndirectCommand {
+ uint32_t x;
+ uint32_t y;
+ uint32_t z;
+} VkDispatchIndirectCommand;
+
+typedef struct VkDrawIndexedIndirectCommand {
+ uint32_t indexCount;
+ uint32_t instanceCount;
+ uint32_t firstIndex;
+ int32_t vertexOffset;
+ uint32_t firstInstance;
+} VkDrawIndexedIndirectCommand;
+
+typedef struct VkDrawIndirectCommand {
+ uint32_t vertexCount;
+ uint32_t instanceCount;
+ uint32_t firstVertex;
+ uint32_t firstInstance;
+} VkDrawIndirectCommand;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateInstance)(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance);
+typedef void (VKAPI_PTR *PFN_vkDestroyInstance)(VkInstance instance, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDevices)(VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties* pFormatProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkImageFormatProperties* pImageFormatProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties* pQueueFamilyProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* pMemoryProperties);
+typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetInstanceProcAddr)(VkInstance instance, const char* pName);
+typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetDeviceProcAddr)(VkDevice device, const char* pName);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDevice)(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice);
+typedef void (VKAPI_PTR *PFN_vkDestroyDevice)(VkDevice device, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceExtensionProperties)(const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceExtensionProperties)(VkPhysicalDevice physicalDevice, const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceLayerProperties)(uint32_t* pPropertyCount, VkLayerProperties* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceLayerProperties)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkLayerProperties* pProperties);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceQueue)(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue);
+typedef VkResult (VKAPI_PTR *PFN_vkQueueSubmit)(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence);
+typedef VkResult (VKAPI_PTR *PFN_vkQueueWaitIdle)(VkQueue queue);
+typedef VkResult (VKAPI_PTR *PFN_vkDeviceWaitIdle)(VkDevice device);
+typedef VkResult (VKAPI_PTR *PFN_vkAllocateMemory)(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory);
+typedef void (VKAPI_PTR *PFN_vkFreeMemory)(VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkMapMemory)(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData);
+typedef void (VKAPI_PTR *PFN_vkUnmapMemory)(VkDevice device, VkDeviceMemory memory);
+typedef VkResult (VKAPI_PTR *PFN_vkFlushMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges);
+typedef VkResult (VKAPI_PTR *PFN_vkInvalidateMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceMemoryCommitment)(VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes);
+typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory)(VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset);
+typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory)(VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset);
+typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements)(VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements)(VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements)(VkDevice device, VkImage image, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements* pSparseMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling, uint32_t* pPropertyCount, VkSparseImageFormatProperties* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkQueueBindSparse)(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateFence)(VkDevice device, const VkFenceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence);
+typedef void (VKAPI_PTR *PFN_vkDestroyFence)(VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkResetFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences);
+typedef VkResult (VKAPI_PTR *PFN_vkGetFenceStatus)(VkDevice device, VkFence fence);
+typedef VkResult (VKAPI_PTR *PFN_vkWaitForFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateSemaphore)(VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore);
+typedef void (VKAPI_PTR *PFN_vkDestroySemaphore)(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateEvent)(VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkEvent* pEvent);
+typedef void (VKAPI_PTR *PFN_vkDestroyEvent)(VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkGetEventStatus)(VkDevice device, VkEvent event);
+typedef VkResult (VKAPI_PTR *PFN_vkSetEvent)(VkDevice device, VkEvent event);
+typedef VkResult (VKAPI_PTR *PFN_vkResetEvent)(VkDevice device, VkEvent event);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateQueryPool)(VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool);
+typedef void (VKAPI_PTR *PFN_vkDestroyQueryPool)(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkGetQueryPoolResults)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateBuffer)(VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer);
+typedef void (VKAPI_PTR *PFN_vkDestroyBuffer)(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateBufferView)(VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView);
+typedef void (VKAPI_PTR *PFN_vkDestroyBufferView)(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateImage)(VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage);
+typedef void (VKAPI_PTR *PFN_vkDestroyImage)(VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator);
+typedef void (VKAPI_PTR *PFN_vkGetImageSubresourceLayout)(VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateImageView)(VkDevice device, const VkImageViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImageView* pView);
+typedef void (VKAPI_PTR *PFN_vkDestroyImageView)(VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateShaderModule)(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule);
+typedef void (VKAPI_PTR *PFN_vkDestroyShaderModule)(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineCache)(VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache);
+typedef void (VKAPI_PTR *PFN_vkDestroyPipelineCache)(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineCacheData)(VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData);
+typedef VkResult (VKAPI_PTR *PFN_vkMergePipelineCaches)(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateGraphicsPipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateComputePipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines);
+typedef void (VKAPI_PTR *PFN_vkDestroyPipeline)(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineLayout)(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout);
+typedef void (VKAPI_PTR *PFN_vkDestroyPipelineLayout)(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateSampler)(VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler);
+typedef void (VKAPI_PTR *PFN_vkDestroySampler)(VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorSetLayout)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout);
+typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorSetLayout)(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorPool)(VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool);
+typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkResetDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags);
+typedef VkResult (VKAPI_PTR *PFN_vkAllocateDescriptorSets)(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets);
+typedef VkResult (VKAPI_PTR *PFN_vkFreeDescriptorSets)(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets);
+typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSets)(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateFramebuffer)(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer);
+typedef void (VKAPI_PTR *PFN_vkDestroyFramebuffer)(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass)(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass);
+typedef void (VKAPI_PTR *PFN_vkDestroyRenderPass)(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator);
+typedef void (VKAPI_PTR *PFN_vkGetRenderAreaGranularity)(VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateCommandPool)(VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool);
+typedef void (VKAPI_PTR *PFN_vkDestroyCommandPool)(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkResetCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags);
+typedef VkResult (VKAPI_PTR *PFN_vkAllocateCommandBuffers)(VkDevice device, const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers);
+typedef void (VKAPI_PTR *PFN_vkFreeCommandBuffers)(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers);
+typedef VkResult (VKAPI_PTR *PFN_vkBeginCommandBuffer)(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkEndCommandBuffer)(VkCommandBuffer commandBuffer);
+typedef VkResult (VKAPI_PTR *PFN_vkResetCommandBuffer)(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags);
+typedef void (VKAPI_PTR *PFN_vkCmdBindPipeline)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline);
+typedef void (VKAPI_PTR *PFN_vkCmdSetViewport)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport* pViewports);
+typedef void (VKAPI_PTR *PFN_vkCmdSetScissor)(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D* pScissors);
+typedef void (VKAPI_PTR *PFN_vkCmdSetLineWidth)(VkCommandBuffer commandBuffer, float lineWidth);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBias)(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor);
+typedef void (VKAPI_PTR *PFN_vkCmdSetBlendConstants)(VkCommandBuffer commandBuffer, const float blendConstants[4]);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBounds)(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds);
+typedef void (VKAPI_PTR *PFN_vkCmdSetStencilCompareMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask);
+typedef void (VKAPI_PTR *PFN_vkCmdSetStencilWriteMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask);
+typedef void (VKAPI_PTR *PFN_vkCmdSetStencilReference)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference);
+typedef void (VKAPI_PTR *PFN_vkCmdBindDescriptorSets)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets);
+typedef void (VKAPI_PTR *PFN_vkCmdBindIndexBuffer)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType);
+typedef void (VKAPI_PTR *PFN_vkCmdBindVertexBuffers)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets);
+typedef void (VKAPI_PTR *PFN_vkCmdDraw)(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance);
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexed)(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance);
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride);
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride);
+typedef void (VKAPI_PTR *PFN_vkCmdDispatch)(VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ);
+typedef void (VKAPI_PTR *PFN_vkCmdDispatchIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyBuffer)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy* pRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy* pRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdBlitImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit* pRegions, VkFilter filter);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyBufferToImage)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyImageToBuffer)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdUpdateBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void* pData);
+typedef void (VKAPI_PTR *PFN_vkCmdFillBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data);
+typedef void (VKAPI_PTR *PFN_vkCmdClearColorImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, uint32_t rangeCount, const VkImageSubresourceRange* pRanges);
+typedef void (VKAPI_PTR *PFN_vkCmdClearDepthStencilImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VkImageSubresourceRange* pRanges);
+typedef void (VKAPI_PTR *PFN_vkCmdClearAttachments)(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment* pAttachments, uint32_t rectCount, const VkClearRect* pRects);
+typedef void (VKAPI_PTR *PFN_vkCmdResolveImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve* pRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdSetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask);
+typedef void (VKAPI_PTR *PFN_vkCmdResetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask);
+typedef void (VKAPI_PTR *PFN_vkCmdWaitEvents)(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers);
+typedef void (VKAPI_PTR *PFN_vkCmdPipelineBarrier)(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers);
+typedef void (VKAPI_PTR *PFN_vkCmdBeginQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags);
+typedef void (VKAPI_PTR *PFN_vkCmdEndQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query);
+typedef void (VKAPI_PTR *PFN_vkCmdResetQueryPool)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount);
+typedef void (VKAPI_PTR *PFN_vkCmdWriteTimestamp)(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t query);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyQueryPoolResults)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags);
+typedef void (VKAPI_PTR *PFN_vkCmdPushConstants)(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues);
+typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkSubpassContents contents);
+typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass)(VkCommandBuffer commandBuffer, VkSubpassContents contents);
+typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass)(VkCommandBuffer commandBuffer);
+typedef void (VKAPI_PTR *PFN_vkCmdExecuteCommands)(VkCommandBuffer commandBuffer, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(
+ const VkInstanceCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkInstance* pInstance);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(
+ VkInstance instance,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDevices(
+ VkInstance instance,
+ uint32_t* pPhysicalDeviceCount,
+ VkPhysicalDevice* pPhysicalDevices);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceFeatures* pFeatures);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties(
+ VkPhysicalDevice physicalDevice,
+ VkFormat format,
+ VkFormatProperties* pFormatProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties(
+ VkPhysicalDevice physicalDevice,
+ VkFormat format,
+ VkImageType type,
+ VkImageTiling tiling,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags,
+ VkImageFormatProperties* pImageFormatProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceProperties* pProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties(
+ VkPhysicalDevice physicalDevice,
+ uint32_t* pQueueFamilyPropertyCount,
+ VkQueueFamilyProperties* pQueueFamilyProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceMemoryProperties* pMemoryProperties);
+
+VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(
+ VkInstance instance,
+ const char* pName);
+
+VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(
+ VkDevice device,
+ const char* pName);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(
+ VkPhysicalDevice physicalDevice,
+ const VkDeviceCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDevice* pDevice);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(
+ VkDevice device,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(
+ const char* pLayerName,
+ uint32_t* pPropertyCount,
+ VkExtensionProperties* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(
+ VkPhysicalDevice physicalDevice,
+ const char* pLayerName,
+ uint32_t* pPropertyCount,
+ VkExtensionProperties* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(
+ uint32_t* pPropertyCount,
+ VkLayerProperties* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(
+ VkPhysicalDevice physicalDevice,
+ uint32_t* pPropertyCount,
+ VkLayerProperties* pProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(
+ VkDevice device,
+ uint32_t queueFamilyIndex,
+ uint32_t queueIndex,
+ VkQueue* pQueue);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit(
+ VkQueue queue,
+ uint32_t submitCount,
+ const VkSubmitInfo* pSubmits,
+ VkFence fence);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(
+ VkQueue queue);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(
+ VkDevice device);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(
+ VkDevice device,
+ const VkMemoryAllocateInfo* pAllocateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDeviceMemory* pMemory);
+
+VKAPI_ATTR void VKAPI_CALL vkFreeMemory(
+ VkDevice device,
+ VkDeviceMemory memory,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory(
+ VkDevice device,
+ VkDeviceMemory memory,
+ VkDeviceSize offset,
+ VkDeviceSize size,
+ VkMemoryMapFlags flags,
+ void** ppData);
+
+VKAPI_ATTR void VKAPI_CALL vkUnmapMemory(
+ VkDevice device,
+ VkDeviceMemory memory);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkFlushMappedMemoryRanges(
+ VkDevice device,
+ uint32_t memoryRangeCount,
+ const VkMappedMemoryRange* pMemoryRanges);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkInvalidateMappedMemoryRanges(
+ VkDevice device,
+ uint32_t memoryRangeCount,
+ const VkMappedMemoryRange* pMemoryRanges);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceMemoryCommitment(
+ VkDevice device,
+ VkDeviceMemory memory,
+ VkDeviceSize* pCommittedMemoryInBytes);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory(
+ VkDevice device,
+ VkBuffer buffer,
+ VkDeviceMemory memory,
+ VkDeviceSize memoryOffset);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(
+ VkDevice device,
+ VkImage image,
+ VkDeviceMemory memory,
+ VkDeviceSize memoryOffset);
+
+VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements(
+ VkDevice device,
+ VkBuffer buffer,
+ VkMemoryRequirements* pMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements(
+ VkDevice device,
+ VkImage image,
+ VkMemoryRequirements* pMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements(
+ VkDevice device,
+ VkImage image,
+ uint32_t* pSparseMemoryRequirementCount,
+ VkSparseImageMemoryRequirements* pSparseMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties(
+ VkPhysicalDevice physicalDevice,
+ VkFormat format,
+ VkImageType type,
+ VkSampleCountFlagBits samples,
+ VkImageUsageFlags usage,
+ VkImageTiling tiling,
+ uint32_t* pPropertyCount,
+ VkSparseImageFormatProperties* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse(
+ VkQueue queue,
+ uint32_t bindInfoCount,
+ const VkBindSparseInfo* pBindInfo,
+ VkFence fence);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateFence(
+ VkDevice device,
+ const VkFenceCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkFence* pFence);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyFence(
+ VkDevice device,
+ VkFence fence,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(
+ VkDevice device,
+ uint32_t fenceCount,
+ const VkFence* pFences);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(
+ VkDevice device,
+ VkFence fence);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkWaitForFences(
+ VkDevice device,
+ uint32_t fenceCount,
+ const VkFence* pFences,
+ VkBool32 waitAll,
+ uint64_t timeout);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(
+ VkDevice device,
+ const VkSemaphoreCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSemaphore* pSemaphore);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroySemaphore(
+ VkDevice device,
+ VkSemaphore semaphore,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateEvent(
+ VkDevice device,
+ const VkEventCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkEvent* pEvent);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(
+ VkDevice device,
+ VkEvent event,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetEventStatus(
+ VkDevice device,
+ VkEvent event);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(
+ VkDevice device,
+ VkEvent event);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkResetEvent(
+ VkDevice device,
+ VkEvent event);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(
+ VkDevice device,
+ const VkQueryPoolCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkQueryPool* pQueryPool);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyQueryPool(
+ VkDevice device,
+ VkQueryPool queryPool,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(
+ VkDevice device,
+ VkQueryPool queryPool,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ size_t dataSize,
+ void* pData,
+ VkDeviceSize stride,
+ VkQueryResultFlags flags);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(
+ VkDevice device,
+ const VkBufferCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkBuffer* pBuffer);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyBuffer(
+ VkDevice device,
+ VkBuffer buffer,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(
+ VkDevice device,
+ const VkBufferViewCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkBufferView* pView);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyBufferView(
+ VkDevice device,
+ VkBufferView bufferView,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(
+ VkDevice device,
+ const VkImageCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkImage* pImage);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyImage(
+ VkDevice device,
+ VkImage image,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout(
+ VkDevice device,
+ VkImage image,
+ const VkImageSubresource* pSubresource,
+ VkSubresourceLayout* pLayout);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(
+ VkDevice device,
+ const VkImageViewCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkImageView* pView);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyImageView(
+ VkDevice device,
+ VkImageView imageView,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(
+ VkDevice device,
+ const VkShaderModuleCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkShaderModule* pShaderModule);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyShaderModule(
+ VkDevice device,
+ VkShaderModule shaderModule,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(
+ VkDevice device,
+ const VkPipelineCacheCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkPipelineCache* pPipelineCache);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineCache(
+ VkDevice device,
+ VkPipelineCache pipelineCache,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineCacheData(
+ VkDevice device,
+ VkPipelineCache pipelineCache,
+ size_t* pDataSize,
+ void* pData);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkMergePipelineCaches(
+ VkDevice device,
+ VkPipelineCache dstCache,
+ uint32_t srcCacheCount,
+ const VkPipelineCache* pSrcCaches);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateGraphicsPipelines(
+ VkDevice device,
+ VkPipelineCache pipelineCache,
+ uint32_t createInfoCount,
+ const VkGraphicsPipelineCreateInfo* pCreateInfos,
+ const VkAllocationCallbacks* pAllocator,
+ VkPipeline* pPipelines);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateComputePipelines(
+ VkDevice device,
+ VkPipelineCache pipelineCache,
+ uint32_t createInfoCount,
+ const VkComputePipelineCreateInfo* pCreateInfos,
+ const VkAllocationCallbacks* pAllocator,
+ VkPipeline* pPipelines);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyPipeline(
+ VkDevice device,
+ VkPipeline pipeline,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(
+ VkDevice device,
+ const VkPipelineLayoutCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkPipelineLayout* pPipelineLayout);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineLayout(
+ VkDevice device,
+ VkPipelineLayout pipelineLayout,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(
+ VkDevice device,
+ const VkSamplerCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSampler* pSampler);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroySampler(
+ VkDevice device,
+ VkSampler sampler,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorSetLayout(
+ VkDevice device,
+ const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDescriptorSetLayout* pSetLayout);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorSetLayout(
+ VkDevice device,
+ VkDescriptorSetLayout descriptorSetLayout,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorPool(
+ VkDevice device,
+ const VkDescriptorPoolCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDescriptorPool* pDescriptorPool);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorPool(
+ VkDevice device,
+ VkDescriptorPool descriptorPool,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkResetDescriptorPool(
+ VkDevice device,
+ VkDescriptorPool descriptorPool,
+ VkDescriptorPoolResetFlags flags);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkAllocateDescriptorSets(
+ VkDevice device,
+ const VkDescriptorSetAllocateInfo* pAllocateInfo,
+ VkDescriptorSet* pDescriptorSets);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkFreeDescriptorSets(
+ VkDevice device,
+ VkDescriptorPool descriptorPool,
+ uint32_t descriptorSetCount,
+ const VkDescriptorSet* pDescriptorSets);
+
+VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSets(
+ VkDevice device,
+ uint32_t descriptorWriteCount,
+ const VkWriteDescriptorSet* pDescriptorWrites,
+ uint32_t descriptorCopyCount,
+ const VkCopyDescriptorSet* pDescriptorCopies);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(
+ VkDevice device,
+ const VkFramebufferCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkFramebuffer* pFramebuffer);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyFramebuffer(
+ VkDevice device,
+ VkFramebuffer framebuffer,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(
+ VkDevice device,
+ const VkRenderPassCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkRenderPass* pRenderPass);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyRenderPass(
+ VkDevice device,
+ VkRenderPass renderPass,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR void VKAPI_CALL vkGetRenderAreaGranularity(
+ VkDevice device,
+ VkRenderPass renderPass,
+ VkExtent2D* pGranularity);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(
+ VkDevice device,
+ const VkCommandPoolCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkCommandPool* pCommandPool);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyCommandPool(
+ VkDevice device,
+ VkCommandPool commandPool,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandPool(
+ VkDevice device,
+ VkCommandPool commandPool,
+ VkCommandPoolResetFlags flags);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers(
+ VkDevice device,
+ const VkCommandBufferAllocateInfo* pAllocateInfo,
+ VkCommandBuffer* pCommandBuffers);
+
+VKAPI_ATTR void VKAPI_CALL vkFreeCommandBuffers(
+ VkDevice device,
+ VkCommandPool commandPool,
+ uint32_t commandBufferCount,
+ const VkCommandBuffer* pCommandBuffers);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer(
+ VkCommandBuffer commandBuffer,
+ const VkCommandBufferBeginInfo* pBeginInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(
+ VkCommandBuffer commandBuffer);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandBuffer(
+ VkCommandBuffer commandBuffer,
+ VkCommandBufferResetFlags flags);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBindPipeline(
+ VkCommandBuffer commandBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipeline pipeline);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetViewport(
+ VkCommandBuffer commandBuffer,
+ uint32_t firstViewport,
+ uint32_t viewportCount,
+ const VkViewport* pViewports);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetScissor(
+ VkCommandBuffer commandBuffer,
+ uint32_t firstScissor,
+ uint32_t scissorCount,
+ const VkRect2D* pScissors);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(
+ VkCommandBuffer commandBuffer,
+ float lineWidth);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBias(
+ VkCommandBuffer commandBuffer,
+ float depthBiasConstantFactor,
+ float depthBiasClamp,
+ float depthBiasSlopeFactor);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(
+ VkCommandBuffer commandBuffer,
+ const float blendConstants[4]);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBounds(
+ VkCommandBuffer commandBuffer,
+ float minDepthBounds,
+ float maxDepthBounds);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilCompareMask(
+ VkCommandBuffer commandBuffer,
+ VkStencilFaceFlags faceMask,
+ uint32_t compareMask);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilWriteMask(
+ VkCommandBuffer commandBuffer,
+ VkStencilFaceFlags faceMask,
+ uint32_t writeMask);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilReference(
+ VkCommandBuffer commandBuffer,
+ VkStencilFaceFlags faceMask,
+ uint32_t reference);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets(
+ VkCommandBuffer commandBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout layout,
+ uint32_t firstSet,
+ uint32_t descriptorSetCount,
+ const VkDescriptorSet* pDescriptorSets,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* pDynamicOffsets);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer(
+ VkCommandBuffer commandBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset,
+ VkIndexType indexType);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(
+ VkCommandBuffer commandBuffer,
+ uint32_t firstBinding,
+ uint32_t bindingCount,
+ const VkBuffer* pBuffers,
+ const VkDeviceSize* pOffsets);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDraw(
+ VkCommandBuffer commandBuffer,
+ uint32_t vertexCount,
+ uint32_t instanceCount,
+ uint32_t firstVertex,
+ uint32_t firstInstance);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(
+ VkCommandBuffer commandBuffer,
+ uint32_t indexCount,
+ uint32_t instanceCount,
+ uint32_t firstIndex,
+ int32_t vertexOffset,
+ uint32_t firstInstance);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirect(
+ VkCommandBuffer commandBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset,
+ uint32_t drawCount,
+ uint32_t stride);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirect(
+ VkCommandBuffer commandBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset,
+ uint32_t drawCount,
+ uint32_t stride);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(
+ VkCommandBuffer commandBuffer,
+ uint32_t groupCountX,
+ uint32_t groupCountY,
+ uint32_t groupCountZ);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDispatchIndirect(
+ VkCommandBuffer commandBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(
+ VkCommandBuffer commandBuffer,
+ VkBuffer srcBuffer,
+ VkBuffer dstBuffer,
+ uint32_t regionCount,
+ const VkBufferCopy* pRegions);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage(
+ VkCommandBuffer commandBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage dstImage,
+ VkImageLayout dstImageLayout,
+ uint32_t regionCount,
+ const VkImageCopy* pRegions);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage(
+ VkCommandBuffer commandBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage dstImage,
+ VkImageLayout dstImageLayout,
+ uint32_t regionCount,
+ const VkImageBlit* pRegions,
+ VkFilter filter);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(
+ VkCommandBuffer commandBuffer,
+ VkBuffer srcBuffer,
+ VkImage dstImage,
+ VkImageLayout dstImageLayout,
+ uint32_t regionCount,
+ const VkBufferImageCopy* pRegions);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(
+ VkCommandBuffer commandBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkBuffer dstBuffer,
+ uint32_t regionCount,
+ const VkBufferImageCopy* pRegions);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(
+ VkCommandBuffer commandBuffer,
+ VkBuffer dstBuffer,
+ VkDeviceSize dstOffset,
+ VkDeviceSize dataSize,
+ const void* pData);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer(
+ VkCommandBuffer commandBuffer,
+ VkBuffer dstBuffer,
+ VkDeviceSize dstOffset,
+ VkDeviceSize size,
+ uint32_t data);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(
+ VkCommandBuffer commandBuffer,
+ VkImage image,
+ VkImageLayout imageLayout,
+ const VkClearColorValue* pColor,
+ uint32_t rangeCount,
+ const VkImageSubresourceRange* pRanges);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdClearDepthStencilImage(
+ VkCommandBuffer commandBuffer,
+ VkImage image,
+ VkImageLayout imageLayout,
+ const VkClearDepthStencilValue* pDepthStencil,
+ uint32_t rangeCount,
+ const VkImageSubresourceRange* pRanges);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(
+ VkCommandBuffer commandBuffer,
+ uint32_t attachmentCount,
+ const VkClearAttachment* pAttachments,
+ uint32_t rectCount,
+ const VkClearRect* pRects);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage(
+ VkCommandBuffer commandBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage dstImage,
+ VkImageLayout dstImageLayout,
+ uint32_t regionCount,
+ const VkImageResolve* pRegions);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent(
+ VkCommandBuffer commandBuffer,
+ VkEvent event,
+ VkPipelineStageFlags stageMask);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent(
+ VkCommandBuffer commandBuffer,
+ VkEvent event,
+ VkPipelineStageFlags stageMask);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents(
+ VkCommandBuffer commandBuffer,
+ uint32_t eventCount,
+ const VkEvent* pEvents,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ uint32_t memoryBarrierCount,
+ const VkMemoryBarrier* pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier* pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier* pImageMemoryBarriers);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier(
+ VkCommandBuffer commandBuffer,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ VkDependencyFlags dependencyFlags,
+ uint32_t memoryBarrierCount,
+ const VkMemoryBarrier* pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier* pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier* pImageMemoryBarriers);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBeginQuery(
+ VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t query,
+ VkQueryControlFlags flags);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(
+ VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t query);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdResetQueryPool(
+ VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t firstQuery,
+ uint32_t queryCount);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp(
+ VkCommandBuffer commandBuffer,
+ VkPipelineStageFlagBits pipelineStage,
+ VkQueryPool queryPool,
+ uint32_t query);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyQueryPoolResults(
+ VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ VkBuffer dstBuffer,
+ VkDeviceSize dstOffset,
+ VkDeviceSize stride,
+ VkQueryResultFlags flags);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(
+ VkCommandBuffer commandBuffer,
+ VkPipelineLayout layout,
+ VkShaderStageFlags stageFlags,
+ uint32_t offset,
+ uint32_t size,
+ const void* pValues);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass(
+ VkCommandBuffer commandBuffer,
+ const VkRenderPassBeginInfo* pRenderPassBegin,
+ VkSubpassContents contents);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(
+ VkCommandBuffer commandBuffer,
+ VkSubpassContents contents);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(
+ VkCommandBuffer commandBuffer);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdExecuteCommands(
+ VkCommandBuffer commandBuffer,
+ uint32_t commandBufferCount,
+ const VkCommandBuffer* pCommandBuffers);
+#endif
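/*
 * Illustrative sketch, not part of the upstream Vulkan header or this diff:
 * the PFN_vk* typedefs above exist so entry points can be fetched at run
 * time instead of relying on the static prototypes in the block just before
 * this. The hypothetical helper below assumes VK_NO_PROTOTYPES is not
 * defined (so vkGetDeviceProcAddr itself is declared) and that device is a
 * valid, already-created VkDevice.
 */
static PFN_vkCreateSemaphore LoadCreateSemaphore( VkDevice device )
{
    /* vkGetDeviceProcAddr returns NULL for unknown commands, so callers
       should check the result before calling through it. */
    return ( PFN_vkCreateSemaphore )vkGetDeviceProcAddr( device, "vkCreateSemaphore" );
}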
+
+#define VK_KHR_surface 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSurfaceKHR)
+
+#define VK_KHR_SURFACE_SPEC_VERSION 25
+#define VK_KHR_SURFACE_EXTENSION_NAME "VK_KHR_surface"
+#define VK_COLORSPACE_SRGB_NONLINEAR_KHR VK_COLOR_SPACE_SRGB_NONLINEAR_KHR
+
+
+typedef enum VkColorSpaceKHR {
+ VK_COLOR_SPACE_SRGB_NONLINEAR_KHR = 0,
+ VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT = 1000104001,
+ VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT = 1000104002,
+ VK_COLOR_SPACE_DCI_P3_LINEAR_EXT = 1000104003,
+ VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT = 1000104004,
+ VK_COLOR_SPACE_BT709_LINEAR_EXT = 1000104005,
+ VK_COLOR_SPACE_BT709_NONLINEAR_EXT = 1000104006,
+ VK_COLOR_SPACE_BT2020_LINEAR_EXT = 1000104007,
+ VK_COLOR_SPACE_HDR10_ST2084_EXT = 1000104008,
+ VK_COLOR_SPACE_DOLBYVISION_EXT = 1000104009,
+ VK_COLOR_SPACE_HDR10_HLG_EXT = 1000104010,
+ VK_COLOR_SPACE_ADOBERGB_LINEAR_EXT = 1000104011,
+ VK_COLOR_SPACE_ADOBERGB_NONLINEAR_EXT = 1000104012,
+ VK_COLOR_SPACE_PASS_THROUGH_EXT = 1000104013,
+ VK_COLOR_SPACE_BEGIN_RANGE_KHR = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR,
+ VK_COLOR_SPACE_END_RANGE_KHR = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR,
+ VK_COLOR_SPACE_RANGE_SIZE_KHR = (VK_COLOR_SPACE_SRGB_NONLINEAR_KHR - VK_COLOR_SPACE_SRGB_NONLINEAR_KHR + 1),
+ VK_COLOR_SPACE_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkColorSpaceKHR;
+
+typedef enum VkPresentModeKHR {
+ VK_PRESENT_MODE_IMMEDIATE_KHR = 0,
+ VK_PRESENT_MODE_MAILBOX_KHR = 1,
+ VK_PRESENT_MODE_FIFO_KHR = 2,
+ VK_PRESENT_MODE_FIFO_RELAXED_KHR = 3,
+ VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR = 1000111000,
+ VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR = 1000111001,
+ VK_PRESENT_MODE_BEGIN_RANGE_KHR = VK_PRESENT_MODE_IMMEDIATE_KHR,
+ VK_PRESENT_MODE_END_RANGE_KHR = VK_PRESENT_MODE_FIFO_RELAXED_KHR,
+ VK_PRESENT_MODE_RANGE_SIZE_KHR = (VK_PRESENT_MODE_FIFO_RELAXED_KHR - VK_PRESENT_MODE_IMMEDIATE_KHR + 1),
+ VK_PRESENT_MODE_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkPresentModeKHR;
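/*
 * Illustrative sketch, not part of the upstream Vulkan header or this diff:
 * the spec only guarantees VK_PRESENT_MODE_FIFO_KHR, so a common pattern is
 * to prefer MAILBOX when the surface reports it and otherwise fall back to
 * FIFO. This hypothetical helper assumes the caller already retrieved the
 * supported modes with vkGetPhysicalDeviceSurfacePresentModesKHR (declared
 * further down).
 */
static VkPresentModeKHR ChoosePresentMode( const VkPresentModeKHR *pModes, uint32_t modeCount )
{
    uint32_t i;
    for ( i = 0; i < modeCount; i++ )
    {
        if ( pModes[ i ] == VK_PRESENT_MODE_MAILBOX_KHR )
            return VK_PRESENT_MODE_MAILBOX_KHR;
    }
    return VK_PRESENT_MODE_FIFO_KHR; /* required to be supported by the spec */
}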
+
+
+typedef enum VkSurfaceTransformFlagBitsKHR {
+ VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR = 0x00000001,
+ VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR = 0x00000002,
+ VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR = 0x00000004,
+ VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR = 0x00000008,
+ VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR = 0x00000010,
+ VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR = 0x00000020,
+ VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR = 0x00000040,
+ VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR = 0x00000080,
+ VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR = 0x00000100,
+ VK_SURFACE_TRANSFORM_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkSurfaceTransformFlagBitsKHR;
+typedef VkFlags VkSurfaceTransformFlagsKHR;
+
+typedef enum VkCompositeAlphaFlagBitsKHR {
+ VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR = 0x00000001,
+ VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR = 0x00000002,
+ VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR = 0x00000004,
+ VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR = 0x00000008,
+ VK_COMPOSITE_ALPHA_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkCompositeAlphaFlagBitsKHR;
+typedef VkFlags VkCompositeAlphaFlagsKHR;
+
+typedef struct VkSurfaceCapabilitiesKHR {
+ uint32_t minImageCount;
+ uint32_t maxImageCount;
+ VkExtent2D currentExtent;
+ VkExtent2D minImageExtent;
+ VkExtent2D maxImageExtent;
+ uint32_t maxImageArrayLayers;
+ VkSurfaceTransformFlagsKHR supportedTransforms;
+ VkSurfaceTransformFlagBitsKHR currentTransform;
+ VkCompositeAlphaFlagsKHR supportedCompositeAlpha;
+ VkImageUsageFlags supportedUsageFlags;
+} VkSurfaceCapabilitiesKHR;
+
+typedef struct VkSurfaceFormatKHR {
+ VkFormat format;
+ VkColorSpaceKHR colorSpace;
+} VkSurfaceFormatKHR;
+
+
+typedef void (VKAPI_PTR *PFN_vkDestroySurfaceKHR)(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32* pSupported);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR* pSurfaceCapabilities);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkDestroySurfaceKHR(
+ VkInstance instance,
+ VkSurfaceKHR surface,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceSupportKHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex,
+ VkSurfaceKHR surface,
+ VkBool32* pSupported);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
+ VkPhysicalDevice physicalDevice,
+ VkSurfaceKHR surface,
+ VkSurfaceCapabilitiesKHR* pSurfaceCapabilities);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormatsKHR(
+ VkPhysicalDevice physicalDevice,
+ VkSurfaceKHR surface,
+ uint32_t* pSurfaceFormatCount,
+ VkSurfaceFormatKHR* pSurfaceFormats);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModesKHR(
+ VkPhysicalDevice physicalDevice,
+ VkSurfaceKHR surface,
+ uint32_t* pPresentModeCount,
+ VkPresentModeKHR* pPresentModes);
+#endif
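/*
 * Illustrative sketch, not part of the upstream Vulkan header or this diff:
 * enumeration entry points such as vkGetPhysicalDeviceSurfaceFormatsKHR
 * follow the usual two-call pattern - pass NULL to query the count, then
 * call again with an array of at least that size. This hypothetical helper
 * fills a caller-provided array and assumes prototypes are enabled
 * (VK_NO_PROTOTYPES not defined).
 */
static VkResult QuerySurfaceFormats( VkPhysicalDevice gpu, VkSurfaceKHR surface,
                                     uint32_t capacity, VkSurfaceFormatKHR *pFormats, uint32_t *pCount )
{
    VkResult result = vkGetPhysicalDeviceSurfaceFormatsKHR( gpu, surface, pCount, NULL );
    if ( result != VK_SUCCESS )
        return result;
    if ( *pCount > capacity )
        *pCount = capacity; /* truncated; the second call then reports VK_INCOMPLETE */
    return vkGetPhysicalDeviceSurfaceFormatsKHR( gpu, surface, pCount, pFormats );
}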
+
+#define VK_KHR_swapchain 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSwapchainKHR)
+
+#define VK_KHR_SWAPCHAIN_SPEC_VERSION 68
+#define VK_KHR_SWAPCHAIN_EXTENSION_NAME "VK_KHR_swapchain"
+
+
+typedef enum VkSwapchainCreateFlagBitsKHR {
+ VK_SWAPCHAIN_CREATE_BIND_SFR_BIT_KHX = 0x00000001,
+ VK_SWAPCHAIN_CREATE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkSwapchainCreateFlagBitsKHR;
+typedef VkFlags VkSwapchainCreateFlagsKHR;
+
+typedef struct VkSwapchainCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkSwapchainCreateFlagsKHR flags;
+ VkSurfaceKHR surface;
+ uint32_t minImageCount;
+ VkFormat imageFormat;
+ VkColorSpaceKHR imageColorSpace;
+ VkExtent2D imageExtent;
+ uint32_t imageArrayLayers;
+ VkImageUsageFlags imageUsage;
+ VkSharingMode imageSharingMode;
+ uint32_t queueFamilyIndexCount;
+ const uint32_t* pQueueFamilyIndices;
+ VkSurfaceTransformFlagBitsKHR preTransform;
+ VkCompositeAlphaFlagBitsKHR compositeAlpha;
+ VkPresentModeKHR presentMode;
+ VkBool32 clipped;
+ VkSwapchainKHR oldSwapchain;
+} VkSwapchainCreateInfoKHR;
+
+typedef struct VkPresentInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t waitSemaphoreCount;
+ const VkSemaphore* pWaitSemaphores;
+ uint32_t swapchainCount;
+ const VkSwapchainKHR* pSwapchains;
+ const uint32_t* pImageIndices;
+ VkResult* pResults;
+} VkPresentInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateSwapchainKHR)(VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain);
+typedef void (VKAPI_PTR *PFN_vkDestroySwapchainKHR)(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainImagesKHR)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VkImage* pSwapchainImages);
+typedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImageKHR)(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t* pImageIndex);
+typedef VkResult (VKAPI_PTR *PFN_vkQueuePresentKHR)(VkQueue queue, const VkPresentInfoKHR* pPresentInfo);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(
+ VkDevice device,
+ const VkSwapchainCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSwapchainKHR* pSwapchain);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR(
+ VkDevice device,
+ VkSwapchainKHR swapchain,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR(
+ VkDevice device,
+ VkSwapchainKHR swapchain,
+ uint32_t* pSwapchainImageCount,
+ VkImage* pSwapchainImages);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(
+ VkDevice device,
+ VkSwapchainKHR swapchain,
+ uint64_t timeout,
+ VkSemaphore semaphore,
+ VkFence fence,
+ uint32_t* pImageIndex);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(
+ VkQueue queue,
+ const VkPresentInfoKHR* pPresentInfo);
+#endif
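/*
 * Illustrative sketch, not part of the upstream Vulkan header or this diff:
 * the per-frame swapchain flow is acquire -> render -> present. This
 * hypothetical helper skips VK_ERROR_OUT_OF_DATE_KHR / VK_SUBOPTIMAL_KHR
 * handling and assumes the queue submission that renders to the acquired
 * image waits on the imageAcquired semaphore before the present is issued.
 */
static VkResult PresentOneFrame( VkDevice device, VkQueue queue, VkSwapchainKHR swapchain, VkSemaphore imageAcquired )
{
    uint32_t imageIndex = 0;
    VkPresentInfoKHR presentInfo = { VK_STRUCTURE_TYPE_PRESENT_INFO_KHR }; /* remaining members zero-initialized */
    VkResult result = vkAcquireNextImageKHR( device, swapchain, ~0ULL /* no timeout */, imageAcquired, VK_NULL_HANDLE, &imageIndex );
    if ( result != VK_SUCCESS )
        return result;

    /* ...submit rendering work that waits on imageAcquired here... */

    presentInfo.swapchainCount = 1;
    presentInfo.pSwapchains    = &swapchain;
    presentInfo.pImageIndices  = &imageIndex;
    return vkQueuePresentKHR( queue, &presentInfo );
}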
+
+#define VK_KHR_display 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayKHR)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayModeKHR)
+
+#define VK_KHR_DISPLAY_SPEC_VERSION 21
+#define VK_KHR_DISPLAY_EXTENSION_NAME "VK_KHR_display"
+
+
+typedef enum VkDisplayPlaneAlphaFlagBitsKHR {
+ VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR = 0x00000001,
+ VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR = 0x00000002,
+ VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_BIT_KHR = 0x00000004,
+ VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_PREMULTIPLIED_BIT_KHR = 0x00000008,
+ VK_DISPLAY_PLANE_ALPHA_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkDisplayPlaneAlphaFlagBitsKHR;
+typedef VkFlags VkDisplayPlaneAlphaFlagsKHR;
+typedef VkFlags VkDisplayModeCreateFlagsKHR;
+typedef VkFlags VkDisplaySurfaceCreateFlagsKHR;
+
+typedef struct VkDisplayPropertiesKHR {
+ VkDisplayKHR display;
+ const char* displayName;
+ VkExtent2D physicalDimensions;
+ VkExtent2D physicalResolution;
+ VkSurfaceTransformFlagsKHR supportedTransforms;
+ VkBool32 planeReorderPossible;
+ VkBool32 persistentContent;
+} VkDisplayPropertiesKHR;
+
+typedef struct VkDisplayModeParametersKHR {
+ VkExtent2D visibleRegion;
+ uint32_t refreshRate;
+} VkDisplayModeParametersKHR;
+
+typedef struct VkDisplayModePropertiesKHR {
+ VkDisplayModeKHR displayMode;
+ VkDisplayModeParametersKHR parameters;
+} VkDisplayModePropertiesKHR;
+
+typedef struct VkDisplayModeCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkDisplayModeCreateFlagsKHR flags;
+ VkDisplayModeParametersKHR parameters;
+} VkDisplayModeCreateInfoKHR;
+
+typedef struct VkDisplayPlaneCapabilitiesKHR {
+ VkDisplayPlaneAlphaFlagsKHR supportedAlpha;
+ VkOffset2D minSrcPosition;
+ VkOffset2D maxSrcPosition;
+ VkExtent2D minSrcExtent;
+ VkExtent2D maxSrcExtent;
+ VkOffset2D minDstPosition;
+ VkOffset2D maxDstPosition;
+ VkExtent2D minDstExtent;
+ VkExtent2D maxDstExtent;
+} VkDisplayPlaneCapabilitiesKHR;
+
+typedef struct VkDisplayPlanePropertiesKHR {
+ VkDisplayKHR currentDisplay;
+ uint32_t currentStackIndex;
+} VkDisplayPlanePropertiesKHR;
+
+typedef struct VkDisplaySurfaceCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkDisplaySurfaceCreateFlagsKHR flags;
+ VkDisplayModeKHR displayMode;
+ uint32_t planeIndex;
+ uint32_t planeStackIndex;
+ VkSurfaceTransformFlagBitsKHR transform;
+ float globalAlpha;
+ VkDisplayPlaneAlphaFlagBitsKHR alphaMode;
+ VkExtent2D imageExtent;
+} VkDisplaySurfaceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPropertiesKHR* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlanePropertiesKHR* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneSupportedDisplaysKHR)(VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t* pDisplayCount, VkDisplayKHR* pDisplays);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayModePropertiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModePropertiesKHR* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayModeKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, const VkDisplayModeCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDisplayModeKHR* pMode);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR* pCapabilities);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayPlaneSurfaceKHR)(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPropertiesKHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t* pPropertyCount,
+ VkDisplayPropertiesKHR* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPlanePropertiesKHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t* pPropertyCount,
+ VkDisplayPlanePropertiesKHR* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneSupportedDisplaysKHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t planeIndex,
+ uint32_t* pDisplayCount,
+ VkDisplayKHR* pDisplays);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayModePropertiesKHR(
+ VkPhysicalDevice physicalDevice,
+ VkDisplayKHR display,
+ uint32_t* pPropertyCount,
+ VkDisplayModePropertiesKHR* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayModeKHR(
+ VkPhysicalDevice physicalDevice,
+ VkDisplayKHR display,
+ const VkDisplayModeCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDisplayModeKHR* pMode);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneCapabilitiesKHR(
+ VkPhysicalDevice physicalDevice,
+ VkDisplayModeKHR mode,
+ uint32_t planeIndex,
+ VkDisplayPlaneCapabilitiesKHR* pCapabilities);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayPlaneSurfaceKHR(
+ VkInstance instance,
+ const VkDisplaySurfaceCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSurfaceKHR* pSurface);
+#endif
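/*
 * Illustrative sketch, not part of the upstream Vulkan header or this diff:
 * VK_KHR_display is the path a runtime can use to drive a panel directly,
 * without a windowing system. Counting the displays attached to a physical
 * device uses the same two-call pattern as other enumerations; this
 * hypothetical helper assumes the instance was created with the extension
 * enabled and that prototypes are available.
 */
static uint32_t CountDisplays( VkPhysicalDevice gpu )
{
    uint32_t count = 0;
    if ( vkGetPhysicalDeviceDisplayPropertiesKHR( gpu, &count, NULL ) != VK_SUCCESS )
        return 0;
    return count;
}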
+
+#define VK_KHR_display_swapchain 1
+#define VK_KHR_DISPLAY_SWAPCHAIN_SPEC_VERSION 9
+#define VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME "VK_KHR_display_swapchain"
+
+typedef struct VkDisplayPresentInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkRect2D srcRect;
+ VkRect2D dstRect;
+ VkBool32 persistent;
+} VkDisplayPresentInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateSharedSwapchainsKHR)(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchains);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSharedSwapchainsKHR(
+ VkDevice device,
+ uint32_t swapchainCount,
+ const VkSwapchainCreateInfoKHR* pCreateInfos,
+ const VkAllocationCallbacks* pAllocator,
+ VkSwapchainKHR* pSwapchains);
+#endif
+
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+#define VK_KHR_xlib_surface 1
+#include <X11/Xlib.h>
+
+#define VK_KHR_XLIB_SURFACE_SPEC_VERSION 6
+#define VK_KHR_XLIB_SURFACE_EXTENSION_NAME "VK_KHR_xlib_surface"
+
+typedef VkFlags VkXlibSurfaceCreateFlagsKHR;
+
+typedef struct VkXlibSurfaceCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkXlibSurfaceCreateFlagsKHR flags;
+ Display* dpy;
+ Window window;
+} VkXlibSurfaceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateXlibSurfaceKHR)(VkInstance instance, const VkXlibSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, Display* dpy, VisualID visualID);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateXlibSurfaceKHR(
+ VkInstance instance,
+ const VkXlibSurfaceCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSurfaceKHR* pSurface);
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXlibPresentationSupportKHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex,
+ Display* dpy,
+ VisualID visualID);
+#endif
+#endif /* VK_USE_PLATFORM_XLIB_KHR */
+
+#ifdef VK_USE_PLATFORM_XCB_KHR
+#define VK_KHR_xcb_surface 1
+#include <xcb/xcb.h>
+
+#define VK_KHR_XCB_SURFACE_SPEC_VERSION 6
+#define VK_KHR_XCB_SURFACE_EXTENSION_NAME "VK_KHR_xcb_surface"
+
+typedef VkFlags VkXcbSurfaceCreateFlagsKHR;
+
+typedef struct VkXcbSurfaceCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkXcbSurfaceCreateFlagsKHR flags;
+ xcb_connection_t* connection;
+ xcb_window_t window;
+} VkXcbSurfaceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateXcbSurfaceKHR)(VkInstance instance, const VkXcbSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateXcbSurfaceKHR(
+ VkInstance instance,
+ const VkXcbSurfaceCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSurfaceKHR* pSurface);
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXcbPresentationSupportKHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex,
+ xcb_connection_t* connection,
+ xcb_visualid_t visual_id);
+#endif
+#endif /* VK_USE_PLATFORM_XCB_KHR */
+
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+#define VK_KHR_wayland_surface 1
+#include <wayland-client.h>
+
+#define VK_KHR_WAYLAND_SURFACE_SPEC_VERSION 6
+#define VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME "VK_KHR_wayland_surface"
+
+typedef VkFlags VkWaylandSurfaceCreateFlagsKHR;
+
+typedef struct VkWaylandSurfaceCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkWaylandSurfaceCreateFlagsKHR flags;
+ struct wl_display* display;
+ struct wl_surface* surface;
+} VkWaylandSurfaceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateWaylandSurfaceKHR)(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, struct wl_display* display);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateWaylandSurfaceKHR(
+ VkInstance instance,
+ const VkWaylandSurfaceCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSurfaceKHR* pSurface);
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWaylandPresentationSupportKHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex,
+ struct wl_display* display);
+#endif
+#endif /* VK_USE_PLATFORM_WAYLAND_KHR */
+
+#ifdef VK_USE_PLATFORM_MIR_KHR
+#define VK_KHR_mir_surface 1
+#include <mir_toolkit/client_types.h>
+
+#define VK_KHR_MIR_SURFACE_SPEC_VERSION 4
+#define VK_KHR_MIR_SURFACE_EXTENSION_NAME "VK_KHR_mir_surface"
+
+typedef VkFlags VkMirSurfaceCreateFlagsKHR;
+
+typedef struct VkMirSurfaceCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkMirSurfaceCreateFlagsKHR flags;
+ MirConnection* connection;
+ MirSurface* mirSurface;
+} VkMirSurfaceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateMirSurfaceKHR)(VkInstance instance, const VkMirSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceMirPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, MirConnection* connection);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateMirSurfaceKHR(
+ VkInstance instance,
+ const VkMirSurfaceCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSurfaceKHR* pSurface);
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceMirPresentationSupportKHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex,
+ MirConnection* connection);
+#endif
+#endif /* VK_USE_PLATFORM_MIR_KHR */
+
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+#define VK_KHR_android_surface 1
+#include <android/native_window.h>
+
+#define VK_KHR_ANDROID_SURFACE_SPEC_VERSION 6
+#define VK_KHR_ANDROID_SURFACE_EXTENSION_NAME "VK_KHR_android_surface"
+
+typedef VkFlags VkAndroidSurfaceCreateFlagsKHR;
+
+typedef struct VkAndroidSurfaceCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkAndroidSurfaceCreateFlagsKHR flags;
+ ANativeWindow* window;
+} VkAndroidSurfaceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateAndroidSurfaceKHR)(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateAndroidSurfaceKHR(
+ VkInstance instance,
+ const VkAndroidSurfaceCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSurfaceKHR* pSurface);
+#endif
+#endif /* VK_USE_PLATFORM_ANDROID_KHR */
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+#define VK_KHR_win32_surface 1
+#include <windows.h>
+
+#define VK_KHR_WIN32_SURFACE_SPEC_VERSION 5
+#define VK_KHR_WIN32_SURFACE_EXTENSION_NAME "VK_KHR_win32_surface"
+
+typedef VkFlags VkWin32SurfaceCreateFlagsKHR;
+
+typedef struct VkWin32SurfaceCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkWin32SurfaceCreateFlagsKHR flags;
+ HINSTANCE hinstance;
+ HWND hwnd;
+} VkWin32SurfaceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateWin32SurfaceKHR)(VkInstance instance, const VkWin32SurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateWin32SurfaceKHR(
+ VkInstance instance,
+ const VkWin32SurfaceCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSurfaceKHR* pSurface);
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWin32PresentationSupportKHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex);
+#endif
+#endif /* VK_USE_PLATFORM_WIN32_KHR */
+
+#define VK_KHR_sampler_mirror_clamp_to_edge 1
+#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_SPEC_VERSION 1
+#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME "VK_KHR_sampler_mirror_clamp_to_edge"
+
+
+#define VK_KHR_get_physical_device_properties2 1
+#define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION 1
+#define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME "VK_KHR_get_physical_device_properties2"
+
+typedef struct VkPhysicalDeviceFeatures2KHR {
+ VkStructureType sType;
+ void* pNext;
+ VkPhysicalDeviceFeatures features;
+} VkPhysicalDeviceFeatures2KHR;
+
+typedef struct VkPhysicalDeviceProperties2KHR {
+ VkStructureType sType;
+ void* pNext;
+ VkPhysicalDeviceProperties properties;
+} VkPhysicalDeviceProperties2KHR;
+
+typedef struct VkFormatProperties2KHR {
+ VkStructureType sType;
+ void* pNext;
+ VkFormatProperties formatProperties;
+} VkFormatProperties2KHR;
+
+typedef struct VkImageFormatProperties2KHR {
+ VkStructureType sType;
+ void* pNext;
+ VkImageFormatProperties imageFormatProperties;
+} VkImageFormatProperties2KHR;
+
+typedef struct VkPhysicalDeviceImageFormatInfo2KHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkFormat format;
+ VkImageType type;
+ VkImageTiling tiling;
+ VkImageUsageFlags usage;
+ VkImageCreateFlags flags;
+} VkPhysicalDeviceImageFormatInfo2KHR;
+
+typedef struct VkQueueFamilyProperties2KHR {
+ VkStructureType sType;
+ void* pNext;
+ VkQueueFamilyProperties queueFamilyProperties;
+} VkQueueFamilyProperties2KHR;
+
+typedef struct VkPhysicalDeviceMemoryProperties2KHR {
+ VkStructureType sType;
+ void* pNext;
+ VkPhysicalDeviceMemoryProperties memoryProperties;
+} VkPhysicalDeviceMemoryProperties2KHR;
+
+typedef struct VkSparseImageFormatProperties2KHR {
+ VkStructureType sType;
+ void* pNext;
+ VkSparseImageFormatProperties properties;
+} VkSparseImageFormatProperties2KHR;
+
+typedef struct VkPhysicalDeviceSparseImageFormatInfo2KHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkFormat format;
+ VkImageType type;
+ VkSampleCountFlagBits samples;
+ VkImageUsageFlags usage;
+ VkImageTiling tiling;
+} VkPhysicalDeviceSparseImageFormatInfo2KHR;
+
+
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2KHR* pFeatures);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2KHR* pProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties2KHR)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2KHR* pFormatProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2KHR* pImageFormatInfo, VkImageFormatProperties2KHR* pImageFormatProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2KHR* pQueueFamilyProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2KHR* pMemoryProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2KHR* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2KHR* pProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2KHR(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceFeatures2KHR* pFeatures);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2KHR(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceProperties2KHR* pProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2KHR(
+ VkPhysicalDevice physicalDevice,
+ VkFormat format,
+ VkFormatProperties2KHR* pFormatProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2KHR(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceImageFormatInfo2KHR* pImageFormatInfo,
+ VkImageFormatProperties2KHR* pImageFormatProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2KHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t* pQueueFamilyPropertyCount,
+ VkQueueFamilyProperties2KHR* pQueueFamilyProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2KHR(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceMemoryProperties2KHR* pMemoryProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2KHR(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceSparseImageFormatInfo2KHR* pFormatInfo,
+ uint32_t* pPropertyCount,
+ VkSparseImageFormatProperties2KHR* pProperties);
+#endif
+
+#define VK_KHR_shader_draw_parameters 1
+#define VK_KHR_SHADER_DRAW_PARAMETERS_SPEC_VERSION 1
+#define VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME "VK_KHR_shader_draw_parameters"
+
+
+#define VK_KHR_maintenance1 1
+#define VK_KHR_MAINTENANCE1_SPEC_VERSION 1
+#define VK_KHR_MAINTENANCE1_EXTENSION_NAME "VK_KHR_maintenance1"
+
+typedef VkFlags VkCommandPoolTrimFlagsKHR;
+
+typedef void (VKAPI_PTR *PFN_vkTrimCommandPoolKHR)(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlagsKHR flags);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkTrimCommandPoolKHR(
+ VkDevice device,
+ VkCommandPool commandPool,
+ VkCommandPoolTrimFlagsKHR flags);
+#endif
+
+#define VK_KHR_push_descriptor 1
+#define VK_KHR_PUSH_DESCRIPTOR_SPEC_VERSION 1
+#define VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME "VK_KHR_push_descriptor"
+
+typedef struct VkPhysicalDevicePushDescriptorPropertiesKHR {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t maxPushDescriptors;
+} VkPhysicalDevicePushDescriptorPropertiesKHR;
+
+
+typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetKHR)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetKHR(
+ VkCommandBuffer commandBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout layout,
+ uint32_t set,
+ uint32_t descriptorWriteCount,
+ const VkWriteDescriptorSet* pDescriptorWrites);
+#endif
+
+#define VK_KHR_incremental_present 1
+#define VK_KHR_INCREMENTAL_PRESENT_SPEC_VERSION 1
+#define VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME "VK_KHR_incremental_present"
+
+typedef struct VkRectLayerKHR {
+ VkOffset2D offset;
+ VkExtent2D extent;
+ uint32_t layer;
+} VkRectLayerKHR;
+
+typedef struct VkPresentRegionKHR {
+ uint32_t rectangleCount;
+ const VkRectLayerKHR* pRectangles;
+} VkPresentRegionKHR;
+
+typedef struct VkPresentRegionsKHR {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t swapchainCount;
+ const VkPresentRegionKHR* pRegions;
+} VkPresentRegionsKHR;
+
+
+
+#define VK_KHR_descriptor_update_template 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorUpdateTemplateKHR)
+
+#define VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_SPEC_VERSION 1
+#define VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME "VK_KHR_descriptor_update_template"
+
+
+typedef enum VkDescriptorUpdateTemplateTypeKHR {
+ VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR = 0,
+ VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR = 1,
+ VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_BEGIN_RANGE_KHR = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR,
+ VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_END_RANGE_KHR = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR,
+ VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_RANGE_SIZE_KHR = (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR - VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR + 1),
+ VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkDescriptorUpdateTemplateTypeKHR;
+
+typedef VkFlags VkDescriptorUpdateTemplateCreateFlagsKHR;
+
+typedef struct VkDescriptorUpdateTemplateEntryKHR {
+ uint32_t dstBinding;
+ uint32_t dstArrayElement;
+ uint32_t descriptorCount;
+ VkDescriptorType descriptorType;
+ size_t offset;
+ size_t stride;
+} VkDescriptorUpdateTemplateEntryKHR;
+
+typedef struct VkDescriptorUpdateTemplateCreateInfoKHR {
+ VkStructureType sType;
+ void* pNext;
+ VkDescriptorUpdateTemplateCreateFlagsKHR flags;
+ uint32_t descriptorUpdateEntryCount;
+ const VkDescriptorUpdateTemplateEntryKHR* pDescriptorUpdateEntries;
+ VkDescriptorUpdateTemplateTypeKHR templateType;
+ VkDescriptorSetLayout descriptorSetLayout;
+ VkPipelineBindPoint pipelineBindPoint;
+ VkPipelineLayout pipelineLayout;
+ uint32_t set;
+} VkDescriptorUpdateTemplateCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorUpdateTemplateKHR)(VkDevice device, const VkDescriptorUpdateTemplateCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplateKHR* pDescriptorUpdateTemplate);
+typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorUpdateTemplateKHR)(VkDevice device, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator);
+typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSetWithTemplateKHR)(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void* pData);
+typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetWithTemplateKHR)(VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void* pData);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorUpdateTemplateKHR(
+ VkDevice device,
+ const VkDescriptorUpdateTemplateCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDescriptorUpdateTemplateKHR* pDescriptorUpdateTemplate);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplateKHR(
+ VkDevice device,
+ VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplateKHR(
+ VkDevice device,
+ VkDescriptorSet descriptorSet,
+ VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ const void* pData);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetWithTemplateKHR(
+ VkCommandBuffer commandBuffer,
+ VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ VkPipelineLayout layout,
+ uint32_t set,
+ const void* pData);
+#endif
+
+#define VK_KHR_shared_presentable_image 1
+#define VK_KHR_SHARED_PRESENTABLE_IMAGE_SPEC_VERSION 1
+#define VK_KHR_SHARED_PRESENTABLE_IMAGE_EXTENSION_NAME "VK_KHR_shared_presentable_image"
+
+typedef struct VkSharedPresentSurfaceCapabilitiesKHR {
+ VkStructureType sType;
+ void* pNext;
+ VkImageUsageFlags sharedPresentSupportedUsageFlags;
+} VkSharedPresentSurfaceCapabilitiesKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainStatusKHR)(VkDevice device, VkSwapchainKHR swapchain);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainStatusKHR(
+ VkDevice device,
+ VkSwapchainKHR swapchain);
+#endif
+
+#define VK_KHR_get_surface_capabilities2 1
+#define VK_KHR_GET_SURFACE_CAPABILITIES_2_SPEC_VERSION 1
+#define VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME "VK_KHR_get_surface_capabilities2"
+
+typedef struct VkPhysicalDeviceSurfaceInfo2KHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkSurfaceKHR surface;
+} VkPhysicalDeviceSurfaceInfo2KHR;
+
+typedef struct VkSurfaceCapabilities2KHR {
+ VkStructureType sType;
+ void* pNext;
+ VkSurfaceCapabilitiesKHR surfaceCapabilities;
+} VkSurfaceCapabilities2KHR;
+
+typedef struct VkSurfaceFormat2KHR {
+ VkStructureType sType;
+ void* pNext;
+ VkSurfaceFormatKHR surfaceFormat;
+} VkSurfaceFormat2KHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkSurfaceCapabilities2KHR* pSurfaceCapabilities);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormats2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, VkSurfaceFormat2KHR* pSurfaceFormats);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2KHR(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
+ VkSurfaceCapabilities2KHR* pSurfaceCapabilities);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormats2KHR(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
+ uint32_t* pSurfaceFormatCount,
+ VkSurfaceFormat2KHR* pSurfaceFormats);
+#endif
+
+#define VK_EXT_debug_report 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugReportCallbackEXT)
+
+#define VK_EXT_DEBUG_REPORT_SPEC_VERSION 6
+#define VK_EXT_DEBUG_REPORT_EXTENSION_NAME "VK_EXT_debug_report"
+#define VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT
+
+
+typedef enum VkDebugReportObjectTypeEXT {
+ VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT = 0,
+ VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT = 1,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT = 2,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT = 3,
+ VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT = 4,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT = 5,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT = 6,
+ VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT = 7,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT = 8,
+ VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT = 9,
+ VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT = 10,
+ VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT = 11,
+ VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT = 12,
+ VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT = 13,
+ VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT = 14,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT = 15,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT = 16,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT = 17,
+ VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT = 18,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT = 19,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT = 20,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT = 21,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT = 22,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT = 23,
+ VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT = 24,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT = 25,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT = 26,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT = 27,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT = 28,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT = 29,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT = 30,
+ VK_DEBUG_REPORT_OBJECT_TYPE_OBJECT_TABLE_NVX_EXT = 31,
+ VK_DEBUG_REPORT_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX_EXT = 32,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT = 1000085000,
+ VK_DEBUG_REPORT_OBJECT_TYPE_BEGIN_RANGE_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_END_RANGE_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_RANGE_SIZE_EXT = (VK_DEBUG_REPORT_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX_EXT - VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT + 1),
+ VK_DEBUG_REPORT_OBJECT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDebugReportObjectTypeEXT;
+
+typedef enum VkDebugReportErrorEXT {
+ VK_DEBUG_REPORT_ERROR_NONE_EXT = 0,
+ VK_DEBUG_REPORT_ERROR_CALLBACK_REF_EXT = 1,
+ VK_DEBUG_REPORT_ERROR_BEGIN_RANGE_EXT = VK_DEBUG_REPORT_ERROR_NONE_EXT,
+ VK_DEBUG_REPORT_ERROR_END_RANGE_EXT = VK_DEBUG_REPORT_ERROR_CALLBACK_REF_EXT,
+ VK_DEBUG_REPORT_ERROR_RANGE_SIZE_EXT = (VK_DEBUG_REPORT_ERROR_CALLBACK_REF_EXT - VK_DEBUG_REPORT_ERROR_NONE_EXT + 1),
+ VK_DEBUG_REPORT_ERROR_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDebugReportErrorEXT;
+
+
+typedef enum VkDebugReportFlagBitsEXT {
+ VK_DEBUG_REPORT_INFORMATION_BIT_EXT = 0x00000001,
+ VK_DEBUG_REPORT_WARNING_BIT_EXT = 0x00000002,
+ VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT = 0x00000004,
+ VK_DEBUG_REPORT_ERROR_BIT_EXT = 0x00000008,
+ VK_DEBUG_REPORT_DEBUG_BIT_EXT = 0x00000010,
+ VK_DEBUG_REPORT_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDebugReportFlagBitsEXT;
+typedef VkFlags VkDebugReportFlagsEXT;
+
+typedef VkBool32 (VKAPI_PTR *PFN_vkDebugReportCallbackEXT)(
+ VkDebugReportFlagsEXT flags,
+ VkDebugReportObjectTypeEXT objectType,
+ uint64_t object,
+ size_t location,
+ int32_t messageCode,
+ const char* pLayerPrefix,
+ const char* pMessage,
+ void* pUserData);
+
+
+typedef struct VkDebugReportCallbackCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkDebugReportFlagsEXT flags;
+ PFN_vkDebugReportCallbackEXT pfnCallback;
+ void* pUserData;
+} VkDebugReportCallbackCreateInfoEXT;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDebugReportCallbackEXT)(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugReportCallbackEXT* pCallback);
+typedef void (VKAPI_PTR *PFN_vkDestroyDebugReportCallbackEXT)(VkInstance instance, VkDebugReportCallbackEXT callback, const VkAllocationCallbacks* pAllocator);
+typedef void (VKAPI_PTR *PFN_vkDebugReportMessageEXT)(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(
+ VkInstance instance,
+ const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDebugReportCallbackEXT* pCallback);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(
+ VkInstance instance,
+ VkDebugReportCallbackEXT callback,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT(
+ VkInstance instance,
+ VkDebugReportFlagsEXT flags,
+ VkDebugReportObjectTypeEXT objectType,
+ uint64_t object,
+ size_t location,
+ int32_t messageCode,
+ const char* pLayerPrefix,
+ const char* pMessage);
+#endif
+
+#define VK_NV_glsl_shader 1
+#define VK_NV_GLSL_SHADER_SPEC_VERSION 1
+#define VK_NV_GLSL_SHADER_EXTENSION_NAME "VK_NV_glsl_shader"
+
+
+#define VK_IMG_filter_cubic 1
+#define VK_IMG_FILTER_CUBIC_SPEC_VERSION 1
+#define VK_IMG_FILTER_CUBIC_EXTENSION_NAME "VK_IMG_filter_cubic"
+
+
+#define VK_AMD_rasterization_order 1
+#define VK_AMD_RASTERIZATION_ORDER_SPEC_VERSION 1
+#define VK_AMD_RASTERIZATION_ORDER_EXTENSION_NAME "VK_AMD_rasterization_order"
+
+
+typedef enum VkRasterizationOrderAMD {
+ VK_RASTERIZATION_ORDER_STRICT_AMD = 0,
+ VK_RASTERIZATION_ORDER_RELAXED_AMD = 1,
+ VK_RASTERIZATION_ORDER_BEGIN_RANGE_AMD = VK_RASTERIZATION_ORDER_STRICT_AMD,
+ VK_RASTERIZATION_ORDER_END_RANGE_AMD = VK_RASTERIZATION_ORDER_RELAXED_AMD,
+ VK_RASTERIZATION_ORDER_RANGE_SIZE_AMD = (VK_RASTERIZATION_ORDER_RELAXED_AMD - VK_RASTERIZATION_ORDER_STRICT_AMD + 1),
+ VK_RASTERIZATION_ORDER_MAX_ENUM_AMD = 0x7FFFFFFF
+} VkRasterizationOrderAMD;
+
+typedef struct VkPipelineRasterizationStateRasterizationOrderAMD {
+ VkStructureType sType;
+ const void* pNext;
+ VkRasterizationOrderAMD rasterizationOrder;
+} VkPipelineRasterizationStateRasterizationOrderAMD;
+
+
+
+#define VK_AMD_shader_trinary_minmax 1
+#define VK_AMD_SHADER_TRINARY_MINMAX_SPEC_VERSION 1
+#define VK_AMD_SHADER_TRINARY_MINMAX_EXTENSION_NAME "VK_AMD_shader_trinary_minmax"
+
+
+#define VK_AMD_shader_explicit_vertex_parameter 1
+#define VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_SPEC_VERSION 1
+#define VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_EXTENSION_NAME "VK_AMD_shader_explicit_vertex_parameter"
+
+
+#define VK_EXT_debug_marker 1
+#define VK_EXT_DEBUG_MARKER_SPEC_VERSION 4
+#define VK_EXT_DEBUG_MARKER_EXTENSION_NAME "VK_EXT_debug_marker"
+
+typedef struct VkDebugMarkerObjectNameInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkDebugReportObjectTypeEXT objectType;
+ uint64_t object;
+ const char* pObjectName;
+} VkDebugMarkerObjectNameInfoEXT;
+
+typedef struct VkDebugMarkerObjectTagInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkDebugReportObjectTypeEXT objectType;
+ uint64_t object;
+ uint64_t tagName;
+ size_t tagSize;
+ const void* pTag;
+} VkDebugMarkerObjectTagInfoEXT;
+
+typedef struct VkDebugMarkerMarkerInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ const char* pMarkerName;
+ float color[4];
+} VkDebugMarkerMarkerInfoEXT;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkDebugMarkerSetObjectTagEXT)(VkDevice device, VkDebugMarkerObjectTagInfoEXT* pTagInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkDebugMarkerSetObjectNameEXT)(VkDevice device, VkDebugMarkerObjectNameInfoEXT* pNameInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerBeginEXT)(VkCommandBuffer commandBuffer, VkDebugMarkerMarkerInfoEXT* pMarkerInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerEndEXT)(VkCommandBuffer commandBuffer);
+typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerInsertEXT)(VkCommandBuffer commandBuffer, VkDebugMarkerMarkerInfoEXT* pMarkerInfo);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkDebugMarkerSetObjectTagEXT(
+ VkDevice device,
+ VkDebugMarkerObjectTagInfoEXT* pTagInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkDebugMarkerSetObjectNameEXT(
+ VkDevice device,
+ VkDebugMarkerObjectNameInfoEXT* pNameInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerBeginEXT(
+ VkCommandBuffer commandBuffer,
+ VkDebugMarkerMarkerInfoEXT* pMarkerInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerEndEXT(
+ VkCommandBuffer commandBuffer);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerInsertEXT(
+ VkCommandBuffer commandBuffer,
+ VkDebugMarkerMarkerInfoEXT* pMarkerInfo);
+#endif
+
+#define VK_AMD_gcn_shader 1
+#define VK_AMD_GCN_SHADER_SPEC_VERSION 1
+#define VK_AMD_GCN_SHADER_EXTENSION_NAME "VK_AMD_gcn_shader"
+
+
+#define VK_NV_dedicated_allocation 1
+#define VK_NV_DEDICATED_ALLOCATION_SPEC_VERSION 1
+#define VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME "VK_NV_dedicated_allocation"
+
+typedef struct VkDedicatedAllocationImageCreateInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkBool32 dedicatedAllocation;
+} VkDedicatedAllocationImageCreateInfoNV;
+
+typedef struct VkDedicatedAllocationBufferCreateInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkBool32 dedicatedAllocation;
+} VkDedicatedAllocationBufferCreateInfoNV;
+
+typedef struct VkDedicatedAllocationMemoryAllocateInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkImage image;
+ VkBuffer buffer;
+} VkDedicatedAllocationMemoryAllocateInfoNV;
+
+
+
+#define VK_AMD_draw_indirect_count 1
+#define VK_AMD_DRAW_INDIRECT_COUNT_SPEC_VERSION 1
+#define VK_AMD_DRAW_INDIRECT_COUNT_EXTENSION_NAME "VK_AMD_draw_indirect_count"
+
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCountAMD(
+ VkCommandBuffer commandBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset,
+ VkBuffer countBuffer,
+ VkDeviceSize countBufferOffset,
+ uint32_t maxDrawCount,
+ uint32_t stride);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCountAMD(
+ VkCommandBuffer commandBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset,
+ VkBuffer countBuffer,
+ VkDeviceSize countBufferOffset,
+ uint32_t maxDrawCount,
+ uint32_t stride);
+#endif
+
+#define VK_AMD_negative_viewport_height 1
+#define VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_SPEC_VERSION 1
+#define VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_EXTENSION_NAME "VK_AMD_negative_viewport_height"
+
+
+#define VK_AMD_gpu_shader_half_float 1
+#define VK_AMD_GPU_SHADER_HALF_FLOAT_SPEC_VERSION 1
+#define VK_AMD_GPU_SHADER_HALF_FLOAT_EXTENSION_NAME "VK_AMD_gpu_shader_half_float"
+
+
+#define VK_AMD_shader_ballot 1
+#define VK_AMD_SHADER_BALLOT_SPEC_VERSION 1
+#define VK_AMD_SHADER_BALLOT_EXTENSION_NAME "VK_AMD_shader_ballot"
+
+
+#define VK_KHX_multiview 1
+#define VK_KHX_MULTIVIEW_SPEC_VERSION 1
+#define VK_KHX_MULTIVIEW_EXTENSION_NAME "VK_KHX_multiview"
+
+typedef struct VkRenderPassMultiviewCreateInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t subpassCount;
+ const uint32_t* pViewMasks;
+ uint32_t dependencyCount;
+ const int32_t* pViewOffsets;
+ uint32_t correlationMaskCount;
+ const uint32_t* pCorrelationMasks;
+} VkRenderPassMultiviewCreateInfoKHX;
+
+typedef struct VkPhysicalDeviceMultiviewFeaturesKHX {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 multiview;
+ VkBool32 multiviewGeometryShader;
+ VkBool32 multiviewTessellationShader;
+} VkPhysicalDeviceMultiviewFeaturesKHX;
+
+typedef struct VkPhysicalDeviceMultiviewPropertiesKHX {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t maxMultiviewViewCount;
+ uint32_t maxMultiviewInstanceIndex;
+} VkPhysicalDeviceMultiviewPropertiesKHX;
+
+
+
+#define VK_IMG_format_pvrtc 1
+#define VK_IMG_FORMAT_PVRTC_SPEC_VERSION 1
+#define VK_IMG_FORMAT_PVRTC_EXTENSION_NAME "VK_IMG_format_pvrtc"
+
+
+#define VK_NV_external_memory_capabilities 1
+#define VK_NV_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION 1
+#define VK_NV_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME "VK_NV_external_memory_capabilities"
+
+
+typedef enum VkExternalMemoryHandleTypeFlagBitsNV {
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_NV = 0x00000001,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_NV = 0x00000002,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_BIT_NV = 0x00000004,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_KMT_BIT_NV = 0x00000008,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF
+} VkExternalMemoryHandleTypeFlagBitsNV;
+typedef VkFlags VkExternalMemoryHandleTypeFlagsNV;
+
+typedef enum VkExternalMemoryFeatureFlagBitsNV {
+ VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_NV = 0x00000001,
+ VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_NV = 0x00000002,
+ VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_NV = 0x00000004,
+ VK_EXTERNAL_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF
+} VkExternalMemoryFeatureFlagBitsNV;
+typedef VkFlags VkExternalMemoryFeatureFlagsNV;
+
+typedef struct VkExternalImageFormatPropertiesNV {
+ VkImageFormatProperties imageFormatProperties;
+ VkExternalMemoryFeatureFlagsNV externalMemoryFeatures;
+ VkExternalMemoryHandleTypeFlagsNV exportFromImportedHandleTypes;
+ VkExternalMemoryHandleTypeFlagsNV compatibleHandleTypes;
+} VkExternalImageFormatPropertiesNV;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkExternalMemoryHandleTypeFlagsNV externalHandleType, VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceExternalImageFormatPropertiesNV(
+ VkPhysicalDevice physicalDevice,
+ VkFormat format,
+ VkImageType type,
+ VkImageTiling tiling,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags,
+ VkExternalMemoryHandleTypeFlagsNV externalHandleType,
+ VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties);
+#endif
+
+#define VK_NV_external_memory 1
+#define VK_NV_EXTERNAL_MEMORY_SPEC_VERSION 1
+#define VK_NV_EXTERNAL_MEMORY_EXTENSION_NAME "VK_NV_external_memory"
+
+typedef struct VkExternalMemoryImageCreateInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkExternalMemoryHandleTypeFlagsNV handleTypes;
+} VkExternalMemoryImageCreateInfoNV;
+
+typedef struct VkExportMemoryAllocateInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkExternalMemoryHandleTypeFlagsNV handleTypes;
+} VkExportMemoryAllocateInfoNV;
+
+
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+#define VK_NV_external_memory_win32 1
+#define VK_NV_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1
+#define VK_NV_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME "VK_NV_external_memory_win32"
+
+typedef struct VkImportMemoryWin32HandleInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkExternalMemoryHandleTypeFlagsNV handleType;
+ HANDLE handle;
+} VkImportMemoryWin32HandleInfoNV;
+
+typedef struct VkExportMemoryWin32HandleInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ const SECURITY_ATTRIBUTES* pAttributes;
+ DWORD dwAccess;
+} VkExportMemoryWin32HandleInfoNV;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleNV)(VkDevice device, VkDeviceMemory memory, VkExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleNV(
+ VkDevice device,
+ VkDeviceMemory memory,
+ VkExternalMemoryHandleTypeFlagsNV handleType,
+ HANDLE* pHandle);
+#endif
+#endif /* VK_USE_PLATFORM_WIN32_KHR */
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+#define VK_NV_win32_keyed_mutex 1
+#define VK_NV_WIN32_KEYED_MUTEX_SPEC_VERSION 1
+#define VK_NV_WIN32_KEYED_MUTEX_EXTENSION_NAME "VK_NV_win32_keyed_mutex"
+
+typedef struct VkWin32KeyedMutexAcquireReleaseInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t acquireCount;
+ const VkDeviceMemory* pAcquireSyncs;
+ const uint64_t* pAcquireKeys;
+ const uint32_t* pAcquireTimeoutMilliseconds;
+ uint32_t releaseCount;
+ const VkDeviceMemory* pReleaseSyncs;
+ const uint64_t* pReleaseKeys;
+} VkWin32KeyedMutexAcquireReleaseInfoNV;
+
+
+#endif /* VK_USE_PLATFORM_WIN32_KHR */
+
+#define VK_KHX_device_group 1
+#define VK_MAX_DEVICE_GROUP_SIZE_KHX 32
+#define VK_KHX_DEVICE_GROUP_SPEC_VERSION 1
+#define VK_KHX_DEVICE_GROUP_EXTENSION_NAME "VK_KHX_device_group"
+
+
+typedef enum VkPeerMemoryFeatureFlagBitsKHX {
+ VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT_KHX = 0x00000001,
+ VK_PEER_MEMORY_FEATURE_COPY_DST_BIT_KHX = 0x00000002,
+ VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT_KHX = 0x00000004,
+ VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT_KHX = 0x00000008,
+ VK_PEER_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM_KHX = 0x7FFFFFFF
+} VkPeerMemoryFeatureFlagBitsKHX;
+typedef VkFlags VkPeerMemoryFeatureFlagsKHX;
+
+typedef enum VkMemoryAllocateFlagBitsKHX {
+ VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT_KHX = 0x00000001,
+ VK_MEMORY_ALLOCATE_FLAG_BITS_MAX_ENUM_KHX = 0x7FFFFFFF
+} VkMemoryAllocateFlagBitsKHX;
+typedef VkFlags VkMemoryAllocateFlagsKHX;
+
+typedef enum VkDeviceGroupPresentModeFlagBitsKHX {
+ VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHX = 0x00000001,
+ VK_DEVICE_GROUP_PRESENT_MODE_REMOTE_BIT_KHX = 0x00000002,
+ VK_DEVICE_GROUP_PRESENT_MODE_SUM_BIT_KHX = 0x00000004,
+ VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_MULTI_DEVICE_BIT_KHX = 0x00000008,
+ VK_DEVICE_GROUP_PRESENT_MODE_FLAG_BITS_MAX_ENUM_KHX = 0x7FFFFFFF
+} VkDeviceGroupPresentModeFlagBitsKHX;
+typedef VkFlags VkDeviceGroupPresentModeFlagsKHX;
+
+typedef struct VkMemoryAllocateFlagsInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ VkMemoryAllocateFlagsKHX flags;
+ uint32_t deviceMask;
+} VkMemoryAllocateFlagsInfoKHX;
+
+typedef struct VkBindBufferMemoryInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ VkBuffer buffer;
+ VkDeviceMemory memory;
+ VkDeviceSize memoryOffset;
+ uint32_t deviceIndexCount;
+ const uint32_t* pDeviceIndices;
+} VkBindBufferMemoryInfoKHX;
+
+typedef struct VkBindImageMemoryInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ VkImage image;
+ VkDeviceMemory memory;
+ VkDeviceSize memoryOffset;
+ uint32_t deviceIndexCount;
+ const uint32_t* pDeviceIndices;
+ uint32_t SFRRectCount;
+ const VkRect2D* pSFRRects;
+} VkBindImageMemoryInfoKHX;
+
+typedef struct VkDeviceGroupRenderPassBeginInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t deviceMask;
+ uint32_t deviceRenderAreaCount;
+ const VkRect2D* pDeviceRenderAreas;
+} VkDeviceGroupRenderPassBeginInfoKHX;
+
+typedef struct VkDeviceGroupCommandBufferBeginInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t deviceMask;
+} VkDeviceGroupCommandBufferBeginInfoKHX;
+
+typedef struct VkDeviceGroupSubmitInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t waitSemaphoreCount;
+ const uint32_t* pWaitSemaphoreDeviceIndices;
+ uint32_t commandBufferCount;
+ const uint32_t* pCommandBufferDeviceMasks;
+ uint32_t signalSemaphoreCount;
+ const uint32_t* pSignalSemaphoreDeviceIndices;
+} VkDeviceGroupSubmitInfoKHX;
+
+typedef struct VkDeviceGroupBindSparseInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t resourceDeviceIndex;
+ uint32_t memoryDeviceIndex;
+} VkDeviceGroupBindSparseInfoKHX;
+
+typedef struct VkDeviceGroupPresentCapabilitiesKHX {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t presentMask[VK_MAX_DEVICE_GROUP_SIZE_KHX];
+ VkDeviceGroupPresentModeFlagsKHX modes;
+} VkDeviceGroupPresentCapabilitiesKHX;
+
+typedef struct VkImageSwapchainCreateInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ VkSwapchainKHR swapchain;
+} VkImageSwapchainCreateInfoKHX;
+
+typedef struct VkBindImageMemorySwapchainInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ VkSwapchainKHR swapchain;
+ uint32_t imageIndex;
+} VkBindImageMemorySwapchainInfoKHX;
+
+typedef struct VkAcquireNextImageInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ VkSwapchainKHR swapchain;
+ uint64_t timeout;
+ VkSemaphore semaphore;
+ VkFence fence;
+ uint32_t deviceMask;
+} VkAcquireNextImageInfoKHX;
+
+typedef struct VkDeviceGroupPresentInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t swapchainCount;
+ const uint32_t* pDeviceMasks;
+ VkDeviceGroupPresentModeFlagBitsKHX mode;
+} VkDeviceGroupPresentInfoKHX;
+
+typedef struct VkDeviceGroupSwapchainCreateInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceGroupPresentModeFlagsKHX modes;
+} VkDeviceGroupSwapchainCreateInfoKHX;
+
+
+typedef void (VKAPI_PTR *PFN_vkGetDeviceGroupPeerMemoryFeaturesKHX)(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlagsKHX* pPeerMemoryFeatures);
+typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory2KHX)(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHX* pBindInfos);
+typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory2KHX)(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfoKHX* pBindInfos);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDeviceMaskKHX)(VkCommandBuffer commandBuffer, uint32_t deviceMask);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupPresentCapabilitiesKHX)(VkDevice device, VkDeviceGroupPresentCapabilitiesKHX* pDeviceGroupPresentCapabilities);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupSurfacePresentModesKHX)(VkDevice device, VkSurfaceKHR surface, VkDeviceGroupPresentModeFlagsKHX* pModes);
+typedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImage2KHX)(VkDevice device, const VkAcquireNextImageInfoKHX* pAcquireInfo, uint32_t* pImageIndex);
+typedef void (VKAPI_PTR *PFN_vkCmdDispatchBaseKHX)(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDevicePresentRectanglesKHX)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pRectCount, VkRect2D* pRects);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeaturesKHX(
+ VkDevice device,
+ uint32_t heapIndex,
+ uint32_t localDeviceIndex,
+ uint32_t remoteDeviceIndex,
+ VkPeerMemoryFeatureFlagsKHX* pPeerMemoryFeatures);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2KHX(
+ VkDevice device,
+ uint32_t bindInfoCount,
+ const VkBindBufferMemoryInfoKHX* pBindInfos);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2KHX(
+ VkDevice device,
+ uint32_t bindInfoCount,
+ const VkBindImageMemoryInfoKHX* pBindInfos);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDeviceMaskKHX(
+ VkCommandBuffer commandBuffer,
+ uint32_t deviceMask);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupPresentCapabilitiesKHX(
+ VkDevice device,
+ VkDeviceGroupPresentCapabilitiesKHX* pDeviceGroupPresentCapabilities);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupSurfacePresentModesKHX(
+ VkDevice device,
+ VkSurfaceKHR surface,
+ VkDeviceGroupPresentModeFlagsKHX* pModes);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImage2KHX(
+ VkDevice device,
+ const VkAcquireNextImageInfoKHX* pAcquireInfo,
+ uint32_t* pImageIndex);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDispatchBaseKHX(
+ VkCommandBuffer commandBuffer,
+ uint32_t baseGroupX,
+ uint32_t baseGroupY,
+ uint32_t baseGroupZ,
+ uint32_t groupCountX,
+ uint32_t groupCountY,
+ uint32_t groupCountZ);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDevicePresentRectanglesKHX(
+ VkPhysicalDevice physicalDevice,
+ VkSurfaceKHR surface,
+ uint32_t* pRectCount,
+ VkRect2D* pRects);
+#endif
+
+#define VK_EXT_validation_flags 1
+#define VK_EXT_VALIDATION_FLAGS_SPEC_VERSION 1
+#define VK_EXT_VALIDATION_FLAGS_EXTENSION_NAME "VK_EXT_validation_flags"
+
+
+typedef enum VkValidationCheckEXT {
+ VK_VALIDATION_CHECK_ALL_EXT = 0,
+ VK_VALIDATION_CHECK_BEGIN_RANGE_EXT = VK_VALIDATION_CHECK_ALL_EXT,
+ VK_VALIDATION_CHECK_END_RANGE_EXT = VK_VALIDATION_CHECK_ALL_EXT,
+ VK_VALIDATION_CHECK_RANGE_SIZE_EXT = (VK_VALIDATION_CHECK_ALL_EXT - VK_VALIDATION_CHECK_ALL_EXT + 1),
+ VK_VALIDATION_CHECK_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkValidationCheckEXT;
+
+typedef struct VkValidationFlagsEXT {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t disabledValidationCheckCount;
+ VkValidationCheckEXT* pDisabledValidationChecks;
+} VkValidationFlagsEXT;
+
+
+
+#ifdef VK_USE_PLATFORM_VI_NN
+#define VK_NN_vi_surface 1
+#define VK_NN_VI_SURFACE_SPEC_VERSION 1
+#define VK_NN_VI_SURFACE_EXTENSION_NAME "VK_NN_vi_surface"
+
+typedef VkFlags VkViSurfaceCreateFlagsNN;
+
+typedef struct VkViSurfaceCreateInfoNN {
+ VkStructureType sType;
+ const void* pNext;
+ VkViSurfaceCreateFlagsNN flags;
+ void* window;
+} VkViSurfaceCreateInfoNN;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateViSurfaceNN)(VkInstance instance, const VkViSurfaceCreateInfoNN* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateViSurfaceNN(
+ VkInstance instance,
+ const VkViSurfaceCreateInfoNN* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSurfaceKHR* pSurface);
+#endif
+#endif /* VK_USE_PLATFORM_VI_NN */
+
+#define VK_EXT_shader_subgroup_ballot 1
+#define VK_EXT_SHADER_SUBGROUP_BALLOT_SPEC_VERSION 1
+#define VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME "VK_EXT_shader_subgroup_ballot"
+
+
+#define VK_EXT_shader_subgroup_vote 1
+#define VK_EXT_SHADER_SUBGROUP_VOTE_SPEC_VERSION 1
+#define VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME "VK_EXT_shader_subgroup_vote"
+
+
+#define VK_KHX_device_group_creation 1
+#define VK_KHX_DEVICE_GROUP_CREATION_SPEC_VERSION 1
+#define VK_KHX_DEVICE_GROUP_CREATION_EXTENSION_NAME "VK_KHX_device_group_creation"
+
+typedef struct VkPhysicalDeviceGroupPropertiesKHX {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t physicalDeviceCount;
+ VkPhysicalDevice physicalDevices[VK_MAX_DEVICE_GROUP_SIZE_KHX];
+ VkBool32 subsetAllocation;
+} VkPhysicalDeviceGroupPropertiesKHX;
+
+typedef struct VkDeviceGroupDeviceCreateInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t physicalDeviceCount;
+ const VkPhysicalDevice* pPhysicalDevices;
+} VkDeviceGroupDeviceCreateInfoKHX;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceGroupsKHX)(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHX* pPhysicalDeviceGroupProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroupsKHX(
+ VkInstance instance,
+ uint32_t* pPhysicalDeviceGroupCount,
+ VkPhysicalDeviceGroupPropertiesKHX* pPhysicalDeviceGroupProperties);
+#endif
+
+#define VK_KHX_external_memory_capabilities 1
+#define VK_LUID_SIZE_KHX 8
+#define VK_KHX_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION 1
+#define VK_KHX_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME "VK_KHX_external_memory_capabilities"
+
+
+typedef enum VkExternalMemoryHandleTypeFlagBitsKHX {
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHX = 0x00000001,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHX = 0x00000002,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHX = 0x00000004,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT_KHX = 0x00000008,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT_KHX = 0x00000010,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT_KHX = 0x00000020,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT_KHX = 0x00000040,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_FLAG_BITS_MAX_ENUM_KHX = 0x7FFFFFFF
+} VkExternalMemoryHandleTypeFlagBitsKHX;
+typedef VkFlags VkExternalMemoryHandleTypeFlagsKHX;
+
+typedef enum VkExternalMemoryFeatureFlagBitsKHX {
+ VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHX = 0x00000001,
+ VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHX = 0x00000002,
+ VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHX = 0x00000004,
+ VK_EXTERNAL_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM_KHX = 0x7FFFFFFF
+} VkExternalMemoryFeatureFlagBitsKHX;
+typedef VkFlags VkExternalMemoryFeatureFlagsKHX;
+
+typedef struct VkExternalMemoryPropertiesKHX {
+ VkExternalMemoryFeatureFlagsKHX externalMemoryFeatures;
+ VkExternalMemoryHandleTypeFlagsKHX exportFromImportedHandleTypes;
+ VkExternalMemoryHandleTypeFlagsKHX compatibleHandleTypes;
+} VkExternalMemoryPropertiesKHX;
+
+typedef struct VkPhysicalDeviceExternalImageFormatInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ VkExternalMemoryHandleTypeFlagBitsKHX handleType;
+} VkPhysicalDeviceExternalImageFormatInfoKHX;
+
+typedef struct VkExternalImageFormatPropertiesKHX {
+ VkStructureType sType;
+ void* pNext;
+ VkExternalMemoryPropertiesKHX externalMemoryProperties;
+} VkExternalImageFormatPropertiesKHX;
+
+typedef struct VkPhysicalDeviceExternalBufferInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ VkBufferCreateFlags flags;
+ VkBufferUsageFlags usage;
+ VkExternalMemoryHandleTypeFlagBitsKHX handleType;
+} VkPhysicalDeviceExternalBufferInfoKHX;
+
+typedef struct VkExternalBufferPropertiesKHX {
+ VkStructureType sType;
+ void* pNext;
+ VkExternalMemoryPropertiesKHX externalMemoryProperties;
+} VkExternalBufferPropertiesKHX;
+
+typedef struct VkPhysicalDeviceIDPropertiesKHX {
+ VkStructureType sType;
+ void* pNext;
+ uint8_t deviceUUID[VK_UUID_SIZE];
+ uint8_t driverUUID[VK_UUID_SIZE];
+ uint8_t deviceLUID[VK_LUID_SIZE_KHX];
+ VkBool32 deviceLUIDValid;
+} VkPhysicalDeviceIDPropertiesKHX;
+
+
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHX)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfoKHX* pExternalBufferInfo, VkExternalBufferPropertiesKHX* pExternalBufferProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferPropertiesKHX(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceExternalBufferInfoKHX* pExternalBufferInfo,
+ VkExternalBufferPropertiesKHX* pExternalBufferProperties);
+#endif
+
+#define VK_KHX_external_memory 1
+#define VK_KHX_EXTERNAL_MEMORY_SPEC_VERSION 1
+#define VK_KHX_EXTERNAL_MEMORY_EXTENSION_NAME "VK_KHX_external_memory"
+#define VK_QUEUE_FAMILY_EXTERNAL_KHX (~0U-1)
+
+typedef struct VkExternalMemoryImageCreateInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ VkExternalMemoryHandleTypeFlagsKHX handleTypes;
+} VkExternalMemoryImageCreateInfoKHX;
+
+typedef struct VkExternalMemoryBufferCreateInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ VkExternalMemoryHandleTypeFlagsKHX handleTypes;
+} VkExternalMemoryBufferCreateInfoKHX;
+
+typedef struct VkExportMemoryAllocateInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ VkExternalMemoryHandleTypeFlagsKHX handleTypes;
+} VkExportMemoryAllocateInfoKHX;
+
+
+
+#ifdef VK_USE_PLATFORM_WIN32_KHX
+#define VK_KHX_external_memory_win32 1
+#define VK_KHX_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1
+#define VK_KHX_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME "VK_KHX_external_memory_win32"
+
+typedef struct VkImportMemoryWin32HandleInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ VkExternalMemoryHandleTypeFlagBitsKHX handleType;
+ HANDLE handle;
+} VkImportMemoryWin32HandleInfoKHX;
+
+typedef struct VkExportMemoryWin32HandleInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ const SECURITY_ATTRIBUTES* pAttributes;
+ DWORD dwAccess;
+ LPCWSTR name;
+} VkExportMemoryWin32HandleInfoKHX;
+
+typedef struct VkMemoryWin32HandlePropertiesKHX {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t memoryTypeBits;
+} VkMemoryWin32HandlePropertiesKHX;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleKHX)(VkDevice device, VkDeviceMemory memory, VkExternalMemoryHandleTypeFlagBitsKHX handleType, HANDLE* pHandle);
+typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandlePropertiesKHX)(VkDevice device, VkExternalMemoryHandleTypeFlagBitsKHX handleType, HANDLE handle, VkMemoryWin32HandlePropertiesKHX* pMemoryWin32HandleProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleKHX(
+ VkDevice device,
+ VkDeviceMemory memory,
+ VkExternalMemoryHandleTypeFlagBitsKHX handleType,
+ HANDLE* pHandle);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandlePropertiesKHX(
+ VkDevice device,
+ VkExternalMemoryHandleTypeFlagBitsKHX handleType,
+ HANDLE handle,
+ VkMemoryWin32HandlePropertiesKHX* pMemoryWin32HandleProperties);
+#endif
+#endif /* VK_USE_PLATFORM_WIN32_KHX */
+
+#define VK_KHX_external_memory_fd 1
+#define VK_KHX_EXTERNAL_MEMORY_FD_SPEC_VERSION 1
+#define VK_KHX_EXTERNAL_MEMORY_FD_EXTENSION_NAME "VK_KHX_external_memory_fd"
+
+typedef struct VkImportMemoryFdInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ VkExternalMemoryHandleTypeFlagBitsKHX handleType;
+ int fd;
+} VkImportMemoryFdInfoKHX;
+
+typedef struct VkMemoryFdPropertiesKHX {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t memoryTypeBits;
+} VkMemoryFdPropertiesKHX;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryFdKHX)(VkDevice device, VkDeviceMemory memory, VkExternalMemoryHandleTypeFlagBitsKHX handleType, int* pFd);
+typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryFdPropertiesKHX)(VkDevice device, VkExternalMemoryHandleTypeFlagBitsKHX handleType, int fd, VkMemoryFdPropertiesKHX* pMemoryFdProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdKHX(
+ VkDevice device,
+ VkDeviceMemory memory,
+ VkExternalMemoryHandleTypeFlagBitsKHX handleType,
+ int* pFd);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdPropertiesKHX(
+ VkDevice device,
+ VkExternalMemoryHandleTypeFlagBitsKHX handleType,
+ int fd,
+ VkMemoryFdPropertiesKHX* pMemoryFdProperties);
+#endif
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+#define VK_KHX_win32_keyed_mutex 1
+#define VK_KHX_WIN32_KEYED_MUTEX_SPEC_VERSION 1
+#define VK_KHX_WIN32_KEYED_MUTEX_EXTENSION_NAME "VK_KHX_win32_keyed_mutex"
+
+typedef struct VkWin32KeyedMutexAcquireReleaseInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t acquireCount;
+ const VkDeviceMemory* pAcquireSyncs;
+ const uint64_t* pAcquireKeys;
+ const uint32_t* pAcquireTimeouts;
+ uint32_t releaseCount;
+ const VkDeviceMemory* pReleaseSyncs;
+ const uint64_t* pReleaseKeys;
+} VkWin32KeyedMutexAcquireReleaseInfoKHX;
+
+
+#endif /* VK_USE_PLATFORM_WIN32_KHR */
+
+#define VK_KHX_external_semaphore_capabilities 1
+#define VK_KHX_EXTERNAL_SEMAPHORE_CAPABILITIES_SPEC_VERSION 1
+#define VK_KHX_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME "VK_KHX_external_semaphore_capabilities"
+
+
+typedef enum VkExternalSemaphoreHandleTypeFlagBitsKHX {
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHX = 0x00000001,
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHX = 0x00000002,
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHX = 0x00000004,
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT_KHX = 0x00000008,
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_FENCE_FD_BIT_KHX = 0x00000010,
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_FLAG_BITS_MAX_ENUM_KHX = 0x7FFFFFFF
+} VkExternalSemaphoreHandleTypeFlagBitsKHX;
+typedef VkFlags VkExternalSemaphoreHandleTypeFlagsKHX;
+
+typedef enum VkExternalSemaphoreFeatureFlagBitsKHX {
+ VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHX = 0x00000001,
+ VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHX = 0x00000002,
+ VK_EXTERNAL_SEMAPHORE_FEATURE_FLAG_BITS_MAX_ENUM_KHX = 0x7FFFFFFF
+} VkExternalSemaphoreFeatureFlagBitsKHX;
+typedef VkFlags VkExternalSemaphoreFeatureFlagsKHX;
+
+typedef struct VkPhysicalDeviceExternalSemaphoreInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ VkExternalSemaphoreHandleTypeFlagBitsKHX handleType;
+} VkPhysicalDeviceExternalSemaphoreInfoKHX;
+
+typedef struct VkExternalSemaphorePropertiesKHX {
+ VkStructureType sType;
+ void* pNext;
+ VkExternalSemaphoreHandleTypeFlagsKHX exportFromImportedHandleTypes;
+ VkExternalSemaphoreHandleTypeFlagsKHX compatibleHandleTypes;
+ VkExternalSemaphoreFeatureFlagsKHX externalSemaphoreFeatures;
+} VkExternalSemaphorePropertiesKHX;
+
+
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHX)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfoKHX* pExternalSemaphoreInfo, VkExternalSemaphorePropertiesKHX* pExternalSemaphoreProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphorePropertiesKHX(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceExternalSemaphoreInfoKHX* pExternalSemaphoreInfo,
+ VkExternalSemaphorePropertiesKHX* pExternalSemaphoreProperties);
+#endif
+
+#define VK_KHX_external_semaphore 1
+#define VK_KHX_EXTERNAL_SEMAPHORE_SPEC_VERSION 1
+#define VK_KHX_EXTERNAL_SEMAPHORE_EXTENSION_NAME "VK_KHX_external_semaphore"
+
+typedef struct VkExportSemaphoreCreateInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ VkExternalSemaphoreHandleTypeFlagsKHX handleTypes;
+} VkExportSemaphoreCreateInfoKHX;
+
+
+
+#ifdef VK_USE_PLATFORM_WIN32_KHX
+#define VK_KHX_external_semaphore_win32 1
+#define VK_KHX_EXTERNAL_SEMAPHORE_WIN32_SPEC_VERSION 1
+#define VK_KHX_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME "VK_KHX_external_semaphore_win32"
+
+typedef struct VkImportSemaphoreWin32HandleInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ VkSemaphore semaphore;
+ VkExternalSemaphoreHandleTypeFlagsKHX handleType;
+ HANDLE handle;
+} VkImportSemaphoreWin32HandleInfoKHX;
+
+typedef struct VkExportSemaphoreWin32HandleInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ const SECURITY_ATTRIBUTES* pAttributes;
+ DWORD dwAccess;
+ LPCWSTR name;
+} VkExportSemaphoreWin32HandleInfoKHX;
+
+typedef struct VkD3D12FenceSubmitInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t waitSemaphoreValuesCount;
+ const uint64_t* pWaitSemaphoreValues;
+ uint32_t signalSemaphoreValuesCount;
+ const uint64_t* pSignalSemaphoreValues;
+} VkD3D12FenceSubmitInfoKHX;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreWin32HandleKHX)(VkDevice device, const VkImportSemaphoreWin32HandleInfoKHX* pImportSemaphoreWin32HandleInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreWin32HandleKHX)(VkDevice device, VkSemaphore semaphore, VkExternalSemaphoreHandleTypeFlagBitsKHX handleType, HANDLE* pHandle);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreWin32HandleKHX(
+ VkDevice device,
+ const VkImportSemaphoreWin32HandleInfoKHX* pImportSemaphoreWin32HandleInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreWin32HandleKHX(
+ VkDevice device,
+ VkSemaphore semaphore,
+ VkExternalSemaphoreHandleTypeFlagBitsKHX handleType,
+ HANDLE* pHandle);
+#endif
+#endif /* VK_USE_PLATFORM_WIN32_KHX */
+
+#define VK_KHX_external_semaphore_fd 1
+#define VK_KHX_EXTERNAL_SEMAPHORE_FD_SPEC_VERSION 1
+#define VK_KHX_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME "VK_KHX_external_semaphore_fd"
+
+typedef struct VkImportSemaphoreFdInfoKHX {
+ VkStructureType sType;
+ const void* pNext;
+ VkSemaphore semaphore;
+ VkExternalSemaphoreHandleTypeFlagBitsKHX handleType;
+ int fd;
+} VkImportSemaphoreFdInfoKHX;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreFdKHX)(VkDevice device, const VkImportSemaphoreFdInfoKHX* pImportSemaphoreFdInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreFdKHX)(VkDevice device, VkSemaphore semaphore, VkExternalSemaphoreHandleTypeFlagBitsKHX handleType, int* pFd);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreFdKHX(
+ VkDevice device,
+ const VkImportSemaphoreFdInfoKHX* pImportSemaphoreFdInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreFdKHX(
+ VkDevice device,
+ VkSemaphore semaphore,
+ VkExternalSemaphoreHandleTypeFlagBitsKHX handleType,
+ int* pFd);
+#endif
+
+#define VK_NVX_device_generated_commands 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkObjectTableNVX)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkIndirectCommandsLayoutNVX)
+
+#define VK_NVX_DEVICE_GENERATED_COMMANDS_SPEC_VERSION 1
+#define VK_NVX_DEVICE_GENERATED_COMMANDS_EXTENSION_NAME "VK_NVX_device_generated_commands"
+
+
+typedef enum VkIndirectCommandsTokenTypeNVX {
+ VK_INDIRECT_COMMANDS_TOKEN_PIPELINE_NVX = 0,
+ VK_INDIRECT_COMMANDS_TOKEN_DESCRIPTOR_SET_NVX = 1,
+ VK_INDIRECT_COMMANDS_TOKEN_INDEX_BUFFER_NVX = 2,
+ VK_INDIRECT_COMMANDS_TOKEN_VERTEX_BUFFER_NVX = 3,
+ VK_INDIRECT_COMMANDS_TOKEN_PUSH_CONSTANT_NVX = 4,
+ VK_INDIRECT_COMMANDS_TOKEN_DRAW_INDEXED_NVX = 5,
+ VK_INDIRECT_COMMANDS_TOKEN_DRAW_NVX = 6,
+ VK_INDIRECT_COMMANDS_TOKEN_DISPATCH_NVX = 7,
+ VK_INDIRECT_COMMANDS_TOKEN_TYPE_BEGIN_RANGE_NVX = VK_INDIRECT_COMMANDS_TOKEN_PIPELINE_NVX,
+ VK_INDIRECT_COMMANDS_TOKEN_TYPE_END_RANGE_NVX = VK_INDIRECT_COMMANDS_TOKEN_DISPATCH_NVX,
+ VK_INDIRECT_COMMANDS_TOKEN_TYPE_RANGE_SIZE_NVX = (VK_INDIRECT_COMMANDS_TOKEN_DISPATCH_NVX - VK_INDIRECT_COMMANDS_TOKEN_PIPELINE_NVX + 1),
+ VK_INDIRECT_COMMANDS_TOKEN_TYPE_MAX_ENUM_NVX = 0x7FFFFFFF
+} VkIndirectCommandsTokenTypeNVX;
+
+typedef enum VkObjectEntryTypeNVX {
+ VK_OBJECT_ENTRY_DESCRIPTOR_SET_NVX = 0,
+ VK_OBJECT_ENTRY_PIPELINE_NVX = 1,
+ VK_OBJECT_ENTRY_INDEX_BUFFER_NVX = 2,
+ VK_OBJECT_ENTRY_VERTEX_BUFFER_NVX = 3,
+ VK_OBJECT_ENTRY_PUSH_CONSTANT_NVX = 4,
+ VK_OBJECT_ENTRY_TYPE_BEGIN_RANGE_NVX = VK_OBJECT_ENTRY_DESCRIPTOR_SET_NVX,
+ VK_OBJECT_ENTRY_TYPE_END_RANGE_NVX = VK_OBJECT_ENTRY_PUSH_CONSTANT_NVX,
+ VK_OBJECT_ENTRY_TYPE_RANGE_SIZE_NVX = (VK_OBJECT_ENTRY_PUSH_CONSTANT_NVX - VK_OBJECT_ENTRY_DESCRIPTOR_SET_NVX + 1),
+ VK_OBJECT_ENTRY_TYPE_MAX_ENUM_NVX = 0x7FFFFFFF
+} VkObjectEntryTypeNVX;
+
+
+typedef enum VkIndirectCommandsLayoutUsageFlagBitsNVX {
+ VK_INDIRECT_COMMANDS_LAYOUT_USAGE_UNORDERED_SEQUENCES_BIT_NVX = 0x00000001,
+ VK_INDIRECT_COMMANDS_LAYOUT_USAGE_SPARSE_SEQUENCES_BIT_NVX = 0x00000002,
+ VK_INDIRECT_COMMANDS_LAYOUT_USAGE_EMPTY_EXECUTIONS_BIT_NVX = 0x00000004,
+ VK_INDIRECT_COMMANDS_LAYOUT_USAGE_INDEXED_SEQUENCES_BIT_NVX = 0x00000008,
+ VK_INDIRECT_COMMANDS_LAYOUT_USAGE_FLAG_BITS_MAX_ENUM_NVX = 0x7FFFFFFF
+} VkIndirectCommandsLayoutUsageFlagBitsNVX;
+typedef VkFlags VkIndirectCommandsLayoutUsageFlagsNVX;
+
+typedef enum VkObjectEntryUsageFlagBitsNVX {
+ VK_OBJECT_ENTRY_USAGE_GRAPHICS_BIT_NVX = 0x00000001,
+ VK_OBJECT_ENTRY_USAGE_COMPUTE_BIT_NVX = 0x00000002,
+ VK_OBJECT_ENTRY_USAGE_FLAG_BITS_MAX_ENUM_NVX = 0x7FFFFFFF
+} VkObjectEntryUsageFlagBitsNVX;
+typedef VkFlags VkObjectEntryUsageFlagsNVX;
+
+typedef struct VkDeviceGeneratedCommandsFeaturesNVX {
+ VkStructureType sType;
+ const void* pNext;
+ VkBool32 computeBindingPointSupport;
+} VkDeviceGeneratedCommandsFeaturesNVX;
+
+typedef struct VkDeviceGeneratedCommandsLimitsNVX {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t maxIndirectCommandsLayoutTokenCount;
+ uint32_t maxObjectEntryCounts;
+ uint32_t minSequenceCountBufferOffsetAlignment;
+ uint32_t minSequenceIndexBufferOffsetAlignment;
+ uint32_t minCommandsTokenBufferOffsetAlignment;
+} VkDeviceGeneratedCommandsLimitsNVX;
+
+typedef struct VkIndirectCommandsTokenNVX {
+ VkIndirectCommandsTokenTypeNVX tokenType;
+ VkBuffer buffer;
+ VkDeviceSize offset;
+} VkIndirectCommandsTokenNVX;
+
+typedef struct VkIndirectCommandsLayoutTokenNVX {
+ VkIndirectCommandsTokenTypeNVX tokenType;
+ uint32_t bindingUnit;
+ uint32_t dynamicCount;
+ uint32_t divisor;
+} VkIndirectCommandsLayoutTokenNVX;
+
+typedef struct VkIndirectCommandsLayoutCreateInfoNVX {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineBindPoint pipelineBindPoint;
+ VkIndirectCommandsLayoutUsageFlagsNVX flags;
+ uint32_t tokenCount;
+ const VkIndirectCommandsLayoutTokenNVX* pTokens;
+} VkIndirectCommandsLayoutCreateInfoNVX;
+
+typedef struct VkCmdProcessCommandsInfoNVX {
+ VkStructureType sType;
+ const void* pNext;
+ VkObjectTableNVX objectTable;
+ VkIndirectCommandsLayoutNVX indirectCommandsLayout;
+ uint32_t indirectCommandsTokenCount;
+ const VkIndirectCommandsTokenNVX* pIndirectCommandsTokens;
+ uint32_t maxSequencesCount;
+ VkCommandBuffer targetCommandBuffer;
+ VkBuffer sequencesCountBuffer;
+ VkDeviceSize sequencesCountOffset;
+ VkBuffer sequencesIndexBuffer;
+ VkDeviceSize sequencesIndexOffset;
+} VkCmdProcessCommandsInfoNVX;
+
+typedef struct VkCmdReserveSpaceForCommandsInfoNVX {
+ VkStructureType sType;
+ const void* pNext;
+ VkObjectTableNVX objectTable;
+ VkIndirectCommandsLayoutNVX indirectCommandsLayout;
+ uint32_t maxSequencesCount;
+} VkCmdReserveSpaceForCommandsInfoNVX;
+
+typedef struct VkObjectTableCreateInfoNVX {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t objectCount;
+ const VkObjectEntryTypeNVX* pObjectEntryTypes;
+ const uint32_t* pObjectEntryCounts;
+ const VkObjectEntryUsageFlagsNVX* pObjectEntryUsageFlags;
+ uint32_t maxUniformBuffersPerDescriptor;
+ uint32_t maxStorageBuffersPerDescriptor;
+ uint32_t maxStorageImagesPerDescriptor;
+ uint32_t maxSampledImagesPerDescriptor;
+ uint32_t maxPipelineLayouts;
+} VkObjectTableCreateInfoNVX;
+
+typedef struct VkObjectTableEntryNVX {
+ VkObjectEntryTypeNVX type;
+ VkObjectEntryUsageFlagsNVX flags;
+} VkObjectTableEntryNVX;
+
+typedef struct VkObjectTablePipelineEntryNVX {
+ VkObjectEntryTypeNVX type;
+ VkObjectEntryUsageFlagsNVX flags;
+ VkPipeline pipeline;
+} VkObjectTablePipelineEntryNVX;
+
+typedef struct VkObjectTableDescriptorSetEntryNVX {
+ VkObjectEntryTypeNVX type;
+ VkObjectEntryUsageFlagsNVX flags;
+ VkPipelineLayout pipelineLayout;
+ VkDescriptorSet descriptorSet;
+} VkObjectTableDescriptorSetEntryNVX;
+
+typedef struct VkObjectTableVertexBufferEntryNVX {
+ VkObjectEntryTypeNVX type;
+ VkObjectEntryUsageFlagsNVX flags;
+ VkBuffer buffer;
+} VkObjectTableVertexBufferEntryNVX;
+
+typedef struct VkObjectTableIndexBufferEntryNVX {
+ VkObjectEntryTypeNVX type;
+ VkObjectEntryUsageFlagsNVX flags;
+ VkBuffer buffer;
+ VkIndexType indexType;
+} VkObjectTableIndexBufferEntryNVX;
+
+typedef struct VkObjectTablePushConstantEntryNVX {
+ VkObjectEntryTypeNVX type;
+ VkObjectEntryUsageFlagsNVX flags;
+ VkPipelineLayout pipelineLayout;
+ VkShaderStageFlags stageFlags;
+} VkObjectTablePushConstantEntryNVX;
+
+
+typedef void (VKAPI_PTR *PFN_vkCmdProcessCommandsNVX)(VkCommandBuffer commandBuffer, const VkCmdProcessCommandsInfoNVX* pProcessCommandsInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdReserveSpaceForCommandsNVX)(VkCommandBuffer commandBuffer, const VkCmdReserveSpaceForCommandsInfoNVX* pReserveSpaceInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateIndirectCommandsLayoutNVX)(VkDevice device, const VkIndirectCommandsLayoutCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkIndirectCommandsLayoutNVX* pIndirectCommandsLayout);
+typedef void (VKAPI_PTR *PFN_vkDestroyIndirectCommandsLayoutNVX)(VkDevice device, VkIndirectCommandsLayoutNVX indirectCommandsLayout, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateObjectTableNVX)(VkDevice device, const VkObjectTableCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkObjectTableNVX* pObjectTable);
+typedef void (VKAPI_PTR *PFN_vkDestroyObjectTableNVX)(VkDevice device, VkObjectTableNVX objectTable, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkRegisterObjectsNVX)(VkDevice device, VkObjectTableNVX objectTable, uint32_t objectCount, const VkObjectTableEntryNVX* const* ppObjectTableEntries, const uint32_t* pObjectIndices);
+typedef VkResult (VKAPI_PTR *PFN_vkUnregisterObjectsNVX)(VkDevice device, VkObjectTableNVX objectTable, uint32_t objectCount, const VkObjectEntryTypeNVX* pObjectEntryTypes, const uint32_t* pObjectIndices);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX)(VkPhysicalDevice physicalDevice, VkDeviceGeneratedCommandsFeaturesNVX* pFeatures, VkDeviceGeneratedCommandsLimitsNVX* pLimits);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdProcessCommandsNVX(
+ VkCommandBuffer commandBuffer,
+ const VkCmdProcessCommandsInfoNVX* pProcessCommandsInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdReserveSpaceForCommandsNVX(
+ VkCommandBuffer commandBuffer,
+ const VkCmdReserveSpaceForCommandsInfoNVX* pReserveSpaceInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateIndirectCommandsLayoutNVX(
+ VkDevice device,
+ const VkIndirectCommandsLayoutCreateInfoNVX* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkIndirectCommandsLayoutNVX* pIndirectCommandsLayout);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyIndirectCommandsLayoutNVX(
+ VkDevice device,
+ VkIndirectCommandsLayoutNVX indirectCommandsLayout,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateObjectTableNVX(
+ VkDevice device,
+ const VkObjectTableCreateInfoNVX* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkObjectTableNVX* pObjectTable);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyObjectTableNVX(
+ VkDevice device,
+ VkObjectTableNVX objectTable,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkRegisterObjectsNVX(
+ VkDevice device,
+ VkObjectTableNVX objectTable,
+ uint32_t objectCount,
+ const VkObjectTableEntryNVX* const* ppObjectTableEntries,
+ const uint32_t* pObjectIndices);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkUnregisterObjectsNVX(
+ VkDevice device,
+ VkObjectTableNVX objectTable,
+ uint32_t objectCount,
+ const VkObjectEntryTypeNVX* pObjectEntryTypes,
+ const uint32_t* pObjectIndices);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX(
+ VkPhysicalDevice physicalDevice,
+ VkDeviceGeneratedCommandsFeaturesNVX* pFeatures,
+ VkDeviceGeneratedCommandsLimitsNVX* pLimits);
+#endif
+
+#define VK_NV_clip_space_w_scaling 1
+#define VK_NV_CLIP_SPACE_W_SCALING_SPEC_VERSION 1
+#define VK_NV_CLIP_SPACE_W_SCALING_EXTENSION_NAME "VK_NV_clip_space_w_scaling"
+
+typedef struct VkViewportWScalingNV {
+ float xcoeff;
+ float ycoeff;
+} VkViewportWScalingNV;
+
+typedef struct VkPipelineViewportWScalingStateCreateInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkBool32 viewportWScalingEnable;
+ uint32_t viewportCount;
+ const VkViewportWScalingNV* pViewportWScalings;
+} VkPipelineViewportWScalingStateCreateInfoNV;
+
+
+typedef void (VKAPI_PTR *PFN_vkCmdSetViewportWScalingNV)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewportWScalingNV* pViewportWScalings);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportWScalingNV(
+ VkCommandBuffer commandBuffer,
+ uint32_t firstViewport,
+ uint32_t viewportCount,
+ const VkViewportWScalingNV* pViewportWScalings);
+#endif
+
+#define VK_EXT_direct_mode_display 1
+#define VK_EXT_DIRECT_MODE_DISPLAY_SPEC_VERSION 1
+#define VK_EXT_DIRECT_MODE_DISPLAY_EXTENSION_NAME "VK_EXT_direct_mode_display"
+
+typedef VkResult (VKAPI_PTR *PFN_vkReleaseDisplayEXT)(VkPhysicalDevice physicalDevice, VkDisplayKHR display);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkReleaseDisplayEXT(
+ VkPhysicalDevice physicalDevice,
+ VkDisplayKHR display);
+#endif
+
+#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
+#define VK_EXT_acquire_xlib_display 1
+#include <X11/extensions/Xrandr.h>
+
+#define VK_EXT_ACQUIRE_XLIB_DISPLAY_SPEC_VERSION 1
+#define VK_EXT_ACQUIRE_XLIB_DISPLAY_EXTENSION_NAME "VK_EXT_acquire_xlib_display"
+
+typedef VkResult (VKAPI_PTR *PFN_vkAcquireXlibDisplayEXT)(VkPhysicalDevice physicalDevice, Display* dpy, VkDisplayKHR display);
+typedef VkResult (VKAPI_PTR *PFN_vkGetRandROutputDisplayEXT)(VkPhysicalDevice physicalDevice, Display* dpy, RROutput rrOutput, VkDisplayKHR* pDisplay);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkAcquireXlibDisplayEXT(
+ VkPhysicalDevice physicalDevice,
+ Display* dpy,
+ VkDisplayKHR display);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetRandROutputDisplayEXT(
+ VkPhysicalDevice physicalDevice,
+ Display* dpy,
+ RROutput rrOutput,
+ VkDisplayKHR* pDisplay);
+#endif
+#endif /* VK_USE_PLATFORM_XLIB_XRANDR_EXT */
+
+#define VK_EXT_display_surface_counter 1
+#define VK_EXT_DISPLAY_SURFACE_COUNTER_SPEC_VERSION 1
+#define VK_EXT_DISPLAY_SURFACE_COUNTER_EXTENSION_NAME "VK_EXT_display_surface_counter"
+
+
+typedef enum VkSurfaceCounterFlagBitsEXT {
+ VK_SURFACE_COUNTER_VBLANK_EXT = 0x00000001,
+ VK_SURFACE_COUNTER_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkSurfaceCounterFlagBitsEXT;
+typedef VkFlags VkSurfaceCounterFlagsEXT;
+
+typedef struct VkSurfaceCapabilities2EXT {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t minImageCount;
+ uint32_t maxImageCount;
+ VkExtent2D currentExtent;
+ VkExtent2D minImageExtent;
+ VkExtent2D maxImageExtent;
+ uint32_t maxImageArrayLayers;
+ VkSurfaceTransformFlagsKHR supportedTransforms;
+ VkSurfaceTransformFlagBitsKHR currentTransform;
+ VkCompositeAlphaFlagsKHR supportedCompositeAlpha;
+ VkImageUsageFlags supportedUsageFlags;
+ VkSurfaceCounterFlagsEXT supportedSurfaceCounters;
+} VkSurfaceCapabilities2EXT;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilities2EXT* pSurfaceCapabilities);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2EXT(
+ VkPhysicalDevice physicalDevice,
+ VkSurfaceKHR surface,
+ VkSurfaceCapabilities2EXT* pSurfaceCapabilities);
+#endif
+
+#define VK_EXT_display_control 1
+#define VK_EXT_DISPLAY_CONTROL_SPEC_VERSION 1
+#define VK_EXT_DISPLAY_CONTROL_EXTENSION_NAME "VK_EXT_display_control"
+
+
+typedef enum VkDisplayPowerStateEXT {
+ VK_DISPLAY_POWER_STATE_OFF_EXT = 0,
+ VK_DISPLAY_POWER_STATE_SUSPEND_EXT = 1,
+ VK_DISPLAY_POWER_STATE_ON_EXT = 2,
+ VK_DISPLAY_POWER_STATE_BEGIN_RANGE_EXT = VK_DISPLAY_POWER_STATE_OFF_EXT,
+ VK_DISPLAY_POWER_STATE_END_RANGE_EXT = VK_DISPLAY_POWER_STATE_ON_EXT,
+ VK_DISPLAY_POWER_STATE_RANGE_SIZE_EXT = (VK_DISPLAY_POWER_STATE_ON_EXT - VK_DISPLAY_POWER_STATE_OFF_EXT + 1),
+ VK_DISPLAY_POWER_STATE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDisplayPowerStateEXT;
+
+typedef enum VkDeviceEventTypeEXT {
+ VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT = 0,
+ VK_DEVICE_EVENT_TYPE_BEGIN_RANGE_EXT = VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT,
+ VK_DEVICE_EVENT_TYPE_END_RANGE_EXT = VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT,
+ VK_DEVICE_EVENT_TYPE_RANGE_SIZE_EXT = (VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT - VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT + 1),
+ VK_DEVICE_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDeviceEventTypeEXT;
+
+typedef enum VkDisplayEventTypeEXT {
+ VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT = 0,
+ VK_DISPLAY_EVENT_TYPE_BEGIN_RANGE_EXT = VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT,
+ VK_DISPLAY_EVENT_TYPE_END_RANGE_EXT = VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT,
+ VK_DISPLAY_EVENT_TYPE_RANGE_SIZE_EXT = (VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT - VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT + 1),
+ VK_DISPLAY_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDisplayEventTypeEXT;
+
+typedef struct VkDisplayPowerInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkDisplayPowerStateEXT powerState;
+} VkDisplayPowerInfoEXT;
+
+typedef struct VkDeviceEventInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceEventTypeEXT deviceEvent;
+} VkDeviceEventInfoEXT;
+
+typedef struct VkDisplayEventInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkDisplayEventTypeEXT displayEvent;
+} VkDisplayEventInfoEXT;
+
+typedef struct VkSwapchainCounterCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkSurfaceCounterFlagsEXT surfaceCounters;
+} VkSwapchainCounterCreateInfoEXT;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkDisplayPowerControlEXT)(VkDevice device, VkDisplayKHR display, const VkDisplayPowerInfoEXT* pDisplayPowerInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkRegisterDeviceEventEXT)(VkDevice device, const VkDeviceEventInfoEXT* pDeviceEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence);
+typedef VkResult (VKAPI_PTR *PFN_vkRegisterDisplayEventEXT)(VkDevice device, VkDisplayKHR display, const VkDisplayEventInfoEXT* pDisplayEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence);
+typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainCounterEXT)(VkDevice device, VkSwapchainKHR swapchain, VkSurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkDisplayPowerControlEXT(
+ VkDevice device,
+ VkDisplayKHR display,
+ const VkDisplayPowerInfoEXT* pDisplayPowerInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkRegisterDeviceEventEXT(
+ VkDevice device,
+ const VkDeviceEventInfoEXT* pDeviceEventInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkFence* pFence);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkRegisterDisplayEventEXT(
+ VkDevice device,
+ VkDisplayKHR display,
+ const VkDisplayEventInfoEXT* pDisplayEventInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkFence* pFence);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainCounterEXT(
+ VkDevice device,
+ VkSwapchainKHR swapchain,
+ VkSurfaceCounterFlagBitsEXT counter,
+ uint64_t* pCounterValue);
+#endif
+
+#define VK_GOOGLE_display_timing 1
+#define VK_GOOGLE_DISPLAY_TIMING_SPEC_VERSION 1
+#define VK_GOOGLE_DISPLAY_TIMING_EXTENSION_NAME "VK_GOOGLE_display_timing"
+
+typedef struct VkRefreshCycleDurationGOOGLE {
+ uint64_t refreshDuration;
+} VkRefreshCycleDurationGOOGLE;
+
+typedef struct VkPastPresentationTimingGOOGLE {
+ uint32_t presentID;
+ uint64_t desiredPresentTime;
+ uint64_t actualPresentTime;
+ uint64_t earliestPresentTime;
+ uint64_t presentMargin;
+} VkPastPresentationTimingGOOGLE;
+
+typedef struct VkPresentTimeGOOGLE {
+ uint32_t presentID;
+ uint64_t desiredPresentTime;
+} VkPresentTimeGOOGLE;
+
+typedef struct VkPresentTimesInfoGOOGLE {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t swapchainCount;
+ const VkPresentTimeGOOGLE* pTimes;
+} VkPresentTimesInfoGOOGLE;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetRefreshCycleDurationGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPastPresentationTimingGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pPresentationTimingCount, VkPastPresentationTimingGOOGLE* pPresentationTimings);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetRefreshCycleDurationGOOGLE(
+ VkDevice device,
+ VkSwapchainKHR swapchain,
+ VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPastPresentationTimingGOOGLE(
+ VkDevice device,
+ VkSwapchainKHR swapchain,
+ uint32_t* pPresentationTimingCount,
+ VkPastPresentationTimingGOOGLE* pPresentationTimings);
+#endif
+
+#define VK_NV_sample_mask_override_coverage 1
+#define VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_SPEC_VERSION 1
+#define VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_EXTENSION_NAME "VK_NV_sample_mask_override_coverage"
+
+
+#define VK_NV_geometry_shader_passthrough 1
+#define VK_NV_GEOMETRY_SHADER_PASSTHROUGH_SPEC_VERSION 1
+#define VK_NV_GEOMETRY_SHADER_PASSTHROUGH_EXTENSION_NAME "VK_NV_geometry_shader_passthrough"
+
+
+#define VK_NV_viewport_array2 1
+#define VK_NV_VIEWPORT_ARRAY2_SPEC_VERSION 1
+#define VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME "VK_NV_viewport_array2"
+
+
+#define VK_NVX_multiview_per_view_attributes 1
+#define VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_SPEC_VERSION 1
+#define VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_EXTENSION_NAME "VK_NVX_multiview_per_view_attributes"
+
+typedef struct VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 perViewPositionAllComponents;
+} VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX;
+
+
+
+#define VK_NV_viewport_swizzle 1
+#define VK_NV_VIEWPORT_SWIZZLE_SPEC_VERSION 1
+#define VK_NV_VIEWPORT_SWIZZLE_EXTENSION_NAME "VK_NV_viewport_swizzle"
+
+
+typedef enum VkViewportCoordinateSwizzleNV {
+ VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV = 0,
+ VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_X_NV = 1,
+ VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Y_NV = 2,
+ VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Y_NV = 3,
+ VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Z_NV = 4,
+ VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Z_NV = 5,
+ VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_W_NV = 6,
+ VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV = 7,
+ VK_VIEWPORT_COORDINATE_SWIZZLE_BEGIN_RANGE_NV = VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV,
+ VK_VIEWPORT_COORDINATE_SWIZZLE_END_RANGE_NV = VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV,
+ VK_VIEWPORT_COORDINATE_SWIZZLE_RANGE_SIZE_NV = (VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV - VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV + 1),
+ VK_VIEWPORT_COORDINATE_SWIZZLE_MAX_ENUM_NV = 0x7FFFFFFF
+} VkViewportCoordinateSwizzleNV;
+
+typedef VkFlags VkPipelineViewportSwizzleStateCreateFlagsNV;
+
+typedef struct VkViewportSwizzleNV {
+ VkViewportCoordinateSwizzleNV x;
+ VkViewportCoordinateSwizzleNV y;
+ VkViewportCoordinateSwizzleNV z;
+ VkViewportCoordinateSwizzleNV w;
+} VkViewportSwizzleNV;
+
+typedef struct VkPipelineViewportSwizzleStateCreateInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineViewportSwizzleStateCreateFlagsNV flags;
+ uint32_t viewportCount;
+ const VkViewportSwizzleNV* pViewportSwizzles;
+} VkPipelineViewportSwizzleStateCreateInfoNV;
+
+
+
+#define VK_EXT_discard_rectangles 1
+#define VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION 1
+#define VK_EXT_DISCARD_RECTANGLES_EXTENSION_NAME "VK_EXT_discard_rectangles"
+
+
+typedef enum VkDiscardRectangleModeEXT {
+ VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT = 0,
+ VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT = 1,
+ VK_DISCARD_RECTANGLE_MODE_BEGIN_RANGE_EXT = VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT,
+ VK_DISCARD_RECTANGLE_MODE_END_RANGE_EXT = VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT,
+ VK_DISCARD_RECTANGLE_MODE_RANGE_SIZE_EXT = (VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT - VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT + 1),
+ VK_DISCARD_RECTANGLE_MODE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDiscardRectangleModeEXT;
+
+typedef VkFlags VkPipelineDiscardRectangleStateCreateFlagsEXT;
+
+typedef struct VkPhysicalDeviceDiscardRectanglePropertiesEXT {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t maxDiscardRectangles;
+} VkPhysicalDeviceDiscardRectanglePropertiesEXT;
+
+typedef struct VkPipelineDiscardRectangleStateCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineDiscardRectangleStateCreateFlagsEXT flags;
+ VkDiscardRectangleModeEXT discardRectangleMode;
+ uint32_t discardRectangleCount;
+ const VkRect2D* pDiscardRectangles;
+} VkPipelineDiscardRectangleStateCreateInfoEXT;
+
+
+typedef void (VKAPI_PTR *PFN_vkCmdSetDiscardRectangleEXT)(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VkRect2D* pDiscardRectangles);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDiscardRectangleEXT(
+ VkCommandBuffer commandBuffer,
+ uint32_t firstDiscardRectangle,
+ uint32_t discardRectangleCount,
+ const VkRect2D* pDiscardRectangles);
+#endif
+
+#define VK_EXT_swapchain_colorspace 1
+#define VK_EXT_SWAPCHAIN_COLOR_SPACE_SPEC_VERSION 2
+#define VK_EXT_SWAPCHAIN_COLOR_SPACE_EXTENSION_NAME "VK_EXT_swapchain_colorspace"
+
+
+#define VK_EXT_hdr_metadata 1
+#define VK_EXT_HDR_METADATA_SPEC_VERSION 1
+#define VK_EXT_HDR_METADATA_EXTENSION_NAME "VK_EXT_hdr_metadata"
+
+typedef struct VkXYColorEXT {
+ float x;
+ float y;
+} VkXYColorEXT;
+
+typedef struct VkHdrMetadataEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkXYColorEXT displayPrimaryRed;
+ VkXYColorEXT displayPrimaryGreen;
+ VkXYColorEXT displayPrimaryBlue;
+ VkXYColorEXT whitePoint;
+ float maxLuminance;
+ float minLuminance;
+ float maxContentLightLevel;
+ float maxFrameAverageLightLevel;
+} VkHdrMetadataEXT;
+
+
+typedef void (VKAPI_PTR *PFN_vkSetHdrMetadataEXT)(VkDevice device, uint32_t swapchainCount, const VkSwapchainKHR* pSwapchains, const VkHdrMetadataEXT* pMetadata);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkSetHdrMetadataEXT(
+ VkDevice device,
+ uint32_t swapchainCount,
+ const VkSwapchainKHR* pSwapchains,
+ const VkHdrMetadataEXT* pMetadata);
+#endif
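
/* A brief usage sketch (illustrative only, not part of the upstream header): fill VkHdrMetadataEXT
   with the mastering-display description and hand it to vkSetHdrMetadataEXT. The x/y fields are
   CIE 1931 chromaticity coordinates and the luminance fields are in nits; the BT.2020/D65 numbers
   below are example values, not something the extension requires. As an extension entry point,
   vkSetHdrMetadataEXT is normally fetched with vkGetDeviceProcAddr, so the sketch takes the
   function pointer as a parameter. */
static void setHdrMetadataSketch(VkDevice device, VkSwapchainKHR swapchain, PFN_vkSetHdrMetadataEXT pfnSetHdrMetadataEXT)
{
    VkHdrMetadataEXT metadata;
    metadata.sType = VK_STRUCTURE_TYPE_HDR_METADATA_EXT;
    metadata.pNext = NULL;
    metadata.displayPrimaryRed.x   = 0.708f;  metadata.displayPrimaryRed.y   = 0.292f;
    metadata.displayPrimaryGreen.x = 0.170f;  metadata.displayPrimaryGreen.y = 0.797f;
    metadata.displayPrimaryBlue.x  = 0.131f;  metadata.displayPrimaryBlue.y  = 0.046f;
    metadata.whitePoint.x          = 0.3127f; metadata.whitePoint.y          = 0.3290f;
    metadata.maxLuminance              = 1000.0f;
    metadata.minLuminance              = 0.001f;
    metadata.maxContentLightLevel      = 1000.0f;
    metadata.maxFrameAverageLightLevel = 400.0f;
    pfnSetHdrMetadataEXT(device, 1, &swapchain, &metadata);
}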
+
+#ifdef VK_USE_PLATFORM_IOS_MVK
+#define VK_MVK_ios_surface 1
+#define VK_MVK_IOS_SURFACE_SPEC_VERSION 2
+#define VK_MVK_IOS_SURFACE_EXTENSION_NAME "VK_MVK_ios_surface"
+
+typedef VkFlags VkIOSSurfaceCreateFlagsMVK;
+
+typedef struct VkIOSSurfaceCreateInfoMVK {
+ VkStructureType sType;
+ const void* pNext;
+ VkIOSSurfaceCreateFlagsMVK flags;
+ const void* pView;
+} VkIOSSurfaceCreateInfoMVK;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateIOSSurfaceMVK)(VkInstance instance, const VkIOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateIOSSurfaceMVK(
+ VkInstance instance,
+ const VkIOSSurfaceCreateInfoMVK* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSurfaceKHR* pSurface);
+#endif
+#endif /* VK_USE_PLATFORM_IOS_MVK */
+
+#ifdef VK_USE_PLATFORM_MACOS_MVK
+#define VK_MVK_macos_surface 1
+#define VK_MVK_MACOS_SURFACE_SPEC_VERSION 2
+#define VK_MVK_MACOS_SURFACE_EXTENSION_NAME "VK_MVK_macos_surface"
+
+typedef VkFlags VkMacOSSurfaceCreateFlagsMVK;
+
+typedef struct VkMacOSSurfaceCreateInfoMVK {
+ VkStructureType sType;
+ const void* pNext;
+ VkMacOSSurfaceCreateFlagsMVK flags;
+ const void* pView;
+} VkMacOSSurfaceCreateInfoMVK;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateMacOSSurfaceMVK)(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateMacOSSurfaceMVK(
+ VkInstance instance,
+ const VkMacOSSurfaceCreateInfoMVK* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSurfaceKHR* pSurface);
+#endif
+#endif /* VK_USE_PLATFORM_MACOS_MVK */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vulkan.hpp b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vulkan.hpp
new file mode 100644
index 0000000..dc93801
--- /dev/null
+++ b/samples/thirdparty/vulkan-1.0.49.0/include/vulkan/vulkan.hpp
@@ -0,0 +1,30540 @@
+// Copyright (c) 2015-2017 The Khronos Group Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and/or associated documentation files (the
+// "Materials"), to deal in the Materials without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Materials, and to
+// permit persons to whom the Materials are furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be included
+// in all copies or substantial portions of the Materials.
+//
+// THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+// MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+
+// This header is generated from the Khronos Vulkan XML API Registry.
+
+
+#ifndef VULKAN_HPP
+#define VULKAN_HPP
+
+#include <algorithm>
+#include <array>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <initializer_list>
+#include <string>
+#include <system_error>
+#include <tuple>
+#include <type_traits>
+#include <vulkan/vulkan.h>
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+# include <memory>
+# include <vector>
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+static_assert( VK_HEADER_VERSION == 49 , "Wrong VK_HEADER_VERSION!" );
+
+// 32-bit vulkan is not typesafe for handles, so don't allow copy constructors on this platform by default.
+// To enable this feature on 32-bit platforms please define VULKAN_HPP_TYPESAFE_CONVERSION
+#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
+# if !defined( VULKAN_HPP_TYPESAFE_CONVERSION )
+# define VULKAN_HPP_TYPESAFE_CONVERSION
+# endif
+#endif
+
+#if !defined(VULKAN_HPP_HAS_UNRESTRICTED_UNIONS)
+# if defined(__clang__)
+# if __has_feature(cxx_unrestricted_unions)
+# define VULKAN_HPP_HAS_UNRESTRICTED_UNIONS
+# endif
+# elif defined(__GNUC__)
+# define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+# if 40600 <= GCC_VERSION
+# define VULKAN_HPP_HAS_UNRESTRICTED_UNIONS
+# endif
+# elif defined(_MSC_VER)
+# if 1900 <= _MSC_VER
+# define VULKAN_HPP_HAS_UNRESTRICTED_UNIONS
+# endif
+# endif
+#endif
+
+#if !defined(VULKAN_HPP_INLINE)
+# if defined(__clang__)
+# if __has_attribute(always_inline)
+# define VULKAN_HPP_INLINE __attribute__((always_inline)) __inline__
+# else
+# define VULKAN_HPP_INLINE inline
+# endif
+# elif defined(__GNUC__)
+# define VULKAN_HPP_INLINE __attribute__((always_inline)) __inline__
+# elif defined(_MSC_VER)
+# define VULKAN_HPP_INLINE __forceinline
+# else
+# define VULKAN_HPP_INLINE inline
+# endif
+#endif
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+# define VULKAN_HPP_TYPESAFE_EXPLICIT
+#else
+# define VULKAN_HPP_TYPESAFE_EXPLICIT explicit
+#endif
+
+namespace vk
+{
+ template <typename FlagBitsType> struct FlagTraits
+ {
+ enum { allFlags = 0 };
+ };
+
+ template <typename BitType, typename MaskType = VkFlags>
+ class Flags
+ {
+ public:
+ Flags()
+ : m_mask(0)
+ {
+ }
+
+ Flags(BitType bit)
+ : m_mask(static_cast<MaskType>(bit))
+ {
+ }
+
+ Flags(Flags<BitType> const& rhs)
+ : m_mask(rhs.m_mask)
+ {
+ }
+
+ Flags<BitType> & operator=(Flags<BitType> const& rhs)
+ {
+ m_mask = rhs.m_mask;
+ return *this;
+ }
+
+ Flags<BitType> & operator|=(Flags<BitType> const& rhs)
+ {
+ m_mask |= rhs.m_mask;
+ return *this;
+ }
+
+ Flags<BitType> & operator&=(Flags<BitType> const& rhs)
+ {
+ m_mask &= rhs.m_mask;
+ return *this;
+ }
+
+ Flags<BitType> & operator^=(Flags<BitType> const& rhs)
+ {
+ m_mask ^= rhs.m_mask;
+ return *this;
+ }
+
+ Flags<BitType> operator|(Flags<BitType> const& rhs) const
+ {
+ Flags<BitType> result(*this);
+ result |= rhs;
+ return result;
+ }
+
+ Flags<BitType> operator&(Flags<BitType> const& rhs) const
+ {
+ Flags<BitType> result(*this);
+ result &= rhs;
+ return result;
+ }
+
+ Flags<BitType> operator^(Flags<BitType> const& rhs) const
+ {
+ Flags<BitType> result(*this);
+ result ^= rhs;
+ return result;
+ }
+
+ bool operator!() const
+ {
+ return !m_mask;
+ }
+
+ Flags<BitType> operator~() const
+ {
+ Flags<BitType> result(*this);
+ result.m_mask ^= FlagTraits<BitType>::allFlags;
+ return result;
+ }
+
+ bool operator==(Flags<BitType> const& rhs) const
+ {
+ return m_mask == rhs.m_mask;
+ }
+
+ bool operator!=(Flags<BitType> const& rhs) const
+ {
+ return m_mask != rhs.m_mask;
+ }
+
+ explicit operator bool() const
+ {
+ return !!m_mask;
+ }
+
+ explicit operator MaskType() const
+ {
+ return m_mask;
+ }
+
+ private:
+ MaskType m_mask;
+ };
+
+ template <typename BitType>
+ Flags<BitType> operator|(BitType bit, Flags<BitType> const& flags)
+ {
+ return flags | bit;
+ }
+
+ template <typename BitType>
+ Flags<BitType> operator&(BitType bit, Flags<BitType> const& flags)
+ {
+ return flags & bit;
+ }
+
+ template <typename BitType>
+ Flags<BitType> operator^(BitType bit, Flags<BitType> const& flags)
+ {
+ return flags ^ bit;
+ }
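
  // A minimal sketch (illustrative only, not part of the generated API) of how the Flags wrapper
  // above is used: wrap a scoped bitmask enum, combine bits with operator|, and test them with
  // operator& plus the explicit bool conversion. SketchFlagBits is a made-up enum; the generated
  // FlagBits enums further down in this header follow the same pattern.
  enum class SketchFlagBits : VkFlags
  {
    eFirst  = 0x00000001,
    eSecond = 0x00000002
  };

  using SketchFlags = Flags<SketchFlagBits, VkFlags>;

  VULKAN_HPP_INLINE SketchFlags operator|( SketchFlagBits bit0, SketchFlagBits bit1 )
  {
    return SketchFlags( bit0 ) | bit1;
  }

  VULKAN_HPP_INLINE void flagsUsageSketch()
  {
    SketchFlags combined = SketchFlagBits::eFirst | SketchFlagBits::eSecond;
    if ( combined & SketchFlagBits::eSecond )
    {
      // the bit is set; operator& returns a Flags value that converts to bool contextually
    }
  }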
+
+
+ template <typename RefType>
+ class Optional
+ {
+ public:
+ Optional(RefType & reference) { m_ptr = &reference; }
+ Optional(RefType * ptr) { m_ptr = ptr; }
+ Optional(std::nullptr_t) { m_ptr = nullptr; }
+
+ operator RefType*() const { return m_ptr; }
+ RefType const* operator->() const { return m_ptr; }
+ explicit operator bool() const { return !!m_ptr; }
+
+ private:
+ RefType *m_ptr;
+ };
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename T>
+ class ArrayProxy
+ {
+ public:
+ ArrayProxy(std::nullptr_t)
+ : m_count(0)
+ , m_ptr(nullptr)
+ {}
+
+ ArrayProxy(T & ptr)
+ : m_count(1)
+ , m_ptr(&ptr)
+ {}
+
+ ArrayProxy(uint32_t count, T * ptr)
+ : m_count(count)
+ , m_ptr(ptr)
+ {}
+
+ template <size_t N>
+ ArrayProxy(std::array<typename std::remove_const<T>::type, N> & data)
+ : m_count(N)
+ , m_ptr(data.data())
+ {}
+
+ template <size_t N>
+ ArrayProxy(std::array<typename std::remove_const<T>::type, N> const& data)
+ : m_count(N)
+ , m_ptr(data.data())
+ {}
+
+ template <class Allocator = std::allocator<typename std::remove_const<T>::type>>
+ ArrayProxy(std::vector<typename std::remove_const<T>::type, Allocator> & data)
+ : m_count(static_cast<uint32_t>(data.size()))
+ , m_ptr(data.data())
+ {}
+
+ template <class Allocator = std::allocator<typename std::remove_const<T>::type>>
+ ArrayProxy(std::vector<typename std::remove_const<T>::type, Allocator> const& data)
+ : m_count(static_cast<uint32_t>(data.size()))
+ , m_ptr(data.data())
+ {}
+
+ ArrayProxy(std::initializer_list<T> const& data)
+ : m_count(static_cast<uint32_t>(data.end() - data.begin()))
+ , m_ptr(data.begin())
+ {}
+
+ const T * begin() const
+ {
+ return m_ptr;
+ }
+
+ const T * end() const
+ {
+ return m_ptr + m_count;
+ }
+
+ const T & front() const
+ {
+ assert(m_count && m_ptr);
+ return *m_ptr;
+ }
+
+ const T & back() const
+ {
+ assert(m_count && m_ptr);
+ return *(m_ptr + m_count - 1);
+ }
+
+ bool empty() const
+ {
+ return (m_count == 0);
+ }
+
+ uint32_t size() const
+ {
+ return m_count;
+ }
+
+ T * data() const
+ {
+ return m_ptr;
+ }
+
+ private:
+ uint32_t m_count;
+ T * m_ptr;
+ };
+#endif
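
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
  // A short usage sketch (illustrative only): the ArrayProxy constructors above let a single
  // element, a std::vector, a std::array or a braced initializer list all be passed where an
  // enhanced-mode command expects an ArrayProxy, without copying the underlying data.
  VULKAN_HPP_INLINE void arrayProxyUsageSketch()
  {
    std::vector<float> values = { 0.0f, 1.0f, 2.0f };
    ArrayProxy<const float> fromVector( values );          // wraps values.data() / values.size()
    ArrayProxy<const float> fromSingle( values.front() );  // count == 1, points at one element
    uint32_t listCount = ArrayProxy<const float>( { 3.0f, 4.0f } ).size();  // temporary initializer list
    assert( fromVector.size() == 3 && fromSingle.size() == 1 && listCount == 2 );
  }
#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/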
+
+
+#if defined(VULKAN_HPP_NO_EXCEPTIONS) && !defined(VULKAN_HPP_NO_SMART_HANDLE)
+# define VULKAN_HPP_NO_SMART_HANDLE
+#endif
+
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ template <typename Type, typename Deleter>
+ class UniqueHandle
+ {
+ public:
+ explicit UniqueHandle( Type const& value = Type(), Deleter const& deleter = Deleter() )
+ : m_value( value )
+ , m_deleter( deleter )
+ {}
+
+ UniqueHandle( UniqueHandle const& ) = delete;
+
+ UniqueHandle( UniqueHandle && other )
+ : m_value( other.release() )
+ , m_deleter( std::move( other.m_deleter ) )
+ {}
+
+ ~UniqueHandle()
+ {
+ destroy();
+ }
+
+ UniqueHandle & operator=( UniqueHandle const& ) = delete;
+
+ UniqueHandle & operator=( UniqueHandle && other )
+ {
+ reset( other.release() );
+ m_deleter = std::move( other.m_deleter );
+ return *this;
+ }
+
+ explicit operator bool() const
+ {
+ return m_value.operator bool();
+ }
+
+ Type const* operator->() const
+ {
+ return &m_value;
+ }
+
+ Type const& operator*() const
+ {
+ return m_value;
+ }
+
+ Type get() const
+ {
+ return m_value;
+ }
+
+ Deleter & getDeleter()
+ {
+ return m_deleter;
+ }
+
+ Deleter const& getDeleter() const
+ {
+ return m_deleter;
+ }
+
+ void reset( Type const& value = Type() )
+ {
+ if ( m_value != value )
+ {
+ destroy();
+ m_value = value;
+ }
+ }
+
+ Type release()
+ {
+ Type value = m_value;
+ m_value = nullptr;
+ return value;
+ }
+
+ void swap( UniqueHandle<Type, Deleter> & rhs )
+ {
+ std::swap(m_value, rhs.m_value);
+ std::swap(m_deleter, rhs.m_deleter);
+ }
+
+ private:
+ void destroy()
+ {
+ if ( m_value )
+ {
+ m_deleter( m_value );
+ }
+ }
+
+ private:
+ Type m_value;
+ Deleter m_deleter;
+ };
+
+ template <typename Type, typename Deleter>
+ VULKAN_HPP_INLINE void swap( UniqueHandle<Type,Deleter> & lhs, UniqueHandle<Type,Deleter> & rhs )
+ {
+ lhs.swap( rhs );
+ }
+#endif
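
#ifndef VULKAN_HPP_NO_SMART_HANDLE
  // A compact sketch (illustrative only; SketchHandle and SketchDeleter are made-up types) of the
  // UniqueHandle contract above: the deleter runs exactly once for a non-null value, either on
  // destruction or when reset() replaces it, while release() returns ownership to the caller.
  struct SketchHandle
  {
    SketchHandle( std::nullptr_t = nullptr ) : value( 0 ) {}
    explicit SketchHandle( uint64_t v ) : value( v ) {}
    explicit operator bool() const { return value != 0; }
    bool operator!=( SketchHandle const& rhs ) const { return value != rhs.value; }
    uint64_t value;
  };

  struct SketchDeleter
  {
    void operator()( SketchHandle /*handle*/ ) const { /* e.g. call the matching vkDestroy* entry point */ }
  };

  VULKAN_HPP_INLINE void uniqueHandleUsageSketch()
  {
    UniqueHandle<SketchHandle, SketchDeleter> owned( SketchHandle( 42 ), SketchDeleter() );
    SketchHandle raw = owned.release();  // owned is now null and will not invoke the deleter
    owned.reset( raw );                  // takes ownership back; deleted when owned goes out of scope
  }
#endif /*VULKAN_HPP_NO_SMART_HANDLE*/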
+
+ enum class Result
+ {
+ eSuccess = VK_SUCCESS,
+ eNotReady = VK_NOT_READY,
+ eTimeout = VK_TIMEOUT,
+ eEventSet = VK_EVENT_SET,
+ eEventReset = VK_EVENT_RESET,
+ eIncomplete = VK_INCOMPLETE,
+ eErrorOutOfHostMemory = VK_ERROR_OUT_OF_HOST_MEMORY,
+ eErrorOutOfDeviceMemory = VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ eErrorInitializationFailed = VK_ERROR_INITIALIZATION_FAILED,
+ eErrorDeviceLost = VK_ERROR_DEVICE_LOST,
+ eErrorMemoryMapFailed = VK_ERROR_MEMORY_MAP_FAILED,
+ eErrorLayerNotPresent = VK_ERROR_LAYER_NOT_PRESENT,
+ eErrorExtensionNotPresent = VK_ERROR_EXTENSION_NOT_PRESENT,
+ eErrorFeatureNotPresent = VK_ERROR_FEATURE_NOT_PRESENT,
+ eErrorIncompatibleDriver = VK_ERROR_INCOMPATIBLE_DRIVER,
+ eErrorTooManyObjects = VK_ERROR_TOO_MANY_OBJECTS,
+ eErrorFormatNotSupported = VK_ERROR_FORMAT_NOT_SUPPORTED,
+ eErrorFragmentedPool = VK_ERROR_FRAGMENTED_POOL,
+ eErrorSurfaceLostKHR = VK_ERROR_SURFACE_LOST_KHR,
+ eErrorNativeWindowInUseKHR = VK_ERROR_NATIVE_WINDOW_IN_USE_KHR,
+ eSuboptimalKHR = VK_SUBOPTIMAL_KHR,
+ eErrorOutOfDateKHR = VK_ERROR_OUT_OF_DATE_KHR,
+ eErrorIncompatibleDisplayKHR = VK_ERROR_INCOMPATIBLE_DISPLAY_KHR,
+ eErrorValidationFailedEXT = VK_ERROR_VALIDATION_FAILED_EXT,
+ eErrorInvalidShaderNV = VK_ERROR_INVALID_SHADER_NV,
+ eErrorOutOfPoolMemoryKHR = VK_ERROR_OUT_OF_POOL_MEMORY_KHR,
+ eErrorInvalidExternalHandleKHX = VK_ERROR_INVALID_EXTERNAL_HANDLE_KHX
+ };
+
+ VULKAN_HPP_INLINE std::string to_string(Result value)
+ {
+ switch (value)
+ {
+ case Result::eSuccess: return "Success";
+ case Result::eNotReady: return "NotReady";
+ case Result::eTimeout: return "Timeout";
+ case Result::eEventSet: return "EventSet";
+ case Result::eEventReset: return "EventReset";
+ case Result::eIncomplete: return "Incomplete";
+ case Result::eErrorOutOfHostMemory: return "ErrorOutOfHostMemory";
+ case Result::eErrorOutOfDeviceMemory: return "ErrorOutOfDeviceMemory";
+ case Result::eErrorInitializationFailed: return "ErrorInitializationFailed";
+ case Result::eErrorDeviceLost: return "ErrorDeviceLost";
+ case Result::eErrorMemoryMapFailed: return "ErrorMemoryMapFailed";
+ case Result::eErrorLayerNotPresent: return "ErrorLayerNotPresent";
+ case Result::eErrorExtensionNotPresent: return "ErrorExtensionNotPresent";
+ case Result::eErrorFeatureNotPresent: return "ErrorFeatureNotPresent";
+ case Result::eErrorIncompatibleDriver: return "ErrorIncompatibleDriver";
+ case Result::eErrorTooManyObjects: return "ErrorTooManyObjects";
+ case Result::eErrorFormatNotSupported: return "ErrorFormatNotSupported";
+ case Result::eErrorFragmentedPool: return "ErrorFragmentedPool";
+ case Result::eErrorSurfaceLostKHR: return "ErrorSurfaceLostKHR";
+ case Result::eErrorNativeWindowInUseKHR: return "ErrorNativeWindowInUseKHR";
+ case Result::eSuboptimalKHR: return "SuboptimalKHR";
+ case Result::eErrorOutOfDateKHR: return "ErrorOutOfDateKHR";
+ case Result::eErrorIncompatibleDisplayKHR: return "ErrorIncompatibleDisplayKHR";
+ case Result::eErrorValidationFailedEXT: return "ErrorValidationFailedEXT";
+ case Result::eErrorInvalidShaderNV: return "ErrorInvalidShaderNV";
+ case Result::eErrorOutOfPoolMemoryKHR: return "ErrorOutOfPoolMemoryKHR";
+ case Result::eErrorInvalidExternalHandleKHX: return "ErrorInvalidExternalHandleKHX";
+ default: return "invalid";
+ }
+ }
+
+#if defined(_MSC_VER) && (_MSC_VER == 1800)
+# define noexcept _NOEXCEPT
+#endif
+
+ class ErrorCategoryImpl : public std::error_category
+ {
+ public:
+ virtual const char* name() const noexcept override { return "vk::Result"; }
+ virtual std::string message(int ev) const override { return to_string(static_cast<Result>(ev)); }
+ };
+
+#if defined(_MSC_VER) && (_MSC_VER == 1800)
+# undef noexcept
+#endif
+
+ VULKAN_HPP_INLINE const std::error_category& errorCategory()
+ {
+ static ErrorCategoryImpl instance;
+ return instance;
+ }
+
+ VULKAN_HPP_INLINE std::error_code make_error_code(Result e)
+ {
+ return std::error_code(static_cast<int>(e), errorCategory());
+ }
+
+ VULKAN_HPP_INLINE std::error_condition make_error_condition(Result e)
+ {
+ return std::error_condition(static_cast<int>(e), errorCategory());
+ }
+
+} // namespace vk
+
+namespace std
+{
+ template <>
+ struct is_error_code_enum<vk::Result> : public true_type
+ {};
+}
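
// A brief illustration (not part of the generated header) of the std::error_code integration
// declared above: because std::is_error_code_enum<vk::Result> is true and vk::make_error_code is
// found by argument-dependent lookup, a vk::Result converts directly to std::error_code.
VULKAN_HPP_INLINE void vkResultErrorCodeSketch()
{
  std::error_code ec = vk::Result::eErrorDeviceLost;  // uses vk::make_error_code
  assert( ec.category() == vk::errorCategory() );     // the category's name() is "vk::Result"
  assert( ec.message() == "ErrorDeviceLost" );        // routed through vk::to_string
}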
+
+namespace vk
+{
+ template <typename T>
+ struct ResultValue
+ {
+ ResultValue( Result r, T & v )
+ : result( r )
+ , value( v )
+ {}
+
+ Result result;
+ T value;
+
+ operator std::tuple<Result&, T&>() { return std::tuple<Result&, T&>(result, value); }
+ };
+
+ template <typename T>
+ struct ResultValueType
+ {
+#ifdef VULKAN_HPP_NO_EXCEPTIONS
+ typedef ResultValue<T> type;
+#else
+ typedef T type;
+#endif
+ };
+
+ template <> struct ResultValueType<void>
+ {
+#ifdef VULKAN_HPP_NO_EXCEPTIONS
+ typedef Result type;
+#else
+ typedef void type;
+#endif
+ };
+
+ VULKAN_HPP_INLINE ResultValueType<void>::type createResultValue( Result result, char const * message )
+ {
+#ifdef VULKAN_HPP_NO_EXCEPTIONS
+ assert( result == Result::eSuccess );
+ return result;
+#else
+ if ( result != Result::eSuccess )
+ {
+ throw std::system_error( result, message );
+ }
+#endif
+ }
+
+ template <typename T>
+ VULKAN_HPP_INLINE typename ResultValueType<T>::type createResultValue( Result result, T & data, char const * message )
+ {
+#ifdef VULKAN_HPP_NO_EXCEPTIONS
+ assert( result == Result::eSuccess );
+ return ResultValue<T>( result, data );
+#else
+ if ( result != Result::eSuccess )
+ {
+ throw std::system_error( result, message );
+ }
+ return data;
+#endif
+ }
+
+ VULKAN_HPP_INLINE Result createResultValue( Result result, char const * message, std::initializer_list<Result> successCodes )
+ {
+#ifdef VULKAN_HPP_NO_EXCEPTIONS
+ assert( std::find( successCodes.begin(), successCodes.end(), result ) != successCodes.end() );
+#else
+ if ( std::find( successCodes.begin(), successCodes.end(), result ) == successCodes.end() )
+ {
+ throw std::system_error( result, message );
+ }
+#endif
+ return result;
+ }
+
+ template <typename T>
+ VULKAN_HPP_INLINE ResultValue<T> createResultValue( Result result, T & data, char const * message, std::initializer_list<Result> successCodes )
+ {
+#ifdef VULKAN_HPP_NO_EXCEPTIONS
+ assert( std::find( successCodes.begin(), successCodes.end(), result ) != successCodes.end() );
+#else
+ if ( std::find( successCodes.begin(), successCodes.end(), result ) == successCodes.end() )
+ {
+ throw std::system_error( result, message );
+ }
+#endif
+ return ResultValue<T>( result, data );
+ }
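
  // A hedged sketch (illustrative only) of how the generated commands use the createResultValue
  // helpers above: a raw VkResult is wrapped and, on failure, turned into a std::system_error, or
  // returned alongside the data as a ResultValue when VULKAN_HPP_NO_EXCEPTIONS is defined.
  VULKAN_HPP_INLINE ResultValueType<uint32_t>::type createResultValueUsageSketch( VkResult nativeResult, uint32_t data )
  {
    Result result = static_cast<Result>( nativeResult );
    return createResultValue( result, data, "vk::createResultValueUsageSketch" );
  }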
+
+ using SampleMask = uint32_t;
+
+ using Bool32 = uint32_t;
+
+ using DeviceSize = uint64_t;
+
+ enum class FramebufferCreateFlagBits
+ {
+ };
+
+ using FramebufferCreateFlags = Flags<FramebufferCreateFlagBits, VkFramebufferCreateFlags>;
+
+ VULKAN_HPP_INLINE FramebufferCreateFlags operator|( FramebufferCreateFlagBits bit0, FramebufferCreateFlagBits bit1 )
+ {
+ return FramebufferCreateFlags( bit0 ) | bit1;
+ }
+
+ enum class QueryPoolCreateFlagBits
+ {
+ };
+
+ using QueryPoolCreateFlags = Flags<QueryPoolCreateFlagBits, VkQueryPoolCreateFlags>;
+
+ VULKAN_HPP_INLINE QueryPoolCreateFlags operator|( QueryPoolCreateFlagBits bit0, QueryPoolCreateFlagBits bit1 )
+ {
+ return QueryPoolCreateFlags( bit0 ) | bit1;
+ }
+
+ enum class RenderPassCreateFlagBits
+ {
+ };
+
+ using RenderPassCreateFlags = Flags<RenderPassCreateFlagBits, VkRenderPassCreateFlags>;
+
+ VULKAN_HPP_INLINE RenderPassCreateFlags operator|( RenderPassCreateFlagBits bit0, RenderPassCreateFlagBits bit1 )
+ {
+ return RenderPassCreateFlags( bit0 ) | bit1;
+ }
+
+ enum class SamplerCreateFlagBits
+ {
+ };
+
+ using SamplerCreateFlags = Flags<SamplerCreateFlagBits, VkSamplerCreateFlags>;
+
+ VULKAN_HPP_INLINE SamplerCreateFlags operator|( SamplerCreateFlagBits bit0, SamplerCreateFlagBits bit1 )
+ {
+ return SamplerCreateFlags( bit0 ) | bit1;
+ }
+
+ enum class PipelineLayoutCreateFlagBits
+ {
+ };
+
+ using PipelineLayoutCreateFlags = Flags<PipelineLayoutCreateFlagBits, VkPipelineLayoutCreateFlags>;
+
+ VULKAN_HPP_INLINE PipelineLayoutCreateFlags operator|( PipelineLayoutCreateFlagBits bit0, PipelineLayoutCreateFlagBits bit1 )
+ {
+ return PipelineLayoutCreateFlags( bit0 ) | bit1;
+ }
+
+ enum class PipelineCacheCreateFlagBits
+ {
+ };
+
+ using PipelineCacheCreateFlags = Flags<PipelineCacheCreateFlagBits, VkPipelineCacheCreateFlags>;
+
+ VULKAN_HPP_INLINE PipelineCacheCreateFlags operator|( PipelineCacheCreateFlagBits bit0, PipelineCacheCreateFlagBits bit1 )
+ {
+ return PipelineCacheCreateFlags( bit0 ) | bit1;
+ }
+
+ enum class PipelineDepthStencilStateCreateFlagBits
+ {
+ };
+
+ using PipelineDepthStencilStateCreateFlags = Flags<PipelineDepthStencilStateCreateFlagBits, VkPipelineDepthStencilStateCreateFlags>;
+
+ VULKAN_HPP_INLINE PipelineDepthStencilStateCreateFlags operator|( PipelineDepthStencilStateCreateFlagBits bit0, PipelineDepthStencilStateCreateFlagBits bit1 )
+ {
+ return PipelineDepthStencilStateCreateFlags( bit0 ) | bit1;
+ }
+
+ enum class PipelineDynamicStateCreateFlagBits
+ {
+ };
+
+ using PipelineDynamicStateCreateFlags = Flags<PipelineDynamicStateCreateFlagBits, VkPipelineDynamicStateCreateFlags>;
+
+ VULKAN_HPP_INLINE PipelineDynamicStateCreateFlags operator|( PipelineDynamicStateCreateFlagBits bit0, PipelineDynamicStateCreateFlagBits bit1 )
+ {
+ return PipelineDynamicStateCreateFlags( bit0 ) | bit1;
+ }
+
+ enum class PipelineColorBlendStateCreateFlagBits
+ {
+ };
+
+ using PipelineColorBlendStateCreateFlags = Flags<PipelineColorBlendStateCreateFlagBits, VkPipelineColorBlendStateCreateFlags>;
+
+ VULKAN_HPP_INLINE PipelineColorBlendStateCreateFlags operator|( PipelineColorBlendStateCreateFlagBits bit0, PipelineColorBlendStateCreateFlagBits bit1 )
+ {
+ return PipelineColorBlendStateCreateFlags( bit0 ) | bit1;
+ }
+
+ enum class PipelineMultisampleStateCreateFlagBits
+ {
+ };
+
+ using PipelineMultisampleStateCreateFlags = Flags<PipelineMultisampleStateCreateFlagBits, VkPipelineMultisampleStateCreateFlags>;
+
+ VULKAN_HPP_INLINE PipelineMultisampleStateCreateFlags operator|( PipelineMultisampleStateCreateFlagBits bit0, PipelineMultisampleStateCreateFlagBits bit1 )
+ {
+ return PipelineMultisampleStateCreateFlags( bit0 ) | bit1;
+ }
+
+ enum class PipelineRasterizationStateCreateFlagBits
+ {
+ };
+
+ using PipelineRasterizationStateCreateFlags = Flags<PipelineRasterizationStateCreateFlagBits, VkPipelineRasterizationStateCreateFlags>;
+
+ VULKAN_HPP_INLINE PipelineRasterizationStateCreateFlags operator|( PipelineRasterizationStateCreateFlagBits bit0, PipelineRasterizationStateCreateFlagBits bit1 )
+ {
+ return PipelineRasterizationStateCreateFlags( bit0 ) | bit1;
+ }
+
+ enum class PipelineViewportStateCreateFlagBits
+ {
+ };
+
+ using PipelineViewportStateCreateFlags = Flags<PipelineViewportStateCreateFlagBits, VkPipelineViewportStateCreateFlags>;
+
+ VULKAN_HPP_INLINE PipelineViewportStateCreateFlags operator|( PipelineViewportStateCreateFlagBits bit0, PipelineViewportStateCreateFlagBits bit1 )
+ {
+ return PipelineViewportStateCreateFlags( bit0 ) | bit1;
+ }
+
+ enum class PipelineTessellationStateCreateFlagBits
+ {
+ };
+
+ using PipelineTessellationStateCreateFlags = Flags<PipelineTessellationStateCreateFlagBits, VkPipelineTessellationStateCreateFlags>;
+
+ VULKAN_HPP_INLINE PipelineTessellationStateCreateFlags operator|( PipelineTessellationStateCreateFlagBits bit0, PipelineTessellationStateCreateFlagBits bit1 )
+ {
+ return PipelineTessellationStateCreateFlags( bit0 ) | bit1;
+ }
+
+ enum class PipelineInputAssemblyStateCreateFlagBits
+ {
+ };
+
+ using PipelineInputAssemblyStateCreateFlags = Flags<PipelineInputAssemblyStateCreateFlagBits, VkPipelineInputAssemblyStateCreateFlags>;
+
+ VULKAN_HPP_INLINE PipelineInputAssemblyStateCreateFlags operator|( PipelineInputAssemblyStateCreateFlagBits bit0, PipelineInputAssemblyStateCreateFlagBits bit1 )
+ {
+ return PipelineInputAssemblyStateCreateFlags( bit0 ) | bit1;
+ }
+
+ enum class PipelineVertexInputStateCreateFlagBits
+ {
+ };
+
+ using PipelineVertexInputStateCreateFlags = Flags<PipelineVertexInputStateCreateFlagBits, VkPipelineVertexInputStateCreateFlags>;
+
+ VULKAN_HPP_INLINE PipelineVertexInputStateCreateFlags operator|( PipelineVertexInputStateCreateFlagBits bit0, PipelineVertexInputStateCreateFlagBits bit1 )
+ {
+ return PipelineVertexInputStateCreateFlags( bit0 ) | bit1;
+ }
+
+ enum class PipelineShaderStageCreateFlagBits
+ {
+ };
+
+ using PipelineShaderStageCreateFlags = Flags<PipelineShaderStageCreateFlagBits, VkPipelineShaderStageCreateFlags>;
+
+ VULKAN_HPP_INLINE PipelineShaderStageCreateFlags operator|( PipelineShaderStageCreateFlagBits bit0, PipelineShaderStageCreateFlagBits bit1 )
+ {
+ return PipelineShaderStageCreateFlags( bit0 ) | bit1;
+ }
+
+ enum class BufferViewCreateFlagBits
+ {
+ };
+
+ using BufferViewCreateFlags = Flags<BufferViewCreateFlagBits, VkBufferViewCreateFlags>;
+
+ VULKAN_HPP_INLINE BufferViewCreateFlags operator|( BufferViewCreateFlagBits bit0, BufferViewCreateFlagBits bit1 )
+ {
+ return BufferViewCreateFlags( bit0 ) | bit1;
+ }
+
+ enum class InstanceCreateFlagBits
+ {
+ };
+
+ using InstanceCreateFlags = Flags<InstanceCreateFlagBits, VkInstanceCreateFlags>;
+
+ VULKAN_HPP_INLINE InstanceCreateFlags operator|( InstanceCreateFlagBits bit0, InstanceCreateFlagBits bit1 )
+ {
+ return InstanceCreateFlags( bit0 ) | bit1;
+ }
+
+ enum class DeviceCreateFlagBits
+ {
+ };
+
+ using DeviceCreateFlags = Flags<DeviceCreateFlagBits, VkDeviceCreateFlags>;
+
+ VULKAN_HPP_INLINE DeviceCreateFlags operator|( DeviceCreateFlagBits bit0, DeviceCreateFlagBits bit1 )
+ {
+ return DeviceCreateFlags( bit0 ) | bit1;
+ }
+
+ enum class DeviceQueueCreateFlagBits
+ {
+ };
+
+ using DeviceQueueCreateFlags = Flags<DeviceQueueCreateFlagBits, VkDeviceQueueCreateFlags>;
+
+ VULKAN_HPP_INLINE DeviceQueueCreateFlags operator|( DeviceQueueCreateFlagBits bit0, DeviceQueueCreateFlagBits bit1 )
+ {
+ return DeviceQueueCreateFlags( bit0 ) | bit1;
+ }
+
+ enum class ImageViewCreateFlagBits
+ {
+ };
+
+ using ImageViewCreateFlags = Flags<ImageViewCreateFlagBits, VkImageViewCreateFlags>;
+
+ VULKAN_HPP_INLINE ImageViewCreateFlags operator|( ImageViewCreateFlagBits bit0, ImageViewCreateFlagBits bit1 )
+ {
+ return ImageViewCreateFlags( bit0 ) | bit1;
+ }
+
+ enum class SemaphoreCreateFlagBits
+ {
+ };
+
+ using SemaphoreCreateFlags = Flags<SemaphoreCreateFlagBits, VkSemaphoreCreateFlags>;
+
+ VULKAN_HPP_INLINE SemaphoreCreateFlags operator|( SemaphoreCreateFlagBits bit0, SemaphoreCreateFlagBits bit1 )
+ {
+ return SemaphoreCreateFlags( bit0 ) | bit1;
+ }
+
+ enum class ShaderModuleCreateFlagBits
+ {
+ };
+
+ using ShaderModuleCreateFlags = Flags<ShaderModuleCreateFlagBits, VkShaderModuleCreateFlags>;
+
+ VULKAN_HPP_INLINE ShaderModuleCreateFlags operator|( ShaderModuleCreateFlagBits bit0, ShaderModuleCreateFlagBits bit1 )
+ {
+ return ShaderModuleCreateFlags( bit0 ) | bit1;
+ }
+
+ enum class EventCreateFlagBits
+ {
+ };
+
+ using EventCreateFlags = Flags<EventCreateFlagBits, VkEventCreateFlags>;
+
+ VULKAN_HPP_INLINE EventCreateFlags operator|( EventCreateFlagBits bit0, EventCreateFlagBits bit1 )
+ {
+ return EventCreateFlags( bit0 ) | bit1;
+ }
+
+ enum class MemoryMapFlagBits
+ {
+ };
+
+ using MemoryMapFlags = Flags<MemoryMapFlagBits, VkMemoryMapFlags>;
+
+ VULKAN_HPP_INLINE MemoryMapFlags operator|( MemoryMapFlagBits bit0, MemoryMapFlagBits bit1 )
+ {
+ return MemoryMapFlags( bit0 ) | bit1;
+ }
+
+ enum class DescriptorPoolResetFlagBits
+ {
+ };
+
+ using DescriptorPoolResetFlags = Flags<DescriptorPoolResetFlagBits, VkDescriptorPoolResetFlags>;
+
+ VULKAN_HPP_INLINE DescriptorPoolResetFlags operator|( DescriptorPoolResetFlagBits bit0, DescriptorPoolResetFlagBits bit1 )
+ {
+ return DescriptorPoolResetFlags( bit0 ) | bit1;
+ }
+
+ enum class DescriptorUpdateTemplateCreateFlagBitsKHR
+ {
+ };
+
+ using DescriptorUpdateTemplateCreateFlagsKHR = Flags<DescriptorUpdateTemplateCreateFlagBitsKHR, VkDescriptorUpdateTemplateCreateFlagsKHR>;
+
+ VULKAN_HPP_INLINE DescriptorUpdateTemplateCreateFlagsKHR operator|( DescriptorUpdateTemplateCreateFlagBitsKHR bit0, DescriptorUpdateTemplateCreateFlagBitsKHR bit1 )
+ {
+ return DescriptorUpdateTemplateCreateFlagsKHR( bit0 ) | bit1;
+ }
+
+ enum class DisplayModeCreateFlagBitsKHR
+ {
+ };
+
+ using DisplayModeCreateFlagsKHR = Flags<DisplayModeCreateFlagBitsKHR, VkDisplayModeCreateFlagsKHR>;
+
+ VULKAN_HPP_INLINE DisplayModeCreateFlagsKHR operator|( DisplayModeCreateFlagBitsKHR bit0, DisplayModeCreateFlagBitsKHR bit1 )
+ {
+ return DisplayModeCreateFlagsKHR( bit0 ) | bit1;
+ }
+
+ enum class DisplaySurfaceCreateFlagBitsKHR
+ {
+ };
+
+ using DisplaySurfaceCreateFlagsKHR = Flags<DisplaySurfaceCreateFlagBitsKHR, VkDisplaySurfaceCreateFlagsKHR>;
+
+ VULKAN_HPP_INLINE DisplaySurfaceCreateFlagsKHR operator|( DisplaySurfaceCreateFlagBitsKHR bit0, DisplaySurfaceCreateFlagBitsKHR bit1 )
+ {
+ return DisplaySurfaceCreateFlagsKHR( bit0 ) | bit1;
+ }
+
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+ enum class AndroidSurfaceCreateFlagBitsKHR
+ {
+ };
+#endif /*VK_USE_PLATFORM_ANDROID_KHR*/
+
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+ using AndroidSurfaceCreateFlagsKHR = Flags<AndroidSurfaceCreateFlagBitsKHR, VkAndroidSurfaceCreateFlagsKHR>;
+
+ VULKAN_HPP_INLINE AndroidSurfaceCreateFlagsKHR operator|( AndroidSurfaceCreateFlagBitsKHR bit0, AndroidSurfaceCreateFlagBitsKHR bit1 )
+ {
+ return AndroidSurfaceCreateFlagsKHR( bit0 ) | bit1;
+ }
+#endif /*VK_USE_PLATFORM_ANDROID_KHR*/
+
+#ifdef VK_USE_PLATFORM_MIR_KHR
+ enum class MirSurfaceCreateFlagBitsKHR
+ {
+ };
+#endif /*VK_USE_PLATFORM_MIR_KHR*/
+
+#ifdef VK_USE_PLATFORM_MIR_KHR
+ using MirSurfaceCreateFlagsKHR = Flags<MirSurfaceCreateFlagBitsKHR, VkMirSurfaceCreateFlagsKHR>;
+
+ VULKAN_HPP_INLINE MirSurfaceCreateFlagsKHR operator|( MirSurfaceCreateFlagBitsKHR bit0, MirSurfaceCreateFlagBitsKHR bit1 )
+ {
+ return MirSurfaceCreateFlagsKHR( bit0 ) | bit1;
+ }
+#endif /*VK_USE_PLATFORM_MIR_KHR*/
+
+#ifdef VK_USE_PLATFORM_VI_NN
+ enum class ViSurfaceCreateFlagBitsNN
+ {
+ };
+#endif /*VK_USE_PLATFORM_VI_NN*/
+
+#ifdef VK_USE_PLATFORM_VI_NN
+ using ViSurfaceCreateFlagsNN = Flags<ViSurfaceCreateFlagBitsNN, VkViSurfaceCreateFlagsNN>;
+
+ VULKAN_HPP_INLINE ViSurfaceCreateFlagsNN operator|( ViSurfaceCreateFlagBitsNN bit0, ViSurfaceCreateFlagBitsNN bit1 )
+ {
+ return ViSurfaceCreateFlagsNN( bit0 ) | bit1;
+ }
+#endif /*VK_USE_PLATFORM_VI_NN*/
+
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+ enum class WaylandSurfaceCreateFlagBitsKHR
+ {
+ };
+#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/
+
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+ using WaylandSurfaceCreateFlagsKHR = Flags<WaylandSurfaceCreateFlagBitsKHR, VkWaylandSurfaceCreateFlagsKHR>;
+
+ VULKAN_HPP_INLINE WaylandSurfaceCreateFlagsKHR operator|( WaylandSurfaceCreateFlagBitsKHR bit0, WaylandSurfaceCreateFlagBitsKHR bit1 )
+ {
+ return WaylandSurfaceCreateFlagsKHR( bit0 ) | bit1;
+ }
+#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ enum class Win32SurfaceCreateFlagBitsKHR
+ {
+ };
+#endif /*VK_USE_PLATFORM_WIN32_KHR*/
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ using Win32SurfaceCreateFlagsKHR = Flags<Win32SurfaceCreateFlagBitsKHR, VkWin32SurfaceCreateFlagsKHR>;
+
+ VULKAN_HPP_INLINE Win32SurfaceCreateFlagsKHR operator|( Win32SurfaceCreateFlagBitsKHR bit0, Win32SurfaceCreateFlagBitsKHR bit1 )
+ {
+ return Win32SurfaceCreateFlagsKHR( bit0 ) | bit1;
+ }
+#endif /*VK_USE_PLATFORM_WIN32_KHR*/
+
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+ enum class XlibSurfaceCreateFlagBitsKHR
+ {
+ };
+#endif /*VK_USE_PLATFORM_XLIB_KHR*/
+
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+ using XlibSurfaceCreateFlagsKHR = Flags<XlibSurfaceCreateFlagBitsKHR, VkXlibSurfaceCreateFlagsKHR>;
+
+ VULKAN_HPP_INLINE XlibSurfaceCreateFlagsKHR operator|( XlibSurfaceCreateFlagBitsKHR bit0, XlibSurfaceCreateFlagBitsKHR bit1 )
+ {
+ return XlibSurfaceCreateFlagsKHR( bit0 ) | bit1;
+ }
+#endif /*VK_USE_PLATFORM_XLIB_KHR*/
+
+#ifdef VK_USE_PLATFORM_XCB_KHR
+ enum class XcbSurfaceCreateFlagBitsKHR
+ {
+ };
+#endif /*VK_USE_PLATFORM_XCB_KHR*/
+
+#ifdef VK_USE_PLATFORM_XCB_KHR
+ using XcbSurfaceCreateFlagsKHR = Flags<XcbSurfaceCreateFlagBitsKHR, VkXcbSurfaceCreateFlagsKHR>;
+
+ VULKAN_HPP_INLINE XcbSurfaceCreateFlagsKHR operator|( XcbSurfaceCreateFlagBitsKHR bit0, XcbSurfaceCreateFlagBitsKHR bit1 )
+ {
+ return XcbSurfaceCreateFlagsKHR( bit0 ) | bit1;
+ }
+#endif /*VK_USE_PLATFORM_XCB_KHR*/
+
+#ifdef VK_USE_PLATFORM_IOS_MVK
+ enum class IOSSurfaceCreateFlagBitsMVK
+ {
+ };
+#endif /*VK_USE_PLATFORM_IOS_MVK*/
+
+#ifdef VK_USE_PLATFORM_IOS_MVK
+ using IOSSurfaceCreateFlagsMVK = Flags<IOSSurfaceCreateFlagBitsMVK, VkIOSSurfaceCreateFlagsMVK>;
+
+ VULKAN_HPP_INLINE IOSSurfaceCreateFlagsMVK operator|( IOSSurfaceCreateFlagBitsMVK bit0, IOSSurfaceCreateFlagBitsMVK bit1 )
+ {
+ return IOSSurfaceCreateFlagsMVK( bit0 ) | bit1;
+ }
+#endif /*VK_USE_PLATFORM_IOS_MVK*/
+
+#ifdef VK_USE_PLATFORM_MACOS_MVK
+ enum class MacOSSurfaceCreateFlagBitsMVK
+ {
+ };
+#endif /*VK_USE_PLATFORM_MACOS_MVK*/
+
+#ifdef VK_USE_PLATFORM_MACOS_MVK
+ using MacOSSurfaceCreateFlagsMVK = Flags<MacOSSurfaceCreateFlagBitsMVK, VkMacOSSurfaceCreateFlagsMVK>;
+
+ VULKAN_HPP_INLINE MacOSSurfaceCreateFlagsMVK operator|( MacOSSurfaceCreateFlagBitsMVK bit0, MacOSSurfaceCreateFlagBitsMVK bit1 )
+ {
+ return MacOSSurfaceCreateFlagsMVK( bit0 ) | bit1;
+ }
+#endif /*VK_USE_PLATFORM_MACOS_MVK*/
+
+ enum class CommandPoolTrimFlagBitsKHR
+ {
+ };
+
+ using CommandPoolTrimFlagsKHR = Flags<CommandPoolTrimFlagBitsKHR, VkCommandPoolTrimFlagsKHR>;
+
+ VULKAN_HPP_INLINE CommandPoolTrimFlagsKHR operator|( CommandPoolTrimFlagBitsKHR bit0, CommandPoolTrimFlagBitsKHR bit1 )
+ {
+ return CommandPoolTrimFlagsKHR( bit0 ) | bit1;
+ }
+
+ enum class PipelineViewportSwizzleStateCreateFlagBitsNV
+ {
+ };
+
+ using PipelineViewportSwizzleStateCreateFlagsNV = Flags<PipelineViewportSwizzleStateCreateFlagBitsNV, VkPipelineViewportSwizzleStateCreateFlagsNV>;
+
+ VULKAN_HPP_INLINE PipelineViewportSwizzleStateCreateFlagsNV operator|( PipelineViewportSwizzleStateCreateFlagBitsNV bit0, PipelineViewportSwizzleStateCreateFlagBitsNV bit1 )
+ {
+ return PipelineViewportSwizzleStateCreateFlagsNV( bit0 ) | bit1;
+ }
+
+ enum class PipelineDiscardRectangleStateCreateFlagBitsEXT
+ {
+ };
+
+ using PipelineDiscardRectangleStateCreateFlagsEXT = Flags<PipelineDiscardRectangleStateCreateFlagBitsEXT, VkPipelineDiscardRectangleStateCreateFlagsEXT>;
+
+ VULKAN_HPP_INLINE PipelineDiscardRectangleStateCreateFlagsEXT operator|( PipelineDiscardRectangleStateCreateFlagBitsEXT bit0, PipelineDiscardRectangleStateCreateFlagBitsEXT bit1 )
+ {
+ return PipelineDiscardRectangleStateCreateFlagsEXT( bit0 ) | bit1;
+ }
+
+ class DeviceMemory
+ {
+ public:
+ DeviceMemory()
+ : m_deviceMemory(VK_NULL_HANDLE)
+ {}
+
+ DeviceMemory( std::nullptr_t )
+ : m_deviceMemory(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT DeviceMemory(VkDeviceMemory deviceMemory)
+ : m_deviceMemory(deviceMemory)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ DeviceMemory& operator=(VkDeviceMemory deviceMemory)
+ {
+ m_deviceMemory = deviceMemory;
+ return *this;
+ }
+#endif
+
+ DeviceMemory& operator=( std::nullptr_t )
+ {
+ m_deviceMemory = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(DeviceMemory const &rhs) const
+ {
+ return m_deviceMemory == rhs.m_deviceMemory;
+ }
+
+ bool operator!=(DeviceMemory const &rhs) const
+ {
+ return m_deviceMemory != rhs.m_deviceMemory;
+ }
+
+ bool operator<(DeviceMemory const &rhs) const
+ {
+ return m_deviceMemory < rhs.m_deviceMemory;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDeviceMemory() const
+ {
+ return m_deviceMemory;
+ }
+
+ explicit operator bool() const
+ {
+ return m_deviceMemory != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_deviceMemory == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkDeviceMemory m_deviceMemory;
+ };
+ static_assert( sizeof( DeviceMemory ) == sizeof( VkDeviceMemory ), "handle and wrapper have different size!" );
+
+ class CommandPool
+ {
+ public:
+ CommandPool()
+ : m_commandPool(VK_NULL_HANDLE)
+ {}
+
+ CommandPool( std::nullptr_t )
+ : m_commandPool(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT CommandPool(VkCommandPool commandPool)
+ : m_commandPool(commandPool)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ CommandPool& operator=(VkCommandPool commandPool)
+ {
+ m_commandPool = commandPool;
+ return *this;
+ }
+#endif
+
+ CommandPool& operator=( std::nullptr_t )
+ {
+ m_commandPool = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(CommandPool const &rhs) const
+ {
+ return m_commandPool == rhs.m_commandPool;
+ }
+
+ bool operator!=(CommandPool const &rhs) const
+ {
+ return m_commandPool != rhs.m_commandPool;
+ }
+
+ bool operator<(CommandPool const &rhs) const
+ {
+ return m_commandPool < rhs.m_commandPool;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkCommandPool() const
+ {
+ return m_commandPool;
+ }
+
+ explicit operator bool() const
+ {
+ return m_commandPool != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_commandPool == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkCommandPool m_commandPool;
+ };
+ static_assert( sizeof( CommandPool ) == sizeof( VkCommandPool ), "handle and wrapper have different size!" );
+
+ class Buffer
+ {
+ public:
+ Buffer()
+ : m_buffer(VK_NULL_HANDLE)
+ {}
+
+ Buffer( std::nullptr_t )
+ : m_buffer(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT Buffer(VkBuffer buffer)
+ : m_buffer(buffer)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ Buffer& operator=(VkBuffer buffer)
+ {
+ m_buffer = buffer;
+ return *this;
+ }
+#endif
+
+ Buffer& operator=( std::nullptr_t )
+ {
+ m_buffer = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(Buffer const &rhs) const
+ {
+ return m_buffer == rhs.m_buffer;
+ }
+
+ bool operator!=(Buffer const &rhs) const
+ {
+ return m_buffer != rhs.m_buffer;
+ }
+
+ bool operator<(Buffer const &rhs) const
+ {
+ return m_buffer < rhs.m_buffer;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkBuffer() const
+ {
+ return m_buffer;
+ }
+
+ explicit operator bool() const
+ {
+ return m_buffer != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_buffer == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkBuffer m_buffer;
+ };
+ static_assert( sizeof( Buffer ) == sizeof( VkBuffer ), "handle and wrapper have different size!" );
+
+ class BufferView
+ {
+ public:
+ BufferView()
+ : m_bufferView(VK_NULL_HANDLE)
+ {}
+
+ BufferView( std::nullptr_t )
+ : m_bufferView(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT BufferView(VkBufferView bufferView)
+ : m_bufferView(bufferView)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ BufferView& operator=(VkBufferView bufferView)
+ {
+ m_bufferView = bufferView;
+ return *this;
+ }
+#endif
+
+ BufferView& operator=( std::nullptr_t )
+ {
+ m_bufferView = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(BufferView const &rhs) const
+ {
+ return m_bufferView == rhs.m_bufferView;
+ }
+
+ bool operator!=(BufferView const &rhs) const
+ {
+ return m_bufferView != rhs.m_bufferView;
+ }
+
+ bool operator<(BufferView const &rhs) const
+ {
+ return m_bufferView < rhs.m_bufferView;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkBufferView() const
+ {
+ return m_bufferView;
+ }
+
+ explicit operator bool() const
+ {
+ return m_bufferView != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_bufferView == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkBufferView m_bufferView;
+ };
+ static_assert( sizeof( BufferView ) == sizeof( VkBufferView ), "handle and wrapper have different size!" );
+
+ class Image
+ {
+ public:
+ Image()
+ : m_image(VK_NULL_HANDLE)
+ {}
+
+ Image( std::nullptr_t )
+ : m_image(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT Image(VkImage image)
+ : m_image(image)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ Image& operator=(VkImage image)
+ {
+ m_image = image;
+ return *this;
+ }
+#endif
+
+ Image& operator=( std::nullptr_t )
+ {
+ m_image = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(Image const &rhs) const
+ {
+ return m_image == rhs.m_image;
+ }
+
+ bool operator!=(Image const &rhs) const
+ {
+ return m_image != rhs.m_image;
+ }
+
+ bool operator<(Image const &rhs) const
+ {
+ return m_image < rhs.m_image;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkImage() const
+ {
+ return m_image;
+ }
+
+ explicit operator bool() const
+ {
+ return m_image != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_image == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkImage m_image;
+ };
+ static_assert( sizeof( Image ) == sizeof( VkImage ), "handle and wrapper have different size!" );
+
+ class ImageView
+ {
+ public:
+ ImageView()
+ : m_imageView(VK_NULL_HANDLE)
+ {}
+
+ ImageView( std::nullptr_t )
+ : m_imageView(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT ImageView(VkImageView imageView)
+ : m_imageView(imageView)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ ImageView& operator=(VkImageView imageView)
+ {
+ m_imageView = imageView;
+ return *this;
+ }
+#endif
+
+ ImageView& operator=( std::nullptr_t )
+ {
+ m_imageView = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(ImageView const &rhs) const
+ {
+ return m_imageView == rhs.m_imageView;
+ }
+
+ bool operator!=(ImageView const &rhs) const
+ {
+ return m_imageView != rhs.m_imageView;
+ }
+
+ bool operator<(ImageView const &rhs) const
+ {
+ return m_imageView < rhs.m_imageView;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkImageView() const
+ {
+ return m_imageView;
+ }
+
+ explicit operator bool() const
+ {
+ return m_imageView != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_imageView == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkImageView m_imageView;
+ };
+ static_assert( sizeof( ImageView ) == sizeof( VkImageView ), "handle and wrapper have different size!" );
+
+ class ShaderModule
+ {
+ public:
+ ShaderModule()
+ : m_shaderModule(VK_NULL_HANDLE)
+ {}
+
+ ShaderModule( std::nullptr_t )
+ : m_shaderModule(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT ShaderModule(VkShaderModule shaderModule)
+ : m_shaderModule(shaderModule)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ ShaderModule& operator=(VkShaderModule shaderModule)
+ {
+ m_shaderModule = shaderModule;
+ return *this;
+ }
+#endif
+
+ ShaderModule& operator=( std::nullptr_t )
+ {
+ m_shaderModule = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(ShaderModule const &rhs) const
+ {
+ return m_shaderModule == rhs.m_shaderModule;
+ }
+
+ bool operator!=(ShaderModule const &rhs) const
+ {
+ return m_shaderModule != rhs.m_shaderModule;
+ }
+
+ bool operator<(ShaderModule const &rhs) const
+ {
+ return m_shaderModule < rhs.m_shaderModule;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkShaderModule() const
+ {
+ return m_shaderModule;
+ }
+
+ explicit operator bool() const
+ {
+ return m_shaderModule != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_shaderModule == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkShaderModule m_shaderModule;
+ };
+ static_assert( sizeof( ShaderModule ) == sizeof( VkShaderModule ), "handle and wrapper have different size!" );
+
+ class Pipeline
+ {
+ public:
+ Pipeline()
+ : m_pipeline(VK_NULL_HANDLE)
+ {}
+
+ Pipeline( std::nullptr_t )
+ : m_pipeline(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT Pipeline(VkPipeline pipeline)
+ : m_pipeline(pipeline)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ Pipeline& operator=(VkPipeline pipeline)
+ {
+ m_pipeline = pipeline;
+ return *this;
+ }
+#endif
+
+ Pipeline& operator=( std::nullptr_t )
+ {
+ m_pipeline = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(Pipeline const &rhs) const
+ {
+ return m_pipeline == rhs.m_pipeline;
+ }
+
+ bool operator!=(Pipeline const &rhs) const
+ {
+ return m_pipeline != rhs.m_pipeline;
+ }
+
+ bool operator<(Pipeline const &rhs) const
+ {
+ return m_pipeline < rhs.m_pipeline;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkPipeline() const
+ {
+ return m_pipeline;
+ }
+
+ explicit operator bool() const
+ {
+ return m_pipeline != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_pipeline == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkPipeline m_pipeline;
+ };
+ static_assert( sizeof( Pipeline ) == sizeof( VkPipeline ), "handle and wrapper have different size!" );
+
+ class PipelineLayout
+ {
+ public:
+ PipelineLayout()
+ : m_pipelineLayout(VK_NULL_HANDLE)
+ {}
+
+ PipelineLayout( std::nullptr_t )
+ : m_pipelineLayout(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT PipelineLayout(VkPipelineLayout pipelineLayout)
+ : m_pipelineLayout(pipelineLayout)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ PipelineLayout& operator=(VkPipelineLayout pipelineLayout)
+ {
+ m_pipelineLayout = pipelineLayout;
+ return *this;
+ }
+#endif
+
+ PipelineLayout& operator=( std::nullptr_t )
+ {
+ m_pipelineLayout = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(PipelineLayout const &rhs) const
+ {
+ return m_pipelineLayout == rhs.m_pipelineLayout;
+ }
+
+ bool operator!=(PipelineLayout const &rhs) const
+ {
+ return m_pipelineLayout != rhs.m_pipelineLayout;
+ }
+
+ bool operator<(PipelineLayout const &rhs) const
+ {
+ return m_pipelineLayout < rhs.m_pipelineLayout;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkPipelineLayout() const
+ {
+ return m_pipelineLayout;
+ }
+
+ explicit operator bool() const
+ {
+ return m_pipelineLayout != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_pipelineLayout == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkPipelineLayout m_pipelineLayout;
+ };
+ static_assert( sizeof( PipelineLayout ) == sizeof( VkPipelineLayout ), "handle and wrapper have different size!" );
+
+ class Sampler
+ {
+ public:
+ Sampler()
+ : m_sampler(VK_NULL_HANDLE)
+ {}
+
+ Sampler( std::nullptr_t )
+ : m_sampler(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT Sampler(VkSampler sampler)
+ : m_sampler(sampler)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ Sampler& operator=(VkSampler sampler)
+ {
+ m_sampler = sampler;
+ return *this;
+ }
+#endif
+
+ Sampler& operator=( std::nullptr_t )
+ {
+ m_sampler = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(Sampler const &rhs) const
+ {
+ return m_sampler == rhs.m_sampler;
+ }
+
+ bool operator!=(Sampler const &rhs) const
+ {
+ return m_sampler != rhs.m_sampler;
+ }
+
+ bool operator<(Sampler const &rhs) const
+ {
+ return m_sampler < rhs.m_sampler;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkSampler() const
+ {
+ return m_sampler;
+ }
+
+ explicit operator bool() const
+ {
+ return m_sampler != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_sampler == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkSampler m_sampler;
+ };
+ static_assert( sizeof( Sampler ) == sizeof( VkSampler ), "handle and wrapper have different size!" );
+
+ class DescriptorSet
+ {
+ public:
+ DescriptorSet()
+ : m_descriptorSet(VK_NULL_HANDLE)
+ {}
+
+ DescriptorSet( std::nullptr_t )
+ : m_descriptorSet(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT DescriptorSet(VkDescriptorSet descriptorSet)
+ : m_descriptorSet(descriptorSet)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ DescriptorSet& operator=(VkDescriptorSet descriptorSet)
+ {
+ m_descriptorSet = descriptorSet;
+ return *this;
+ }
+#endif
+
+ DescriptorSet& operator=( std::nullptr_t )
+ {
+ m_descriptorSet = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(DescriptorSet const &rhs) const
+ {
+ return m_descriptorSet == rhs.m_descriptorSet;
+ }
+
+ bool operator!=(DescriptorSet const &rhs) const
+ {
+ return m_descriptorSet != rhs.m_descriptorSet;
+ }
+
+ bool operator<(DescriptorSet const &rhs) const
+ {
+ return m_descriptorSet < rhs.m_descriptorSet;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDescriptorSet() const
+ {
+ return m_descriptorSet;
+ }
+
+ explicit operator bool() const
+ {
+ return m_descriptorSet != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_descriptorSet == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkDescriptorSet m_descriptorSet;
+ };
+ static_assert( sizeof( DescriptorSet ) == sizeof( VkDescriptorSet ), "handle and wrapper have different size!" );
+
+ class DescriptorSetLayout
+ {
+ public:
+ DescriptorSetLayout()
+ : m_descriptorSetLayout(VK_NULL_HANDLE)
+ {}
+
+ DescriptorSetLayout( std::nullptr_t )
+ : m_descriptorSetLayout(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT DescriptorSetLayout(VkDescriptorSetLayout descriptorSetLayout)
+ : m_descriptorSetLayout(descriptorSetLayout)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ DescriptorSetLayout& operator=(VkDescriptorSetLayout descriptorSetLayout)
+ {
+ m_descriptorSetLayout = descriptorSetLayout;
+ return *this;
+ }
+#endif
+
+ DescriptorSetLayout& operator=( std::nullptr_t )
+ {
+ m_descriptorSetLayout = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(DescriptorSetLayout const &rhs) const
+ {
+ return m_descriptorSetLayout == rhs.m_descriptorSetLayout;
+ }
+
+ bool operator!=(DescriptorSetLayout const &rhs) const
+ {
+ return m_descriptorSetLayout != rhs.m_descriptorSetLayout;
+ }
+
+ bool operator<(DescriptorSetLayout const &rhs) const
+ {
+ return m_descriptorSetLayout < rhs.m_descriptorSetLayout;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDescriptorSetLayout() const
+ {
+ return m_descriptorSetLayout;
+ }
+
+ explicit operator bool() const
+ {
+ return m_descriptorSetLayout != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_descriptorSetLayout == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkDescriptorSetLayout m_descriptorSetLayout;
+ };
+ static_assert( sizeof( DescriptorSetLayout ) == sizeof( VkDescriptorSetLayout ), "handle and wrapper have different size!" );
+
+ class DescriptorPool
+ {
+ public:
+ DescriptorPool()
+ : m_descriptorPool(VK_NULL_HANDLE)
+ {}
+
+ DescriptorPool( std::nullptr_t )
+ : m_descriptorPool(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT DescriptorPool(VkDescriptorPool descriptorPool)
+ : m_descriptorPool(descriptorPool)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ DescriptorPool& operator=(VkDescriptorPool descriptorPool)
+ {
+ m_descriptorPool = descriptorPool;
+ return *this;
+ }
+#endif
+
+ DescriptorPool& operator=( std::nullptr_t )
+ {
+ m_descriptorPool = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(DescriptorPool const &rhs) const
+ {
+ return m_descriptorPool == rhs.m_descriptorPool;
+ }
+
+ bool operator!=(DescriptorPool const &rhs) const
+ {
+ return m_descriptorPool != rhs.m_descriptorPool;
+ }
+
+ bool operator<(DescriptorPool const &rhs) const
+ {
+ return m_descriptorPool < rhs.m_descriptorPool;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDescriptorPool() const
+ {
+ return m_descriptorPool;
+ }
+
+ explicit operator bool() const
+ {
+ return m_descriptorPool != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_descriptorPool == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkDescriptorPool m_descriptorPool;
+ };
+ static_assert( sizeof( DescriptorPool ) == sizeof( VkDescriptorPool ), "handle and wrapper have different size!" );
+
+ class Fence
+ {
+ public:
+ Fence()
+ : m_fence(VK_NULL_HANDLE)
+ {}
+
+ Fence( std::nullptr_t )
+ : m_fence(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT Fence(VkFence fence)
+ : m_fence(fence)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ Fence& operator=(VkFence fence)
+ {
+ m_fence = fence;
+ return *this;
+ }
+#endif
+
+ Fence& operator=( std::nullptr_t )
+ {
+ m_fence = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(Fence const &rhs) const
+ {
+ return m_fence == rhs.m_fence;
+ }
+
+ bool operator!=(Fence const &rhs) const
+ {
+ return m_fence != rhs.m_fence;
+ }
+
+ bool operator<(Fence const &rhs) const
+ {
+ return m_fence < rhs.m_fence;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkFence() const
+ {
+ return m_fence;
+ }
+
+ explicit operator bool() const
+ {
+ return m_fence != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_fence == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkFence m_fence;
+ };
+ static_assert( sizeof( Fence ) == sizeof( VkFence ), "handle and wrapper have different size!" );
+
+ class Semaphore
+ {
+ public:
+ Semaphore()
+ : m_semaphore(VK_NULL_HANDLE)
+ {}
+
+ Semaphore( std::nullptr_t )
+ : m_semaphore(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT Semaphore(VkSemaphore semaphore)
+ : m_semaphore(semaphore)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ Semaphore& operator=(VkSemaphore semaphore)
+ {
+ m_semaphore = semaphore;
+ return *this;
+ }
+#endif
+
+ Semaphore& operator=( std::nullptr_t )
+ {
+ m_semaphore = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(Semaphore const &rhs) const
+ {
+ return m_semaphore == rhs.m_semaphore;
+ }
+
+ bool operator!=(Semaphore const &rhs) const
+ {
+ return m_semaphore != rhs.m_semaphore;
+ }
+
+ bool operator<(Semaphore const &rhs) const
+ {
+ return m_semaphore < rhs.m_semaphore;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkSemaphore() const
+ {
+ return m_semaphore;
+ }
+
+ explicit operator bool() const
+ {
+ return m_semaphore != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_semaphore == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkSemaphore m_semaphore;
+ };
+ static_assert( sizeof( Semaphore ) == sizeof( VkSemaphore ), "handle and wrapper have different size!" );
+
+ class Event
+ {
+ public:
+ Event()
+ : m_event(VK_NULL_HANDLE)
+ {}
+
+ Event( std::nullptr_t )
+ : m_event(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT Event(VkEvent event)
+ : m_event(event)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ Event& operator=(VkEvent event)
+ {
+ m_event = event;
+ return *this;
+ }
+#endif
+
+ Event& operator=( std::nullptr_t )
+ {
+ m_event = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(Event const &rhs) const
+ {
+ return m_event == rhs.m_event;
+ }
+
+ bool operator!=(Event const &rhs) const
+ {
+ return m_event != rhs.m_event;
+ }
+
+ bool operator<(Event const &rhs) const
+ {
+ return m_event < rhs.m_event;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkEvent() const
+ {
+ return m_event;
+ }
+
+ explicit operator bool() const
+ {
+ return m_event != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_event == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkEvent m_event;
+ };
+ static_assert( sizeof( Event ) == sizeof( VkEvent ), "handle and wrapper have different size!" );
+
+ class QueryPool
+ {
+ public:
+ QueryPool()
+ : m_queryPool(VK_NULL_HANDLE)
+ {}
+
+ QueryPool( std::nullptr_t )
+ : m_queryPool(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT QueryPool(VkQueryPool queryPool)
+ : m_queryPool(queryPool)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ QueryPool& operator=(VkQueryPool queryPool)
+ {
+ m_queryPool = queryPool;
+ return *this;
+ }
+#endif
+
+ QueryPool& operator=( std::nullptr_t )
+ {
+ m_queryPool = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(QueryPool const &rhs) const
+ {
+ return m_queryPool == rhs.m_queryPool;
+ }
+
+ bool operator!=(QueryPool const &rhs) const
+ {
+ return m_queryPool != rhs.m_queryPool;
+ }
+
+ bool operator<(QueryPool const &rhs) const
+ {
+ return m_queryPool < rhs.m_queryPool;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkQueryPool() const
+ {
+ return m_queryPool;
+ }
+
+ explicit operator bool() const
+ {
+ return m_queryPool != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_queryPool == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkQueryPool m_queryPool;
+ };
+ static_assert( sizeof( QueryPool ) == sizeof( VkQueryPool ), "handle and wrapper have different size!" );
+
+ class Framebuffer
+ {
+ public:
+ Framebuffer()
+ : m_framebuffer(VK_NULL_HANDLE)
+ {}
+
+ Framebuffer( std::nullptr_t )
+ : m_framebuffer(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT Framebuffer(VkFramebuffer framebuffer)
+ : m_framebuffer(framebuffer)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ Framebuffer& operator=(VkFramebuffer framebuffer)
+ {
+ m_framebuffer = framebuffer;
+ return *this;
+ }
+#endif
+
+ Framebuffer& operator=( std::nullptr_t )
+ {
+ m_framebuffer = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(Framebuffer const &rhs) const
+ {
+ return m_framebuffer == rhs.m_framebuffer;
+ }
+
+ bool operator!=(Framebuffer const &rhs) const
+ {
+ return m_framebuffer != rhs.m_framebuffer;
+ }
+
+ bool operator<(Framebuffer const &rhs) const
+ {
+ return m_framebuffer < rhs.m_framebuffer;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkFramebuffer() const
+ {
+ return m_framebuffer;
+ }
+
+ explicit operator bool() const
+ {
+ return m_framebuffer != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_framebuffer == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkFramebuffer m_framebuffer;
+ };
+ static_assert( sizeof( Framebuffer ) == sizeof( VkFramebuffer ), "handle and wrapper have different size!" );
+
+ class RenderPass
+ {
+ public:
+ RenderPass()
+ : m_renderPass(VK_NULL_HANDLE)
+ {}
+
+ RenderPass( std::nullptr_t )
+ : m_renderPass(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT RenderPass(VkRenderPass renderPass)
+ : m_renderPass(renderPass)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ RenderPass& operator=(VkRenderPass renderPass)
+ {
+ m_renderPass = renderPass;
+ return *this;
+ }
+#endif
+
+ RenderPass& operator=( std::nullptr_t )
+ {
+ m_renderPass = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(RenderPass const &rhs) const
+ {
+ return m_renderPass == rhs.m_renderPass;
+ }
+
+ bool operator!=(RenderPass const &rhs) const
+ {
+ return m_renderPass != rhs.m_renderPass;
+ }
+
+ bool operator<(RenderPass const &rhs) const
+ {
+ return m_renderPass < rhs.m_renderPass;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkRenderPass() const
+ {
+ return m_renderPass;
+ }
+
+ explicit operator bool() const
+ {
+ return m_renderPass != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_renderPass == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkRenderPass m_renderPass;
+ };
+ static_assert( sizeof( RenderPass ) == sizeof( VkRenderPass ), "handle and wrapper have different size!" );
+
+ class PipelineCache
+ {
+ public:
+ PipelineCache()
+ : m_pipelineCache(VK_NULL_HANDLE)
+ {}
+
+ PipelineCache( std::nullptr_t )
+ : m_pipelineCache(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT PipelineCache(VkPipelineCache pipelineCache)
+ : m_pipelineCache(pipelineCache)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ PipelineCache& operator=(VkPipelineCache pipelineCache)
+ {
+ m_pipelineCache = pipelineCache;
+ return *this;
+ }
+#endif
+
+ PipelineCache& operator=( std::nullptr_t )
+ {
+ m_pipelineCache = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(PipelineCache const &rhs) const
+ {
+ return m_pipelineCache == rhs.m_pipelineCache;
+ }
+
+ bool operator!=(PipelineCache const &rhs) const
+ {
+ return m_pipelineCache != rhs.m_pipelineCache;
+ }
+
+ bool operator<(PipelineCache const &rhs) const
+ {
+ return m_pipelineCache < rhs.m_pipelineCache;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkPipelineCache() const
+ {
+ return m_pipelineCache;
+ }
+
+ explicit operator bool() const
+ {
+ return m_pipelineCache != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_pipelineCache == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkPipelineCache m_pipelineCache;
+ };
+ static_assert( sizeof( PipelineCache ) == sizeof( VkPipelineCache ), "handle and wrapper have different size!" );
+
+ class ObjectTableNVX
+ {
+ public:
+ ObjectTableNVX()
+ : m_objectTableNVX(VK_NULL_HANDLE)
+ {}
+
+ ObjectTableNVX( std::nullptr_t )
+ : m_objectTableNVX(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT ObjectTableNVX(VkObjectTableNVX objectTableNVX)
+ : m_objectTableNVX(objectTableNVX)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ ObjectTableNVX& operator=(VkObjectTableNVX objectTableNVX)
+ {
+ m_objectTableNVX = objectTableNVX;
+ return *this;
+ }
+#endif
+
+ ObjectTableNVX& operator=( std::nullptr_t )
+ {
+ m_objectTableNVX = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(ObjectTableNVX const &rhs) const
+ {
+ return m_objectTableNVX == rhs.m_objectTableNVX;
+ }
+
+ bool operator!=(ObjectTableNVX const &rhs) const
+ {
+ return m_objectTableNVX != rhs.m_objectTableNVX;
+ }
+
+ bool operator<(ObjectTableNVX const &rhs) const
+ {
+ return m_objectTableNVX < rhs.m_objectTableNVX;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkObjectTableNVX() const
+ {
+ return m_objectTableNVX;
+ }
+
+ explicit operator bool() const
+ {
+ return m_objectTableNVX != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_objectTableNVX == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkObjectTableNVX m_objectTableNVX;
+ };
+ static_assert( sizeof( ObjectTableNVX ) == sizeof( VkObjectTableNVX ), "handle and wrapper have different size!" );
+
+ class IndirectCommandsLayoutNVX
+ {
+ public:
+ IndirectCommandsLayoutNVX()
+ : m_indirectCommandsLayoutNVX(VK_NULL_HANDLE)
+ {}
+
+ IndirectCommandsLayoutNVX( std::nullptr_t )
+ : m_indirectCommandsLayoutNVX(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT IndirectCommandsLayoutNVX(VkIndirectCommandsLayoutNVX indirectCommandsLayoutNVX)
+ : m_indirectCommandsLayoutNVX(indirectCommandsLayoutNVX)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ IndirectCommandsLayoutNVX& operator=(VkIndirectCommandsLayoutNVX indirectCommandsLayoutNVX)
+ {
+ m_indirectCommandsLayoutNVX = indirectCommandsLayoutNVX;
+ return *this;
+ }
+#endif
+
+ IndirectCommandsLayoutNVX& operator=( std::nullptr_t )
+ {
+ m_indirectCommandsLayoutNVX = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(IndirectCommandsLayoutNVX const &rhs) const
+ {
+ return m_indirectCommandsLayoutNVX == rhs.m_indirectCommandsLayoutNVX;
+ }
+
+ bool operator!=(IndirectCommandsLayoutNVX const &rhs) const
+ {
+ return m_indirectCommandsLayoutNVX != rhs.m_indirectCommandsLayoutNVX;
+ }
+
+ bool operator<(IndirectCommandsLayoutNVX const &rhs) const
+ {
+ return m_indirectCommandsLayoutNVX < rhs.m_indirectCommandsLayoutNVX;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkIndirectCommandsLayoutNVX() const
+ {
+ return m_indirectCommandsLayoutNVX;
+ }
+
+ explicit operator bool() const
+ {
+ return m_indirectCommandsLayoutNVX != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_indirectCommandsLayoutNVX == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkIndirectCommandsLayoutNVX m_indirectCommandsLayoutNVX;
+ };
+ static_assert( sizeof( IndirectCommandsLayoutNVX ) == sizeof( VkIndirectCommandsLayoutNVX ), "handle and wrapper have different size!" );
+
+ class DescriptorUpdateTemplateKHR
+ {
+ public:
+ DescriptorUpdateTemplateKHR()
+ : m_descriptorUpdateTemplateKHR(VK_NULL_HANDLE)
+ {}
+
+ DescriptorUpdateTemplateKHR( std::nullptr_t )
+ : m_descriptorUpdateTemplateKHR(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT DescriptorUpdateTemplateKHR(VkDescriptorUpdateTemplateKHR descriptorUpdateTemplateKHR)
+ : m_descriptorUpdateTemplateKHR(descriptorUpdateTemplateKHR)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ DescriptorUpdateTemplateKHR& operator=(VkDescriptorUpdateTemplateKHR descriptorUpdateTemplateKHR)
+ {
+ m_descriptorUpdateTemplateKHR = descriptorUpdateTemplateKHR;
+ return *this;
+ }
+#endif
+
+ DescriptorUpdateTemplateKHR& operator=( std::nullptr_t )
+ {
+ m_descriptorUpdateTemplateKHR = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(DescriptorUpdateTemplateKHR const &rhs) const
+ {
+ return m_descriptorUpdateTemplateKHR == rhs.m_descriptorUpdateTemplateKHR;
+ }
+
+ bool operator!=(DescriptorUpdateTemplateKHR const &rhs) const
+ {
+ return m_descriptorUpdateTemplateKHR != rhs.m_descriptorUpdateTemplateKHR;
+ }
+
+ bool operator<(DescriptorUpdateTemplateKHR const &rhs) const
+ {
+ return m_descriptorUpdateTemplateKHR < rhs.m_descriptorUpdateTemplateKHR;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDescriptorUpdateTemplateKHR() const
+ {
+ return m_descriptorUpdateTemplateKHR;
+ }
+
+ explicit operator bool() const
+ {
+ return m_descriptorUpdateTemplateKHR != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_descriptorUpdateTemplateKHR == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkDescriptorUpdateTemplateKHR m_descriptorUpdateTemplateKHR;
+ };
+ static_assert( sizeof( DescriptorUpdateTemplateKHR ) == sizeof( VkDescriptorUpdateTemplateKHR ), "handle and wrapper have different size!" );
+
+ class DisplayKHR
+ {
+ public:
+ DisplayKHR()
+ : m_displayKHR(VK_NULL_HANDLE)
+ {}
+
+ DisplayKHR( std::nullptr_t )
+ : m_displayKHR(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT DisplayKHR(VkDisplayKHR displayKHR)
+ : m_displayKHR(displayKHR)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ DisplayKHR& operator=(VkDisplayKHR displayKHR)
+ {
+ m_displayKHR = displayKHR;
+ return *this;
+ }
+#endif
+
+ DisplayKHR& operator=( std::nullptr_t )
+ {
+ m_displayKHR = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(DisplayKHR const &rhs) const
+ {
+ return m_displayKHR == rhs.m_displayKHR;
+ }
+
+ bool operator!=(DisplayKHR const &rhs) const
+ {
+ return m_displayKHR != rhs.m_displayKHR;
+ }
+
+ bool operator<(DisplayKHR const &rhs) const
+ {
+ return m_displayKHR < rhs.m_displayKHR;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDisplayKHR() const
+ {
+ return m_displayKHR;
+ }
+
+ explicit operator bool() const
+ {
+ return m_displayKHR != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_displayKHR == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkDisplayKHR m_displayKHR;
+ };
+ static_assert( sizeof( DisplayKHR ) == sizeof( VkDisplayKHR ), "handle and wrapper have different size!" );
+
+ class DisplayModeKHR
+ {
+ public:
+ DisplayModeKHR()
+ : m_displayModeKHR(VK_NULL_HANDLE)
+ {}
+
+ DisplayModeKHR( std::nullptr_t )
+ : m_displayModeKHR(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT DisplayModeKHR(VkDisplayModeKHR displayModeKHR)
+ : m_displayModeKHR(displayModeKHR)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ DisplayModeKHR& operator=(VkDisplayModeKHR displayModeKHR)
+ {
+ m_displayModeKHR = displayModeKHR;
+ return *this;
+ }
+#endif
+
+ DisplayModeKHR& operator=( std::nullptr_t )
+ {
+ m_displayModeKHR = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(DisplayModeKHR const &rhs) const
+ {
+ return m_displayModeKHR == rhs.m_displayModeKHR;
+ }
+
+ bool operator!=(DisplayModeKHR const &rhs) const
+ {
+ return m_displayModeKHR != rhs.m_displayModeKHR;
+ }
+
+ bool operator<(DisplayModeKHR const &rhs) const
+ {
+ return m_displayModeKHR < rhs.m_displayModeKHR;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDisplayModeKHR() const
+ {
+ return m_displayModeKHR;
+ }
+
+ explicit operator bool() const
+ {
+ return m_displayModeKHR != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_displayModeKHR == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkDisplayModeKHR m_displayModeKHR;
+ };
+ static_assert( sizeof( DisplayModeKHR ) == sizeof( VkDisplayModeKHR ), "handle and wrapper have different size!" );
+
+ class SurfaceKHR
+ {
+ public:
+ SurfaceKHR()
+ : m_surfaceKHR(VK_NULL_HANDLE)
+ {}
+
+ SurfaceKHR( std::nullptr_t )
+ : m_surfaceKHR(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT SurfaceKHR(VkSurfaceKHR surfaceKHR)
+ : m_surfaceKHR(surfaceKHR)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ SurfaceKHR& operator=(VkSurfaceKHR surfaceKHR)
+ {
+ m_surfaceKHR = surfaceKHR;
+ return *this;
+ }
+#endif
+
+ SurfaceKHR& operator=( std::nullptr_t )
+ {
+ m_surfaceKHR = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(SurfaceKHR const &rhs) const
+ {
+ return m_surfaceKHR == rhs.m_surfaceKHR;
+ }
+
+ bool operator!=(SurfaceKHR const &rhs) const
+ {
+ return m_surfaceKHR != rhs.m_surfaceKHR;
+ }
+
+ bool operator<(SurfaceKHR const &rhs) const
+ {
+ return m_surfaceKHR < rhs.m_surfaceKHR;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkSurfaceKHR() const
+ {
+ return m_surfaceKHR;
+ }
+
+ explicit operator bool() const
+ {
+ return m_surfaceKHR != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_surfaceKHR == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkSurfaceKHR m_surfaceKHR;
+ };
+ static_assert( sizeof( SurfaceKHR ) == sizeof( VkSurfaceKHR ), "handle and wrapper have different size!" );
+
+ class SwapchainKHR
+ {
+ public:
+ SwapchainKHR()
+ : m_swapchainKHR(VK_NULL_HANDLE)
+ {}
+
+ SwapchainKHR( std::nullptr_t )
+ : m_swapchainKHR(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT SwapchainKHR(VkSwapchainKHR swapchainKHR)
+ : m_swapchainKHR(swapchainKHR)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ SwapchainKHR& operator=(VkSwapchainKHR swapchainKHR)
+ {
+ m_swapchainKHR = swapchainKHR;
+ return *this;
+ }
+#endif
+
+ SwapchainKHR& operator=( std::nullptr_t )
+ {
+ m_swapchainKHR = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(SwapchainKHR const &rhs) const
+ {
+ return m_swapchainKHR == rhs.m_swapchainKHR;
+ }
+
+ bool operator!=(SwapchainKHR const &rhs) const
+ {
+ return m_swapchainKHR != rhs.m_swapchainKHR;
+ }
+
+ bool operator<(SwapchainKHR const &rhs) const
+ {
+ return m_swapchainKHR < rhs.m_swapchainKHR;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkSwapchainKHR() const
+ {
+ return m_swapchainKHR;
+ }
+
+ explicit operator bool() const
+ {
+ return m_swapchainKHR != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_swapchainKHR == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkSwapchainKHR m_swapchainKHR;
+ };
+ static_assert( sizeof( SwapchainKHR ) == sizeof( VkSwapchainKHR ), "handle and wrapper have different size!" );
+
+ class DebugReportCallbackEXT
+ {
+ public:
+ DebugReportCallbackEXT()
+ : m_debugReportCallbackEXT(VK_NULL_HANDLE)
+ {}
+
+ DebugReportCallbackEXT( std::nullptr_t )
+ : m_debugReportCallbackEXT(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT DebugReportCallbackEXT(VkDebugReportCallbackEXT debugReportCallbackEXT)
+ : m_debugReportCallbackEXT(debugReportCallbackEXT)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ DebugReportCallbackEXT& operator=(VkDebugReportCallbackEXT debugReportCallbackEXT)
+ {
+ m_debugReportCallbackEXT = debugReportCallbackEXT;
+ return *this;
+ }
+#endif
+
+ DebugReportCallbackEXT& operator=( std::nullptr_t )
+ {
+ m_debugReportCallbackEXT = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(DebugReportCallbackEXT const &rhs) const
+ {
+ return m_debugReportCallbackEXT == rhs.m_debugReportCallbackEXT;
+ }
+
+ bool operator!=(DebugReportCallbackEXT const &rhs) const
+ {
+ return m_debugReportCallbackEXT != rhs.m_debugReportCallbackEXT;
+ }
+
+ bool operator<(DebugReportCallbackEXT const &rhs) const
+ {
+ return m_debugReportCallbackEXT < rhs.m_debugReportCallbackEXT;
+ }
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDebugReportCallbackEXT() const
+ {
+ return m_debugReportCallbackEXT;
+ }
+
+ explicit operator bool() const
+ {
+ return m_debugReportCallbackEXT != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_debugReportCallbackEXT == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkDebugReportCallbackEXT m_debugReportCallbackEXT;
+ };
+ static_assert( sizeof( DebugReportCallbackEXT ) == sizeof( VkDebugReportCallbackEXT ), "handle and wrapper have different size!" );
+
+ struct Offset2D
+ {
+ Offset2D( int32_t x_ = 0, int32_t y_ = 0 )
+ : x( x_ )
+ , y( y_ )
+ {
+ }
+
+ Offset2D( VkOffset2D const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(Offset2D) );
+ }
+
+ Offset2D& operator=( VkOffset2D const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(Offset2D) );
+ return *this;
+ }
+
+ Offset2D& setX( int32_t x_ )
+ {
+ x = x_;
+ return *this;
+ }
+
+ Offset2D& setY( int32_t y_ )
+ {
+ y = y_;
+ return *this;
+ }
+
+ operator const VkOffset2D&() const
+ {
+ return *reinterpret_cast<const VkOffset2D*>(this);
+ }
+
+ bool operator==( Offset2D const& rhs ) const
+ {
+ return ( x == rhs.x )
+ && ( y == rhs.y );
+ }
+
+ bool operator!=( Offset2D const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ int32_t x;
+ int32_t y;
+ };
+ static_assert( sizeof( Offset2D ) == sizeof( VkOffset2D ), "struct and wrapper have different size!" );
+
+ struct Offset3D
+ {
+ Offset3D( int32_t x_ = 0, int32_t y_ = 0, int32_t z_ = 0 )
+ : x( x_ )
+ , y( y_ )
+ , z( z_ )
+ {
+ }
+
+ Offset3D( VkOffset3D const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(Offset3D) );
+ }
+
+ Offset3D& operator=( VkOffset3D const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(Offset3D) );
+ return *this;
+ }
+
+ Offset3D& setX( int32_t x_ )
+ {
+ x = x_;
+ return *this;
+ }
+
+ Offset3D& setY( int32_t y_ )
+ {
+ y = y_;
+ return *this;
+ }
+
+ Offset3D& setZ( int32_t z_ )
+ {
+ z = z_;
+ return *this;
+ }
+
+ operator const VkOffset3D&() const
+ {
+ return *reinterpret_cast<const VkOffset3D*>(this);
+ }
+
+ bool operator==( Offset3D const& rhs ) const
+ {
+ return ( x == rhs.x )
+ && ( y == rhs.y )
+ && ( z == rhs.z );
+ }
+
+ bool operator!=( Offset3D const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ int32_t x;
+ int32_t y;
+ int32_t z;
+ };
+ static_assert( sizeof( Offset3D ) == sizeof( VkOffset3D ), "struct and wrapper have different size!" );
+
+ struct Extent2D
+ {
+ Extent2D( uint32_t width_ = 0, uint32_t height_ = 0 )
+ : width( width_ )
+ , height( height_ )
+ {
+ }
+
+ Extent2D( VkExtent2D const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(Extent2D) );
+ }
+
+ Extent2D& operator=( VkExtent2D const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(Extent2D) );
+ return *this;
+ }
+
+ Extent2D& setWidth( uint32_t width_ )
+ {
+ width = width_;
+ return *this;
+ }
+
+ Extent2D& setHeight( uint32_t height_ )
+ {
+ height = height_;
+ return *this;
+ }
+
+ operator const VkExtent2D&() const
+ {
+ return *reinterpret_cast<const VkExtent2D*>(this);
+ }
+
+ bool operator==( Extent2D const& rhs ) const
+ {
+ return ( width == rhs.width )
+ && ( height == rhs.height );
+ }
+
+ bool operator!=( Extent2D const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ uint32_t width;
+ uint32_t height;
+ };
+ static_assert( sizeof( Extent2D ) == sizeof( VkExtent2D ), "struct and wrapper have different size!" );
+
+ struct Extent3D
+ {
+ Extent3D( uint32_t width_ = 0, uint32_t height_ = 0, uint32_t depth_ = 0 )
+ : width( width_ )
+ , height( height_ )
+ , depth( depth_ )
+ {
+ }
+
+ Extent3D( VkExtent3D const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(Extent3D) );
+ }
+
+ Extent3D& operator=( VkExtent3D const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(Extent3D) );
+ return *this;
+ }
+
+ Extent3D& setWidth( uint32_t width_ )
+ {
+ width = width_;
+ return *this;
+ }
+
+ Extent3D& setHeight( uint32_t height_ )
+ {
+ height = height_;
+ return *this;
+ }
+
+ Extent3D& setDepth( uint32_t depth_ )
+ {
+ depth = depth_;
+ return *this;
+ }
+
+ operator const VkExtent3D&() const
+ {
+ return *reinterpret_cast<const VkExtent3D*>(this);
+ }
+
+ bool operator==( Extent3D const& rhs ) const
+ {
+ return ( width == rhs.width )
+ && ( height == rhs.height )
+ && ( depth == rhs.depth );
+ }
+
+ bool operator!=( Extent3D const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ uint32_t width;
+ uint32_t height;
+ uint32_t depth;
+ };
+ static_assert( sizeof( Extent3D ) == sizeof( VkExtent3D ), "struct and wrapper have different size!" );
+
+ struct Viewport
+ {
+ Viewport( float x_ = 0, float y_ = 0, float width_ = 0, float height_ = 0, float minDepth_ = 0, float maxDepth_ = 0 )
+ : x( x_ )
+ , y( y_ )
+ , width( width_ )
+ , height( height_ )
+ , minDepth( minDepth_ )
+ , maxDepth( maxDepth_ )
+ {
+ }
+
+ Viewport( VkViewport const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(Viewport) );
+ }
+
+ Viewport& operator=( VkViewport const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(Viewport) );
+ return *this;
+ }
+
+ Viewport& setX( float x_ )
+ {
+ x = x_;
+ return *this;
+ }
+
+ Viewport& setY( float y_ )
+ {
+ y = y_;
+ return *this;
+ }
+
+ Viewport& setWidth( float width_ )
+ {
+ width = width_;
+ return *this;
+ }
+
+ Viewport& setHeight( float height_ )
+ {
+ height = height_;
+ return *this;
+ }
+
+ Viewport& setMinDepth( float minDepth_ )
+ {
+ minDepth = minDepth_;
+ return *this;
+ }
+
+ Viewport& setMaxDepth( float maxDepth_ )
+ {
+ maxDepth = maxDepth_;
+ return *this;
+ }
+
+ operator const VkViewport&() const
+ {
+ return *reinterpret_cast<const VkViewport*>(this);
+ }
+
+ bool operator==( Viewport const& rhs ) const
+ {
+ return ( x == rhs.x )
+ && ( y == rhs.y )
+ && ( width == rhs.width )
+ && ( height == rhs.height )
+ && ( minDepth == rhs.minDepth )
+ && ( maxDepth == rhs.maxDepth );
+ }
+
+ bool operator!=( Viewport const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ float x;
+ float y;
+ float width;
+ float height;
+ float minDepth;
+ float maxDepth;
+ };
+ static_assert( sizeof( Viewport ) == sizeof( VkViewport ), "struct and wrapper have different size!" );
+
+ struct Rect2D
+ {
+ Rect2D( Offset2D offset_ = Offset2D(), Extent2D extent_ = Extent2D() )
+ : offset( offset_ )
+ , extent( extent_ )
+ {
+ }
+
+ Rect2D( VkRect2D const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(Rect2D) );
+ }
+
+ Rect2D& operator=( VkRect2D const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(Rect2D) );
+ return *this;
+ }
+
+ Rect2D& setOffset( Offset2D offset_ )
+ {
+ offset = offset_;
+ return *this;
+ }
+
+ Rect2D& setExtent( Extent2D extent_ )
+ {
+ extent = extent_;
+ return *this;
+ }
+
+ operator const VkRect2D&() const
+ {
+ return *reinterpret_cast<const VkRect2D*>(this);
+ }
+
+ bool operator==( Rect2D const& rhs ) const
+ {
+ return ( offset == rhs.offset )
+ && ( extent == rhs.extent );
+ }
+
+ bool operator!=( Rect2D const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ Offset2D offset;
+ Extent2D extent;
+ };
+ static_assert( sizeof( Rect2D ) == sizeof( VkRect2D ), "struct and wrapper have different size!" );
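+
+ // Illustrative sketch: the geometry wrappers expose chainable set*() members
+ // and convert implicitly to their Vk counterparts, so a render area can be
+ // built fluently and handed straight to the C API. The width/height values
+ // are assumed inputs.
+ inline VkRect2D exampleRenderArea( uint32_t width, uint32_t height )
+ {
+ Rect2D renderArea = Rect2D()
+ .setOffset( Offset2D( 0, 0 ) )
+ .setExtent( Extent2D( width, height ) );
+ return renderArea; // implicit conversion via operator const VkRect2D&
+ }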
+
+ struct ClearRect
+ {
+ ClearRect( Rect2D rect_ = Rect2D(), uint32_t baseArrayLayer_ = 0, uint32_t layerCount_ = 0 )
+ : rect( rect_ )
+ , baseArrayLayer( baseArrayLayer_ )
+ , layerCount( layerCount_ )
+ {
+ }
+
+ ClearRect( VkClearRect const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ClearRect) );
+ }
+
+ ClearRect& operator=( VkClearRect const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ClearRect) );
+ return *this;
+ }
+
+ ClearRect& setRect( Rect2D rect_ )
+ {
+ rect = rect_;
+ return *this;
+ }
+
+ ClearRect& setBaseArrayLayer( uint32_t baseArrayLayer_ )
+ {
+ baseArrayLayer = baseArrayLayer_;
+ return *this;
+ }
+
+ ClearRect& setLayerCount( uint32_t layerCount_ )
+ {
+ layerCount = layerCount_;
+ return *this;
+ }
+
+ operator const VkClearRect&() const
+ {
+ return *reinterpret_cast<const VkClearRect*>(this);
+ }
+
+ bool operator==( ClearRect const& rhs ) const
+ {
+ return ( rect == rhs.rect )
+ && ( baseArrayLayer == rhs.baseArrayLayer )
+ && ( layerCount == rhs.layerCount );
+ }
+
+ bool operator!=( ClearRect const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ Rect2D rect;
+ uint32_t baseArrayLayer;
+ uint32_t layerCount;
+ };
+ static_assert( sizeof( ClearRect ) == sizeof( VkClearRect ), "struct and wrapper have different size!" );
+
+ struct ExtensionProperties
+ {
+ operator const VkExtensionProperties&() const
+ {
+ return *reinterpret_cast<const VkExtensionProperties*>(this);
+ }
+
+ bool operator==( ExtensionProperties const& rhs ) const
+ {
+ return ( memcmp( extensionName, rhs.extensionName, VK_MAX_EXTENSION_NAME_SIZE * sizeof( char ) ) == 0 )
+ && ( specVersion == rhs.specVersion );
+ }
+
+ bool operator!=( ExtensionProperties const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ char extensionName[VK_MAX_EXTENSION_NAME_SIZE];
+ uint32_t specVersion;
+ };
+ static_assert( sizeof( ExtensionProperties ) == sizeof( VkExtensionProperties ), "struct and wrapper have different size!" );
+
+ struct LayerProperties
+ {
+ operator const VkLayerProperties&() const
+ {
+ return *reinterpret_cast<const VkLayerProperties*>(this);
+ }
+
+ bool operator==( LayerProperties const& rhs ) const
+ {
+ return ( memcmp( layerName, rhs.layerName, VK_MAX_EXTENSION_NAME_SIZE * sizeof( char ) ) == 0 )
+ && ( specVersion == rhs.specVersion )
+ && ( implementationVersion == rhs.implementationVersion )
+ && ( memcmp( description, rhs.description, VK_MAX_DESCRIPTION_SIZE * sizeof( char ) ) == 0 );
+ }
+
+ bool operator!=( LayerProperties const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ char layerName[VK_MAX_EXTENSION_NAME_SIZE];
+ uint32_t specVersion;
+ uint32_t implementationVersion;
+ char description[VK_MAX_DESCRIPTION_SIZE];
+ };
+ static_assert( sizeof( LayerProperties ) == sizeof( VkLayerProperties ), "struct and wrapper have different size!" );
+
+ struct AllocationCallbacks
+ {
+ AllocationCallbacks( void* pUserData_ = nullptr, PFN_vkAllocationFunction pfnAllocation_ = nullptr, PFN_vkReallocationFunction pfnReallocation_ = nullptr, PFN_vkFreeFunction pfnFree_ = nullptr, PFN_vkInternalAllocationNotification pfnInternalAllocation_ = nullptr, PFN_vkInternalFreeNotification pfnInternalFree_ = nullptr )
+ : pUserData( pUserData_ )
+ , pfnAllocation( pfnAllocation_ )
+ , pfnReallocation( pfnReallocation_ )
+ , pfnFree( pfnFree_ )
+ , pfnInternalAllocation( pfnInternalAllocation_ )
+ , pfnInternalFree( pfnInternalFree_ )
+ {
+ }
+
+ AllocationCallbacks( VkAllocationCallbacks const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(AllocationCallbacks) );
+ }
+
+ AllocationCallbacks& operator=( VkAllocationCallbacks const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(AllocationCallbacks) );
+ return *this;
+ }
+
+ AllocationCallbacks& setPUserData( void* pUserData_ )
+ {
+ pUserData = pUserData_;
+ return *this;
+ }
+
+ AllocationCallbacks& setPfnAllocation( PFN_vkAllocationFunction pfnAllocation_ )
+ {
+ pfnAllocation = pfnAllocation_;
+ return *this;
+ }
+
+ AllocationCallbacks& setPfnReallocation( PFN_vkReallocationFunction pfnReallocation_ )
+ {
+ pfnReallocation = pfnReallocation_;
+ return *this;
+ }
+
+ AllocationCallbacks& setPfnFree( PFN_vkFreeFunction pfnFree_ )
+ {
+ pfnFree = pfnFree_;
+ return *this;
+ }
+
+ AllocationCallbacks& setPfnInternalAllocation( PFN_vkInternalAllocationNotification pfnInternalAllocation_ )
+ {
+ pfnInternalAllocation = pfnInternalAllocation_;
+ return *this;
+ }
+
+ AllocationCallbacks& setPfnInternalFree( PFN_vkInternalFreeNotification pfnInternalFree_ )
+ {
+ pfnInternalFree = pfnInternalFree_;
+ return *this;
+ }
+
+ operator const VkAllocationCallbacks&() const
+ {
+ return *reinterpret_cast<const VkAllocationCallbacks*>(this);
+ }
+
+ bool operator==( AllocationCallbacks const& rhs ) const
+ {
+ return ( pUserData == rhs.pUserData )
+ && ( pfnAllocation == rhs.pfnAllocation )
+ && ( pfnReallocation == rhs.pfnReallocation )
+ && ( pfnFree == rhs.pfnFree )
+ && ( pfnInternalAllocation == rhs.pfnInternalAllocation )
+ && ( pfnInternalFree == rhs.pfnInternalFree );
+ }
+
+ bool operator!=( AllocationCallbacks const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ void* pUserData;
+ PFN_vkAllocationFunction pfnAllocation;
+ PFN_vkReallocationFunction pfnReallocation;
+ PFN_vkFreeFunction pfnFree;
+ PFN_vkInternalAllocationNotification pfnInternalAllocation;
+ PFN_vkInternalFreeNotification pfnInternalFree;
+ };
+ static_assert( sizeof( AllocationCallbacks ) == sizeof( VkAllocationCallbacks ), "struct and wrapper have different size!" );
+
+ struct MemoryRequirements
+ {
+ operator const VkMemoryRequirements&() const
+ {
+ return *reinterpret_cast<const VkMemoryRequirements*>(this);
+ }
+
+ bool operator==( MemoryRequirements const& rhs ) const
+ {
+ return ( size == rhs.size )
+ && ( alignment == rhs.alignment )
+ && ( memoryTypeBits == rhs.memoryTypeBits );
+ }
+
+ bool operator!=( MemoryRequirements const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ DeviceSize size;
+ DeviceSize alignment;
+ uint32_t memoryTypeBits;
+ };
+ static_assert( sizeof( MemoryRequirements ) == sizeof( VkMemoryRequirements ), "struct and wrapper have different size!" );
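+
+ // Illustrative sketch of the usual memory-type selection loop. The properties
+ // are assumed to come from vkGetPhysicalDeviceMemoryProperties and the
+ // requirements from vkGetBufferMemoryRequirements or
+ // vkGetImageMemoryRequirements; bit i of memoryTypeBits set means memory
+ // type i is acceptable for the resource.
+ inline uint32_t exampleFindMemoryType( MemoryRequirements const & requirements, VkPhysicalDeviceMemoryProperties const & properties, VkMemoryPropertyFlags required )
+ {
+ for ( uint32_t i = 0; i < properties.memoryTypeCount; i++ )
+ {
+ bool typeAllowed = ( requirements.memoryTypeBits & ( 1u << i ) ) != 0;
+ bool flagsMatch = ( properties.memoryTypes[i].propertyFlags & required ) == required;
+ if ( typeAllowed && flagsMatch )
+ {
+ return i; // first memory type that is both allowed and suitably flagged
+ }
+ }
+ return ~0u; // no suitable memory type found
+ }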
+
+ struct DescriptorBufferInfo
+ {
+ DescriptorBufferInfo( Buffer buffer_ = Buffer(), DeviceSize offset_ = 0, DeviceSize range_ = 0 )
+ : buffer( buffer_ )
+ , offset( offset_ )
+ , range( range_ )
+ {
+ }
+
+ DescriptorBufferInfo( VkDescriptorBufferInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DescriptorBufferInfo) );
+ }
+
+ DescriptorBufferInfo& operator=( VkDescriptorBufferInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DescriptorBufferInfo) );
+ return *this;
+ }
+
+ DescriptorBufferInfo& setBuffer( Buffer buffer_ )
+ {
+ buffer = buffer_;
+ return *this;
+ }
+
+ DescriptorBufferInfo& setOffset( DeviceSize offset_ )
+ {
+ offset = offset_;
+ return *this;
+ }
+
+ DescriptorBufferInfo& setRange( DeviceSize range_ )
+ {
+ range = range_;
+ return *this;
+ }
+
+ operator const VkDescriptorBufferInfo&() const
+ {
+ return *reinterpret_cast<const VkDescriptorBufferInfo*>(this);
+ }
+
+ bool operator==( DescriptorBufferInfo const& rhs ) const
+ {
+ return ( buffer == rhs.buffer )
+ && ( offset == rhs.offset )
+ && ( range == rhs.range );
+ }
+
+ bool operator!=( DescriptorBufferInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ Buffer buffer;
+ DeviceSize offset;
+ DeviceSize range;
+ };
+ static_assert( sizeof( DescriptorBufferInfo ) == sizeof( VkDescriptorBufferInfo ), "struct and wrapper have different size!" );
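+
+ // Illustrative sketch: describing an entire buffer for a descriptor write,
+ // assuming 'uniformBuffer' was created elsewhere with vkCreateBuffer.
+ // VK_WHOLE_SIZE makes the range cover everything from the offset onwards.
+ inline DescriptorBufferInfo exampleWholeBufferInfo( Buffer uniformBuffer )
+ {
+ return DescriptorBufferInfo()
+ .setBuffer( uniformBuffer )
+ .setOffset( 0 )
+ .setRange( VK_WHOLE_SIZE );
+ }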
+
+ struct SubresourceLayout
+ {
+ operator const VkSubresourceLayout&() const
+ {
+ return *reinterpret_cast<const VkSubresourceLayout*>(this);
+ }
+
+ bool operator==( SubresourceLayout const& rhs ) const
+ {
+ return ( offset == rhs.offset )
+ && ( size == rhs.size )
+ && ( rowPitch == rhs.rowPitch )
+ && ( arrayPitch == rhs.arrayPitch )
+ && ( depthPitch == rhs.depthPitch );
+ }
+
+ bool operator!=( SubresourceLayout const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ DeviceSize offset;
+ DeviceSize size;
+ DeviceSize rowPitch;
+ DeviceSize arrayPitch;
+ DeviceSize depthPitch;
+ };
+ static_assert( sizeof( SubresourceLayout ) == sizeof( VkSubresourceLayout ), "struct and wrapper have different size!" );
+
+ struct BufferCopy
+ {
+ BufferCopy( DeviceSize srcOffset_ = 0, DeviceSize dstOffset_ = 0, DeviceSize size_ = 0 )
+ : srcOffset( srcOffset_ )
+ , dstOffset( dstOffset_ )
+ , size( size_ )
+ {
+ }
+
+ BufferCopy( VkBufferCopy const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(BufferCopy) );
+ }
+
+ BufferCopy& operator=( VkBufferCopy const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(BufferCopy) );
+ return *this;
+ }
+
+ BufferCopy& setSrcOffset( DeviceSize srcOffset_ )
+ {
+ srcOffset = srcOffset_;
+ return *this;
+ }
+
+ BufferCopy& setDstOffset( DeviceSize dstOffset_ )
+ {
+ dstOffset = dstOffset_;
+ return *this;
+ }
+
+ BufferCopy& setSize( DeviceSize size_ )
+ {
+ size = size_;
+ return *this;
+ }
+
+ operator const VkBufferCopy&() const
+ {
+ return *reinterpret_cast<const VkBufferCopy*>(this);
+ }
+
+ bool operator==( BufferCopy const& rhs ) const
+ {
+ return ( srcOffset == rhs.srcOffset )
+ && ( dstOffset == rhs.dstOffset )
+ && ( size == rhs.size );
+ }
+
+ bool operator!=( BufferCopy const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ DeviceSize srcOffset;
+ DeviceSize dstOffset;
+ DeviceSize size;
+ };
+ static_assert( sizeof( BufferCopy ) == sizeof( VkBufferCopy ), "struct and wrapper have different size!" );
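+
+ // Illustrative sketch: a single copy region spanning 'size' bytes from the
+ // start of the source buffer to the start of the destination buffer, as
+ // passed to vkCmdCopyBuffer.
+ inline BufferCopy exampleWholeBufferCopy( DeviceSize size )
+ {
+ return BufferCopy()
+ .setSrcOffset( 0 )
+ .setDstOffset( 0 )
+ .setSize( size );
+ }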
+
+ struct SpecializationMapEntry
+ {
+ SpecializationMapEntry( uint32_t constantID_ = 0, uint32_t offset_ = 0, size_t size_ = 0 )
+ : constantID( constantID_ )
+ , offset( offset_ )
+ , size( size_ )
+ {
+ }
+
+ SpecializationMapEntry( VkSpecializationMapEntry const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SpecializationMapEntry) );
+ }
+
+ SpecializationMapEntry& operator=( VkSpecializationMapEntry const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SpecializationMapEntry) );
+ return *this;
+ }
+
+ SpecializationMapEntry& setConstantID( uint32_t constantID_ )
+ {
+ constantID = constantID_;
+ return *this;
+ }
+
+ SpecializationMapEntry& setOffset( uint32_t offset_ )
+ {
+ offset = offset_;
+ return *this;
+ }
+
+ SpecializationMapEntry& setSize( size_t size_ )
+ {
+ size = size_;
+ return *this;
+ }
+
+ operator const VkSpecializationMapEntry&() const
+ {
+ return *reinterpret_cast<const VkSpecializationMapEntry*>(this);
+ }
+
+ bool operator==( SpecializationMapEntry const& rhs ) const
+ {
+ return ( constantID == rhs.constantID )
+ && ( offset == rhs.offset )
+ && ( size == rhs.size );
+ }
+
+ bool operator!=( SpecializationMapEntry const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ uint32_t constantID;
+ uint32_t offset;
+ size_t size;
+ };
+ static_assert( sizeof( SpecializationMapEntry ) == sizeof( VkSpecializationMapEntry ), "struct and wrapper have different size!" );
+
+ struct SpecializationInfo
+ {
+ SpecializationInfo( uint32_t mapEntryCount_ = 0, const SpecializationMapEntry* pMapEntries_ = nullptr, size_t dataSize_ = 0, const void* pData_ = nullptr )
+ : mapEntryCount( mapEntryCount_ )
+ , pMapEntries( pMapEntries_ )
+ , dataSize( dataSize_ )
+ , pData( pData_ )
+ {
+ }
+
+ SpecializationInfo( VkSpecializationInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SpecializationInfo) );
+ }
+
+ SpecializationInfo& operator=( VkSpecializationInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SpecializationInfo) );
+ return *this;
+ }
+
+ SpecializationInfo& setMapEntryCount( uint32_t mapEntryCount_ )
+ {
+ mapEntryCount = mapEntryCount_;
+ return *this;
+ }
+
+ SpecializationInfo& setPMapEntries( const SpecializationMapEntry* pMapEntries_ )
+ {
+ pMapEntries = pMapEntries_;
+ return *this;
+ }
+
+ SpecializationInfo& setDataSize( size_t dataSize_ )
+ {
+ dataSize = dataSize_;
+ return *this;
+ }
+
+ SpecializationInfo& setPData( const void* pData_ )
+ {
+ pData = pData_;
+ return *this;
+ }
+
+ operator const VkSpecializationInfo&() const
+ {
+ return *reinterpret_cast<const VkSpecializationInfo*>(this);
+ }
+
+ bool operator==( SpecializationInfo const& rhs ) const
+ {
+ return ( mapEntryCount == rhs.mapEntryCount )
+ && ( pMapEntries == rhs.pMapEntries )
+ && ( dataSize == rhs.dataSize )
+ && ( pData == rhs.pData );
+ }
+
+ bool operator!=( SpecializationInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ uint32_t mapEntryCount;
+ const SpecializationMapEntry* pMapEntries;
+ size_t dataSize;
+ const void* pData;
+ };
+ static_assert( sizeof( SpecializationInfo ) == sizeof( VkSpecializationInfo ), "struct and wrapper have different size!" );
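+
+ // Illustrative sketch: wiring one specialization constant (constant_id 0) to a
+ // caller-provided value. Both the map entry and the value are assumed to
+ // outlive pipeline creation, since SpecializationInfo only stores pointers to
+ // them.
+ inline SpecializationInfo exampleSpecializationInfo( const SpecializationMapEntry* pEntry, const uint32_t* pValue )
+ {
+ return SpecializationInfo()
+ .setMapEntryCount( 1 )
+ .setPMapEntries( pEntry ) // e.g. SpecializationMapEntry( 0, 0, sizeof( uint32_t ) )
+ .setDataSize( sizeof( uint32_t ) )
+ .setPData( pValue );
+ }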
+
+ union ClearColorValue
+ {
+ ClearColorValue( const std::array<float,4>& float32_ = { {0} } )
+ {
+ memcpy( &float32, float32_.data(), 4 * sizeof( float ) );
+ }
+
+ ClearColorValue( const std::array<int32_t,4>& int32_ )
+ {
+ memcpy( &int32, int32_.data(), 4 * sizeof( int32_t ) );
+ }
+
+ ClearColorValue( const std::array<uint32_t,4>& uint32_ )
+ {
+ memcpy( &uint32, uint32_.data(), 4 * sizeof( uint32_t ) );
+ }
+
+ ClearColorValue& setFloat32( std::array<float,4> float32_ )
+ {
+ memcpy( &float32, float32_.data(), 4 * sizeof( float ) );
+ return *this;
+ }
+
+ ClearColorValue& setInt32( std::array<int32_t,4> int32_ )
+ {
+ memcpy( &int32, int32_.data(), 4 * sizeof( int32_t ) );
+ return *this;
+ }
+
+ ClearColorValue& setUint32( std::array<uint32_t,4> uint32_ )
+ {
+ memcpy( &uint32, uint32_.data(), 4 * sizeof( uint32_t ) );
+ return *this;
+ }
+
+ operator VkClearColorValue const& () const
+ {
+ return *reinterpret_cast<const VkClearColorValue*>(this);
+ }
+
+ float float32[4];
+ int32_t int32[4];
+ uint32_t uint32[4];
+ };
+
+ struct ClearDepthStencilValue
+ {
+ ClearDepthStencilValue( float depth_ = 0, uint32_t stencil_ = 0 )
+ : depth( depth_ )
+ , stencil( stencil_ )
+ {
+ }
+
+ ClearDepthStencilValue( VkClearDepthStencilValue const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ClearDepthStencilValue) );
+ }
+
+ ClearDepthStencilValue& operator=( VkClearDepthStencilValue const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ClearDepthStencilValue) );
+ return *this;
+ }
+
+ ClearDepthStencilValue& setDepth( float depth_ )
+ {
+ depth = depth_;
+ return *this;
+ }
+
+ ClearDepthStencilValue& setStencil( uint32_t stencil_ )
+ {
+ stencil = stencil_;
+ return *this;
+ }
+
+ operator const VkClearDepthStencilValue&() const
+ {
+ return *reinterpret_cast<const VkClearDepthStencilValue*>(this);
+ }
+
+ bool operator==( ClearDepthStencilValue const& rhs ) const
+ {
+ return ( depth == rhs.depth )
+ && ( stencil == rhs.stencil );
+ }
+
+ bool operator!=( ClearDepthStencilValue const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ float depth;
+ uint32_t stencil;
+ };
+ static_assert( sizeof( ClearDepthStencilValue ) == sizeof( VkClearDepthStencilValue ), "struct and wrapper have different size!" );
+
+ union ClearValue
+ {
+ ClearValue( ClearColorValue color_ = ClearColorValue() )
+ {
+ color = color_;
+ }
+
+ ClearValue( ClearDepthStencilValue depthStencil_ )
+ {
+ depthStencil = depthStencil_;
+ }
+
+ ClearValue& setColor( ClearColorValue color_ )
+ {
+ color = color_;
+ return *this;
+ }
+
+ ClearValue& setDepthStencil( ClearDepthStencilValue depthStencil_ )
+ {
+ depthStencil = depthStencil_;
+ return *this;
+ }
+
+ operator VkClearValue const& () const
+ {
+ return *reinterpret_cast<const VkClearValue*>(this);
+ }
+
+#ifdef VULKAN_HPP_HAS_UNRESTRICTED_UNIONS
+ ClearColorValue color;
+ ClearDepthStencilValue depthStencil;
+#else
+ VkClearColorValue color;
+ VkClearDepthStencilValue depthStencil;
+#endif // VULKAN_HPP_HAS_UNRESTRICTED_UNIONS
+ };
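+
+ // Illustrative sketch: typical clear values for a color attachment and a
+ // depth/stencil attachment, e.g. for the pClearValues array used when
+ // beginning a render pass.
+ inline ClearValue exampleColorClear()
+ {
+ return ClearValue( ClearColorValue( std::array<float,4>{ { 0.0f, 0.0f, 0.0f, 1.0f } } ) ); // opaque black
+ }
+
+ inline ClearValue exampleDepthClear()
+ {
+ return ClearValue( ClearDepthStencilValue( 1.0f, 0 ) ); // far plane depth, stencil 0
+ }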
+
+ struct PhysicalDeviceFeatures
+ {
+ PhysicalDeviceFeatures( Bool32 robustBufferAccess_ = 0, Bool32 fullDrawIndexUint32_ = 0, Bool32 imageCubeArray_ = 0, Bool32 independentBlend_ = 0, Bool32 geometryShader_ = 0, Bool32 tessellationShader_ = 0, Bool32 sampleRateShading_ = 0, Bool32 dualSrcBlend_ = 0, Bool32 logicOp_ = 0, Bool32 multiDrawIndirect_ = 0, Bool32 drawIndirectFirstInstance_ = 0, Bool32 depthClamp_ = 0, Bool32 depthBiasClamp_ = 0, Bool32 fillModeNonSolid_ = 0, Bool32 depthBounds_ = 0, Bool32 wideLines_ = 0, Bool32 largePoints_ = 0, Bool32 alphaToOne_ = 0, Bool32 multiViewport_ = 0, Bool32 samplerAnisotropy_ = 0, Bool32 textureCompressionETC2_ = 0, Bool32 textureCompressionASTC_LDR_ = 0, Bool32 textureCompressionBC_ = 0, Bool32 occlusionQueryPrecise_ = 0, Bool32 pipelineStatisticsQuery_ = 0, Bool32 vertexPipelineStoresAndAtomics_ = 0, Bool32 fragmentStoresAndAtomics_ = 0, Bool32 shaderTessellationAndGeometryPointSize_ = 0, Bool32 shaderImageGatherExtended_ = 0, Bool32 shaderStorageImageExtendedFormats_ = 0, Bool32 shaderStorageImageMultisample_ = 0, Bool32 shaderStorageImageReadWithoutFormat_ = 0, Bool32 shaderStorageImageWriteWithoutFormat_ = 0, Bool32 shaderUniformBufferArrayDynamicIndexing_ = 0, Bool32 shaderSampledImageArrayDynamicIndexing_ = 0, Bool32 shaderStorageBufferArrayDynamicIndexing_ = 0, Bool32 shaderStorageImageArrayDynamicIndexing_ = 0, Bool32 shaderClipDistance_ = 0, Bool32 shaderCullDistance_ = 0, Bool32 shaderFloat64_ = 0, Bool32 shaderInt64_ = 0, Bool32 shaderInt16_ = 0, Bool32 shaderResourceResidency_ = 0, Bool32 shaderResourceMinLod_ = 0, Bool32 sparseBinding_ = 0, Bool32 sparseResidencyBuffer_ = 0, Bool32 sparseResidencyImage2D_ = 0, Bool32 sparseResidencyImage3D_ = 0, Bool32 sparseResidency2Samples_ = 0, Bool32 sparseResidency4Samples_ = 0, Bool32 sparseResidency8Samples_ = 0, Bool32 sparseResidency16Samples_ = 0, Bool32 sparseResidencyAliased_ = 0, Bool32 variableMultisampleRate_ = 0, Bool32 inheritedQueries_ = 0 )
+ : robustBufferAccess( robustBufferAccess_ )
+ , fullDrawIndexUint32( fullDrawIndexUint32_ )
+ , imageCubeArray( imageCubeArray_ )
+ , independentBlend( independentBlend_ )
+ , geometryShader( geometryShader_ )
+ , tessellationShader( tessellationShader_ )
+ , sampleRateShading( sampleRateShading_ )
+ , dualSrcBlend( dualSrcBlend_ )
+ , logicOp( logicOp_ )
+ , multiDrawIndirect( multiDrawIndirect_ )
+ , drawIndirectFirstInstance( drawIndirectFirstInstance_ )
+ , depthClamp( depthClamp_ )
+ , depthBiasClamp( depthBiasClamp_ )
+ , fillModeNonSolid( fillModeNonSolid_ )
+ , depthBounds( depthBounds_ )
+ , wideLines( wideLines_ )
+ , largePoints( largePoints_ )
+ , alphaToOne( alphaToOne_ )
+ , multiViewport( multiViewport_ )
+ , samplerAnisotropy( samplerAnisotropy_ )
+ , textureCompressionETC2( textureCompressionETC2_ )
+ , textureCompressionASTC_LDR( textureCompressionASTC_LDR_ )
+ , textureCompressionBC( textureCompressionBC_ )
+ , occlusionQueryPrecise( occlusionQueryPrecise_ )
+ , pipelineStatisticsQuery( pipelineStatisticsQuery_ )
+ , vertexPipelineStoresAndAtomics( vertexPipelineStoresAndAtomics_ )
+ , fragmentStoresAndAtomics( fragmentStoresAndAtomics_ )
+ , shaderTessellationAndGeometryPointSize( shaderTessellationAndGeometryPointSize_ )
+ , shaderImageGatherExtended( shaderImageGatherExtended_ )
+ , shaderStorageImageExtendedFormats( shaderStorageImageExtendedFormats_ )
+ , shaderStorageImageMultisample( shaderStorageImageMultisample_ )
+ , shaderStorageImageReadWithoutFormat( shaderStorageImageReadWithoutFormat_ )
+ , shaderStorageImageWriteWithoutFormat( shaderStorageImageWriteWithoutFormat_ )
+ , shaderUniformBufferArrayDynamicIndexing( shaderUniformBufferArrayDynamicIndexing_ )
+ , shaderSampledImageArrayDynamicIndexing( shaderSampledImageArrayDynamicIndexing_ )
+ , shaderStorageBufferArrayDynamicIndexing( shaderStorageBufferArrayDynamicIndexing_ )
+ , shaderStorageImageArrayDynamicIndexing( shaderStorageImageArrayDynamicIndexing_ )
+ , shaderClipDistance( shaderClipDistance_ )
+ , shaderCullDistance( shaderCullDistance_ )
+ , shaderFloat64( shaderFloat64_ )
+ , shaderInt64( shaderInt64_ )
+ , shaderInt16( shaderInt16_ )
+ , shaderResourceResidency( shaderResourceResidency_ )
+ , shaderResourceMinLod( shaderResourceMinLod_ )
+ , sparseBinding( sparseBinding_ )
+ , sparseResidencyBuffer( sparseResidencyBuffer_ )
+ , sparseResidencyImage2D( sparseResidencyImage2D_ )
+ , sparseResidencyImage3D( sparseResidencyImage3D_ )
+ , sparseResidency2Samples( sparseResidency2Samples_ )
+ , sparseResidency4Samples( sparseResidency4Samples_ )
+ , sparseResidency8Samples( sparseResidency8Samples_ )
+ , sparseResidency16Samples( sparseResidency16Samples_ )
+ , sparseResidencyAliased( sparseResidencyAliased_ )
+ , variableMultisampleRate( variableMultisampleRate_ )
+ , inheritedQueries( inheritedQueries_ )
+ {
+ }
+
+ PhysicalDeviceFeatures( VkPhysicalDeviceFeatures const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PhysicalDeviceFeatures) );
+ }
+
+ PhysicalDeviceFeatures& operator=( VkPhysicalDeviceFeatures const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PhysicalDeviceFeatures) );
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setRobustBufferAccess( Bool32 robustBufferAccess_ )
+ {
+ robustBufferAccess = robustBufferAccess_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setFullDrawIndexUint32( Bool32 fullDrawIndexUint32_ )
+ {
+ fullDrawIndexUint32 = fullDrawIndexUint32_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setImageCubeArray( Bool32 imageCubeArray_ )
+ {
+ imageCubeArray = imageCubeArray_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setIndependentBlend( Bool32 independentBlend_ )
+ {
+ independentBlend = independentBlend_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setGeometryShader( Bool32 geometryShader_ )
+ {
+ geometryShader = geometryShader_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setTessellationShader( Bool32 tessellationShader_ )
+ {
+ tessellationShader = tessellationShader_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setSampleRateShading( Bool32 sampleRateShading_ )
+ {
+ sampleRateShading = sampleRateShading_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setDualSrcBlend( Bool32 dualSrcBlend_ )
+ {
+ dualSrcBlend = dualSrcBlend_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setLogicOp( Bool32 logicOp_ )
+ {
+ logicOp = logicOp_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setMultiDrawIndirect( Bool32 multiDrawIndirect_ )
+ {
+ multiDrawIndirect = multiDrawIndirect_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setDrawIndirectFirstInstance( Bool32 drawIndirectFirstInstance_ )
+ {
+ drawIndirectFirstInstance = drawIndirectFirstInstance_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setDepthClamp( Bool32 depthClamp_ )
+ {
+ depthClamp = depthClamp_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setDepthBiasClamp( Bool32 depthBiasClamp_ )
+ {
+ depthBiasClamp = depthBiasClamp_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setFillModeNonSolid( Bool32 fillModeNonSolid_ )
+ {
+ fillModeNonSolid = fillModeNonSolid_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setDepthBounds( Bool32 depthBounds_ )
+ {
+ depthBounds = depthBounds_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setWideLines( Bool32 wideLines_ )
+ {
+ wideLines = wideLines_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setLargePoints( Bool32 largePoints_ )
+ {
+ largePoints = largePoints_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setAlphaToOne( Bool32 alphaToOne_ )
+ {
+ alphaToOne = alphaToOne_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setMultiViewport( Bool32 multiViewport_ )
+ {
+ multiViewport = multiViewport_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setSamplerAnisotropy( Bool32 samplerAnisotropy_ )
+ {
+ samplerAnisotropy = samplerAnisotropy_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setTextureCompressionETC2( Bool32 textureCompressionETC2_ )
+ {
+ textureCompressionETC2 = textureCompressionETC2_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setTextureCompressionASTC_LDR( Bool32 textureCompressionASTC_LDR_ )
+ {
+ textureCompressionASTC_LDR = textureCompressionASTC_LDR_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setTextureCompressionBC( Bool32 textureCompressionBC_ )
+ {
+ textureCompressionBC = textureCompressionBC_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setOcclusionQueryPrecise( Bool32 occlusionQueryPrecise_ )
+ {
+ occlusionQueryPrecise = occlusionQueryPrecise_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setPipelineStatisticsQuery( Bool32 pipelineStatisticsQuery_ )
+ {
+ pipelineStatisticsQuery = pipelineStatisticsQuery_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setVertexPipelineStoresAndAtomics( Bool32 vertexPipelineStoresAndAtomics_ )
+ {
+ vertexPipelineStoresAndAtomics = vertexPipelineStoresAndAtomics_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setFragmentStoresAndAtomics( Bool32 fragmentStoresAndAtomics_ )
+ {
+ fragmentStoresAndAtomics = fragmentStoresAndAtomics_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setShaderTessellationAndGeometryPointSize( Bool32 shaderTessellationAndGeometryPointSize_ )
+ {
+ shaderTessellationAndGeometryPointSize = shaderTessellationAndGeometryPointSize_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setShaderImageGatherExtended( Bool32 shaderImageGatherExtended_ )
+ {
+ shaderImageGatherExtended = shaderImageGatherExtended_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setShaderStorageImageExtendedFormats( Bool32 shaderStorageImageExtendedFormats_ )
+ {
+ shaderStorageImageExtendedFormats = shaderStorageImageExtendedFormats_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setShaderStorageImageMultisample( Bool32 shaderStorageImageMultisample_ )
+ {
+ shaderStorageImageMultisample = shaderStorageImageMultisample_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setShaderStorageImageReadWithoutFormat( Bool32 shaderStorageImageReadWithoutFormat_ )
+ {
+ shaderStorageImageReadWithoutFormat = shaderStorageImageReadWithoutFormat_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setShaderStorageImageWriteWithoutFormat( Bool32 shaderStorageImageWriteWithoutFormat_ )
+ {
+ shaderStorageImageWriteWithoutFormat = shaderStorageImageWriteWithoutFormat_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setShaderUniformBufferArrayDynamicIndexing( Bool32 shaderUniformBufferArrayDynamicIndexing_ )
+ {
+ shaderUniformBufferArrayDynamicIndexing = shaderUniformBufferArrayDynamicIndexing_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setShaderSampledImageArrayDynamicIndexing( Bool32 shaderSampledImageArrayDynamicIndexing_ )
+ {
+ shaderSampledImageArrayDynamicIndexing = shaderSampledImageArrayDynamicIndexing_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setShaderStorageBufferArrayDynamicIndexing( Bool32 shaderStorageBufferArrayDynamicIndexing_ )
+ {
+ shaderStorageBufferArrayDynamicIndexing = shaderStorageBufferArrayDynamicIndexing_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setShaderStorageImageArrayDynamicIndexing( Bool32 shaderStorageImageArrayDynamicIndexing_ )
+ {
+ shaderStorageImageArrayDynamicIndexing = shaderStorageImageArrayDynamicIndexing_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setShaderClipDistance( Bool32 shaderClipDistance_ )
+ {
+ shaderClipDistance = shaderClipDistance_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setShaderCullDistance( Bool32 shaderCullDistance_ )
+ {
+ shaderCullDistance = shaderCullDistance_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setShaderFloat64( Bool32 shaderFloat64_ )
+ {
+ shaderFloat64 = shaderFloat64_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setShaderInt64( Bool32 shaderInt64_ )
+ {
+ shaderInt64 = shaderInt64_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setShaderInt16( Bool32 shaderInt16_ )
+ {
+ shaderInt16 = shaderInt16_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setShaderResourceResidency( Bool32 shaderResourceResidency_ )
+ {
+ shaderResourceResidency = shaderResourceResidency_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setShaderResourceMinLod( Bool32 shaderResourceMinLod_ )
+ {
+ shaderResourceMinLod = shaderResourceMinLod_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setSparseBinding( Bool32 sparseBinding_ )
+ {
+ sparseBinding = sparseBinding_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setSparseResidencyBuffer( Bool32 sparseResidencyBuffer_ )
+ {
+ sparseResidencyBuffer = sparseResidencyBuffer_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setSparseResidencyImage2D( Bool32 sparseResidencyImage2D_ )
+ {
+ sparseResidencyImage2D = sparseResidencyImage2D_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setSparseResidencyImage3D( Bool32 sparseResidencyImage3D_ )
+ {
+ sparseResidencyImage3D = sparseResidencyImage3D_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setSparseResidency2Samples( Bool32 sparseResidency2Samples_ )
+ {
+ sparseResidency2Samples = sparseResidency2Samples_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setSparseResidency4Samples( Bool32 sparseResidency4Samples_ )
+ {
+ sparseResidency4Samples = sparseResidency4Samples_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setSparseResidency8Samples( Bool32 sparseResidency8Samples_ )
+ {
+ sparseResidency8Samples = sparseResidency8Samples_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setSparseResidency16Samples( Bool32 sparseResidency16Samples_ )
+ {
+ sparseResidency16Samples = sparseResidency16Samples_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setSparseResidencyAliased( Bool32 sparseResidencyAliased_ )
+ {
+ sparseResidencyAliased = sparseResidencyAliased_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setVariableMultisampleRate( Bool32 variableMultisampleRate_ )
+ {
+ variableMultisampleRate = variableMultisampleRate_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures& setInheritedQueries( Bool32 inheritedQueries_ )
+ {
+ inheritedQueries = inheritedQueries_;
+ return *this;
+ }
+
+ operator const VkPhysicalDeviceFeatures&() const
+ {
+ return *reinterpret_cast<const VkPhysicalDeviceFeatures*>(this);
+ }
+
+ bool operator==( PhysicalDeviceFeatures const& rhs ) const
+ {
+ return ( robustBufferAccess == rhs.robustBufferAccess )
+ && ( fullDrawIndexUint32 == rhs.fullDrawIndexUint32 )
+ && ( imageCubeArray == rhs.imageCubeArray )
+ && ( independentBlend == rhs.independentBlend )
+ && ( geometryShader == rhs.geometryShader )
+ && ( tessellationShader == rhs.tessellationShader )
+ && ( sampleRateShading == rhs.sampleRateShading )
+ && ( dualSrcBlend == rhs.dualSrcBlend )
+ && ( logicOp == rhs.logicOp )
+ && ( multiDrawIndirect == rhs.multiDrawIndirect )
+ && ( drawIndirectFirstInstance == rhs.drawIndirectFirstInstance )
+ && ( depthClamp == rhs.depthClamp )
+ && ( depthBiasClamp == rhs.depthBiasClamp )
+ && ( fillModeNonSolid == rhs.fillModeNonSolid )
+ && ( depthBounds == rhs.depthBounds )
+ && ( wideLines == rhs.wideLines )
+ && ( largePoints == rhs.largePoints )
+ && ( alphaToOne == rhs.alphaToOne )
+ && ( multiViewport == rhs.multiViewport )
+ && ( samplerAnisotropy == rhs.samplerAnisotropy )
+ && ( textureCompressionETC2 == rhs.textureCompressionETC2 )
+ && ( textureCompressionASTC_LDR == rhs.textureCompressionASTC_LDR )
+ && ( textureCompressionBC == rhs.textureCompressionBC )
+ && ( occlusionQueryPrecise == rhs.occlusionQueryPrecise )
+ && ( pipelineStatisticsQuery == rhs.pipelineStatisticsQuery )
+ && ( vertexPipelineStoresAndAtomics == rhs.vertexPipelineStoresAndAtomics )
+ && ( fragmentStoresAndAtomics == rhs.fragmentStoresAndAtomics )
+ && ( shaderTessellationAndGeometryPointSize == rhs.shaderTessellationAndGeometryPointSize )
+ && ( shaderImageGatherExtended == rhs.shaderImageGatherExtended )
+ && ( shaderStorageImageExtendedFormats == rhs.shaderStorageImageExtendedFormats )
+ && ( shaderStorageImageMultisample == rhs.shaderStorageImageMultisample )
+ && ( shaderStorageImageReadWithoutFormat == rhs.shaderStorageImageReadWithoutFormat )
+ && ( shaderStorageImageWriteWithoutFormat == rhs.shaderStorageImageWriteWithoutFormat )
+ && ( shaderUniformBufferArrayDynamicIndexing == rhs.shaderUniformBufferArrayDynamicIndexing )
+ && ( shaderSampledImageArrayDynamicIndexing == rhs.shaderSampledImageArrayDynamicIndexing )
+ && ( shaderStorageBufferArrayDynamicIndexing == rhs.shaderStorageBufferArrayDynamicIndexing )
+ && ( shaderStorageImageArrayDynamicIndexing == rhs.shaderStorageImageArrayDynamicIndexing )
+ && ( shaderClipDistance == rhs.shaderClipDistance )
+ && ( shaderCullDistance == rhs.shaderCullDistance )
+ && ( shaderFloat64 == rhs.shaderFloat64 )
+ && ( shaderInt64 == rhs.shaderInt64 )
+ && ( shaderInt16 == rhs.shaderInt16 )
+ && ( shaderResourceResidency == rhs.shaderResourceResidency )
+ && ( shaderResourceMinLod == rhs.shaderResourceMinLod )
+ && ( sparseBinding == rhs.sparseBinding )
+ && ( sparseResidencyBuffer == rhs.sparseResidencyBuffer )
+ && ( sparseResidencyImage2D == rhs.sparseResidencyImage2D )
+ && ( sparseResidencyImage3D == rhs.sparseResidencyImage3D )
+ && ( sparseResidency2Samples == rhs.sparseResidency2Samples )
+ && ( sparseResidency4Samples == rhs.sparseResidency4Samples )
+ && ( sparseResidency8Samples == rhs.sparseResidency8Samples )
+ && ( sparseResidency16Samples == rhs.sparseResidency16Samples )
+ && ( sparseResidencyAliased == rhs.sparseResidencyAliased )
+ && ( variableMultisampleRate == rhs.variableMultisampleRate )
+ && ( inheritedQueries == rhs.inheritedQueries );
+ }
+
+ bool operator!=( PhysicalDeviceFeatures const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ Bool32 robustBufferAccess;
+ Bool32 fullDrawIndexUint32;
+ Bool32 imageCubeArray;
+ Bool32 independentBlend;
+ Bool32 geometryShader;
+ Bool32 tessellationShader;
+ Bool32 sampleRateShading;
+ Bool32 dualSrcBlend;
+ Bool32 logicOp;
+ Bool32 multiDrawIndirect;
+ Bool32 drawIndirectFirstInstance;
+ Bool32 depthClamp;
+ Bool32 depthBiasClamp;
+ Bool32 fillModeNonSolid;
+ Bool32 depthBounds;
+ Bool32 wideLines;
+ Bool32 largePoints;
+ Bool32 alphaToOne;
+ Bool32 multiViewport;
+ Bool32 samplerAnisotropy;
+ Bool32 textureCompressionETC2;
+ Bool32 textureCompressionASTC_LDR;
+ Bool32 textureCompressionBC;
+ Bool32 occlusionQueryPrecise;
+ Bool32 pipelineStatisticsQuery;
+ Bool32 vertexPipelineStoresAndAtomics;
+ Bool32 fragmentStoresAndAtomics;
+ Bool32 shaderTessellationAndGeometryPointSize;
+ Bool32 shaderImageGatherExtended;
+ Bool32 shaderStorageImageExtendedFormats;
+ Bool32 shaderStorageImageMultisample;
+ Bool32 shaderStorageImageReadWithoutFormat;
+ Bool32 shaderStorageImageWriteWithoutFormat;
+ Bool32 shaderUniformBufferArrayDynamicIndexing;
+ Bool32 shaderSampledImageArrayDynamicIndexing;
+ Bool32 shaderStorageBufferArrayDynamicIndexing;
+ Bool32 shaderStorageImageArrayDynamicIndexing;
+ Bool32 shaderClipDistance;
+ Bool32 shaderCullDistance;
+ Bool32 shaderFloat64;
+ Bool32 shaderInt64;
+ Bool32 shaderInt16;
+ Bool32 shaderResourceResidency;
+ Bool32 shaderResourceMinLod;
+ Bool32 sparseBinding;
+ Bool32 sparseResidencyBuffer;
+ Bool32 sparseResidencyImage2D;
+ Bool32 sparseResidencyImage3D;
+ Bool32 sparseResidency2Samples;
+ Bool32 sparseResidency4Samples;
+ Bool32 sparseResidency8Samples;
+ Bool32 sparseResidency16Samples;
+ Bool32 sparseResidencyAliased;
+ Bool32 variableMultisampleRate;
+ Bool32 inheritedQueries;
+ };
+ static_assert( sizeof( PhysicalDeviceFeatures ) == sizeof( VkPhysicalDeviceFeatures ), "struct and wrapper have different size!" );
+
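A brief sketch of the fluent setter pattern these wrappers provide (the particular features enabled below are arbitrary examples, and devFeatures is an assumed local variable):

    vk::PhysicalDeviceFeatures devFeatures = vk::PhysicalDeviceFeatures()
        .setSamplerAnisotropy( VK_TRUE )
        .setGeometryShader( VK_TRUE );
    // Because the wrapper is layout-compatible with VkPhysicalDeviceFeatures (see the
    // static_assert above), its address can be passed wherever the C struct is expected.
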
+ struct PhysicalDeviceSparseProperties
+ {
+ operator const VkPhysicalDeviceSparseProperties&() const
+ {
+ return *reinterpret_cast<const VkPhysicalDeviceSparseProperties*>(this);
+ }
+
+ bool operator==( PhysicalDeviceSparseProperties const& rhs ) const
+ {
+ return ( residencyStandard2DBlockShape == rhs.residencyStandard2DBlockShape )
+ && ( residencyStandard2DMultisampleBlockShape == rhs.residencyStandard2DMultisampleBlockShape )
+ && ( residencyStandard3DBlockShape == rhs.residencyStandard3DBlockShape )
+ && ( residencyAlignedMipSize == rhs.residencyAlignedMipSize )
+ && ( residencyNonResidentStrict == rhs.residencyNonResidentStrict );
+ }
+
+ bool operator!=( PhysicalDeviceSparseProperties const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ Bool32 residencyStandard2DBlockShape;
+ Bool32 residencyStandard2DMultisampleBlockShape;
+ Bool32 residencyStandard3DBlockShape;
+ Bool32 residencyAlignedMipSize;
+ Bool32 residencyNonResidentStrict;
+ };
+ static_assert( sizeof( PhysicalDeviceSparseProperties ) == sizeof( VkPhysicalDeviceSparseProperties ), "struct and wrapper have different size!" );
+
+ struct DrawIndirectCommand
+ {
+ DrawIndirectCommand( uint32_t vertexCount_ = 0, uint32_t instanceCount_ = 0, uint32_t firstVertex_ = 0, uint32_t firstInstance_ = 0 )
+ : vertexCount( vertexCount_ )
+ , instanceCount( instanceCount_ )
+ , firstVertex( firstVertex_ )
+ , firstInstance( firstInstance_ )
+ {
+ }
+
+ DrawIndirectCommand( VkDrawIndirectCommand const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DrawIndirectCommand) );
+ }
+
+ DrawIndirectCommand& operator=( VkDrawIndirectCommand const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DrawIndirectCommand) );
+ return *this;
+ }
+
+ DrawIndirectCommand& setVertexCount( uint32_t vertexCount_ )
+ {
+ vertexCount = vertexCount_;
+ return *this;
+ }
+
+ DrawIndirectCommand& setInstanceCount( uint32_t instanceCount_ )
+ {
+ instanceCount = instanceCount_;
+ return *this;
+ }
+
+ DrawIndirectCommand& setFirstVertex( uint32_t firstVertex_ )
+ {
+ firstVertex = firstVertex_;
+ return *this;
+ }
+
+ DrawIndirectCommand& setFirstInstance( uint32_t firstInstance_ )
+ {
+ firstInstance = firstInstance_;
+ return *this;
+ }
+
+ operator const VkDrawIndirectCommand&() const
+ {
+ return *reinterpret_cast<const VkDrawIndirectCommand*>(this);
+ }
+
+ bool operator==( DrawIndirectCommand const& rhs ) const
+ {
+ return ( vertexCount == rhs.vertexCount )
+ && ( instanceCount == rhs.instanceCount )
+ && ( firstVertex == rhs.firstVertex )
+ && ( firstInstance == rhs.firstInstance );
+ }
+
+ bool operator!=( DrawIndirectCommand const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ uint32_t vertexCount;
+ uint32_t instanceCount;
+ uint32_t firstVertex;
+ uint32_t firstInstance;
+ };
+ static_assert( sizeof( DrawIndirectCommand ) == sizeof( VkDrawIndirectCommand ), "struct and wrapper have different size!" );
+
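For illustration only (the counts are example values): an indirect draw record that the CPU or a compute pass might write into an indirect buffer consumed by vkCmdDrawIndirect.

    vk::DrawIndirectCommand drawCmd( 3 /*vertexCount*/, 1 /*instanceCount*/, 0 /*firstVertex*/, 0 /*firstInstance*/ );
    // Layout-compatible with VkDrawIndirectCommand, so it can be copied byte-for-byte
    // into buffer memory.
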
+ struct DrawIndexedIndirectCommand
+ {
+ DrawIndexedIndirectCommand( uint32_t indexCount_ = 0, uint32_t instanceCount_ = 0, uint32_t firstIndex_ = 0, int32_t vertexOffset_ = 0, uint32_t firstInstance_ = 0 )
+ : indexCount( indexCount_ )
+ , instanceCount( instanceCount_ )
+ , firstIndex( firstIndex_ )
+ , vertexOffset( vertexOffset_ )
+ , firstInstance( firstInstance_ )
+ {
+ }
+
+ DrawIndexedIndirectCommand( VkDrawIndexedIndirectCommand const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DrawIndexedIndirectCommand) );
+ }
+
+ DrawIndexedIndirectCommand& operator=( VkDrawIndexedIndirectCommand const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DrawIndexedIndirectCommand) );
+ return *this;
+ }
+
+ DrawIndexedIndirectCommand& setIndexCount( uint32_t indexCount_ )
+ {
+ indexCount = indexCount_;
+ return *this;
+ }
+
+ DrawIndexedIndirectCommand& setInstanceCount( uint32_t instanceCount_ )
+ {
+ instanceCount = instanceCount_;
+ return *this;
+ }
+
+ DrawIndexedIndirectCommand& setFirstIndex( uint32_t firstIndex_ )
+ {
+ firstIndex = firstIndex_;
+ return *this;
+ }
+
+ DrawIndexedIndirectCommand& setVertexOffset( int32_t vertexOffset_ )
+ {
+ vertexOffset = vertexOffset_;
+ return *this;
+ }
+
+ DrawIndexedIndirectCommand& setFirstInstance( uint32_t firstInstance_ )
+ {
+ firstInstance = firstInstance_;
+ return *this;
+ }
+
+ operator const VkDrawIndexedIndirectCommand&() const
+ {
+ return *reinterpret_cast<const VkDrawIndexedIndirectCommand*>(this);
+ }
+
+ bool operator==( DrawIndexedIndirectCommand const& rhs ) const
+ {
+ return ( indexCount == rhs.indexCount )
+ && ( instanceCount == rhs.instanceCount )
+ && ( firstIndex == rhs.firstIndex )
+ && ( vertexOffset == rhs.vertexOffset )
+ && ( firstInstance == rhs.firstInstance );
+ }
+
+ bool operator!=( DrawIndexedIndirectCommand const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ uint32_t indexCount;
+ uint32_t instanceCount;
+ uint32_t firstIndex;
+ int32_t vertexOffset;
+ uint32_t firstInstance;
+ };
+ static_assert( sizeof( DrawIndexedIndirectCommand ) == sizeof( VkDrawIndexedIndirectCommand ), "struct and wrapper have different size!" );
+
+ struct DispatchIndirectCommand
+ {
+ DispatchIndirectCommand( uint32_t x_ = 0, uint32_t y_ = 0, uint32_t z_ = 0 )
+ : x( x_ )
+ , y( y_ )
+ , z( z_ )
+ {
+ }
+
+ DispatchIndirectCommand( VkDispatchIndirectCommand const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DispatchIndirectCommand) );
+ }
+
+ DispatchIndirectCommand& operator=( VkDispatchIndirectCommand const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DispatchIndirectCommand) );
+ return *this;
+ }
+
+ DispatchIndirectCommand& setX( uint32_t x_ )
+ {
+ x = x_;
+ return *this;
+ }
+
+ DispatchIndirectCommand& setY( uint32_t y_ )
+ {
+ y = y_;
+ return *this;
+ }
+
+ DispatchIndirectCommand& setZ( uint32_t z_ )
+ {
+ z = z_;
+ return *this;
+ }
+
+ operator const VkDispatchIndirectCommand&() const
+ {
+ return *reinterpret_cast<const VkDispatchIndirectCommand*>(this);
+ }
+
+ bool operator==( DispatchIndirectCommand const& rhs ) const
+ {
+ return ( x == rhs.x )
+ && ( y == rhs.y )
+ && ( z == rhs.z );
+ }
+
+ bool operator!=( DispatchIndirectCommand const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ uint32_t x;
+ uint32_t y;
+ uint32_t z;
+ };
+ static_assert( sizeof( DispatchIndirectCommand ) == sizeof( VkDispatchIndirectCommand ), "struct and wrapper have different size!" );
+
+ struct DisplayPlanePropertiesKHR
+ {
+ operator const VkDisplayPlanePropertiesKHR&() const
+ {
+ return *reinterpret_cast<const VkDisplayPlanePropertiesKHR*>(this);
+ }
+
+ bool operator==( DisplayPlanePropertiesKHR const& rhs ) const
+ {
+ return ( currentDisplay == rhs.currentDisplay )
+ && ( currentStackIndex == rhs.currentStackIndex );
+ }
+
+ bool operator!=( DisplayPlanePropertiesKHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ DisplayKHR currentDisplay;
+ uint32_t currentStackIndex;
+ };
+ static_assert( sizeof( DisplayPlanePropertiesKHR ) == sizeof( VkDisplayPlanePropertiesKHR ), "struct and wrapper have different size!" );
+
+ struct DisplayModeParametersKHR
+ {
+ DisplayModeParametersKHR( Extent2D visibleRegion_ = Extent2D(), uint32_t refreshRate_ = 0 )
+ : visibleRegion( visibleRegion_ )
+ , refreshRate( refreshRate_ )
+ {
+ }
+
+ DisplayModeParametersKHR( VkDisplayModeParametersKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DisplayModeParametersKHR) );
+ }
+
+ DisplayModeParametersKHR& operator=( VkDisplayModeParametersKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DisplayModeParametersKHR) );
+ return *this;
+ }
+
+ DisplayModeParametersKHR& setVisibleRegion( Extent2D visibleRegion_ )
+ {
+ visibleRegion = visibleRegion_;
+ return *this;
+ }
+
+ DisplayModeParametersKHR& setRefreshRate( uint32_t refreshRate_ )
+ {
+ refreshRate = refreshRate_;
+ return *this;
+ }
+
+ operator const VkDisplayModeParametersKHR&() const
+ {
+ return *reinterpret_cast<const VkDisplayModeParametersKHR*>(this);
+ }
+
+ bool operator==( DisplayModeParametersKHR const& rhs ) const
+ {
+ return ( visibleRegion == rhs.visibleRegion )
+ && ( refreshRate == rhs.refreshRate );
+ }
+
+ bool operator!=( DisplayModeParametersKHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ Extent2D visibleRegion;
+ uint32_t refreshRate;
+ };
+ static_assert( sizeof( DisplayModeParametersKHR ) == sizeof( VkDisplayModeParametersKHR ), "struct and wrapper have different size!" );
+
+ struct DisplayModePropertiesKHR
+ {
+ operator const VkDisplayModePropertiesKHR&() const
+ {
+ return *reinterpret_cast<const VkDisplayModePropertiesKHR*>(this);
+ }
+
+ bool operator==( DisplayModePropertiesKHR const& rhs ) const
+ {
+ return ( displayMode == rhs.displayMode )
+ && ( parameters == rhs.parameters );
+ }
+
+ bool operator!=( DisplayModePropertiesKHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ DisplayModeKHR displayMode;
+ DisplayModeParametersKHR parameters;
+ };
+ static_assert( sizeof( DisplayModePropertiesKHR ) == sizeof( VkDisplayModePropertiesKHR ), "struct and wrapper have different size!" );
+
+ struct RectLayerKHR
+ {
+ RectLayerKHR( Offset2D offset_ = Offset2D(), Extent2D extent_ = Extent2D(), uint32_t layer_ = 0 )
+ : offset( offset_ )
+ , extent( extent_ )
+ , layer( layer_ )
+ {
+ }
+
+ RectLayerKHR( VkRectLayerKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(RectLayerKHR) );
+ }
+
+ RectLayerKHR& operator=( VkRectLayerKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(RectLayerKHR) );
+ return *this;
+ }
+
+ RectLayerKHR& setOffset( Offset2D offset_ )
+ {
+ offset = offset_;
+ return *this;
+ }
+
+ RectLayerKHR& setExtent( Extent2D extent_ )
+ {
+ extent = extent_;
+ return *this;
+ }
+
+ RectLayerKHR& setLayer( uint32_t layer_ )
+ {
+ layer = layer_;
+ return *this;
+ }
+
+ operator const VkRectLayerKHR&() const
+ {
+ return *reinterpret_cast<const VkRectLayerKHR*>(this);
+ }
+
+ bool operator==( RectLayerKHR const& rhs ) const
+ {
+ return ( offset == rhs.offset )
+ && ( extent == rhs.extent )
+ && ( layer == rhs.layer );
+ }
+
+ bool operator!=( RectLayerKHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ Offset2D offset;
+ Extent2D extent;
+ uint32_t layer;
+ };
+ static_assert( sizeof( RectLayerKHR ) == sizeof( VkRectLayerKHR ), "struct and wrapper have different size!" );
+
+ struct PresentRegionKHR
+ {
+ PresentRegionKHR( uint32_t rectangleCount_ = 0, const RectLayerKHR* pRectangles_ = nullptr )
+ : rectangleCount( rectangleCount_ )
+ , pRectangles( pRectangles_ )
+ {
+ }
+
+ PresentRegionKHR( VkPresentRegionKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PresentRegionKHR) );
+ }
+
+ PresentRegionKHR& operator=( VkPresentRegionKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PresentRegionKHR) );
+ return *this;
+ }
+
+ PresentRegionKHR& setRectangleCount( uint32_t rectangleCount_ )
+ {
+ rectangleCount = rectangleCount_;
+ return *this;
+ }
+
+ PresentRegionKHR& setPRectangles( const RectLayerKHR* pRectangles_ )
+ {
+ pRectangles = pRectangles_;
+ return *this;
+ }
+
+ operator const VkPresentRegionKHR&() const
+ {
+ return *reinterpret_cast<const VkPresentRegionKHR*>(this);
+ }
+
+ bool operator==( PresentRegionKHR const& rhs ) const
+ {
+ return ( rectangleCount == rhs.rectangleCount )
+ && ( pRectangles == rhs.pRectangles );
+ }
+
+ bool operator!=( PresentRegionKHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ uint32_t rectangleCount;
+ const RectLayerKHR* pRectangles;
+ };
+ static_assert( sizeof( PresentRegionKHR ) == sizeof( VkPresentRegionKHR ), "struct and wrapper have different size!" );
+
+ struct XYColorEXT
+ {
+ XYColorEXT( float x_ = 0, float y_ = 0 )
+ : x( x_ )
+ , y( y_ )
+ {
+ }
+
+ XYColorEXT( VkXYColorEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(XYColorEXT) );
+ }
+
+ XYColorEXT& operator=( VkXYColorEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(XYColorEXT) );
+ return *this;
+ }
+
+ XYColorEXT& setX( float x_ )
+ {
+ x = x_;
+ return *this;
+ }
+
+ XYColorEXT& setY( float y_ )
+ {
+ y = y_;
+ return *this;
+ }
+
+ operator const VkXYColorEXT&() const
+ {
+ return *reinterpret_cast<const VkXYColorEXT*>(this);
+ }
+
+ bool operator==( XYColorEXT const& rhs ) const
+ {
+ return ( x == rhs.x )
+ && ( y == rhs.y );
+ }
+
+ bool operator!=( XYColorEXT const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ float x;
+ float y;
+ };
+ static_assert( sizeof( XYColorEXT ) == sizeof( VkXYColorEXT ), "struct and wrapper have different size!" );
+
+ struct RefreshCycleDurationGOOGLE
+ {
+ RefreshCycleDurationGOOGLE( uint64_t refreshDuration_ = 0 )
+ : refreshDuration( refreshDuration_ )
+ {
+ }
+
+ RefreshCycleDurationGOOGLE( VkRefreshCycleDurationGOOGLE const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(RefreshCycleDurationGOOGLE) );
+ }
+
+ RefreshCycleDurationGOOGLE& operator=( VkRefreshCycleDurationGOOGLE const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(RefreshCycleDurationGOOGLE) );
+ return *this;
+ }
+
+ RefreshCycleDurationGOOGLE& setRefreshDuration( uint64_t refreshDuration_ )
+ {
+ refreshDuration = refreshDuration_;
+ return *this;
+ }
+
+ operator const VkRefreshCycleDurationGOOGLE&() const
+ {
+ return *reinterpret_cast<const VkRefreshCycleDurationGOOGLE*>(this);
+ }
+
+ bool operator==( RefreshCycleDurationGOOGLE const& rhs ) const
+ {
+ return ( refreshDuration == rhs.refreshDuration );
+ }
+
+ bool operator!=( RefreshCycleDurationGOOGLE const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ uint64_t refreshDuration;
+ };
+ static_assert( sizeof( RefreshCycleDurationGOOGLE ) == sizeof( VkRefreshCycleDurationGOOGLE ), "struct and wrapper have different size!" );
+
+ struct PastPresentationTimingGOOGLE
+ {
+ PastPresentationTimingGOOGLE( uint32_t presentID_ = 0, uint64_t desiredPresentTime_ = 0, uint64_t actualPresentTime_ = 0, uint64_t earliestPresentTime_ = 0, uint64_t presentMargin_ = 0 )
+ : presentID( presentID_ )
+ , desiredPresentTime( desiredPresentTime_ )
+ , actualPresentTime( actualPresentTime_ )
+ , earliestPresentTime( earliestPresentTime_ )
+ , presentMargin( presentMargin_ )
+ {
+ }
+
+ PastPresentationTimingGOOGLE( VkPastPresentationTimingGOOGLE const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PastPresentationTimingGOOGLE) );
+ }
+
+ PastPresentationTimingGOOGLE& operator=( VkPastPresentationTimingGOOGLE const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PastPresentationTimingGOOGLE) );
+ return *this;
+ }
+
+ PastPresentationTimingGOOGLE& setPresentID( uint32_t presentID_ )
+ {
+ presentID = presentID_;
+ return *this;
+ }
+
+ PastPresentationTimingGOOGLE& setDesiredPresentTime( uint64_t desiredPresentTime_ )
+ {
+ desiredPresentTime = desiredPresentTime_;
+ return *this;
+ }
+
+ PastPresentationTimingGOOGLE& setActualPresentTime( uint64_t actualPresentTime_ )
+ {
+ actualPresentTime = actualPresentTime_;
+ return *this;
+ }
+
+ PastPresentationTimingGOOGLE& setEarliestPresentTime( uint64_t earliestPresentTime_ )
+ {
+ earliestPresentTime = earliestPresentTime_;
+ return *this;
+ }
+
+ PastPresentationTimingGOOGLE& setPresentMargin( uint64_t presentMargin_ )
+ {
+ presentMargin = presentMargin_;
+ return *this;
+ }
+
+ operator const VkPastPresentationTimingGOOGLE&() const
+ {
+ return *reinterpret_cast<const VkPastPresentationTimingGOOGLE*>(this);
+ }
+
+ bool operator==( PastPresentationTimingGOOGLE const& rhs ) const
+ {
+ return ( presentID == rhs.presentID )
+ && ( desiredPresentTime == rhs.desiredPresentTime )
+ && ( actualPresentTime == rhs.actualPresentTime )
+ && ( earliestPresentTime == rhs.earliestPresentTime )
+ && ( presentMargin == rhs.presentMargin );
+ }
+
+ bool operator!=( PastPresentationTimingGOOGLE const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ uint32_t presentID;
+ uint64_t desiredPresentTime;
+ uint64_t actualPresentTime;
+ uint64_t earliestPresentTime;
+ uint64_t presentMargin;
+ };
+ static_assert( sizeof( PastPresentationTimingGOOGLE ) == sizeof( VkPastPresentationTimingGOOGLE ), "struct and wrapper have different size!" );
+
+ struct PresentTimeGOOGLE
+ {
+ PresentTimeGOOGLE( uint32_t presentID_ = 0, uint64_t desiredPresentTime_ = 0 )
+ : presentID( presentID_ )
+ , desiredPresentTime( desiredPresentTime_ )
+ {
+ }
+
+ PresentTimeGOOGLE( VkPresentTimeGOOGLE const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PresentTimeGOOGLE) );
+ }
+
+ PresentTimeGOOGLE& operator=( VkPresentTimeGOOGLE const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PresentTimeGOOGLE) );
+ return *this;
+ }
+
+ PresentTimeGOOGLE& setPresentID( uint32_t presentID_ )
+ {
+ presentID = presentID_;
+ return *this;
+ }
+
+ PresentTimeGOOGLE& setDesiredPresentTime( uint64_t desiredPresentTime_ )
+ {
+ desiredPresentTime = desiredPresentTime_;
+ return *this;
+ }
+
+ operator const VkPresentTimeGOOGLE&() const
+ {
+ return *reinterpret_cast<const VkPresentTimeGOOGLE*>(this);
+ }
+
+ bool operator==( PresentTimeGOOGLE const& rhs ) const
+ {
+ return ( presentID == rhs.presentID )
+ && ( desiredPresentTime == rhs.desiredPresentTime );
+ }
+
+ bool operator!=( PresentTimeGOOGLE const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ uint32_t presentID;
+ uint64_t desiredPresentTime;
+ };
+ static_assert( sizeof( PresentTimeGOOGLE ) == sizeof( VkPresentTimeGOOGLE ), "struct and wrapper have different size!" );
+
+ struct ViewportWScalingNV
+ {
+ ViewportWScalingNV( float xcoeff_ = 0, float ycoeff_ = 0 )
+ : xcoeff( xcoeff_ )
+ , ycoeff( ycoeff_ )
+ {
+ }
+
+ ViewportWScalingNV( VkViewportWScalingNV const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ViewportWScalingNV) );
+ }
+
+ ViewportWScalingNV& operator=( VkViewportWScalingNV const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ViewportWScalingNV) );
+ return *this;
+ }
+
+ ViewportWScalingNV& setXcoeff( float xcoeff_ )
+ {
+ xcoeff = xcoeff_;
+ return *this;
+ }
+
+ ViewportWScalingNV& setYcoeff( float ycoeff_ )
+ {
+ ycoeff = ycoeff_;
+ return *this;
+ }
+
+ operator const VkViewportWScalingNV&() const
+ {
+ return *reinterpret_cast<const VkViewportWScalingNV*>(this);
+ }
+
+ bool operator==( ViewportWScalingNV const& rhs ) const
+ {
+ return ( xcoeff == rhs.xcoeff )
+ && ( ycoeff == rhs.ycoeff );
+ }
+
+ bool operator!=( ViewportWScalingNV const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ float xcoeff;
+ float ycoeff;
+ };
+ static_assert( sizeof( ViewportWScalingNV ) == sizeof( VkViewportWScalingNV ), "struct and wrapper have different size!" );
+
+ enum class ImageLayout
+ {
+ eUndefined = VK_IMAGE_LAYOUT_UNDEFINED,
+ eGeneral = VK_IMAGE_LAYOUT_GENERAL,
+ eColorAttachmentOptimal = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+ eDepthStencilAttachmentOptimal = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
+ eDepthStencilReadOnlyOptimal = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
+ eShaderReadOnlyOptimal = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
+ eTransferSrcOptimal = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ eTransferDstOptimal = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ ePreinitialized = VK_IMAGE_LAYOUT_PREINITIALIZED,
+ ePresentSrcKHR = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
+ eSharedPresentKHR = VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR
+ };
+
+ struct DescriptorImageInfo
+ {
+ DescriptorImageInfo( Sampler sampler_ = Sampler(), ImageView imageView_ = ImageView(), ImageLayout imageLayout_ = ImageLayout::eUndefined )
+ : sampler( sampler_ )
+ , imageView( imageView_ )
+ , imageLayout( imageLayout_ )
+ {
+ }
+
+ DescriptorImageInfo( VkDescriptorImageInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DescriptorImageInfo) );
+ }
+
+ DescriptorImageInfo& operator=( VkDescriptorImageInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DescriptorImageInfo) );
+ return *this;
+ }
+
+ DescriptorImageInfo& setSampler( Sampler sampler_ )
+ {
+ sampler = sampler_;
+ return *this;
+ }
+
+ DescriptorImageInfo& setImageView( ImageView imageView_ )
+ {
+ imageView = imageView_;
+ return *this;
+ }
+
+ DescriptorImageInfo& setImageLayout( ImageLayout imageLayout_ )
+ {
+ imageLayout = imageLayout_;
+ return *this;
+ }
+
+ operator const VkDescriptorImageInfo&() const
+ {
+ return *reinterpret_cast<const VkDescriptorImageInfo*>(this);
+ }
+
+ bool operator==( DescriptorImageInfo const& rhs ) const
+ {
+ return ( sampler == rhs.sampler )
+ && ( imageView == rhs.imageView )
+ && ( imageLayout == rhs.imageLayout );
+ }
+
+ bool operator!=( DescriptorImageInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ Sampler sampler;
+ ImageView imageView;
+ ImageLayout imageLayout;
+ };
+ static_assert( sizeof( DescriptorImageInfo ) == sizeof( VkDescriptorImageInfo ), "struct and wrapper have different size!" );
+
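A minimal sketch, assuming sampler and imageView are previously created vk::Sampler / vk::ImageView handles (both names are illustrative):

    vk::DescriptorImageInfo imageInfo = vk::DescriptorImageInfo()
        .setSampler( sampler )
        .setImageView( imageView )
        .setImageLayout( vk::ImageLayout::eShaderReadOnlyOptimal );
    // Suitable for a combined-image-sampler descriptor write.
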
+ struct AttachmentReference
+ {
+ AttachmentReference( uint32_t attachment_ = 0, ImageLayout layout_ = ImageLayout::eUndefined )
+ : attachment( attachment_ )
+ , layout( layout_ )
+ {
+ }
+
+ AttachmentReference( VkAttachmentReference const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(AttachmentReference) );
+ }
+
+ AttachmentReference& operator=( VkAttachmentReference const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(AttachmentReference) );
+ return *this;
+ }
+
+ AttachmentReference& setAttachment( uint32_t attachment_ )
+ {
+ attachment = attachment_;
+ return *this;
+ }
+
+ AttachmentReference& setLayout( ImageLayout layout_ )
+ {
+ layout = layout_;
+ return *this;
+ }
+
+ operator const VkAttachmentReference&() const
+ {
+ return *reinterpret_cast<const VkAttachmentReference*>(this);
+ }
+
+ bool operator==( AttachmentReference const& rhs ) const
+ {
+ return ( attachment == rhs.attachment )
+ && ( layout == rhs.layout );
+ }
+
+ bool operator!=( AttachmentReference const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ uint32_t attachment;
+ ImageLayout layout;
+ };
+ static_assert( sizeof( AttachmentReference ) == sizeof( VkAttachmentReference ), "struct and wrapper have different size!" );
+
+ enum class AttachmentLoadOp
+ {
+ eLoad = VK_ATTACHMENT_LOAD_OP_LOAD,
+ eClear = VK_ATTACHMENT_LOAD_OP_CLEAR,
+ eDontCare = VK_ATTACHMENT_LOAD_OP_DONT_CARE
+ };
+
+ enum class AttachmentStoreOp
+ {
+ eStore = VK_ATTACHMENT_STORE_OP_STORE,
+ eDontCare = VK_ATTACHMENT_STORE_OP_DONT_CARE
+ };
+
+ enum class ImageType
+ {
+ e1D = VK_IMAGE_TYPE_1D,
+ e2D = VK_IMAGE_TYPE_2D,
+ e3D = VK_IMAGE_TYPE_3D
+ };
+
+ enum class ImageTiling
+ {
+ eOptimal = VK_IMAGE_TILING_OPTIMAL,
+ eLinear = VK_IMAGE_TILING_LINEAR
+ };
+
+ enum class ImageViewType
+ {
+ e1D = VK_IMAGE_VIEW_TYPE_1D,
+ e2D = VK_IMAGE_VIEW_TYPE_2D,
+ e3D = VK_IMAGE_VIEW_TYPE_3D,
+ eCube = VK_IMAGE_VIEW_TYPE_CUBE,
+ e1DArray = VK_IMAGE_VIEW_TYPE_1D_ARRAY,
+ e2DArray = VK_IMAGE_VIEW_TYPE_2D_ARRAY,
+ eCubeArray = VK_IMAGE_VIEW_TYPE_CUBE_ARRAY
+ };
+
+ enum class CommandBufferLevel
+ {
+ ePrimary = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+ eSecondary = VK_COMMAND_BUFFER_LEVEL_SECONDARY
+ };
+
+ enum class ComponentSwizzle
+ {
+ eIdentity = VK_COMPONENT_SWIZZLE_IDENTITY,
+ eZero = VK_COMPONENT_SWIZZLE_ZERO,
+ eOne = VK_COMPONENT_SWIZZLE_ONE,
+ eR = VK_COMPONENT_SWIZZLE_R,
+ eG = VK_COMPONENT_SWIZZLE_G,
+ eB = VK_COMPONENT_SWIZZLE_B,
+ eA = VK_COMPONENT_SWIZZLE_A
+ };
+
+ struct ComponentMapping
+ {
+ ComponentMapping( ComponentSwizzle r_ = ComponentSwizzle::eIdentity, ComponentSwizzle g_ = ComponentSwizzle::eIdentity, ComponentSwizzle b_ = ComponentSwizzle::eIdentity, ComponentSwizzle a_ = ComponentSwizzle::eIdentity )
+ : r( r_ )
+ , g( g_ )
+ , b( b_ )
+ , a( a_ )
+ {
+ }
+
+ ComponentMapping( VkComponentMapping const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ComponentMapping) );
+ }
+
+ ComponentMapping& operator=( VkComponentMapping const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ComponentMapping) );
+ return *this;
+ }
+
+ ComponentMapping& setR( ComponentSwizzle r_ )
+ {
+ r = r_;
+ return *this;
+ }
+
+ ComponentMapping& setG( ComponentSwizzle g_ )
+ {
+ g = g_;
+ return *this;
+ }
+
+ ComponentMapping& setB( ComponentSwizzle b_ )
+ {
+ b = b_;
+ return *this;
+ }
+
+ ComponentMapping& setA( ComponentSwizzle a_ )
+ {
+ a = a_;
+ return *this;
+ }
+
+ operator const VkComponentMapping&() const
+ {
+ return *reinterpret_cast<const VkComponentMapping*>(this);
+ }
+
+ bool operator==( ComponentMapping const& rhs ) const
+ {
+ return ( r == rhs.r )
+ && ( g == rhs.g )
+ && ( b == rhs.b )
+ && ( a == rhs.a );
+ }
+
+ bool operator!=( ComponentMapping const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ ComponentSwizzle r;
+ ComponentSwizzle g;
+ ComponentSwizzle b;
+ ComponentSwizzle a;
+ };
+ static_assert( sizeof( ComponentMapping ) == sizeof( VkComponentMapping ), "struct and wrapper have different size!" );
+
+ enum class DescriptorType
+ {
+ eSampler = VK_DESCRIPTOR_TYPE_SAMPLER,
+ eCombinedImageSampler = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+ eSampledImage = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
+ eStorageImage = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
+ eUniformTexelBuffer = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
+ eStorageTexelBuffer = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
+ eUniformBuffer = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
+ eStorageBuffer = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+ eUniformBufferDynamic = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
+ eStorageBufferDynamic = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC,
+ eInputAttachment = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
+ };
+
+ struct DescriptorPoolSize
+ {
+ DescriptorPoolSize( DescriptorType type_ = DescriptorType::eSampler, uint32_t descriptorCount_ = 0 )
+ : type( type_ )
+ , descriptorCount( descriptorCount_ )
+ {
+ }
+
+ DescriptorPoolSize( VkDescriptorPoolSize const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DescriptorPoolSize) );
+ }
+
+ DescriptorPoolSize& operator=( VkDescriptorPoolSize const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DescriptorPoolSize) );
+ return *this;
+ }
+
+ DescriptorPoolSize& setType( DescriptorType type_ )
+ {
+ type = type_;
+ return *this;
+ }
+
+ DescriptorPoolSize& setDescriptorCount( uint32_t descriptorCount_ )
+ {
+ descriptorCount = descriptorCount_;
+ return *this;
+ }
+
+ operator const VkDescriptorPoolSize&() const
+ {
+ return *reinterpret_cast<const VkDescriptorPoolSize*>(this);
+ }
+
+ bool operator==( DescriptorPoolSize const& rhs ) const
+ {
+ return ( type == rhs.type )
+ && ( descriptorCount == rhs.descriptorCount );
+ }
+
+ bool operator!=( DescriptorPoolSize const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ DescriptorType type;
+ uint32_t descriptorCount;
+ };
+ static_assert( sizeof( DescriptorPoolSize ) == sizeof( VkDescriptorPoolSize ), "struct and wrapper have different size!" );
+
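As a small example (the descriptor counts are arbitrary), pool size entries requesting room for uniform-buffer and combined-image-sampler descriptors:

    std::array<vk::DescriptorPoolSize, 2> poolSizes = { {
        vk::DescriptorPoolSize( vk::DescriptorType::eUniformBuffer, 16 ),
        vk::DescriptorPoolSize( vk::DescriptorType::eCombinedImageSampler, 16 )
    } };
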
+ struct DescriptorUpdateTemplateEntryKHR
+ {
+ DescriptorUpdateTemplateEntryKHR( uint32_t dstBinding_ = 0, uint32_t dstArrayElement_ = 0, uint32_t descriptorCount_ = 0, DescriptorType descriptorType_ = DescriptorType::eSampler, size_t offset_ = 0, size_t stride_ = 0 )
+ : dstBinding( dstBinding_ )
+ , dstArrayElement( dstArrayElement_ )
+ , descriptorCount( descriptorCount_ )
+ , descriptorType( descriptorType_ )
+ , offset( offset_ )
+ , stride( stride_ )
+ {
+ }
+
+ DescriptorUpdateTemplateEntryKHR( VkDescriptorUpdateTemplateEntryKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DescriptorUpdateTemplateEntryKHR) );
+ }
+
+ DescriptorUpdateTemplateEntryKHR& operator=( VkDescriptorUpdateTemplateEntryKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DescriptorUpdateTemplateEntryKHR) );
+ return *this;
+ }
+
+ DescriptorUpdateTemplateEntryKHR& setDstBinding( uint32_t dstBinding_ )
+ {
+ dstBinding = dstBinding_;
+ return *this;
+ }
+
+ DescriptorUpdateTemplateEntryKHR& setDstArrayElement( uint32_t dstArrayElement_ )
+ {
+ dstArrayElement = dstArrayElement_;
+ return *this;
+ }
+
+ DescriptorUpdateTemplateEntryKHR& setDescriptorCount( uint32_t descriptorCount_ )
+ {
+ descriptorCount = descriptorCount_;
+ return *this;
+ }
+
+ DescriptorUpdateTemplateEntryKHR& setDescriptorType( DescriptorType descriptorType_ )
+ {
+ descriptorType = descriptorType_;
+ return *this;
+ }
+
+ DescriptorUpdateTemplateEntryKHR& setOffset( size_t offset_ )
+ {
+ offset = offset_;
+ return *this;
+ }
+
+ DescriptorUpdateTemplateEntryKHR& setStride( size_t stride_ )
+ {
+ stride = stride_;
+ return *this;
+ }
+
+ operator const VkDescriptorUpdateTemplateEntryKHR&() const
+ {
+ return *reinterpret_cast<const VkDescriptorUpdateTemplateEntryKHR*>(this);
+ }
+
+ bool operator==( DescriptorUpdateTemplateEntryKHR const& rhs ) const
+ {
+ return ( dstBinding == rhs.dstBinding )
+ && ( dstArrayElement == rhs.dstArrayElement )
+ && ( descriptorCount == rhs.descriptorCount )
+ && ( descriptorType == rhs.descriptorType )
+ && ( offset == rhs.offset )
+ && ( stride == rhs.stride );
+ }
+
+ bool operator!=( DescriptorUpdateTemplateEntryKHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ uint32_t dstBinding;
+ uint32_t dstArrayElement;
+ uint32_t descriptorCount;
+ DescriptorType descriptorType;
+ size_t offset;
+ size_t stride;
+ };
+ static_assert( sizeof( DescriptorUpdateTemplateEntryKHR ) == sizeof( VkDescriptorUpdateTemplateEntryKHR ), "struct and wrapper have different size!" );
+
+ enum class QueryType
+ {
+ eOcclusion = VK_QUERY_TYPE_OCCLUSION,
+ ePipelineStatistics = VK_QUERY_TYPE_PIPELINE_STATISTICS,
+ eTimestamp = VK_QUERY_TYPE_TIMESTAMP
+ };
+
+ enum class BorderColor
+ {
+ eFloatTransparentBlack = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
+ eIntTransparentBlack = VK_BORDER_COLOR_INT_TRANSPARENT_BLACK,
+ eFloatOpaqueBlack = VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK,
+ eIntOpaqueBlack = VK_BORDER_COLOR_INT_OPAQUE_BLACK,
+ eFloatOpaqueWhite = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE,
+ eIntOpaqueWhite = VK_BORDER_COLOR_INT_OPAQUE_WHITE
+ };
+
+ enum class PipelineBindPoint
+ {
+ eGraphics = VK_PIPELINE_BIND_POINT_GRAPHICS,
+ eCompute = VK_PIPELINE_BIND_POINT_COMPUTE
+ };
+
+ enum class PipelineCacheHeaderVersion
+ {
+ eOne = VK_PIPELINE_CACHE_HEADER_VERSION_ONE
+ };
+
+ enum class PrimitiveTopology
+ {
+ ePointList = VK_PRIMITIVE_TOPOLOGY_POINT_LIST,
+ eLineList = VK_PRIMITIVE_TOPOLOGY_LINE_LIST,
+ eLineStrip = VK_PRIMITIVE_TOPOLOGY_LINE_STRIP,
+ eTriangleList = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
+ eTriangleStrip = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
+ eTriangleFan = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN,
+ eLineListWithAdjacency = VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY,
+ eLineStripWithAdjacency = VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY,
+ eTriangleListWithAdjacency = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY,
+ eTriangleStripWithAdjacency = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY,
+ ePatchList = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST
+ };
+
+ enum class SharingMode
+ {
+ eExclusive = VK_SHARING_MODE_EXCLUSIVE,
+ eConcurrent = VK_SHARING_MODE_CONCURRENT
+ };
+
+ enum class IndexType
+ {
+ eUint16 = VK_INDEX_TYPE_UINT16,
+ eUint32 = VK_INDEX_TYPE_UINT32
+ };
+
+ enum class Filter
+ {
+ eNearest = VK_FILTER_NEAREST,
+ eLinear = VK_FILTER_LINEAR,
+ eCubicIMG = VK_FILTER_CUBIC_IMG
+ };
+
+ enum class SamplerMipmapMode
+ {
+ eNearest = VK_SAMPLER_MIPMAP_MODE_NEAREST,
+ eLinear = VK_SAMPLER_MIPMAP_MODE_LINEAR
+ };
+
+ enum class SamplerAddressMode
+ {
+ eRepeat = VK_SAMPLER_ADDRESS_MODE_REPEAT,
+ eMirroredRepeat = VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT,
+ eClampToEdge = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
+ eClampToBorder = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,
+ eMirrorClampToEdge = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE
+ };
+
+ enum class CompareOp
+ {
+ eNever = VK_COMPARE_OP_NEVER,
+ eLess = VK_COMPARE_OP_LESS,
+ eEqual = VK_COMPARE_OP_EQUAL,
+ eLessOrEqual = VK_COMPARE_OP_LESS_OR_EQUAL,
+ eGreater = VK_COMPARE_OP_GREATER,
+ eNotEqual = VK_COMPARE_OP_NOT_EQUAL,
+ eGreaterOrEqual = VK_COMPARE_OP_GREATER_OR_EQUAL,
+ eAlways = VK_COMPARE_OP_ALWAYS
+ };
+
+ enum class PolygonMode
+ {
+ eFill = VK_POLYGON_MODE_FILL,
+ eLine = VK_POLYGON_MODE_LINE,
+ ePoint = VK_POLYGON_MODE_POINT
+ };
+
+ enum class CullModeFlagBits
+ {
+ eNone = VK_CULL_MODE_NONE,
+ eFront = VK_CULL_MODE_FRONT_BIT,
+ eBack = VK_CULL_MODE_BACK_BIT,
+ eFrontAndBack = VK_CULL_MODE_FRONT_AND_BACK
+ };
+
+ using CullModeFlags = Flags<CullModeFlagBits, VkCullModeFlags>;
+
+ VULKAN_HPP_INLINE CullModeFlags operator|( CullModeFlagBits bit0, CullModeFlagBits bit1 )
+ {
+ return CullModeFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE CullModeFlags operator~( CullModeFlagBits bits )
+ {
+ return ~( CullModeFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<CullModeFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(CullModeFlagBits::eNone) | VkFlags(CullModeFlagBits::eFront) | VkFlags(CullModeFlagBits::eBack) | VkFlags(CullModeFlagBits::eFrontAndBack)
+ };
+ };
+
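A hedged example of the Flags pattern used throughout this header: individual bits combine with operator| into a type-safe mask.

    vk::CullModeFlags cullMode = vk::CullModeFlagBits::eFront | vk::CullModeFlagBits::eBack;
    // Equivalent to vk::CullModeFlagBits::eFrontAndBack; the Flags<> wrapper converts to the
    // underlying VkCullModeFlags where the C API expects it.
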
+ enum class FrontFace
+ {
+ eCounterClockwise = VK_FRONT_FACE_COUNTER_CLOCKWISE,
+ eClockwise = VK_FRONT_FACE_CLOCKWISE
+ };
+
+ enum class BlendFactor
+ {
+ eZero = VK_BLEND_FACTOR_ZERO,
+ eOne = VK_BLEND_FACTOR_ONE,
+ eSrcColor = VK_BLEND_FACTOR_SRC_COLOR,
+ eOneMinusSrcColor = VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR,
+ eDstColor = VK_BLEND_FACTOR_DST_COLOR,
+ eOneMinusDstColor = VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR,
+ eSrcAlpha = VK_BLEND_FACTOR_SRC_ALPHA,
+ eOneMinusSrcAlpha = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
+ eDstAlpha = VK_BLEND_FACTOR_DST_ALPHA,
+ eOneMinusDstAlpha = VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA,
+ eConstantColor = VK_BLEND_FACTOR_CONSTANT_COLOR,
+ eOneMinusConstantColor = VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR,
+ eConstantAlpha = VK_BLEND_FACTOR_CONSTANT_ALPHA,
+ eOneMinusConstantAlpha = VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA,
+ eSrcAlphaSaturate = VK_BLEND_FACTOR_SRC_ALPHA_SATURATE,
+ eSrc1Color = VK_BLEND_FACTOR_SRC1_COLOR,
+ eOneMinusSrc1Color = VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR,
+ eSrc1Alpha = VK_BLEND_FACTOR_SRC1_ALPHA,
+ eOneMinusSrc1Alpha = VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA
+ };
+
+ enum class BlendOp
+ {
+ eAdd = VK_BLEND_OP_ADD,
+ eSubtract = VK_BLEND_OP_SUBTRACT,
+ eReverseSubtract = VK_BLEND_OP_REVERSE_SUBTRACT,
+ eMin = VK_BLEND_OP_MIN,
+ eMax = VK_BLEND_OP_MAX
+ };
+
+ enum class StencilOp
+ {
+ eKeep = VK_STENCIL_OP_KEEP,
+ eZero = VK_STENCIL_OP_ZERO,
+ eReplace = VK_STENCIL_OP_REPLACE,
+ eIncrementAndClamp = VK_STENCIL_OP_INCREMENT_AND_CLAMP,
+ eDecrementAndClamp = VK_STENCIL_OP_DECREMENT_AND_CLAMP,
+ eInvert = VK_STENCIL_OP_INVERT,
+ eIncrementAndWrap = VK_STENCIL_OP_INCREMENT_AND_WRAP,
+ eDecrementAndWrap = VK_STENCIL_OP_DECREMENT_AND_WRAP
+ };
+
+ struct StencilOpState
+ {
+ StencilOpState( StencilOp failOp_ = StencilOp::eKeep, StencilOp passOp_ = StencilOp::eKeep, StencilOp depthFailOp_ = StencilOp::eKeep, CompareOp compareOp_ = CompareOp::eNever, uint32_t compareMask_ = 0, uint32_t writeMask_ = 0, uint32_t reference_ = 0 )
+ : failOp( failOp_ )
+ , passOp( passOp_ )
+ , depthFailOp( depthFailOp_ )
+ , compareOp( compareOp_ )
+ , compareMask( compareMask_ )
+ , writeMask( writeMask_ )
+ , reference( reference_ )
+ {
+ }
+
+ StencilOpState( VkStencilOpState const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(StencilOpState) );
+ }
+
+ StencilOpState& operator=( VkStencilOpState const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(StencilOpState) );
+ return *this;
+ }
+
+ StencilOpState& setFailOp( StencilOp failOp_ )
+ {
+ failOp = failOp_;
+ return *this;
+ }
+
+ StencilOpState& setPassOp( StencilOp passOp_ )
+ {
+ passOp = passOp_;
+ return *this;
+ }
+
+ StencilOpState& setDepthFailOp( StencilOp depthFailOp_ )
+ {
+ depthFailOp = depthFailOp_;
+ return *this;
+ }
+
+ StencilOpState& setCompareOp( CompareOp compareOp_ )
+ {
+ compareOp = compareOp_;
+ return *this;
+ }
+
+ StencilOpState& setCompareMask( uint32_t compareMask_ )
+ {
+ compareMask = compareMask_;
+ return *this;
+ }
+
+ StencilOpState& setWriteMask( uint32_t writeMask_ )
+ {
+ writeMask = writeMask_;
+ return *this;
+ }
+
+ StencilOpState& setReference( uint32_t reference_ )
+ {
+ reference = reference_;
+ return *this;
+ }
+
+ operator const VkStencilOpState&() const
+ {
+ return *reinterpret_cast<const VkStencilOpState*>(this);
+ }
+
+ bool operator==( StencilOpState const& rhs ) const
+ {
+ return ( failOp == rhs.failOp )
+ && ( passOp == rhs.passOp )
+ && ( depthFailOp == rhs.depthFailOp )
+ && ( compareOp == rhs.compareOp )
+ && ( compareMask == rhs.compareMask )
+ && ( writeMask == rhs.writeMask )
+ && ( reference == rhs.reference );
+ }
+
+ bool operator!=( StencilOpState const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ StencilOp failOp;
+ StencilOp passOp;
+ StencilOp depthFailOp;
+ CompareOp compareOp;
+ uint32_t compareMask;
+ uint32_t writeMask;
+ uint32_t reference;
+ };
+ static_assert( sizeof( StencilOpState ) == sizeof( VkStencilOpState ), "struct and wrapper have different size!" );
+
+ enum class LogicOp
+ {
+ eClear = VK_LOGIC_OP_CLEAR,
+ eAnd = VK_LOGIC_OP_AND,
+ eAndReverse = VK_LOGIC_OP_AND_REVERSE,
+ eCopy = VK_LOGIC_OP_COPY,
+ eAndInverted = VK_LOGIC_OP_AND_INVERTED,
+ eNoOp = VK_LOGIC_OP_NO_OP,
+ eXor = VK_LOGIC_OP_XOR,
+ eOr = VK_LOGIC_OP_OR,
+ eNor = VK_LOGIC_OP_NOR,
+ eEquivalent = VK_LOGIC_OP_EQUIVALENT,
+ eInvert = VK_LOGIC_OP_INVERT,
+ eOrReverse = VK_LOGIC_OP_OR_REVERSE,
+ eCopyInverted = VK_LOGIC_OP_COPY_INVERTED,
+ eOrInverted = VK_LOGIC_OP_OR_INVERTED,
+ eNand = VK_LOGIC_OP_NAND,
+ eSet = VK_LOGIC_OP_SET
+ };
+
+ enum class InternalAllocationType
+ {
+ eExecutable = VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE
+ };
+
+ enum class SystemAllocationScope
+ {
+ eCommand = VK_SYSTEM_ALLOCATION_SCOPE_COMMAND,
+ eObject = VK_SYSTEM_ALLOCATION_SCOPE_OBJECT,
+ eCache = VK_SYSTEM_ALLOCATION_SCOPE_CACHE,
+ eDevice = VK_SYSTEM_ALLOCATION_SCOPE_DEVICE,
+ eInstance = VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE
+ };
+
+ enum class PhysicalDeviceType
+ {
+ eOther = VK_PHYSICAL_DEVICE_TYPE_OTHER,
+ eIntegratedGpu = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
+ eDiscreteGpu = VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU,
+ eVirtualGpu = VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU,
+ eCpu = VK_PHYSICAL_DEVICE_TYPE_CPU
+ };
+
+ enum class VertexInputRate
+ {
+ eVertex = VK_VERTEX_INPUT_RATE_VERTEX,
+ eInstance = VK_VERTEX_INPUT_RATE_INSTANCE
+ };
+
+ struct VertexInputBindingDescription
+ {
+ VertexInputBindingDescription( uint32_t binding_ = 0, uint32_t stride_ = 0, VertexInputRate inputRate_ = VertexInputRate::eVertex )
+ : binding( binding_ )
+ , stride( stride_ )
+ , inputRate( inputRate_ )
+ {
+ }
+
+ VertexInputBindingDescription( VkVertexInputBindingDescription const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(VertexInputBindingDescription) );
+ }
+
+ VertexInputBindingDescription& operator=( VkVertexInputBindingDescription const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(VertexInputBindingDescription) );
+ return *this;
+ }
+
+ VertexInputBindingDescription& setBinding( uint32_t binding_ )
+ {
+ binding = binding_;
+ return *this;
+ }
+
+ VertexInputBindingDescription& setStride( uint32_t stride_ )
+ {
+ stride = stride_;
+ return *this;
+ }
+
+ VertexInputBindingDescription& setInputRate( VertexInputRate inputRate_ )
+ {
+ inputRate = inputRate_;
+ return *this;
+ }
+
+ operator const VkVertexInputBindingDescription&() const
+ {
+ return *reinterpret_cast<const VkVertexInputBindingDescription*>(this);
+ }
+
+ bool operator==( VertexInputBindingDescription const& rhs ) const
+ {
+ return ( binding == rhs.binding )
+ && ( stride == rhs.stride )
+ && ( inputRate == rhs.inputRate );
+ }
+
+ bool operator!=( VertexInputBindingDescription const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ uint32_t binding;
+ uint32_t stride;
+ VertexInputRate inputRate;
+ };
+ static_assert( sizeof( VertexInputBindingDescription ) == sizeof( VkVertexInputBindingDescription ), "struct and wrapper have different size!" );
+
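An illustrative sketch (the Vertex struct is an assumed application type, not part of this header): describing a per-vertex binding at slot 0.

    struct Vertex { float position[3]; float uv[2]; };
    vk::VertexInputBindingDescription binding = vk::VertexInputBindingDescription()
        .setBinding( 0 )
        .setStride( sizeof( Vertex ) )
        .setInputRate( vk::VertexInputRate::eVertex );
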
+ enum class Format
+ {
+ eUndefined = VK_FORMAT_UNDEFINED,
+ eR4G4UnormPack8 = VK_FORMAT_R4G4_UNORM_PACK8,
+ eR4G4B4A4UnormPack16 = VK_FORMAT_R4G4B4A4_UNORM_PACK16,
+ eB4G4R4A4UnormPack16 = VK_FORMAT_B4G4R4A4_UNORM_PACK16,
+ eR5G6B5UnormPack16 = VK_FORMAT_R5G6B5_UNORM_PACK16,
+ eB5G6R5UnormPack16 = VK_FORMAT_B5G6R5_UNORM_PACK16,
+ eR5G5B5A1UnormPack16 = VK_FORMAT_R5G5B5A1_UNORM_PACK16,
+ eB5G5R5A1UnormPack16 = VK_FORMAT_B5G5R5A1_UNORM_PACK16,
+ eA1R5G5B5UnormPack16 = VK_FORMAT_A1R5G5B5_UNORM_PACK16,
+ eR8Unorm = VK_FORMAT_R8_UNORM,
+ eR8Snorm = VK_FORMAT_R8_SNORM,
+ eR8Uscaled = VK_FORMAT_R8_USCALED,
+ eR8Sscaled = VK_FORMAT_R8_SSCALED,
+ eR8Uint = VK_FORMAT_R8_UINT,
+ eR8Sint = VK_FORMAT_R8_SINT,
+ eR8Srgb = VK_FORMAT_R8_SRGB,
+ eR8G8Unorm = VK_FORMAT_R8G8_UNORM,
+ eR8G8Snorm = VK_FORMAT_R8G8_SNORM,
+ eR8G8Uscaled = VK_FORMAT_R8G8_USCALED,
+ eR8G8Sscaled = VK_FORMAT_R8G8_SSCALED,
+ eR8G8Uint = VK_FORMAT_R8G8_UINT,
+ eR8G8Sint = VK_FORMAT_R8G8_SINT,
+ eR8G8Srgb = VK_FORMAT_R8G8_SRGB,
+ eR8G8B8Unorm = VK_FORMAT_R8G8B8_UNORM,
+ eR8G8B8Snorm = VK_FORMAT_R8G8B8_SNORM,
+ eR8G8B8Uscaled = VK_FORMAT_R8G8B8_USCALED,
+ eR8G8B8Sscaled = VK_FORMAT_R8G8B8_SSCALED,
+ eR8G8B8Uint = VK_FORMAT_R8G8B8_UINT,
+ eR8G8B8Sint = VK_FORMAT_R8G8B8_SINT,
+ eR8G8B8Srgb = VK_FORMAT_R8G8B8_SRGB,
+ eB8G8R8Unorm = VK_FORMAT_B8G8R8_UNORM,
+ eB8G8R8Snorm = VK_FORMAT_B8G8R8_SNORM,
+ eB8G8R8Uscaled = VK_FORMAT_B8G8R8_USCALED,
+ eB8G8R8Sscaled = VK_FORMAT_B8G8R8_SSCALED,
+ eB8G8R8Uint = VK_FORMAT_B8G8R8_UINT,
+ eB8G8R8Sint = VK_FORMAT_B8G8R8_SINT,
+ eB8G8R8Srgb = VK_FORMAT_B8G8R8_SRGB,
+ eR8G8B8A8Unorm = VK_FORMAT_R8G8B8A8_UNORM,
+ eR8G8B8A8Snorm = VK_FORMAT_R8G8B8A8_SNORM,
+ eR8G8B8A8Uscaled = VK_FORMAT_R8G8B8A8_USCALED,
+ eR8G8B8A8Sscaled = VK_FORMAT_R8G8B8A8_SSCALED,
+ eR8G8B8A8Uint = VK_FORMAT_R8G8B8A8_UINT,
+ eR8G8B8A8Sint = VK_FORMAT_R8G8B8A8_SINT,
+ eR8G8B8A8Srgb = VK_FORMAT_R8G8B8A8_SRGB,
+ eB8G8R8A8Unorm = VK_FORMAT_B8G8R8A8_UNORM,
+ eB8G8R8A8Snorm = VK_FORMAT_B8G8R8A8_SNORM,
+ eB8G8R8A8Uscaled = VK_FORMAT_B8G8R8A8_USCALED,
+ eB8G8R8A8Sscaled = VK_FORMAT_B8G8R8A8_SSCALED,
+ eB8G8R8A8Uint = VK_FORMAT_B8G8R8A8_UINT,
+ eB8G8R8A8Sint = VK_FORMAT_B8G8R8A8_SINT,
+ eB8G8R8A8Srgb = VK_FORMAT_B8G8R8A8_SRGB,
+ eA8B8G8R8UnormPack32 = VK_FORMAT_A8B8G8R8_UNORM_PACK32,
+ eA8B8G8R8SnormPack32 = VK_FORMAT_A8B8G8R8_SNORM_PACK32,
+ eA8B8G8R8UscaledPack32 = VK_FORMAT_A8B8G8R8_USCALED_PACK32,
+ eA8B8G8R8SscaledPack32 = VK_FORMAT_A8B8G8R8_SSCALED_PACK32,
+ eA8B8G8R8UintPack32 = VK_FORMAT_A8B8G8R8_UINT_PACK32,
+ eA8B8G8R8SintPack32 = VK_FORMAT_A8B8G8R8_SINT_PACK32,
+ eA8B8G8R8SrgbPack32 = VK_FORMAT_A8B8G8R8_SRGB_PACK32,
+ eA2R10G10B10UnormPack32 = VK_FORMAT_A2R10G10B10_UNORM_PACK32,
+ eA2R10G10B10SnormPack32 = VK_FORMAT_A2R10G10B10_SNORM_PACK32,
+ eA2R10G10B10UscaledPack32 = VK_FORMAT_A2R10G10B10_USCALED_PACK32,
+ eA2R10G10B10SscaledPack32 = VK_FORMAT_A2R10G10B10_SSCALED_PACK32,
+ eA2R10G10B10UintPack32 = VK_FORMAT_A2R10G10B10_UINT_PACK32,
+ eA2R10G10B10SintPack32 = VK_FORMAT_A2R10G10B10_SINT_PACK32,
+ eA2B10G10R10UnormPack32 = VK_FORMAT_A2B10G10R10_UNORM_PACK32,
+ eA2B10G10R10SnormPack32 = VK_FORMAT_A2B10G10R10_SNORM_PACK32,
+ eA2B10G10R10UscaledPack32 = VK_FORMAT_A2B10G10R10_USCALED_PACK32,
+ eA2B10G10R10SscaledPack32 = VK_FORMAT_A2B10G10R10_SSCALED_PACK32,
+ eA2B10G10R10UintPack32 = VK_FORMAT_A2B10G10R10_UINT_PACK32,
+ eA2B10G10R10SintPack32 = VK_FORMAT_A2B10G10R10_SINT_PACK32,
+ eR16Unorm = VK_FORMAT_R16_UNORM,
+ eR16Snorm = VK_FORMAT_R16_SNORM,
+ eR16Uscaled = VK_FORMAT_R16_USCALED,
+ eR16Sscaled = VK_FORMAT_R16_SSCALED,
+ eR16Uint = VK_FORMAT_R16_UINT,
+ eR16Sint = VK_FORMAT_R16_SINT,
+ eR16Sfloat = VK_FORMAT_R16_SFLOAT,
+ eR16G16Unorm = VK_FORMAT_R16G16_UNORM,
+ eR16G16Snorm = VK_FORMAT_R16G16_SNORM,
+ eR16G16Uscaled = VK_FORMAT_R16G16_USCALED,
+ eR16G16Sscaled = VK_FORMAT_R16G16_SSCALED,
+ eR16G16Uint = VK_FORMAT_R16G16_UINT,
+ eR16G16Sint = VK_FORMAT_R16G16_SINT,
+ eR16G16Sfloat = VK_FORMAT_R16G16_SFLOAT,
+ eR16G16B16Unorm = VK_FORMAT_R16G16B16_UNORM,
+ eR16G16B16Snorm = VK_FORMAT_R16G16B16_SNORM,
+ eR16G16B16Uscaled = VK_FORMAT_R16G16B16_USCALED,
+ eR16G16B16Sscaled = VK_FORMAT_R16G16B16_SSCALED,
+ eR16G16B16Uint = VK_FORMAT_R16G16B16_UINT,
+ eR16G16B16Sint = VK_FORMAT_R16G16B16_SINT,
+ eR16G16B16Sfloat = VK_FORMAT_R16G16B16_SFLOAT,
+ eR16G16B16A16Unorm = VK_FORMAT_R16G16B16A16_UNORM,
+ eR16G16B16A16Snorm = VK_FORMAT_R16G16B16A16_SNORM,
+ eR16G16B16A16Uscaled = VK_FORMAT_R16G16B16A16_USCALED,
+ eR16G16B16A16Sscaled = VK_FORMAT_R16G16B16A16_SSCALED,
+ eR16G16B16A16Uint = VK_FORMAT_R16G16B16A16_UINT,
+ eR16G16B16A16Sint = VK_FORMAT_R16G16B16A16_SINT,
+ eR16G16B16A16Sfloat = VK_FORMAT_R16G16B16A16_SFLOAT,
+ eR32Uint = VK_FORMAT_R32_UINT,
+ eR32Sint = VK_FORMAT_R32_SINT,
+ eR32Sfloat = VK_FORMAT_R32_SFLOAT,
+ eR32G32Uint = VK_FORMAT_R32G32_UINT,
+ eR32G32Sint = VK_FORMAT_R32G32_SINT,
+ eR32G32Sfloat = VK_FORMAT_R32G32_SFLOAT,
+ eR32G32B32Uint = VK_FORMAT_R32G32B32_UINT,
+ eR32G32B32Sint = VK_FORMAT_R32G32B32_SINT,
+ eR32G32B32Sfloat = VK_FORMAT_R32G32B32_SFLOAT,
+ eR32G32B32A32Uint = VK_FORMAT_R32G32B32A32_UINT,
+ eR32G32B32A32Sint = VK_FORMAT_R32G32B32A32_SINT,
+ eR32G32B32A32Sfloat = VK_FORMAT_R32G32B32A32_SFLOAT,
+ eR64Uint = VK_FORMAT_R64_UINT,
+ eR64Sint = VK_FORMAT_R64_SINT,
+ eR64Sfloat = VK_FORMAT_R64_SFLOAT,
+ eR64G64Uint = VK_FORMAT_R64G64_UINT,
+ eR64G64Sint = VK_FORMAT_R64G64_SINT,
+ eR64G64Sfloat = VK_FORMAT_R64G64_SFLOAT,
+ eR64G64B64Uint = VK_FORMAT_R64G64B64_UINT,
+ eR64G64B64Sint = VK_FORMAT_R64G64B64_SINT,
+ eR64G64B64Sfloat = VK_FORMAT_R64G64B64_SFLOAT,
+ eR64G64B64A64Uint = VK_FORMAT_R64G64B64A64_UINT,
+ eR64G64B64A64Sint = VK_FORMAT_R64G64B64A64_SINT,
+ eR64G64B64A64Sfloat = VK_FORMAT_R64G64B64A64_SFLOAT,
+ eB10G11R11UfloatPack32 = VK_FORMAT_B10G11R11_UFLOAT_PACK32,
+ eE5B9G9R9UfloatPack32 = VK_FORMAT_E5B9G9R9_UFLOAT_PACK32,
+ eD16Unorm = VK_FORMAT_D16_UNORM,
+ eX8D24UnormPack32 = VK_FORMAT_X8_D24_UNORM_PACK32,
+ eD32Sfloat = VK_FORMAT_D32_SFLOAT,
+ eS8Uint = VK_FORMAT_S8_UINT,
+ eD16UnormS8Uint = VK_FORMAT_D16_UNORM_S8_UINT,
+ eD24UnormS8Uint = VK_FORMAT_D24_UNORM_S8_UINT,
+ eD32SfloatS8Uint = VK_FORMAT_D32_SFLOAT_S8_UINT,
+ eBc1RgbUnormBlock = VK_FORMAT_BC1_RGB_UNORM_BLOCK,
+ eBc1RgbSrgbBlock = VK_FORMAT_BC1_RGB_SRGB_BLOCK,
+ eBc1RgbaUnormBlock = VK_FORMAT_BC1_RGBA_UNORM_BLOCK,
+ eBc1RgbaSrgbBlock = VK_FORMAT_BC1_RGBA_SRGB_BLOCK,
+ eBc2UnormBlock = VK_FORMAT_BC2_UNORM_BLOCK,
+ eBc2SrgbBlock = VK_FORMAT_BC2_SRGB_BLOCK,
+ eBc3UnormBlock = VK_FORMAT_BC3_UNORM_BLOCK,
+ eBc3SrgbBlock = VK_FORMAT_BC3_SRGB_BLOCK,
+ eBc4UnormBlock = VK_FORMAT_BC4_UNORM_BLOCK,
+ eBc4SnormBlock = VK_FORMAT_BC4_SNORM_BLOCK,
+ eBc5UnormBlock = VK_FORMAT_BC5_UNORM_BLOCK,
+ eBc5SnormBlock = VK_FORMAT_BC5_SNORM_BLOCK,
+ eBc6HUfloatBlock = VK_FORMAT_BC6H_UFLOAT_BLOCK,
+ eBc6HSfloatBlock = VK_FORMAT_BC6H_SFLOAT_BLOCK,
+ eBc7UnormBlock = VK_FORMAT_BC7_UNORM_BLOCK,
+ eBc7SrgbBlock = VK_FORMAT_BC7_SRGB_BLOCK,
+ eEtc2R8G8B8UnormBlock = VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK,
+ eEtc2R8G8B8SrgbBlock = VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK,
+ eEtc2R8G8B8A1UnormBlock = VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK,
+ eEtc2R8G8B8A1SrgbBlock = VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK,
+ eEtc2R8G8B8A8UnormBlock = VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK,
+ eEtc2R8G8B8A8SrgbBlock = VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK,
+ eEacR11UnormBlock = VK_FORMAT_EAC_R11_UNORM_BLOCK,
+ eEacR11SnormBlock = VK_FORMAT_EAC_R11_SNORM_BLOCK,
+ eEacR11G11UnormBlock = VK_FORMAT_EAC_R11G11_UNORM_BLOCK,
+ eEacR11G11SnormBlock = VK_FORMAT_EAC_R11G11_SNORM_BLOCK,
+ eAstc4x4UnormBlock = VK_FORMAT_ASTC_4x4_UNORM_BLOCK,
+ eAstc4x4SrgbBlock = VK_FORMAT_ASTC_4x4_SRGB_BLOCK,
+ eAstc5x4UnormBlock = VK_FORMAT_ASTC_5x4_UNORM_BLOCK,
+ eAstc5x4SrgbBlock = VK_FORMAT_ASTC_5x4_SRGB_BLOCK,
+ eAstc5x5UnormBlock = VK_FORMAT_ASTC_5x5_UNORM_BLOCK,
+ eAstc5x5SrgbBlock = VK_FORMAT_ASTC_5x5_SRGB_BLOCK,
+ eAstc6x5UnormBlock = VK_FORMAT_ASTC_6x5_UNORM_BLOCK,
+ eAstc6x5SrgbBlock = VK_FORMAT_ASTC_6x5_SRGB_BLOCK,
+ eAstc6x6UnormBlock = VK_FORMAT_ASTC_6x6_UNORM_BLOCK,
+ eAstc6x6SrgbBlock = VK_FORMAT_ASTC_6x6_SRGB_BLOCK,
+ eAstc8x5UnormBlock = VK_FORMAT_ASTC_8x5_UNORM_BLOCK,
+ eAstc8x5SrgbBlock = VK_FORMAT_ASTC_8x5_SRGB_BLOCK,
+ eAstc8x6UnormBlock = VK_FORMAT_ASTC_8x6_UNORM_BLOCK,
+ eAstc8x6SrgbBlock = VK_FORMAT_ASTC_8x6_SRGB_BLOCK,
+ eAstc8x8UnormBlock = VK_FORMAT_ASTC_8x8_UNORM_BLOCK,
+ eAstc8x8SrgbBlock = VK_FORMAT_ASTC_8x8_SRGB_BLOCK,
+ eAstc10x5UnormBlock = VK_FORMAT_ASTC_10x5_UNORM_BLOCK,
+ eAstc10x5SrgbBlock = VK_FORMAT_ASTC_10x5_SRGB_BLOCK,
+ eAstc10x6UnormBlock = VK_FORMAT_ASTC_10x6_UNORM_BLOCK,
+ eAstc10x6SrgbBlock = VK_FORMAT_ASTC_10x6_SRGB_BLOCK,
+ eAstc10x8UnormBlock = VK_FORMAT_ASTC_10x8_UNORM_BLOCK,
+ eAstc10x8SrgbBlock = VK_FORMAT_ASTC_10x8_SRGB_BLOCK,
+ eAstc10x10UnormBlock = VK_FORMAT_ASTC_10x10_UNORM_BLOCK,
+ eAstc10x10SrgbBlock = VK_FORMAT_ASTC_10x10_SRGB_BLOCK,
+ eAstc12x10UnormBlock = VK_FORMAT_ASTC_12x10_UNORM_BLOCK,
+ eAstc12x10SrgbBlock = VK_FORMAT_ASTC_12x10_SRGB_BLOCK,
+ eAstc12x12UnormBlock = VK_FORMAT_ASTC_12x12_UNORM_BLOCK,
+ eAstc12x12SrgbBlock = VK_FORMAT_ASTC_12x12_SRGB_BLOCK,
+ ePvrtc12BppUnormBlockIMG = VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG,
+ ePvrtc14BppUnormBlockIMG = VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG,
+ ePvrtc22BppUnormBlockIMG = VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG,
+ ePvrtc24BppUnormBlockIMG = VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG,
+ ePvrtc12BppSrgbBlockIMG = VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG,
+ ePvrtc14BppSrgbBlockIMG = VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG,
+ ePvrtc22BppSrgbBlockIMG = VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG,
+ ePvrtc24BppSrgbBlockIMG = VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG
+ };
+
+ struct VertexInputAttributeDescription
+ {
+ VertexInputAttributeDescription( uint32_t location_ = 0, uint32_t binding_ = 0, Format format_ = Format::eUndefined, uint32_t offset_ = 0 )
+ : location( location_ )
+ , binding( binding_ )
+ , format( format_ )
+ , offset( offset_ )
+ {
+ }
+
+ VertexInputAttributeDescription( VkVertexInputAttributeDescription const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(VertexInputAttributeDescription) );
+ }
+
+ VertexInputAttributeDescription& operator=( VkVertexInputAttributeDescription const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(VertexInputAttributeDescription) );
+ return *this;
+ }
+
+ VertexInputAttributeDescription& setLocation( uint32_t location_ )
+ {
+ location = location_;
+ return *this;
+ }
+
+ VertexInputAttributeDescription& setBinding( uint32_t binding_ )
+ {
+ binding = binding_;
+ return *this;
+ }
+
+ VertexInputAttributeDescription& setFormat( Format format_ )
+ {
+ format = format_;
+ return *this;
+ }
+
+ VertexInputAttributeDescription& setOffset( uint32_t offset_ )
+ {
+ offset = offset_;
+ return *this;
+ }
+
+ operator const VkVertexInputAttributeDescription&() const
+ {
+ return *reinterpret_cast<const VkVertexInputAttributeDescription*>(this);
+ }
+
+ bool operator==( VertexInputAttributeDescription const& rhs ) const
+ {
+ return ( location == rhs.location )
+ && ( binding == rhs.binding )
+ && ( format == rhs.format )
+ && ( offset == rhs.offset );
+ }
+
+ bool operator!=( VertexInputAttributeDescription const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ uint32_t location;
+ uint32_t binding;
+ Format format;
+ uint32_t offset;
+ };
+ static_assert( sizeof( VertexInputAttributeDescription ) == sizeof( VkVertexInputAttributeDescription ), "struct and wrapper have different size!" );
+
+ enum class StructureType
+ {
+ eApplicationInfo = VK_STRUCTURE_TYPE_APPLICATION_INFO,
+ eInstanceCreateInfo = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
+ eDeviceQueueCreateInfo = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
+ eDeviceCreateInfo = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
+ eSubmitInfo = VK_STRUCTURE_TYPE_SUBMIT_INFO,
+ eMemoryAllocateInfo = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+ eMappedMemoryRange = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
+ eBindSparseInfo = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,
+ eFenceCreateInfo = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
+ eSemaphoreCreateInfo = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
+ eEventCreateInfo = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO,
+ eQueryPoolCreateInfo = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
+ eBufferCreateInfo = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+ eBufferViewCreateInfo = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
+ eImageCreateInfo = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
+ eImageViewCreateInfo = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ eShaderModuleCreateInfo = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
+ ePipelineCacheCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO,
+ ePipelineShaderStageCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ ePipelineVertexInputStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
+ ePipelineInputAssemblyStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
+ ePipelineTessellationStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO,
+ ePipelineViewportStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
+ ePipelineRasterizationStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
+ ePipelineMultisampleStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
+ ePipelineDepthStencilStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
+ ePipelineColorBlendStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
+ ePipelineDynamicStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
+ eGraphicsPipelineCreateInfo = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
+ eComputePipelineCreateInfo = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
+ ePipelineLayoutCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+ eSamplerCreateInfo = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
+ eDescriptorSetLayoutCreateInfo = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+ eDescriptorPoolCreateInfo = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
+ eDescriptorSetAllocateInfo = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
+ eWriteDescriptorSet = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+ eCopyDescriptorSet = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET,
+ eFramebufferCreateInfo = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
+ eRenderPassCreateInfo = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
+ eCommandPoolCreateInfo = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
+ eCommandBufferAllocateInfo = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
+ eCommandBufferInheritanceInfo = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
+ eCommandBufferBeginInfo = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
+ eRenderPassBeginInfo = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
+ eBufferMemoryBarrier = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
+ eImageMemoryBarrier = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+ eMemoryBarrier = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
+ eLoaderInstanceCreateInfo = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO,
+ eLoaderDeviceCreateInfo = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO,
+ eSwapchainCreateInfoKHR = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
+ ePresentInfoKHR = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
+ eDisplayModeCreateInfoKHR = VK_STRUCTURE_TYPE_DISPLAY_MODE_CREATE_INFO_KHR,
+ eDisplaySurfaceCreateInfoKHR = VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR,
+ eDisplayPresentInfoKHR = VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR,
+ eXlibSurfaceCreateInfoKHR = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR,
+ eXcbSurfaceCreateInfoKHR = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR,
+ eWaylandSurfaceCreateInfoKHR = VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR,
+ eMirSurfaceCreateInfoKHR = VK_STRUCTURE_TYPE_MIR_SURFACE_CREATE_INFO_KHR,
+ eAndroidSurfaceCreateInfoKHR = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR,
+ eWin32SurfaceCreateInfoKHR = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR,
+ eDebugReportCallbackCreateInfoEXT = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT,
+ ePipelineRasterizationStateRasterizationOrderAMD = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD,
+ eDebugMarkerObjectNameInfoEXT = VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT,
+ eDebugMarkerObjectTagInfoEXT = VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_TAG_INFO_EXT,
+ eDebugMarkerMarkerInfoEXT = VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT,
+ eDedicatedAllocationImageCreateInfoNV = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV,
+ eDedicatedAllocationBufferCreateInfoNV = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV,
+ eDedicatedAllocationMemoryAllocateInfoNV = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV,
+ eRenderPassMultiviewCreateInfoKHX = VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO_KHX,
+ ePhysicalDeviceMultiviewFeaturesKHX = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHX,
+ ePhysicalDeviceMultiviewPropertiesKHX = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHX,
+ eExternalMemoryImageCreateInfoNV = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV,
+ eExportMemoryAllocateInfoNV = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV,
+ eImportMemoryWin32HandleInfoNV = VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV,
+ eExportMemoryWin32HandleInfoNV = VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV,
+ eWin32KeyedMutexAcquireReleaseInfoNV = VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV,
+ ePhysicalDeviceFeatures2KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR,
+ ePhysicalDeviceProperties2KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR,
+ eFormatProperties2KHR = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2_KHR,
+ eImageFormatProperties2KHR = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR,
+ ePhysicalDeviceImageFormatInfo2KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR,
+ eQueueFamilyProperties2KHR = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR,
+ ePhysicalDeviceMemoryProperties2KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR,
+ eSparseImageFormatProperties2KHR = VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2_KHR,
+ ePhysicalDeviceSparseImageFormatInfo2KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2_KHR,
+ eMemoryAllocateFlagsInfoKHX = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHX,
+ eBindBufferMemoryInfoKHX = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHX,
+ eBindImageMemoryInfoKHX = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHX,
+ eDeviceGroupRenderPassBeginInfoKHX = VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO_KHX,
+ eDeviceGroupCommandBufferBeginInfoKHX = VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO_KHX,
+ eDeviceGroupSubmitInfoKHX = VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO_KHX,
+ eDeviceGroupBindSparseInfoKHX = VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHX,
+ eDeviceGroupPresentCapabilitiesKHX = VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHX,
+ eImageSwapchainCreateInfoKHX = VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHX,
+ eBindImageMemorySwapchainInfoKHX = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHX,
+ eAcquireNextImageInfoKHX = VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHX,
+ eDeviceGroupPresentInfoKHX = VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHX,
+ eDeviceGroupSwapchainCreateInfoKHX = VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHX,
+ eValidationFlagsEXT = VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT,
+ eViSurfaceCreateInfoNN = VK_STRUCTURE_TYPE_VI_SURFACE_CREATE_INFO_NN,
+ ePhysicalDeviceGroupPropertiesKHX = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES_KHX,
+ eDeviceGroupDeviceCreateInfoKHX = VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO_KHX,
+ ePhysicalDeviceExternalImageFormatInfoKHX = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHX,
+ eExternalImageFormatPropertiesKHX = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHX,
+ ePhysicalDeviceExternalBufferInfoKHX = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO_KHX,
+ eExternalBufferPropertiesKHX = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES_KHX,
+ ePhysicalDeviceIdPropertiesKHX = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHX,
+ eExternalMemoryBufferCreateInfoKHX = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHX,
+ eExternalMemoryImageCreateInfoKHX = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHX,
+ eExportMemoryAllocateInfoKHX = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHX,
+ eImportMemoryWin32HandleInfoKHX = VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHX,
+ eExportMemoryWin32HandleInfoKHX = VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHX,
+ eMemoryWin32HandlePropertiesKHX = VK_STRUCTURE_TYPE_MEMORY_WIN32_HANDLE_PROPERTIES_KHX,
+ eImportMemoryFdInfoKHX = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHX,
+ eMemoryFdPropertiesKHX = VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHX,
+ eWin32KeyedMutexAcquireReleaseInfoKHX = VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHX,
+ ePhysicalDeviceExternalSemaphoreInfoKHX = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHX,
+ eExternalSemaphorePropertiesKHX = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHX,
+ eExportSemaphoreCreateInfoKHX = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHX,
+ eImportSemaphoreWin32HandleInfoKHX = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHX,
+ eExportSemaphoreWin32HandleInfoKHX = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHX,
+ eD3D12FenceSubmitInfoKHX = VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHX,
+ eImportSemaphoreFdInfoKHX = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHX,
+ ePhysicalDevicePushDescriptorPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR,
+ ePresentRegionsKHR = VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR,
+ eDescriptorUpdateTemplateCreateInfoKHR = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR,
+ eObjectTableCreateInfoNVX = VK_STRUCTURE_TYPE_OBJECT_TABLE_CREATE_INFO_NVX,
+ eIndirectCommandsLayoutCreateInfoNVX = VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NVX,
+ eCmdProcessCommandsInfoNVX = VK_STRUCTURE_TYPE_CMD_PROCESS_COMMANDS_INFO_NVX,
+ eCmdReserveSpaceForCommandsInfoNVX = VK_STRUCTURE_TYPE_CMD_RESERVE_SPACE_FOR_COMMANDS_INFO_NVX,
+ eDeviceGeneratedCommandsLimitsNVX = VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_LIMITS_NVX,
+ eDeviceGeneratedCommandsFeaturesNVX = VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_FEATURES_NVX,
+ ePipelineViewportWScalingStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV,
+ eSurfaceCapabilities2EXT = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES2_EXT,
+ eDisplayPowerInfoEXT = VK_STRUCTURE_TYPE_DISPLAY_POWER_INFO_EXT,
+ eDeviceEventInfoEXT = VK_STRUCTURE_TYPE_DEVICE_EVENT_INFO_EXT,
+ eDisplayEventInfoEXT = VK_STRUCTURE_TYPE_DISPLAY_EVENT_INFO_EXT,
+ eSwapchainCounterCreateInfoEXT = VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT,
+ ePresentTimesInfoGOOGLE = VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE,
+ ePhysicalDeviceMultiviewPerViewAttributesPropertiesNVX = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX,
+ ePipelineViewportSwizzleStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV,
+ ePhysicalDeviceDiscardRectanglePropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT,
+ ePipelineDiscardRectangleStateCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT,
+ eHdrMetadataEXT = VK_STRUCTURE_TYPE_HDR_METADATA_EXT,
+ eSharedPresentSurfaceCapabilitiesKHR = VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR,
+ ePhysicalDeviceSurfaceInfo2KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR,
+ eSurfaceCapabilities2KHR = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR,
+ eSurfaceFormat2KHR = VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR,
+ eIosSurfaceCreateInfoMVK = VK_STRUCTURE_TYPE_IOS_SURFACE_CREATE_INFO_MVK,
+ eMacosSurfaceCreateInfoMVK = VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK
+ };
+
+ struct ApplicationInfo
+ {
+ ApplicationInfo( const char* pApplicationName_ = nullptr, uint32_t applicationVersion_ = 0, const char* pEngineName_ = nullptr, uint32_t engineVersion_ = 0, uint32_t apiVersion_ = 0 )
+ : sType( StructureType::eApplicationInfo )
+ , pNext( nullptr )
+ , pApplicationName( pApplicationName_ )
+ , applicationVersion( applicationVersion_ )
+ , pEngineName( pEngineName_ )
+ , engineVersion( engineVersion_ )
+ , apiVersion( apiVersion_ )
+ {
+ }
+
+ ApplicationInfo( VkApplicationInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ApplicationInfo) );
+ }
+
+ ApplicationInfo& operator=( VkApplicationInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ApplicationInfo) );
+ return *this;
+ }
+
+ ApplicationInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ ApplicationInfo& setPApplicationName( const char* pApplicationName_ )
+ {
+ pApplicationName = pApplicationName_;
+ return *this;
+ }
+
+ ApplicationInfo& setApplicationVersion( uint32_t applicationVersion_ )
+ {
+ applicationVersion = applicationVersion_;
+ return *this;
+ }
+
+ ApplicationInfo& setPEngineName( const char* pEngineName_ )
+ {
+ pEngineName = pEngineName_;
+ return *this;
+ }
+
+ ApplicationInfo& setEngineVersion( uint32_t engineVersion_ )
+ {
+ engineVersion = engineVersion_;
+ return *this;
+ }
+
+ ApplicationInfo& setApiVersion( uint32_t apiVersion_ )
+ {
+ apiVersion = apiVersion_;
+ return *this;
+ }
+
+ operator const VkApplicationInfo&() const
+ {
+ return *reinterpret_cast<const VkApplicationInfo*>(this);
+ }
+
+ bool operator==( ApplicationInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( pApplicationName == rhs.pApplicationName )
+ && ( applicationVersion == rhs.applicationVersion )
+ && ( pEngineName == rhs.pEngineName )
+ && ( engineVersion == rhs.engineVersion )
+ && ( apiVersion == rhs.apiVersion );
+ }
+
+ bool operator!=( ApplicationInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ const char* pApplicationName;
+ uint32_t applicationVersion;
+ const char* pEngineName;
+ uint32_t engineVersion;
+ uint32_t apiVersion;
+ };
+ static_assert( sizeof( ApplicationInfo ) == sizeof( VkApplicationInfo ), "struct and wrapper have different size!" );
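+
+  // Illustrative usage sketch: the wrapper structs expose fluent set*() methods and
+  // convert implicitly to the corresponding Vk* struct, so they can be handed straight
+  // to the C API. VK_MAKE_VERSION comes from vulkan.h; the name and version literals
+  // below are placeholders chosen only for this example.
+  //
+  //   vk::ApplicationInfo appInfo = vk::ApplicationInfo()
+  //     .setPApplicationName( "hellovr_vulkan" )
+  //     .setApplicationVersion( 1 )
+  //     .setPEngineName( "No Engine" )
+  //     .setEngineVersion( 1 )
+  //     .setApiVersion( VK_MAKE_VERSION( 1, 0, 0 ) );
+  //   const VkApplicationInfo& cAppInfo = appInfo;  // implicit conversion for C interop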
+
+ struct DeviceQueueCreateInfo
+ {
+ DeviceQueueCreateInfo( DeviceQueueCreateFlags flags_ = DeviceQueueCreateFlags(), uint32_t queueFamilyIndex_ = 0, uint32_t queueCount_ = 0, const float* pQueuePriorities_ = nullptr )
+ : sType( StructureType::eDeviceQueueCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , queueFamilyIndex( queueFamilyIndex_ )
+ , queueCount( queueCount_ )
+ , pQueuePriorities( pQueuePriorities_ )
+ {
+ }
+
+ DeviceQueueCreateInfo( VkDeviceQueueCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DeviceQueueCreateInfo) );
+ }
+
+ DeviceQueueCreateInfo& operator=( VkDeviceQueueCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DeviceQueueCreateInfo) );
+ return *this;
+ }
+
+ DeviceQueueCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DeviceQueueCreateInfo& setFlags( DeviceQueueCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ DeviceQueueCreateInfo& setQueueFamilyIndex( uint32_t queueFamilyIndex_ )
+ {
+ queueFamilyIndex = queueFamilyIndex_;
+ return *this;
+ }
+
+ DeviceQueueCreateInfo& setQueueCount( uint32_t queueCount_ )
+ {
+ queueCount = queueCount_;
+ return *this;
+ }
+
+ DeviceQueueCreateInfo& setPQueuePriorities( const float* pQueuePriorities_ )
+ {
+ pQueuePriorities = pQueuePriorities_;
+ return *this;
+ }
+
+ operator const VkDeviceQueueCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkDeviceQueueCreateInfo*>(this);
+ }
+
+ bool operator==( DeviceQueueCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( queueFamilyIndex == rhs.queueFamilyIndex )
+ && ( queueCount == rhs.queueCount )
+ && ( pQueuePriorities == rhs.pQueuePriorities );
+ }
+
+ bool operator!=( DeviceQueueCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ DeviceQueueCreateFlags flags;
+ uint32_t queueFamilyIndex;
+ uint32_t queueCount;
+ const float* pQueuePriorities;
+ };
+ static_assert( sizeof( DeviceQueueCreateInfo ) == sizeof( VkDeviceQueueCreateInfo ), "struct and wrapper have different size!" );
+
+ struct DeviceCreateInfo
+ {
+ DeviceCreateInfo( DeviceCreateFlags flags_ = DeviceCreateFlags(), uint32_t queueCreateInfoCount_ = 0, const DeviceQueueCreateInfo* pQueueCreateInfos_ = nullptr, uint32_t enabledLayerCount_ = 0, const char* const* ppEnabledLayerNames_ = nullptr, uint32_t enabledExtensionCount_ = 0, const char* const* ppEnabledExtensionNames_ = nullptr, const PhysicalDeviceFeatures* pEnabledFeatures_ = nullptr )
+ : sType( StructureType::eDeviceCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , queueCreateInfoCount( queueCreateInfoCount_ )
+ , pQueueCreateInfos( pQueueCreateInfos_ )
+ , enabledLayerCount( enabledLayerCount_ )
+ , ppEnabledLayerNames( ppEnabledLayerNames_ )
+ , enabledExtensionCount( enabledExtensionCount_ )
+ , ppEnabledExtensionNames( ppEnabledExtensionNames_ )
+ , pEnabledFeatures( pEnabledFeatures_ )
+ {
+ }
+
+ DeviceCreateInfo( VkDeviceCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DeviceCreateInfo) );
+ }
+
+ DeviceCreateInfo& operator=( VkDeviceCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DeviceCreateInfo) );
+ return *this;
+ }
+
+ DeviceCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DeviceCreateInfo& setFlags( DeviceCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ DeviceCreateInfo& setQueueCreateInfoCount( uint32_t queueCreateInfoCount_ )
+ {
+ queueCreateInfoCount = queueCreateInfoCount_;
+ return *this;
+ }
+
+ DeviceCreateInfo& setPQueueCreateInfos( const DeviceQueueCreateInfo* pQueueCreateInfos_ )
+ {
+ pQueueCreateInfos = pQueueCreateInfos_;
+ return *this;
+ }
+
+ DeviceCreateInfo& setEnabledLayerCount( uint32_t enabledLayerCount_ )
+ {
+ enabledLayerCount = enabledLayerCount_;
+ return *this;
+ }
+
+ DeviceCreateInfo& setPpEnabledLayerNames( const char* const* ppEnabledLayerNames_ )
+ {
+ ppEnabledLayerNames = ppEnabledLayerNames_;
+ return *this;
+ }
+
+ DeviceCreateInfo& setEnabledExtensionCount( uint32_t enabledExtensionCount_ )
+ {
+ enabledExtensionCount = enabledExtensionCount_;
+ return *this;
+ }
+
+ DeviceCreateInfo& setPpEnabledExtensionNames( const char* const* ppEnabledExtensionNames_ )
+ {
+ ppEnabledExtensionNames = ppEnabledExtensionNames_;
+ return *this;
+ }
+
+ DeviceCreateInfo& setPEnabledFeatures( const PhysicalDeviceFeatures* pEnabledFeatures_ )
+ {
+ pEnabledFeatures = pEnabledFeatures_;
+ return *this;
+ }
+
+ operator const VkDeviceCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkDeviceCreateInfo*>(this);
+ }
+
+ bool operator==( DeviceCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( queueCreateInfoCount == rhs.queueCreateInfoCount )
+ && ( pQueueCreateInfos == rhs.pQueueCreateInfos )
+ && ( enabledLayerCount == rhs.enabledLayerCount )
+ && ( ppEnabledLayerNames == rhs.ppEnabledLayerNames )
+ && ( enabledExtensionCount == rhs.enabledExtensionCount )
+ && ( ppEnabledExtensionNames == rhs.ppEnabledExtensionNames )
+ && ( pEnabledFeatures == rhs.pEnabledFeatures );
+ }
+
+ bool operator!=( DeviceCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ DeviceCreateFlags flags;
+ uint32_t queueCreateInfoCount;
+ const DeviceQueueCreateInfo* pQueueCreateInfos;
+ uint32_t enabledLayerCount;
+ const char* const* ppEnabledLayerNames;
+ uint32_t enabledExtensionCount;
+ const char* const* ppEnabledExtensionNames;
+ const PhysicalDeviceFeatures* pEnabledFeatures;
+ };
+ static_assert( sizeof( DeviceCreateInfo ) == sizeof( VkDeviceCreateInfo ), "struct and wrapper have different size!" );
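+
+  // Illustrative usage sketch: chaining a DeviceQueueCreateInfo into a DeviceCreateInfo.
+  // The queue priority array must stay alive until the create call. The queue family
+  // index variable is a placeholder the application would have queried earlier, and the
+  // swapchain extension macro comes from vulkan.h; both are assumptions for the example.
+  //
+  //   float queuePriority = 1.0f;
+  //   vk::DeviceQueueCreateInfo queueInfo = vk::DeviceQueueCreateInfo()
+  //     .setQueueFamilyIndex( graphicsQueueFamilyIndex )  // placeholder index
+  //     .setQueueCount( 1 )
+  //     .setPQueuePriorities( &queuePriority );
+  //   const char* deviceExtensions[] = { VK_KHR_SWAPCHAIN_EXTENSION_NAME };
+  //   vk::DeviceCreateInfo deviceInfo = vk::DeviceCreateInfo()
+  //     .setQueueCreateInfoCount( 1 )
+  //     .setPQueueCreateInfos( &queueInfo )
+  //     .setEnabledExtensionCount( 1 )
+  //     .setPpEnabledExtensionNames( deviceExtensions );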
+
+ struct InstanceCreateInfo
+ {
+ InstanceCreateInfo( InstanceCreateFlags flags_ = InstanceCreateFlags(), const ApplicationInfo* pApplicationInfo_ = nullptr, uint32_t enabledLayerCount_ = 0, const char* const* ppEnabledLayerNames_ = nullptr, uint32_t enabledExtensionCount_ = 0, const char* const* ppEnabledExtensionNames_ = nullptr )
+ : sType( StructureType::eInstanceCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , pApplicationInfo( pApplicationInfo_ )
+ , enabledLayerCount( enabledLayerCount_ )
+ , ppEnabledLayerNames( ppEnabledLayerNames_ )
+ , enabledExtensionCount( enabledExtensionCount_ )
+ , ppEnabledExtensionNames( ppEnabledExtensionNames_ )
+ {
+ }
+
+ InstanceCreateInfo( VkInstanceCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(InstanceCreateInfo) );
+ }
+
+ InstanceCreateInfo& operator=( VkInstanceCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(InstanceCreateInfo) );
+ return *this;
+ }
+
+ InstanceCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ InstanceCreateInfo& setFlags( InstanceCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ InstanceCreateInfo& setPApplicationInfo( const ApplicationInfo* pApplicationInfo_ )
+ {
+ pApplicationInfo = pApplicationInfo_;
+ return *this;
+ }
+
+ InstanceCreateInfo& setEnabledLayerCount( uint32_t enabledLayerCount_ )
+ {
+ enabledLayerCount = enabledLayerCount_;
+ return *this;
+ }
+
+ InstanceCreateInfo& setPpEnabledLayerNames( const char* const* ppEnabledLayerNames_ )
+ {
+ ppEnabledLayerNames = ppEnabledLayerNames_;
+ return *this;
+ }
+
+ InstanceCreateInfo& setEnabledExtensionCount( uint32_t enabledExtensionCount_ )
+ {
+ enabledExtensionCount = enabledExtensionCount_;
+ return *this;
+ }
+
+ InstanceCreateInfo& setPpEnabledExtensionNames( const char* const* ppEnabledExtensionNames_ )
+ {
+ ppEnabledExtensionNames = ppEnabledExtensionNames_;
+ return *this;
+ }
+
+ operator const VkInstanceCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkInstanceCreateInfo*>(this);
+ }
+
+ bool operator==( InstanceCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( pApplicationInfo == rhs.pApplicationInfo )
+ && ( enabledLayerCount == rhs.enabledLayerCount )
+ && ( ppEnabledLayerNames == rhs.ppEnabledLayerNames )
+ && ( enabledExtensionCount == rhs.enabledExtensionCount )
+ && ( ppEnabledExtensionNames == rhs.ppEnabledExtensionNames );
+ }
+
+ bool operator!=( InstanceCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ InstanceCreateFlags flags;
+ const ApplicationInfo* pApplicationInfo;
+ uint32_t enabledLayerCount;
+ const char* const* ppEnabledLayerNames;
+ uint32_t enabledExtensionCount;
+ const char* const* ppEnabledExtensionNames;
+ };
+ static_assert( sizeof( InstanceCreateInfo ) == sizeof( VkInstanceCreateInfo ), "struct and wrapper have different size!" );
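+
+  // Illustrative usage sketch: an InstanceCreateInfo points at an ApplicationInfo and
+  // lists the layer/extension names to enable. vk::createInstance is assumed to be
+  // declared elsewhere in this header, and the surface extension chosen here is only
+  // an example.
+  //
+  //   const char* instanceExtensions[] = { VK_KHR_SURFACE_EXTENSION_NAME };
+  //   vk::InstanceCreateInfo instanceInfo = vk::InstanceCreateInfo()
+  //     .setPApplicationInfo( &appInfo )
+  //     .setEnabledExtensionCount( 1 )
+  //     .setPpEnabledExtensionNames( instanceExtensions );
+  //   vk::Instance instance = vk::createInstance( instanceInfo );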
+
+ struct MemoryAllocateInfo
+ {
+ MemoryAllocateInfo( DeviceSize allocationSize_ = 0, uint32_t memoryTypeIndex_ = 0 )
+ : sType( StructureType::eMemoryAllocateInfo )
+ , pNext( nullptr )
+ , allocationSize( allocationSize_ )
+ , memoryTypeIndex( memoryTypeIndex_ )
+ {
+ }
+
+ MemoryAllocateInfo( VkMemoryAllocateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(MemoryAllocateInfo) );
+ }
+
+ MemoryAllocateInfo& operator=( VkMemoryAllocateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(MemoryAllocateInfo) );
+ return *this;
+ }
+
+ MemoryAllocateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ MemoryAllocateInfo& setAllocationSize( DeviceSize allocationSize_ )
+ {
+ allocationSize = allocationSize_;
+ return *this;
+ }
+
+ MemoryAllocateInfo& setMemoryTypeIndex( uint32_t memoryTypeIndex_ )
+ {
+ memoryTypeIndex = memoryTypeIndex_;
+ return *this;
+ }
+
+ operator const VkMemoryAllocateInfo&() const
+ {
+ return *reinterpret_cast<const VkMemoryAllocateInfo*>(this);
+ }
+
+ bool operator==( MemoryAllocateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( allocationSize == rhs.allocationSize )
+ && ( memoryTypeIndex == rhs.memoryTypeIndex );
+ }
+
+ bool operator!=( MemoryAllocateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ DeviceSize allocationSize;
+ uint32_t memoryTypeIndex;
+ };
+ static_assert( sizeof( MemoryAllocateInfo ) == sizeof( VkMemoryAllocateInfo ), "struct and wrapper have different size!" );
+
+ struct MappedMemoryRange
+ {
+ MappedMemoryRange( DeviceMemory memory_ = DeviceMemory(), DeviceSize offset_ = 0, DeviceSize size_ = 0 )
+ : sType( StructureType::eMappedMemoryRange )
+ , pNext( nullptr )
+ , memory( memory_ )
+ , offset( offset_ )
+ , size( size_ )
+ {
+ }
+
+ MappedMemoryRange( VkMappedMemoryRange const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(MappedMemoryRange) );
+ }
+
+ MappedMemoryRange& operator=( VkMappedMemoryRange const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(MappedMemoryRange) );
+ return *this;
+ }
+
+ MappedMemoryRange& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ MappedMemoryRange& setMemory( DeviceMemory memory_ )
+ {
+ memory = memory_;
+ return *this;
+ }
+
+ MappedMemoryRange& setOffset( DeviceSize offset_ )
+ {
+ offset = offset_;
+ return *this;
+ }
+
+ MappedMemoryRange& setSize( DeviceSize size_ )
+ {
+ size = size_;
+ return *this;
+ }
+
+ operator const VkMappedMemoryRange&() const
+ {
+ return *reinterpret_cast<const VkMappedMemoryRange*>(this);
+ }
+
+ bool operator==( MappedMemoryRange const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( memory == rhs.memory )
+ && ( offset == rhs.offset )
+ && ( size == rhs.size );
+ }
+
+ bool operator!=( MappedMemoryRange const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ DeviceMemory memory;
+ DeviceSize offset;
+ DeviceSize size;
+ };
+ static_assert( sizeof( MappedMemoryRange ) == sizeof( VkMappedMemoryRange ), "struct and wrapper have different size!" );
+
+ struct WriteDescriptorSet
+ {
+ WriteDescriptorSet( DescriptorSet dstSet_ = DescriptorSet(), uint32_t dstBinding_ = 0, uint32_t dstArrayElement_ = 0, uint32_t descriptorCount_ = 0, DescriptorType descriptorType_ = DescriptorType::eSampler, const DescriptorImageInfo* pImageInfo_ = nullptr, const DescriptorBufferInfo* pBufferInfo_ = nullptr, const BufferView* pTexelBufferView_ = nullptr )
+ : sType( StructureType::eWriteDescriptorSet )
+ , pNext( nullptr )
+ , dstSet( dstSet_ )
+ , dstBinding( dstBinding_ )
+ , dstArrayElement( dstArrayElement_ )
+ , descriptorCount( descriptorCount_ )
+ , descriptorType( descriptorType_ )
+ , pImageInfo( pImageInfo_ )
+ , pBufferInfo( pBufferInfo_ )
+ , pTexelBufferView( pTexelBufferView_ )
+ {
+ }
+
+ WriteDescriptorSet( VkWriteDescriptorSet const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(WriteDescriptorSet) );
+ }
+
+ WriteDescriptorSet& operator=( VkWriteDescriptorSet const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(WriteDescriptorSet) );
+ return *this;
+ }
+
+ WriteDescriptorSet& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ WriteDescriptorSet& setDstSet( DescriptorSet dstSet_ )
+ {
+ dstSet = dstSet_;
+ return *this;
+ }
+
+ WriteDescriptorSet& setDstBinding( uint32_t dstBinding_ )
+ {
+ dstBinding = dstBinding_;
+ return *this;
+ }
+
+ WriteDescriptorSet& setDstArrayElement( uint32_t dstArrayElement_ )
+ {
+ dstArrayElement = dstArrayElement_;
+ return *this;
+ }
+
+ WriteDescriptorSet& setDescriptorCount( uint32_t descriptorCount_ )
+ {
+ descriptorCount = descriptorCount_;
+ return *this;
+ }
+
+ WriteDescriptorSet& setDescriptorType( DescriptorType descriptorType_ )
+ {
+ descriptorType = descriptorType_;
+ return *this;
+ }
+
+ WriteDescriptorSet& setPImageInfo( const DescriptorImageInfo* pImageInfo_ )
+ {
+ pImageInfo = pImageInfo_;
+ return *this;
+ }
+
+ WriteDescriptorSet& setPBufferInfo( const DescriptorBufferInfo* pBufferInfo_ )
+ {
+ pBufferInfo = pBufferInfo_;
+ return *this;
+ }
+
+ WriteDescriptorSet& setPTexelBufferView( const BufferView* pTexelBufferView_ )
+ {
+ pTexelBufferView = pTexelBufferView_;
+ return *this;
+ }
+
+ operator const VkWriteDescriptorSet&() const
+ {
+ return *reinterpret_cast<const VkWriteDescriptorSet*>(this);
+ }
+
+ bool operator==( WriteDescriptorSet const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( dstSet == rhs.dstSet )
+ && ( dstBinding == rhs.dstBinding )
+ && ( dstArrayElement == rhs.dstArrayElement )
+ && ( descriptorCount == rhs.descriptorCount )
+ && ( descriptorType == rhs.descriptorType )
+ && ( pImageInfo == rhs.pImageInfo )
+ && ( pBufferInfo == rhs.pBufferInfo )
+ && ( pTexelBufferView == rhs.pTexelBufferView );
+ }
+
+ bool operator!=( WriteDescriptorSet const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ DescriptorSet dstSet;
+ uint32_t dstBinding;
+ uint32_t dstArrayElement;
+ uint32_t descriptorCount;
+ DescriptorType descriptorType;
+ const DescriptorImageInfo* pImageInfo;
+ const DescriptorBufferInfo* pBufferInfo;
+ const BufferView* pTexelBufferView;
+ };
+ static_assert( sizeof( WriteDescriptorSet ) == sizeof( VkWriteDescriptorSet ), "struct and wrapper have different size!" );
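+
+  // Illustrative usage sketch: a WriteDescriptorSet updates one binding of an existing
+  // descriptor set; only the info pointer matching descriptorType is read. The buffer,
+  // descriptor set, device handle and the DescriptorBufferInfo setters are assumptions
+  // taken from elsewhere in this header, shown only as an example.
+  //
+  //   vk::DescriptorBufferInfo bufferInfo = vk::DescriptorBufferInfo()
+  //     .setBuffer( uniformBuffer )        // placeholder buffer handle
+  //     .setOffset( 0 )
+  //     .setRange( sizeof( MyUniforms ) ); // placeholder uniform block type
+  //   vk::WriteDescriptorSet write = vk::WriteDescriptorSet()
+  //     .setDstSet( descriptorSet )        // placeholder descriptor set
+  //     .setDstBinding( 0 )
+  //     .setDescriptorCount( 1 )
+  //     .setDescriptorType( vk::DescriptorType::eUniformBuffer )
+  //     .setPBufferInfo( &bufferInfo );
+  //   device.updateDescriptorSets( 1, &write, 0, nullptr );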
+
+ struct CopyDescriptorSet
+ {
+ CopyDescriptorSet( DescriptorSet srcSet_ = DescriptorSet(), uint32_t srcBinding_ = 0, uint32_t srcArrayElement_ = 0, DescriptorSet dstSet_ = DescriptorSet(), uint32_t dstBinding_ = 0, uint32_t dstArrayElement_ = 0, uint32_t descriptorCount_ = 0 )
+ : sType( StructureType::eCopyDescriptorSet )
+ , pNext( nullptr )
+ , srcSet( srcSet_ )
+ , srcBinding( srcBinding_ )
+ , srcArrayElement( srcArrayElement_ )
+ , dstSet( dstSet_ )
+ , dstBinding( dstBinding_ )
+ , dstArrayElement( dstArrayElement_ )
+ , descriptorCount( descriptorCount_ )
+ {
+ }
+
+ CopyDescriptorSet( VkCopyDescriptorSet const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(CopyDescriptorSet) );
+ }
+
+ CopyDescriptorSet& operator=( VkCopyDescriptorSet const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(CopyDescriptorSet) );
+ return *this;
+ }
+
+ CopyDescriptorSet& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ CopyDescriptorSet& setSrcSet( DescriptorSet srcSet_ )
+ {
+ srcSet = srcSet_;
+ return *this;
+ }
+
+ CopyDescriptorSet& setSrcBinding( uint32_t srcBinding_ )
+ {
+ srcBinding = srcBinding_;
+ return *this;
+ }
+
+ CopyDescriptorSet& setSrcArrayElement( uint32_t srcArrayElement_ )
+ {
+ srcArrayElement = srcArrayElement_;
+ return *this;
+ }
+
+ CopyDescriptorSet& setDstSet( DescriptorSet dstSet_ )
+ {
+ dstSet = dstSet_;
+ return *this;
+ }
+
+ CopyDescriptorSet& setDstBinding( uint32_t dstBinding_ )
+ {
+ dstBinding = dstBinding_;
+ return *this;
+ }
+
+ CopyDescriptorSet& setDstArrayElement( uint32_t dstArrayElement_ )
+ {
+ dstArrayElement = dstArrayElement_;
+ return *this;
+ }
+
+ CopyDescriptorSet& setDescriptorCount( uint32_t descriptorCount_ )
+ {
+ descriptorCount = descriptorCount_;
+ return *this;
+ }
+
+ operator const VkCopyDescriptorSet&() const
+ {
+ return *reinterpret_cast<const VkCopyDescriptorSet*>(this);
+ }
+
+ bool operator==( CopyDescriptorSet const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( srcSet == rhs.srcSet )
+ && ( srcBinding == rhs.srcBinding )
+ && ( srcArrayElement == rhs.srcArrayElement )
+ && ( dstSet == rhs.dstSet )
+ && ( dstBinding == rhs.dstBinding )
+ && ( dstArrayElement == rhs.dstArrayElement )
+ && ( descriptorCount == rhs.descriptorCount );
+ }
+
+ bool operator!=( CopyDescriptorSet const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ DescriptorSet srcSet;
+ uint32_t srcBinding;
+ uint32_t srcArrayElement;
+ DescriptorSet dstSet;
+ uint32_t dstBinding;
+ uint32_t dstArrayElement;
+ uint32_t descriptorCount;
+ };
+ static_assert( sizeof( CopyDescriptorSet ) == sizeof( VkCopyDescriptorSet ), "struct and wrapper have different size!" );
+
+ struct BufferViewCreateInfo
+ {
+ BufferViewCreateInfo( BufferViewCreateFlags flags_ = BufferViewCreateFlags(), Buffer buffer_ = Buffer(), Format format_ = Format::eUndefined, DeviceSize offset_ = 0, DeviceSize range_ = 0 )
+ : sType( StructureType::eBufferViewCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , buffer( buffer_ )
+ , format( format_ )
+ , offset( offset_ )
+ , range( range_ )
+ {
+ }
+
+ BufferViewCreateInfo( VkBufferViewCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(BufferViewCreateInfo) );
+ }
+
+ BufferViewCreateInfo& operator=( VkBufferViewCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(BufferViewCreateInfo) );
+ return *this;
+ }
+
+ BufferViewCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ BufferViewCreateInfo& setFlags( BufferViewCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ BufferViewCreateInfo& setBuffer( Buffer buffer_ )
+ {
+ buffer = buffer_;
+ return *this;
+ }
+
+ BufferViewCreateInfo& setFormat( Format format_ )
+ {
+ format = format_;
+ return *this;
+ }
+
+ BufferViewCreateInfo& setOffset( DeviceSize offset_ )
+ {
+ offset = offset_;
+ return *this;
+ }
+
+ BufferViewCreateInfo& setRange( DeviceSize range_ )
+ {
+ range = range_;
+ return *this;
+ }
+
+ operator const VkBufferViewCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkBufferViewCreateInfo*>(this);
+ }
+
+ bool operator==( BufferViewCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( buffer == rhs.buffer )
+ && ( format == rhs.format )
+ && ( offset == rhs.offset )
+ && ( range == rhs.range );
+ }
+
+ bool operator!=( BufferViewCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ BufferViewCreateFlags flags;
+ Buffer buffer;
+ Format format;
+ DeviceSize offset;
+ DeviceSize range;
+ };
+ static_assert( sizeof( BufferViewCreateInfo ) == sizeof( VkBufferViewCreateInfo ), "struct and wrapper have different size!" );
+
+ struct ShaderModuleCreateInfo
+ {
+ ShaderModuleCreateInfo( ShaderModuleCreateFlags flags_ = ShaderModuleCreateFlags(), size_t codeSize_ = 0, const uint32_t* pCode_ = nullptr )
+ : sType( StructureType::eShaderModuleCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , codeSize( codeSize_ )
+ , pCode( pCode_ )
+ {
+ }
+
+ ShaderModuleCreateInfo( VkShaderModuleCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ShaderModuleCreateInfo) );
+ }
+
+ ShaderModuleCreateInfo& operator=( VkShaderModuleCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ShaderModuleCreateInfo) );
+ return *this;
+ }
+
+ ShaderModuleCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ ShaderModuleCreateInfo& setFlags( ShaderModuleCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ ShaderModuleCreateInfo& setCodeSize( size_t codeSize_ )
+ {
+ codeSize = codeSize_;
+ return *this;
+ }
+
+ ShaderModuleCreateInfo& setPCode( const uint32_t* pCode_ )
+ {
+ pCode = pCode_;
+ return *this;
+ }
+
+ operator const VkShaderModuleCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkShaderModuleCreateInfo*>(this);
+ }
+
+ bool operator==( ShaderModuleCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( codeSize == rhs.codeSize )
+ && ( pCode == rhs.pCode );
+ }
+
+ bool operator!=( ShaderModuleCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ ShaderModuleCreateFlags flags;
+ size_t codeSize;
+ const uint32_t* pCode;
+ };
+ static_assert( sizeof( ShaderModuleCreateInfo ) == sizeof( VkShaderModuleCreateInfo ), "struct and wrapper have different size!" );
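+
+  // Illustrative usage sketch: codeSize is given in bytes while pCode points at 32-bit
+  // SPIR-V words, so the size must be scaled by sizeof(uint32_t). The loadSpirvFromDisk
+  // helper is hypothetical and stands in for whatever the application uses to read the
+  // compiled shader.
+  //
+  //   std::vector<uint32_t> spirv = loadSpirvFromDisk( "shader.vert.spv" );  // hypothetical helper
+  //   vk::ShaderModuleCreateInfo moduleInfo = vk::ShaderModuleCreateInfo()
+  //     .setCodeSize( spirv.size() * sizeof( uint32_t ) )  // bytes, not words
+  //     .setPCode( spirv.data() );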
+
+ struct DescriptorSetAllocateInfo
+ {
+ DescriptorSetAllocateInfo( DescriptorPool descriptorPool_ = DescriptorPool(), uint32_t descriptorSetCount_ = 0, const DescriptorSetLayout* pSetLayouts_ = nullptr )
+ : sType( StructureType::eDescriptorSetAllocateInfo )
+ , pNext( nullptr )
+ , descriptorPool( descriptorPool_ )
+ , descriptorSetCount( descriptorSetCount_ )
+ , pSetLayouts( pSetLayouts_ )
+ {
+ }
+
+ DescriptorSetAllocateInfo( VkDescriptorSetAllocateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DescriptorSetAllocateInfo) );
+ }
+
+ DescriptorSetAllocateInfo& operator=( VkDescriptorSetAllocateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DescriptorSetAllocateInfo) );
+ return *this;
+ }
+
+ DescriptorSetAllocateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DescriptorSetAllocateInfo& setDescriptorPool( DescriptorPool descriptorPool_ )
+ {
+ descriptorPool = descriptorPool_;
+ return *this;
+ }
+
+ DescriptorSetAllocateInfo& setDescriptorSetCount( uint32_t descriptorSetCount_ )
+ {
+ descriptorSetCount = descriptorSetCount_;
+ return *this;
+ }
+
+ DescriptorSetAllocateInfo& setPSetLayouts( const DescriptorSetLayout* pSetLayouts_ )
+ {
+ pSetLayouts = pSetLayouts_;
+ return *this;
+ }
+
+ operator const VkDescriptorSetAllocateInfo&() const
+ {
+ return *reinterpret_cast<const VkDescriptorSetAllocateInfo*>(this);
+ }
+
+ bool operator==( DescriptorSetAllocateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( descriptorPool == rhs.descriptorPool )
+ && ( descriptorSetCount == rhs.descriptorSetCount )
+ && ( pSetLayouts == rhs.pSetLayouts );
+ }
+
+ bool operator!=( DescriptorSetAllocateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ DescriptorPool descriptorPool;
+ uint32_t descriptorSetCount;
+ const DescriptorSetLayout* pSetLayouts;
+ };
+ static_assert( sizeof( DescriptorSetAllocateInfo ) == sizeof( VkDescriptorSetAllocateInfo ), "struct and wrapper have different size!" );
+
+ struct PipelineVertexInputStateCreateInfo
+ {
+ PipelineVertexInputStateCreateInfo( PipelineVertexInputStateCreateFlags flags_ = PipelineVertexInputStateCreateFlags(), uint32_t vertexBindingDescriptionCount_ = 0, const VertexInputBindingDescription* pVertexBindingDescriptions_ = nullptr, uint32_t vertexAttributeDescriptionCount_ = 0, const VertexInputAttributeDescription* pVertexAttributeDescriptions_ = nullptr )
+ : sType( StructureType::ePipelineVertexInputStateCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , vertexBindingDescriptionCount( vertexBindingDescriptionCount_ )
+ , pVertexBindingDescriptions( pVertexBindingDescriptions_ )
+ , vertexAttributeDescriptionCount( vertexAttributeDescriptionCount_ )
+ , pVertexAttributeDescriptions( pVertexAttributeDescriptions_ )
+ {
+ }
+
+ PipelineVertexInputStateCreateInfo( VkPipelineVertexInputStateCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineVertexInputStateCreateInfo) );
+ }
+
+ PipelineVertexInputStateCreateInfo& operator=( VkPipelineVertexInputStateCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineVertexInputStateCreateInfo) );
+ return *this;
+ }
+
+ PipelineVertexInputStateCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PipelineVertexInputStateCreateInfo& setFlags( PipelineVertexInputStateCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ PipelineVertexInputStateCreateInfo& setVertexBindingDescriptionCount( uint32_t vertexBindingDescriptionCount_ )
+ {
+ vertexBindingDescriptionCount = vertexBindingDescriptionCount_;
+ return *this;
+ }
+
+ PipelineVertexInputStateCreateInfo& setPVertexBindingDescriptions( const VertexInputBindingDescription* pVertexBindingDescriptions_ )
+ {
+ pVertexBindingDescriptions = pVertexBindingDescriptions_;
+ return *this;
+ }
+
+ PipelineVertexInputStateCreateInfo& setVertexAttributeDescriptionCount( uint32_t vertexAttributeDescriptionCount_ )
+ {
+ vertexAttributeDescriptionCount = vertexAttributeDescriptionCount_;
+ return *this;
+ }
+
+ PipelineVertexInputStateCreateInfo& setPVertexAttributeDescriptions( const VertexInputAttributeDescription* pVertexAttributeDescriptions_ )
+ {
+ pVertexAttributeDescriptions = pVertexAttributeDescriptions_;
+ return *this;
+ }
+
+ operator const VkPipelineVertexInputStateCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkPipelineVertexInputStateCreateInfo*>(this);
+ }
+
+ bool operator==( PipelineVertexInputStateCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( vertexBindingDescriptionCount == rhs.vertexBindingDescriptionCount )
+ && ( pVertexBindingDescriptions == rhs.pVertexBindingDescriptions )
+ && ( vertexAttributeDescriptionCount == rhs.vertexAttributeDescriptionCount )
+ && ( pVertexAttributeDescriptions == rhs.pVertexAttributeDescriptions );
+ }
+
+ bool operator!=( PipelineVertexInputStateCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ PipelineVertexInputStateCreateFlags flags;
+ uint32_t vertexBindingDescriptionCount;
+ const VertexInputBindingDescription* pVertexBindingDescriptions;
+ uint32_t vertexAttributeDescriptionCount;
+ const VertexInputAttributeDescription* pVertexAttributeDescriptions;
+ };
+ static_assert( sizeof( PipelineVertexInputStateCreateInfo ) == sizeof( VkPipelineVertexInputStateCreateInfo ), "struct and wrapper have different size!" );
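+
+  // Illustrative usage sketch: the vertex input state ties together the binding and
+  // attribute descriptions defined earlier in this header. The Vertex struct and its
+  // layout are placeholders for the example.
+  //
+  //   vk::VertexInputBindingDescription binding( 0, sizeof( Vertex ), vk::VertexInputRate::eVertex );
+  //   vk::VertexInputAttributeDescription attribute( 0, 0, vk::Format::eR32G32B32Sfloat, 0 );
+  //   vk::PipelineVertexInputStateCreateInfo vertexInput = vk::PipelineVertexInputStateCreateInfo()
+  //     .setVertexBindingDescriptionCount( 1 )
+  //     .setPVertexBindingDescriptions( &binding )
+  //     .setVertexAttributeDescriptionCount( 1 )
+  //     .setPVertexAttributeDescriptions( &attribute );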
+
+ struct PipelineInputAssemblyStateCreateInfo
+ {
+ PipelineInputAssemblyStateCreateInfo( PipelineInputAssemblyStateCreateFlags flags_ = PipelineInputAssemblyStateCreateFlags(), PrimitiveTopology topology_ = PrimitiveTopology::ePointList, Bool32 primitiveRestartEnable_ = 0 )
+ : sType( StructureType::ePipelineInputAssemblyStateCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , topology( topology_ )
+ , primitiveRestartEnable( primitiveRestartEnable_ )
+ {
+ }
+
+ PipelineInputAssemblyStateCreateInfo( VkPipelineInputAssemblyStateCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineInputAssemblyStateCreateInfo) );
+ }
+
+ PipelineInputAssemblyStateCreateInfo& operator=( VkPipelineInputAssemblyStateCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineInputAssemblyStateCreateInfo) );
+ return *this;
+ }
+
+ PipelineInputAssemblyStateCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PipelineInputAssemblyStateCreateInfo& setFlags( PipelineInputAssemblyStateCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ PipelineInputAssemblyStateCreateInfo& setTopology( PrimitiveTopology topology_ )
+ {
+ topology = topology_;
+ return *this;
+ }
+
+ PipelineInputAssemblyStateCreateInfo& setPrimitiveRestartEnable( Bool32 primitiveRestartEnable_ )
+ {
+ primitiveRestartEnable = primitiveRestartEnable_;
+ return *this;
+ }
+
+ operator const VkPipelineInputAssemblyStateCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkPipelineInputAssemblyStateCreateInfo*>(this);
+ }
+
+ bool operator==( PipelineInputAssemblyStateCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( topology == rhs.topology )
+ && ( primitiveRestartEnable == rhs.primitiveRestartEnable );
+ }
+
+ bool operator!=( PipelineInputAssemblyStateCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ PipelineInputAssemblyStateCreateFlags flags;
+ PrimitiveTopology topology;
+ Bool32 primitiveRestartEnable;
+ };
+ static_assert( sizeof( PipelineInputAssemblyStateCreateInfo ) == sizeof( VkPipelineInputAssemblyStateCreateInfo ), "struct and wrapper have different size!" );
+
+ struct PipelineTessellationStateCreateInfo
+ {
+ PipelineTessellationStateCreateInfo( PipelineTessellationStateCreateFlags flags_ = PipelineTessellationStateCreateFlags(), uint32_t patchControlPoints_ = 0 )
+ : sType( StructureType::ePipelineTessellationStateCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , patchControlPoints( patchControlPoints_ )
+ {
+ }
+
+ PipelineTessellationStateCreateInfo( VkPipelineTessellationStateCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineTessellationStateCreateInfo) );
+ }
+
+ PipelineTessellationStateCreateInfo& operator=( VkPipelineTessellationStateCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineTessellationStateCreateInfo) );
+ return *this;
+ }
+
+ PipelineTessellationStateCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PipelineTessellationStateCreateInfo& setFlags( PipelineTessellationStateCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ PipelineTessellationStateCreateInfo& setPatchControlPoints( uint32_t patchControlPoints_ )
+ {
+ patchControlPoints = patchControlPoints_;
+ return *this;
+ }
+
+ operator const VkPipelineTessellationStateCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkPipelineTessellationStateCreateInfo*>(this);
+ }
+
+ bool operator==( PipelineTessellationStateCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( patchControlPoints == rhs.patchControlPoints );
+ }
+
+ bool operator!=( PipelineTessellationStateCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ PipelineTessellationStateCreateFlags flags;
+ uint32_t patchControlPoints;
+ };
+ static_assert( sizeof( PipelineTessellationStateCreateInfo ) == sizeof( VkPipelineTessellationStateCreateInfo ), "struct and wrapper have different size!" );
+
+ struct PipelineViewportStateCreateInfo
+ {
+ PipelineViewportStateCreateInfo( PipelineViewportStateCreateFlags flags_ = PipelineViewportStateCreateFlags(), uint32_t viewportCount_ = 0, const Viewport* pViewports_ = nullptr, uint32_t scissorCount_ = 0, const Rect2D* pScissors_ = nullptr )
+ : sType( StructureType::ePipelineViewportStateCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , viewportCount( viewportCount_ )
+ , pViewports( pViewports_ )
+ , scissorCount( scissorCount_ )
+ , pScissors( pScissors_ )
+ {
+ }
+
+ PipelineViewportStateCreateInfo( VkPipelineViewportStateCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineViewportStateCreateInfo) );
+ }
+
+ PipelineViewportStateCreateInfo& operator=( VkPipelineViewportStateCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineViewportStateCreateInfo) );
+ return *this;
+ }
+
+ PipelineViewportStateCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PipelineViewportStateCreateInfo& setFlags( PipelineViewportStateCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ PipelineViewportStateCreateInfo& setViewportCount( uint32_t viewportCount_ )
+ {
+ viewportCount = viewportCount_;
+ return *this;
+ }
+
+ PipelineViewportStateCreateInfo& setPViewports( const Viewport* pViewports_ )
+ {
+ pViewports = pViewports_;
+ return *this;
+ }
+
+ PipelineViewportStateCreateInfo& setScissorCount( uint32_t scissorCount_ )
+ {
+ scissorCount = scissorCount_;
+ return *this;
+ }
+
+ PipelineViewportStateCreateInfo& setPScissors( const Rect2D* pScissors_ )
+ {
+ pScissors = pScissors_;
+ return *this;
+ }
+
+ operator const VkPipelineViewportStateCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkPipelineViewportStateCreateInfo*>(this);
+ }
+
+ bool operator==( PipelineViewportStateCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( viewportCount == rhs.viewportCount )
+ && ( pViewports == rhs.pViewports )
+ && ( scissorCount == rhs.scissorCount )
+ && ( pScissors == rhs.pScissors );
+ }
+
+ bool operator!=( PipelineViewportStateCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ PipelineViewportStateCreateFlags flags;
+ uint32_t viewportCount;
+ const Viewport* pViewports;
+ uint32_t scissorCount;
+ const Rect2D* pScissors;
+ };
+ static_assert( sizeof( PipelineViewportStateCreateInfo ) == sizeof( VkPipelineViewportStateCreateInfo ), "struct and wrapper have different size!" );
+
+ struct PipelineRasterizationStateCreateInfo
+ {
+ PipelineRasterizationStateCreateInfo( PipelineRasterizationStateCreateFlags flags_ = PipelineRasterizationStateCreateFlags(), Bool32 depthClampEnable_ = 0, Bool32 rasterizerDiscardEnable_ = 0, PolygonMode polygonMode_ = PolygonMode::eFill, CullModeFlags cullMode_ = CullModeFlags(), FrontFace frontFace_ = FrontFace::eCounterClockwise, Bool32 depthBiasEnable_ = 0, float depthBiasConstantFactor_ = 0, float depthBiasClamp_ = 0, float depthBiasSlopeFactor_ = 0, float lineWidth_ = 0 )
+ : sType( StructureType::ePipelineRasterizationStateCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , depthClampEnable( depthClampEnable_ )
+ , rasterizerDiscardEnable( rasterizerDiscardEnable_ )
+ , polygonMode( polygonMode_ )
+ , cullMode( cullMode_ )
+ , frontFace( frontFace_ )
+ , depthBiasEnable( depthBiasEnable_ )
+ , depthBiasConstantFactor( depthBiasConstantFactor_ )
+ , depthBiasClamp( depthBiasClamp_ )
+ , depthBiasSlopeFactor( depthBiasSlopeFactor_ )
+ , lineWidth( lineWidth_ )
+ {
+ }
+
+ PipelineRasterizationStateCreateInfo( VkPipelineRasterizationStateCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineRasterizationStateCreateInfo) );
+ }
+
+ PipelineRasterizationStateCreateInfo& operator=( VkPipelineRasterizationStateCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineRasterizationStateCreateInfo) );
+ return *this;
+ }
+
+ PipelineRasterizationStateCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PipelineRasterizationStateCreateInfo& setFlags( PipelineRasterizationStateCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ PipelineRasterizationStateCreateInfo& setDepthClampEnable( Bool32 depthClampEnable_ )
+ {
+ depthClampEnable = depthClampEnable_;
+ return *this;
+ }
+
+ PipelineRasterizationStateCreateInfo& setRasterizerDiscardEnable( Bool32 rasterizerDiscardEnable_ )
+ {
+ rasterizerDiscardEnable = rasterizerDiscardEnable_;
+ return *this;
+ }
+
+ PipelineRasterizationStateCreateInfo& setPolygonMode( PolygonMode polygonMode_ )
+ {
+ polygonMode = polygonMode_;
+ return *this;
+ }
+
+ PipelineRasterizationStateCreateInfo& setCullMode( CullModeFlags cullMode_ )
+ {
+ cullMode = cullMode_;
+ return *this;
+ }
+
+ PipelineRasterizationStateCreateInfo& setFrontFace( FrontFace frontFace_ )
+ {
+ frontFace = frontFace_;
+ return *this;
+ }
+
+ PipelineRasterizationStateCreateInfo& setDepthBiasEnable( Bool32 depthBiasEnable_ )
+ {
+ depthBiasEnable = depthBiasEnable_;
+ return *this;
+ }
+
+ PipelineRasterizationStateCreateInfo& setDepthBiasConstantFactor( float depthBiasConstantFactor_ )
+ {
+ depthBiasConstantFactor = depthBiasConstantFactor_;
+ return *this;
+ }
+
+ PipelineRasterizationStateCreateInfo& setDepthBiasClamp( float depthBiasClamp_ )
+ {
+ depthBiasClamp = depthBiasClamp_;
+ return *this;
+ }
+
+ PipelineRasterizationStateCreateInfo& setDepthBiasSlopeFactor( float depthBiasSlopeFactor_ )
+ {
+ depthBiasSlopeFactor = depthBiasSlopeFactor_;
+ return *this;
+ }
+
+ PipelineRasterizationStateCreateInfo& setLineWidth( float lineWidth_ )
+ {
+ lineWidth = lineWidth_;
+ return *this;
+ }
+
+ operator const VkPipelineRasterizationStateCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkPipelineRasterizationStateCreateInfo*>(this);
+ }
+
+ bool operator==( PipelineRasterizationStateCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( depthClampEnable == rhs.depthClampEnable )
+ && ( rasterizerDiscardEnable == rhs.rasterizerDiscardEnable )
+ && ( polygonMode == rhs.polygonMode )
+ && ( cullMode == rhs.cullMode )
+ && ( frontFace == rhs.frontFace )
+ && ( depthBiasEnable == rhs.depthBiasEnable )
+ && ( depthBiasConstantFactor == rhs.depthBiasConstantFactor )
+ && ( depthBiasClamp == rhs.depthBiasClamp )
+ && ( depthBiasSlopeFactor == rhs.depthBiasSlopeFactor )
+ && ( lineWidth == rhs.lineWidth );
+ }
+
+ bool operator!=( PipelineRasterizationStateCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ PipelineRasterizationStateCreateFlags flags;
+ Bool32 depthClampEnable;
+ Bool32 rasterizerDiscardEnable;
+ PolygonMode polygonMode;
+ CullModeFlags cullMode;
+ FrontFace frontFace;
+ Bool32 depthBiasEnable;
+ float depthBiasConstantFactor;
+ float depthBiasClamp;
+ float depthBiasSlopeFactor;
+ float lineWidth;
+ };
+ static_assert( sizeof( PipelineRasterizationStateCreateInfo ) == sizeof( VkPipelineRasterizationStateCreateInfo ), "struct and wrapper have different size!" );
+
+ struct PipelineDepthStencilStateCreateInfo
+ {
+ PipelineDepthStencilStateCreateInfo( PipelineDepthStencilStateCreateFlags flags_ = PipelineDepthStencilStateCreateFlags(), Bool32 depthTestEnable_ = 0, Bool32 depthWriteEnable_ = 0, CompareOp depthCompareOp_ = CompareOp::eNever, Bool32 depthBoundsTestEnable_ = 0, Bool32 stencilTestEnable_ = 0, StencilOpState front_ = StencilOpState(), StencilOpState back_ = StencilOpState(), float minDepthBounds_ = 0, float maxDepthBounds_ = 0 )
+ : sType( StructureType::ePipelineDepthStencilStateCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , depthTestEnable( depthTestEnable_ )
+ , depthWriteEnable( depthWriteEnable_ )
+ , depthCompareOp( depthCompareOp_ )
+ , depthBoundsTestEnable( depthBoundsTestEnable_ )
+ , stencilTestEnable( stencilTestEnable_ )
+ , front( front_ )
+ , back( back_ )
+ , minDepthBounds( minDepthBounds_ )
+ , maxDepthBounds( maxDepthBounds_ )
+ {
+ }
+
+ PipelineDepthStencilStateCreateInfo( VkPipelineDepthStencilStateCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineDepthStencilStateCreateInfo) );
+ }
+
+ PipelineDepthStencilStateCreateInfo& operator=( VkPipelineDepthStencilStateCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineDepthStencilStateCreateInfo) );
+ return *this;
+ }
+
+ PipelineDepthStencilStateCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PipelineDepthStencilStateCreateInfo& setFlags( PipelineDepthStencilStateCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ PipelineDepthStencilStateCreateInfo& setDepthTestEnable( Bool32 depthTestEnable_ )
+ {
+ depthTestEnable = depthTestEnable_;
+ return *this;
+ }
+
+ PipelineDepthStencilStateCreateInfo& setDepthWriteEnable( Bool32 depthWriteEnable_ )
+ {
+ depthWriteEnable = depthWriteEnable_;
+ return *this;
+ }
+
+ PipelineDepthStencilStateCreateInfo& setDepthCompareOp( CompareOp depthCompareOp_ )
+ {
+ depthCompareOp = depthCompareOp_;
+ return *this;
+ }
+
+ PipelineDepthStencilStateCreateInfo& setDepthBoundsTestEnable( Bool32 depthBoundsTestEnable_ )
+ {
+ depthBoundsTestEnable = depthBoundsTestEnable_;
+ return *this;
+ }
+
+ PipelineDepthStencilStateCreateInfo& setStencilTestEnable( Bool32 stencilTestEnable_ )
+ {
+ stencilTestEnable = stencilTestEnable_;
+ return *this;
+ }
+
+ PipelineDepthStencilStateCreateInfo& setFront( StencilOpState front_ )
+ {
+ front = front_;
+ return *this;
+ }
+
+ PipelineDepthStencilStateCreateInfo& setBack( StencilOpState back_ )
+ {
+ back = back_;
+ return *this;
+ }
+
+ PipelineDepthStencilStateCreateInfo& setMinDepthBounds( float minDepthBounds_ )
+ {
+ minDepthBounds = minDepthBounds_;
+ return *this;
+ }
+
+ PipelineDepthStencilStateCreateInfo& setMaxDepthBounds( float maxDepthBounds_ )
+ {
+ maxDepthBounds = maxDepthBounds_;
+ return *this;
+ }
+
+ operator const VkPipelineDepthStencilStateCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkPipelineDepthStencilStateCreateInfo*>(this);
+ }
+
+ bool operator==( PipelineDepthStencilStateCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( depthTestEnable == rhs.depthTestEnable )
+ && ( depthWriteEnable == rhs.depthWriteEnable )
+ && ( depthCompareOp == rhs.depthCompareOp )
+ && ( depthBoundsTestEnable == rhs.depthBoundsTestEnable )
+ && ( stencilTestEnable == rhs.stencilTestEnable )
+ && ( front == rhs.front )
+ && ( back == rhs.back )
+ && ( minDepthBounds == rhs.minDepthBounds )
+ && ( maxDepthBounds == rhs.maxDepthBounds );
+ }
+
+ bool operator!=( PipelineDepthStencilStateCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ PipelineDepthStencilStateCreateFlags flags;
+ Bool32 depthTestEnable;
+ Bool32 depthWriteEnable;
+ CompareOp depthCompareOp;
+ Bool32 depthBoundsTestEnable;
+ Bool32 stencilTestEnable;
+ StencilOpState front;
+ StencilOpState back;
+ float minDepthBounds;
+ float maxDepthBounds;
+ };
+ static_assert( sizeof( PipelineDepthStencilStateCreateInfo ) == sizeof( VkPipelineDepthStencilStateCreateInfo ), "struct and wrapper have different size!" );
+
+ struct PipelineCacheCreateInfo
+ {
+ PipelineCacheCreateInfo( PipelineCacheCreateFlags flags_ = PipelineCacheCreateFlags(), size_t initialDataSize_ = 0, const void* pInitialData_ = nullptr )
+ : sType( StructureType::ePipelineCacheCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , initialDataSize( initialDataSize_ )
+ , pInitialData( pInitialData_ )
+ {
+ }
+
+ PipelineCacheCreateInfo( VkPipelineCacheCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineCacheCreateInfo) );
+ }
+
+ PipelineCacheCreateInfo& operator=( VkPipelineCacheCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineCacheCreateInfo) );
+ return *this;
+ }
+
+ PipelineCacheCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PipelineCacheCreateInfo& setFlags( PipelineCacheCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ PipelineCacheCreateInfo& setInitialDataSize( size_t initialDataSize_ )
+ {
+ initialDataSize = initialDataSize_;
+ return *this;
+ }
+
+ PipelineCacheCreateInfo& setPInitialData( const void* pInitialData_ )
+ {
+ pInitialData = pInitialData_;
+ return *this;
+ }
+
+ operator const VkPipelineCacheCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkPipelineCacheCreateInfo*>(this);
+ }
+
+ bool operator==( PipelineCacheCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( initialDataSize == rhs.initialDataSize )
+ && ( pInitialData == rhs.pInitialData );
+ }
+
+ bool operator!=( PipelineCacheCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ PipelineCacheCreateFlags flags;
+ size_t initialDataSize;
+ const void* pInitialData;
+ };
+ static_assert( sizeof( PipelineCacheCreateInfo ) == sizeof( VkPipelineCacheCreateInfo ), "struct and wrapper have different size!" );
+
+ struct SamplerCreateInfo
+ {
+ SamplerCreateInfo( SamplerCreateFlags flags_ = SamplerCreateFlags(), Filter magFilter_ = Filter::eNearest, Filter minFilter_ = Filter::eNearest, SamplerMipmapMode mipmapMode_ = SamplerMipmapMode::eNearest, SamplerAddressMode addressModeU_ = SamplerAddressMode::eRepeat, SamplerAddressMode addressModeV_ = SamplerAddressMode::eRepeat, SamplerAddressMode addressModeW_ = SamplerAddressMode::eRepeat, float mipLodBias_ = 0, Bool32 anisotropyEnable_ = 0, float maxAnisotropy_ = 0, Bool32 compareEnable_ = 0, CompareOp compareOp_ = CompareOp::eNever, float minLod_ = 0, float maxLod_ = 0, BorderColor borderColor_ = BorderColor::eFloatTransparentBlack, Bool32 unnormalizedCoordinates_ = 0 )
+ : sType( StructureType::eSamplerCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , magFilter( magFilter_ )
+ , minFilter( minFilter_ )
+ , mipmapMode( mipmapMode_ )
+ , addressModeU( addressModeU_ )
+ , addressModeV( addressModeV_ )
+ , addressModeW( addressModeW_ )
+ , mipLodBias( mipLodBias_ )
+ , anisotropyEnable( anisotropyEnable_ )
+ , maxAnisotropy( maxAnisotropy_ )
+ , compareEnable( compareEnable_ )
+ , compareOp( compareOp_ )
+ , minLod( minLod_ )
+ , maxLod( maxLod_ )
+ , borderColor( borderColor_ )
+ , unnormalizedCoordinates( unnormalizedCoordinates_ )
+ {
+ }
+
+ SamplerCreateInfo( VkSamplerCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SamplerCreateInfo) );
+ }
+
+ SamplerCreateInfo& operator=( VkSamplerCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SamplerCreateInfo) );
+ return *this;
+ }
+
+ SamplerCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ SamplerCreateInfo& setFlags( SamplerCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ SamplerCreateInfo& setMagFilter( Filter magFilter_ )
+ {
+ magFilter = magFilter_;
+ return *this;
+ }
+
+ SamplerCreateInfo& setMinFilter( Filter minFilter_ )
+ {
+ minFilter = minFilter_;
+ return *this;
+ }
+
+ SamplerCreateInfo& setMipmapMode( SamplerMipmapMode mipmapMode_ )
+ {
+ mipmapMode = mipmapMode_;
+ return *this;
+ }
+
+ SamplerCreateInfo& setAddressModeU( SamplerAddressMode addressModeU_ )
+ {
+ addressModeU = addressModeU_;
+ return *this;
+ }
+
+ SamplerCreateInfo& setAddressModeV( SamplerAddressMode addressModeV_ )
+ {
+ addressModeV = addressModeV_;
+ return *this;
+ }
+
+ SamplerCreateInfo& setAddressModeW( SamplerAddressMode addressModeW_ )
+ {
+ addressModeW = addressModeW_;
+ return *this;
+ }
+
+ SamplerCreateInfo& setMipLodBias( float mipLodBias_ )
+ {
+ mipLodBias = mipLodBias_;
+ return *this;
+ }
+
+ SamplerCreateInfo& setAnisotropyEnable( Bool32 anisotropyEnable_ )
+ {
+ anisotropyEnable = anisotropyEnable_;
+ return *this;
+ }
+
+ SamplerCreateInfo& setMaxAnisotropy( float maxAnisotropy_ )
+ {
+ maxAnisotropy = maxAnisotropy_;
+ return *this;
+ }
+
+ SamplerCreateInfo& setCompareEnable( Bool32 compareEnable_ )
+ {
+ compareEnable = compareEnable_;
+ return *this;
+ }
+
+ SamplerCreateInfo& setCompareOp( CompareOp compareOp_ )
+ {
+ compareOp = compareOp_;
+ return *this;
+ }
+
+ SamplerCreateInfo& setMinLod( float minLod_ )
+ {
+ minLod = minLod_;
+ return *this;
+ }
+
+ SamplerCreateInfo& setMaxLod( float maxLod_ )
+ {
+ maxLod = maxLod_;
+ return *this;
+ }
+
+ SamplerCreateInfo& setBorderColor( BorderColor borderColor_ )
+ {
+ borderColor = borderColor_;
+ return *this;
+ }
+
+ SamplerCreateInfo& setUnnormalizedCoordinates( Bool32 unnormalizedCoordinates_ )
+ {
+ unnormalizedCoordinates = unnormalizedCoordinates_;
+ return *this;
+ }
+
+ operator const VkSamplerCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkSamplerCreateInfo*>(this);
+ }
+
+ bool operator==( SamplerCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( magFilter == rhs.magFilter )
+ && ( minFilter == rhs.minFilter )
+ && ( mipmapMode == rhs.mipmapMode )
+ && ( addressModeU == rhs.addressModeU )
+ && ( addressModeV == rhs.addressModeV )
+ && ( addressModeW == rhs.addressModeW )
+ && ( mipLodBias == rhs.mipLodBias )
+ && ( anisotropyEnable == rhs.anisotropyEnable )
+ && ( maxAnisotropy == rhs.maxAnisotropy )
+ && ( compareEnable == rhs.compareEnable )
+ && ( compareOp == rhs.compareOp )
+ && ( minLod == rhs.minLod )
+ && ( maxLod == rhs.maxLod )
+ && ( borderColor == rhs.borderColor )
+ && ( unnormalizedCoordinates == rhs.unnormalizedCoordinates );
+ }
+
+ bool operator!=( SamplerCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ SamplerCreateFlags flags;
+ Filter magFilter;
+ Filter minFilter;
+ SamplerMipmapMode mipmapMode;
+ SamplerAddressMode addressModeU;
+ SamplerAddressMode addressModeV;
+ SamplerAddressMode addressModeW;
+ float mipLodBias;
+ Bool32 anisotropyEnable;
+ float maxAnisotropy;
+ Bool32 compareEnable;
+ CompareOp compareOp;
+ float minLod;
+ float maxLod;
+ BorderColor borderColor;
+ Bool32 unnormalizedCoordinates;
+ };
+ static_assert( sizeof( SamplerCreateInfo ) == sizeof( VkSamplerCreateInfo ), "struct and wrapper have different size!" );
+
+ struct CommandBufferAllocateInfo
+ {
+ CommandBufferAllocateInfo( CommandPool commandPool_ = CommandPool(), CommandBufferLevel level_ = CommandBufferLevel::ePrimary, uint32_t commandBufferCount_ = 0 )
+ : sType( StructureType::eCommandBufferAllocateInfo )
+ , pNext( nullptr )
+ , commandPool( commandPool_ )
+ , level( level_ )
+ , commandBufferCount( commandBufferCount_ )
+ {
+ }
+
+ CommandBufferAllocateInfo( VkCommandBufferAllocateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(CommandBufferAllocateInfo) );
+ }
+
+ CommandBufferAllocateInfo& operator=( VkCommandBufferAllocateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(CommandBufferAllocateInfo) );
+ return *this;
+ }
+
+ CommandBufferAllocateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ CommandBufferAllocateInfo& setCommandPool( CommandPool commandPool_ )
+ {
+ commandPool = commandPool_;
+ return *this;
+ }
+
+ CommandBufferAllocateInfo& setLevel( CommandBufferLevel level_ )
+ {
+ level = level_;
+ return *this;
+ }
+
+ CommandBufferAllocateInfo& setCommandBufferCount( uint32_t commandBufferCount_ )
+ {
+ commandBufferCount = commandBufferCount_;
+ return *this;
+ }
+
+ operator const VkCommandBufferAllocateInfo&() const
+ {
+ return *reinterpret_cast<const VkCommandBufferAllocateInfo*>(this);
+ }
+
+ bool operator==( CommandBufferAllocateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( commandPool == rhs.commandPool )
+ && ( level == rhs.level )
+ && ( commandBufferCount == rhs.commandBufferCount );
+ }
+
+ bool operator!=( CommandBufferAllocateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ CommandPool commandPool;
+ CommandBufferLevel level;
+ uint32_t commandBufferCount;
+ };
+ static_assert( sizeof( CommandBufferAllocateInfo ) == sizeof( VkCommandBufferAllocateInfo ), "struct and wrapper have different size!" );
+
+ struct RenderPassBeginInfo
+ {
+ RenderPassBeginInfo( RenderPass renderPass_ = RenderPass(), Framebuffer framebuffer_ = Framebuffer(), Rect2D renderArea_ = Rect2D(), uint32_t clearValueCount_ = 0, const ClearValue* pClearValues_ = nullptr )
+ : sType( StructureType::eRenderPassBeginInfo )
+ , pNext( nullptr )
+ , renderPass( renderPass_ )
+ , framebuffer( framebuffer_ )
+ , renderArea( renderArea_ )
+ , clearValueCount( clearValueCount_ )
+ , pClearValues( pClearValues_ )
+ {
+ }
+
+ RenderPassBeginInfo( VkRenderPassBeginInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(RenderPassBeginInfo) );
+ }
+
+ RenderPassBeginInfo& operator=( VkRenderPassBeginInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(RenderPassBeginInfo) );
+ return *this;
+ }
+
+ RenderPassBeginInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ RenderPassBeginInfo& setRenderPass( RenderPass renderPass_ )
+ {
+ renderPass = renderPass_;
+ return *this;
+ }
+
+ RenderPassBeginInfo& setFramebuffer( Framebuffer framebuffer_ )
+ {
+ framebuffer = framebuffer_;
+ return *this;
+ }
+
+ RenderPassBeginInfo& setRenderArea( Rect2D renderArea_ )
+ {
+ renderArea = renderArea_;
+ return *this;
+ }
+
+ RenderPassBeginInfo& setClearValueCount( uint32_t clearValueCount_ )
+ {
+ clearValueCount = clearValueCount_;
+ return *this;
+ }
+
+ RenderPassBeginInfo& setPClearValues( const ClearValue* pClearValues_ )
+ {
+ pClearValues = pClearValues_;
+ return *this;
+ }
+
+ operator const VkRenderPassBeginInfo&() const
+ {
+ return *reinterpret_cast<const VkRenderPassBeginInfo*>(this);
+ }
+
+ bool operator==( RenderPassBeginInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( renderPass == rhs.renderPass )
+ && ( framebuffer == rhs.framebuffer )
+ && ( renderArea == rhs.renderArea )
+ && ( clearValueCount == rhs.clearValueCount )
+ && ( pClearValues == rhs.pClearValues );
+ }
+
+ bool operator!=( RenderPassBeginInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ RenderPass renderPass;
+ Framebuffer framebuffer;
+ Rect2D renderArea;
+ uint32_t clearValueCount;
+ const ClearValue* pClearValues;
+ };
+ static_assert( sizeof( RenderPassBeginInfo ) == sizeof( VkRenderPassBeginInfo ), "struct and wrapper have different size!" );
+
+ struct EventCreateInfo
+ {
+ EventCreateInfo( EventCreateFlags flags_ = EventCreateFlags() )
+ : sType( StructureType::eEventCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ {
+ }
+
+ EventCreateInfo( VkEventCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(EventCreateInfo) );
+ }
+
+ EventCreateInfo& operator=( VkEventCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(EventCreateInfo) );
+ return *this;
+ }
+
+ EventCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ EventCreateInfo& setFlags( EventCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ operator const VkEventCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkEventCreateInfo*>(this);
+ }
+
+ bool operator==( EventCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags );
+ }
+
+ bool operator!=( EventCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ EventCreateFlags flags;
+ };
+ static_assert( sizeof( EventCreateInfo ) == sizeof( VkEventCreateInfo ), "struct and wrapper have different size!" );
+
+ struct SemaphoreCreateInfo
+ {
+ SemaphoreCreateInfo( SemaphoreCreateFlags flags_ = SemaphoreCreateFlags() )
+ : sType( StructureType::eSemaphoreCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ {
+ }
+
+ SemaphoreCreateInfo( VkSemaphoreCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SemaphoreCreateInfo) );
+ }
+
+ SemaphoreCreateInfo& operator=( VkSemaphoreCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SemaphoreCreateInfo) );
+ return *this;
+ }
+
+ SemaphoreCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ SemaphoreCreateInfo& setFlags( SemaphoreCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ operator const VkSemaphoreCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkSemaphoreCreateInfo*>(this);
+ }
+
+ bool operator==( SemaphoreCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags );
+ }
+
+ bool operator!=( SemaphoreCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ SemaphoreCreateFlags flags;
+ };
+ static_assert( sizeof( SemaphoreCreateInfo ) == sizeof( VkSemaphoreCreateInfo ), "struct and wrapper have different size!" );
+
+ struct FramebufferCreateInfo
+ {
+ FramebufferCreateInfo( FramebufferCreateFlags flags_ = FramebufferCreateFlags(), RenderPass renderPass_ = RenderPass(), uint32_t attachmentCount_ = 0, const ImageView* pAttachments_ = nullptr, uint32_t width_ = 0, uint32_t height_ = 0, uint32_t layers_ = 0 )
+ : sType( StructureType::eFramebufferCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , renderPass( renderPass_ )
+ , attachmentCount( attachmentCount_ )
+ , pAttachments( pAttachments_ )
+ , width( width_ )
+ , height( height_ )
+ , layers( layers_ )
+ {
+ }
+
+ FramebufferCreateInfo( VkFramebufferCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(FramebufferCreateInfo) );
+ }
+
+ FramebufferCreateInfo& operator=( VkFramebufferCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(FramebufferCreateInfo) );
+ return *this;
+ }
+
+ FramebufferCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ FramebufferCreateInfo& setFlags( FramebufferCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ FramebufferCreateInfo& setRenderPass( RenderPass renderPass_ )
+ {
+ renderPass = renderPass_;
+ return *this;
+ }
+
+ FramebufferCreateInfo& setAttachmentCount( uint32_t attachmentCount_ )
+ {
+ attachmentCount = attachmentCount_;
+ return *this;
+ }
+
+ FramebufferCreateInfo& setPAttachments( const ImageView* pAttachments_ )
+ {
+ pAttachments = pAttachments_;
+ return *this;
+ }
+
+ FramebufferCreateInfo& setWidth( uint32_t width_ )
+ {
+ width = width_;
+ return *this;
+ }
+
+ FramebufferCreateInfo& setHeight( uint32_t height_ )
+ {
+ height = height_;
+ return *this;
+ }
+
+ FramebufferCreateInfo& setLayers( uint32_t layers_ )
+ {
+ layers = layers_;
+ return *this;
+ }
+
+ operator const VkFramebufferCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkFramebufferCreateInfo*>(this);
+ }
+
+ bool operator==( FramebufferCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( renderPass == rhs.renderPass )
+ && ( attachmentCount == rhs.attachmentCount )
+ && ( pAttachments == rhs.pAttachments )
+ && ( width == rhs.width )
+ && ( height == rhs.height )
+ && ( layers == rhs.layers );
+ }
+
+ bool operator!=( FramebufferCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ FramebufferCreateFlags flags;
+ RenderPass renderPass;
+ uint32_t attachmentCount;
+ const ImageView* pAttachments;
+ uint32_t width;
+ uint32_t height;
+ uint32_t layers;
+ };
+ static_assert( sizeof( FramebufferCreateInfo ) == sizeof( VkFramebufferCreateInfo ), "struct and wrapper have different size!" );
+
+ struct DisplayModeCreateInfoKHR
+ {
+ DisplayModeCreateInfoKHR( DisplayModeCreateFlagsKHR flags_ = DisplayModeCreateFlagsKHR(), DisplayModeParametersKHR parameters_ = DisplayModeParametersKHR() )
+ : sType( StructureType::eDisplayModeCreateInfoKHR )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , parameters( parameters_ )
+ {
+ }
+
+ DisplayModeCreateInfoKHR( VkDisplayModeCreateInfoKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DisplayModeCreateInfoKHR) );
+ }
+
+ DisplayModeCreateInfoKHR& operator=( VkDisplayModeCreateInfoKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DisplayModeCreateInfoKHR) );
+ return *this;
+ }
+
+ DisplayModeCreateInfoKHR& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DisplayModeCreateInfoKHR& setFlags( DisplayModeCreateFlagsKHR flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ DisplayModeCreateInfoKHR& setParameters( DisplayModeParametersKHR parameters_ )
+ {
+ parameters = parameters_;
+ return *this;
+ }
+
+ operator const VkDisplayModeCreateInfoKHR&() const
+ {
+ return *reinterpret_cast<const VkDisplayModeCreateInfoKHR*>(this);
+ }
+
+ bool operator==( DisplayModeCreateInfoKHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( parameters == rhs.parameters );
+ }
+
+ bool operator!=( DisplayModeCreateInfoKHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ DisplayModeCreateFlagsKHR flags;
+ DisplayModeParametersKHR parameters;
+ };
+ static_assert( sizeof( DisplayModeCreateInfoKHR ) == sizeof( VkDisplayModeCreateInfoKHR ), "struct and wrapper have different size!" );
+
+ struct DisplayPresentInfoKHR
+ {
+ DisplayPresentInfoKHR( Rect2D srcRect_ = Rect2D(), Rect2D dstRect_ = Rect2D(), Bool32 persistent_ = 0 )
+ : sType( StructureType::eDisplayPresentInfoKHR )
+ , pNext( nullptr )
+ , srcRect( srcRect_ )
+ , dstRect( dstRect_ )
+ , persistent( persistent_ )
+ {
+ }
+
+ DisplayPresentInfoKHR( VkDisplayPresentInfoKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DisplayPresentInfoKHR) );
+ }
+
+ DisplayPresentInfoKHR& operator=( VkDisplayPresentInfoKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DisplayPresentInfoKHR) );
+ return *this;
+ }
+
+ DisplayPresentInfoKHR& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DisplayPresentInfoKHR& setSrcRect( Rect2D srcRect_ )
+ {
+ srcRect = srcRect_;
+ return *this;
+ }
+
+ DisplayPresentInfoKHR& setDstRect( Rect2D dstRect_ )
+ {
+ dstRect = dstRect_;
+ return *this;
+ }
+
+ DisplayPresentInfoKHR& setPersistent( Bool32 persistent_ )
+ {
+ persistent = persistent_;
+ return *this;
+ }
+
+ operator const VkDisplayPresentInfoKHR&() const
+ {
+ return *reinterpret_cast<const VkDisplayPresentInfoKHR*>(this);
+ }
+
+ bool operator==( DisplayPresentInfoKHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( srcRect == rhs.srcRect )
+ && ( dstRect == rhs.dstRect )
+ && ( persistent == rhs.persistent );
+ }
+
+ bool operator!=( DisplayPresentInfoKHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ Rect2D srcRect;
+ Rect2D dstRect;
+ Bool32 persistent;
+ };
+ static_assert( sizeof( DisplayPresentInfoKHR ) == sizeof( VkDisplayPresentInfoKHR ), "struct and wrapper have different size!" );
+
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+ struct AndroidSurfaceCreateInfoKHR
+ {
+ AndroidSurfaceCreateInfoKHR( AndroidSurfaceCreateFlagsKHR flags_ = AndroidSurfaceCreateFlagsKHR(), ANativeWindow* window_ = nullptr )
+ : sType( StructureType::eAndroidSurfaceCreateInfoKHR )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , window( window_ )
+ {
+ }
+
+ AndroidSurfaceCreateInfoKHR( VkAndroidSurfaceCreateInfoKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(AndroidSurfaceCreateInfoKHR) );
+ }
+
+ AndroidSurfaceCreateInfoKHR& operator=( VkAndroidSurfaceCreateInfoKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(AndroidSurfaceCreateInfoKHR) );
+ return *this;
+ }
+
+ AndroidSurfaceCreateInfoKHR& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ AndroidSurfaceCreateInfoKHR& setFlags( AndroidSurfaceCreateFlagsKHR flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ AndroidSurfaceCreateInfoKHR& setWindow( ANativeWindow* window_ )
+ {
+ window = window_;
+ return *this;
+ }
+
+ operator const VkAndroidSurfaceCreateInfoKHR&() const
+ {
+ return *reinterpret_cast<const VkAndroidSurfaceCreateInfoKHR*>(this);
+ }
+
+ bool operator==( AndroidSurfaceCreateInfoKHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( window == rhs.window );
+ }
+
+ bool operator!=( AndroidSurfaceCreateInfoKHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ AndroidSurfaceCreateFlagsKHR flags;
+ ANativeWindow* window;
+ };
+ static_assert( sizeof( AndroidSurfaceCreateInfoKHR ) == sizeof( VkAndroidSurfaceCreateInfoKHR ), "struct and wrapper have different size!" );
+#endif /*VK_USE_PLATFORM_ANDROID_KHR*/
+
+#ifdef VK_USE_PLATFORM_MIR_KHR
+ struct MirSurfaceCreateInfoKHR
+ {
+ MirSurfaceCreateInfoKHR( MirSurfaceCreateFlagsKHR flags_ = MirSurfaceCreateFlagsKHR(), MirConnection* connection_ = nullptr, MirSurface* mirSurface_ = nullptr )
+ : sType( StructureType::eMirSurfaceCreateInfoKHR )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , connection( connection_ )
+ , mirSurface( mirSurface_ )
+ {
+ }
+
+ MirSurfaceCreateInfoKHR( VkMirSurfaceCreateInfoKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(MirSurfaceCreateInfoKHR) );
+ }
+
+ MirSurfaceCreateInfoKHR& operator=( VkMirSurfaceCreateInfoKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(MirSurfaceCreateInfoKHR) );
+ return *this;
+ }
+
+ MirSurfaceCreateInfoKHR& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ MirSurfaceCreateInfoKHR& setFlags( MirSurfaceCreateFlagsKHR flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ MirSurfaceCreateInfoKHR& setConnection( MirConnection* connection_ )
+ {
+ connection = connection_;
+ return *this;
+ }
+
+ MirSurfaceCreateInfoKHR& setMirSurface( MirSurface* mirSurface_ )
+ {
+ mirSurface = mirSurface_;
+ return *this;
+ }
+
+ operator const VkMirSurfaceCreateInfoKHR&() const
+ {
+ return *reinterpret_cast<const VkMirSurfaceCreateInfoKHR*>(this);
+ }
+
+ bool operator==( MirSurfaceCreateInfoKHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( connection == rhs.connection )
+ && ( mirSurface == rhs.mirSurface );
+ }
+
+ bool operator!=( MirSurfaceCreateInfoKHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ MirSurfaceCreateFlagsKHR flags;
+ MirConnection* connection;
+ MirSurface* mirSurface;
+ };
+ static_assert( sizeof( MirSurfaceCreateInfoKHR ) == sizeof( VkMirSurfaceCreateInfoKHR ), "struct and wrapper have different size!" );
+#endif /*VK_USE_PLATFORM_MIR_KHR*/
+
+#ifdef VK_USE_PLATFORM_VI_NN
+ struct ViSurfaceCreateInfoNN
+ {
+ ViSurfaceCreateInfoNN( ViSurfaceCreateFlagsNN flags_ = ViSurfaceCreateFlagsNN(), void* window_ = nullptr )
+ : sType( StructureType::eViSurfaceCreateInfoNN )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , window( window_ )
+ {
+ }
+
+ ViSurfaceCreateInfoNN( VkViSurfaceCreateInfoNN const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ViSurfaceCreateInfoNN) );
+ }
+
+ ViSurfaceCreateInfoNN& operator=( VkViSurfaceCreateInfoNN const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ViSurfaceCreateInfoNN) );
+ return *this;
+ }
+
+ ViSurfaceCreateInfoNN& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ ViSurfaceCreateInfoNN& setFlags( ViSurfaceCreateFlagsNN flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ ViSurfaceCreateInfoNN& setWindow( void* window_ )
+ {
+ window = window_;
+ return *this;
+ }
+
+ operator const VkViSurfaceCreateInfoNN&() const
+ {
+ return *reinterpret_cast<const VkViSurfaceCreateInfoNN*>(this);
+ }
+
+ bool operator==( ViSurfaceCreateInfoNN const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( window == rhs.window );
+ }
+
+ bool operator!=( ViSurfaceCreateInfoNN const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ ViSurfaceCreateFlagsNN flags;
+ void* window;
+ };
+ static_assert( sizeof( ViSurfaceCreateInfoNN ) == sizeof( VkViSurfaceCreateInfoNN ), "struct and wrapper have different size!" );
+#endif /*VK_USE_PLATFORM_VI_NN*/
+
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+ struct WaylandSurfaceCreateInfoKHR
+ {
+ WaylandSurfaceCreateInfoKHR( WaylandSurfaceCreateFlagsKHR flags_ = WaylandSurfaceCreateFlagsKHR(), struct wl_display* display_ = nullptr, struct wl_surface* surface_ = nullptr )
+ : sType( StructureType::eWaylandSurfaceCreateInfoKHR )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , display( display_ )
+ , surface( surface_ )
+ {
+ }
+
+ WaylandSurfaceCreateInfoKHR( VkWaylandSurfaceCreateInfoKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(WaylandSurfaceCreateInfoKHR) );
+ }
+
+ WaylandSurfaceCreateInfoKHR& operator=( VkWaylandSurfaceCreateInfoKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(WaylandSurfaceCreateInfoKHR) );
+ return *this;
+ }
+
+ WaylandSurfaceCreateInfoKHR& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ WaylandSurfaceCreateInfoKHR& setFlags( WaylandSurfaceCreateFlagsKHR flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ WaylandSurfaceCreateInfoKHR& setDisplay( struct wl_display* display_ )
+ {
+ display = display_;
+ return *this;
+ }
+
+ WaylandSurfaceCreateInfoKHR& setSurface( struct wl_surface* surface_ )
+ {
+ surface = surface_;
+ return *this;
+ }
+
+ operator const VkWaylandSurfaceCreateInfoKHR&() const
+ {
+ return *reinterpret_cast<const VkWaylandSurfaceCreateInfoKHR*>(this);
+ }
+
+ bool operator==( WaylandSurfaceCreateInfoKHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( display == rhs.display )
+ && ( surface == rhs.surface );
+ }
+
+ bool operator!=( WaylandSurfaceCreateInfoKHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ WaylandSurfaceCreateFlagsKHR flags;
+ struct wl_display* display;
+ struct wl_surface* surface;
+ };
+ static_assert( sizeof( WaylandSurfaceCreateInfoKHR ) == sizeof( VkWaylandSurfaceCreateInfoKHR ), "struct and wrapper have different size!" );
+#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ struct Win32SurfaceCreateInfoKHR
+ {
+ Win32SurfaceCreateInfoKHR( Win32SurfaceCreateFlagsKHR flags_ = Win32SurfaceCreateFlagsKHR(), HINSTANCE hinstance_ = 0, HWND hwnd_ = 0 )
+ : sType( StructureType::eWin32SurfaceCreateInfoKHR )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , hinstance( hinstance_ )
+ , hwnd( hwnd_ )
+ {
+ }
+
+ Win32SurfaceCreateInfoKHR( VkWin32SurfaceCreateInfoKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(Win32SurfaceCreateInfoKHR) );
+ }
+
+ Win32SurfaceCreateInfoKHR& operator=( VkWin32SurfaceCreateInfoKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(Win32SurfaceCreateInfoKHR) );
+ return *this;
+ }
+
+ Win32SurfaceCreateInfoKHR& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ Win32SurfaceCreateInfoKHR& setFlags( Win32SurfaceCreateFlagsKHR flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ Win32SurfaceCreateInfoKHR& setHinstance( HINSTANCE hinstance_ )
+ {
+ hinstance = hinstance_;
+ return *this;
+ }
+
+ Win32SurfaceCreateInfoKHR& setHwnd( HWND hwnd_ )
+ {
+ hwnd = hwnd_;
+ return *this;
+ }
+
+ operator const VkWin32SurfaceCreateInfoKHR&() const
+ {
+ return *reinterpret_cast<const VkWin32SurfaceCreateInfoKHR*>(this);
+ }
+
+ bool operator==( Win32SurfaceCreateInfoKHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( hinstance == rhs.hinstance )
+ && ( hwnd == rhs.hwnd );
+ }
+
+ bool operator!=( Win32SurfaceCreateInfoKHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ Win32SurfaceCreateFlagsKHR flags;
+ HINSTANCE hinstance;
+ HWND hwnd;
+ };
+ static_assert( sizeof( Win32SurfaceCreateInfoKHR ) == sizeof( VkWin32SurfaceCreateInfoKHR ), "struct and wrapper have different size!" );
+#endif /*VK_USE_PLATFORM_WIN32_KHR*/
+
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+ struct XlibSurfaceCreateInfoKHR
+ {
+ XlibSurfaceCreateInfoKHR( XlibSurfaceCreateFlagsKHR flags_ = XlibSurfaceCreateFlagsKHR(), Display* dpy_ = nullptr, Window window_ = 0 )
+ : sType( StructureType::eXlibSurfaceCreateInfoKHR )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , dpy( dpy_ )
+ , window( window_ )
+ {
+ }
+
+ XlibSurfaceCreateInfoKHR( VkXlibSurfaceCreateInfoKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(XlibSurfaceCreateInfoKHR) );
+ }
+
+ XlibSurfaceCreateInfoKHR& operator=( VkXlibSurfaceCreateInfoKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(XlibSurfaceCreateInfoKHR) );
+ return *this;
+ }
+
+ XlibSurfaceCreateInfoKHR& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ XlibSurfaceCreateInfoKHR& setFlags( XlibSurfaceCreateFlagsKHR flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ XlibSurfaceCreateInfoKHR& setDpy( Display* dpy_ )
+ {
+ dpy = dpy_;
+ return *this;
+ }
+
+ XlibSurfaceCreateInfoKHR& setWindow( Window window_ )
+ {
+ window = window_;
+ return *this;
+ }
+
+ operator const VkXlibSurfaceCreateInfoKHR&() const
+ {
+ return *reinterpret_cast<const VkXlibSurfaceCreateInfoKHR*>(this);
+ }
+
+ bool operator==( XlibSurfaceCreateInfoKHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( dpy == rhs.dpy )
+ && ( window == rhs.window );
+ }
+
+ bool operator!=( XlibSurfaceCreateInfoKHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ XlibSurfaceCreateFlagsKHR flags;
+ Display* dpy;
+ Window window;
+ };
+ static_assert( sizeof( XlibSurfaceCreateInfoKHR ) == sizeof( VkXlibSurfaceCreateInfoKHR ), "struct and wrapper have different size!" );
+#endif /*VK_USE_PLATFORM_XLIB_KHR*/
+
+#ifdef VK_USE_PLATFORM_XCB_KHR
+ struct XcbSurfaceCreateInfoKHR
+ {
+ XcbSurfaceCreateInfoKHR( XcbSurfaceCreateFlagsKHR flags_ = XcbSurfaceCreateFlagsKHR(), xcb_connection_t* connection_ = nullptr, xcb_window_t window_ = 0 )
+ : sType( StructureType::eXcbSurfaceCreateInfoKHR )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , connection( connection_ )
+ , window( window_ )
+ {
+ }
+
+ XcbSurfaceCreateInfoKHR( VkXcbSurfaceCreateInfoKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(XcbSurfaceCreateInfoKHR) );
+ }
+
+ XcbSurfaceCreateInfoKHR& operator=( VkXcbSurfaceCreateInfoKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(XcbSurfaceCreateInfoKHR) );
+ return *this;
+ }
+
+ XcbSurfaceCreateInfoKHR& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ XcbSurfaceCreateInfoKHR& setFlags( XcbSurfaceCreateFlagsKHR flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ XcbSurfaceCreateInfoKHR& setConnection( xcb_connection_t* connection_ )
+ {
+ connection = connection_;
+ return *this;
+ }
+
+ XcbSurfaceCreateInfoKHR& setWindow( xcb_window_t window_ )
+ {
+ window = window_;
+ return *this;
+ }
+
+ operator const VkXcbSurfaceCreateInfoKHR&() const
+ {
+ return *reinterpret_cast<const VkXcbSurfaceCreateInfoKHR*>(this);
+ }
+
+ bool operator==( XcbSurfaceCreateInfoKHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( connection == rhs.connection )
+ && ( window == rhs.window );
+ }
+
+ bool operator!=( XcbSurfaceCreateInfoKHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ XcbSurfaceCreateFlagsKHR flags;
+ xcb_connection_t* connection;
+ xcb_window_t window;
+ };
+ static_assert( sizeof( XcbSurfaceCreateInfoKHR ) == sizeof( VkXcbSurfaceCreateInfoKHR ), "struct and wrapper have different size!" );
+#endif /*VK_USE_PLATFORM_XCB_KHR*/
+
+ struct DebugMarkerMarkerInfoEXT
+ {
+ DebugMarkerMarkerInfoEXT( const char* pMarkerName_ = nullptr, std::array<float,4> const& color_ = { { 0, 0, 0, 0 } } )
+ : sType( StructureType::eDebugMarkerMarkerInfoEXT )
+ , pNext( nullptr )
+ , pMarkerName( pMarkerName_ )
+ {
+ memcpy( &color, color_.data(), 4 * sizeof( float ) );
+ }
+
+ DebugMarkerMarkerInfoEXT( VkDebugMarkerMarkerInfoEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DebugMarkerMarkerInfoEXT) );
+ }
+
+ DebugMarkerMarkerInfoEXT& operator=( VkDebugMarkerMarkerInfoEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DebugMarkerMarkerInfoEXT) );
+ return *this;
+ }
+
+ DebugMarkerMarkerInfoEXT& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DebugMarkerMarkerInfoEXT& setPMarkerName( const char* pMarkerName_ )
+ {
+ pMarkerName = pMarkerName_;
+ return *this;
+ }
+
+ DebugMarkerMarkerInfoEXT& setColor( std::array<float,4> color_ )
+ {
+ memcpy( &color, color_.data(), 4 * sizeof( float ) );
+ return *this;
+ }
+
+ operator const VkDebugMarkerMarkerInfoEXT&() const
+ {
+ return *reinterpret_cast<const VkDebugMarkerMarkerInfoEXT*>(this);
+ }
+
+ bool operator==( DebugMarkerMarkerInfoEXT const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( pMarkerName == rhs.pMarkerName )
+ && ( memcmp( color, rhs.color, 4 * sizeof( float ) ) == 0 );
+ }
+
+ bool operator!=( DebugMarkerMarkerInfoEXT const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ const char* pMarkerName;
+ float color[4];
+ };
+ static_assert( sizeof( DebugMarkerMarkerInfoEXT ) == sizeof( VkDebugMarkerMarkerInfoEXT ), "struct and wrapper have different size!" );
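+
+ // Editorial note (illustrative only): unlike the pointer and scalar members above,
+ // the color member is filled from a std::array<float,4> via memcpy, so a marker can
+ // be constructed as, for example:
+ //   vk::DebugMarkerMarkerInfoEXT marker( "upload pass", { { 1.0f, 0.5f, 0.0f, 1.0f } } );
+ // (namespace vk assumed, as in the stock vulkan.hpp this wrapper derives from)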
+
+ struct DedicatedAllocationImageCreateInfoNV
+ {
+ DedicatedAllocationImageCreateInfoNV( Bool32 dedicatedAllocation_ = 0 )
+ : sType( StructureType::eDedicatedAllocationImageCreateInfoNV )
+ , pNext( nullptr )
+ , dedicatedAllocation( dedicatedAllocation_ )
+ {
+ }
+
+ DedicatedAllocationImageCreateInfoNV( VkDedicatedAllocationImageCreateInfoNV const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DedicatedAllocationImageCreateInfoNV) );
+ }
+
+ DedicatedAllocationImageCreateInfoNV& operator=( VkDedicatedAllocationImageCreateInfoNV const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DedicatedAllocationImageCreateInfoNV) );
+ return *this;
+ }
+
+ DedicatedAllocationImageCreateInfoNV& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DedicatedAllocationImageCreateInfoNV& setDedicatedAllocation( Bool32 dedicatedAllocation_ )
+ {
+ dedicatedAllocation = dedicatedAllocation_;
+ return *this;
+ }
+
+ operator const VkDedicatedAllocationImageCreateInfoNV&() const
+ {
+ return *reinterpret_cast<const VkDedicatedAllocationImageCreateInfoNV*>(this);
+ }
+
+ bool operator==( DedicatedAllocationImageCreateInfoNV const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( dedicatedAllocation == rhs.dedicatedAllocation );
+ }
+
+ bool operator!=( DedicatedAllocationImageCreateInfoNV const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ Bool32 dedicatedAllocation;
+ };
+ static_assert( sizeof( DedicatedAllocationImageCreateInfoNV ) == sizeof( VkDedicatedAllocationImageCreateInfoNV ), "struct and wrapper have different size!" );
+
+ struct DedicatedAllocationBufferCreateInfoNV
+ {
+ DedicatedAllocationBufferCreateInfoNV( Bool32 dedicatedAllocation_ = 0 )
+ : sType( StructureType::eDedicatedAllocationBufferCreateInfoNV )
+ , pNext( nullptr )
+ , dedicatedAllocation( dedicatedAllocation_ )
+ {
+ }
+
+ DedicatedAllocationBufferCreateInfoNV( VkDedicatedAllocationBufferCreateInfoNV const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DedicatedAllocationBufferCreateInfoNV) );
+ }
+
+ DedicatedAllocationBufferCreateInfoNV& operator=( VkDedicatedAllocationBufferCreateInfoNV const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DedicatedAllocationBufferCreateInfoNV) );
+ return *this;
+ }
+
+ DedicatedAllocationBufferCreateInfoNV& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DedicatedAllocationBufferCreateInfoNV& setDedicatedAllocation( Bool32 dedicatedAllocation_ )
+ {
+ dedicatedAllocation = dedicatedAllocation_;
+ return *this;
+ }
+
+ operator const VkDedicatedAllocationBufferCreateInfoNV&() const
+ {
+ return *reinterpret_cast<const VkDedicatedAllocationBufferCreateInfoNV*>(this);
+ }
+
+ bool operator==( DedicatedAllocationBufferCreateInfoNV const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( dedicatedAllocation == rhs.dedicatedAllocation );
+ }
+
+ bool operator!=( DedicatedAllocationBufferCreateInfoNV const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ Bool32 dedicatedAllocation;
+ };
+ static_assert( sizeof( DedicatedAllocationBufferCreateInfoNV ) == sizeof( VkDedicatedAllocationBufferCreateInfoNV ), "struct and wrapper have different size!" );
+
+ struct DedicatedAllocationMemoryAllocateInfoNV
+ {
+ DedicatedAllocationMemoryAllocateInfoNV( Image image_ = Image(), Buffer buffer_ = Buffer() )
+ : sType( StructureType::eDedicatedAllocationMemoryAllocateInfoNV )
+ , pNext( nullptr )
+ , image( image_ )
+ , buffer( buffer_ )
+ {
+ }
+
+ DedicatedAllocationMemoryAllocateInfoNV( VkDedicatedAllocationMemoryAllocateInfoNV const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DedicatedAllocationMemoryAllocateInfoNV) );
+ }
+
+ DedicatedAllocationMemoryAllocateInfoNV& operator=( VkDedicatedAllocationMemoryAllocateInfoNV const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DedicatedAllocationMemoryAllocateInfoNV) );
+ return *this;
+ }
+
+ DedicatedAllocationMemoryAllocateInfoNV& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DedicatedAllocationMemoryAllocateInfoNV& setImage( Image image_ )
+ {
+ image = image_;
+ return *this;
+ }
+
+ DedicatedAllocationMemoryAllocateInfoNV& setBuffer( Buffer buffer_ )
+ {
+ buffer = buffer_;
+ return *this;
+ }
+
+ operator const VkDedicatedAllocationMemoryAllocateInfoNV&() const
+ {
+ return *reinterpret_cast<const VkDedicatedAllocationMemoryAllocateInfoNV*>(this);
+ }
+
+ bool operator==( DedicatedAllocationMemoryAllocateInfoNV const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( image == rhs.image )
+ && ( buffer == rhs.buffer );
+ }
+
+ bool operator!=( DedicatedAllocationMemoryAllocateInfoNV const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ Image image;
+ Buffer buffer;
+ };
+ static_assert( sizeof( DedicatedAllocationMemoryAllocateInfoNV ) == sizeof( VkDedicatedAllocationMemoryAllocateInfoNV ), "struct and wrapper have different size!" );
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ struct ExportMemoryWin32HandleInfoNV
+ {
+ ExportMemoryWin32HandleInfoNV( const SECURITY_ATTRIBUTES* pAttributes_ = nullptr, DWORD dwAccess_ = 0 )
+ : sType( StructureType::eExportMemoryWin32HandleInfoNV )
+ , pNext( nullptr )
+ , pAttributes( pAttributes_ )
+ , dwAccess( dwAccess_ )
+ {
+ }
+
+ ExportMemoryWin32HandleInfoNV( VkExportMemoryWin32HandleInfoNV const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ExportMemoryWin32HandleInfoNV) );
+ }
+
+ ExportMemoryWin32HandleInfoNV& operator=( VkExportMemoryWin32HandleInfoNV const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ExportMemoryWin32HandleInfoNV) );
+ return *this;
+ }
+
+ ExportMemoryWin32HandleInfoNV& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ ExportMemoryWin32HandleInfoNV& setPAttributes( const SECURITY_ATTRIBUTES* pAttributes_ )
+ {
+ pAttributes = pAttributes_;
+ return *this;
+ }
+
+ ExportMemoryWin32HandleInfoNV& setDwAccess( DWORD dwAccess_ )
+ {
+ dwAccess = dwAccess_;
+ return *this;
+ }
+
+ operator const VkExportMemoryWin32HandleInfoNV&() const
+ {
+ return *reinterpret_cast<const VkExportMemoryWin32HandleInfoNV*>(this);
+ }
+
+ bool operator==( ExportMemoryWin32HandleInfoNV const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( pAttributes == rhs.pAttributes )
+ && ( dwAccess == rhs.dwAccess );
+ }
+
+ bool operator!=( ExportMemoryWin32HandleInfoNV const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ const SECURITY_ATTRIBUTES* pAttributes;
+ DWORD dwAccess;
+ };
+ static_assert( sizeof( ExportMemoryWin32HandleInfoNV ) == sizeof( VkExportMemoryWin32HandleInfoNV ), "struct and wrapper have different size!" );
+#endif /*VK_USE_PLATFORM_WIN32_KHR*/
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ struct Win32KeyedMutexAcquireReleaseInfoNV
+ {
+ Win32KeyedMutexAcquireReleaseInfoNV( uint32_t acquireCount_ = 0, const DeviceMemory* pAcquireSyncs_ = nullptr, const uint64_t* pAcquireKeys_ = nullptr, const uint32_t* pAcquireTimeoutMilliseconds_ = nullptr, uint32_t releaseCount_ = 0, const DeviceMemory* pReleaseSyncs_ = nullptr, const uint64_t* pReleaseKeys_ = nullptr )
+ : sType( StructureType::eWin32KeyedMutexAcquireReleaseInfoNV )
+ , pNext( nullptr )
+ , acquireCount( acquireCount_ )
+ , pAcquireSyncs( pAcquireSyncs_ )
+ , pAcquireKeys( pAcquireKeys_ )
+ , pAcquireTimeoutMilliseconds( pAcquireTimeoutMilliseconds_ )
+ , releaseCount( releaseCount_ )
+ , pReleaseSyncs( pReleaseSyncs_ )
+ , pReleaseKeys( pReleaseKeys_ )
+ {
+ }
+
+ Win32KeyedMutexAcquireReleaseInfoNV( VkWin32KeyedMutexAcquireReleaseInfoNV const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(Win32KeyedMutexAcquireReleaseInfoNV) );
+ }
+
+ Win32KeyedMutexAcquireReleaseInfoNV& operator=( VkWin32KeyedMutexAcquireReleaseInfoNV const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(Win32KeyedMutexAcquireReleaseInfoNV) );
+ return *this;
+ }
+
+ Win32KeyedMutexAcquireReleaseInfoNV& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ Win32KeyedMutexAcquireReleaseInfoNV& setAcquireCount( uint32_t acquireCount_ )
+ {
+ acquireCount = acquireCount_;
+ return *this;
+ }
+
+ Win32KeyedMutexAcquireReleaseInfoNV& setPAcquireSyncs( const DeviceMemory* pAcquireSyncs_ )
+ {
+ pAcquireSyncs = pAcquireSyncs_;
+ return *this;
+ }
+
+ Win32KeyedMutexAcquireReleaseInfoNV& setPAcquireKeys( const uint64_t* pAcquireKeys_ )
+ {
+ pAcquireKeys = pAcquireKeys_;
+ return *this;
+ }
+
+ Win32KeyedMutexAcquireReleaseInfoNV& setPAcquireTimeoutMilliseconds( const uint32_t* pAcquireTimeoutMilliseconds_ )
+ {
+ pAcquireTimeoutMilliseconds = pAcquireTimeoutMilliseconds_;
+ return *this;
+ }
+
+ Win32KeyedMutexAcquireReleaseInfoNV& setReleaseCount( uint32_t releaseCount_ )
+ {
+ releaseCount = releaseCount_;
+ return *this;
+ }
+
+ Win32KeyedMutexAcquireReleaseInfoNV& setPReleaseSyncs( const DeviceMemory* pReleaseSyncs_ )
+ {
+ pReleaseSyncs = pReleaseSyncs_;
+ return *this;
+ }
+
+ Win32KeyedMutexAcquireReleaseInfoNV& setPReleaseKeys( const uint64_t* pReleaseKeys_ )
+ {
+ pReleaseKeys = pReleaseKeys_;
+ return *this;
+ }
+
+ operator const VkWin32KeyedMutexAcquireReleaseInfoNV&() const
+ {
+ return *reinterpret_cast<const VkWin32KeyedMutexAcquireReleaseInfoNV*>(this);
+ }
+
+ bool operator==( Win32KeyedMutexAcquireReleaseInfoNV const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( acquireCount == rhs.acquireCount )
+ && ( pAcquireSyncs == rhs.pAcquireSyncs )
+ && ( pAcquireKeys == rhs.pAcquireKeys )
+ && ( pAcquireTimeoutMilliseconds == rhs.pAcquireTimeoutMilliseconds )
+ && ( releaseCount == rhs.releaseCount )
+ && ( pReleaseSyncs == rhs.pReleaseSyncs )
+ && ( pReleaseKeys == rhs.pReleaseKeys );
+ }
+
+ bool operator!=( Win32KeyedMutexAcquireReleaseInfoNV const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ uint32_t acquireCount;
+ const DeviceMemory* pAcquireSyncs;
+ const uint64_t* pAcquireKeys;
+ const uint32_t* pAcquireTimeoutMilliseconds;
+ uint32_t releaseCount;
+ const DeviceMemory* pReleaseSyncs;
+ const uint64_t* pReleaseKeys;
+ };
+ static_assert( sizeof( Win32KeyedMutexAcquireReleaseInfoNV ) == sizeof( VkWin32KeyedMutexAcquireReleaseInfoNV ), "struct and wrapper have different size!" );
+#endif /*VK_USE_PLATFORM_WIN32_KHR*/
+
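Every wrapper struct added in this hunk follows the same shape: the constructor fills in sType and a null pNext, each set*() member assigns one field and returns *this so calls can be chained, operator== compares field by field, and the conversion operator reinterprets the wrapper as the matching C struct (the static_assert checks the two sizes agree). A minimal sketch of that pattern using the keyed-mutex struct above; this is not from the header or the SDK samples, the handles and keys are hypothetical, and the include path is assumed:

#include <vulkan/vulkan.hpp>   // assumed location of this header

#if defined( VK_USE_PLATFORM_WIN32_KHR )
VkWin32KeyedMutexAcquireReleaseInfoNV makeKeyedMutexInfo( const vk::DeviceMemory* pSharedMemory,   // hypothetical memory imported from D3D
                                                          const uint64_t* pAcquireKey,
                                                          const uint32_t* pTimeoutMs,
                                                          const uint64_t* pReleaseKey )
{
  vk::Win32KeyedMutexAcquireReleaseInfoNV info = vk::Win32KeyedMutexAcquireReleaseInfoNV()
    .setAcquireCount( 1 )
    .setPAcquireSyncs( pSharedMemory )
    .setPAcquireKeys( pAcquireKey )
    .setPAcquireTimeoutMilliseconds( pTimeoutMs )
    .setReleaseCount( 1 )
    .setPReleaseSyncs( pSharedMemory )
    .setPReleaseKeys( pReleaseKey );
  return info;   // the conversion operator hands back the plain C struct
}
#endif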
+ struct DeviceGeneratedCommandsFeaturesNVX
+ {
+ DeviceGeneratedCommandsFeaturesNVX( Bool32 computeBindingPointSupport_ = 0 )
+ : sType( StructureType::eDeviceGeneratedCommandsFeaturesNVX )
+ , pNext( nullptr )
+ , computeBindingPointSupport( computeBindingPointSupport_ )
+ {
+ }
+
+ DeviceGeneratedCommandsFeaturesNVX( VkDeviceGeneratedCommandsFeaturesNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DeviceGeneratedCommandsFeaturesNVX) );
+ }
+
+ DeviceGeneratedCommandsFeaturesNVX& operator=( VkDeviceGeneratedCommandsFeaturesNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DeviceGeneratedCommandsFeaturesNVX) );
+ return *this;
+ }
+
+ DeviceGeneratedCommandsFeaturesNVX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DeviceGeneratedCommandsFeaturesNVX& setComputeBindingPointSupport( Bool32 computeBindingPointSupport_ )
+ {
+ computeBindingPointSupport = computeBindingPointSupport_;
+ return *this;
+ }
+
+ operator const VkDeviceGeneratedCommandsFeaturesNVX&() const
+ {
+ return *reinterpret_cast<const VkDeviceGeneratedCommandsFeaturesNVX*>(this);
+ }
+
+ bool operator==( DeviceGeneratedCommandsFeaturesNVX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( computeBindingPointSupport == rhs.computeBindingPointSupport );
+ }
+
+ bool operator!=( DeviceGeneratedCommandsFeaturesNVX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ Bool32 computeBindingPointSupport;
+ };
+ static_assert( sizeof( DeviceGeneratedCommandsFeaturesNVX ) == sizeof( VkDeviceGeneratedCommandsFeaturesNVX ), "struct and wrapper have different size!" );
+
+ struct DeviceGeneratedCommandsLimitsNVX
+ {
+ DeviceGeneratedCommandsLimitsNVX( uint32_t maxIndirectCommandsLayoutTokenCount_ = 0, uint32_t maxObjectEntryCounts_ = 0, uint32_t minSequenceCountBufferOffsetAlignment_ = 0, uint32_t minSequenceIndexBufferOffsetAlignment_ = 0, uint32_t minCommandsTokenBufferOffsetAlignment_ = 0 )
+ : sType( StructureType::eDeviceGeneratedCommandsLimitsNVX )
+ , pNext( nullptr )
+ , maxIndirectCommandsLayoutTokenCount( maxIndirectCommandsLayoutTokenCount_ )
+ , maxObjectEntryCounts( maxObjectEntryCounts_ )
+ , minSequenceCountBufferOffsetAlignment( minSequenceCountBufferOffsetAlignment_ )
+ , minSequenceIndexBufferOffsetAlignment( minSequenceIndexBufferOffsetAlignment_ )
+ , minCommandsTokenBufferOffsetAlignment( minCommandsTokenBufferOffsetAlignment_ )
+ {
+ }
+
+ DeviceGeneratedCommandsLimitsNVX( VkDeviceGeneratedCommandsLimitsNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DeviceGeneratedCommandsLimitsNVX) );
+ }
+
+ DeviceGeneratedCommandsLimitsNVX& operator=( VkDeviceGeneratedCommandsLimitsNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DeviceGeneratedCommandsLimitsNVX) );
+ return *this;
+ }
+
+ DeviceGeneratedCommandsLimitsNVX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DeviceGeneratedCommandsLimitsNVX& setMaxIndirectCommandsLayoutTokenCount( uint32_t maxIndirectCommandsLayoutTokenCount_ )
+ {
+ maxIndirectCommandsLayoutTokenCount = maxIndirectCommandsLayoutTokenCount_;
+ return *this;
+ }
+
+ DeviceGeneratedCommandsLimitsNVX& setMaxObjectEntryCounts( uint32_t maxObjectEntryCounts_ )
+ {
+ maxObjectEntryCounts = maxObjectEntryCounts_;
+ return *this;
+ }
+
+ DeviceGeneratedCommandsLimitsNVX& setMinSequenceCountBufferOffsetAlignment( uint32_t minSequenceCountBufferOffsetAlignment_ )
+ {
+ minSequenceCountBufferOffsetAlignment = minSequenceCountBufferOffsetAlignment_;
+ return *this;
+ }
+
+ DeviceGeneratedCommandsLimitsNVX& setMinSequenceIndexBufferOffsetAlignment( uint32_t minSequenceIndexBufferOffsetAlignment_ )
+ {
+ minSequenceIndexBufferOffsetAlignment = minSequenceIndexBufferOffsetAlignment_;
+ return *this;
+ }
+
+ DeviceGeneratedCommandsLimitsNVX& setMinCommandsTokenBufferOffsetAlignment( uint32_t minCommandsTokenBufferOffsetAlignment_ )
+ {
+ minCommandsTokenBufferOffsetAlignment = minCommandsTokenBufferOffsetAlignment_;
+ return *this;
+ }
+
+ operator const VkDeviceGeneratedCommandsLimitsNVX&() const
+ {
+ return *reinterpret_cast<const VkDeviceGeneratedCommandsLimitsNVX*>(this);
+ }
+
+ bool operator==( DeviceGeneratedCommandsLimitsNVX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( maxIndirectCommandsLayoutTokenCount == rhs.maxIndirectCommandsLayoutTokenCount )
+ && ( maxObjectEntryCounts == rhs.maxObjectEntryCounts )
+ && ( minSequenceCountBufferOffsetAlignment == rhs.minSequenceCountBufferOffsetAlignment )
+ && ( minSequenceIndexBufferOffsetAlignment == rhs.minSequenceIndexBufferOffsetAlignment )
+ && ( minCommandsTokenBufferOffsetAlignment == rhs.minCommandsTokenBufferOffsetAlignment );
+ }
+
+ bool operator!=( DeviceGeneratedCommandsLimitsNVX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ uint32_t maxIndirectCommandsLayoutTokenCount;
+ uint32_t maxObjectEntryCounts;
+ uint32_t minSequenceCountBufferOffsetAlignment;
+ uint32_t minSequenceIndexBufferOffsetAlignment;
+ uint32_t minCommandsTokenBufferOffsetAlignment;
+ };
+ static_assert( sizeof( DeviceGeneratedCommandsLimitsNVX ) == sizeof( VkDeviceGeneratedCommandsLimitsNVX ), "struct and wrapper have different size!" );
+
+ struct CmdReserveSpaceForCommandsInfoNVX
+ {
+ CmdReserveSpaceForCommandsInfoNVX( ObjectTableNVX objectTable_ = ObjectTableNVX(), IndirectCommandsLayoutNVX indirectCommandsLayout_ = IndirectCommandsLayoutNVX(), uint32_t maxSequencesCount_ = 0 )
+ : sType( StructureType::eCmdReserveSpaceForCommandsInfoNVX )
+ , pNext( nullptr )
+ , objectTable( objectTable_ )
+ , indirectCommandsLayout( indirectCommandsLayout_ )
+ , maxSequencesCount( maxSequencesCount_ )
+ {
+ }
+
+ CmdReserveSpaceForCommandsInfoNVX( VkCmdReserveSpaceForCommandsInfoNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(CmdReserveSpaceForCommandsInfoNVX) );
+ }
+
+ CmdReserveSpaceForCommandsInfoNVX& operator=( VkCmdReserveSpaceForCommandsInfoNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(CmdReserveSpaceForCommandsInfoNVX) );
+ return *this;
+ }
+
+ CmdReserveSpaceForCommandsInfoNVX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ CmdReserveSpaceForCommandsInfoNVX& setObjectTable( ObjectTableNVX objectTable_ )
+ {
+ objectTable = objectTable_;
+ return *this;
+ }
+
+ CmdReserveSpaceForCommandsInfoNVX& setIndirectCommandsLayout( IndirectCommandsLayoutNVX indirectCommandsLayout_ )
+ {
+ indirectCommandsLayout = indirectCommandsLayout_;
+ return *this;
+ }
+
+ CmdReserveSpaceForCommandsInfoNVX& setMaxSequencesCount( uint32_t maxSequencesCount_ )
+ {
+ maxSequencesCount = maxSequencesCount_;
+ return *this;
+ }
+
+ operator const VkCmdReserveSpaceForCommandsInfoNVX&() const
+ {
+ return *reinterpret_cast<const VkCmdReserveSpaceForCommandsInfoNVX*>(this);
+ }
+
+ bool operator==( CmdReserveSpaceForCommandsInfoNVX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( objectTable == rhs.objectTable )
+ && ( indirectCommandsLayout == rhs.indirectCommandsLayout )
+ && ( maxSequencesCount == rhs.maxSequencesCount );
+ }
+
+ bool operator!=( CmdReserveSpaceForCommandsInfoNVX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ ObjectTableNVX objectTable;
+ IndirectCommandsLayoutNVX indirectCommandsLayout;
+ uint32_t maxSequencesCount;
+ };
+ static_assert( sizeof( CmdReserveSpaceForCommandsInfoNVX ) == sizeof( VkCmdReserveSpaceForCommandsInfoNVX ), "struct and wrapper have different size!" );
+
+ struct PhysicalDeviceFeatures2KHR
+ {
+ PhysicalDeviceFeatures2KHR( PhysicalDeviceFeatures features_ = PhysicalDeviceFeatures() )
+ : sType( StructureType::ePhysicalDeviceFeatures2KHR )
+ , pNext( nullptr )
+ , features( features_ )
+ {
+ }
+
+ PhysicalDeviceFeatures2KHR( VkPhysicalDeviceFeatures2KHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PhysicalDeviceFeatures2KHR) );
+ }
+
+ PhysicalDeviceFeatures2KHR& operator=( VkPhysicalDeviceFeatures2KHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PhysicalDeviceFeatures2KHR) );
+ return *this;
+ }
+
+ PhysicalDeviceFeatures2KHR& setPNext( void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PhysicalDeviceFeatures2KHR& setFeatures( PhysicalDeviceFeatures features_ )
+ {
+ features = features_;
+ return *this;
+ }
+
+ operator const VkPhysicalDeviceFeatures2KHR&() const
+ {
+ return *reinterpret_cast<const VkPhysicalDeviceFeatures2KHR*>(this);
+ }
+
+ bool operator==( PhysicalDeviceFeatures2KHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( features == rhs.features );
+ }
+
+ bool operator!=( PhysicalDeviceFeatures2KHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ void* pNext;
+ PhysicalDeviceFeatures features;
+ };
+ static_assert( sizeof( PhysicalDeviceFeatures2KHR ) == sizeof( VkPhysicalDeviceFeatures2KHR ), "struct and wrapper have different size!" );
+
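Unlike the create-info wrappers, the query structs of VK_KHR_get_physical_device_properties2 expose a writable (non-const) pNext: the application chains extension structs onto PhysicalDeviceFeatures2KHR and the implementation fills the whole chain in one call. A small sketch of that chaining (same <vulkan/vulkan.hpp> assumption as the earlier sketch); it uses the multiview feature struct declared further down in this header, and the getFeatures2KHR member is an assumed wrapper for vkGetPhysicalDeviceFeatures2KHR:

void queryMultiviewFeatures( vk::PhysicalDevice physicalDevice )
{
  vk::PhysicalDeviceMultiviewFeaturesKHX multiviewFeatures;   // declared later in this header
  vk::PhysicalDeviceFeatures2KHR features2;
  features2.setPNext( &multiviewFeatures );                   // chain the extension query onto the core query
  physicalDevice.getFeatures2KHR( &features2 );               // assumed wrapper for vkGetPhysicalDeviceFeatures2KHR
  // multiviewFeatures.multiview etc. are now filled in by the implementation
}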
+ struct PhysicalDevicePushDescriptorPropertiesKHR
+ {
+ PhysicalDevicePushDescriptorPropertiesKHR( uint32_t maxPushDescriptors_ = 0 )
+ : sType( StructureType::ePhysicalDevicePushDescriptorPropertiesKHR )
+ , pNext( nullptr )
+ , maxPushDescriptors( maxPushDescriptors_ )
+ {
+ }
+
+ PhysicalDevicePushDescriptorPropertiesKHR( VkPhysicalDevicePushDescriptorPropertiesKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PhysicalDevicePushDescriptorPropertiesKHR) );
+ }
+
+ PhysicalDevicePushDescriptorPropertiesKHR& operator=( VkPhysicalDevicePushDescriptorPropertiesKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PhysicalDevicePushDescriptorPropertiesKHR) );
+ return *this;
+ }
+
+ PhysicalDevicePushDescriptorPropertiesKHR& setPNext( void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PhysicalDevicePushDescriptorPropertiesKHR& setMaxPushDescriptors( uint32_t maxPushDescriptors_ )
+ {
+ maxPushDescriptors = maxPushDescriptors_;
+ return *this;
+ }
+
+ operator const VkPhysicalDevicePushDescriptorPropertiesKHR&() const
+ {
+ return *reinterpret_cast<const VkPhysicalDevicePushDescriptorPropertiesKHR*>(this);
+ }
+
+ bool operator==( PhysicalDevicePushDescriptorPropertiesKHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( maxPushDescriptors == rhs.maxPushDescriptors );
+ }
+
+ bool operator!=( PhysicalDevicePushDescriptorPropertiesKHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ void* pNext;
+ uint32_t maxPushDescriptors;
+ };
+ static_assert( sizeof( PhysicalDevicePushDescriptorPropertiesKHR ) == sizeof( VkPhysicalDevicePushDescriptorPropertiesKHR ), "struct and wrapper have different size!" );
+
+ struct PresentRegionsKHR
+ {
+ PresentRegionsKHR( uint32_t swapchainCount_ = 0, const PresentRegionKHR* pRegions_ = nullptr )
+ : sType( StructureType::ePresentRegionsKHR )
+ , pNext( nullptr )
+ , swapchainCount( swapchainCount_ )
+ , pRegions( pRegions_ )
+ {
+ }
+
+ PresentRegionsKHR( VkPresentRegionsKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PresentRegionsKHR) );
+ }
+
+ PresentRegionsKHR& operator=( VkPresentRegionsKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PresentRegionsKHR) );
+ return *this;
+ }
+
+ PresentRegionsKHR& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PresentRegionsKHR& setSwapchainCount( uint32_t swapchainCount_ )
+ {
+ swapchainCount = swapchainCount_;
+ return *this;
+ }
+
+ PresentRegionsKHR& setPRegions( const PresentRegionKHR* pRegions_ )
+ {
+ pRegions = pRegions_;
+ return *this;
+ }
+
+ operator const VkPresentRegionsKHR&() const
+ {
+ return *reinterpret_cast<const VkPresentRegionsKHR*>(this);
+ }
+
+ bool operator==( PresentRegionsKHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( swapchainCount == rhs.swapchainCount )
+ && ( pRegions == rhs.pRegions );
+ }
+
+ bool operator!=( PresentRegionsKHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ uint32_t swapchainCount;
+ const PresentRegionKHR* pRegions;
+ };
+ static_assert( sizeof( PresentRegionsKHR ) == sizeof( VkPresentRegionsKHR ), "struct and wrapper have different size!" );
+
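PresentRegionsKHR is not passed to a function of its own; it is chained into vk::PresentInfoKHR through pNext so VK_KHR_incremental_present can describe which parts of each swapchain image changed. A short illustrative sketch, one swapchain and no rectangles listed, with the same include assumption as above:

vk::PresentRegionKHR  g_region;    // rectangleCount == 0, pRectangles == nullptr by default
vk::PresentRegionsKHR g_regions;   // file scope only to keep the sketch short; must stay alive through the present call

void addIncrementalPresentInfo( vk::PresentInfoKHR& presentInfo )
{
  g_regions.setSwapchainCount( 1 )   // one PresentRegionKHR per swapchain handed to vkQueuePresentKHR
           .setPRegions( &g_region );
  presentInfo.setPNext( &g_regions );   // swapchains, image indices and wait semaphores are set as usual
}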
+ struct PhysicalDeviceIDPropertiesKHX
+ {
+ operator const VkPhysicalDeviceIDPropertiesKHX&() const
+ {
+ return *reinterpret_cast<const VkPhysicalDeviceIDPropertiesKHX*>(this);
+ }
+
+ bool operator==( PhysicalDeviceIDPropertiesKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( memcmp( deviceUUID, rhs.deviceUUID, VK_UUID_SIZE * sizeof( uint8_t ) ) == 0 )
+ && ( memcmp( driverUUID, rhs.driverUUID, VK_UUID_SIZE * sizeof( uint8_t ) ) == 0 )
+ && ( memcmp( deviceLUID, rhs.deviceLUID, VK_LUID_SIZE_KHX * sizeof( uint8_t ) ) == 0 )
+ && ( deviceLUIDValid == rhs.deviceLUIDValid );
+ }
+
+ bool operator!=( PhysicalDeviceIDPropertiesKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ void* pNext;
+ uint8_t deviceUUID[VK_UUID_SIZE];
+ uint8_t driverUUID[VK_UUID_SIZE];
+ uint8_t deviceLUID[VK_LUID_SIZE_KHX];
+ Bool32 deviceLUIDValid;
+ };
+ static_assert( sizeof( PhysicalDeviceIDPropertiesKHX ) == sizeof( VkPhysicalDeviceIDPropertiesKHX ), "struct and wrapper have different size!" );
+
+#ifdef VK_USE_PLATFORM_WIN32_KHX
+ struct ExportMemoryWin32HandleInfoKHX
+ {
+ ExportMemoryWin32HandleInfoKHX( const SECURITY_ATTRIBUTES* pAttributes_ = nullptr, DWORD dwAccess_ = 0, LPCWSTR name_ = 0 )
+ : sType( StructureType::eExportMemoryWin32HandleInfoKHX )
+ , pNext( nullptr )
+ , pAttributes( pAttributes_ )
+ , dwAccess( dwAccess_ )
+ , name( name_ )
+ {
+ }
+
+ ExportMemoryWin32HandleInfoKHX( VkExportMemoryWin32HandleInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ExportMemoryWin32HandleInfoKHX) );
+ }
+
+ ExportMemoryWin32HandleInfoKHX& operator=( VkExportMemoryWin32HandleInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ExportMemoryWin32HandleInfoKHX) );
+ return *this;
+ }
+
+ ExportMemoryWin32HandleInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ ExportMemoryWin32HandleInfoKHX& setPAttributes( const SECURITY_ATTRIBUTES* pAttributes_ )
+ {
+ pAttributes = pAttributes_;
+ return *this;
+ }
+
+ ExportMemoryWin32HandleInfoKHX& setDwAccess( DWORD dwAccess_ )
+ {
+ dwAccess = dwAccess_;
+ return *this;
+ }
+
+ ExportMemoryWin32HandleInfoKHX& setName( LPCWSTR name_ )
+ {
+ name = name_;
+ return *this;
+ }
+
+ operator const VkExportMemoryWin32HandleInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkExportMemoryWin32HandleInfoKHX*>(this);
+ }
+
+ bool operator==( ExportMemoryWin32HandleInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( pAttributes == rhs.pAttributes )
+ && ( dwAccess == rhs.dwAccess )
+ && ( name == rhs.name );
+ }
+
+ bool operator!=( ExportMemoryWin32HandleInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ const SECURITY_ATTRIBUTES* pAttributes;
+ DWORD dwAccess;
+ LPCWSTR name;
+ };
+ static_assert( sizeof( ExportMemoryWin32HandleInfoKHX ) == sizeof( VkExportMemoryWin32HandleInfoKHX ), "struct and wrapper have different size!" );
+#endif /*VK_USE_PLATFORM_WIN32_KHX*/
+
+#ifdef VK_USE_PLATFORM_WIN32_KHX
+ struct MemoryWin32HandlePropertiesKHX
+ {
+ operator const VkMemoryWin32HandlePropertiesKHX&() const
+ {
+ return *reinterpret_cast<const VkMemoryWin32HandlePropertiesKHX*>(this);
+ }
+
+ bool operator==( MemoryWin32HandlePropertiesKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( memoryTypeBits == rhs.memoryTypeBits );
+ }
+
+ bool operator!=( MemoryWin32HandlePropertiesKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ void* pNext;
+ uint32_t memoryTypeBits;
+ };
+ static_assert( sizeof( MemoryWin32HandlePropertiesKHX ) == sizeof( VkMemoryWin32HandlePropertiesKHX ), "struct and wrapper have different size!" );
+#endif /*VK_USE_PLATFORM_WIN32_KHX*/
+
+ struct MemoryFdPropertiesKHX
+ {
+ operator const VkMemoryFdPropertiesKHX&() const
+ {
+ return *reinterpret_cast<const VkMemoryFdPropertiesKHX*>(this);
+ }
+
+ bool operator==( MemoryFdPropertiesKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( memoryTypeBits == rhs.memoryTypeBits );
+ }
+
+ bool operator!=( MemoryFdPropertiesKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ void* pNext;
+ uint32_t memoryTypeBits;
+ };
+ static_assert( sizeof( MemoryFdPropertiesKHX ) == sizeof( VkMemoryFdPropertiesKHX ), "struct and wrapper have different size!" );
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ struct Win32KeyedMutexAcquireReleaseInfoKHX
+ {
+ Win32KeyedMutexAcquireReleaseInfoKHX( uint32_t acquireCount_ = 0, const DeviceMemory* pAcquireSyncs_ = nullptr, const uint64_t* pAcquireKeys_ = nullptr, const uint32_t* pAcquireTimeouts_ = nullptr, uint32_t releaseCount_ = 0, const DeviceMemory* pReleaseSyncs_ = nullptr, const uint64_t* pReleaseKeys_ = nullptr )
+ : sType( StructureType::eWin32KeyedMutexAcquireReleaseInfoKHX )
+ , pNext( nullptr )
+ , acquireCount( acquireCount_ )
+ , pAcquireSyncs( pAcquireSyncs_ )
+ , pAcquireKeys( pAcquireKeys_ )
+ , pAcquireTimeouts( pAcquireTimeouts_ )
+ , releaseCount( releaseCount_ )
+ , pReleaseSyncs( pReleaseSyncs_ )
+ , pReleaseKeys( pReleaseKeys_ )
+ {
+ }
+
+ Win32KeyedMutexAcquireReleaseInfoKHX( VkWin32KeyedMutexAcquireReleaseInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(Win32KeyedMutexAcquireReleaseInfoKHX) );
+ }
+
+ Win32KeyedMutexAcquireReleaseInfoKHX& operator=( VkWin32KeyedMutexAcquireReleaseInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(Win32KeyedMutexAcquireReleaseInfoKHX) );
+ return *this;
+ }
+
+ Win32KeyedMutexAcquireReleaseInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ Win32KeyedMutexAcquireReleaseInfoKHX& setAcquireCount( uint32_t acquireCount_ )
+ {
+ acquireCount = acquireCount_;
+ return *this;
+ }
+
+ Win32KeyedMutexAcquireReleaseInfoKHX& setPAcquireSyncs( const DeviceMemory* pAcquireSyncs_ )
+ {
+ pAcquireSyncs = pAcquireSyncs_;
+ return *this;
+ }
+
+ Win32KeyedMutexAcquireReleaseInfoKHX& setPAcquireKeys( const uint64_t* pAcquireKeys_ )
+ {
+ pAcquireKeys = pAcquireKeys_;
+ return *this;
+ }
+
+ Win32KeyedMutexAcquireReleaseInfoKHX& setPAcquireTimeouts( const uint32_t* pAcquireTimeouts_ )
+ {
+ pAcquireTimeouts = pAcquireTimeouts_;
+ return *this;
+ }
+
+ Win32KeyedMutexAcquireReleaseInfoKHX& setReleaseCount( uint32_t releaseCount_ )
+ {
+ releaseCount = releaseCount_;
+ return *this;
+ }
+
+ Win32KeyedMutexAcquireReleaseInfoKHX& setPReleaseSyncs( const DeviceMemory* pReleaseSyncs_ )
+ {
+ pReleaseSyncs = pReleaseSyncs_;
+ return *this;
+ }
+
+ Win32KeyedMutexAcquireReleaseInfoKHX& setPReleaseKeys( const uint64_t* pReleaseKeys_ )
+ {
+ pReleaseKeys = pReleaseKeys_;
+ return *this;
+ }
+
+ operator const VkWin32KeyedMutexAcquireReleaseInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkWin32KeyedMutexAcquireReleaseInfoKHX*>(this);
+ }
+
+ bool operator==( Win32KeyedMutexAcquireReleaseInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( acquireCount == rhs.acquireCount )
+ && ( pAcquireSyncs == rhs.pAcquireSyncs )
+ && ( pAcquireKeys == rhs.pAcquireKeys )
+ && ( pAcquireTimeouts == rhs.pAcquireTimeouts )
+ && ( releaseCount == rhs.releaseCount )
+ && ( pReleaseSyncs == rhs.pReleaseSyncs )
+ && ( pReleaseKeys == rhs.pReleaseKeys );
+ }
+
+ bool operator!=( Win32KeyedMutexAcquireReleaseInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ uint32_t acquireCount;
+ const DeviceMemory* pAcquireSyncs;
+ const uint64_t* pAcquireKeys;
+ const uint32_t* pAcquireTimeouts;
+ uint32_t releaseCount;
+ const DeviceMemory* pReleaseSyncs;
+ const uint64_t* pReleaseKeys;
+ };
+ static_assert( sizeof( Win32KeyedMutexAcquireReleaseInfoKHX ) == sizeof( VkWin32KeyedMutexAcquireReleaseInfoKHX ), "struct and wrapper have different size!" );
+#endif /*VK_USE_PLATFORM_WIN32_KHR*/
+
+#ifdef VK_USE_PLATFORM_WIN32_KHX
+ struct ExportSemaphoreWin32HandleInfoKHX
+ {
+ ExportSemaphoreWin32HandleInfoKHX( const SECURITY_ATTRIBUTES* pAttributes_ = nullptr, DWORD dwAccess_ = 0, LPCWSTR name_ = 0 )
+ : sType( StructureType::eExportSemaphoreWin32HandleInfoKHX )
+ , pNext( nullptr )
+ , pAttributes( pAttributes_ )
+ , dwAccess( dwAccess_ )
+ , name( name_ )
+ {
+ }
+
+ ExportSemaphoreWin32HandleInfoKHX( VkExportSemaphoreWin32HandleInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ExportSemaphoreWin32HandleInfoKHX) );
+ }
+
+ ExportSemaphoreWin32HandleInfoKHX& operator=( VkExportSemaphoreWin32HandleInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ExportSemaphoreWin32HandleInfoKHX) );
+ return *this;
+ }
+
+ ExportSemaphoreWin32HandleInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ ExportSemaphoreWin32HandleInfoKHX& setPAttributes( const SECURITY_ATTRIBUTES* pAttributes_ )
+ {
+ pAttributes = pAttributes_;
+ return *this;
+ }
+
+ ExportSemaphoreWin32HandleInfoKHX& setDwAccess( DWORD dwAccess_ )
+ {
+ dwAccess = dwAccess_;
+ return *this;
+ }
+
+ ExportSemaphoreWin32HandleInfoKHX& setName( LPCWSTR name_ )
+ {
+ name = name_;
+ return *this;
+ }
+
+ operator const VkExportSemaphoreWin32HandleInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkExportSemaphoreWin32HandleInfoKHX*>(this);
+ }
+
+ bool operator==( ExportSemaphoreWin32HandleInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( pAttributes == rhs.pAttributes )
+ && ( dwAccess == rhs.dwAccess )
+ && ( name == rhs.name );
+ }
+
+ bool operator!=( ExportSemaphoreWin32HandleInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ const SECURITY_ATTRIBUTES* pAttributes;
+ DWORD dwAccess;
+ LPCWSTR name;
+ };
+ static_assert( sizeof( ExportSemaphoreWin32HandleInfoKHX ) == sizeof( VkExportSemaphoreWin32HandleInfoKHX ), "struct and wrapper have different size!" );
+#endif /*VK_USE_PLATFORM_WIN32_KHX*/
+
+#ifdef VK_USE_PLATFORM_WIN32_KHX
+ struct D3D12FenceSubmitInfoKHX
+ {
+ D3D12FenceSubmitInfoKHX( uint32_t waitSemaphoreValuesCount_ = 0, const uint64_t* pWaitSemaphoreValues_ = nullptr, uint32_t signalSemaphoreValuesCount_ = 0, const uint64_t* pSignalSemaphoreValues_ = nullptr )
+ : sType( StructureType::eD3D12FenceSubmitInfoKHX )
+ , pNext( nullptr )
+ , waitSemaphoreValuesCount( waitSemaphoreValuesCount_ )
+ , pWaitSemaphoreValues( pWaitSemaphoreValues_ )
+ , signalSemaphoreValuesCount( signalSemaphoreValuesCount_ )
+ , pSignalSemaphoreValues( pSignalSemaphoreValues_ )
+ {
+ }
+
+ D3D12FenceSubmitInfoKHX( VkD3D12FenceSubmitInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(D3D12FenceSubmitInfoKHX) );
+ }
+
+ D3D12FenceSubmitInfoKHX& operator=( VkD3D12FenceSubmitInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(D3D12FenceSubmitInfoKHX) );
+ return *this;
+ }
+
+ D3D12FenceSubmitInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ D3D12FenceSubmitInfoKHX& setWaitSemaphoreValuesCount( uint32_t waitSemaphoreValuesCount_ )
+ {
+ waitSemaphoreValuesCount = waitSemaphoreValuesCount_;
+ return *this;
+ }
+
+ D3D12FenceSubmitInfoKHX& setPWaitSemaphoreValues( const uint64_t* pWaitSemaphoreValues_ )
+ {
+ pWaitSemaphoreValues = pWaitSemaphoreValues_;
+ return *this;
+ }
+
+ D3D12FenceSubmitInfoKHX& setSignalSemaphoreValuesCount( uint32_t signalSemaphoreValuesCount_ )
+ {
+ signalSemaphoreValuesCount = signalSemaphoreValuesCount_;
+ return *this;
+ }
+
+ D3D12FenceSubmitInfoKHX& setPSignalSemaphoreValues( const uint64_t* pSignalSemaphoreValues_ )
+ {
+ pSignalSemaphoreValues = pSignalSemaphoreValues_;
+ return *this;
+ }
+
+ operator const VkD3D12FenceSubmitInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkD3D12FenceSubmitInfoKHX*>(this);
+ }
+
+ bool operator==( D3D12FenceSubmitInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( waitSemaphoreValuesCount == rhs.waitSemaphoreValuesCount )
+ && ( pWaitSemaphoreValues == rhs.pWaitSemaphoreValues )
+ && ( signalSemaphoreValuesCount == rhs.signalSemaphoreValuesCount )
+ && ( pSignalSemaphoreValues == rhs.pSignalSemaphoreValues );
+ }
+
+ bool operator!=( D3D12FenceSubmitInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ uint32_t waitSemaphoreValuesCount;
+ const uint64_t* pWaitSemaphoreValues;
+ uint32_t signalSemaphoreValuesCount;
+ const uint64_t* pSignalSemaphoreValues;
+ };
+ static_assert( sizeof( D3D12FenceSubmitInfoKHX ) == sizeof( VkD3D12FenceSubmitInfoKHX ), "struct and wrapper have different size!" );
+#endif /*VK_USE_PLATFORM_WIN32_KHX*/
+
+ struct PhysicalDeviceMultiviewFeaturesKHX
+ {
+ PhysicalDeviceMultiviewFeaturesKHX( Bool32 multiview_ = 0, Bool32 multiviewGeometryShader_ = 0, Bool32 multiviewTessellationShader_ = 0 )
+ : sType( StructureType::ePhysicalDeviceMultiviewFeaturesKHX )
+ , pNext( nullptr )
+ , multiview( multiview_ )
+ , multiviewGeometryShader( multiviewGeometryShader_ )
+ , multiviewTessellationShader( multiviewTessellationShader_ )
+ {
+ }
+
+ PhysicalDeviceMultiviewFeaturesKHX( VkPhysicalDeviceMultiviewFeaturesKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PhysicalDeviceMultiviewFeaturesKHX) );
+ }
+
+ PhysicalDeviceMultiviewFeaturesKHX& operator=( VkPhysicalDeviceMultiviewFeaturesKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PhysicalDeviceMultiviewFeaturesKHX) );
+ return *this;
+ }
+
+ PhysicalDeviceMultiviewFeaturesKHX& setPNext( void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PhysicalDeviceMultiviewFeaturesKHX& setMultiview( Bool32 multiview_ )
+ {
+ multiview = multiview_;
+ return *this;
+ }
+
+ PhysicalDeviceMultiviewFeaturesKHX& setMultiviewGeometryShader( Bool32 multiviewGeometryShader_ )
+ {
+ multiviewGeometryShader = multiviewGeometryShader_;
+ return *this;
+ }
+
+ PhysicalDeviceMultiviewFeaturesKHX& setMultiviewTessellationShader( Bool32 multiviewTessellationShader_ )
+ {
+ multiviewTessellationShader = multiviewTessellationShader_;
+ return *this;
+ }
+
+ operator const VkPhysicalDeviceMultiviewFeaturesKHX&() const
+ {
+ return *reinterpret_cast<const VkPhysicalDeviceMultiviewFeaturesKHX*>(this);
+ }
+
+ bool operator==( PhysicalDeviceMultiviewFeaturesKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( multiview == rhs.multiview )
+ && ( multiviewGeometryShader == rhs.multiviewGeometryShader )
+ && ( multiviewTessellationShader == rhs.multiviewTessellationShader );
+ }
+
+ bool operator!=( PhysicalDeviceMultiviewFeaturesKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ void* pNext;
+ Bool32 multiview;
+ Bool32 multiviewGeometryShader;
+ Bool32 multiviewTessellationShader;
+ };
+ static_assert( sizeof( PhysicalDeviceMultiviewFeaturesKHX ) == sizeof( VkPhysicalDeviceMultiviewFeaturesKHX ), "struct and wrapper have different size!" );
+
+ struct PhysicalDeviceMultiviewPropertiesKHX
+ {
+ operator const VkPhysicalDeviceMultiviewPropertiesKHX&() const
+ {
+ return *reinterpret_cast<const VkPhysicalDeviceMultiviewPropertiesKHX*>(this);
+ }
+
+ bool operator==( PhysicalDeviceMultiviewPropertiesKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( maxMultiviewViewCount == rhs.maxMultiviewViewCount )
+ && ( maxMultiviewInstanceIndex == rhs.maxMultiviewInstanceIndex );
+ }
+
+ bool operator!=( PhysicalDeviceMultiviewPropertiesKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ void* pNext;
+ uint32_t maxMultiviewViewCount;
+ uint32_t maxMultiviewInstanceIndex;
+ };
+ static_assert( sizeof( PhysicalDeviceMultiviewPropertiesKHX ) == sizeof( VkPhysicalDeviceMultiviewPropertiesKHX ), "struct and wrapper have different size!" );
+
+ struct RenderPassMultiviewCreateInfoKHX
+ {
+ RenderPassMultiviewCreateInfoKHX( uint32_t subpassCount_ = 0, const uint32_t* pViewMasks_ = nullptr, uint32_t dependencyCount_ = 0, const int32_t* pViewOffsets_ = nullptr, uint32_t correlationMaskCount_ = 0, const uint32_t* pCorrelationMasks_ = nullptr )
+ : sType( StructureType::eRenderPassMultiviewCreateInfoKHX )
+ , pNext( nullptr )
+ , subpassCount( subpassCount_ )
+ , pViewMasks( pViewMasks_ )
+ , dependencyCount( dependencyCount_ )
+ , pViewOffsets( pViewOffsets_ )
+ , correlationMaskCount( correlationMaskCount_ )
+ , pCorrelationMasks( pCorrelationMasks_ )
+ {
+ }
+
+ RenderPassMultiviewCreateInfoKHX( VkRenderPassMultiviewCreateInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(RenderPassMultiviewCreateInfoKHX) );
+ }
+
+ RenderPassMultiviewCreateInfoKHX& operator=( VkRenderPassMultiviewCreateInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(RenderPassMultiviewCreateInfoKHX) );
+ return *this;
+ }
+
+ RenderPassMultiviewCreateInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ RenderPassMultiviewCreateInfoKHX& setSubpassCount( uint32_t subpassCount_ )
+ {
+ subpassCount = subpassCount_;
+ return *this;
+ }
+
+ RenderPassMultiviewCreateInfoKHX& setPViewMasks( const uint32_t* pViewMasks_ )
+ {
+ pViewMasks = pViewMasks_;
+ return *this;
+ }
+
+ RenderPassMultiviewCreateInfoKHX& setDependencyCount( uint32_t dependencyCount_ )
+ {
+ dependencyCount = dependencyCount_;
+ return *this;
+ }
+
+ RenderPassMultiviewCreateInfoKHX& setPViewOffsets( const int32_t* pViewOffsets_ )
+ {
+ pViewOffsets = pViewOffsets_;
+ return *this;
+ }
+
+ RenderPassMultiviewCreateInfoKHX& setCorrelationMaskCount( uint32_t correlationMaskCount_ )
+ {
+ correlationMaskCount = correlationMaskCount_;
+ return *this;
+ }
+
+ RenderPassMultiviewCreateInfoKHX& setPCorrelationMasks( const uint32_t* pCorrelationMasks_ )
+ {
+ pCorrelationMasks = pCorrelationMasks_;
+ return *this;
+ }
+
+ operator const VkRenderPassMultiviewCreateInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkRenderPassMultiviewCreateInfoKHX*>(this);
+ }
+
+ bool operator==( RenderPassMultiviewCreateInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( subpassCount == rhs.subpassCount )
+ && ( pViewMasks == rhs.pViewMasks )
+ && ( dependencyCount == rhs.dependencyCount )
+ && ( pViewOffsets == rhs.pViewOffsets )
+ && ( correlationMaskCount == rhs.correlationMaskCount )
+ && ( pCorrelationMasks == rhs.pCorrelationMasks );
+ }
+
+ bool operator!=( RenderPassMultiviewCreateInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ uint32_t subpassCount;
+ const uint32_t* pViewMasks;
+ uint32_t dependencyCount;
+ const int32_t* pViewOffsets;
+ uint32_t correlationMaskCount;
+ const uint32_t* pCorrelationMasks;
+ };
+ static_assert( sizeof( RenderPassMultiviewCreateInfoKHX ) == sizeof( VkRenderPassMultiviewCreateInfoKHX ), "struct and wrapper have different size!" );
+
+ struct BindBufferMemoryInfoKHX
+ {
+ BindBufferMemoryInfoKHX( Buffer buffer_ = Buffer(), DeviceMemory memory_ = DeviceMemory(), DeviceSize memoryOffset_ = 0, uint32_t deviceIndexCount_ = 0, const uint32_t* pDeviceIndices_ = nullptr )
+ : sType( StructureType::eBindBufferMemoryInfoKHX )
+ , pNext( nullptr )
+ , buffer( buffer_ )
+ , memory( memory_ )
+ , memoryOffset( memoryOffset_ )
+ , deviceIndexCount( deviceIndexCount_ )
+ , pDeviceIndices( pDeviceIndices_ )
+ {
+ }
+
+ BindBufferMemoryInfoKHX( VkBindBufferMemoryInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(BindBufferMemoryInfoKHX) );
+ }
+
+ BindBufferMemoryInfoKHX& operator=( VkBindBufferMemoryInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(BindBufferMemoryInfoKHX) );
+ return *this;
+ }
+
+ BindBufferMemoryInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ BindBufferMemoryInfoKHX& setBuffer( Buffer buffer_ )
+ {
+ buffer = buffer_;
+ return *this;
+ }
+
+ BindBufferMemoryInfoKHX& setMemory( DeviceMemory memory_ )
+ {
+ memory = memory_;
+ return *this;
+ }
+
+ BindBufferMemoryInfoKHX& setMemoryOffset( DeviceSize memoryOffset_ )
+ {
+ memoryOffset = memoryOffset_;
+ return *this;
+ }
+
+ BindBufferMemoryInfoKHX& setDeviceIndexCount( uint32_t deviceIndexCount_ )
+ {
+ deviceIndexCount = deviceIndexCount_;
+ return *this;
+ }
+
+ BindBufferMemoryInfoKHX& setPDeviceIndices( const uint32_t* pDeviceIndices_ )
+ {
+ pDeviceIndices = pDeviceIndices_;
+ return *this;
+ }
+
+ operator const VkBindBufferMemoryInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkBindBufferMemoryInfoKHX*>(this);
+ }
+
+ bool operator==( BindBufferMemoryInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( buffer == rhs.buffer )
+ && ( memory == rhs.memory )
+ && ( memoryOffset == rhs.memoryOffset )
+ && ( deviceIndexCount == rhs.deviceIndexCount )
+ && ( pDeviceIndices == rhs.pDeviceIndices );
+ }
+
+ bool operator!=( BindBufferMemoryInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ Buffer buffer;
+ DeviceMemory memory;
+ DeviceSize memoryOffset;
+ uint32_t deviceIndexCount;
+ const uint32_t* pDeviceIndices;
+ };
+ static_assert( sizeof( BindBufferMemoryInfoKHX ) == sizeof( VkBindBufferMemoryInfoKHX ), "struct and wrapper have different size!" );
+
+ struct BindImageMemoryInfoKHX
+ {
+ BindImageMemoryInfoKHX( Image image_ = Image(), DeviceMemory memory_ = DeviceMemory(), DeviceSize memoryOffset_ = 0, uint32_t deviceIndexCount_ = 0, const uint32_t* pDeviceIndices_ = nullptr, uint32_t SFRRectCount_ = 0, const Rect2D* pSFRRects_ = nullptr )
+ : sType( StructureType::eBindImageMemoryInfoKHX )
+ , pNext( nullptr )
+ , image( image_ )
+ , memory( memory_ )
+ , memoryOffset( memoryOffset_ )
+ , deviceIndexCount( deviceIndexCount_ )
+ , pDeviceIndices( pDeviceIndices_ )
+ , SFRRectCount( SFRRectCount_ )
+ , pSFRRects( pSFRRects_ )
+ {
+ }
+
+ BindImageMemoryInfoKHX( VkBindImageMemoryInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(BindImageMemoryInfoKHX) );
+ }
+
+ BindImageMemoryInfoKHX& operator=( VkBindImageMemoryInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(BindImageMemoryInfoKHX) );
+ return *this;
+ }
+
+ BindImageMemoryInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ BindImageMemoryInfoKHX& setImage( Image image_ )
+ {
+ image = image_;
+ return *this;
+ }
+
+ BindImageMemoryInfoKHX& setMemory( DeviceMemory memory_ )
+ {
+ memory = memory_;
+ return *this;
+ }
+
+ BindImageMemoryInfoKHX& setMemoryOffset( DeviceSize memoryOffset_ )
+ {
+ memoryOffset = memoryOffset_;
+ return *this;
+ }
+
+ BindImageMemoryInfoKHX& setDeviceIndexCount( uint32_t deviceIndexCount_ )
+ {
+ deviceIndexCount = deviceIndexCount_;
+ return *this;
+ }
+
+ BindImageMemoryInfoKHX& setPDeviceIndices( const uint32_t* pDeviceIndices_ )
+ {
+ pDeviceIndices = pDeviceIndices_;
+ return *this;
+ }
+
+ BindImageMemoryInfoKHX& setSFRRectCount( uint32_t SFRRectCount_ )
+ {
+ SFRRectCount = SFRRectCount_;
+ return *this;
+ }
+
+ BindImageMemoryInfoKHX& setPSFRRects( const Rect2D* pSFRRects_ )
+ {
+ pSFRRects = pSFRRects_;
+ return *this;
+ }
+
+ operator const VkBindImageMemoryInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkBindImageMemoryInfoKHX*>(this);
+ }
+
+ bool operator==( BindImageMemoryInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( image == rhs.image )
+ && ( memory == rhs.memory )
+ && ( memoryOffset == rhs.memoryOffset )
+ && ( deviceIndexCount == rhs.deviceIndexCount )
+ && ( pDeviceIndices == rhs.pDeviceIndices )
+ && ( SFRRectCount == rhs.SFRRectCount )
+ && ( pSFRRects == rhs.pSFRRects );
+ }
+
+ bool operator!=( BindImageMemoryInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ Image image;
+ DeviceMemory memory;
+ DeviceSize memoryOffset;
+ uint32_t deviceIndexCount;
+ const uint32_t* pDeviceIndices;
+ uint32_t SFRRectCount;
+ const Rect2D* pSFRRects;
+ };
+ static_assert( sizeof( BindImageMemoryInfoKHX ) == sizeof( VkBindImageMemoryInfoKHX ), "struct and wrapper have different size!" );
+
+ struct DeviceGroupRenderPassBeginInfoKHX
+ {
+ DeviceGroupRenderPassBeginInfoKHX( uint32_t deviceMask_ = 0, uint32_t deviceRenderAreaCount_ = 0, const Rect2D* pDeviceRenderAreas_ = nullptr )
+ : sType( StructureType::eDeviceGroupRenderPassBeginInfoKHX )
+ , pNext( nullptr )
+ , deviceMask( deviceMask_ )
+ , deviceRenderAreaCount( deviceRenderAreaCount_ )
+ , pDeviceRenderAreas( pDeviceRenderAreas_ )
+ {
+ }
+
+ DeviceGroupRenderPassBeginInfoKHX( VkDeviceGroupRenderPassBeginInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DeviceGroupRenderPassBeginInfoKHX) );
+ }
+
+ DeviceGroupRenderPassBeginInfoKHX& operator=( VkDeviceGroupRenderPassBeginInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DeviceGroupRenderPassBeginInfoKHX) );
+ return *this;
+ }
+
+ DeviceGroupRenderPassBeginInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DeviceGroupRenderPassBeginInfoKHX& setDeviceMask( uint32_t deviceMask_ )
+ {
+ deviceMask = deviceMask_;
+ return *this;
+ }
+
+ DeviceGroupRenderPassBeginInfoKHX& setDeviceRenderAreaCount( uint32_t deviceRenderAreaCount_ )
+ {
+ deviceRenderAreaCount = deviceRenderAreaCount_;
+ return *this;
+ }
+
+ DeviceGroupRenderPassBeginInfoKHX& setPDeviceRenderAreas( const Rect2D* pDeviceRenderAreas_ )
+ {
+ pDeviceRenderAreas = pDeviceRenderAreas_;
+ return *this;
+ }
+
+ operator const VkDeviceGroupRenderPassBeginInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkDeviceGroupRenderPassBeginInfoKHX*>(this);
+ }
+
+ bool operator==( DeviceGroupRenderPassBeginInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( deviceMask == rhs.deviceMask )
+ && ( deviceRenderAreaCount == rhs.deviceRenderAreaCount )
+ && ( pDeviceRenderAreas == rhs.pDeviceRenderAreas );
+ }
+
+ bool operator!=( DeviceGroupRenderPassBeginInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ uint32_t deviceMask;
+ uint32_t deviceRenderAreaCount;
+ const Rect2D* pDeviceRenderAreas;
+ };
+ static_assert( sizeof( DeviceGroupRenderPassBeginInfoKHX ) == sizeof( VkDeviceGroupRenderPassBeginInfoKHX ), "struct and wrapper have different size!" );
+
+ struct DeviceGroupCommandBufferBeginInfoKHX
+ {
+ DeviceGroupCommandBufferBeginInfoKHX( uint32_t deviceMask_ = 0 )
+ : sType( StructureType::eDeviceGroupCommandBufferBeginInfoKHX )
+ , pNext( nullptr )
+ , deviceMask( deviceMask_ )
+ {
+ }
+
+ DeviceGroupCommandBufferBeginInfoKHX( VkDeviceGroupCommandBufferBeginInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DeviceGroupCommandBufferBeginInfoKHX) );
+ }
+
+ DeviceGroupCommandBufferBeginInfoKHX& operator=( VkDeviceGroupCommandBufferBeginInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DeviceGroupCommandBufferBeginInfoKHX) );
+ return *this;
+ }
+
+ DeviceGroupCommandBufferBeginInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DeviceGroupCommandBufferBeginInfoKHX& setDeviceMask( uint32_t deviceMask_ )
+ {
+ deviceMask = deviceMask_;
+ return *this;
+ }
+
+ operator const VkDeviceGroupCommandBufferBeginInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkDeviceGroupCommandBufferBeginInfoKHX*>(this);
+ }
+
+ bool operator==( DeviceGroupCommandBufferBeginInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( deviceMask == rhs.deviceMask );
+ }
+
+ bool operator!=( DeviceGroupCommandBufferBeginInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ uint32_t deviceMask;
+ };
+ static_assert( sizeof( DeviceGroupCommandBufferBeginInfoKHX ) == sizeof( VkDeviceGroupCommandBufferBeginInfoKHX ), "struct and wrapper have different size!" );
+
+ struct DeviceGroupSubmitInfoKHX
+ {
+ DeviceGroupSubmitInfoKHX( uint32_t waitSemaphoreCount_ = 0, const uint32_t* pWaitSemaphoreDeviceIndices_ = nullptr, uint32_t commandBufferCount_ = 0, const uint32_t* pCommandBufferDeviceMasks_ = nullptr, uint32_t signalSemaphoreCount_ = 0, const uint32_t* pSignalSemaphoreDeviceIndices_ = nullptr )
+ : sType( StructureType::eDeviceGroupSubmitInfoKHX )
+ , pNext( nullptr )
+ , waitSemaphoreCount( waitSemaphoreCount_ )
+ , pWaitSemaphoreDeviceIndices( pWaitSemaphoreDeviceIndices_ )
+ , commandBufferCount( commandBufferCount_ )
+ , pCommandBufferDeviceMasks( pCommandBufferDeviceMasks_ )
+ , signalSemaphoreCount( signalSemaphoreCount_ )
+ , pSignalSemaphoreDeviceIndices( pSignalSemaphoreDeviceIndices_ )
+ {
+ }
+
+ DeviceGroupSubmitInfoKHX( VkDeviceGroupSubmitInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DeviceGroupSubmitInfoKHX) );
+ }
+
+ DeviceGroupSubmitInfoKHX& operator=( VkDeviceGroupSubmitInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DeviceGroupSubmitInfoKHX) );
+ return *this;
+ }
+
+ DeviceGroupSubmitInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DeviceGroupSubmitInfoKHX& setWaitSemaphoreCount( uint32_t waitSemaphoreCount_ )
+ {
+ waitSemaphoreCount = waitSemaphoreCount_;
+ return *this;
+ }
+
+ DeviceGroupSubmitInfoKHX& setPWaitSemaphoreDeviceIndices( const uint32_t* pWaitSemaphoreDeviceIndices_ )
+ {
+ pWaitSemaphoreDeviceIndices = pWaitSemaphoreDeviceIndices_;
+ return *this;
+ }
+
+ DeviceGroupSubmitInfoKHX& setCommandBufferCount( uint32_t commandBufferCount_ )
+ {
+ commandBufferCount = commandBufferCount_;
+ return *this;
+ }
+
+ DeviceGroupSubmitInfoKHX& setPCommandBufferDeviceMasks( const uint32_t* pCommandBufferDeviceMasks_ )
+ {
+ pCommandBufferDeviceMasks = pCommandBufferDeviceMasks_;
+ return *this;
+ }
+
+ DeviceGroupSubmitInfoKHX& setSignalSemaphoreCount( uint32_t signalSemaphoreCount_ )
+ {
+ signalSemaphoreCount = signalSemaphoreCount_;
+ return *this;
+ }
+
+ DeviceGroupSubmitInfoKHX& setPSignalSemaphoreDeviceIndices( const uint32_t* pSignalSemaphoreDeviceIndices_ )
+ {
+ pSignalSemaphoreDeviceIndices = pSignalSemaphoreDeviceIndices_;
+ return *this;
+ }
+
+ operator const VkDeviceGroupSubmitInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkDeviceGroupSubmitInfoKHX*>(this);
+ }
+
+ bool operator==( DeviceGroupSubmitInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( waitSemaphoreCount == rhs.waitSemaphoreCount )
+ && ( pWaitSemaphoreDeviceIndices == rhs.pWaitSemaphoreDeviceIndices )
+ && ( commandBufferCount == rhs.commandBufferCount )
+ && ( pCommandBufferDeviceMasks == rhs.pCommandBufferDeviceMasks )
+ && ( signalSemaphoreCount == rhs.signalSemaphoreCount )
+ && ( pSignalSemaphoreDeviceIndices == rhs.pSignalSemaphoreDeviceIndices );
+ }
+
+ bool operator!=( DeviceGroupSubmitInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ uint32_t waitSemaphoreCount;
+ const uint32_t* pWaitSemaphoreDeviceIndices;
+ uint32_t commandBufferCount;
+ const uint32_t* pCommandBufferDeviceMasks;
+ uint32_t signalSemaphoreCount;
+ const uint32_t* pSignalSemaphoreDeviceIndices;
+ };
+ static_assert( sizeof( DeviceGroupSubmitInfoKHX ) == sizeof( VkDeviceGroupSubmitInfoKHX ), "struct and wrapper have different size!" );
+
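DeviceGroupSubmitInfoKHX extends an ordinary vk::SubmitInfo through its pNext chain: the device-mask array says which physical devices in the group execute each command buffer, and the index arrays pin the wait and signal semaphore operations to a device. A rough sketch that routes a single command buffer to device 0; handles are hypothetical and the include assumption is as before:

vk::SubmitInfo makeDeviceGroupSubmit( const vk::CommandBuffer* pCommandBuffer,
                                      const uint32_t* pDeviceMask )   // e.g. points at the value 0x1 (device index 0)
{
  static vk::DeviceGroupSubmitInfoKHX groupInfo;   // static only so it outlives the returned SubmitInfo in this sketch
  groupInfo.setCommandBufferCount( 1 )
           .setPCommandBufferDeviceMasks( pDeviceMask );

  vk::SubmitInfo submitInfo;
  submitInfo.setPNext( &groupInfo )
            .setCommandBufferCount( 1 )
            .setPCommandBuffers( pCommandBuffer );
  return submitInfo;
}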
+ struct DeviceGroupBindSparseInfoKHX
+ {
+ DeviceGroupBindSparseInfoKHX( uint32_t resourceDeviceIndex_ = 0, uint32_t memoryDeviceIndex_ = 0 )
+ : sType( StructureType::eDeviceGroupBindSparseInfoKHX )
+ , pNext( nullptr )
+ , resourceDeviceIndex( resourceDeviceIndex_ )
+ , memoryDeviceIndex( memoryDeviceIndex_ )
+ {
+ }
+
+ DeviceGroupBindSparseInfoKHX( VkDeviceGroupBindSparseInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DeviceGroupBindSparseInfoKHX) );
+ }
+
+ DeviceGroupBindSparseInfoKHX& operator=( VkDeviceGroupBindSparseInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DeviceGroupBindSparseInfoKHX) );
+ return *this;
+ }
+
+ DeviceGroupBindSparseInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DeviceGroupBindSparseInfoKHX& setResourceDeviceIndex( uint32_t resourceDeviceIndex_ )
+ {
+ resourceDeviceIndex = resourceDeviceIndex_;
+ return *this;
+ }
+
+ DeviceGroupBindSparseInfoKHX& setMemoryDeviceIndex( uint32_t memoryDeviceIndex_ )
+ {
+ memoryDeviceIndex = memoryDeviceIndex_;
+ return *this;
+ }
+
+ operator const VkDeviceGroupBindSparseInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkDeviceGroupBindSparseInfoKHX*>(this);
+ }
+
+ bool operator==( DeviceGroupBindSparseInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( resourceDeviceIndex == rhs.resourceDeviceIndex )
+ && ( memoryDeviceIndex == rhs.memoryDeviceIndex );
+ }
+
+ bool operator!=( DeviceGroupBindSparseInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ uint32_t resourceDeviceIndex;
+ uint32_t memoryDeviceIndex;
+ };
+ static_assert( sizeof( DeviceGroupBindSparseInfoKHX ) == sizeof( VkDeviceGroupBindSparseInfoKHX ), "struct and wrapper have different size!" );
+
+ struct ImageSwapchainCreateInfoKHX
+ {
+ ImageSwapchainCreateInfoKHX( SwapchainKHR swapchain_ = SwapchainKHR() )
+ : sType( StructureType::eImageSwapchainCreateInfoKHX )
+ , pNext( nullptr )
+ , swapchain( swapchain_ )
+ {
+ }
+
+ ImageSwapchainCreateInfoKHX( VkImageSwapchainCreateInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImageSwapchainCreateInfoKHX) );
+ }
+
+ ImageSwapchainCreateInfoKHX& operator=( VkImageSwapchainCreateInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImageSwapchainCreateInfoKHX) );
+ return *this;
+ }
+
+ ImageSwapchainCreateInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ ImageSwapchainCreateInfoKHX& setSwapchain( SwapchainKHR swapchain_ )
+ {
+ swapchain = swapchain_;
+ return *this;
+ }
+
+ operator const VkImageSwapchainCreateInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkImageSwapchainCreateInfoKHX*>(this);
+ }
+
+ bool operator==( ImageSwapchainCreateInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( swapchain == rhs.swapchain );
+ }
+
+ bool operator!=( ImageSwapchainCreateInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ SwapchainKHR swapchain;
+ };
+ static_assert( sizeof( ImageSwapchainCreateInfoKHX ) == sizeof( VkImageSwapchainCreateInfoKHX ), "struct and wrapper have different size!" );
+
+ struct BindImageMemorySwapchainInfoKHX
+ {
+ BindImageMemorySwapchainInfoKHX( SwapchainKHR swapchain_ = SwapchainKHR(), uint32_t imageIndex_ = 0 )
+ : sType( StructureType::eBindImageMemorySwapchainInfoKHX )
+ , pNext( nullptr )
+ , swapchain( swapchain_ )
+ , imageIndex( imageIndex_ )
+ {
+ }
+
+ BindImageMemorySwapchainInfoKHX( VkBindImageMemorySwapchainInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(BindImageMemorySwapchainInfoKHX) );
+ }
+
+ BindImageMemorySwapchainInfoKHX& operator=( VkBindImageMemorySwapchainInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(BindImageMemorySwapchainInfoKHX) );
+ return *this;
+ }
+
+ BindImageMemorySwapchainInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ BindImageMemorySwapchainInfoKHX& setSwapchain( SwapchainKHR swapchain_ )
+ {
+ swapchain = swapchain_;
+ return *this;
+ }
+
+ BindImageMemorySwapchainInfoKHX& setImageIndex( uint32_t imageIndex_ )
+ {
+ imageIndex = imageIndex_;
+ return *this;
+ }
+
+ operator const VkBindImageMemorySwapchainInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkBindImageMemorySwapchainInfoKHX*>(this);
+ }
+
+ bool operator==( BindImageMemorySwapchainInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( swapchain == rhs.swapchain )
+ && ( imageIndex == rhs.imageIndex );
+ }
+
+ bool operator!=( BindImageMemorySwapchainInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ SwapchainKHR swapchain;
+ uint32_t imageIndex;
+ };
+ static_assert( sizeof( BindImageMemorySwapchainInfoKHX ) == sizeof( VkBindImageMemorySwapchainInfoKHX ), "struct and wrapper have different size!" );
+
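ImageSwapchainCreateInfoKHX and BindImageMemorySwapchainInfoKHX work as a pair: the first is chained into the ImageCreateInfo when the image is created, the second into the BindImageMemoryInfoKHX when it is bound, so the new image aliases the memory of a particular swapchain image. A rough sketch of the chaining only (creation and bind calls omitted, handles hypothetical, same include assumption as above):

void describeSwapchainImageAlias( vk::SwapchainKHR swapchain, vk::Image image )
{
  // At image-creation time: request an image backed by the swapchain's memory.
  vk::ImageSwapchainCreateInfoKHX imageSwapchainInfo = vk::ImageSwapchainCreateInfoKHX().setSwapchain( swapchain );
  vk::ImageCreateInfo imageCreateInfo;
  imageCreateInfo.setPNext( &imageSwapchainInfo );   // the remaining fields must match the swapchain's images

  // At bind time: pick which swapchain image (index 0 here) backs the new image.
  vk::BindImageMemorySwapchainInfoKHX bindSwapchainInfo =
      vk::BindImageMemorySwapchainInfoKHX().setSwapchain( swapchain ).setImageIndex( 0 );
  vk::BindImageMemoryInfoKHX bindInfo = vk::BindImageMemoryInfoKHX()
      .setPNext( &bindSwapchainInfo )
      .setImage( image );                            // memory is left null; the swapchain provides it

  (void)imageCreateInfo; (void)bindInfo;             // the actual create/bind calls are omitted in this sketch
}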
+ struct AcquireNextImageInfoKHX
+ {
+ AcquireNextImageInfoKHX( SwapchainKHR swapchain_ = SwapchainKHR(), uint64_t timeout_ = 0, Semaphore semaphore_ = Semaphore(), Fence fence_ = Fence(), uint32_t deviceMask_ = 0 )
+ : sType( StructureType::eAcquireNextImageInfoKHX )
+ , pNext( nullptr )
+ , swapchain( swapchain_ )
+ , timeout( timeout_ )
+ , semaphore( semaphore_ )
+ , fence( fence_ )
+ , deviceMask( deviceMask_ )
+ {
+ }
+
+ AcquireNextImageInfoKHX( VkAcquireNextImageInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(AcquireNextImageInfoKHX) );
+ }
+
+ AcquireNextImageInfoKHX& operator=( VkAcquireNextImageInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(AcquireNextImageInfoKHX) );
+ return *this;
+ }
+
+ AcquireNextImageInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ AcquireNextImageInfoKHX& setSwapchain( SwapchainKHR swapchain_ )
+ {
+ swapchain = swapchain_;
+ return *this;
+ }
+
+ AcquireNextImageInfoKHX& setTimeout( uint64_t timeout_ )
+ {
+ timeout = timeout_;
+ return *this;
+ }
+
+ AcquireNextImageInfoKHX& setSemaphore( Semaphore semaphore_ )
+ {
+ semaphore = semaphore_;
+ return *this;
+ }
+
+ AcquireNextImageInfoKHX& setFence( Fence fence_ )
+ {
+ fence = fence_;
+ return *this;
+ }
+
+ AcquireNextImageInfoKHX& setDeviceMask( uint32_t deviceMask_ )
+ {
+ deviceMask = deviceMask_;
+ return *this;
+ }
+
+ operator const VkAcquireNextImageInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkAcquireNextImageInfoKHX*>(this);
+ }
+
+ bool operator==( AcquireNextImageInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( swapchain == rhs.swapchain )
+ && ( timeout == rhs.timeout )
+ && ( semaphore == rhs.semaphore )
+ && ( fence == rhs.fence )
+ && ( deviceMask == rhs.deviceMask );
+ }
+
+ bool operator!=( AcquireNextImageInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ SwapchainKHR swapchain;
+ uint64_t timeout;
+ Semaphore semaphore;
+ Fence fence;
+ uint32_t deviceMask;
+ };
+ static_assert( sizeof( AcquireNextImageInfoKHX ) == sizeof( VkAcquireNextImageInfoKHX ), "struct and wrapper have different size!" );
+
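AcquireNextImageInfoKHX folds the parameters of vkAcquireNextImageKHR (swapchain, timeout, semaphore, fence) into one struct and adds a deviceMask naming the physical devices in the group that will use the acquired image. A small sketch, handles hypothetical and the same include assumed; the acquireNextImage2KHX wrapper at the end is an assumption and is left commented out:

VkAcquireNextImageInfoKHX makeAcquireInfo( vk::SwapchainKHR swapchain, vk::Semaphore imageAvailable )
{
  vk::AcquireNextImageInfoKHX acquireInfo = vk::AcquireNextImageInfoKHX()
    .setSwapchain( swapchain )
    .setTimeout( UINT64_MAX )          // block until an image is available
    .setSemaphore( imageAvailable )
    .setDeviceMask( 1 );               // bit 0 -> device index 0 in the device group
  // uint32_t imageIndex = device.acquireNextImage2KHX( acquireInfo ).value;   // assumed vulkan.hpp wrapper for vkAcquireNextImage2KHX
  return acquireInfo;                  // implicit conversion to the C struct
}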
+ struct HdrMetadataEXT
+ {
+ HdrMetadataEXT( XYColorEXT displayPrimaryRed_ = XYColorEXT(), XYColorEXT displayPrimaryGreen_ = XYColorEXT(), XYColorEXT displayPrimaryBlue_ = XYColorEXT(), XYColorEXT whitePoint_ = XYColorEXT(), float maxLuminance_ = 0, float minLuminance_ = 0, float maxContentLightLevel_ = 0, float maxFrameAverageLightLevel_ = 0 )
+ : sType( StructureType::eHdrMetadataEXT )
+ , pNext( nullptr )
+ , displayPrimaryRed( displayPrimaryRed_ )
+ , displayPrimaryGreen( displayPrimaryGreen_ )
+ , displayPrimaryBlue( displayPrimaryBlue_ )
+ , whitePoint( whitePoint_ )
+ , maxLuminance( maxLuminance_ )
+ , minLuminance( minLuminance_ )
+ , maxContentLightLevel( maxContentLightLevel_ )
+ , maxFrameAverageLightLevel( maxFrameAverageLightLevel_ )
+ {
+ }
+
+ HdrMetadataEXT( VkHdrMetadataEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(HdrMetadataEXT) );
+ }
+
+ HdrMetadataEXT& operator=( VkHdrMetadataEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(HdrMetadataEXT) );
+ return *this;
+ }
+
+ HdrMetadataEXT& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ HdrMetadataEXT& setDisplayPrimaryRed( XYColorEXT displayPrimaryRed_ )
+ {
+ displayPrimaryRed = displayPrimaryRed_;
+ return *this;
+ }
+
+ HdrMetadataEXT& setDisplayPrimaryGreen( XYColorEXT displayPrimaryGreen_ )
+ {
+ displayPrimaryGreen = displayPrimaryGreen_;
+ return *this;
+ }
+
+ HdrMetadataEXT& setDisplayPrimaryBlue( XYColorEXT displayPrimaryBlue_ )
+ {
+ displayPrimaryBlue = displayPrimaryBlue_;
+ return *this;
+ }
+
+ HdrMetadataEXT& setWhitePoint( XYColorEXT whitePoint_ )
+ {
+ whitePoint = whitePoint_;
+ return *this;
+ }
+
+ HdrMetadataEXT& setMaxLuminance( float maxLuminance_ )
+ {
+ maxLuminance = maxLuminance_;
+ return *this;
+ }
+
+ HdrMetadataEXT& setMinLuminance( float minLuminance_ )
+ {
+ minLuminance = minLuminance_;
+ return *this;
+ }
+
+ HdrMetadataEXT& setMaxContentLightLevel( float maxContentLightLevel_ )
+ {
+ maxContentLightLevel = maxContentLightLevel_;
+ return *this;
+ }
+
+ HdrMetadataEXT& setMaxFrameAverageLightLevel( float maxFrameAverageLightLevel_ )
+ {
+ maxFrameAverageLightLevel = maxFrameAverageLightLevel_;
+ return *this;
+ }
+
+ operator const VkHdrMetadataEXT&() const
+ {
+ return *reinterpret_cast<const VkHdrMetadataEXT*>(this);
+ }
+
+ bool operator==( HdrMetadataEXT const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( displayPrimaryRed == rhs.displayPrimaryRed )
+ && ( displayPrimaryGreen == rhs.displayPrimaryGreen )
+ && ( displayPrimaryBlue == rhs.displayPrimaryBlue )
+ && ( whitePoint == rhs.whitePoint )
+ && ( maxLuminance == rhs.maxLuminance )
+ && ( minLuminance == rhs.minLuminance )
+ && ( maxContentLightLevel == rhs.maxContentLightLevel )
+ && ( maxFrameAverageLightLevel == rhs.maxFrameAverageLightLevel );
+ }
+
+ bool operator!=( HdrMetadataEXT const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ XYColorEXT displayPrimaryRed;
+ XYColorEXT displayPrimaryGreen;
+ XYColorEXT displayPrimaryBlue;
+ XYColorEXT whitePoint;
+ float maxLuminance;
+ float minLuminance;
+ float maxContentLightLevel;
+ float maxFrameAverageLightLevel;
+ };
+ static_assert( sizeof( HdrMetadataEXT ) == sizeof( VkHdrMetadataEXT ), "struct and wrapper have different size!" );
+
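HdrMetadataEXT describes the mastering display for VK_EXT_hdr_metadata. A sketch filling it with BT.2020 primaries, a D65 white point and example luminance values; it assumes XYColorEXT keeps the usual two-float constructor generated for simple structs, and the setHdrMetadataEXT member at the end is an assumed wrapper for vkSetHdrMetadataEXT, left commented out:

vk::HdrMetadataEXT makeHdr10Metadata()
{
  vk::HdrMetadataEXT metadata = vk::HdrMetadataEXT()
    .setDisplayPrimaryRed( vk::XYColorEXT( 0.708f, 0.292f ) )      // BT.2020 red
    .setDisplayPrimaryGreen( vk::XYColorEXT( 0.170f, 0.797f ) )    // BT.2020 green
    .setDisplayPrimaryBlue( vk::XYColorEXT( 0.131f, 0.046f ) )     // BT.2020 blue
    .setWhitePoint( vk::XYColorEXT( 0.3127f, 0.3290f ) )           // D65 white point
    .setMaxLuminance( 1000.0f )                                    // example values, in nits
    .setMinLuminance( 0.001f )
    .setMaxContentLightLevel( 1000.0f )
    .setMaxFrameAverageLightLevel( 400.0f );
  // device.setHdrMetadataEXT( 1, &swapchain, &metadata );         // assumed wrapper for vkSetHdrMetadataEXT
  return metadata;
}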
+ struct PresentTimesInfoGOOGLE
+ {
+ PresentTimesInfoGOOGLE( uint32_t swapchainCount_ = 0, const PresentTimeGOOGLE* pTimes_ = nullptr )
+ : sType( StructureType::ePresentTimesInfoGOOGLE )
+ , pNext( nullptr )
+ , swapchainCount( swapchainCount_ )
+ , pTimes( pTimes_ )
+ {
+ }
+
+ PresentTimesInfoGOOGLE( VkPresentTimesInfoGOOGLE const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PresentTimesInfoGOOGLE) );
+ }
+
+ PresentTimesInfoGOOGLE& operator=( VkPresentTimesInfoGOOGLE const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PresentTimesInfoGOOGLE) );
+ return *this;
+ }
+
+ PresentTimesInfoGOOGLE& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PresentTimesInfoGOOGLE& setSwapchainCount( uint32_t swapchainCount_ )
+ {
+ swapchainCount = swapchainCount_;
+ return *this;
+ }
+
+ PresentTimesInfoGOOGLE& setPTimes( const PresentTimeGOOGLE* pTimes_ )
+ {
+ pTimes = pTimes_;
+ return *this;
+ }
+
+ operator const VkPresentTimesInfoGOOGLE&() const
+ {
+ return *reinterpret_cast<const VkPresentTimesInfoGOOGLE*>(this);
+ }
+
+ bool operator==( PresentTimesInfoGOOGLE const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( swapchainCount == rhs.swapchainCount )
+ && ( pTimes == rhs.pTimes );
+ }
+
+ bool operator!=( PresentTimesInfoGOOGLE const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ uint32_t swapchainCount;
+ const PresentTimeGOOGLE* pTimes;
+ };
+ static_assert( sizeof( PresentTimesInfoGOOGLE ) == sizeof( VkPresentTimesInfoGOOGLE ), "struct and wrapper have different size!" );
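+ // Illustrative usage sketch, not part of the generated header: this struct is consumed by
+ // VK_GOOGLE_display_timing by chaining it into a PresentInfoKHR through pNext; 'presentTime'
+ // and 'presentInfo' are assumed application state.
+ //   vk::PresentTimesInfoGOOGLE timesInfo( 1, &presentTime );
+ //   presentInfo.setPNext( &timesInfo );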
+
+#ifdef VK_USE_PLATFORM_IOS_MVK
+ struct IOSSurfaceCreateInfoMVK
+ {
+ IOSSurfaceCreateInfoMVK( IOSSurfaceCreateFlagsMVK flags_ = IOSSurfaceCreateFlagsMVK(), const void* pView_ = nullptr )
+ : sType( StructureType::eIOSSurfaceCreateInfoMVK )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , pView( pView_ )
+ {
+ }
+
+ IOSSurfaceCreateInfoMVK( VkIOSSurfaceCreateInfoMVK const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(IOSSurfaceCreateInfoMVK) );
+ }
+
+ IOSSurfaceCreateInfoMVK& operator=( VkIOSSurfaceCreateInfoMVK const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(IOSSurfaceCreateInfoMVK) );
+ return *this;
+ }
+
+ IOSSurfaceCreateInfoMVK& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ IOSSurfaceCreateInfoMVK& setFlags( IOSSurfaceCreateFlagsMVK flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ IOSSurfaceCreateInfoMVK& setPView( const void* pView_ )
+ {
+ pView = pView_;
+ return *this;
+ }
+
+ operator const VkIOSSurfaceCreateInfoMVK&() const
+ {
+ return *reinterpret_cast<const VkIOSSurfaceCreateInfoMVK*>(this);
+ }
+
+ bool operator==( IOSSurfaceCreateInfoMVK const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( pView == rhs.pView );
+ }
+
+ bool operator!=( IOSSurfaceCreateInfoMVK const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ IOSSurfaceCreateFlagsMVK flags;
+ const void* pView;
+ };
+ static_assert( sizeof( IOSSurfaceCreateInfoMVK ) == sizeof( VkIOSSurfaceCreateInfoMVK ), "struct and wrapper have different size!" );
+#endif /*VK_USE_PLATFORM_IOS_MVK*/
+
+#ifdef VK_USE_PLATFORM_MACOS_MVK
+ struct MacOSSurfaceCreateInfoMVK
+ {
+ MacOSSurfaceCreateInfoMVK( MacOSSurfaceCreateFlagsMVK flags_ = MacOSSurfaceCreateFlagsMVK(), const void* pView_ = nullptr )
+ : sType( StructureType::eMacOSSurfaceCreateInfoMVK )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , pView( pView_ )
+ {
+ }
+
+ MacOSSurfaceCreateInfoMVK( VkMacOSSurfaceCreateInfoMVK const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(MacOSSurfaceCreateInfoMVK) );
+ }
+
+ MacOSSurfaceCreateInfoMVK& operator=( VkMacOSSurfaceCreateInfoMVK const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(MacOSSurfaceCreateInfoMVK) );
+ return *this;
+ }
+
+ MacOSSurfaceCreateInfoMVK& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ MacOSSurfaceCreateInfoMVK& setFlags( MacOSSurfaceCreateFlagsMVK flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ MacOSSurfaceCreateInfoMVK& setPView( const void* pView_ )
+ {
+ pView = pView_;
+ return *this;
+ }
+
+ operator const VkMacOSSurfaceCreateInfoMVK&() const
+ {
+ return *reinterpret_cast<const VkMacOSSurfaceCreateInfoMVK*>(this);
+ }
+
+ bool operator==( MacOSSurfaceCreateInfoMVK const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( pView == rhs.pView );
+ }
+
+ bool operator!=( MacOSSurfaceCreateInfoMVK const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ MacOSSurfaceCreateFlagsMVK flags;
+ const void* pView;
+ };
+ static_assert( sizeof( MacOSSurfaceCreateInfoMVK ) == sizeof( VkMacOSSurfaceCreateInfoMVK ), "struct and wrapper have different size!" );
+#endif /*VK_USE_PLATFORM_MACOS_MVK*/
+
+ struct PipelineViewportWScalingStateCreateInfoNV
+ {
+ PipelineViewportWScalingStateCreateInfoNV( Bool32 viewportWScalingEnable_ = 0, uint32_t viewportCount_ = 0, const ViewportWScalingNV* pViewportWScalings_ = nullptr )
+ : sType( StructureType::ePipelineViewportWScalingStateCreateInfoNV )
+ , pNext( nullptr )
+ , viewportWScalingEnable( viewportWScalingEnable_ )
+ , viewportCount( viewportCount_ )
+ , pViewportWScalings( pViewportWScalings_ )
+ {
+ }
+
+ PipelineViewportWScalingStateCreateInfoNV( VkPipelineViewportWScalingStateCreateInfoNV const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineViewportWScalingStateCreateInfoNV) );
+ }
+
+ PipelineViewportWScalingStateCreateInfoNV& operator=( VkPipelineViewportWScalingStateCreateInfoNV const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineViewportWScalingStateCreateInfoNV) );
+ return *this;
+ }
+
+ PipelineViewportWScalingStateCreateInfoNV& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PipelineViewportWScalingStateCreateInfoNV& setViewportWScalingEnable( Bool32 viewportWScalingEnable_ )
+ {
+ viewportWScalingEnable = viewportWScalingEnable_;
+ return *this;
+ }
+
+ PipelineViewportWScalingStateCreateInfoNV& setViewportCount( uint32_t viewportCount_ )
+ {
+ viewportCount = viewportCount_;
+ return *this;
+ }
+
+ PipelineViewportWScalingStateCreateInfoNV& setPViewportWScalings( const ViewportWScalingNV* pViewportWScalings_ )
+ {
+ pViewportWScalings = pViewportWScalings_;
+ return *this;
+ }
+
+ operator const VkPipelineViewportWScalingStateCreateInfoNV&() const
+ {
+ return *reinterpret_cast<const VkPipelineViewportWScalingStateCreateInfoNV*>(this);
+ }
+
+ bool operator==( PipelineViewportWScalingStateCreateInfoNV const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( viewportWScalingEnable == rhs.viewportWScalingEnable )
+ && ( viewportCount == rhs.viewportCount )
+ && ( pViewportWScalings == rhs.pViewportWScalings );
+ }
+
+ bool operator!=( PipelineViewportWScalingStateCreateInfoNV const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ Bool32 viewportWScalingEnable;
+ uint32_t viewportCount;
+ const ViewportWScalingNV* pViewportWScalings;
+ };
+ static_assert( sizeof( PipelineViewportWScalingStateCreateInfoNV ) == sizeof( VkPipelineViewportWScalingStateCreateInfoNV ), "struct and wrapper have different size!" );
+
+ struct PhysicalDeviceDiscardRectanglePropertiesEXT
+ {
+ PhysicalDeviceDiscardRectanglePropertiesEXT( uint32_t maxDiscardRectangles_ = 0 )
+ : sType( StructureType::ePhysicalDeviceDiscardRectanglePropertiesEXT )
+ , pNext( nullptr )
+ , maxDiscardRectangles( maxDiscardRectangles_ )
+ {
+ }
+
+ PhysicalDeviceDiscardRectanglePropertiesEXT( VkPhysicalDeviceDiscardRectanglePropertiesEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PhysicalDeviceDiscardRectanglePropertiesEXT) );
+ }
+
+ PhysicalDeviceDiscardRectanglePropertiesEXT& operator=( VkPhysicalDeviceDiscardRectanglePropertiesEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PhysicalDeviceDiscardRectanglePropertiesEXT) );
+ return *this;
+ }
+
+ PhysicalDeviceDiscardRectanglePropertiesEXT& setPNext( void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PhysicalDeviceDiscardRectanglePropertiesEXT& setMaxDiscardRectangles( uint32_t maxDiscardRectangles_ )
+ {
+ maxDiscardRectangles = maxDiscardRectangles_;
+ return *this;
+ }
+
+ operator const VkPhysicalDeviceDiscardRectanglePropertiesEXT&() const
+ {
+ return *reinterpret_cast<const VkPhysicalDeviceDiscardRectanglePropertiesEXT*>(this);
+ }
+
+ bool operator==( PhysicalDeviceDiscardRectanglePropertiesEXT const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( maxDiscardRectangles == rhs.maxDiscardRectangles );
+ }
+
+ bool operator!=( PhysicalDeviceDiscardRectanglePropertiesEXT const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ void* pNext;
+ uint32_t maxDiscardRectangles;
+ };
+ static_assert( sizeof( PhysicalDeviceDiscardRectanglePropertiesEXT ) == sizeof( VkPhysicalDeviceDiscardRectanglePropertiesEXT ), "struct and wrapper have different size!" );
+
+ struct PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX
+ {
+ operator const VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX&() const
+ {
+ return *reinterpret_cast<const VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX*>(this);
+ }
+
+ bool operator==( PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( perViewPositionAllComponents == rhs.perViewPositionAllComponents );
+ }
+
+ bool operator!=( PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ void* pNext;
+ Bool32 perViewPositionAllComponents;
+ };
+ static_assert( sizeof( PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX ) == sizeof( VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX ), "struct and wrapper have different size!" );
+
+ struct PhysicalDeviceSurfaceInfo2KHR
+ {
+ PhysicalDeviceSurfaceInfo2KHR( SurfaceKHR surface_ = SurfaceKHR() )
+ : sType( StructureType::ePhysicalDeviceSurfaceInfo2KHR )
+ , pNext( nullptr )
+ , surface( surface_ )
+ {
+ }
+
+ PhysicalDeviceSurfaceInfo2KHR( VkPhysicalDeviceSurfaceInfo2KHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PhysicalDeviceSurfaceInfo2KHR) );
+ }
+
+ PhysicalDeviceSurfaceInfo2KHR& operator=( VkPhysicalDeviceSurfaceInfo2KHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PhysicalDeviceSurfaceInfo2KHR) );
+ return *this;
+ }
+
+ PhysicalDeviceSurfaceInfo2KHR& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PhysicalDeviceSurfaceInfo2KHR& setSurface( SurfaceKHR surface_ )
+ {
+ surface = surface_;
+ return *this;
+ }
+
+ operator const VkPhysicalDeviceSurfaceInfo2KHR&() const
+ {
+ return *reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR*>(this);
+ }
+
+ bool operator==( PhysicalDeviceSurfaceInfo2KHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( surface == rhs.surface );
+ }
+
+ bool operator!=( PhysicalDeviceSurfaceInfo2KHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ SurfaceKHR surface;
+ };
+ static_assert( sizeof( PhysicalDeviceSurfaceInfo2KHR ) == sizeof( VkPhysicalDeviceSurfaceInfo2KHR ), "struct and wrapper have different size!" );
+
+ enum class SubpassContents
+ {
+ eInline = VK_SUBPASS_CONTENTS_INLINE,
+ eSecondaryCommandBuffers = VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
+ };
+
+ struct PresentInfoKHR
+ {
+ PresentInfoKHR( uint32_t waitSemaphoreCount_ = 0, const Semaphore* pWaitSemaphores_ = nullptr, uint32_t swapchainCount_ = 0, const SwapchainKHR* pSwapchains_ = nullptr, const uint32_t* pImageIndices_ = nullptr, Result* pResults_ = nullptr )
+ : sType( StructureType::ePresentInfoKHR )
+ , pNext( nullptr )
+ , waitSemaphoreCount( waitSemaphoreCount_ )
+ , pWaitSemaphores( pWaitSemaphores_ )
+ , swapchainCount( swapchainCount_ )
+ , pSwapchains( pSwapchains_ )
+ , pImageIndices( pImageIndices_ )
+ , pResults( pResults_ )
+ {
+ }
+
+ PresentInfoKHR( VkPresentInfoKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PresentInfoKHR) );
+ }
+
+ PresentInfoKHR& operator=( VkPresentInfoKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PresentInfoKHR) );
+ return *this;
+ }
+
+ PresentInfoKHR& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PresentInfoKHR& setWaitSemaphoreCount( uint32_t waitSemaphoreCount_ )
+ {
+ waitSemaphoreCount = waitSemaphoreCount_;
+ return *this;
+ }
+
+ PresentInfoKHR& setPWaitSemaphores( const Semaphore* pWaitSemaphores_ )
+ {
+ pWaitSemaphores = pWaitSemaphores_;
+ return *this;
+ }
+
+ PresentInfoKHR& setSwapchainCount( uint32_t swapchainCount_ )
+ {
+ swapchainCount = swapchainCount_;
+ return *this;
+ }
+
+ PresentInfoKHR& setPSwapchains( const SwapchainKHR* pSwapchains_ )
+ {
+ pSwapchains = pSwapchains_;
+ return *this;
+ }
+
+ PresentInfoKHR& setPImageIndices( const uint32_t* pImageIndices_ )
+ {
+ pImageIndices = pImageIndices_;
+ return *this;
+ }
+
+ PresentInfoKHR& setPResults( Result* pResults_ )
+ {
+ pResults = pResults_;
+ return *this;
+ }
+
+ operator const VkPresentInfoKHR&() const
+ {
+ return *reinterpret_cast<const VkPresentInfoKHR*>(this);
+ }
+
+ bool operator==( PresentInfoKHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( waitSemaphoreCount == rhs.waitSemaphoreCount )
+ && ( pWaitSemaphores == rhs.pWaitSemaphores )
+ && ( swapchainCount == rhs.swapchainCount )
+ && ( pSwapchains == rhs.pSwapchains )
+ && ( pImageIndices == rhs.pImageIndices )
+ && ( pResults == rhs.pResults );
+ }
+
+ bool operator!=( PresentInfoKHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ uint32_t waitSemaphoreCount;
+ const Semaphore* pWaitSemaphores;
+ uint32_t swapchainCount;
+ const SwapchainKHR* pSwapchains;
+ const uint32_t* pImageIndices;
+ Result* pResults;
+ };
+ static_assert( sizeof( PresentInfoKHR ) == sizeof( VkPresentInfoKHR ), "struct and wrapper have different size!" );
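+ // Illustrative usage sketch, not part of the generated header: presenting one swapchain image
+ // after waiting on a render-finished semaphore; 'renderFinished', 'swapchain', 'imageIndex' and
+ // 'presentQueue' are assumed application state.
+ //   vk::PresentInfoKHR presentInfo = vk::PresentInfoKHR()
+ //     .setWaitSemaphoreCount( 1 )
+ //     .setPWaitSemaphores( &renderFinished )
+ //     .setSwapchainCount( 1 )
+ //     .setPSwapchains( &swapchain )
+ //     .setPImageIndices( &imageIndex );
+ //   presentQueue.presentKHR( presentInfo );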
+
+ enum class DynamicState
+ {
+ eViewport = VK_DYNAMIC_STATE_VIEWPORT,
+ eScissor = VK_DYNAMIC_STATE_SCISSOR,
+ eLineWidth = VK_DYNAMIC_STATE_LINE_WIDTH,
+ eDepthBias = VK_DYNAMIC_STATE_DEPTH_BIAS,
+ eBlendConstants = VK_DYNAMIC_STATE_BLEND_CONSTANTS,
+ eDepthBounds = VK_DYNAMIC_STATE_DEPTH_BOUNDS,
+ eStencilCompareMask = VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK,
+ eStencilWriteMask = VK_DYNAMIC_STATE_STENCIL_WRITE_MASK,
+ eStencilReference = VK_DYNAMIC_STATE_STENCIL_REFERENCE,
+ eViewportWScalingNV = VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV,
+ eDiscardRectangleEXT = VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT
+ };
+
+ struct PipelineDynamicStateCreateInfo
+ {
+ PipelineDynamicStateCreateInfo( PipelineDynamicStateCreateFlags flags_ = PipelineDynamicStateCreateFlags(), uint32_t dynamicStateCount_ = 0, const DynamicState* pDynamicStates_ = nullptr )
+ : sType( StructureType::ePipelineDynamicStateCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , dynamicStateCount( dynamicStateCount_ )
+ , pDynamicStates( pDynamicStates_ )
+ {
+ }
+
+ PipelineDynamicStateCreateInfo( VkPipelineDynamicStateCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineDynamicStateCreateInfo) );
+ }
+
+ PipelineDynamicStateCreateInfo& operator=( VkPipelineDynamicStateCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineDynamicStateCreateInfo) );
+ return *this;
+ }
+
+ PipelineDynamicStateCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PipelineDynamicStateCreateInfo& setFlags( PipelineDynamicStateCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ PipelineDynamicStateCreateInfo& setDynamicStateCount( uint32_t dynamicStateCount_ )
+ {
+ dynamicStateCount = dynamicStateCount_;
+ return *this;
+ }
+
+ PipelineDynamicStateCreateInfo& setPDynamicStates( const DynamicState* pDynamicStates_ )
+ {
+ pDynamicStates = pDynamicStates_;
+ return *this;
+ }
+
+ operator const VkPipelineDynamicStateCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkPipelineDynamicStateCreateInfo*>(this);
+ }
+
+ bool operator==( PipelineDynamicStateCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( dynamicStateCount == rhs.dynamicStateCount )
+ && ( pDynamicStates == rhs.pDynamicStates );
+ }
+
+ bool operator!=( PipelineDynamicStateCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ PipelineDynamicStateCreateFlags flags;
+ uint32_t dynamicStateCount;
+ const DynamicState* pDynamicStates;
+ };
+ static_assert( sizeof( PipelineDynamicStateCreateInfo ) == sizeof( VkPipelineDynamicStateCreateInfo ), "struct and wrapper have different size!" );
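+ // Illustrative usage sketch, not part of the generated header: marking viewport and scissor as
+ // dynamic so they are supplied per command buffer instead of being baked into the pipeline.
+ //   vk::DynamicState dynamicStates[] = { vk::DynamicState::eViewport, vk::DynamicState::eScissor };
+ //   vk::PipelineDynamicStateCreateInfo dynamicState = vk::PipelineDynamicStateCreateInfo()
+ //     .setDynamicStateCount( 2 )
+ //     .setPDynamicStates( dynamicStates );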
+
+ enum class DescriptorUpdateTemplateTypeKHR
+ {
+ eDescriptorSet = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR,
+ ePushDescriptors = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR
+ };
+
+ struct DescriptorUpdateTemplateCreateInfoKHR
+ {
+ DescriptorUpdateTemplateCreateInfoKHR( DescriptorUpdateTemplateCreateFlagsKHR flags_ = DescriptorUpdateTemplateCreateFlagsKHR(), uint32_t descriptorUpdateEntryCount_ = 0, const DescriptorUpdateTemplateEntryKHR* pDescriptorUpdateEntries_ = nullptr, DescriptorUpdateTemplateTypeKHR templateType_ = DescriptorUpdateTemplateTypeKHR::eDescriptorSet, DescriptorSetLayout descriptorSetLayout_ = DescriptorSetLayout(), PipelineBindPoint pipelineBindPoint_ = PipelineBindPoint::eGraphics, PipelineLayout pipelineLayout_ = PipelineLayout(), uint32_t set_ = 0 )
+ : sType( StructureType::eDescriptorUpdateTemplateCreateInfoKHR )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , descriptorUpdateEntryCount( descriptorUpdateEntryCount_ )
+ , pDescriptorUpdateEntries( pDescriptorUpdateEntries_ )
+ , templateType( templateType_ )
+ , descriptorSetLayout( descriptorSetLayout_ )
+ , pipelineBindPoint( pipelineBindPoint_ )
+ , pipelineLayout( pipelineLayout_ )
+ , set( set_ )
+ {
+ }
+
+ DescriptorUpdateTemplateCreateInfoKHR( VkDescriptorUpdateTemplateCreateInfoKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DescriptorUpdateTemplateCreateInfoKHR) );
+ }
+
+ DescriptorUpdateTemplateCreateInfoKHR& operator=( VkDescriptorUpdateTemplateCreateInfoKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DescriptorUpdateTemplateCreateInfoKHR) );
+ return *this;
+ }
+
+ DescriptorUpdateTemplateCreateInfoKHR& setPNext( void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DescriptorUpdateTemplateCreateInfoKHR& setFlags( DescriptorUpdateTemplateCreateFlagsKHR flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ DescriptorUpdateTemplateCreateInfoKHR& setDescriptorUpdateEntryCount( uint32_t descriptorUpdateEntryCount_ )
+ {
+ descriptorUpdateEntryCount = descriptorUpdateEntryCount_;
+ return *this;
+ }
+
+ DescriptorUpdateTemplateCreateInfoKHR& setPDescriptorUpdateEntries( const DescriptorUpdateTemplateEntryKHR* pDescriptorUpdateEntries_ )
+ {
+ pDescriptorUpdateEntries = pDescriptorUpdateEntries_;
+ return *this;
+ }
+
+ DescriptorUpdateTemplateCreateInfoKHR& setTemplateType( DescriptorUpdateTemplateTypeKHR templateType_ )
+ {
+ templateType = templateType_;
+ return *this;
+ }
+
+ DescriptorUpdateTemplateCreateInfoKHR& setDescriptorSetLayout( DescriptorSetLayout descriptorSetLayout_ )
+ {
+ descriptorSetLayout = descriptorSetLayout_;
+ return *this;
+ }
+
+ DescriptorUpdateTemplateCreateInfoKHR& setPipelineBindPoint( PipelineBindPoint pipelineBindPoint_ )
+ {
+ pipelineBindPoint = pipelineBindPoint_;
+ return *this;
+ }
+
+ DescriptorUpdateTemplateCreateInfoKHR& setPipelineLayout( PipelineLayout pipelineLayout_ )
+ {
+ pipelineLayout = pipelineLayout_;
+ return *this;
+ }
+
+ DescriptorUpdateTemplateCreateInfoKHR& setSet( uint32_t set_ )
+ {
+ set = set_;
+ return *this;
+ }
+
+ operator const VkDescriptorUpdateTemplateCreateInfoKHR&() const
+ {
+ return *reinterpret_cast<const VkDescriptorUpdateTemplateCreateInfoKHR*>(this);
+ }
+
+ bool operator==( DescriptorUpdateTemplateCreateInfoKHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( descriptorUpdateEntryCount == rhs.descriptorUpdateEntryCount )
+ && ( pDescriptorUpdateEntries == rhs.pDescriptorUpdateEntries )
+ && ( templateType == rhs.templateType )
+ && ( descriptorSetLayout == rhs.descriptorSetLayout )
+ && ( pipelineBindPoint == rhs.pipelineBindPoint )
+ && ( pipelineLayout == rhs.pipelineLayout )
+ && ( set == rhs.set );
+ }
+
+ bool operator!=( DescriptorUpdateTemplateCreateInfoKHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ void* pNext;
+ DescriptorUpdateTemplateCreateFlagsKHR flags;
+ uint32_t descriptorUpdateEntryCount;
+ const DescriptorUpdateTemplateEntryKHR* pDescriptorUpdateEntries;
+ DescriptorUpdateTemplateTypeKHR templateType;
+ DescriptorSetLayout descriptorSetLayout;
+ PipelineBindPoint pipelineBindPoint;
+ PipelineLayout pipelineLayout;
+ uint32_t set;
+ };
+ static_assert( sizeof( DescriptorUpdateTemplateCreateInfoKHR ) == sizeof( VkDescriptorUpdateTemplateCreateInfoKHR ), "struct and wrapper have different size!" );
+
+ enum class ObjectType
+ {
+ eUnknown = VK_OBJECT_TYPE_UNKNOWN,
+ eInstance = VK_OBJECT_TYPE_INSTANCE,
+ ePhysicalDevice = VK_OBJECT_TYPE_PHYSICAL_DEVICE,
+ eDevice = VK_OBJECT_TYPE_DEVICE,
+ eQueue = VK_OBJECT_TYPE_QUEUE,
+ eSemaphore = VK_OBJECT_TYPE_SEMAPHORE,
+ eCommandBuffer = VK_OBJECT_TYPE_COMMAND_BUFFER,
+ eFence = VK_OBJECT_TYPE_FENCE,
+ eDeviceMemory = VK_OBJECT_TYPE_DEVICE_MEMORY,
+ eBuffer = VK_OBJECT_TYPE_BUFFER,
+ eImage = VK_OBJECT_TYPE_IMAGE,
+ eEvent = VK_OBJECT_TYPE_EVENT,
+ eQueryPool = VK_OBJECT_TYPE_QUERY_POOL,
+ eBufferView = VK_OBJECT_TYPE_BUFFER_VIEW,
+ eImageView = VK_OBJECT_TYPE_IMAGE_VIEW,
+ eShaderModule = VK_OBJECT_TYPE_SHADER_MODULE,
+ ePipelineCache = VK_OBJECT_TYPE_PIPELINE_CACHE,
+ ePipelineLayout = VK_OBJECT_TYPE_PIPELINE_LAYOUT,
+ eRenderPass = VK_OBJECT_TYPE_RENDER_PASS,
+ ePipeline = VK_OBJECT_TYPE_PIPELINE,
+ eDescriptorSetLayout = VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT,
+ eSampler = VK_OBJECT_TYPE_SAMPLER,
+ eDescriptorPool = VK_OBJECT_TYPE_DESCRIPTOR_POOL,
+ eDescriptorSet = VK_OBJECT_TYPE_DESCRIPTOR_SET,
+ eFramebuffer = VK_OBJECT_TYPE_FRAMEBUFFER,
+ eCommandPool = VK_OBJECT_TYPE_COMMAND_POOL,
+ eSurfaceKHR = VK_OBJECT_TYPE_SURFACE_KHR,
+ eSwapchainKHR = VK_OBJECT_TYPE_SWAPCHAIN_KHR,
+ eDisplayKHR = VK_OBJECT_TYPE_DISPLAY_KHR,
+ eDisplayModeKHR = VK_OBJECT_TYPE_DISPLAY_MODE_KHR,
+ eDebugReportCallbackEXT = VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT,
+ eDescriptorUpdateTemplateKHR = VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR,
+ eObjectTableNVX = VK_OBJECT_TYPE_OBJECT_TABLE_NVX,
+ eIndirectCommandsLayoutNVX = VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX
+ };
+
+ enum class QueueFlagBits
+ {
+ eGraphics = VK_QUEUE_GRAPHICS_BIT,
+ eCompute = VK_QUEUE_COMPUTE_BIT,
+ eTransfer = VK_QUEUE_TRANSFER_BIT,
+ eSparseBinding = VK_QUEUE_SPARSE_BINDING_BIT
+ };
+
+ using QueueFlags = Flags<QueueFlagBits, VkQueueFlags>;
+
+ VULKAN_HPP_INLINE QueueFlags operator|( QueueFlagBits bit0, QueueFlagBits bit1 )
+ {
+ return QueueFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE QueueFlags operator~( QueueFlagBits bits )
+ {
+ return ~( QueueFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<QueueFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(QueueFlagBits::eGraphics) | VkFlags(QueueFlagBits::eCompute) | VkFlags(QueueFlagBits::eTransfer) | VkFlags(QueueFlagBits::eSparseBinding)
+ };
+ };
+
+ struct QueueFamilyProperties
+ {
+ operator const VkQueueFamilyProperties&() const
+ {
+ return *reinterpret_cast<const VkQueueFamilyProperties*>(this);
+ }
+
+ bool operator==( QueueFamilyProperties const& rhs ) const
+ {
+ return ( queueFlags == rhs.queueFlags )
+ && ( queueCount == rhs.queueCount )
+ && ( timestampValidBits == rhs.timestampValidBits )
+ && ( minImageTransferGranularity == rhs.minImageTransferGranularity );
+ }
+
+ bool operator!=( QueueFamilyProperties const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ QueueFlags queueFlags;
+ uint32_t queueCount;
+ uint32_t timestampValidBits;
+ Extent3D minImageTransferGranularity;
+ };
+ static_assert( sizeof( QueueFamilyProperties ) == sizeof( VkQueueFamilyProperties ), "struct and wrapper have different size!" );
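+ // Illustrative usage sketch, not part of the generated header: scanning the queue families of a
+ // physical device for one that supports graphics work; 'physicalDevice' is assumed application state.
+ //   std::vector<vk::QueueFamilyProperties> families = physicalDevice.getQueueFamilyProperties();
+ //   uint32_t graphicsFamily = UINT32_MAX;
+ //   for ( uint32_t i = 0; i < families.size(); ++i )
+ //   {
+ //     if ( families[i].queueFlags & vk::QueueFlagBits::eGraphics )
+ //     {
+ //       graphicsFamily = i;
+ //       break;
+ //     }
+ //   }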
+
+ struct QueueFamilyProperties2KHR
+ {
+ operator const VkQueueFamilyProperties2KHR&() const
+ {
+ return *reinterpret_cast<const VkQueueFamilyProperties2KHR*>(this);
+ }
+
+ bool operator==( QueueFamilyProperties2KHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( queueFamilyProperties == rhs.queueFamilyProperties );
+ }
+
+ bool operator!=( QueueFamilyProperties2KHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ void* pNext;
+ QueueFamilyProperties queueFamilyProperties;
+ };
+ static_assert( sizeof( QueueFamilyProperties2KHR ) == sizeof( VkQueueFamilyProperties2KHR ), "struct and wrapper have different size!" );
+
+ enum class MemoryPropertyFlagBits
+ {
+ eDeviceLocal = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
+ eHostVisible = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
+ eHostCoherent = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
+ eHostCached = VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
+ eLazilyAllocated = VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT
+ };
+
+ using MemoryPropertyFlags = Flags<MemoryPropertyFlagBits, VkMemoryPropertyFlags>;
+
+ VULKAN_HPP_INLINE MemoryPropertyFlags operator|( MemoryPropertyFlagBits bit0, MemoryPropertyFlagBits bit1 )
+ {
+ return MemoryPropertyFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE MemoryPropertyFlags operator~( MemoryPropertyFlagBits bits )
+ {
+ return ~( MemoryPropertyFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<MemoryPropertyFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(MemoryPropertyFlagBits::eDeviceLocal) | VkFlags(MemoryPropertyFlagBits::eHostVisible) | VkFlags(MemoryPropertyFlagBits::eHostCoherent) | VkFlags(MemoryPropertyFlagBits::eHostCached) | VkFlags(MemoryPropertyFlagBits::eLazilyAllocated)
+ };
+ };
+
+ struct MemoryType
+ {
+ operator const VkMemoryType&() const
+ {
+ return *reinterpret_cast<const VkMemoryType*>(this);
+ }
+
+ bool operator==( MemoryType const& rhs ) const
+ {
+ return ( propertyFlags == rhs.propertyFlags )
+ && ( heapIndex == rhs.heapIndex );
+ }
+
+ bool operator!=( MemoryType const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ MemoryPropertyFlags propertyFlags;
+ uint32_t heapIndex;
+ };
+ static_assert( sizeof( MemoryType ) == sizeof( VkMemoryType ), "struct and wrapper have different size!" );
+
+ enum class MemoryHeapFlagBits
+ {
+ eDeviceLocal = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
+ eMultiInstanceKHX = VK_MEMORY_HEAP_MULTI_INSTANCE_BIT_KHX
+ };
+
+ using MemoryHeapFlags = Flags<MemoryHeapFlagBits, VkMemoryHeapFlags>;
+
+ VULKAN_HPP_INLINE MemoryHeapFlags operator|( MemoryHeapFlagBits bit0, MemoryHeapFlagBits bit1 )
+ {
+ return MemoryHeapFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE MemoryHeapFlags operator~( MemoryHeapFlagBits bits )
+ {
+ return ~( MemoryHeapFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<MemoryHeapFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(MemoryHeapFlagBits::eDeviceLocal) | VkFlags(MemoryHeapFlagBits::eMultiInstanceKHX)
+ };
+ };
+
+ struct MemoryHeap
+ {
+ operator const VkMemoryHeap&() const
+ {
+ return *reinterpret_cast<const VkMemoryHeap*>(this);
+ }
+
+ bool operator==( MemoryHeap const& rhs ) const
+ {
+ return ( size == rhs.size )
+ && ( flags == rhs.flags );
+ }
+
+ bool operator!=( MemoryHeap const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ DeviceSize size;
+ MemoryHeapFlags flags;
+ };
+ static_assert( sizeof( MemoryHeap ) == sizeof( VkMemoryHeap ), "struct and wrapper have different size!" );
+
+ struct PhysicalDeviceMemoryProperties
+ {
+ operator const VkPhysicalDeviceMemoryProperties&() const
+ {
+ return *reinterpret_cast<const VkPhysicalDeviceMemoryProperties*>(this);
+ }
+
+ bool operator==( PhysicalDeviceMemoryProperties const& rhs ) const
+ {
+ return ( memoryTypeCount == rhs.memoryTypeCount )
+ && ( memcmp( memoryTypes, rhs.memoryTypes, VK_MAX_MEMORY_TYPES * sizeof( MemoryType ) ) == 0 )
+ && ( memoryHeapCount == rhs.memoryHeapCount )
+ && ( memcmp( memoryHeaps, rhs.memoryHeaps, VK_MAX_MEMORY_HEAPS * sizeof( MemoryHeap ) ) == 0 );
+ }
+
+ bool operator!=( PhysicalDeviceMemoryProperties const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ uint32_t memoryTypeCount;
+ MemoryType memoryTypes[VK_MAX_MEMORY_TYPES];
+ uint32_t memoryHeapCount;
+ MemoryHeap memoryHeaps[VK_MAX_MEMORY_HEAPS];
+ };
+ static_assert( sizeof( PhysicalDeviceMemoryProperties ) == sizeof( VkPhysicalDeviceMemoryProperties ), "struct and wrapper have different size!" );
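+ // Illustrative usage sketch, not part of the generated header: the usual memory-type search over
+ // these fixed-size arrays; 'memProps' would come from PhysicalDevice::getMemoryProperties() and
+ // 'typeBits' from a MemoryRequirements query.
+ //   uint32_t memoryTypeIndex = UINT32_MAX;
+ //   for ( uint32_t i = 0; i < memProps.memoryTypeCount; ++i )
+ //   {
+ //     if ( ( typeBits & ( 1u << i ) ) &&
+ //          ( memProps.memoryTypes[i].propertyFlags & vk::MemoryPropertyFlagBits::eHostVisible ) )
+ //     {
+ //       memoryTypeIndex = i;
+ //       break;
+ //     }
+ //   }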
+
+ struct PhysicalDeviceMemoryProperties2KHR
+ {
+ operator const VkPhysicalDeviceMemoryProperties2KHR&() const
+ {
+ return *reinterpret_cast<const VkPhysicalDeviceMemoryProperties2KHR*>(this);
+ }
+
+ bool operator==( PhysicalDeviceMemoryProperties2KHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( memoryProperties == rhs.memoryProperties );
+ }
+
+ bool operator!=( PhysicalDeviceMemoryProperties2KHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ void* pNext;
+ PhysicalDeviceMemoryProperties memoryProperties;
+ };
+ static_assert( sizeof( PhysicalDeviceMemoryProperties2KHR ) == sizeof( VkPhysicalDeviceMemoryProperties2KHR ), "struct and wrapper have different size!" );
+
+ enum class AccessFlagBits
+ {
+ eIndirectCommandRead = VK_ACCESS_INDIRECT_COMMAND_READ_BIT,
+ eIndexRead = VK_ACCESS_INDEX_READ_BIT,
+ eVertexAttributeRead = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
+ eUniformRead = VK_ACCESS_UNIFORM_READ_BIT,
+ eInputAttachmentRead = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT,
+ eShaderRead = VK_ACCESS_SHADER_READ_BIT,
+ eShaderWrite = VK_ACCESS_SHADER_WRITE_BIT,
+ eColorAttachmentRead = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT,
+ eColorAttachmentWrite = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
+ eDepthStencilAttachmentRead = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
+ eDepthStencilAttachmentWrite = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
+ eTransferRead = VK_ACCESS_TRANSFER_READ_BIT,
+ eTransferWrite = VK_ACCESS_TRANSFER_WRITE_BIT,
+ eHostRead = VK_ACCESS_HOST_READ_BIT,
+ eHostWrite = VK_ACCESS_HOST_WRITE_BIT,
+ eMemoryRead = VK_ACCESS_MEMORY_READ_BIT,
+ eMemoryWrite = VK_ACCESS_MEMORY_WRITE_BIT,
+ eCommandProcessReadNVX = VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX,
+ eCommandProcessWriteNVX = VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX
+ };
+
+ using AccessFlags = Flags<AccessFlagBits, VkAccessFlags>;
+
+ VULKAN_HPP_INLINE AccessFlags operator|( AccessFlagBits bit0, AccessFlagBits bit1 )
+ {
+ return AccessFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE AccessFlags operator~( AccessFlagBits bits )
+ {
+ return ~( AccessFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<AccessFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(AccessFlagBits::eIndirectCommandRead) | VkFlags(AccessFlagBits::eIndexRead) | VkFlags(AccessFlagBits::eVertexAttributeRead) | VkFlags(AccessFlagBits::eUniformRead) | VkFlags(AccessFlagBits::eInputAttachmentRead) | VkFlags(AccessFlagBits::eShaderRead) | VkFlags(AccessFlagBits::eShaderWrite) | VkFlags(AccessFlagBits::eColorAttachmentRead) | VkFlags(AccessFlagBits::eColorAttachmentWrite) | VkFlags(AccessFlagBits::eDepthStencilAttachmentRead) | VkFlags(AccessFlagBits::eDepthStencilAttachmentWrite) | VkFlags(AccessFlagBits::eTransferRead) | VkFlags(AccessFlagBits::eTransferWrite) | VkFlags(AccessFlagBits::eHostRead) | VkFlags(AccessFlagBits::eHostWrite) | VkFlags(AccessFlagBits::eMemoryRead) | VkFlags(AccessFlagBits::eMemoryWrite) | VkFlags(AccessFlagBits::eCommandProcessReadNVX) | VkFlags(AccessFlagBits::eCommandProcessWriteNVX)
+ };
+ };
+
+ struct MemoryBarrier
+ {
+ MemoryBarrier( AccessFlags srcAccessMask_ = AccessFlags(), AccessFlags dstAccessMask_ = AccessFlags() )
+ : sType( StructureType::eMemoryBarrier )
+ , pNext( nullptr )
+ , srcAccessMask( srcAccessMask_ )
+ , dstAccessMask( dstAccessMask_ )
+ {
+ }
+
+ MemoryBarrier( VkMemoryBarrier const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(MemoryBarrier) );
+ }
+
+ MemoryBarrier& operator=( VkMemoryBarrier const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(MemoryBarrier) );
+ return *this;
+ }
+
+ MemoryBarrier& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ MemoryBarrier& setSrcAccessMask( AccessFlags srcAccessMask_ )
+ {
+ srcAccessMask = srcAccessMask_;
+ return *this;
+ }
+
+ MemoryBarrier& setDstAccessMask( AccessFlags dstAccessMask_ )
+ {
+ dstAccessMask = dstAccessMask_;
+ return *this;
+ }
+
+ operator const VkMemoryBarrier&() const
+ {
+ return *reinterpret_cast<const VkMemoryBarrier*>(this);
+ }
+
+ bool operator==( MemoryBarrier const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( srcAccessMask == rhs.srcAccessMask )
+ && ( dstAccessMask == rhs.dstAccessMask );
+ }
+
+ bool operator!=( MemoryBarrier const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ AccessFlags srcAccessMask;
+ AccessFlags dstAccessMask;
+ };
+ static_assert( sizeof( MemoryBarrier ) == sizeof( VkMemoryBarrier ), "struct and wrapper have different size!" );
+
+ struct BufferMemoryBarrier
+ {
+ BufferMemoryBarrier( AccessFlags srcAccessMask_ = AccessFlags(), AccessFlags dstAccessMask_ = AccessFlags(), uint32_t srcQueueFamilyIndex_ = 0, uint32_t dstQueueFamilyIndex_ = 0, Buffer buffer_ = Buffer(), DeviceSize offset_ = 0, DeviceSize size_ = 0 )
+ : sType( StructureType::eBufferMemoryBarrier )
+ , pNext( nullptr )
+ , srcAccessMask( srcAccessMask_ )
+ , dstAccessMask( dstAccessMask_ )
+ , srcQueueFamilyIndex( srcQueueFamilyIndex_ )
+ , dstQueueFamilyIndex( dstQueueFamilyIndex_ )
+ , buffer( buffer_ )
+ , offset( offset_ )
+ , size( size_ )
+ {
+ }
+
+ BufferMemoryBarrier( VkBufferMemoryBarrier const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(BufferMemoryBarrier) );
+ }
+
+ BufferMemoryBarrier& operator=( VkBufferMemoryBarrier const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(BufferMemoryBarrier) );
+ return *this;
+ }
+
+ BufferMemoryBarrier& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ BufferMemoryBarrier& setSrcAccessMask( AccessFlags srcAccessMask_ )
+ {
+ srcAccessMask = srcAccessMask_;
+ return *this;
+ }
+
+ BufferMemoryBarrier& setDstAccessMask( AccessFlags dstAccessMask_ )
+ {
+ dstAccessMask = dstAccessMask_;
+ return *this;
+ }
+
+ BufferMemoryBarrier& setSrcQueueFamilyIndex( uint32_t srcQueueFamilyIndex_ )
+ {
+ srcQueueFamilyIndex = srcQueueFamilyIndex_;
+ return *this;
+ }
+
+ BufferMemoryBarrier& setDstQueueFamilyIndex( uint32_t dstQueueFamilyIndex_ )
+ {
+ dstQueueFamilyIndex = dstQueueFamilyIndex_;
+ return *this;
+ }
+
+ BufferMemoryBarrier& setBuffer( Buffer buffer_ )
+ {
+ buffer = buffer_;
+ return *this;
+ }
+
+ BufferMemoryBarrier& setOffset( DeviceSize offset_ )
+ {
+ offset = offset_;
+ return *this;
+ }
+
+ BufferMemoryBarrier& setSize( DeviceSize size_ )
+ {
+ size = size_;
+ return *this;
+ }
+
+ operator const VkBufferMemoryBarrier&() const
+ {
+ return *reinterpret_cast<const VkBufferMemoryBarrier*>(this);
+ }
+
+ bool operator==( BufferMemoryBarrier const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( srcAccessMask == rhs.srcAccessMask )
+ && ( dstAccessMask == rhs.dstAccessMask )
+ && ( srcQueueFamilyIndex == rhs.srcQueueFamilyIndex )
+ && ( dstQueueFamilyIndex == rhs.dstQueueFamilyIndex )
+ && ( buffer == rhs.buffer )
+ && ( offset == rhs.offset )
+ && ( size == rhs.size );
+ }
+
+ bool operator!=( BufferMemoryBarrier const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ AccessFlags srcAccessMask;
+ AccessFlags dstAccessMask;
+ uint32_t srcQueueFamilyIndex;
+ uint32_t dstQueueFamilyIndex;
+ Buffer buffer;
+ DeviceSize offset;
+ DeviceSize size;
+ };
+ static_assert( sizeof( BufferMemoryBarrier ) == sizeof( VkBufferMemoryBarrier ), "struct and wrapper have different size!" );
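+ // Illustrative usage sketch, not part of the generated header: releasing a buffer range from a
+ // transfer queue family to a graphics queue family; the two family indices and 'buffer' are
+ // assumed application state.
+ //   vk::BufferMemoryBarrier barrier = vk::BufferMemoryBarrier()
+ //     .setSrcAccessMask( vk::AccessFlagBits::eTransferWrite )
+ //     .setDstAccessMask( vk::AccessFlagBits::eVertexAttributeRead )
+ //     .setSrcQueueFamilyIndex( transferFamily )
+ //     .setDstQueueFamilyIndex( graphicsFamily )
+ //     .setBuffer( buffer )
+ //     .setOffset( 0 )
+ //     .setSize( VK_WHOLE_SIZE );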
+
+ enum class BufferUsageFlagBits
+ {
+ eTransferSrc = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
+ eTransferDst = VK_BUFFER_USAGE_TRANSFER_DST_BIT,
+ eUniformTexelBuffer = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT,
+ eStorageTexelBuffer = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT,
+ eUniformBuffer = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
+ eStorageBuffer = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,
+ eIndexBuffer = VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
+ eVertexBuffer = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
+ eIndirectBuffer = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT
+ };
+
+ using BufferUsageFlags = Flags<BufferUsageFlagBits, VkBufferUsageFlags>;
+
+ VULKAN_HPP_INLINE BufferUsageFlags operator|( BufferUsageFlagBits bit0, BufferUsageFlagBits bit1 )
+ {
+ return BufferUsageFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE BufferUsageFlags operator~( BufferUsageFlagBits bits )
+ {
+ return ~( BufferUsageFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<BufferUsageFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(BufferUsageFlagBits::eTransferSrc) | VkFlags(BufferUsageFlagBits::eTransferDst) | VkFlags(BufferUsageFlagBits::eUniformTexelBuffer) | VkFlags(BufferUsageFlagBits::eStorageTexelBuffer) | VkFlags(BufferUsageFlagBits::eUniformBuffer) | VkFlags(BufferUsageFlagBits::eStorageBuffer) | VkFlags(BufferUsageFlagBits::eIndexBuffer) | VkFlags(BufferUsageFlagBits::eVertexBuffer) | VkFlags(BufferUsageFlagBits::eIndirectBuffer)
+ };
+ };
+
+ enum class BufferCreateFlagBits
+ {
+ eSparseBinding = VK_BUFFER_CREATE_SPARSE_BINDING_BIT,
+ eSparseResidency = VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT,
+ eSparseAliased = VK_BUFFER_CREATE_SPARSE_ALIASED_BIT
+ };
+
+ using BufferCreateFlags = Flags<BufferCreateFlagBits, VkBufferCreateFlags>;
+
+ VULKAN_HPP_INLINE BufferCreateFlags operator|( BufferCreateFlagBits bit0, BufferCreateFlagBits bit1 )
+ {
+ return BufferCreateFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE BufferCreateFlags operator~( BufferCreateFlagBits bits )
+ {
+ return ~( BufferCreateFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<BufferCreateFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(BufferCreateFlagBits::eSparseBinding) | VkFlags(BufferCreateFlagBits::eSparseResidency) | VkFlags(BufferCreateFlagBits::eSparseAliased)
+ };
+ };
+
+ struct BufferCreateInfo
+ {
+ BufferCreateInfo( BufferCreateFlags flags_ = BufferCreateFlags(), DeviceSize size_ = 0, BufferUsageFlags usage_ = BufferUsageFlags(), SharingMode sharingMode_ = SharingMode::eExclusive, uint32_t queueFamilyIndexCount_ = 0, const uint32_t* pQueueFamilyIndices_ = nullptr )
+ : sType( StructureType::eBufferCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , size( size_ )
+ , usage( usage_ )
+ , sharingMode( sharingMode_ )
+ , queueFamilyIndexCount( queueFamilyIndexCount_ )
+ , pQueueFamilyIndices( pQueueFamilyIndices_ )
+ {
+ }
+
+ BufferCreateInfo( VkBufferCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(BufferCreateInfo) );
+ }
+
+ BufferCreateInfo& operator=( VkBufferCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(BufferCreateInfo) );
+ return *this;
+ }
+
+ BufferCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ BufferCreateInfo& setFlags( BufferCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ BufferCreateInfo& setSize( DeviceSize size_ )
+ {
+ size = size_;
+ return *this;
+ }
+
+ BufferCreateInfo& setUsage( BufferUsageFlags usage_ )
+ {
+ usage = usage_;
+ return *this;
+ }
+
+ BufferCreateInfo& setSharingMode( SharingMode sharingMode_ )
+ {
+ sharingMode = sharingMode_;
+ return *this;
+ }
+
+ BufferCreateInfo& setQueueFamilyIndexCount( uint32_t queueFamilyIndexCount_ )
+ {
+ queueFamilyIndexCount = queueFamilyIndexCount_;
+ return *this;
+ }
+
+ BufferCreateInfo& setPQueueFamilyIndices( const uint32_t* pQueueFamilyIndices_ )
+ {
+ pQueueFamilyIndices = pQueueFamilyIndices_;
+ return *this;
+ }
+
+ operator const VkBufferCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkBufferCreateInfo*>(this);
+ }
+
+ bool operator==( BufferCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( size == rhs.size )
+ && ( usage == rhs.usage )
+ && ( sharingMode == rhs.sharingMode )
+ && ( queueFamilyIndexCount == rhs.queueFamilyIndexCount )
+ && ( pQueueFamilyIndices == rhs.pQueueFamilyIndices );
+ }
+
+ bool operator!=( BufferCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ BufferCreateFlags flags;
+ DeviceSize size;
+ BufferUsageFlags usage;
+ SharingMode sharingMode;
+ uint32_t queueFamilyIndexCount;
+ const uint32_t* pQueueFamilyIndices;
+ };
+ static_assert( sizeof( BufferCreateInfo ) == sizeof( VkBufferCreateInfo ), "struct and wrapper have different size!" );
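+ // Illustrative usage sketch, not part of the generated header: fluent construction of a vertex
+ // buffer create-info; 'vertexDataSize' and 'device' are assumed application state.
+ //   vk::BufferCreateInfo bufferInfo = vk::BufferCreateInfo()
+ //     .setSize( vertexDataSize )
+ //     .setUsage( vk::BufferUsageFlagBits::eVertexBuffer | vk::BufferUsageFlagBits::eTransferDst )
+ //     .setSharingMode( vk::SharingMode::eExclusive );
+ //   vk::Buffer buffer = device.createBuffer( bufferInfo );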
+
+ enum class ShaderStageFlagBits
+ {
+ eVertex = VK_SHADER_STAGE_VERTEX_BIT,
+ eTessellationControl = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
+ eTessellationEvaluation = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
+ eGeometry = VK_SHADER_STAGE_GEOMETRY_BIT,
+ eFragment = VK_SHADER_STAGE_FRAGMENT_BIT,
+ eCompute = VK_SHADER_STAGE_COMPUTE_BIT,
+ eAllGraphics = VK_SHADER_STAGE_ALL_GRAPHICS,
+ eAll = VK_SHADER_STAGE_ALL
+ };
+
+ using ShaderStageFlags = Flags<ShaderStageFlagBits, VkShaderStageFlags>;
+
+ VULKAN_HPP_INLINE ShaderStageFlags operator|( ShaderStageFlagBits bit0, ShaderStageFlagBits bit1 )
+ {
+ return ShaderStageFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE ShaderStageFlags operator~( ShaderStageFlagBits bits )
+ {
+ return ~( ShaderStageFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<ShaderStageFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(ShaderStageFlagBits::eVertex) | VkFlags(ShaderStageFlagBits::eTessellationControl) | VkFlags(ShaderStageFlagBits::eTessellationEvaluation) | VkFlags(ShaderStageFlagBits::eGeometry) | VkFlags(ShaderStageFlagBits::eFragment) | VkFlags(ShaderStageFlagBits::eCompute) | VkFlags(ShaderStageFlagBits::eAllGraphics) | VkFlags(ShaderStageFlagBits::eAll)
+ };
+ };
+
+ struct DescriptorSetLayoutBinding
+ {
+ DescriptorSetLayoutBinding( uint32_t binding_ = 0, DescriptorType descriptorType_ = DescriptorType::eSampler, uint32_t descriptorCount_ = 0, ShaderStageFlags stageFlags_ = ShaderStageFlags(), const Sampler* pImmutableSamplers_ = nullptr )
+ : binding( binding_ )
+ , descriptorType( descriptorType_ )
+ , descriptorCount( descriptorCount_ )
+ , stageFlags( stageFlags_ )
+ , pImmutableSamplers( pImmutableSamplers_ )
+ {
+ }
+
+ DescriptorSetLayoutBinding( VkDescriptorSetLayoutBinding const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DescriptorSetLayoutBinding) );
+ }
+
+ DescriptorSetLayoutBinding& operator=( VkDescriptorSetLayoutBinding const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DescriptorSetLayoutBinding) );
+ return *this;
+ }
+
+ DescriptorSetLayoutBinding& setBinding( uint32_t binding_ )
+ {
+ binding = binding_;
+ return *this;
+ }
+
+ DescriptorSetLayoutBinding& setDescriptorType( DescriptorType descriptorType_ )
+ {
+ descriptorType = descriptorType_;
+ return *this;
+ }
+
+ DescriptorSetLayoutBinding& setDescriptorCount( uint32_t descriptorCount_ )
+ {
+ descriptorCount = descriptorCount_;
+ return *this;
+ }
+
+ DescriptorSetLayoutBinding& setStageFlags( ShaderStageFlags stageFlags_ )
+ {
+ stageFlags = stageFlags_;
+ return *this;
+ }
+
+ DescriptorSetLayoutBinding& setPImmutableSamplers( const Sampler* pImmutableSamplers_ )
+ {
+ pImmutableSamplers = pImmutableSamplers_;
+ return *this;
+ }
+
+ operator const VkDescriptorSetLayoutBinding&() const
+ {
+ return *reinterpret_cast<const VkDescriptorSetLayoutBinding*>(this);
+ }
+
+ bool operator==( DescriptorSetLayoutBinding const& rhs ) const
+ {
+ return ( binding == rhs.binding )
+ && ( descriptorType == rhs.descriptorType )
+ && ( descriptorCount == rhs.descriptorCount )
+ && ( stageFlags == rhs.stageFlags )
+ && ( pImmutableSamplers == rhs.pImmutableSamplers );
+ }
+
+ bool operator!=( DescriptorSetLayoutBinding const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ uint32_t binding;
+ DescriptorType descriptorType;
+ uint32_t descriptorCount;
+ ShaderStageFlags stageFlags;
+ const Sampler* pImmutableSamplers;
+ };
+ static_assert( sizeof( DescriptorSetLayoutBinding ) == sizeof( VkDescriptorSetLayoutBinding ), "struct and wrapper have different size!" );
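+ // Illustrative usage sketch, not part of the generated header: a single uniform-buffer binding
+ // visible to the vertex stage.
+ //   vk::DescriptorSetLayoutBinding uboBinding = vk::DescriptorSetLayoutBinding()
+ //     .setBinding( 0 )
+ //     .setDescriptorType( vk::DescriptorType::eUniformBuffer )
+ //     .setDescriptorCount( 1 )
+ //     .setStageFlags( vk::ShaderStageFlagBits::eVertex );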
+
+ struct PipelineShaderStageCreateInfo
+ {
+ PipelineShaderStageCreateInfo( PipelineShaderStageCreateFlags flags_ = PipelineShaderStageCreateFlags(), ShaderStageFlagBits stage_ = ShaderStageFlagBits::eVertex, ShaderModule module_ = ShaderModule(), const char* pName_ = nullptr, const SpecializationInfo* pSpecializationInfo_ = nullptr )
+ : sType( StructureType::ePipelineShaderStageCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , stage( stage_ )
+ , module( module_ )
+ , pName( pName_ )
+ , pSpecializationInfo( pSpecializationInfo_ )
+ {
+ }
+
+ PipelineShaderStageCreateInfo( VkPipelineShaderStageCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineShaderStageCreateInfo) );
+ }
+
+ PipelineShaderStageCreateInfo& operator=( VkPipelineShaderStageCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineShaderStageCreateInfo) );
+ return *this;
+ }
+
+ PipelineShaderStageCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PipelineShaderStageCreateInfo& setFlags( PipelineShaderStageCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ PipelineShaderStageCreateInfo& setStage( ShaderStageFlagBits stage_ )
+ {
+ stage = stage_;
+ return *this;
+ }
+
+ PipelineShaderStageCreateInfo& setModule( ShaderModule module_ )
+ {
+ module = module_;
+ return *this;
+ }
+
+ PipelineShaderStageCreateInfo& setPName( const char* pName_ )
+ {
+ pName = pName_;
+ return *this;
+ }
+
+ PipelineShaderStageCreateInfo& setPSpecializationInfo( const SpecializationInfo* pSpecializationInfo_ )
+ {
+ pSpecializationInfo = pSpecializationInfo_;
+ return *this;
+ }
+
+ operator const VkPipelineShaderStageCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkPipelineShaderStageCreateInfo*>(this);
+ }
+
+ bool operator==( PipelineShaderStageCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( stage == rhs.stage )
+ && ( module == rhs.module )
+ && ( pName == rhs.pName )
+ && ( pSpecializationInfo == rhs.pSpecializationInfo );
+ }
+
+ bool operator!=( PipelineShaderStageCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ PipelineShaderStageCreateFlags flags;
+ ShaderStageFlagBits stage;
+ ShaderModule module;
+ const char* pName;
+ const SpecializationInfo* pSpecializationInfo;
+ };
+ static_assert( sizeof( PipelineShaderStageCreateInfo ) == sizeof( VkPipelineShaderStageCreateInfo ), "struct and wrapper have different size!" );
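+ // Note that operator== above compares pName by pointer value rather than by string contents, so
+ // two otherwise identical stages with separately allocated entry-point strings compare unequal.
+ // Illustrative usage sketch, not part of the generated header ('shaderModule' is assumed
+ // application state):
+ //   vk::PipelineShaderStageCreateInfo stage = vk::PipelineShaderStageCreateInfo()
+ //     .setStage( vk::ShaderStageFlagBits::eCompute )
+ //     .setModule( shaderModule )
+ //     .setPName( "main" );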
+
+ struct PushConstantRange
+ {
+ PushConstantRange( ShaderStageFlags stageFlags_ = ShaderStageFlags(), uint32_t offset_ = 0, uint32_t size_ = 0 )
+ : stageFlags( stageFlags_ )
+ , offset( offset_ )
+ , size( size_ )
+ {
+ }
+
+ PushConstantRange( VkPushConstantRange const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PushConstantRange) );
+ }
+
+ PushConstantRange& operator=( VkPushConstantRange const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PushConstantRange) );
+ return *this;
+ }
+
+ PushConstantRange& setStageFlags( ShaderStageFlags stageFlags_ )
+ {
+ stageFlags = stageFlags_;
+ return *this;
+ }
+
+ PushConstantRange& setOffset( uint32_t offset_ )
+ {
+ offset = offset_;
+ return *this;
+ }
+
+ PushConstantRange& setSize( uint32_t size_ )
+ {
+ size = size_;
+ return *this;
+ }
+
+ operator const VkPushConstantRange&() const
+ {
+ return *reinterpret_cast<const VkPushConstantRange*>(this);
+ }
+
+ bool operator==( PushConstantRange const& rhs ) const
+ {
+ return ( stageFlags == rhs.stageFlags )
+ && ( offset == rhs.offset )
+ && ( size == rhs.size );
+ }
+
+ bool operator!=( PushConstantRange const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ ShaderStageFlags stageFlags;
+ uint32_t offset;
+ uint32_t size;
+ };
+ static_assert( sizeof( PushConstantRange ) == sizeof( VkPushConstantRange ), "struct and wrapper have different size!" );
+
+ struct PipelineLayoutCreateInfo
+ {
+ PipelineLayoutCreateInfo( PipelineLayoutCreateFlags flags_ = PipelineLayoutCreateFlags(), uint32_t setLayoutCount_ = 0, const DescriptorSetLayout* pSetLayouts_ = nullptr, uint32_t pushConstantRangeCount_ = 0, const PushConstantRange* pPushConstantRanges_ = nullptr )
+ : sType( StructureType::ePipelineLayoutCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , setLayoutCount( setLayoutCount_ )
+ , pSetLayouts( pSetLayouts_ )
+ , pushConstantRangeCount( pushConstantRangeCount_ )
+ , pPushConstantRanges( pPushConstantRanges_ )
+ {
+ }
+
+ PipelineLayoutCreateInfo( VkPipelineLayoutCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineLayoutCreateInfo) );
+ }
+
+ PipelineLayoutCreateInfo& operator=( VkPipelineLayoutCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineLayoutCreateInfo) );
+ return *this;
+ }
+
+ PipelineLayoutCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PipelineLayoutCreateInfo& setFlags( PipelineLayoutCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ PipelineLayoutCreateInfo& setSetLayoutCount( uint32_t setLayoutCount_ )
+ {
+ setLayoutCount = setLayoutCount_;
+ return *this;
+ }
+
+ PipelineLayoutCreateInfo& setPSetLayouts( const DescriptorSetLayout* pSetLayouts_ )
+ {
+ pSetLayouts = pSetLayouts_;
+ return *this;
+ }
+
+ PipelineLayoutCreateInfo& setPushConstantRangeCount( uint32_t pushConstantRangeCount_ )
+ {
+ pushConstantRangeCount = pushConstantRangeCount_;
+ return *this;
+ }
+
+ PipelineLayoutCreateInfo& setPPushConstantRanges( const PushConstantRange* pPushConstantRanges_ )
+ {
+ pPushConstantRanges = pPushConstantRanges_;
+ return *this;
+ }
+
+ operator const VkPipelineLayoutCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkPipelineLayoutCreateInfo*>(this);
+ }
+
+ bool operator==( PipelineLayoutCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( setLayoutCount == rhs.setLayoutCount )
+ && ( pSetLayouts == rhs.pSetLayouts )
+ && ( pushConstantRangeCount == rhs.pushConstantRangeCount )
+ && ( pPushConstantRanges == rhs.pPushConstantRanges );
+ }
+
+ bool operator!=( PipelineLayoutCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ PipelineLayoutCreateFlags flags;
+ uint32_t setLayoutCount;
+ const DescriptorSetLayout* pSetLayouts;
+ uint32_t pushConstantRangeCount;
+ const PushConstantRange* pPushConstantRanges;
+ };
+ static_assert( sizeof( PipelineLayoutCreateInfo ) == sizeof( VkPipelineLayoutCreateInfo ), "struct and wrapper have different size!" );
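+ // Illustrative usage sketch, not part of the generated header: one descriptor set layout plus a
+ // small push-constant block; 'setLayout' and 'device' are assumed application state.
+ //   vk::PushConstantRange pcRange( vk::ShaderStageFlagBits::eCompute, 0, 16 );
+ //   vk::PipelineLayoutCreateInfo layoutInfo = vk::PipelineLayoutCreateInfo()
+ //     .setSetLayoutCount( 1 )
+ //     .setPSetLayouts( &setLayout )
+ //     .setPushConstantRangeCount( 1 )
+ //     .setPPushConstantRanges( &pcRange );
+ //   vk::PipelineLayout pipelineLayout = device.createPipelineLayout( layoutInfo );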
+
+ enum class ImageUsageFlagBits
+ {
+ eTransferSrc = VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
+ eTransferDst = VK_IMAGE_USAGE_TRANSFER_DST_BIT,
+ eSampled = VK_IMAGE_USAGE_SAMPLED_BIT,
+ eStorage = VK_IMAGE_USAGE_STORAGE_BIT,
+ eColorAttachment = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
+ eDepthStencilAttachment = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
+ eTransientAttachment = VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT,
+ eInputAttachment = VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT
+ };
+
+ using ImageUsageFlags = Flags<ImageUsageFlagBits, VkImageUsageFlags>;
+
+ VULKAN_HPP_INLINE ImageUsageFlags operator|( ImageUsageFlagBits bit0, ImageUsageFlagBits bit1 )
+ {
+ return ImageUsageFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE ImageUsageFlags operator~( ImageUsageFlagBits bits )
+ {
+ return ~( ImageUsageFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<ImageUsageFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(ImageUsageFlagBits::eTransferSrc) | VkFlags(ImageUsageFlagBits::eTransferDst) | VkFlags(ImageUsageFlagBits::eSampled) | VkFlags(ImageUsageFlagBits::eStorage) | VkFlags(ImageUsageFlagBits::eColorAttachment) | VkFlags(ImageUsageFlagBits::eDepthStencilAttachment) | VkFlags(ImageUsageFlagBits::eTransientAttachment) | VkFlags(ImageUsageFlagBits::eInputAttachment)
+ };
+ };
+
+ struct SharedPresentSurfaceCapabilitiesKHR
+ {
+ operator const VkSharedPresentSurfaceCapabilitiesKHR&() const
+ {
+ return *reinterpret_cast<const VkSharedPresentSurfaceCapabilitiesKHR*>(this);
+ }
+
+ bool operator==( SharedPresentSurfaceCapabilitiesKHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( sharedPresentSupportedUsageFlags == rhs.sharedPresentSupportedUsageFlags );
+ }
+
+ bool operator!=( SharedPresentSurfaceCapabilitiesKHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ void* pNext;
+ ImageUsageFlags sharedPresentSupportedUsageFlags;
+ };
+ static_assert( sizeof( SharedPresentSurfaceCapabilitiesKHR ) == sizeof( VkSharedPresentSurfaceCapabilitiesKHR ), "struct and wrapper have different size!" );
+
+ enum class ImageCreateFlagBits
+ {
+ eSparseBinding = VK_IMAGE_CREATE_SPARSE_BINDING_BIT,
+ eSparseResidency = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT,
+ eSparseAliased = VK_IMAGE_CREATE_SPARSE_ALIASED_BIT,
+ eMutableFormat = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT,
+ eCubeCompatible = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT,
+ eBindSfrKHX = VK_IMAGE_CREATE_BIND_SFR_BIT_KHX,
+ e2DArrayCompatibleKHR = VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR
+ };
+
+ using ImageCreateFlags = Flags<ImageCreateFlagBits, VkImageCreateFlags>;
+
+ VULKAN_HPP_INLINE ImageCreateFlags operator|( ImageCreateFlagBits bit0, ImageCreateFlagBits bit1 )
+ {
+ return ImageCreateFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE ImageCreateFlags operator~( ImageCreateFlagBits bits )
+ {
+ return ~( ImageCreateFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<ImageCreateFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(ImageCreateFlagBits::eSparseBinding) | VkFlags(ImageCreateFlagBits::eSparseResidency) | VkFlags(ImageCreateFlagBits::eSparseAliased) | VkFlags(ImageCreateFlagBits::eMutableFormat) | VkFlags(ImageCreateFlagBits::eCubeCompatible) | VkFlags(ImageCreateFlagBits::eBindSfrKHX) | VkFlags(ImageCreateFlagBits::e2DArrayCompatibleKHR)
+ };
+ };
+
+ struct PhysicalDeviceImageFormatInfo2KHR
+ {
+ PhysicalDeviceImageFormatInfo2KHR( Format format_ = Format::eUndefined, ImageType type_ = ImageType::e1D, ImageTiling tiling_ = ImageTiling::eOptimal, ImageUsageFlags usage_ = ImageUsageFlags(), ImageCreateFlags flags_ = ImageCreateFlags() )
+ : sType( StructureType::ePhysicalDeviceImageFormatInfo2KHR )
+ , pNext( nullptr )
+ , format( format_ )
+ , type( type_ )
+ , tiling( tiling_ )
+ , usage( usage_ )
+ , flags( flags_ )
+ {
+ }
+
+ PhysicalDeviceImageFormatInfo2KHR( VkPhysicalDeviceImageFormatInfo2KHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PhysicalDeviceImageFormatInfo2KHR) );
+ }
+
+ PhysicalDeviceImageFormatInfo2KHR& operator=( VkPhysicalDeviceImageFormatInfo2KHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PhysicalDeviceImageFormatInfo2KHR) );
+ return *this;
+ }
+
+ PhysicalDeviceImageFormatInfo2KHR& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PhysicalDeviceImageFormatInfo2KHR& setFormat( Format format_ )
+ {
+ format = format_;
+ return *this;
+ }
+
+ PhysicalDeviceImageFormatInfo2KHR& setType( ImageType type_ )
+ {
+ type = type_;
+ return *this;
+ }
+
+ PhysicalDeviceImageFormatInfo2KHR& setTiling( ImageTiling tiling_ )
+ {
+ tiling = tiling_;
+ return *this;
+ }
+
+ PhysicalDeviceImageFormatInfo2KHR& setUsage( ImageUsageFlags usage_ )
+ {
+ usage = usage_;
+ return *this;
+ }
+
+ PhysicalDeviceImageFormatInfo2KHR& setFlags( ImageCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ operator const VkPhysicalDeviceImageFormatInfo2KHR&() const
+ {
+ return *reinterpret_cast<const VkPhysicalDeviceImageFormatInfo2KHR*>(this);
+ }
+
+ bool operator==( PhysicalDeviceImageFormatInfo2KHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( format == rhs.format )
+ && ( type == rhs.type )
+ && ( tiling == rhs.tiling )
+ && ( usage == rhs.usage )
+ && ( flags == rhs.flags );
+ }
+
+ bool operator!=( PhysicalDeviceImageFormatInfo2KHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ Format format;
+ ImageType type;
+ ImageTiling tiling;
+ ImageUsageFlags usage;
+ ImageCreateFlags flags;
+ };
+ static_assert( sizeof( PhysicalDeviceImageFormatInfo2KHR ) == sizeof( VkPhysicalDeviceImageFormatInfo2KHR ), "struct and wrapper have different size!" );
+
+ enum class PipelineCreateFlagBits
+ {
+ eDisableOptimization = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT,
+ eAllowDerivatives = VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT,
+ eDerivative = VK_PIPELINE_CREATE_DERIVATIVE_BIT,
+ eViewIndexFromDeviceIndexKHX = VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHX,
+ eDispatchBaseKHX = VK_PIPELINE_CREATE_DISPATCH_BASE_KHX
+ };
+
+ using PipelineCreateFlags = Flags<PipelineCreateFlagBits, VkPipelineCreateFlags>;
+
+ VULKAN_HPP_INLINE PipelineCreateFlags operator|( PipelineCreateFlagBits bit0, PipelineCreateFlagBits bit1 )
+ {
+ return PipelineCreateFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE PipelineCreateFlags operator~( PipelineCreateFlagBits bits )
+ {
+ return ~( PipelineCreateFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<PipelineCreateFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(PipelineCreateFlagBits::eDisableOptimization) | VkFlags(PipelineCreateFlagBits::eAllowDerivatives) | VkFlags(PipelineCreateFlagBits::eDerivative) | VkFlags(PipelineCreateFlagBits::eViewIndexFromDeviceIndexKHX) | VkFlags(PipelineCreateFlagBits::eDispatchBaseKHX)
+ };
+ };
+
+ struct ComputePipelineCreateInfo
+ {
+ ComputePipelineCreateInfo( PipelineCreateFlags flags_ = PipelineCreateFlags(), PipelineShaderStageCreateInfo stage_ = PipelineShaderStageCreateInfo(), PipelineLayout layout_ = PipelineLayout(), Pipeline basePipelineHandle_ = Pipeline(), int32_t basePipelineIndex_ = 0 )
+ : sType( StructureType::eComputePipelineCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , stage( stage_ )
+ , layout( layout_ )
+ , basePipelineHandle( basePipelineHandle_ )
+ , basePipelineIndex( basePipelineIndex_ )
+ {
+ }
+
+ ComputePipelineCreateInfo( VkComputePipelineCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ComputePipelineCreateInfo) );
+ }
+
+ ComputePipelineCreateInfo& operator=( VkComputePipelineCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ComputePipelineCreateInfo) );
+ return *this;
+ }
+
+ ComputePipelineCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ ComputePipelineCreateInfo& setFlags( PipelineCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ ComputePipelineCreateInfo& setStage( PipelineShaderStageCreateInfo stage_ )
+ {
+ stage = stage_;
+ return *this;
+ }
+
+ ComputePipelineCreateInfo& setLayout( PipelineLayout layout_ )
+ {
+ layout = layout_;
+ return *this;
+ }
+
+ ComputePipelineCreateInfo& setBasePipelineHandle( Pipeline basePipelineHandle_ )
+ {
+ basePipelineHandle = basePipelineHandle_;
+ return *this;
+ }
+
+ ComputePipelineCreateInfo& setBasePipelineIndex( int32_t basePipelineIndex_ )
+ {
+ basePipelineIndex = basePipelineIndex_;
+ return *this;
+ }
+
+ operator const VkComputePipelineCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkComputePipelineCreateInfo*>(this);
+ }
+
+ bool operator==( ComputePipelineCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( stage == rhs.stage )
+ && ( layout == rhs.layout )
+ && ( basePipelineHandle == rhs.basePipelineHandle )
+ && ( basePipelineIndex == rhs.basePipelineIndex );
+ }
+
+ bool operator!=( ComputePipelineCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ PipelineCreateFlags flags;
+ PipelineShaderStageCreateInfo stage;
+ PipelineLayout layout;
+ Pipeline basePipelineHandle;
+ int32_t basePipelineIndex;
+ };
+ static_assert( sizeof( ComputePipelineCreateInfo ) == sizeof( VkComputePipelineCreateInfo ), "struct and wrapper have different size!" );
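+
+  // Usage sketch (illustrative; the device, pipelineCache, shaderStage and layout
+  // objects are assumed to be created elsewhere in caller code):
+  //
+  //   vk::ComputePipelineCreateInfo createInfo = vk::ComputePipelineCreateInfo()
+  //     .setStage( shaderStage )
+  //     .setLayout( layout );
+  //   vk::Pipeline pipeline = device.createComputePipeline( pipelineCache, createInfo );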
+
+ enum class ColorComponentFlagBits
+ {
+ eR = VK_COLOR_COMPONENT_R_BIT,
+ eG = VK_COLOR_COMPONENT_G_BIT,
+ eB = VK_COLOR_COMPONENT_B_BIT,
+ eA = VK_COLOR_COMPONENT_A_BIT
+ };
+
+ using ColorComponentFlags = Flags<ColorComponentFlagBits, VkColorComponentFlags>;
+
+ VULKAN_HPP_INLINE ColorComponentFlags operator|( ColorComponentFlagBits bit0, ColorComponentFlagBits bit1 )
+ {
+ return ColorComponentFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE ColorComponentFlags operator~( ColorComponentFlagBits bits )
+ {
+ return ~( ColorComponentFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<ColorComponentFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(ColorComponentFlagBits::eR) | VkFlags(ColorComponentFlagBits::eG) | VkFlags(ColorComponentFlagBits::eB) | VkFlags(ColorComponentFlagBits::eA)
+ };
+ };
+
+ struct PipelineColorBlendAttachmentState
+ {
+ PipelineColorBlendAttachmentState( Bool32 blendEnable_ = 0, BlendFactor srcColorBlendFactor_ = BlendFactor::eZero, BlendFactor dstColorBlendFactor_ = BlendFactor::eZero, BlendOp colorBlendOp_ = BlendOp::eAdd, BlendFactor srcAlphaBlendFactor_ = BlendFactor::eZero, BlendFactor dstAlphaBlendFactor_ = BlendFactor::eZero, BlendOp alphaBlendOp_ = BlendOp::eAdd, ColorComponentFlags colorWriteMask_ = ColorComponentFlags() )
+ : blendEnable( blendEnable_ )
+ , srcColorBlendFactor( srcColorBlendFactor_ )
+ , dstColorBlendFactor( dstColorBlendFactor_ )
+ , colorBlendOp( colorBlendOp_ )
+ , srcAlphaBlendFactor( srcAlphaBlendFactor_ )
+ , dstAlphaBlendFactor( dstAlphaBlendFactor_ )
+ , alphaBlendOp( alphaBlendOp_ )
+ , colorWriteMask( colorWriteMask_ )
+ {
+ }
+
+ PipelineColorBlendAttachmentState( VkPipelineColorBlendAttachmentState const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineColorBlendAttachmentState) );
+ }
+
+ PipelineColorBlendAttachmentState& operator=( VkPipelineColorBlendAttachmentState const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineColorBlendAttachmentState) );
+ return *this;
+ }
+
+ PipelineColorBlendAttachmentState& setBlendEnable( Bool32 blendEnable_ )
+ {
+ blendEnable = blendEnable_;
+ return *this;
+ }
+
+ PipelineColorBlendAttachmentState& setSrcColorBlendFactor( BlendFactor srcColorBlendFactor_ )
+ {
+ srcColorBlendFactor = srcColorBlendFactor_;
+ return *this;
+ }
+
+ PipelineColorBlendAttachmentState& setDstColorBlendFactor( BlendFactor dstColorBlendFactor_ )
+ {
+ dstColorBlendFactor = dstColorBlendFactor_;
+ return *this;
+ }
+
+ PipelineColorBlendAttachmentState& setColorBlendOp( BlendOp colorBlendOp_ )
+ {
+ colorBlendOp = colorBlendOp_;
+ return *this;
+ }
+
+ PipelineColorBlendAttachmentState& setSrcAlphaBlendFactor( BlendFactor srcAlphaBlendFactor_ )
+ {
+ srcAlphaBlendFactor = srcAlphaBlendFactor_;
+ return *this;
+ }
+
+ PipelineColorBlendAttachmentState& setDstAlphaBlendFactor( BlendFactor dstAlphaBlendFactor_ )
+ {
+ dstAlphaBlendFactor = dstAlphaBlendFactor_;
+ return *this;
+ }
+
+ PipelineColorBlendAttachmentState& setAlphaBlendOp( BlendOp alphaBlendOp_ )
+ {
+ alphaBlendOp = alphaBlendOp_;
+ return *this;
+ }
+
+ PipelineColorBlendAttachmentState& setColorWriteMask( ColorComponentFlags colorWriteMask_ )
+ {
+ colorWriteMask = colorWriteMask_;
+ return *this;
+ }
+
+ operator const VkPipelineColorBlendAttachmentState&() const
+ {
+ return *reinterpret_cast<const VkPipelineColorBlendAttachmentState*>(this);
+ }
+
+ bool operator==( PipelineColorBlendAttachmentState const& rhs ) const
+ {
+ return ( blendEnable == rhs.blendEnable )
+ && ( srcColorBlendFactor == rhs.srcColorBlendFactor )
+ && ( dstColorBlendFactor == rhs.dstColorBlendFactor )
+ && ( colorBlendOp == rhs.colorBlendOp )
+ && ( srcAlphaBlendFactor == rhs.srcAlphaBlendFactor )
+ && ( dstAlphaBlendFactor == rhs.dstAlphaBlendFactor )
+ && ( alphaBlendOp == rhs.alphaBlendOp )
+ && ( colorWriteMask == rhs.colorWriteMask );
+ }
+
+ bool operator!=( PipelineColorBlendAttachmentState const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ Bool32 blendEnable;
+ BlendFactor srcColorBlendFactor;
+ BlendFactor dstColorBlendFactor;
+ BlendOp colorBlendOp;
+ BlendFactor srcAlphaBlendFactor;
+ BlendFactor dstAlphaBlendFactor;
+ BlendOp alphaBlendOp;
+ ColorComponentFlags colorWriteMask;
+ };
+ static_assert( sizeof( PipelineColorBlendAttachmentState ) == sizeof( VkPipelineColorBlendAttachmentState ), "struct and wrapper have different size!" );
+
+ struct PipelineColorBlendStateCreateInfo
+ {
+ PipelineColorBlendStateCreateInfo( PipelineColorBlendStateCreateFlags flags_ = PipelineColorBlendStateCreateFlags(), Bool32 logicOpEnable_ = 0, LogicOp logicOp_ = LogicOp::eClear, uint32_t attachmentCount_ = 0, const PipelineColorBlendAttachmentState* pAttachments_ = nullptr, std::array<float,4> const& blendConstants_ = { { 0, 0, 0, 0 } } )
+ : sType( StructureType::ePipelineColorBlendStateCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , logicOpEnable( logicOpEnable_ )
+ , logicOp( logicOp_ )
+ , attachmentCount( attachmentCount_ )
+ , pAttachments( pAttachments_ )
+ {
+ memcpy( &blendConstants, blendConstants_.data(), 4 * sizeof( float ) );
+ }
+
+ PipelineColorBlendStateCreateInfo( VkPipelineColorBlendStateCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineColorBlendStateCreateInfo) );
+ }
+
+ PipelineColorBlendStateCreateInfo& operator=( VkPipelineColorBlendStateCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineColorBlendStateCreateInfo) );
+ return *this;
+ }
+
+ PipelineColorBlendStateCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PipelineColorBlendStateCreateInfo& setFlags( PipelineColorBlendStateCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ PipelineColorBlendStateCreateInfo& setLogicOpEnable( Bool32 logicOpEnable_ )
+ {
+ logicOpEnable = logicOpEnable_;
+ return *this;
+ }
+
+ PipelineColorBlendStateCreateInfo& setLogicOp( LogicOp logicOp_ )
+ {
+ logicOp = logicOp_;
+ return *this;
+ }
+
+ PipelineColorBlendStateCreateInfo& setAttachmentCount( uint32_t attachmentCount_ )
+ {
+ attachmentCount = attachmentCount_;
+ return *this;
+ }
+
+ PipelineColorBlendStateCreateInfo& setPAttachments( const PipelineColorBlendAttachmentState* pAttachments_ )
+ {
+ pAttachments = pAttachments_;
+ return *this;
+ }
+
+ PipelineColorBlendStateCreateInfo& setBlendConstants( std::array<float,4> blendConstants_ )
+ {
+ memcpy( &blendConstants, blendConstants_.data(), 4 * sizeof( float ) );
+ return *this;
+ }
+
+ operator const VkPipelineColorBlendStateCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkPipelineColorBlendStateCreateInfo*>(this);
+ }
+
+ bool operator==( PipelineColorBlendStateCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( logicOpEnable == rhs.logicOpEnable )
+ && ( logicOp == rhs.logicOp )
+ && ( attachmentCount == rhs.attachmentCount )
+ && ( pAttachments == rhs.pAttachments )
+ && ( memcmp( blendConstants, rhs.blendConstants, 4 * sizeof( float ) ) == 0 );
+ }
+
+ bool operator!=( PipelineColorBlendStateCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ PipelineColorBlendStateCreateFlags flags;
+ Bool32 logicOpEnable;
+ LogicOp logicOp;
+ uint32_t attachmentCount;
+ const PipelineColorBlendAttachmentState* pAttachments;
+ float blendConstants[4];
+ };
+ static_assert( sizeof( PipelineColorBlendStateCreateInfo ) == sizeof( VkPipelineColorBlendStateCreateInfo ), "struct and wrapper have different size!" );
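+
+  // Usage sketch (illustrative): a single attachment with blending disabled and all
+  // color channels written, combining bits with the ColorComponentFlagBits operator|
+  // defined above.
+  //
+  //   vk::PipelineColorBlendAttachmentState attachment = vk::PipelineColorBlendAttachmentState()
+  //     .setColorWriteMask( vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG
+  //                       | vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA );
+  //   vk::PipelineColorBlendStateCreateInfo colorBlendState = vk::PipelineColorBlendStateCreateInfo()
+  //     .setAttachmentCount( 1 )
+  //     .setPAttachments( &attachment );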
+
+ enum class FenceCreateFlagBits
+ {
+ eSignaled = VK_FENCE_CREATE_SIGNALED_BIT
+ };
+
+ using FenceCreateFlags = Flags<FenceCreateFlagBits, VkFenceCreateFlags>;
+
+ VULKAN_HPP_INLINE FenceCreateFlags operator|( FenceCreateFlagBits bit0, FenceCreateFlagBits bit1 )
+ {
+ return FenceCreateFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE FenceCreateFlags operator~( FenceCreateFlagBits bits )
+ {
+ return ~( FenceCreateFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<FenceCreateFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(FenceCreateFlagBits::eSignaled)
+ };
+ };
+
+ struct FenceCreateInfo
+ {
+ FenceCreateInfo( FenceCreateFlags flags_ = FenceCreateFlags() )
+ : sType( StructureType::eFenceCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ {
+ }
+
+ FenceCreateInfo( VkFenceCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(FenceCreateInfo) );
+ }
+
+ FenceCreateInfo& operator=( VkFenceCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(FenceCreateInfo) );
+ return *this;
+ }
+
+ FenceCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ FenceCreateInfo& setFlags( FenceCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ operator const VkFenceCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkFenceCreateInfo*>(this);
+ }
+
+ bool operator==( FenceCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags );
+ }
+
+ bool operator!=( FenceCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ FenceCreateFlags flags;
+ };
+ static_assert( sizeof( FenceCreateInfo ) == sizeof( VkFenceCreateInfo ), "struct and wrapper have different size!" );
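+
+  // Usage sketch (illustrative; device is assumed to be a vk::Device created elsewhere):
+  //
+  //   // Create a fence that starts in the signaled state.
+  //   vk::Fence fence = device.createFence( vk::FenceCreateInfo( vk::FenceCreateFlagBits::eSignaled ) );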
+
+ enum class FormatFeatureFlagBits
+ {
+ eSampledImage = VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT,
+ eStorageImage = VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT,
+ eStorageImageAtomic = VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT,
+ eUniformTexelBuffer = VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT,
+ eStorageTexelBuffer = VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT,
+ eStorageTexelBufferAtomic = VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT,
+ eVertexBuffer = VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT,
+ eColorAttachment = VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT,
+ eColorAttachmentBlend = VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT,
+ eDepthStencilAttachment = VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT,
+ eBlitSrc = VK_FORMAT_FEATURE_BLIT_SRC_BIT,
+ eBlitDst = VK_FORMAT_FEATURE_BLIT_DST_BIT,
+ eSampledImageFilterLinear = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT,
+ eSampledImageFilterCubicIMG = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG,
+ eTransferSrcKHR = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR,
+ eTransferDstKHR = VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR
+ };
+
+ using FormatFeatureFlags = Flags<FormatFeatureFlagBits, VkFormatFeatureFlags>;
+
+ VULKAN_HPP_INLINE FormatFeatureFlags operator|( FormatFeatureFlagBits bit0, FormatFeatureFlagBits bit1 )
+ {
+ return FormatFeatureFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE FormatFeatureFlags operator~( FormatFeatureFlagBits bits )
+ {
+ return ~( FormatFeatureFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<FormatFeatureFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(FormatFeatureFlagBits::eSampledImage) | VkFlags(FormatFeatureFlagBits::eStorageImage) | VkFlags(FormatFeatureFlagBits::eStorageImageAtomic) | VkFlags(FormatFeatureFlagBits::eUniformTexelBuffer) | VkFlags(FormatFeatureFlagBits::eStorageTexelBuffer) | VkFlags(FormatFeatureFlagBits::eStorageTexelBufferAtomic) | VkFlags(FormatFeatureFlagBits::eVertexBuffer) | VkFlags(FormatFeatureFlagBits::eColorAttachment) | VkFlags(FormatFeatureFlagBits::eColorAttachmentBlend) | VkFlags(FormatFeatureFlagBits::eDepthStencilAttachment) | VkFlags(FormatFeatureFlagBits::eBlitSrc) | VkFlags(FormatFeatureFlagBits::eBlitDst) | VkFlags(FormatFeatureFlagBits::eSampledImageFilterLinear) | VkFlags(FormatFeatureFlagBits::eSampledImageFilterCubicIMG) | VkFlags(FormatFeatureFlagBits::eTransferSrcKHR) | VkFlags(FormatFeatureFlagBits::eTransferDstKHR)
+ };
+ };
+
+ struct FormatProperties
+ {
+ operator const VkFormatProperties&() const
+ {
+ return *reinterpret_cast<const VkFormatProperties*>(this);
+ }
+
+ bool operator==( FormatProperties const& rhs ) const
+ {
+ return ( linearTilingFeatures == rhs.linearTilingFeatures )
+ && ( optimalTilingFeatures == rhs.optimalTilingFeatures )
+ && ( bufferFeatures == rhs.bufferFeatures );
+ }
+
+ bool operator!=( FormatProperties const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ FormatFeatureFlags linearTilingFeatures;
+ FormatFeatureFlags optimalTilingFeatures;
+ FormatFeatureFlags bufferFeatures;
+ };
+ static_assert( sizeof( FormatProperties ) == sizeof( VkFormatProperties ), "struct and wrapper have different size!" );
+
+ struct FormatProperties2KHR
+ {
+ operator const VkFormatProperties2KHR&() const
+ {
+ return *reinterpret_cast<const VkFormatProperties2KHR*>(this);
+ }
+
+ bool operator==( FormatProperties2KHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( formatProperties == rhs.formatProperties );
+ }
+
+ bool operator!=( FormatProperties2KHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ void* pNext;
+ FormatProperties formatProperties;
+ };
+ static_assert( sizeof( FormatProperties2KHR ) == sizeof( VkFormatProperties2KHR ), "struct and wrapper have different size!" );
+
+ enum class QueryControlFlagBits
+ {
+ ePrecise = VK_QUERY_CONTROL_PRECISE_BIT
+ };
+
+ using QueryControlFlags = Flags<QueryControlFlagBits, VkQueryControlFlags>;
+
+ VULKAN_HPP_INLINE QueryControlFlags operator|( QueryControlFlagBits bit0, QueryControlFlagBits bit1 )
+ {
+ return QueryControlFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE QueryControlFlags operator~( QueryControlFlagBits bits )
+ {
+ return ~( QueryControlFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<QueryControlFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(QueryControlFlagBits::ePrecise)
+ };
+ };
+
+ enum class QueryResultFlagBits
+ {
+ e64 = VK_QUERY_RESULT_64_BIT,
+ eWait = VK_QUERY_RESULT_WAIT_BIT,
+ eWithAvailability = VK_QUERY_RESULT_WITH_AVAILABILITY_BIT,
+ ePartial = VK_QUERY_RESULT_PARTIAL_BIT
+ };
+
+ using QueryResultFlags = Flags<QueryResultFlagBits, VkQueryResultFlags>;
+
+ VULKAN_HPP_INLINE QueryResultFlags operator|( QueryResultFlagBits bit0, QueryResultFlagBits bit1 )
+ {
+ return QueryResultFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE QueryResultFlags operator~( QueryResultFlagBits bits )
+ {
+ return ~( QueryResultFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<QueryResultFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(QueryResultFlagBits::e64) | VkFlags(QueryResultFlagBits::eWait) | VkFlags(QueryResultFlagBits::eWithAvailability) | VkFlags(QueryResultFlagBits::ePartial)
+ };
+ };
+
+ enum class CommandBufferUsageFlagBits
+ {
+ eOneTimeSubmit = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
+ eRenderPassContinue = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT,
+ eSimultaneousUse = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT
+ };
+
+ using CommandBufferUsageFlags = Flags<CommandBufferUsageFlagBits, VkCommandBufferUsageFlags>;
+
+ VULKAN_HPP_INLINE CommandBufferUsageFlags operator|( CommandBufferUsageFlagBits bit0, CommandBufferUsageFlagBits bit1 )
+ {
+ return CommandBufferUsageFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE CommandBufferUsageFlags operator~( CommandBufferUsageFlagBits bits )
+ {
+ return ~( CommandBufferUsageFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<CommandBufferUsageFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(CommandBufferUsageFlagBits::eOneTimeSubmit) | VkFlags(CommandBufferUsageFlagBits::eRenderPassContinue) | VkFlags(CommandBufferUsageFlagBits::eSimultaneousUse)
+ };
+ };
+
+ enum class QueryPipelineStatisticFlagBits
+ {
+ eInputAssemblyVertices = VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT,
+ eInputAssemblyPrimitives = VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT,
+ eVertexShaderInvocations = VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT,
+ eGeometryShaderInvocations = VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT,
+ eGeometryShaderPrimitives = VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT,
+ eClippingInvocations = VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT,
+ eClippingPrimitives = VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT,
+ eFragmentShaderInvocations = VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT,
+ eTessellationControlShaderPatches = VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT,
+ eTessellationEvaluationShaderInvocations = VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT,
+ eComputeShaderInvocations = VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT
+ };
+
+ using QueryPipelineStatisticFlags = Flags<QueryPipelineStatisticFlagBits, VkQueryPipelineStatisticFlags>;
+
+ VULKAN_HPP_INLINE QueryPipelineStatisticFlags operator|( QueryPipelineStatisticFlagBits bit0, QueryPipelineStatisticFlagBits bit1 )
+ {
+ return QueryPipelineStatisticFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE QueryPipelineStatisticFlags operator~( QueryPipelineStatisticFlagBits bits )
+ {
+ return ~( QueryPipelineStatisticFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<QueryPipelineStatisticFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(QueryPipelineStatisticFlagBits::eInputAssemblyVertices) | VkFlags(QueryPipelineStatisticFlagBits::eInputAssemblyPrimitives) | VkFlags(QueryPipelineStatisticFlagBits::eVertexShaderInvocations) | VkFlags(QueryPipelineStatisticFlagBits::eGeometryShaderInvocations) | VkFlags(QueryPipelineStatisticFlagBits::eGeometryShaderPrimitives) | VkFlags(QueryPipelineStatisticFlagBits::eClippingInvocations) | VkFlags(QueryPipelineStatisticFlagBits::eClippingPrimitives) | VkFlags(QueryPipelineStatisticFlagBits::eFragmentShaderInvocations) | VkFlags(QueryPipelineStatisticFlagBits::eTessellationControlShaderPatches) | VkFlags(QueryPipelineStatisticFlagBits::eTessellationEvaluationShaderInvocations) | VkFlags(QueryPipelineStatisticFlagBits::eComputeShaderInvocations)
+ };
+ };
+
+ struct CommandBufferInheritanceInfo
+ {
+ CommandBufferInheritanceInfo( RenderPass renderPass_ = RenderPass(), uint32_t subpass_ = 0, Framebuffer framebuffer_ = Framebuffer(), Bool32 occlusionQueryEnable_ = 0, QueryControlFlags queryFlags_ = QueryControlFlags(), QueryPipelineStatisticFlags pipelineStatistics_ = QueryPipelineStatisticFlags() )
+ : sType( StructureType::eCommandBufferInheritanceInfo )
+ , pNext( nullptr )
+ , renderPass( renderPass_ )
+ , subpass( subpass_ )
+ , framebuffer( framebuffer_ )
+ , occlusionQueryEnable( occlusionQueryEnable_ )
+ , queryFlags( queryFlags_ )
+ , pipelineStatistics( pipelineStatistics_ )
+ {
+ }
+
+ CommandBufferInheritanceInfo( VkCommandBufferInheritanceInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(CommandBufferInheritanceInfo) );
+ }
+
+ CommandBufferInheritanceInfo& operator=( VkCommandBufferInheritanceInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(CommandBufferInheritanceInfo) );
+ return *this;
+ }
+
+ CommandBufferInheritanceInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ CommandBufferInheritanceInfo& setRenderPass( RenderPass renderPass_ )
+ {
+ renderPass = renderPass_;
+ return *this;
+ }
+
+ CommandBufferInheritanceInfo& setSubpass( uint32_t subpass_ )
+ {
+ subpass = subpass_;
+ return *this;
+ }
+
+ CommandBufferInheritanceInfo& setFramebuffer( Framebuffer framebuffer_ )
+ {
+ framebuffer = framebuffer_;
+ return *this;
+ }
+
+ CommandBufferInheritanceInfo& setOcclusionQueryEnable( Bool32 occlusionQueryEnable_ )
+ {
+ occlusionQueryEnable = occlusionQueryEnable_;
+ return *this;
+ }
+
+ CommandBufferInheritanceInfo& setQueryFlags( QueryControlFlags queryFlags_ )
+ {
+ queryFlags = queryFlags_;
+ return *this;
+ }
+
+ CommandBufferInheritanceInfo& setPipelineStatistics( QueryPipelineStatisticFlags pipelineStatistics_ )
+ {
+ pipelineStatistics = pipelineStatistics_;
+ return *this;
+ }
+
+ operator const VkCommandBufferInheritanceInfo&() const
+ {
+ return *reinterpret_cast<const VkCommandBufferInheritanceInfo*>(this);
+ }
+
+ bool operator==( CommandBufferInheritanceInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( renderPass == rhs.renderPass )
+ && ( subpass == rhs.subpass )
+ && ( framebuffer == rhs.framebuffer )
+ && ( occlusionQueryEnable == rhs.occlusionQueryEnable )
+ && ( queryFlags == rhs.queryFlags )
+ && ( pipelineStatistics == rhs.pipelineStatistics );
+ }
+
+ bool operator!=( CommandBufferInheritanceInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ RenderPass renderPass;
+ uint32_t subpass;
+ Framebuffer framebuffer;
+ Bool32 occlusionQueryEnable;
+ QueryControlFlags queryFlags;
+ QueryPipelineStatisticFlags pipelineStatistics;
+ };
+ static_assert( sizeof( CommandBufferInheritanceInfo ) == sizeof( VkCommandBufferInheritanceInfo ), "struct and wrapper have different size!" );
+
+ struct CommandBufferBeginInfo
+ {
+ CommandBufferBeginInfo( CommandBufferUsageFlags flags_ = CommandBufferUsageFlags(), const CommandBufferInheritanceInfo* pInheritanceInfo_ = nullptr )
+ : sType( StructureType::eCommandBufferBeginInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , pInheritanceInfo( pInheritanceInfo_ )
+ {
+ }
+
+ CommandBufferBeginInfo( VkCommandBufferBeginInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(CommandBufferBeginInfo) );
+ }
+
+ CommandBufferBeginInfo& operator=( VkCommandBufferBeginInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(CommandBufferBeginInfo) );
+ return *this;
+ }
+
+ CommandBufferBeginInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ CommandBufferBeginInfo& setFlags( CommandBufferUsageFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ CommandBufferBeginInfo& setPInheritanceInfo( const CommandBufferInheritanceInfo* pInheritanceInfo_ )
+ {
+ pInheritanceInfo = pInheritanceInfo_;
+ return *this;
+ }
+
+ operator const VkCommandBufferBeginInfo&() const
+ {
+ return *reinterpret_cast<const VkCommandBufferBeginInfo*>(this);
+ }
+
+ bool operator==( CommandBufferBeginInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( pInheritanceInfo == rhs.pInheritanceInfo );
+ }
+
+ bool operator!=( CommandBufferBeginInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ CommandBufferUsageFlags flags;
+ const CommandBufferInheritanceInfo* pInheritanceInfo;
+ };
+ static_assert( sizeof( CommandBufferBeginInfo ) == sizeof( VkCommandBufferBeginInfo ), "struct and wrapper have different size!" );
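+
+  // Usage sketch (illustrative; cmdBuffer is assumed to be a vk::CommandBuffer
+  // allocated elsewhere):
+  //
+  //   vk::CommandBufferBeginInfo beginInfo( vk::CommandBufferUsageFlagBits::eOneTimeSubmit );
+  //   cmdBuffer.begin( beginInfo );
+  //   // ... record commands ...
+  //   cmdBuffer.end();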
+
+ struct QueryPoolCreateInfo
+ {
+ QueryPoolCreateInfo( QueryPoolCreateFlags flags_ = QueryPoolCreateFlags(), QueryType queryType_ = QueryType::eOcclusion, uint32_t queryCount_ = 0, QueryPipelineStatisticFlags pipelineStatistics_ = QueryPipelineStatisticFlags() )
+ : sType( StructureType::eQueryPoolCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , queryType( queryType_ )
+ , queryCount( queryCount_ )
+ , pipelineStatistics( pipelineStatistics_ )
+ {
+ }
+
+ QueryPoolCreateInfo( VkQueryPoolCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(QueryPoolCreateInfo) );
+ }
+
+ QueryPoolCreateInfo& operator=( VkQueryPoolCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(QueryPoolCreateInfo) );
+ return *this;
+ }
+
+ QueryPoolCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ QueryPoolCreateInfo& setFlags( QueryPoolCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ QueryPoolCreateInfo& setQueryType( QueryType queryType_ )
+ {
+ queryType = queryType_;
+ return *this;
+ }
+
+ QueryPoolCreateInfo& setQueryCount( uint32_t queryCount_ )
+ {
+ queryCount = queryCount_;
+ return *this;
+ }
+
+ QueryPoolCreateInfo& setPipelineStatistics( QueryPipelineStatisticFlags pipelineStatistics_ )
+ {
+ pipelineStatistics = pipelineStatistics_;
+ return *this;
+ }
+
+ operator const VkQueryPoolCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkQueryPoolCreateInfo*>(this);
+ }
+
+ bool operator==( QueryPoolCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( queryType == rhs.queryType )
+ && ( queryCount == rhs.queryCount )
+ && ( pipelineStatistics == rhs.pipelineStatistics );
+ }
+
+ bool operator!=( QueryPoolCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ QueryPoolCreateFlags flags;
+ QueryType queryType;
+ uint32_t queryCount;
+ QueryPipelineStatisticFlags pipelineStatistics;
+ };
+ static_assert( sizeof( QueryPoolCreateInfo ) == sizeof( VkQueryPoolCreateInfo ), "struct and wrapper have different size!" );
+
+ enum class ImageAspectFlagBits
+ {
+ eColor = VK_IMAGE_ASPECT_COLOR_BIT,
+ eDepth = VK_IMAGE_ASPECT_DEPTH_BIT,
+ eStencil = VK_IMAGE_ASPECT_STENCIL_BIT,
+ eMetadata = VK_IMAGE_ASPECT_METADATA_BIT
+ };
+
+ using ImageAspectFlags = Flags<ImageAspectFlagBits, VkImageAspectFlags>;
+
+ VULKAN_HPP_INLINE ImageAspectFlags operator|( ImageAspectFlagBits bit0, ImageAspectFlagBits bit1 )
+ {
+ return ImageAspectFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE ImageAspectFlags operator~( ImageAspectFlagBits bits )
+ {
+ return ~( ImageAspectFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<ImageAspectFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(ImageAspectFlagBits::eColor) | VkFlags(ImageAspectFlagBits::eDepth) | VkFlags(ImageAspectFlagBits::eStencil) | VkFlags(ImageAspectFlagBits::eMetadata)
+ };
+ };
+
+ struct ImageSubresource
+ {
+ ImageSubresource( ImageAspectFlags aspectMask_ = ImageAspectFlags(), uint32_t mipLevel_ = 0, uint32_t arrayLayer_ = 0 )
+ : aspectMask( aspectMask_ )
+ , mipLevel( mipLevel_ )
+ , arrayLayer( arrayLayer_ )
+ {
+ }
+
+ ImageSubresource( VkImageSubresource const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImageSubresource) );
+ }
+
+ ImageSubresource& operator=( VkImageSubresource const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImageSubresource) );
+ return *this;
+ }
+
+ ImageSubresource& setAspectMask( ImageAspectFlags aspectMask_ )
+ {
+ aspectMask = aspectMask_;
+ return *this;
+ }
+
+ ImageSubresource& setMipLevel( uint32_t mipLevel_ )
+ {
+ mipLevel = mipLevel_;
+ return *this;
+ }
+
+ ImageSubresource& setArrayLayer( uint32_t arrayLayer_ )
+ {
+ arrayLayer = arrayLayer_;
+ return *this;
+ }
+
+ operator const VkImageSubresource&() const
+ {
+ return *reinterpret_cast<const VkImageSubresource*>(this);
+ }
+
+ bool operator==( ImageSubresource const& rhs ) const
+ {
+ return ( aspectMask == rhs.aspectMask )
+ && ( mipLevel == rhs.mipLevel )
+ && ( arrayLayer == rhs.arrayLayer );
+ }
+
+ bool operator!=( ImageSubresource const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ ImageAspectFlags aspectMask;
+ uint32_t mipLevel;
+ uint32_t arrayLayer;
+ };
+ static_assert( sizeof( ImageSubresource ) == sizeof( VkImageSubresource ), "struct and wrapper have different size!" );
+
+ struct ImageSubresourceLayers
+ {
+ ImageSubresourceLayers( ImageAspectFlags aspectMask_ = ImageAspectFlags(), uint32_t mipLevel_ = 0, uint32_t baseArrayLayer_ = 0, uint32_t layerCount_ = 0 )
+ : aspectMask( aspectMask_ )
+ , mipLevel( mipLevel_ )
+ , baseArrayLayer( baseArrayLayer_ )
+ , layerCount( layerCount_ )
+ {
+ }
+
+ ImageSubresourceLayers( VkImageSubresourceLayers const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImageSubresourceLayers) );
+ }
+
+ ImageSubresourceLayers& operator=( VkImageSubresourceLayers const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImageSubresourceLayers) );
+ return *this;
+ }
+
+ ImageSubresourceLayers& setAspectMask( ImageAspectFlags aspectMask_ )
+ {
+ aspectMask = aspectMask_;
+ return *this;
+ }
+
+ ImageSubresourceLayers& setMipLevel( uint32_t mipLevel_ )
+ {
+ mipLevel = mipLevel_;
+ return *this;
+ }
+
+ ImageSubresourceLayers& setBaseArrayLayer( uint32_t baseArrayLayer_ )
+ {
+ baseArrayLayer = baseArrayLayer_;
+ return *this;
+ }
+
+ ImageSubresourceLayers& setLayerCount( uint32_t layerCount_ )
+ {
+ layerCount = layerCount_;
+ return *this;
+ }
+
+ operator const VkImageSubresourceLayers&() const
+ {
+ return *reinterpret_cast<const VkImageSubresourceLayers*>(this);
+ }
+
+ bool operator==( ImageSubresourceLayers const& rhs ) const
+ {
+ return ( aspectMask == rhs.aspectMask )
+ && ( mipLevel == rhs.mipLevel )
+ && ( baseArrayLayer == rhs.baseArrayLayer )
+ && ( layerCount == rhs.layerCount );
+ }
+
+ bool operator!=( ImageSubresourceLayers const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ ImageAspectFlags aspectMask;
+ uint32_t mipLevel;
+ uint32_t baseArrayLayer;
+ uint32_t layerCount;
+ };
+ static_assert( sizeof( ImageSubresourceLayers ) == sizeof( VkImageSubresourceLayers ), "struct and wrapper have different size!" );
+
+ struct ImageSubresourceRange
+ {
+ ImageSubresourceRange( ImageAspectFlags aspectMask_ = ImageAspectFlags(), uint32_t baseMipLevel_ = 0, uint32_t levelCount_ = 0, uint32_t baseArrayLayer_ = 0, uint32_t layerCount_ = 0 )
+ : aspectMask( aspectMask_ )
+ , baseMipLevel( baseMipLevel_ )
+ , levelCount( levelCount_ )
+ , baseArrayLayer( baseArrayLayer_ )
+ , layerCount( layerCount_ )
+ {
+ }
+
+ ImageSubresourceRange( VkImageSubresourceRange const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImageSubresourceRange) );
+ }
+
+ ImageSubresourceRange& operator=( VkImageSubresourceRange const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImageSubresourceRange) );
+ return *this;
+ }
+
+ ImageSubresourceRange& setAspectMask( ImageAspectFlags aspectMask_ )
+ {
+ aspectMask = aspectMask_;
+ return *this;
+ }
+
+ ImageSubresourceRange& setBaseMipLevel( uint32_t baseMipLevel_ )
+ {
+ baseMipLevel = baseMipLevel_;
+ return *this;
+ }
+
+ ImageSubresourceRange& setLevelCount( uint32_t levelCount_ )
+ {
+ levelCount = levelCount_;
+ return *this;
+ }
+
+ ImageSubresourceRange& setBaseArrayLayer( uint32_t baseArrayLayer_ )
+ {
+ baseArrayLayer = baseArrayLayer_;
+ return *this;
+ }
+
+ ImageSubresourceRange& setLayerCount( uint32_t layerCount_ )
+ {
+ layerCount = layerCount_;
+ return *this;
+ }
+
+ operator const VkImageSubresourceRange&() const
+ {
+ return *reinterpret_cast<const VkImageSubresourceRange*>(this);
+ }
+
+ bool operator==( ImageSubresourceRange const& rhs ) const
+ {
+ return ( aspectMask == rhs.aspectMask )
+ && ( baseMipLevel == rhs.baseMipLevel )
+ && ( levelCount == rhs.levelCount )
+ && ( baseArrayLayer == rhs.baseArrayLayer )
+ && ( layerCount == rhs.layerCount );
+ }
+
+ bool operator!=( ImageSubresourceRange const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ ImageAspectFlags aspectMask;
+ uint32_t baseMipLevel;
+ uint32_t levelCount;
+ uint32_t baseArrayLayer;
+ uint32_t layerCount;
+ };
+ static_assert( sizeof( ImageSubresourceRange ) == sizeof( VkImageSubresourceRange ), "struct and wrapper have different size!" );
+
+ struct ImageMemoryBarrier
+ {
+ ImageMemoryBarrier( AccessFlags srcAccessMask_ = AccessFlags(), AccessFlags dstAccessMask_ = AccessFlags(), ImageLayout oldLayout_ = ImageLayout::eUndefined, ImageLayout newLayout_ = ImageLayout::eUndefined, uint32_t srcQueueFamilyIndex_ = 0, uint32_t dstQueueFamilyIndex_ = 0, Image image_ = Image(), ImageSubresourceRange subresourceRange_ = ImageSubresourceRange() )
+ : sType( StructureType::eImageMemoryBarrier )
+ , pNext( nullptr )
+ , srcAccessMask( srcAccessMask_ )
+ , dstAccessMask( dstAccessMask_ )
+ , oldLayout( oldLayout_ )
+ , newLayout( newLayout_ )
+ , srcQueueFamilyIndex( srcQueueFamilyIndex_ )
+ , dstQueueFamilyIndex( dstQueueFamilyIndex_ )
+ , image( image_ )
+ , subresourceRange( subresourceRange_ )
+ {
+ }
+
+ ImageMemoryBarrier( VkImageMemoryBarrier const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImageMemoryBarrier) );
+ }
+
+ ImageMemoryBarrier& operator=( VkImageMemoryBarrier const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImageMemoryBarrier) );
+ return *this;
+ }
+
+ ImageMemoryBarrier& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ ImageMemoryBarrier& setSrcAccessMask( AccessFlags srcAccessMask_ )
+ {
+ srcAccessMask = srcAccessMask_;
+ return *this;
+ }
+
+ ImageMemoryBarrier& setDstAccessMask( AccessFlags dstAccessMask_ )
+ {
+ dstAccessMask = dstAccessMask_;
+ return *this;
+ }
+
+ ImageMemoryBarrier& setOldLayout( ImageLayout oldLayout_ )
+ {
+ oldLayout = oldLayout_;
+ return *this;
+ }
+
+ ImageMemoryBarrier& setNewLayout( ImageLayout newLayout_ )
+ {
+ newLayout = newLayout_;
+ return *this;
+ }
+
+ ImageMemoryBarrier& setSrcQueueFamilyIndex( uint32_t srcQueueFamilyIndex_ )
+ {
+ srcQueueFamilyIndex = srcQueueFamilyIndex_;
+ return *this;
+ }
+
+ ImageMemoryBarrier& setDstQueueFamilyIndex( uint32_t dstQueueFamilyIndex_ )
+ {
+ dstQueueFamilyIndex = dstQueueFamilyIndex_;
+ return *this;
+ }
+
+ ImageMemoryBarrier& setImage( Image image_ )
+ {
+ image = image_;
+ return *this;
+ }
+
+ ImageMemoryBarrier& setSubresourceRange( ImageSubresourceRange subresourceRange_ )
+ {
+ subresourceRange = subresourceRange_;
+ return *this;
+ }
+
+ operator const VkImageMemoryBarrier&() const
+ {
+ return *reinterpret_cast<const VkImageMemoryBarrier*>(this);
+ }
+
+ bool operator==( ImageMemoryBarrier const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( srcAccessMask == rhs.srcAccessMask )
+ && ( dstAccessMask == rhs.dstAccessMask )
+ && ( oldLayout == rhs.oldLayout )
+ && ( newLayout == rhs.newLayout )
+ && ( srcQueueFamilyIndex == rhs.srcQueueFamilyIndex )
+ && ( dstQueueFamilyIndex == rhs.dstQueueFamilyIndex )
+ && ( image == rhs.image )
+ && ( subresourceRange == rhs.subresourceRange );
+ }
+
+ bool operator!=( ImageMemoryBarrier const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ AccessFlags srcAccessMask;
+ AccessFlags dstAccessMask;
+ ImageLayout oldLayout;
+ ImageLayout newLayout;
+ uint32_t srcQueueFamilyIndex;
+ uint32_t dstQueueFamilyIndex;
+ Image image;
+ ImageSubresourceRange subresourceRange;
+ };
+ static_assert( sizeof( ImageMemoryBarrier ) == sizeof( VkImageMemoryBarrier ), "struct and wrapper have different size!" );
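+
+  // Usage sketch (illustrative): transition an image from eUndefined to
+  // eTransferDstOptimal over its whole color subresource range; the barrier would
+  // then typically be recorded with CommandBuffer::pipelineBarrier. The image handle
+  // is assumed to exist in caller code.
+  //
+  //   vk::ImageSubresourceRange range( vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 );
+  //   vk::ImageMemoryBarrier barrier( vk::AccessFlags(), vk::AccessFlagBits::eTransferWrite,
+  //                                   vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal,
+  //                                   VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED,
+  //                                   image, range );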
+
+ struct ImageViewCreateInfo
+ {
+ ImageViewCreateInfo( ImageViewCreateFlags flags_ = ImageViewCreateFlags(), Image image_ = Image(), ImageViewType viewType_ = ImageViewType::e1D, Format format_ = Format::eUndefined, ComponentMapping components_ = ComponentMapping(), ImageSubresourceRange subresourceRange_ = ImageSubresourceRange() )
+ : sType( StructureType::eImageViewCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , image( image_ )
+ , viewType( viewType_ )
+ , format( format_ )
+ , components( components_ )
+ , subresourceRange( subresourceRange_ )
+ {
+ }
+
+ ImageViewCreateInfo( VkImageViewCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImageViewCreateInfo) );
+ }
+
+ ImageViewCreateInfo& operator=( VkImageViewCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImageViewCreateInfo) );
+ return *this;
+ }
+
+ ImageViewCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ ImageViewCreateInfo& setFlags( ImageViewCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ ImageViewCreateInfo& setImage( Image image_ )
+ {
+ image = image_;
+ return *this;
+ }
+
+ ImageViewCreateInfo& setViewType( ImageViewType viewType_ )
+ {
+ viewType = viewType_;
+ return *this;
+ }
+
+ ImageViewCreateInfo& setFormat( Format format_ )
+ {
+ format = format_;
+ return *this;
+ }
+
+ ImageViewCreateInfo& setComponents( ComponentMapping components_ )
+ {
+ components = components_;
+ return *this;
+ }
+
+ ImageViewCreateInfo& setSubresourceRange( ImageSubresourceRange subresourceRange_ )
+ {
+ subresourceRange = subresourceRange_;
+ return *this;
+ }
+
+ operator const VkImageViewCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkImageViewCreateInfo*>(this);
+ }
+
+ bool operator==( ImageViewCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( image == rhs.image )
+ && ( viewType == rhs.viewType )
+ && ( format == rhs.format )
+ && ( components == rhs.components )
+ && ( subresourceRange == rhs.subresourceRange );
+ }
+
+ bool operator!=( ImageViewCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ ImageViewCreateFlags flags;
+ Image image;
+ ImageViewType viewType;
+ Format format;
+ ComponentMapping components;
+ ImageSubresourceRange subresourceRange;
+ };
+ static_assert( sizeof( ImageViewCreateInfo ) == sizeof( VkImageViewCreateInfo ), "struct and wrapper have different size!" );
+
+ struct ImageCopy
+ {
+ ImageCopy( ImageSubresourceLayers srcSubresource_ = ImageSubresourceLayers(), Offset3D srcOffset_ = Offset3D(), ImageSubresourceLayers dstSubresource_ = ImageSubresourceLayers(), Offset3D dstOffset_ = Offset3D(), Extent3D extent_ = Extent3D() )
+ : srcSubresource( srcSubresource_ )
+ , srcOffset( srcOffset_ )
+ , dstSubresource( dstSubresource_ )
+ , dstOffset( dstOffset_ )
+ , extent( extent_ )
+ {
+ }
+
+ ImageCopy( VkImageCopy const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImageCopy) );
+ }
+
+ ImageCopy& operator=( VkImageCopy const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImageCopy) );
+ return *this;
+ }
+
+ ImageCopy& setSrcSubresource( ImageSubresourceLayers srcSubresource_ )
+ {
+ srcSubresource = srcSubresource_;
+ return *this;
+ }
+
+ ImageCopy& setSrcOffset( Offset3D srcOffset_ )
+ {
+ srcOffset = srcOffset_;
+ return *this;
+ }
+
+ ImageCopy& setDstSubresource( ImageSubresourceLayers dstSubresource_ )
+ {
+ dstSubresource = dstSubresource_;
+ return *this;
+ }
+
+ ImageCopy& setDstOffset( Offset3D dstOffset_ )
+ {
+ dstOffset = dstOffset_;
+ return *this;
+ }
+
+ ImageCopy& setExtent( Extent3D extent_ )
+ {
+ extent = extent_;
+ return *this;
+ }
+
+ operator const VkImageCopy&() const
+ {
+ return *reinterpret_cast<const VkImageCopy*>(this);
+ }
+
+ bool operator==( ImageCopy const& rhs ) const
+ {
+ return ( srcSubresource == rhs.srcSubresource )
+ && ( srcOffset == rhs.srcOffset )
+ && ( dstSubresource == rhs.dstSubresource )
+ && ( dstOffset == rhs.dstOffset )
+ && ( extent == rhs.extent );
+ }
+
+ bool operator!=( ImageCopy const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ ImageSubresourceLayers srcSubresource;
+ Offset3D srcOffset;
+ ImageSubresourceLayers dstSubresource;
+ Offset3D dstOffset;
+ Extent3D extent;
+ };
+ static_assert( sizeof( ImageCopy ) == sizeof( VkImageCopy ), "struct and wrapper have different size!" );
+
+ struct ImageBlit
+ {
+ ImageBlit( ImageSubresourceLayers srcSubresource_ = ImageSubresourceLayers(), std::array<Offset3D,2> const& srcOffsets_ = { { Offset3D(), Offset3D() } }, ImageSubresourceLayers dstSubresource_ = ImageSubresourceLayers(), std::array<Offset3D,2> const& dstOffsets_ = { { Offset3D(), Offset3D() } } )
+ : srcSubresource( srcSubresource_ )
+ , dstSubresource( dstSubresource_ )
+ {
+ memcpy( &srcOffsets, srcOffsets_.data(), 2 * sizeof( Offset3D ) );
+ memcpy( &dstOffsets, dstOffsets_.data(), 2 * sizeof( Offset3D ) );
+ }
+
+ ImageBlit( VkImageBlit const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImageBlit) );
+ }
+
+ ImageBlit& operator=( VkImageBlit const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImageBlit) );
+ return *this;
+ }
+
+ ImageBlit& setSrcSubresource( ImageSubresourceLayers srcSubresource_ )
+ {
+ srcSubresource = srcSubresource_;
+ return *this;
+ }
+
+ ImageBlit& setSrcOffsets( std::array<Offset3D,2> srcOffsets_ )
+ {
+ memcpy( &srcOffsets, srcOffsets_.data(), 2 * sizeof( Offset3D ) );
+ return *this;
+ }
+
+ ImageBlit& setDstSubresource( ImageSubresourceLayers dstSubresource_ )
+ {
+ dstSubresource = dstSubresource_;
+ return *this;
+ }
+
+ ImageBlit& setDstOffsets( std::array<Offset3D,2> dstOffsets_ )
+ {
+ memcpy( &dstOffsets, dstOffsets_.data(), 2 * sizeof( Offset3D ) );
+ return *this;
+ }
+
+ operator const VkImageBlit&() const
+ {
+ return *reinterpret_cast<const VkImageBlit*>(this);
+ }
+
+ bool operator==( ImageBlit const& rhs ) const
+ {
+ return ( srcSubresource == rhs.srcSubresource )
+ && ( memcmp( srcOffsets, rhs.srcOffsets, 2 * sizeof( Offset3D ) ) == 0 )
+ && ( dstSubresource == rhs.dstSubresource )
+ && ( memcmp( dstOffsets, rhs.dstOffsets, 2 * sizeof( Offset3D ) ) == 0 );
+ }
+
+ bool operator!=( ImageBlit const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ ImageSubresourceLayers srcSubresource;
+ Offset3D srcOffsets[2];
+ ImageSubresourceLayers dstSubresource;
+ Offset3D dstOffsets[2];
+ };
+ static_assert( sizeof( ImageBlit ) == sizeof( VkImageBlit ), "struct and wrapper have different size!" );
+
+ struct BufferImageCopy
+ {
+ BufferImageCopy( DeviceSize bufferOffset_ = 0, uint32_t bufferRowLength_ = 0, uint32_t bufferImageHeight_ = 0, ImageSubresourceLayers imageSubresource_ = ImageSubresourceLayers(), Offset3D imageOffset_ = Offset3D(), Extent3D imageExtent_ = Extent3D() )
+ : bufferOffset( bufferOffset_ )
+ , bufferRowLength( bufferRowLength_ )
+ , bufferImageHeight( bufferImageHeight_ )
+ , imageSubresource( imageSubresource_ )
+ , imageOffset( imageOffset_ )
+ , imageExtent( imageExtent_ )
+ {
+ }
+
+ BufferImageCopy( VkBufferImageCopy const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(BufferImageCopy) );
+ }
+
+ BufferImageCopy& operator=( VkBufferImageCopy const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(BufferImageCopy) );
+ return *this;
+ }
+
+ BufferImageCopy& setBufferOffset( DeviceSize bufferOffset_ )
+ {
+ bufferOffset = bufferOffset_;
+ return *this;
+ }
+
+ BufferImageCopy& setBufferRowLength( uint32_t bufferRowLength_ )
+ {
+ bufferRowLength = bufferRowLength_;
+ return *this;
+ }
+
+ BufferImageCopy& setBufferImageHeight( uint32_t bufferImageHeight_ )
+ {
+ bufferImageHeight = bufferImageHeight_;
+ return *this;
+ }
+
+ BufferImageCopy& setImageSubresource( ImageSubresourceLayers imageSubresource_ )
+ {
+ imageSubresource = imageSubresource_;
+ return *this;
+ }
+
+ BufferImageCopy& setImageOffset( Offset3D imageOffset_ )
+ {
+ imageOffset = imageOffset_;
+ return *this;
+ }
+
+ BufferImageCopy& setImageExtent( Extent3D imageExtent_ )
+ {
+ imageExtent = imageExtent_;
+ return *this;
+ }
+
+ operator const VkBufferImageCopy&() const
+ {
+ return *reinterpret_cast<const VkBufferImageCopy*>(this);
+ }
+
+ bool operator==( BufferImageCopy const& rhs ) const
+ {
+ return ( bufferOffset == rhs.bufferOffset )
+ && ( bufferRowLength == rhs.bufferRowLength )
+ && ( bufferImageHeight == rhs.bufferImageHeight )
+ && ( imageSubresource == rhs.imageSubresource )
+ && ( imageOffset == rhs.imageOffset )
+ && ( imageExtent == rhs.imageExtent );
+ }
+
+ bool operator!=( BufferImageCopy const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ DeviceSize bufferOffset;
+ uint32_t bufferRowLength;
+ uint32_t bufferImageHeight;
+ ImageSubresourceLayers imageSubresource;
+ Offset3D imageOffset;
+ Extent3D imageExtent;
+ };
+ static_assert( sizeof( BufferImageCopy ) == sizeof( VkBufferImageCopy ), "struct and wrapper have different size!" );
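+
+  // Usage sketch (illustrative): copy a tightly packed staging buffer into mip 0 of a
+  // 2D image. The cmdBuffer, stagingBuffer, image, width and height names are assumed
+  // to exist in caller code.
+  //
+  //   vk::BufferImageCopy region = vk::BufferImageCopy()
+  //     .setImageSubresource( vk::ImageSubresourceLayers( vk::ImageAspectFlagBits::eColor, 0, 0, 1 ) )
+  //     .setImageExtent( vk::Extent3D( width, height, 1 ) );
+  //   cmdBuffer.copyBufferToImage( stagingBuffer, image, vk::ImageLayout::eTransferDstOptimal, region );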
+
+ struct ImageResolve
+ {
+ ImageResolve( ImageSubresourceLayers srcSubresource_ = ImageSubresourceLayers(), Offset3D srcOffset_ = Offset3D(), ImageSubresourceLayers dstSubresource_ = ImageSubresourceLayers(), Offset3D dstOffset_ = Offset3D(), Extent3D extent_ = Extent3D() )
+ : srcSubresource( srcSubresource_ )
+ , srcOffset( srcOffset_ )
+ , dstSubresource( dstSubresource_ )
+ , dstOffset( dstOffset_ )
+ , extent( extent_ )
+ {
+ }
+
+ ImageResolve( VkImageResolve const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImageResolve) );
+ }
+
+ ImageResolve& operator=( VkImageResolve const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImageResolve) );
+ return *this;
+ }
+
+ ImageResolve& setSrcSubresource( ImageSubresourceLayers srcSubresource_ )
+ {
+ srcSubresource = srcSubresource_;
+ return *this;
+ }
+
+ ImageResolve& setSrcOffset( Offset3D srcOffset_ )
+ {
+ srcOffset = srcOffset_;
+ return *this;
+ }
+
+ ImageResolve& setDstSubresource( ImageSubresourceLayers dstSubresource_ )
+ {
+ dstSubresource = dstSubresource_;
+ return *this;
+ }
+
+ ImageResolve& setDstOffset( Offset3D dstOffset_ )
+ {
+ dstOffset = dstOffset_;
+ return *this;
+ }
+
+ ImageResolve& setExtent( Extent3D extent_ )
+ {
+ extent = extent_;
+ return *this;
+ }
+
+ operator const VkImageResolve&() const
+ {
+ return *reinterpret_cast<const VkImageResolve*>(this);
+ }
+
+ bool operator==( ImageResolve const& rhs ) const
+ {
+ return ( srcSubresource == rhs.srcSubresource )
+ && ( srcOffset == rhs.srcOffset )
+ && ( dstSubresource == rhs.dstSubresource )
+ && ( dstOffset == rhs.dstOffset )
+ && ( extent == rhs.extent );
+ }
+
+ bool operator!=( ImageResolve const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ ImageSubresourceLayers srcSubresource;
+ Offset3D srcOffset;
+ ImageSubresourceLayers dstSubresource;
+ Offset3D dstOffset;
+ Extent3D extent;
+ };
+ static_assert( sizeof( ImageResolve ) == sizeof( VkImageResolve ), "struct and wrapper have different size!" );
+
+ struct ClearAttachment
+ {
+ ClearAttachment( ImageAspectFlags aspectMask_ = ImageAspectFlags(), uint32_t colorAttachment_ = 0, ClearValue clearValue_ = ClearValue() )
+ : aspectMask( aspectMask_ )
+ , colorAttachment( colorAttachment_ )
+ , clearValue( clearValue_ )
+ {
+ }
+
+ ClearAttachment( VkClearAttachment const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ClearAttachment) );
+ }
+
+ ClearAttachment& operator=( VkClearAttachment const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ClearAttachment) );
+ return *this;
+ }
+
+ ClearAttachment& setAspectMask( ImageAspectFlags aspectMask_ )
+ {
+ aspectMask = aspectMask_;
+ return *this;
+ }
+
+ ClearAttachment& setColorAttachment( uint32_t colorAttachment_ )
+ {
+ colorAttachment = colorAttachment_;
+ return *this;
+ }
+
+ ClearAttachment& setClearValue( ClearValue clearValue_ )
+ {
+ clearValue = clearValue_;
+ return *this;
+ }
+
+ operator const VkClearAttachment&() const
+ {
+ return *reinterpret_cast<const VkClearAttachment*>(this);
+ }
+
+ ImageAspectFlags aspectMask;
+ uint32_t colorAttachment;
+ ClearValue clearValue;
+ };
+ static_assert( sizeof( ClearAttachment ) == sizeof( VkClearAttachment ), "struct and wrapper have different size!" );
+
+ enum class SparseImageFormatFlagBits
+ {
+ eSingleMiptail = VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT,
+ eAlignedMipSize = VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT,
+ eNonstandardBlockSize = VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT
+ };
+
+ using SparseImageFormatFlags = Flags<SparseImageFormatFlagBits, VkSparseImageFormatFlags>;
+
+ VULKAN_HPP_INLINE SparseImageFormatFlags operator|( SparseImageFormatFlagBits bit0, SparseImageFormatFlagBits bit1 )
+ {
+ return SparseImageFormatFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE SparseImageFormatFlags operator~( SparseImageFormatFlagBits bits )
+ {
+ return ~( SparseImageFormatFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<SparseImageFormatFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(SparseImageFormatFlagBits::eSingleMiptail) | VkFlags(SparseImageFormatFlagBits::eAlignedMipSize) | VkFlags(SparseImageFormatFlagBits::eNonstandardBlockSize)
+ };
+ };
+
+ struct SparseImageFormatProperties
+ {
+ operator const VkSparseImageFormatProperties&() const
+ {
+ return *reinterpret_cast<const VkSparseImageFormatProperties*>(this);
+ }
+
+ bool operator==( SparseImageFormatProperties const& rhs ) const
+ {
+ return ( aspectMask == rhs.aspectMask )
+ && ( imageGranularity == rhs.imageGranularity )
+ && ( flags == rhs.flags );
+ }
+
+ bool operator!=( SparseImageFormatProperties const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ ImageAspectFlags aspectMask;
+ Extent3D imageGranularity;
+ SparseImageFormatFlags flags;
+ };
+ static_assert( sizeof( SparseImageFormatProperties ) == sizeof( VkSparseImageFormatProperties ), "struct and wrapper have different size!" );
+
+ struct SparseImageMemoryRequirements
+ {
+ operator const VkSparseImageMemoryRequirements&() const
+ {
+ return *reinterpret_cast<const VkSparseImageMemoryRequirements*>(this);
+ }
+
+ bool operator==( SparseImageMemoryRequirements const& rhs ) const
+ {
+ return ( formatProperties == rhs.formatProperties )
+ && ( imageMipTailFirstLod == rhs.imageMipTailFirstLod )
+ && ( imageMipTailSize == rhs.imageMipTailSize )
+ && ( imageMipTailOffset == rhs.imageMipTailOffset )
+ && ( imageMipTailStride == rhs.imageMipTailStride );
+ }
+
+ bool operator!=( SparseImageMemoryRequirements const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ SparseImageFormatProperties formatProperties;
+ uint32_t imageMipTailFirstLod;
+ DeviceSize imageMipTailSize;
+ DeviceSize imageMipTailOffset;
+ DeviceSize imageMipTailStride;
+ };
+ static_assert( sizeof( SparseImageMemoryRequirements ) == sizeof( VkSparseImageMemoryRequirements ), "struct and wrapper have different size!" );
+
+ struct SparseImageFormatProperties2KHR
+ {
+ operator const VkSparseImageFormatProperties2KHR&() const
+ {
+ return *reinterpret_cast<const VkSparseImageFormatProperties2KHR*>(this);
+ }
+
+ bool operator==( SparseImageFormatProperties2KHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( properties == rhs.properties );
+ }
+
+ bool operator!=( SparseImageFormatProperties2KHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ void* pNext;
+ SparseImageFormatProperties properties;
+ };
+ static_assert( sizeof( SparseImageFormatProperties2KHR ) == sizeof( VkSparseImageFormatProperties2KHR ), "struct and wrapper have different size!" );
+
+ enum class SparseMemoryBindFlagBits
+ {
+ eMetadata = VK_SPARSE_MEMORY_BIND_METADATA_BIT
+ };
+
+ using SparseMemoryBindFlags = Flags<SparseMemoryBindFlagBits, VkSparseMemoryBindFlags>;
+
+ VULKAN_HPP_INLINE SparseMemoryBindFlags operator|( SparseMemoryBindFlagBits bit0, SparseMemoryBindFlagBits bit1 )
+ {
+ return SparseMemoryBindFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE SparseMemoryBindFlags operator~( SparseMemoryBindFlagBits bits )
+ {
+ return ~( SparseMemoryBindFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<SparseMemoryBindFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(SparseMemoryBindFlagBits::eMetadata)
+ };
+ };
+
+ struct SparseMemoryBind
+ {
+ SparseMemoryBind( DeviceSize resourceOffset_ = 0, DeviceSize size_ = 0, DeviceMemory memory_ = DeviceMemory(), DeviceSize memoryOffset_ = 0, SparseMemoryBindFlags flags_ = SparseMemoryBindFlags() )
+ : resourceOffset( resourceOffset_ )
+ , size( size_ )
+ , memory( memory_ )
+ , memoryOffset( memoryOffset_ )
+ , flags( flags_ )
+ {
+ }
+
+ SparseMemoryBind( VkSparseMemoryBind const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SparseMemoryBind) );
+ }
+
+ SparseMemoryBind& operator=( VkSparseMemoryBind const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SparseMemoryBind) );
+ return *this;
+ }
+
+ SparseMemoryBind& setResourceOffset( DeviceSize resourceOffset_ )
+ {
+ resourceOffset = resourceOffset_;
+ return *this;
+ }
+
+ SparseMemoryBind& setSize( DeviceSize size_ )
+ {
+ size = size_;
+ return *this;
+ }
+
+ SparseMemoryBind& setMemory( DeviceMemory memory_ )
+ {
+ memory = memory_;
+ return *this;
+ }
+
+ SparseMemoryBind& setMemoryOffset( DeviceSize memoryOffset_ )
+ {
+ memoryOffset = memoryOffset_;
+ return *this;
+ }
+
+ SparseMemoryBind& setFlags( SparseMemoryBindFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ operator const VkSparseMemoryBind&() const
+ {
+ return *reinterpret_cast<const VkSparseMemoryBind*>(this);
+ }
+
+ bool operator==( SparseMemoryBind const& rhs ) const
+ {
+ return ( resourceOffset == rhs.resourceOffset )
+ && ( size == rhs.size )
+ && ( memory == rhs.memory )
+ && ( memoryOffset == rhs.memoryOffset )
+ && ( flags == rhs.flags );
+ }
+
+ bool operator!=( SparseMemoryBind const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ DeviceSize resourceOffset;
+ DeviceSize size;
+ DeviceMemory memory;
+ DeviceSize memoryOffset;
+ SparseMemoryBindFlags flags;
+ };
+ static_assert( sizeof( SparseMemoryBind ) == sizeof( VkSparseMemoryBind ), "struct and wrapper have different size!" );
+
+ struct SparseImageMemoryBind
+ {
+ SparseImageMemoryBind( ImageSubresource subresource_ = ImageSubresource(), Offset3D offset_ = Offset3D(), Extent3D extent_ = Extent3D(), DeviceMemory memory_ = DeviceMemory(), DeviceSize memoryOffset_ = 0, SparseMemoryBindFlags flags_ = SparseMemoryBindFlags() )
+ : subresource( subresource_ )
+ , offset( offset_ )
+ , extent( extent_ )
+ , memory( memory_ )
+ , memoryOffset( memoryOffset_ )
+ , flags( flags_ )
+ {
+ }
+
+ SparseImageMemoryBind( VkSparseImageMemoryBind const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SparseImageMemoryBind) );
+ }
+
+ SparseImageMemoryBind& operator=( VkSparseImageMemoryBind const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SparseImageMemoryBind) );
+ return *this;
+ }
+
+ SparseImageMemoryBind& setSubresource( ImageSubresource subresource_ )
+ {
+ subresource = subresource_;
+ return *this;
+ }
+
+ SparseImageMemoryBind& setOffset( Offset3D offset_ )
+ {
+ offset = offset_;
+ return *this;
+ }
+
+ SparseImageMemoryBind& setExtent( Extent3D extent_ )
+ {
+ extent = extent_;
+ return *this;
+ }
+
+ SparseImageMemoryBind& setMemory( DeviceMemory memory_ )
+ {
+ memory = memory_;
+ return *this;
+ }
+
+ SparseImageMemoryBind& setMemoryOffset( DeviceSize memoryOffset_ )
+ {
+ memoryOffset = memoryOffset_;
+ return *this;
+ }
+
+ SparseImageMemoryBind& setFlags( SparseMemoryBindFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ operator const VkSparseImageMemoryBind&() const
+ {
+ return *reinterpret_cast<const VkSparseImageMemoryBind*>(this);
+ }
+
+ bool operator==( SparseImageMemoryBind const& rhs ) const
+ {
+ return ( subresource == rhs.subresource )
+ && ( offset == rhs.offset )
+ && ( extent == rhs.extent )
+ && ( memory == rhs.memory )
+ && ( memoryOffset == rhs.memoryOffset )
+ && ( flags == rhs.flags );
+ }
+
+ bool operator!=( SparseImageMemoryBind const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ ImageSubresource subresource;
+ Offset3D offset;
+ Extent3D extent;
+ DeviceMemory memory;
+ DeviceSize memoryOffset;
+ SparseMemoryBindFlags flags;
+ };
+ static_assert( sizeof( SparseImageMemoryBind ) == sizeof( VkSparseImageMemoryBind ), "struct and wrapper have different size!" );
+
+ struct SparseBufferMemoryBindInfo
+ {
+ SparseBufferMemoryBindInfo( Buffer buffer_ = Buffer(), uint32_t bindCount_ = 0, const SparseMemoryBind* pBinds_ = nullptr )
+ : buffer( buffer_ )
+ , bindCount( bindCount_ )
+ , pBinds( pBinds_ )
+ {
+ }
+
+ SparseBufferMemoryBindInfo( VkSparseBufferMemoryBindInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SparseBufferMemoryBindInfo) );
+ }
+
+ SparseBufferMemoryBindInfo& operator=( VkSparseBufferMemoryBindInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SparseBufferMemoryBindInfo) );
+ return *this;
+ }
+
+ SparseBufferMemoryBindInfo& setBuffer( Buffer buffer_ )
+ {
+ buffer = buffer_;
+ return *this;
+ }
+
+ SparseBufferMemoryBindInfo& setBindCount( uint32_t bindCount_ )
+ {
+ bindCount = bindCount_;
+ return *this;
+ }
+
+ SparseBufferMemoryBindInfo& setPBinds( const SparseMemoryBind* pBinds_ )
+ {
+ pBinds = pBinds_;
+ return *this;
+ }
+
+ operator const VkSparseBufferMemoryBindInfo&() const
+ {
+ return *reinterpret_cast<const VkSparseBufferMemoryBindInfo*>(this);
+ }
+
+ bool operator==( SparseBufferMemoryBindInfo const& rhs ) const
+ {
+ return ( buffer == rhs.buffer )
+ && ( bindCount == rhs.bindCount )
+ && ( pBinds == rhs.pBinds );
+ }
+
+ bool operator!=( SparseBufferMemoryBindInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ Buffer buffer;
+ uint32_t bindCount;
+ const SparseMemoryBind* pBinds;
+ };
+ static_assert( sizeof( SparseBufferMemoryBindInfo ) == sizeof( VkSparseBufferMemoryBindInfo ), "struct and wrapper have different size!" );
+
+ struct SparseImageOpaqueMemoryBindInfo
+ {
+ SparseImageOpaqueMemoryBindInfo( Image image_ = Image(), uint32_t bindCount_ = 0, const SparseMemoryBind* pBinds_ = nullptr )
+ : image( image_ )
+ , bindCount( bindCount_ )
+ , pBinds( pBinds_ )
+ {
+ }
+
+ SparseImageOpaqueMemoryBindInfo( VkSparseImageOpaqueMemoryBindInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SparseImageOpaqueMemoryBindInfo) );
+ }
+
+ SparseImageOpaqueMemoryBindInfo& operator=( VkSparseImageOpaqueMemoryBindInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SparseImageOpaqueMemoryBindInfo) );
+ return *this;
+ }
+
+ SparseImageOpaqueMemoryBindInfo& setImage( Image image_ )
+ {
+ image = image_;
+ return *this;
+ }
+
+ SparseImageOpaqueMemoryBindInfo& setBindCount( uint32_t bindCount_ )
+ {
+ bindCount = bindCount_;
+ return *this;
+ }
+
+ SparseImageOpaqueMemoryBindInfo& setPBinds( const SparseMemoryBind* pBinds_ )
+ {
+ pBinds = pBinds_;
+ return *this;
+ }
+
+ operator const VkSparseImageOpaqueMemoryBindInfo&() const
+ {
+ return *reinterpret_cast<const VkSparseImageOpaqueMemoryBindInfo*>(this);
+ }
+
+ bool operator==( SparseImageOpaqueMemoryBindInfo const& rhs ) const
+ {
+ return ( image == rhs.image )
+ && ( bindCount == rhs.bindCount )
+ && ( pBinds == rhs.pBinds );
+ }
+
+ bool operator!=( SparseImageOpaqueMemoryBindInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ Image image;
+ uint32_t bindCount;
+ const SparseMemoryBind* pBinds;
+ };
+ static_assert( sizeof( SparseImageOpaqueMemoryBindInfo ) == sizeof( VkSparseImageOpaqueMemoryBindInfo ), "struct and wrapper have different size!" );
+
+ struct SparseImageMemoryBindInfo
+ {
+ SparseImageMemoryBindInfo( Image image_ = Image(), uint32_t bindCount_ = 0, const SparseImageMemoryBind* pBinds_ = nullptr )
+ : image( image_ )
+ , bindCount( bindCount_ )
+ , pBinds( pBinds_ )
+ {
+ }
+
+ SparseImageMemoryBindInfo( VkSparseImageMemoryBindInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SparseImageMemoryBindInfo) );
+ }
+
+ SparseImageMemoryBindInfo& operator=( VkSparseImageMemoryBindInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SparseImageMemoryBindInfo) );
+ return *this;
+ }
+
+ SparseImageMemoryBindInfo& setImage( Image image_ )
+ {
+ image = image_;
+ return *this;
+ }
+
+ SparseImageMemoryBindInfo& setBindCount( uint32_t bindCount_ )
+ {
+ bindCount = bindCount_;
+ return *this;
+ }
+
+ SparseImageMemoryBindInfo& setPBinds( const SparseImageMemoryBind* pBinds_ )
+ {
+ pBinds = pBinds_;
+ return *this;
+ }
+
+ operator const VkSparseImageMemoryBindInfo&() const
+ {
+ return *reinterpret_cast<const VkSparseImageMemoryBindInfo*>(this);
+ }
+
+ bool operator==( SparseImageMemoryBindInfo const& rhs ) const
+ {
+ return ( image == rhs.image )
+ && ( bindCount == rhs.bindCount )
+ && ( pBinds == rhs.pBinds );
+ }
+
+ bool operator!=( SparseImageMemoryBindInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ Image image;
+ uint32_t bindCount;
+ const SparseImageMemoryBind* pBinds;
+ };
+ static_assert( sizeof( SparseImageMemoryBindInfo ) == sizeof( VkSparseImageMemoryBindInfo ), "struct and wrapper have different size!" );
+
+ struct BindSparseInfo
+ {
+ BindSparseInfo( uint32_t waitSemaphoreCount_ = 0, const Semaphore* pWaitSemaphores_ = nullptr, uint32_t bufferBindCount_ = 0, const SparseBufferMemoryBindInfo* pBufferBinds_ = nullptr, uint32_t imageOpaqueBindCount_ = 0, const SparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds_ = nullptr, uint32_t imageBindCount_ = 0, const SparseImageMemoryBindInfo* pImageBinds_ = nullptr, uint32_t signalSemaphoreCount_ = 0, const Semaphore* pSignalSemaphores_ = nullptr )
+ : sType( StructureType::eBindSparseInfo )
+ , pNext( nullptr )
+ , waitSemaphoreCount( waitSemaphoreCount_ )
+ , pWaitSemaphores( pWaitSemaphores_ )
+ , bufferBindCount( bufferBindCount_ )
+ , pBufferBinds( pBufferBinds_ )
+ , imageOpaqueBindCount( imageOpaqueBindCount_ )
+ , pImageOpaqueBinds( pImageOpaqueBinds_ )
+ , imageBindCount( imageBindCount_ )
+ , pImageBinds( pImageBinds_ )
+ , signalSemaphoreCount( signalSemaphoreCount_ )
+ , pSignalSemaphores( pSignalSemaphores_ )
+ {
+ }
+
+ BindSparseInfo( VkBindSparseInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(BindSparseInfo) );
+ }
+
+ BindSparseInfo& operator=( VkBindSparseInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(BindSparseInfo) );
+ return *this;
+ }
+
+ BindSparseInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ BindSparseInfo& setWaitSemaphoreCount( uint32_t waitSemaphoreCount_ )
+ {
+ waitSemaphoreCount = waitSemaphoreCount_;
+ return *this;
+ }
+
+ BindSparseInfo& setPWaitSemaphores( const Semaphore* pWaitSemaphores_ )
+ {
+ pWaitSemaphores = pWaitSemaphores_;
+ return *this;
+ }
+
+ BindSparseInfo& setBufferBindCount( uint32_t bufferBindCount_ )
+ {
+ bufferBindCount = bufferBindCount_;
+ return *this;
+ }
+
+ BindSparseInfo& setPBufferBinds( const SparseBufferMemoryBindInfo* pBufferBinds_ )
+ {
+ pBufferBinds = pBufferBinds_;
+ return *this;
+ }
+
+ BindSparseInfo& setImageOpaqueBindCount( uint32_t imageOpaqueBindCount_ )
+ {
+ imageOpaqueBindCount = imageOpaqueBindCount_;
+ return *this;
+ }
+
+ BindSparseInfo& setPImageOpaqueBinds( const SparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds_ )
+ {
+ pImageOpaqueBinds = pImageOpaqueBinds_;
+ return *this;
+ }
+
+ BindSparseInfo& setImageBindCount( uint32_t imageBindCount_ )
+ {
+ imageBindCount = imageBindCount_;
+ return *this;
+ }
+
+ BindSparseInfo& setPImageBinds( const SparseImageMemoryBindInfo* pImageBinds_ )
+ {
+ pImageBinds = pImageBinds_;
+ return *this;
+ }
+
+ BindSparseInfo& setSignalSemaphoreCount( uint32_t signalSemaphoreCount_ )
+ {
+ signalSemaphoreCount = signalSemaphoreCount_;
+ return *this;
+ }
+
+ BindSparseInfo& setPSignalSemaphores( const Semaphore* pSignalSemaphores_ )
+ {
+ pSignalSemaphores = pSignalSemaphores_;
+ return *this;
+ }
+
+ operator const VkBindSparseInfo&() const
+ {
+ return *reinterpret_cast<const VkBindSparseInfo*>(this);
+ }
+
+ bool operator==( BindSparseInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( waitSemaphoreCount == rhs.waitSemaphoreCount )
+ && ( pWaitSemaphores == rhs.pWaitSemaphores )
+ && ( bufferBindCount == rhs.bufferBindCount )
+ && ( pBufferBinds == rhs.pBufferBinds )
+ && ( imageOpaqueBindCount == rhs.imageOpaqueBindCount )
+ && ( pImageOpaqueBinds == rhs.pImageOpaqueBinds )
+ && ( imageBindCount == rhs.imageBindCount )
+ && ( pImageBinds == rhs.pImageBinds )
+ && ( signalSemaphoreCount == rhs.signalSemaphoreCount )
+ && ( pSignalSemaphores == rhs.pSignalSemaphores );
+ }
+
+ bool operator!=( BindSparseInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ uint32_t waitSemaphoreCount;
+ const Semaphore* pWaitSemaphores;
+ uint32_t bufferBindCount;
+ const SparseBufferMemoryBindInfo* pBufferBinds;
+ uint32_t imageOpaqueBindCount;
+ const SparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
+ uint32_t imageBindCount;
+ const SparseImageMemoryBindInfo* pImageBinds;
+ uint32_t signalSemaphoreCount;
+ const Semaphore* pSignalSemaphores;
+ };
+ static_assert( sizeof( BindSparseInfo ) == sizeof( VkBindSparseInfo ), "struct and wrapper have different size!" );
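+
+ /* Illustrative usage sketch: assembling a BindSparseInfo for a single sparse buffer bind
+    with the fluent setters above. `memory`, `buffer`, `queue` and `fence` are assumed to be
+    valid vk::DeviceMemory / vk::Buffer / vk::Queue / vk::Fence handles created elsewhere.
+
+      vk::SparseMemoryBind bind = vk::SparseMemoryBind()
+        .setResourceOffset( 0 )
+        .setSize( 65536 )
+        .setMemory( memory )
+        .setMemoryOffset( 0 );
+      vk::SparseBufferMemoryBindInfo bufferBind( buffer, 1, &bind );
+      vk::BindSparseInfo bindInfo = vk::BindSparseInfo()
+        .setBufferBindCount( 1 )
+        .setPBufferBinds( &bufferBind );
+      queue.bindSparse( 1, &bindInfo, fence );
+ */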
+
+ enum class PipelineStageFlagBits
+ {
+ eTopOfPipe = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
+ eDrawIndirect = VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
+ eVertexInput = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
+ eVertexShader = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
+ eTessellationControlShader = VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
+ eTessellationEvaluationShader = VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
+ eGeometryShader = VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
+ eFragmentShader = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
+ eEarlyFragmentTests = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
+ eLateFragmentTests = VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
+ eColorAttachmentOutput = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
+ eComputeShader = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
+ eTransfer = VK_PIPELINE_STAGE_TRANSFER_BIT,
+ eBottomOfPipe = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
+ eHost = VK_PIPELINE_STAGE_HOST_BIT,
+ eAllGraphics = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
+ eAllCommands = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
+ eCommandProcessNVX = VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX
+ };
+
+ using PipelineStageFlags = Flags<PipelineStageFlagBits, VkPipelineStageFlags>;
+
+ VULKAN_HPP_INLINE PipelineStageFlags operator|( PipelineStageFlagBits bit0, PipelineStageFlagBits bit1 )
+ {
+ return PipelineStageFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE PipelineStageFlags operator~( PipelineStageFlagBits bits )
+ {
+ return ~( PipelineStageFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<PipelineStageFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(PipelineStageFlagBits::eTopOfPipe) | VkFlags(PipelineStageFlagBits::eDrawIndirect) | VkFlags(PipelineStageFlagBits::eVertexInput) | VkFlags(PipelineStageFlagBits::eVertexShader) | VkFlags(PipelineStageFlagBits::eTessellationControlShader) | VkFlags(PipelineStageFlagBits::eTessellationEvaluationShader) | VkFlags(PipelineStageFlagBits::eGeometryShader) | VkFlags(PipelineStageFlagBits::eFragmentShader) | VkFlags(PipelineStageFlagBits::eEarlyFragmentTests) | VkFlags(PipelineStageFlagBits::eLateFragmentTests) | VkFlags(PipelineStageFlagBits::eColorAttachmentOutput) | VkFlags(PipelineStageFlagBits::eComputeShader) | VkFlags(PipelineStageFlagBits::eTransfer) | VkFlags(PipelineStageFlagBits::eBottomOfPipe) | VkFlags(PipelineStageFlagBits::eHost) | VkFlags(PipelineStageFlagBits::eAllGraphics) | VkFlags(PipelineStageFlagBits::eAllCommands) | VkFlags(PipelineStageFlagBits::eCommandProcessNVX)
+ };
+ };
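+
+ /* Illustrative usage sketch: individual PipelineStageFlagBits combine into a
+    PipelineStageFlags mask via the operator| defined above, e.g. for a barrier:
+
+      vk::PipelineStageFlags srcStages = vk::PipelineStageFlagBits::eColorAttachmentOutput
+                                       | vk::PipelineStageFlagBits::eComputeShader;
+ */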
+
+ enum class CommandPoolCreateFlagBits
+ {
+ eTransient = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT,
+ eResetCommandBuffer = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT
+ };
+
+ using CommandPoolCreateFlags = Flags<CommandPoolCreateFlagBits, VkCommandPoolCreateFlags>;
+
+ VULKAN_HPP_INLINE CommandPoolCreateFlags operator|( CommandPoolCreateFlagBits bit0, CommandPoolCreateFlagBits bit1 )
+ {
+ return CommandPoolCreateFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE CommandPoolCreateFlags operator~( CommandPoolCreateFlagBits bits )
+ {
+ return ~( CommandPoolCreateFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<CommandPoolCreateFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(CommandPoolCreateFlagBits::eTransient) | VkFlags(CommandPoolCreateFlagBits::eResetCommandBuffer)
+ };
+ };
+
+ struct CommandPoolCreateInfo
+ {
+ CommandPoolCreateInfo( CommandPoolCreateFlags flags_ = CommandPoolCreateFlags(), uint32_t queueFamilyIndex_ = 0 )
+ : sType( StructureType::eCommandPoolCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , queueFamilyIndex( queueFamilyIndex_ )
+ {
+ }
+
+ CommandPoolCreateInfo( VkCommandPoolCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(CommandPoolCreateInfo) );
+ }
+
+ CommandPoolCreateInfo& operator=( VkCommandPoolCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(CommandPoolCreateInfo) );
+ return *this;
+ }
+
+ CommandPoolCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ CommandPoolCreateInfo& setFlags( CommandPoolCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ CommandPoolCreateInfo& setQueueFamilyIndex( uint32_t queueFamilyIndex_ )
+ {
+ queueFamilyIndex = queueFamilyIndex_;
+ return *this;
+ }
+
+ operator const VkCommandPoolCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkCommandPoolCreateInfo*>(this);
+ }
+
+ bool operator==( CommandPoolCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( queueFamilyIndex == rhs.queueFamilyIndex );
+ }
+
+ bool operator!=( CommandPoolCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ CommandPoolCreateFlags flags;
+ uint32_t queueFamilyIndex;
+ };
+ static_assert( sizeof( CommandPoolCreateInfo ) == sizeof( VkCommandPoolCreateInfo ), "struct and wrapper have different size!" );
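+
+ /* Illustrative usage sketch: creating a command pool for a graphics queue family.
+    `device` and `graphicsQueueFamilyIndex` are assumed to have been obtained earlier.
+
+      vk::CommandPoolCreateInfo poolInfo( vk::CommandPoolCreateFlagBits::eResetCommandBuffer,
+                                          graphicsQueueFamilyIndex );
+      vk::CommandPool commandPool = device.createCommandPool( poolInfo );
+ */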
+
+ enum class CommandPoolResetFlagBits
+ {
+ eReleaseResources = VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT
+ };
+
+ using CommandPoolResetFlags = Flags<CommandPoolResetFlagBits, VkCommandPoolResetFlags>;
+
+ VULKAN_HPP_INLINE CommandPoolResetFlags operator|( CommandPoolResetFlagBits bit0, CommandPoolResetFlagBits bit1 )
+ {
+ return CommandPoolResetFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE CommandPoolResetFlags operator~( CommandPoolResetFlagBits bits )
+ {
+ return ~( CommandPoolResetFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<CommandPoolResetFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(CommandPoolResetFlagBits::eReleaseResources)
+ };
+ };
+
+ enum class CommandBufferResetFlagBits
+ {
+ eReleaseResources = VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT
+ };
+
+ using CommandBufferResetFlags = Flags<CommandBufferResetFlagBits, VkCommandBufferResetFlags>;
+
+ VULKAN_HPP_INLINE CommandBufferResetFlags operator|( CommandBufferResetFlagBits bit0, CommandBufferResetFlagBits bit1 )
+ {
+ return CommandBufferResetFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE CommandBufferResetFlags operator~( CommandBufferResetFlagBits bits )
+ {
+ return ~( CommandBufferResetFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<CommandBufferResetFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(CommandBufferResetFlagBits::eReleaseResources)
+ };
+ };
+
+ enum class SampleCountFlagBits
+ {
+ e1 = VK_SAMPLE_COUNT_1_BIT,
+ e2 = VK_SAMPLE_COUNT_2_BIT,
+ e4 = VK_SAMPLE_COUNT_4_BIT,
+ e8 = VK_SAMPLE_COUNT_8_BIT,
+ e16 = VK_SAMPLE_COUNT_16_BIT,
+ e32 = VK_SAMPLE_COUNT_32_BIT,
+ e64 = VK_SAMPLE_COUNT_64_BIT
+ };
+
+ using SampleCountFlags = Flags<SampleCountFlagBits, VkSampleCountFlags>;
+
+ VULKAN_HPP_INLINE SampleCountFlags operator|( SampleCountFlagBits bit0, SampleCountFlagBits bit1 )
+ {
+ return SampleCountFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE SampleCountFlags operator~( SampleCountFlagBits bits )
+ {
+ return ~( SampleCountFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<SampleCountFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(SampleCountFlagBits::e1) | VkFlags(SampleCountFlagBits::e2) | VkFlags(SampleCountFlagBits::e4) | VkFlags(SampleCountFlagBits::e8) | VkFlags(SampleCountFlagBits::e16) | VkFlags(SampleCountFlagBits::e32) | VkFlags(SampleCountFlagBits::e64)
+ };
+ };
+
+ struct ImageFormatProperties
+ {
+ operator const VkImageFormatProperties&() const
+ {
+ return *reinterpret_cast<const VkImageFormatProperties*>(this);
+ }
+
+ bool operator==( ImageFormatProperties const& rhs ) const
+ {
+ return ( maxExtent == rhs.maxExtent )
+ && ( maxMipLevels == rhs.maxMipLevels )
+ && ( maxArrayLayers == rhs.maxArrayLayers )
+ && ( sampleCounts == rhs.sampleCounts )
+ && ( maxResourceSize == rhs.maxResourceSize );
+ }
+
+ bool operator!=( ImageFormatProperties const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ Extent3D maxExtent;
+ uint32_t maxMipLevels;
+ uint32_t maxArrayLayers;
+ SampleCountFlags sampleCounts;
+ DeviceSize maxResourceSize;
+ };
+ static_assert( sizeof( ImageFormatProperties ) == sizeof( VkImageFormatProperties ), "struct and wrapper have different size!" );
+
+ struct ImageCreateInfo
+ {
+ ImageCreateInfo( ImageCreateFlags flags_ = ImageCreateFlags(), ImageType imageType_ = ImageType::e1D, Format format_ = Format::eUndefined, Extent3D extent_ = Extent3D(), uint32_t mipLevels_ = 0, uint32_t arrayLayers_ = 0, SampleCountFlagBits samples_ = SampleCountFlagBits::e1, ImageTiling tiling_ = ImageTiling::eOptimal, ImageUsageFlags usage_ = ImageUsageFlags(), SharingMode sharingMode_ = SharingMode::eExclusive, uint32_t queueFamilyIndexCount_ = 0, const uint32_t* pQueueFamilyIndices_ = nullptr, ImageLayout initialLayout_ = ImageLayout::eUndefined )
+ : sType( StructureType::eImageCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , imageType( imageType_ )
+ , format( format_ )
+ , extent( extent_ )
+ , mipLevels( mipLevels_ )
+ , arrayLayers( arrayLayers_ )
+ , samples( samples_ )
+ , tiling( tiling_ )
+ , usage( usage_ )
+ , sharingMode( sharingMode_ )
+ , queueFamilyIndexCount( queueFamilyIndexCount_ )
+ , pQueueFamilyIndices( pQueueFamilyIndices_ )
+ , initialLayout( initialLayout_ )
+ {
+ }
+
+ ImageCreateInfo( VkImageCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImageCreateInfo) );
+ }
+
+ ImageCreateInfo& operator=( VkImageCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImageCreateInfo) );
+ return *this;
+ }
+
+ ImageCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ ImageCreateInfo& setFlags( ImageCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ ImageCreateInfo& setImageType( ImageType imageType_ )
+ {
+ imageType = imageType_;
+ return *this;
+ }
+
+ ImageCreateInfo& setFormat( Format format_ )
+ {
+ format = format_;
+ return *this;
+ }
+
+ ImageCreateInfo& setExtent( Extent3D extent_ )
+ {
+ extent = extent_;
+ return *this;
+ }
+
+ ImageCreateInfo& setMipLevels( uint32_t mipLevels_ )
+ {
+ mipLevels = mipLevels_;
+ return *this;
+ }
+
+ ImageCreateInfo& setArrayLayers( uint32_t arrayLayers_ )
+ {
+ arrayLayers = arrayLayers_;
+ return *this;
+ }
+
+ ImageCreateInfo& setSamples( SampleCountFlagBits samples_ )
+ {
+ samples = samples_;
+ return *this;
+ }
+
+ ImageCreateInfo& setTiling( ImageTiling tiling_ )
+ {
+ tiling = tiling_;
+ return *this;
+ }
+
+ ImageCreateInfo& setUsage( ImageUsageFlags usage_ )
+ {
+ usage = usage_;
+ return *this;
+ }
+
+ ImageCreateInfo& setSharingMode( SharingMode sharingMode_ )
+ {
+ sharingMode = sharingMode_;
+ return *this;
+ }
+
+ ImageCreateInfo& setQueueFamilyIndexCount( uint32_t queueFamilyIndexCount_ )
+ {
+ queueFamilyIndexCount = queueFamilyIndexCount_;
+ return *this;
+ }
+
+ ImageCreateInfo& setPQueueFamilyIndices( const uint32_t* pQueueFamilyIndices_ )
+ {
+ pQueueFamilyIndices = pQueueFamilyIndices_;
+ return *this;
+ }
+
+ ImageCreateInfo& setInitialLayout( ImageLayout initialLayout_ )
+ {
+ initialLayout = initialLayout_;
+ return *this;
+ }
+
+ operator const VkImageCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkImageCreateInfo*>(this);
+ }
+
+ bool operator==( ImageCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( imageType == rhs.imageType )
+ && ( format == rhs.format )
+ && ( extent == rhs.extent )
+ && ( mipLevels == rhs.mipLevels )
+ && ( arrayLayers == rhs.arrayLayers )
+ && ( samples == rhs.samples )
+ && ( tiling == rhs.tiling )
+ && ( usage == rhs.usage )
+ && ( sharingMode == rhs.sharingMode )
+ && ( queueFamilyIndexCount == rhs.queueFamilyIndexCount )
+ && ( pQueueFamilyIndices == rhs.pQueueFamilyIndices )
+ && ( initialLayout == rhs.initialLayout );
+ }
+
+ bool operator!=( ImageCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ ImageCreateFlags flags;
+ ImageType imageType;
+ Format format;
+ Extent3D extent;
+ uint32_t mipLevels;
+ uint32_t arrayLayers;
+ SampleCountFlagBits samples;
+ ImageTiling tiling;
+ ImageUsageFlags usage;
+ SharingMode sharingMode;
+ uint32_t queueFamilyIndexCount;
+ const uint32_t* pQueueFamilyIndices;
+ ImageLayout initialLayout;
+ };
+ static_assert( sizeof( ImageCreateInfo ) == sizeof( VkImageCreateInfo ), "struct and wrapper have different size!" );
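+
+ /* Illustrative usage sketch: filling an ImageCreateInfo with the fluent setters for a
+    sampled 2D texture. `device`, `width` and `height` are assumed to exist in the caller.
+
+      vk::ImageCreateInfo imageInfo = vk::ImageCreateInfo()
+        .setImageType( vk::ImageType::e2D )
+        .setFormat( vk::Format::eR8G8B8A8Unorm )
+        .setExtent( vk::Extent3D( width, height, 1 ) )
+        .setMipLevels( 1 )
+        .setArrayLayers( 1 )
+        .setSamples( vk::SampleCountFlagBits::e1 )
+        .setTiling( vk::ImageTiling::eOptimal )
+        .setUsage( vk::ImageUsageFlagBits::eSampled | vk::ImageUsageFlagBits::eTransferDst )
+        .setInitialLayout( vk::ImageLayout::eUndefined );
+      vk::Image image = device.createImage( imageInfo );
+ */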
+
+ struct PipelineMultisampleStateCreateInfo
+ {
+ PipelineMultisampleStateCreateInfo( PipelineMultisampleStateCreateFlags flags_ = PipelineMultisampleStateCreateFlags(), SampleCountFlagBits rasterizationSamples_ = SampleCountFlagBits::e1, Bool32 sampleShadingEnable_ = 0, float minSampleShading_ = 0, const SampleMask* pSampleMask_ = nullptr, Bool32 alphaToCoverageEnable_ = 0, Bool32 alphaToOneEnable_ = 0 )
+ : sType( StructureType::ePipelineMultisampleStateCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , rasterizationSamples( rasterizationSamples_ )
+ , sampleShadingEnable( sampleShadingEnable_ )
+ , minSampleShading( minSampleShading_ )
+ , pSampleMask( pSampleMask_ )
+ , alphaToCoverageEnable( alphaToCoverageEnable_ )
+ , alphaToOneEnable( alphaToOneEnable_ )
+ {
+ }
+
+ PipelineMultisampleStateCreateInfo( VkPipelineMultisampleStateCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineMultisampleStateCreateInfo) );
+ }
+
+ PipelineMultisampleStateCreateInfo& operator=( VkPipelineMultisampleStateCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineMultisampleStateCreateInfo) );
+ return *this;
+ }
+
+ PipelineMultisampleStateCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PipelineMultisampleStateCreateInfo& setFlags( PipelineMultisampleStateCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ PipelineMultisampleStateCreateInfo& setRasterizationSamples( SampleCountFlagBits rasterizationSamples_ )
+ {
+ rasterizationSamples = rasterizationSamples_;
+ return *this;
+ }
+
+ PipelineMultisampleStateCreateInfo& setSampleShadingEnable( Bool32 sampleShadingEnable_ )
+ {
+ sampleShadingEnable = sampleShadingEnable_;
+ return *this;
+ }
+
+ PipelineMultisampleStateCreateInfo& setMinSampleShading( float minSampleShading_ )
+ {
+ minSampleShading = minSampleShading_;
+ return *this;
+ }
+
+ PipelineMultisampleStateCreateInfo& setPSampleMask( const SampleMask* pSampleMask_ )
+ {
+ pSampleMask = pSampleMask_;
+ return *this;
+ }
+
+ PipelineMultisampleStateCreateInfo& setAlphaToCoverageEnable( Bool32 alphaToCoverageEnable_ )
+ {
+ alphaToCoverageEnable = alphaToCoverageEnable_;
+ return *this;
+ }
+
+ PipelineMultisampleStateCreateInfo& setAlphaToOneEnable( Bool32 alphaToOneEnable_ )
+ {
+ alphaToOneEnable = alphaToOneEnable_;
+ return *this;
+ }
+
+ operator const VkPipelineMultisampleStateCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkPipelineMultisampleStateCreateInfo*>(this);
+ }
+
+ bool operator==( PipelineMultisampleStateCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( rasterizationSamples == rhs.rasterizationSamples )
+ && ( sampleShadingEnable == rhs.sampleShadingEnable )
+ && ( minSampleShading == rhs.minSampleShading )
+ && ( pSampleMask == rhs.pSampleMask )
+ && ( alphaToCoverageEnable == rhs.alphaToCoverageEnable )
+ && ( alphaToOneEnable == rhs.alphaToOneEnable );
+ }
+
+ bool operator!=( PipelineMultisampleStateCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ PipelineMultisampleStateCreateFlags flags;
+ SampleCountFlagBits rasterizationSamples;
+ Bool32 sampleShadingEnable;
+ float minSampleShading;
+ const SampleMask* pSampleMask;
+ Bool32 alphaToCoverageEnable;
+ Bool32 alphaToOneEnable;
+ };
+ static_assert( sizeof( PipelineMultisampleStateCreateInfo ) == sizeof( VkPipelineMultisampleStateCreateInfo ), "struct and wrapper have different size!" );
+
+ struct GraphicsPipelineCreateInfo
+ {
+ GraphicsPipelineCreateInfo( PipelineCreateFlags flags_ = PipelineCreateFlags(), uint32_t stageCount_ = 0, const PipelineShaderStageCreateInfo* pStages_ = nullptr, const PipelineVertexInputStateCreateInfo* pVertexInputState_ = nullptr, const PipelineInputAssemblyStateCreateInfo* pInputAssemblyState_ = nullptr, const PipelineTessellationStateCreateInfo* pTessellationState_ = nullptr, const PipelineViewportStateCreateInfo* pViewportState_ = nullptr, const PipelineRasterizationStateCreateInfo* pRasterizationState_ = nullptr, const PipelineMultisampleStateCreateInfo* pMultisampleState_ = nullptr, const PipelineDepthStencilStateCreateInfo* pDepthStencilState_ = nullptr, const PipelineColorBlendStateCreateInfo* pColorBlendState_ = nullptr, const PipelineDynamicStateCreateInfo* pDynamicState_ = nullptr, PipelineLayout layout_ = PipelineLayout(), RenderPass renderPass_ = RenderPass(), uint32_t subpass_ = 0, Pipeline basePipelineHandle_ = Pipeline(), int32_t basePipelineIndex_ = 0 )
+ : sType( StructureType::eGraphicsPipelineCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , stageCount( stageCount_ )
+ , pStages( pStages_ )
+ , pVertexInputState( pVertexInputState_ )
+ , pInputAssemblyState( pInputAssemblyState_ )
+ , pTessellationState( pTessellationState_ )
+ , pViewportState( pViewportState_ )
+ , pRasterizationState( pRasterizationState_ )
+ , pMultisampleState( pMultisampleState_ )
+ , pDepthStencilState( pDepthStencilState_ )
+ , pColorBlendState( pColorBlendState_ )
+ , pDynamicState( pDynamicState_ )
+ , layout( layout_ )
+ , renderPass( renderPass_ )
+ , subpass( subpass_ )
+ , basePipelineHandle( basePipelineHandle_ )
+ , basePipelineIndex( basePipelineIndex_ )
+ {
+ }
+
+ GraphicsPipelineCreateInfo( VkGraphicsPipelineCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(GraphicsPipelineCreateInfo) );
+ }
+
+ GraphicsPipelineCreateInfo& operator=( VkGraphicsPipelineCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(GraphicsPipelineCreateInfo) );
+ return *this;
+ }
+
+ GraphicsPipelineCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ GraphicsPipelineCreateInfo& setFlags( PipelineCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ GraphicsPipelineCreateInfo& setStageCount( uint32_t stageCount_ )
+ {
+ stageCount = stageCount_;
+ return *this;
+ }
+
+ GraphicsPipelineCreateInfo& setPStages( const PipelineShaderStageCreateInfo* pStages_ )
+ {
+ pStages = pStages_;
+ return *this;
+ }
+
+ GraphicsPipelineCreateInfo& setPVertexInputState( const PipelineVertexInputStateCreateInfo* pVertexInputState_ )
+ {
+ pVertexInputState = pVertexInputState_;
+ return *this;
+ }
+
+ GraphicsPipelineCreateInfo& setPInputAssemblyState( const PipelineInputAssemblyStateCreateInfo* pInputAssemblyState_ )
+ {
+ pInputAssemblyState = pInputAssemblyState_;
+ return *this;
+ }
+
+ GraphicsPipelineCreateInfo& setPTessellationState( const PipelineTessellationStateCreateInfo* pTessellationState_ )
+ {
+ pTessellationState = pTessellationState_;
+ return *this;
+ }
+
+ GraphicsPipelineCreateInfo& setPViewportState( const PipelineViewportStateCreateInfo* pViewportState_ )
+ {
+ pViewportState = pViewportState_;
+ return *this;
+ }
+
+ GraphicsPipelineCreateInfo& setPRasterizationState( const PipelineRasterizationStateCreateInfo* pRasterizationState_ )
+ {
+ pRasterizationState = pRasterizationState_;
+ return *this;
+ }
+
+ GraphicsPipelineCreateInfo& setPMultisampleState( const PipelineMultisampleStateCreateInfo* pMultisampleState_ )
+ {
+ pMultisampleState = pMultisampleState_;
+ return *this;
+ }
+
+ GraphicsPipelineCreateInfo& setPDepthStencilState( const PipelineDepthStencilStateCreateInfo* pDepthStencilState_ )
+ {
+ pDepthStencilState = pDepthStencilState_;
+ return *this;
+ }
+
+ GraphicsPipelineCreateInfo& setPColorBlendState( const PipelineColorBlendStateCreateInfo* pColorBlendState_ )
+ {
+ pColorBlendState = pColorBlendState_;
+ return *this;
+ }
+
+ GraphicsPipelineCreateInfo& setPDynamicState( const PipelineDynamicStateCreateInfo* pDynamicState_ )
+ {
+ pDynamicState = pDynamicState_;
+ return *this;
+ }
+
+ GraphicsPipelineCreateInfo& setLayout( PipelineLayout layout_ )
+ {
+ layout = layout_;
+ return *this;
+ }
+
+ GraphicsPipelineCreateInfo& setRenderPass( RenderPass renderPass_ )
+ {
+ renderPass = renderPass_;
+ return *this;
+ }
+
+ GraphicsPipelineCreateInfo& setSubpass( uint32_t subpass_ )
+ {
+ subpass = subpass_;
+ return *this;
+ }
+
+ GraphicsPipelineCreateInfo& setBasePipelineHandle( Pipeline basePipelineHandle_ )
+ {
+ basePipelineHandle = basePipelineHandle_;
+ return *this;
+ }
+
+ GraphicsPipelineCreateInfo& setBasePipelineIndex( int32_t basePipelineIndex_ )
+ {
+ basePipelineIndex = basePipelineIndex_;
+ return *this;
+ }
+
+ operator const VkGraphicsPipelineCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkGraphicsPipelineCreateInfo*>(this);
+ }
+
+ bool operator==( GraphicsPipelineCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( stageCount == rhs.stageCount )
+ && ( pStages == rhs.pStages )
+ && ( pVertexInputState == rhs.pVertexInputState )
+ && ( pInputAssemblyState == rhs.pInputAssemblyState )
+ && ( pTessellationState == rhs.pTessellationState )
+ && ( pViewportState == rhs.pViewportState )
+ && ( pRasterizationState == rhs.pRasterizationState )
+ && ( pMultisampleState == rhs.pMultisampleState )
+ && ( pDepthStencilState == rhs.pDepthStencilState )
+ && ( pColorBlendState == rhs.pColorBlendState )
+ && ( pDynamicState == rhs.pDynamicState )
+ && ( layout == rhs.layout )
+ && ( renderPass == rhs.renderPass )
+ && ( subpass == rhs.subpass )
+ && ( basePipelineHandle == rhs.basePipelineHandle )
+ && ( basePipelineIndex == rhs.basePipelineIndex );
+ }
+
+ bool operator!=( GraphicsPipelineCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ PipelineCreateFlags flags;
+ uint32_t stageCount;
+ const PipelineShaderStageCreateInfo* pStages;
+ const PipelineVertexInputStateCreateInfo* pVertexInputState;
+ const PipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
+ const PipelineTessellationStateCreateInfo* pTessellationState;
+ const PipelineViewportStateCreateInfo* pViewportState;
+ const PipelineRasterizationStateCreateInfo* pRasterizationState;
+ const PipelineMultisampleStateCreateInfo* pMultisampleState;
+ const PipelineDepthStencilStateCreateInfo* pDepthStencilState;
+ const PipelineColorBlendStateCreateInfo* pColorBlendState;
+ const PipelineDynamicStateCreateInfo* pDynamicState;
+ PipelineLayout layout;
+ RenderPass renderPass;
+ uint32_t subpass;
+ Pipeline basePipelineHandle;
+ int32_t basePipelineIndex;
+ };
+ static_assert( sizeof( GraphicsPipelineCreateInfo ) == sizeof( VkGraphicsPipelineCreateInfo ), "struct and wrapper have different size!" );
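+
+ /* Illustrative usage sketch: wiring previously filled pipeline state structs into a
+    GraphicsPipelineCreateInfo. The state objects, `shaderStages`, `pipelineLayout`,
+    `renderPass` and `device` are assumed to have been created earlier.
+
+      vk::GraphicsPipelineCreateInfo pipelineInfo = vk::GraphicsPipelineCreateInfo()
+        .setStageCount( 2 )
+        .setPStages( shaderStages )
+        .setPVertexInputState( &vertexInput )
+        .setPInputAssemblyState( &inputAssembly )
+        .setPViewportState( &viewportState )
+        .setPRasterizationState( &rasterization )
+        .setPMultisampleState( &multisample )
+        .setPColorBlendState( &colorBlend )
+        .setLayout( pipelineLayout )
+        .setRenderPass( renderPass )
+        .setSubpass( 0 );
+      vk::Pipeline pipeline = device.createGraphicsPipeline( vk::PipelineCache(), pipelineInfo );
+ */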
+
+ struct PhysicalDeviceLimits
+ {
+ operator const VkPhysicalDeviceLimits&() const
+ {
+ return *reinterpret_cast<const VkPhysicalDeviceLimits*>(this);
+ }
+
+ bool operator==( PhysicalDeviceLimits const& rhs ) const
+ {
+ return ( maxImageDimension1D == rhs.maxImageDimension1D )
+ && ( maxImageDimension2D == rhs.maxImageDimension2D )
+ && ( maxImageDimension3D == rhs.maxImageDimension3D )
+ && ( maxImageDimensionCube == rhs.maxImageDimensionCube )
+ && ( maxImageArrayLayers == rhs.maxImageArrayLayers )
+ && ( maxTexelBufferElements == rhs.maxTexelBufferElements )
+ && ( maxUniformBufferRange == rhs.maxUniformBufferRange )
+ && ( maxStorageBufferRange == rhs.maxStorageBufferRange )
+ && ( maxPushConstantsSize == rhs.maxPushConstantsSize )
+ && ( maxMemoryAllocationCount == rhs.maxMemoryAllocationCount )
+ && ( maxSamplerAllocationCount == rhs.maxSamplerAllocationCount )
+ && ( bufferImageGranularity == rhs.bufferImageGranularity )
+ && ( sparseAddressSpaceSize == rhs.sparseAddressSpaceSize )
+ && ( maxBoundDescriptorSets == rhs.maxBoundDescriptorSets )
+ && ( maxPerStageDescriptorSamplers == rhs.maxPerStageDescriptorSamplers )
+ && ( maxPerStageDescriptorUniformBuffers == rhs.maxPerStageDescriptorUniformBuffers )
+ && ( maxPerStageDescriptorStorageBuffers == rhs.maxPerStageDescriptorStorageBuffers )
+ && ( maxPerStageDescriptorSampledImages == rhs.maxPerStageDescriptorSampledImages )
+ && ( maxPerStageDescriptorStorageImages == rhs.maxPerStageDescriptorStorageImages )
+ && ( maxPerStageDescriptorInputAttachments == rhs.maxPerStageDescriptorInputAttachments )
+ && ( maxPerStageResources == rhs.maxPerStageResources )
+ && ( maxDescriptorSetSamplers == rhs.maxDescriptorSetSamplers )
+ && ( maxDescriptorSetUniformBuffers == rhs.maxDescriptorSetUniformBuffers )
+ && ( maxDescriptorSetUniformBuffersDynamic == rhs.maxDescriptorSetUniformBuffersDynamic )
+ && ( maxDescriptorSetStorageBuffers == rhs.maxDescriptorSetStorageBuffers )
+ && ( maxDescriptorSetStorageBuffersDynamic == rhs.maxDescriptorSetStorageBuffersDynamic )
+ && ( maxDescriptorSetSampledImages == rhs.maxDescriptorSetSampledImages )
+ && ( maxDescriptorSetStorageImages == rhs.maxDescriptorSetStorageImages )
+ && ( maxDescriptorSetInputAttachments == rhs.maxDescriptorSetInputAttachments )
+ && ( maxVertexInputAttributes == rhs.maxVertexInputAttributes )
+ && ( maxVertexInputBindings == rhs.maxVertexInputBindings )
+ && ( maxVertexInputAttributeOffset == rhs.maxVertexInputAttributeOffset )
+ && ( maxVertexInputBindingStride == rhs.maxVertexInputBindingStride )
+ && ( maxVertexOutputComponents == rhs.maxVertexOutputComponents )
+ && ( maxTessellationGenerationLevel == rhs.maxTessellationGenerationLevel )
+ && ( maxTessellationPatchSize == rhs.maxTessellationPatchSize )
+ && ( maxTessellationControlPerVertexInputComponents == rhs.maxTessellationControlPerVertexInputComponents )
+ && ( maxTessellationControlPerVertexOutputComponents == rhs.maxTessellationControlPerVertexOutputComponents )
+ && ( maxTessellationControlPerPatchOutputComponents == rhs.maxTessellationControlPerPatchOutputComponents )
+ && ( maxTessellationControlTotalOutputComponents == rhs.maxTessellationControlTotalOutputComponents )
+ && ( maxTessellationEvaluationInputComponents == rhs.maxTessellationEvaluationInputComponents )
+ && ( maxTessellationEvaluationOutputComponents == rhs.maxTessellationEvaluationOutputComponents )
+ && ( maxGeometryShaderInvocations == rhs.maxGeometryShaderInvocations )
+ && ( maxGeometryInputComponents == rhs.maxGeometryInputComponents )
+ && ( maxGeometryOutputComponents == rhs.maxGeometryOutputComponents )
+ && ( maxGeometryOutputVertices == rhs.maxGeometryOutputVertices )
+ && ( maxGeometryTotalOutputComponents == rhs.maxGeometryTotalOutputComponents )
+ && ( maxFragmentInputComponents == rhs.maxFragmentInputComponents )
+ && ( maxFragmentOutputAttachments == rhs.maxFragmentOutputAttachments )
+ && ( maxFragmentDualSrcAttachments == rhs.maxFragmentDualSrcAttachments )
+ && ( maxFragmentCombinedOutputResources == rhs.maxFragmentCombinedOutputResources )
+ && ( maxComputeSharedMemorySize == rhs.maxComputeSharedMemorySize )
+ && ( memcmp( maxComputeWorkGroupCount, rhs.maxComputeWorkGroupCount, 3 * sizeof( uint32_t ) ) == 0 )
+ && ( maxComputeWorkGroupInvocations == rhs.maxComputeWorkGroupInvocations )
+ && ( memcmp( maxComputeWorkGroupSize, rhs.maxComputeWorkGroupSize, 3 * sizeof( uint32_t ) ) == 0 )
+ && ( subPixelPrecisionBits == rhs.subPixelPrecisionBits )
+ && ( subTexelPrecisionBits == rhs.subTexelPrecisionBits )
+ && ( mipmapPrecisionBits == rhs.mipmapPrecisionBits )
+ && ( maxDrawIndexedIndexValue == rhs.maxDrawIndexedIndexValue )
+ && ( maxDrawIndirectCount == rhs.maxDrawIndirectCount )
+ && ( maxSamplerLodBias == rhs.maxSamplerLodBias )
+ && ( maxSamplerAnisotropy == rhs.maxSamplerAnisotropy )
+ && ( maxViewports == rhs.maxViewports )
+ && ( memcmp( maxViewportDimensions, rhs.maxViewportDimensions, 2 * sizeof( uint32_t ) ) == 0 )
+ && ( memcmp( viewportBoundsRange, rhs.viewportBoundsRange, 2 * sizeof( float ) ) == 0 )
+ && ( viewportSubPixelBits == rhs.viewportSubPixelBits )
+ && ( minMemoryMapAlignment == rhs.minMemoryMapAlignment )
+ && ( minTexelBufferOffsetAlignment == rhs.minTexelBufferOffsetAlignment )
+ && ( minUniformBufferOffsetAlignment == rhs.minUniformBufferOffsetAlignment )
+ && ( minStorageBufferOffsetAlignment == rhs.minStorageBufferOffsetAlignment )
+ && ( minTexelOffset == rhs.minTexelOffset )
+ && ( maxTexelOffset == rhs.maxTexelOffset )
+ && ( minTexelGatherOffset == rhs.minTexelGatherOffset )
+ && ( maxTexelGatherOffset == rhs.maxTexelGatherOffset )
+ && ( minInterpolationOffset == rhs.minInterpolationOffset )
+ && ( maxInterpolationOffset == rhs.maxInterpolationOffset )
+ && ( subPixelInterpolationOffsetBits == rhs.subPixelInterpolationOffsetBits )
+ && ( maxFramebufferWidth == rhs.maxFramebufferWidth )
+ && ( maxFramebufferHeight == rhs.maxFramebufferHeight )
+ && ( maxFramebufferLayers == rhs.maxFramebufferLayers )
+ && ( framebufferColorSampleCounts == rhs.framebufferColorSampleCounts )
+ && ( framebufferDepthSampleCounts == rhs.framebufferDepthSampleCounts )
+ && ( framebufferStencilSampleCounts == rhs.framebufferStencilSampleCounts )
+ && ( framebufferNoAttachmentsSampleCounts == rhs.framebufferNoAttachmentsSampleCounts )
+ && ( maxColorAttachments == rhs.maxColorAttachments )
+ && ( sampledImageColorSampleCounts == rhs.sampledImageColorSampleCounts )
+ && ( sampledImageIntegerSampleCounts == rhs.sampledImageIntegerSampleCounts )
+ && ( sampledImageDepthSampleCounts == rhs.sampledImageDepthSampleCounts )
+ && ( sampledImageStencilSampleCounts == rhs.sampledImageStencilSampleCounts )
+ && ( storageImageSampleCounts == rhs.storageImageSampleCounts )
+ && ( maxSampleMaskWords == rhs.maxSampleMaskWords )
+ && ( timestampComputeAndGraphics == rhs.timestampComputeAndGraphics )
+ && ( timestampPeriod == rhs.timestampPeriod )
+ && ( maxClipDistances == rhs.maxClipDistances )
+ && ( maxCullDistances == rhs.maxCullDistances )
+ && ( maxCombinedClipAndCullDistances == rhs.maxCombinedClipAndCullDistances )
+ && ( discreteQueuePriorities == rhs.discreteQueuePriorities )
+ && ( memcmp( pointSizeRange, rhs.pointSizeRange, 2 * sizeof( float ) ) == 0 )
+ && ( memcmp( lineWidthRange, rhs.lineWidthRange, 2 * sizeof( float ) ) == 0 )
+ && ( pointSizeGranularity == rhs.pointSizeGranularity )
+ && ( lineWidthGranularity == rhs.lineWidthGranularity )
+ && ( strictLines == rhs.strictLines )
+ && ( standardSampleLocations == rhs.standardSampleLocations )
+ && ( optimalBufferCopyOffsetAlignment == rhs.optimalBufferCopyOffsetAlignment )
+ && ( optimalBufferCopyRowPitchAlignment == rhs.optimalBufferCopyRowPitchAlignment )
+ && ( nonCoherentAtomSize == rhs.nonCoherentAtomSize );
+ }
+
+ bool operator!=( PhysicalDeviceLimits const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ uint32_t maxImageDimension1D;
+ uint32_t maxImageDimension2D;
+ uint32_t maxImageDimension3D;
+ uint32_t maxImageDimensionCube;
+ uint32_t maxImageArrayLayers;
+ uint32_t maxTexelBufferElements;
+ uint32_t maxUniformBufferRange;
+ uint32_t maxStorageBufferRange;
+ uint32_t maxPushConstantsSize;
+ uint32_t maxMemoryAllocationCount;
+ uint32_t maxSamplerAllocationCount;
+ DeviceSize bufferImageGranularity;
+ DeviceSize sparseAddressSpaceSize;
+ uint32_t maxBoundDescriptorSets;
+ uint32_t maxPerStageDescriptorSamplers;
+ uint32_t maxPerStageDescriptorUniformBuffers;
+ uint32_t maxPerStageDescriptorStorageBuffers;
+ uint32_t maxPerStageDescriptorSampledImages;
+ uint32_t maxPerStageDescriptorStorageImages;
+ uint32_t maxPerStageDescriptorInputAttachments;
+ uint32_t maxPerStageResources;
+ uint32_t maxDescriptorSetSamplers;
+ uint32_t maxDescriptorSetUniformBuffers;
+ uint32_t maxDescriptorSetUniformBuffersDynamic;
+ uint32_t maxDescriptorSetStorageBuffers;
+ uint32_t maxDescriptorSetStorageBuffersDynamic;
+ uint32_t maxDescriptorSetSampledImages;
+ uint32_t maxDescriptorSetStorageImages;
+ uint32_t maxDescriptorSetInputAttachments;
+ uint32_t maxVertexInputAttributes;
+ uint32_t maxVertexInputBindings;
+ uint32_t maxVertexInputAttributeOffset;
+ uint32_t maxVertexInputBindingStride;
+ uint32_t maxVertexOutputComponents;
+ uint32_t maxTessellationGenerationLevel;
+ uint32_t maxTessellationPatchSize;
+ uint32_t maxTessellationControlPerVertexInputComponents;
+ uint32_t maxTessellationControlPerVertexOutputComponents;
+ uint32_t maxTessellationControlPerPatchOutputComponents;
+ uint32_t maxTessellationControlTotalOutputComponents;
+ uint32_t maxTessellationEvaluationInputComponents;
+ uint32_t maxTessellationEvaluationOutputComponents;
+ uint32_t maxGeometryShaderInvocations;
+ uint32_t maxGeometryInputComponents;
+ uint32_t maxGeometryOutputComponents;
+ uint32_t maxGeometryOutputVertices;
+ uint32_t maxGeometryTotalOutputComponents;
+ uint32_t maxFragmentInputComponents;
+ uint32_t maxFragmentOutputAttachments;
+ uint32_t maxFragmentDualSrcAttachments;
+ uint32_t maxFragmentCombinedOutputResources;
+ uint32_t maxComputeSharedMemorySize;
+ uint32_t maxComputeWorkGroupCount[3];
+ uint32_t maxComputeWorkGroupInvocations;
+ uint32_t maxComputeWorkGroupSize[3];
+ uint32_t subPixelPrecisionBits;
+ uint32_t subTexelPrecisionBits;
+ uint32_t mipmapPrecisionBits;
+ uint32_t maxDrawIndexedIndexValue;
+ uint32_t maxDrawIndirectCount;
+ float maxSamplerLodBias;
+ float maxSamplerAnisotropy;
+ uint32_t maxViewports;
+ uint32_t maxViewportDimensions[2];
+ float viewportBoundsRange[2];
+ uint32_t viewportSubPixelBits;
+ size_t minMemoryMapAlignment;
+ DeviceSize minTexelBufferOffsetAlignment;
+ DeviceSize minUniformBufferOffsetAlignment;
+ DeviceSize minStorageBufferOffsetAlignment;
+ int32_t minTexelOffset;
+ uint32_t maxTexelOffset;
+ int32_t minTexelGatherOffset;
+ uint32_t maxTexelGatherOffset;
+ float minInterpolationOffset;
+ float maxInterpolationOffset;
+ uint32_t subPixelInterpolationOffsetBits;
+ uint32_t maxFramebufferWidth;
+ uint32_t maxFramebufferHeight;
+ uint32_t maxFramebufferLayers;
+ SampleCountFlags framebufferColorSampleCounts;
+ SampleCountFlags framebufferDepthSampleCounts;
+ SampleCountFlags framebufferStencilSampleCounts;
+ SampleCountFlags framebufferNoAttachmentsSampleCounts;
+ uint32_t maxColorAttachments;
+ SampleCountFlags sampledImageColorSampleCounts;
+ SampleCountFlags sampledImageIntegerSampleCounts;
+ SampleCountFlags sampledImageDepthSampleCounts;
+ SampleCountFlags sampledImageStencilSampleCounts;
+ SampleCountFlags storageImageSampleCounts;
+ uint32_t maxSampleMaskWords;
+ Bool32 timestampComputeAndGraphics;
+ float timestampPeriod;
+ uint32_t maxClipDistances;
+ uint32_t maxCullDistances;
+ uint32_t maxCombinedClipAndCullDistances;
+ uint32_t discreteQueuePriorities;
+ float pointSizeRange[2];
+ float lineWidthRange[2];
+ float pointSizeGranularity;
+ float lineWidthGranularity;
+ Bool32 strictLines;
+ Bool32 standardSampleLocations;
+ DeviceSize optimalBufferCopyOffsetAlignment;
+ DeviceSize optimalBufferCopyRowPitchAlignment;
+ DeviceSize nonCoherentAtomSize;
+ };
+ static_assert( sizeof( PhysicalDeviceLimits ) == sizeof( VkPhysicalDeviceLimits ), "struct and wrapper have different size!" );
+
+ struct PhysicalDeviceProperties
+ {
+ operator const VkPhysicalDeviceProperties&() const
+ {
+ return *reinterpret_cast<const VkPhysicalDeviceProperties*>(this);
+ }
+
+ bool operator==( PhysicalDeviceProperties const& rhs ) const
+ {
+ return ( apiVersion == rhs.apiVersion )
+ && ( driverVersion == rhs.driverVersion )
+ && ( vendorID == rhs.vendorID )
+ && ( deviceID == rhs.deviceID )
+ && ( deviceType == rhs.deviceType )
+ && ( memcmp( deviceName, rhs.deviceName, VK_MAX_PHYSICAL_DEVICE_NAME_SIZE * sizeof( char ) ) == 0 )
+ && ( memcmp( pipelineCacheUUID, rhs.pipelineCacheUUID, VK_UUID_SIZE * sizeof( uint8_t ) ) == 0 )
+ && ( limits == rhs.limits )
+ && ( sparseProperties == rhs.sparseProperties );
+ }
+
+ bool operator!=( PhysicalDeviceProperties const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ uint32_t apiVersion;
+ uint32_t driverVersion;
+ uint32_t vendorID;
+ uint32_t deviceID;
+ PhysicalDeviceType deviceType;
+ char deviceName[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE];
+ uint8_t pipelineCacheUUID[VK_UUID_SIZE];
+ PhysicalDeviceLimits limits;
+ PhysicalDeviceSparseProperties sparseProperties;
+ };
+ static_assert( sizeof( PhysicalDeviceProperties ) == sizeof( VkPhysicalDeviceProperties ), "struct and wrapper have different size!" );
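+
+ /* Illustrative usage sketch: the properties structs above are normally filled by a query
+    rather than constructed; `physicalDevice` is assumed to be a valid vk::PhysicalDevice.
+
+      vk::PhysicalDeviceProperties props = physicalDevice.getProperties();
+      uint32_t maxDim2D = props.limits.maxImageDimension2D;
+ */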
+
+ struct PhysicalDeviceProperties2KHR
+ {
+ operator const VkPhysicalDeviceProperties2KHR&() const
+ {
+ return *reinterpret_cast<const VkPhysicalDeviceProperties2KHR*>(this);
+ }
+
+ bool operator==( PhysicalDeviceProperties2KHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( properties == rhs.properties );
+ }
+
+ bool operator!=( PhysicalDeviceProperties2KHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ void* pNext;
+ PhysicalDeviceProperties properties;
+ };
+ static_assert( sizeof( PhysicalDeviceProperties2KHR ) == sizeof( VkPhysicalDeviceProperties2KHR ), "struct and wrapper have different size!" );
+
+ struct ImageFormatProperties2KHR
+ {
+ operator const VkImageFormatProperties2KHR&() const
+ {
+ return *reinterpret_cast<const VkImageFormatProperties2KHR*>(this);
+ }
+
+ bool operator==( ImageFormatProperties2KHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( imageFormatProperties == rhs.imageFormatProperties );
+ }
+
+ bool operator!=( ImageFormatProperties2KHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ void* pNext;
+ ImageFormatProperties imageFormatProperties;
+ };
+ static_assert( sizeof( ImageFormatProperties2KHR ) == sizeof( VkImageFormatProperties2KHR ), "struct and wrapper have different size!" );
+
+ struct PhysicalDeviceSparseImageFormatInfo2KHR
+ {
+ PhysicalDeviceSparseImageFormatInfo2KHR( Format format_ = Format::eUndefined, ImageType type_ = ImageType::e1D, SampleCountFlagBits samples_ = SampleCountFlagBits::e1, ImageUsageFlags usage_ = ImageUsageFlags(), ImageTiling tiling_ = ImageTiling::eOptimal )
+ : sType( StructureType::ePhysicalDeviceSparseImageFormatInfo2KHR )
+ , pNext( nullptr )
+ , format( format_ )
+ , type( type_ )
+ , samples( samples_ )
+ , usage( usage_ )
+ , tiling( tiling_ )
+ {
+ }
+
+ PhysicalDeviceSparseImageFormatInfo2KHR( VkPhysicalDeviceSparseImageFormatInfo2KHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PhysicalDeviceSparseImageFormatInfo2KHR) );
+ }
+
+ PhysicalDeviceSparseImageFormatInfo2KHR& operator=( VkPhysicalDeviceSparseImageFormatInfo2KHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PhysicalDeviceSparseImageFormatInfo2KHR) );
+ return *this;
+ }
+
+ PhysicalDeviceSparseImageFormatInfo2KHR& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PhysicalDeviceSparseImageFormatInfo2KHR& setFormat( Format format_ )
+ {
+ format = format_;
+ return *this;
+ }
+
+ PhysicalDeviceSparseImageFormatInfo2KHR& setType( ImageType type_ )
+ {
+ type = type_;
+ return *this;
+ }
+
+ PhysicalDeviceSparseImageFormatInfo2KHR& setSamples( SampleCountFlagBits samples_ )
+ {
+ samples = samples_;
+ return *this;
+ }
+
+ PhysicalDeviceSparseImageFormatInfo2KHR& setUsage( ImageUsageFlags usage_ )
+ {
+ usage = usage_;
+ return *this;
+ }
+
+ PhysicalDeviceSparseImageFormatInfo2KHR& setTiling( ImageTiling tiling_ )
+ {
+ tiling = tiling_;
+ return *this;
+ }
+
+ operator const VkPhysicalDeviceSparseImageFormatInfo2KHR&() const
+ {
+ return *reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2KHR*>(this);
+ }
+
+ bool operator==( PhysicalDeviceSparseImageFormatInfo2KHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( format == rhs.format )
+ && ( type == rhs.type )
+ && ( samples == rhs.samples )
+ && ( usage == rhs.usage )
+ && ( tiling == rhs.tiling );
+ }
+
+ bool operator!=( PhysicalDeviceSparseImageFormatInfo2KHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ Format format;
+ ImageType type;
+ SampleCountFlagBits samples;
+ ImageUsageFlags usage;
+ ImageTiling tiling;
+ };
+ static_assert( sizeof( PhysicalDeviceSparseImageFormatInfo2KHR ) == sizeof( VkPhysicalDeviceSparseImageFormatInfo2KHR ), "struct and wrapper have different size!" );
+
+ enum class AttachmentDescriptionFlagBits
+ {
+ eMayAlias = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT
+ };
+
+ using AttachmentDescriptionFlags = Flags<AttachmentDescriptionFlagBits, VkAttachmentDescriptionFlags>;
+
+ VULKAN_HPP_INLINE AttachmentDescriptionFlags operator|( AttachmentDescriptionFlagBits bit0, AttachmentDescriptionFlagBits bit1 )
+ {
+ return AttachmentDescriptionFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE AttachmentDescriptionFlags operator~( AttachmentDescriptionFlagBits bits )
+ {
+ return ~( AttachmentDescriptionFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<AttachmentDescriptionFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(AttachmentDescriptionFlagBits::eMayAlias)
+ };
+ };
+
+ struct AttachmentDescription
+ {
+ AttachmentDescription( AttachmentDescriptionFlags flags_ = AttachmentDescriptionFlags(), Format format_ = Format::eUndefined, SampleCountFlagBits samples_ = SampleCountFlagBits::e1, AttachmentLoadOp loadOp_ = AttachmentLoadOp::eLoad, AttachmentStoreOp storeOp_ = AttachmentStoreOp::eStore, AttachmentLoadOp stencilLoadOp_ = AttachmentLoadOp::eLoad, AttachmentStoreOp stencilStoreOp_ = AttachmentStoreOp::eStore, ImageLayout initialLayout_ = ImageLayout::eUndefined, ImageLayout finalLayout_ = ImageLayout::eUndefined )
+ : flags( flags_ )
+ , format( format_ )
+ , samples( samples_ )
+ , loadOp( loadOp_ )
+ , storeOp( storeOp_ )
+ , stencilLoadOp( stencilLoadOp_ )
+ , stencilStoreOp( stencilStoreOp_ )
+ , initialLayout( initialLayout_ )
+ , finalLayout( finalLayout_ )
+ {
+ }
+
+ AttachmentDescription( VkAttachmentDescription const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(AttachmentDescription) );
+ }
+
+ AttachmentDescription& operator=( VkAttachmentDescription const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(AttachmentDescription) );
+ return *this;
+ }
+
+ AttachmentDescription& setFlags( AttachmentDescriptionFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ AttachmentDescription& setFormat( Format format_ )
+ {
+ format = format_;
+ return *this;
+ }
+
+ AttachmentDescription& setSamples( SampleCountFlagBits samples_ )
+ {
+ samples = samples_;
+ return *this;
+ }
+
+ AttachmentDescription& setLoadOp( AttachmentLoadOp loadOp_ )
+ {
+ loadOp = loadOp_;
+ return *this;
+ }
+
+ AttachmentDescription& setStoreOp( AttachmentStoreOp storeOp_ )
+ {
+ storeOp = storeOp_;
+ return *this;
+ }
+
+ AttachmentDescription& setStencilLoadOp( AttachmentLoadOp stencilLoadOp_ )
+ {
+ stencilLoadOp = stencilLoadOp_;
+ return *this;
+ }
+
+ AttachmentDescription& setStencilStoreOp( AttachmentStoreOp stencilStoreOp_ )
+ {
+ stencilStoreOp = stencilStoreOp_;
+ return *this;
+ }
+
+ AttachmentDescription& setInitialLayout( ImageLayout initialLayout_ )
+ {
+ initialLayout = initialLayout_;
+ return *this;
+ }
+
+ AttachmentDescription& setFinalLayout( ImageLayout finalLayout_ )
+ {
+ finalLayout = finalLayout_;
+ return *this;
+ }
+
+ operator const VkAttachmentDescription&() const
+ {
+ return *reinterpret_cast<const VkAttachmentDescription*>(this);
+ }
+
+ bool operator==( AttachmentDescription const& rhs ) const
+ {
+ return ( flags == rhs.flags )
+ && ( format == rhs.format )
+ && ( samples == rhs.samples )
+ && ( loadOp == rhs.loadOp )
+ && ( storeOp == rhs.storeOp )
+ && ( stencilLoadOp == rhs.stencilLoadOp )
+ && ( stencilStoreOp == rhs.stencilStoreOp )
+ && ( initialLayout == rhs.initialLayout )
+ && ( finalLayout == rhs.finalLayout );
+ }
+
+ bool operator!=( AttachmentDescription const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ AttachmentDescriptionFlags flags;
+ Format format;
+ SampleCountFlagBits samples;
+ AttachmentLoadOp loadOp;
+ AttachmentStoreOp storeOp;
+ AttachmentLoadOp stencilLoadOp;
+ AttachmentStoreOp stencilStoreOp;
+ ImageLayout initialLayout;
+ ImageLayout finalLayout;
+ };
+ static_assert( sizeof( AttachmentDescription ) == sizeof( VkAttachmentDescription ), "struct and wrapper have different size!" );
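+
+ /* Illustrative usage sketch: describing a single color attachment that is cleared on load
+    and transitioned for presentation. `swapchainFormat` is an assumed vk::Format value.
+
+      vk::AttachmentDescription colorAttachment = vk::AttachmentDescription()
+        .setFormat( swapchainFormat )
+        .setSamples( vk::SampleCountFlagBits::e1 )
+        .setLoadOp( vk::AttachmentLoadOp::eClear )
+        .setStoreOp( vk::AttachmentStoreOp::eStore )
+        .setStencilLoadOp( vk::AttachmentLoadOp::eDontCare )
+        .setStencilStoreOp( vk::AttachmentStoreOp::eDontCare )
+        .setInitialLayout( vk::ImageLayout::eUndefined )
+        .setFinalLayout( vk::ImageLayout::ePresentSrcKHR );
+ */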
+
+ enum class StencilFaceFlagBits
+ {
+ eFront = VK_STENCIL_FACE_FRONT_BIT,
+ eBack = VK_STENCIL_FACE_BACK_BIT,
+ eVkStencilFrontAndBack = VK_STENCIL_FRONT_AND_BACK
+ };
+
+ using StencilFaceFlags = Flags<StencilFaceFlagBits, VkStencilFaceFlags>;
+
+ VULKAN_HPP_INLINE StencilFaceFlags operator|( StencilFaceFlagBits bit0, StencilFaceFlagBits bit1 )
+ {
+ return StencilFaceFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE StencilFaceFlags operator~( StencilFaceFlagBits bits )
+ {
+ return ~( StencilFaceFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<StencilFaceFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(StencilFaceFlagBits::eFront) | VkFlags(StencilFaceFlagBits::eBack) | VkFlags(StencilFaceFlagBits::eVkStencilFrontAndBack)
+ };
+ };
+
+ enum class DescriptorPoolCreateFlagBits
+ {
+ eFreeDescriptorSet = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT
+ };
+
+ using DescriptorPoolCreateFlags = Flags<DescriptorPoolCreateFlagBits, VkDescriptorPoolCreateFlags>;
+
+ VULKAN_HPP_INLINE DescriptorPoolCreateFlags operator|( DescriptorPoolCreateFlagBits bit0, DescriptorPoolCreateFlagBits bit1 )
+ {
+ return DescriptorPoolCreateFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE DescriptorPoolCreateFlags operator~( DescriptorPoolCreateFlagBits bits )
+ {
+ return ~( DescriptorPoolCreateFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<DescriptorPoolCreateFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(DescriptorPoolCreateFlagBits::eFreeDescriptorSet)
+ };
+ };
+
+ struct DescriptorPoolCreateInfo
+ {
+ DescriptorPoolCreateInfo( DescriptorPoolCreateFlags flags_ = DescriptorPoolCreateFlags(), uint32_t maxSets_ = 0, uint32_t poolSizeCount_ = 0, const DescriptorPoolSize* pPoolSizes_ = nullptr )
+ : sType( StructureType::eDescriptorPoolCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , maxSets( maxSets_ )
+ , poolSizeCount( poolSizeCount_ )
+ , pPoolSizes( pPoolSizes_ )
+ {
+ }
+
+ DescriptorPoolCreateInfo( VkDescriptorPoolCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DescriptorPoolCreateInfo) );
+ }
+
+ DescriptorPoolCreateInfo& operator=( VkDescriptorPoolCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DescriptorPoolCreateInfo) );
+ return *this;
+ }
+
+ DescriptorPoolCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DescriptorPoolCreateInfo& setFlags( DescriptorPoolCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ DescriptorPoolCreateInfo& setMaxSets( uint32_t maxSets_ )
+ {
+ maxSets = maxSets_;
+ return *this;
+ }
+
+ DescriptorPoolCreateInfo& setPoolSizeCount( uint32_t poolSizeCount_ )
+ {
+ poolSizeCount = poolSizeCount_;
+ return *this;
+ }
+
+ DescriptorPoolCreateInfo& setPPoolSizes( const DescriptorPoolSize* pPoolSizes_ )
+ {
+ pPoolSizes = pPoolSizes_;
+ return *this;
+ }
+
+ operator const VkDescriptorPoolCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkDescriptorPoolCreateInfo*>(this);
+ }
+
+ bool operator==( DescriptorPoolCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( maxSets == rhs.maxSets )
+ && ( poolSizeCount == rhs.poolSizeCount )
+ && ( pPoolSizes == rhs.pPoolSizes );
+ }
+
+ bool operator!=( DescriptorPoolCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ DescriptorPoolCreateFlags flags;
+ uint32_t maxSets;
+ uint32_t poolSizeCount;
+ const DescriptorPoolSize* pPoolSizes;
+ };
+ static_assert( sizeof( DescriptorPoolCreateInfo ) == sizeof( VkDescriptorPoolCreateInfo ), "struct and wrapper have different size!" );
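+
+ /* Illustrative usage sketch: a small pool of uniform-buffer descriptors whose sets can be
+    freed individually. `device` is assumed to be a valid vk::Device.
+
+      vk::DescriptorPoolSize poolSize( vk::DescriptorType::eUniformBuffer, 16 );
+      vk::DescriptorPoolCreateInfo poolInfo = vk::DescriptorPoolCreateInfo()
+        .setFlags( vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet )
+        .setMaxSets( 16 )
+        .setPoolSizeCount( 1 )
+        .setPPoolSizes( &poolSize );
+      vk::DescriptorPool descriptorPool = device.createDescriptorPool( poolInfo );
+ */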
+
+ enum class DependencyFlagBits
+ {
+ eByRegion = VK_DEPENDENCY_BY_REGION_BIT,
+ eViewLocalKHX = VK_DEPENDENCY_VIEW_LOCAL_BIT_KHX,
+ eDeviceGroupKHX = VK_DEPENDENCY_DEVICE_GROUP_BIT_KHX
+ };
+
+ using DependencyFlags = Flags<DependencyFlagBits, VkDependencyFlags>;
+
+ VULKAN_HPP_INLINE DependencyFlags operator|( DependencyFlagBits bit0, DependencyFlagBits bit1 )
+ {
+ return DependencyFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE DependencyFlags operator~( DependencyFlagBits bits )
+ {
+ return ~( DependencyFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<DependencyFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(DependencyFlagBits::eByRegion) | VkFlags(DependencyFlagBits::eViewLocalKHX) | VkFlags(DependencyFlagBits::eDeviceGroupKHX)
+ };
+ };
+
+ struct SubpassDependency
+ {
+ SubpassDependency( uint32_t srcSubpass_ = 0, uint32_t dstSubpass_ = 0, PipelineStageFlags srcStageMask_ = PipelineStageFlags(), PipelineStageFlags dstStageMask_ = PipelineStageFlags(), AccessFlags srcAccessMask_ = AccessFlags(), AccessFlags dstAccessMask_ = AccessFlags(), DependencyFlags dependencyFlags_ = DependencyFlags() )
+ : srcSubpass( srcSubpass_ )
+ , dstSubpass( dstSubpass_ )
+ , srcStageMask( srcStageMask_ )
+ , dstStageMask( dstStageMask_ )
+ , srcAccessMask( srcAccessMask_ )
+ , dstAccessMask( dstAccessMask_ )
+ , dependencyFlags( dependencyFlags_ )
+ {
+ }
+
+ SubpassDependency( VkSubpassDependency const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SubpassDependency) );
+ }
+
+ SubpassDependency& operator=( VkSubpassDependency const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SubpassDependency) );
+ return *this;
+ }
+
+ SubpassDependency& setSrcSubpass( uint32_t srcSubpass_ )
+ {
+ srcSubpass = srcSubpass_;
+ return *this;
+ }
+
+ SubpassDependency& setDstSubpass( uint32_t dstSubpass_ )
+ {
+ dstSubpass = dstSubpass_;
+ return *this;
+ }
+
+ SubpassDependency& setSrcStageMask( PipelineStageFlags srcStageMask_ )
+ {
+ srcStageMask = srcStageMask_;
+ return *this;
+ }
+
+ SubpassDependency& setDstStageMask( PipelineStageFlags dstStageMask_ )
+ {
+ dstStageMask = dstStageMask_;
+ return *this;
+ }
+
+ SubpassDependency& setSrcAccessMask( AccessFlags srcAccessMask_ )
+ {
+ srcAccessMask = srcAccessMask_;
+ return *this;
+ }
+
+ SubpassDependency& setDstAccessMask( AccessFlags dstAccessMask_ )
+ {
+ dstAccessMask = dstAccessMask_;
+ return *this;
+ }
+
+ SubpassDependency& setDependencyFlags( DependencyFlags dependencyFlags_ )
+ {
+ dependencyFlags = dependencyFlags_;
+ return *this;
+ }
+
+ operator const VkSubpassDependency&() const
+ {
+ return *reinterpret_cast<const VkSubpassDependency*>(this);
+ }
+
+ bool operator==( SubpassDependency const& rhs ) const
+ {
+ return ( srcSubpass == rhs.srcSubpass )
+ && ( dstSubpass == rhs.dstSubpass )
+ && ( srcStageMask == rhs.srcStageMask )
+ && ( dstStageMask == rhs.dstStageMask )
+ && ( srcAccessMask == rhs.srcAccessMask )
+ && ( dstAccessMask == rhs.dstAccessMask )
+ && ( dependencyFlags == rhs.dependencyFlags );
+ }
+
+ bool operator!=( SubpassDependency const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ uint32_t srcSubpass;
+ uint32_t dstSubpass;
+ PipelineStageFlags srcStageMask;
+ PipelineStageFlags dstStageMask;
+ AccessFlags srcAccessMask;
+ AccessFlags dstAccessMask;
+ DependencyFlags dependencyFlags;
+ };
+ static_assert( sizeof( SubpassDependency ) == sizeof( VkSubpassDependency ), "struct and wrapper have different size!" );
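+
+ // Usage sketch: a typical external-to-first-subpass dependency built with the setters
+ // above. The stage and access masks are illustrative; pick the ones the render pass
+ // actually needs.
+ //
+ //   vk::SubpassDependency dependency;
+ //   dependency.setSrcSubpass( VK_SUBPASS_EXTERNAL )
+ //             .setDstSubpass( 0 )
+ //             .setSrcStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput )
+ //             .setDstStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput )
+ //             .setDstAccessMask( vk::AccessFlagBits::eColorAttachmentWrite )
+ //             .setDependencyFlags( vk::DependencyFlagBits::eByRegion );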
+
+ enum class PresentModeKHR
+ {
+ eImmediate = VK_PRESENT_MODE_IMMEDIATE_KHR,
+ eMailbox = VK_PRESENT_MODE_MAILBOX_KHR,
+ eFifo = VK_PRESENT_MODE_FIFO_KHR,
+ eFifoRelaxed = VK_PRESENT_MODE_FIFO_RELAXED_KHR,
+ eSharedDemandRefresh = VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR,
+ eSharedContinuousRefresh = VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR
+ };
+
+ enum class ColorSpaceKHR
+ {
+ eSrgbNonlinear = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR,
+ eDisplayP3NonlinearEXT = VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT,
+ eExtendedSrgbLinearEXT = VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT,
+ eDciP3LinearEXT = VK_COLOR_SPACE_DCI_P3_LINEAR_EXT,
+ eDciP3NonlinearEXT = VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT,
+ eBt709LinearEXT = VK_COLOR_SPACE_BT709_LINEAR_EXT,
+ eBt709NonlinearEXT = VK_COLOR_SPACE_BT709_NONLINEAR_EXT,
+ eBt2020LinearEXT = VK_COLOR_SPACE_BT2020_LINEAR_EXT,
+ eHdr10St2084EXT = VK_COLOR_SPACE_HDR10_ST2084_EXT,
+ eDolbyvisionEXT = VK_COLOR_SPACE_DOLBYVISION_EXT,
+ eHdr10HlgEXT = VK_COLOR_SPACE_HDR10_HLG_EXT,
+ eAdobergbLinearEXT = VK_COLOR_SPACE_ADOBERGB_LINEAR_EXT,
+ eAdobergbNonlinearEXT = VK_COLOR_SPACE_ADOBERGB_NONLINEAR_EXT,
+ ePassThroughEXT = VK_COLOR_SPACE_PASS_THROUGH_EXT
+ };
+
+ struct SurfaceFormatKHR
+ {
+ operator const VkSurfaceFormatKHR&() const
+ {
+ return *reinterpret_cast<const VkSurfaceFormatKHR*>(this);
+ }
+
+ bool operator==( SurfaceFormatKHR const& rhs ) const
+ {
+ return ( format == rhs.format )
+ && ( colorSpace == rhs.colorSpace );
+ }
+
+ bool operator!=( SurfaceFormatKHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ Format format;
+ ColorSpaceKHR colorSpace;
+ };
+ static_assert( sizeof( SurfaceFormatKHR ) == sizeof( VkSurfaceFormatKHR ), "struct and wrapper have different size!" );
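+
+ // Usage sketch: choosing a swapchain format from the formats a surface reports.
+ // Assumes formats is a std::vector<vk::SurfaceFormatKHR> queried from the physical device;
+ // the preferred format/color space pair is illustrative.
+ //
+ //   vk::SurfaceFormatKHR chosen = formats[0];
+ //   for ( auto const& f : formats )
+ //   {
+ //     if ( ( f.format == vk::Format::eB8G8R8A8Unorm ) && ( f.colorSpace == vk::ColorSpaceKHR::eSrgbNonlinear ) )
+ //     {
+ //       chosen = f;
+ //       break;
+ //     }
+ //   }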
+
+ struct SurfaceFormat2KHR
+ {
+ operator const VkSurfaceFormat2KHR&() const
+ {
+ return *reinterpret_cast<const VkSurfaceFormat2KHR*>(this);
+ }
+
+ bool operator==( SurfaceFormat2KHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( surfaceFormat == rhs.surfaceFormat );
+ }
+
+ bool operator!=( SurfaceFormat2KHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ void* pNext;
+ SurfaceFormatKHR surfaceFormat;
+ };
+ static_assert( sizeof( SurfaceFormat2KHR ) == sizeof( VkSurfaceFormat2KHR ), "struct and wrapper have different size!" );
+
+ enum class DisplayPlaneAlphaFlagBitsKHR
+ {
+ eOpaque = VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR,
+ eGlobal = VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR,
+ ePerPixel = VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_BIT_KHR,
+ ePerPixelPremultiplied = VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_PREMULTIPLIED_BIT_KHR
+ };
+
+ using DisplayPlaneAlphaFlagsKHR = Flags<DisplayPlaneAlphaFlagBitsKHR, VkDisplayPlaneAlphaFlagsKHR>;
+
+ VULKAN_HPP_INLINE DisplayPlaneAlphaFlagsKHR operator|( DisplayPlaneAlphaFlagBitsKHR bit0, DisplayPlaneAlphaFlagBitsKHR bit1 )
+ {
+ return DisplayPlaneAlphaFlagsKHR( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE DisplayPlaneAlphaFlagsKHR operator~( DisplayPlaneAlphaFlagBitsKHR bits )
+ {
+ return ~( DisplayPlaneAlphaFlagsKHR( bits ) );
+ }
+
+ template <> struct FlagTraits<DisplayPlaneAlphaFlagBitsKHR>
+ {
+ enum
+ {
+ allFlags = VkFlags(DisplayPlaneAlphaFlagBitsKHR::eOpaque) | VkFlags(DisplayPlaneAlphaFlagBitsKHR::eGlobal) | VkFlags(DisplayPlaneAlphaFlagBitsKHR::ePerPixel) | VkFlags(DisplayPlaneAlphaFlagBitsKHR::ePerPixelPremultiplied)
+ };
+ };
+
+ struct DisplayPlaneCapabilitiesKHR
+ {
+ operator const VkDisplayPlaneCapabilitiesKHR&() const
+ {
+ return *reinterpret_cast<const VkDisplayPlaneCapabilitiesKHR*>(this);
+ }
+
+ bool operator==( DisplayPlaneCapabilitiesKHR const& rhs ) const
+ {
+ return ( supportedAlpha == rhs.supportedAlpha )
+ && ( minSrcPosition == rhs.minSrcPosition )
+ && ( maxSrcPosition == rhs.maxSrcPosition )
+ && ( minSrcExtent == rhs.minSrcExtent )
+ && ( maxSrcExtent == rhs.maxSrcExtent )
+ && ( minDstPosition == rhs.minDstPosition )
+ && ( maxDstPosition == rhs.maxDstPosition )
+ && ( minDstExtent == rhs.minDstExtent )
+ && ( maxDstExtent == rhs.maxDstExtent );
+ }
+
+ bool operator!=( DisplayPlaneCapabilitiesKHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ DisplayPlaneAlphaFlagsKHR supportedAlpha;
+ Offset2D minSrcPosition;
+ Offset2D maxSrcPosition;
+ Extent2D minSrcExtent;
+ Extent2D maxSrcExtent;
+ Offset2D minDstPosition;
+ Offset2D maxDstPosition;
+ Extent2D minDstExtent;
+ Extent2D maxDstExtent;
+ };
+ static_assert( sizeof( DisplayPlaneCapabilitiesKHR ) == sizeof( VkDisplayPlaneCapabilitiesKHR ), "struct and wrapper have different size!" );
+
+ enum class CompositeAlphaFlagBitsKHR
+ {
+ eOpaque = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
+ ePreMultiplied = VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR,
+ ePostMultiplied = VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR,
+ eInherit = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
+ };
+
+ using CompositeAlphaFlagsKHR = Flags<CompositeAlphaFlagBitsKHR, VkCompositeAlphaFlagsKHR>;
+
+ VULKAN_HPP_INLINE CompositeAlphaFlagsKHR operator|( CompositeAlphaFlagBitsKHR bit0, CompositeAlphaFlagBitsKHR bit1 )
+ {
+ return CompositeAlphaFlagsKHR( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE CompositeAlphaFlagsKHR operator~( CompositeAlphaFlagBitsKHR bits )
+ {
+ return ~( CompositeAlphaFlagsKHR( bits ) );
+ }
+
+ template <> struct FlagTraits<CompositeAlphaFlagBitsKHR>
+ {
+ enum
+ {
+ allFlags = VkFlags(CompositeAlphaFlagBitsKHR::eOpaque) | VkFlags(CompositeAlphaFlagBitsKHR::ePreMultiplied) | VkFlags(CompositeAlphaFlagBitsKHR::ePostMultiplied) | VkFlags(CompositeAlphaFlagBitsKHR::eInherit)
+ };
+ };
+
+ enum class SurfaceTransformFlagBitsKHR
+ {
+ eIdentity = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR,
+ eRotate90 = VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR,
+ eRotate180 = VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR,
+ eRotate270 = VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR,
+ eHorizontalMirror = VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR,
+ eHorizontalMirrorRotate90 = VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR,
+ eHorizontalMirrorRotate180 = VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR,
+ eHorizontalMirrorRotate270 = VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR,
+ eInherit = VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR
+ };
+
+ using SurfaceTransformFlagsKHR = Flags<SurfaceTransformFlagBitsKHR, VkSurfaceTransformFlagsKHR>;
+
+ VULKAN_HPP_INLINE SurfaceTransformFlagsKHR operator|( SurfaceTransformFlagBitsKHR bit0, SurfaceTransformFlagBitsKHR bit1 )
+ {
+ return SurfaceTransformFlagsKHR( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE SurfaceTransformFlagsKHR operator~( SurfaceTransformFlagBitsKHR bits )
+ {
+ return ~( SurfaceTransformFlagsKHR( bits ) );
+ }
+
+ template <> struct FlagTraits<SurfaceTransformFlagBitsKHR>
+ {
+ enum
+ {
+ allFlags = VkFlags(SurfaceTransformFlagBitsKHR::eIdentity) | VkFlags(SurfaceTransformFlagBitsKHR::eRotate90) | VkFlags(SurfaceTransformFlagBitsKHR::eRotate180) | VkFlags(SurfaceTransformFlagBitsKHR::eRotate270) | VkFlags(SurfaceTransformFlagBitsKHR::eHorizontalMirror) | VkFlags(SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate90) | VkFlags(SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate180) | VkFlags(SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate270) | VkFlags(SurfaceTransformFlagBitsKHR::eInherit)
+ };
+ };
+
+ struct DisplayPropertiesKHR
+ {
+ operator const VkDisplayPropertiesKHR&() const
+ {
+ return *reinterpret_cast<const VkDisplayPropertiesKHR*>(this);
+ }
+
+ bool operator==( DisplayPropertiesKHR const& rhs ) const
+ {
+ return ( display == rhs.display )
+ && ( displayName == rhs.displayName )
+ && ( physicalDimensions == rhs.physicalDimensions )
+ && ( physicalResolution == rhs.physicalResolution )
+ && ( supportedTransforms == rhs.supportedTransforms )
+ && ( planeReorderPossible == rhs.planeReorderPossible )
+ && ( persistentContent == rhs.persistentContent );
+ }
+
+ bool operator!=( DisplayPropertiesKHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ DisplayKHR display;
+ const char* displayName;
+ Extent2D physicalDimensions;
+ Extent2D physicalResolution;
+ SurfaceTransformFlagsKHR supportedTransforms;
+ Bool32 planeReorderPossible;
+ Bool32 persistentContent;
+ };
+ static_assert( sizeof( DisplayPropertiesKHR ) == sizeof( VkDisplayPropertiesKHR ), "struct and wrapper have different size!" );
+
+ struct DisplaySurfaceCreateInfoKHR
+ {
+ DisplaySurfaceCreateInfoKHR( DisplaySurfaceCreateFlagsKHR flags_ = DisplaySurfaceCreateFlagsKHR(), DisplayModeKHR displayMode_ = DisplayModeKHR(), uint32_t planeIndex_ = 0, uint32_t planeStackIndex_ = 0, SurfaceTransformFlagBitsKHR transform_ = SurfaceTransformFlagBitsKHR::eIdentity, float globalAlpha_ = 0, DisplayPlaneAlphaFlagBitsKHR alphaMode_ = DisplayPlaneAlphaFlagBitsKHR::eOpaque, Extent2D imageExtent_ = Extent2D() )
+ : sType( StructureType::eDisplaySurfaceCreateInfoKHR )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , displayMode( displayMode_ )
+ , planeIndex( planeIndex_ )
+ , planeStackIndex( planeStackIndex_ )
+ , transform( transform_ )
+ , globalAlpha( globalAlpha_ )
+ , alphaMode( alphaMode_ )
+ , imageExtent( imageExtent_ )
+ {
+ }
+
+ DisplaySurfaceCreateInfoKHR( VkDisplaySurfaceCreateInfoKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DisplaySurfaceCreateInfoKHR) );
+ }
+
+ DisplaySurfaceCreateInfoKHR& operator=( VkDisplaySurfaceCreateInfoKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DisplaySurfaceCreateInfoKHR) );
+ return *this;
+ }
+
+ DisplaySurfaceCreateInfoKHR& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DisplaySurfaceCreateInfoKHR& setFlags( DisplaySurfaceCreateFlagsKHR flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ DisplaySurfaceCreateInfoKHR& setDisplayMode( DisplayModeKHR displayMode_ )
+ {
+ displayMode = displayMode_;
+ return *this;
+ }
+
+ DisplaySurfaceCreateInfoKHR& setPlaneIndex( uint32_t planeIndex_ )
+ {
+ planeIndex = planeIndex_;
+ return *this;
+ }
+
+ DisplaySurfaceCreateInfoKHR& setPlaneStackIndex( uint32_t planeStackIndex_ )
+ {
+ planeStackIndex = planeStackIndex_;
+ return *this;
+ }
+
+ DisplaySurfaceCreateInfoKHR& setTransform( SurfaceTransformFlagBitsKHR transform_ )
+ {
+ transform = transform_;
+ return *this;
+ }
+
+ DisplaySurfaceCreateInfoKHR& setGlobalAlpha( float globalAlpha_ )
+ {
+ globalAlpha = globalAlpha_;
+ return *this;
+ }
+
+ DisplaySurfaceCreateInfoKHR& setAlphaMode( DisplayPlaneAlphaFlagBitsKHR alphaMode_ )
+ {
+ alphaMode = alphaMode_;
+ return *this;
+ }
+
+ DisplaySurfaceCreateInfoKHR& setImageExtent( Extent2D imageExtent_ )
+ {
+ imageExtent = imageExtent_;
+ return *this;
+ }
+
+ operator const VkDisplaySurfaceCreateInfoKHR&() const
+ {
+ return *reinterpret_cast<const VkDisplaySurfaceCreateInfoKHR*>(this);
+ }
+
+ bool operator==( DisplaySurfaceCreateInfoKHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( displayMode == rhs.displayMode )
+ && ( planeIndex == rhs.planeIndex )
+ && ( planeStackIndex == rhs.planeStackIndex )
+ && ( transform == rhs.transform )
+ && ( globalAlpha == rhs.globalAlpha )
+ && ( alphaMode == rhs.alphaMode )
+ && ( imageExtent == rhs.imageExtent );
+ }
+
+ bool operator!=( DisplaySurfaceCreateInfoKHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ DisplaySurfaceCreateFlagsKHR flags;
+ DisplayModeKHR displayMode;
+ uint32_t planeIndex;
+ uint32_t planeStackIndex;
+ SurfaceTransformFlagBitsKHR transform;
+ float globalAlpha;
+ DisplayPlaneAlphaFlagBitsKHR alphaMode;
+ Extent2D imageExtent;
+ };
+ static_assert( sizeof( DisplaySurfaceCreateInfoKHR ) == sizeof( VkDisplaySurfaceCreateInfoKHR ), "struct and wrapper have different size!" );
+
+ struct SurfaceCapabilitiesKHR
+ {
+ operator const VkSurfaceCapabilitiesKHR&() const
+ {
+ return *reinterpret_cast<const VkSurfaceCapabilitiesKHR*>(this);
+ }
+
+ bool operator==( SurfaceCapabilitiesKHR const& rhs ) const
+ {
+ return ( minImageCount == rhs.minImageCount )
+ && ( maxImageCount == rhs.maxImageCount )
+ && ( currentExtent == rhs.currentExtent )
+ && ( minImageExtent == rhs.minImageExtent )
+ && ( maxImageExtent == rhs.maxImageExtent )
+ && ( maxImageArrayLayers == rhs.maxImageArrayLayers )
+ && ( supportedTransforms == rhs.supportedTransforms )
+ && ( currentTransform == rhs.currentTransform )
+ && ( supportedCompositeAlpha == rhs.supportedCompositeAlpha )
+ && ( supportedUsageFlags == rhs.supportedUsageFlags );
+ }
+
+ bool operator!=( SurfaceCapabilitiesKHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ uint32_t minImageCount;
+ uint32_t maxImageCount;
+ Extent2D currentExtent;
+ Extent2D minImageExtent;
+ Extent2D maxImageExtent;
+ uint32_t maxImageArrayLayers;
+ SurfaceTransformFlagsKHR supportedTransforms;
+ SurfaceTransformFlagBitsKHR currentTransform;
+ CompositeAlphaFlagsKHR supportedCompositeAlpha;
+ ImageUsageFlags supportedUsageFlags;
+ };
+ static_assert( sizeof( SurfaceCapabilitiesKHR ) == sizeof( VkSurfaceCapabilitiesKHR ), "struct and wrapper have different size!" );
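+
+ // Usage sketch: selecting a composite alpha mode the surface actually supports.
+ // Assumes caps is a vk::SurfaceCapabilitiesKHR obtained from the physical device.
+ //
+ //   vk::CompositeAlphaFlagBitsKHR alpha = vk::CompositeAlphaFlagBitsKHR::eOpaque;
+ //   if ( !( caps.supportedCompositeAlpha & vk::CompositeAlphaFlagBitsKHR::eOpaque ) )
+ //   {
+ //     alpha = vk::CompositeAlphaFlagBitsKHR::eInherit;
+ //   }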
+
+ struct SurfaceCapabilities2KHR
+ {
+ operator const VkSurfaceCapabilities2KHR&() const
+ {
+ return *reinterpret_cast<const VkSurfaceCapabilities2KHR*>(this);
+ }
+
+ bool operator==( SurfaceCapabilities2KHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( surfaceCapabilities == rhs.surfaceCapabilities );
+ }
+
+ bool operator!=( SurfaceCapabilities2KHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ void* pNext;
+ SurfaceCapabilitiesKHR surfaceCapabilities;
+ };
+ static_assert( sizeof( SurfaceCapabilities2KHR ) == sizeof( VkSurfaceCapabilities2KHR ), "struct and wrapper have different size!" );
+
+ enum class DebugReportFlagBitsEXT
+ {
+ eInformation = VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
+ eWarning = VK_DEBUG_REPORT_WARNING_BIT_EXT,
+ ePerformanceWarning = VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
+ eError = VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ eDebug = VK_DEBUG_REPORT_DEBUG_BIT_EXT
+ };
+
+ using DebugReportFlagsEXT = Flags<DebugReportFlagBitsEXT, VkDebugReportFlagsEXT>;
+
+ VULKAN_HPP_INLINE DebugReportFlagsEXT operator|( DebugReportFlagBitsEXT bit0, DebugReportFlagBitsEXT bit1 )
+ {
+ return DebugReportFlagsEXT( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE DebugReportFlagsEXT operator~( DebugReportFlagBitsEXT bits )
+ {
+ return ~( DebugReportFlagsEXT( bits ) );
+ }
+
+ template <> struct FlagTraits<DebugReportFlagBitsEXT>
+ {
+ enum
+ {
+ allFlags = VkFlags(DebugReportFlagBitsEXT::eInformation) | VkFlags(DebugReportFlagBitsEXT::eWarning) | VkFlags(DebugReportFlagBitsEXT::ePerformanceWarning) | VkFlags(DebugReportFlagBitsEXT::eError) | VkFlags(DebugReportFlagBitsEXT::eDebug)
+ };
+ };
+
+ struct DebugReportCallbackCreateInfoEXT
+ {
+ DebugReportCallbackCreateInfoEXT( DebugReportFlagsEXT flags_ = DebugReportFlagsEXT(), PFN_vkDebugReportCallbackEXT pfnCallback_ = nullptr, void* pUserData_ = nullptr )
+ : sType( StructureType::eDebugReportCallbackCreateInfoEXT )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , pfnCallback( pfnCallback_ )
+ , pUserData( pUserData_ )
+ {
+ }
+
+ DebugReportCallbackCreateInfoEXT( VkDebugReportCallbackCreateInfoEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DebugReportCallbackCreateInfoEXT) );
+ }
+
+ DebugReportCallbackCreateInfoEXT& operator=( VkDebugReportCallbackCreateInfoEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DebugReportCallbackCreateInfoEXT) );
+ return *this;
+ }
+
+ DebugReportCallbackCreateInfoEXT& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DebugReportCallbackCreateInfoEXT& setFlags( DebugReportFlagsEXT flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ DebugReportCallbackCreateInfoEXT& setPfnCallback( PFN_vkDebugReportCallbackEXT pfnCallback_ )
+ {
+ pfnCallback = pfnCallback_;
+ return *this;
+ }
+
+ DebugReportCallbackCreateInfoEXT& setPUserData( void* pUserData_ )
+ {
+ pUserData = pUserData_;
+ return *this;
+ }
+
+ operator const VkDebugReportCallbackCreateInfoEXT&() const
+ {
+ return *reinterpret_cast<const VkDebugReportCallbackCreateInfoEXT*>(this);
+ }
+
+ bool operator==( DebugReportCallbackCreateInfoEXT const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( pfnCallback == rhs.pfnCallback )
+ && ( pUserData == rhs.pUserData );
+ }
+
+ bool operator!=( DebugReportCallbackCreateInfoEXT const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ DebugReportFlagsEXT flags;
+ PFN_vkDebugReportCallbackEXT pfnCallback;
+ void* pUserData;
+ };
+ static_assert( sizeof( DebugReportCallbackCreateInfoEXT ) == sizeof( VkDebugReportCallbackCreateInfoEXT ), "struct and wrapper have different size!" );
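+
+ // Usage sketch: requesting error and warning reports. Assumes myDebugCallback is a
+ // user-supplied function matching PFN_vkDebugReportCallbackEXT and instance is a
+ // vk::Instance created with VK_EXT_debug_report enabled.
+ //
+ //   vk::DebugReportCallbackCreateInfoEXT callbackInfo(
+ //     vk::DebugReportFlagBitsEXT::eError | vk::DebugReportFlagBitsEXT::eWarning,
+ //     myDebugCallback );
+ //   vk::DebugReportCallbackEXT callback = instance.createDebugReportCallbackEXT( callbackInfo );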
+
+ enum class DebugReportObjectTypeEXT
+ {
+ eUnknown = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
+ eInstance = VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
+ ePhysicalDevice = VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
+ eDevice = VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
+ eQueue = VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
+ eSemaphore = VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
+ eCommandBuffer = VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ eFence = VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
+ eDeviceMemory = VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
+ eBuffer = VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
+ eImage = VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
+ eEvent = VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
+ eQueryPool = VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT,
+ eBufferView = VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT,
+ eImageView = VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
+ eShaderModule = VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT,
+ ePipelineCache = VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT,
+ ePipelineLayout = VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,
+ eRenderPass = VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
+ ePipeline = VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
+ eDescriptorSetLayout = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
+ eSampler = VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
+ eDescriptorPool = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
+ eDescriptorSet = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
+ eFramebuffer = VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
+ eCommandPool = VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT,
+ eSurfaceKhr = VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT,
+ eSwapchainKhr = VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
+ eDebugReport = VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT,
+ eDisplayKhr = VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT,
+ eDisplayModeKhr = VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT,
+ eObjectTableNvx = VK_DEBUG_REPORT_OBJECT_TYPE_OBJECT_TABLE_NVX_EXT,
+ eIndirectCommandsLayoutNvx = VK_DEBUG_REPORT_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX_EXT,
+ eDescriptorUpdateTemplateKHR = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT
+ };
+
+ struct DebugMarkerObjectNameInfoEXT
+ {
+ DebugMarkerObjectNameInfoEXT( DebugReportObjectTypeEXT objectType_ = DebugReportObjectTypeEXT::eUnknown, uint64_t object_ = 0, const char* pObjectName_ = nullptr )
+ : sType( StructureType::eDebugMarkerObjectNameInfoEXT )
+ , pNext( nullptr )
+ , objectType( objectType_ )
+ , object( object_ )
+ , pObjectName( pObjectName_ )
+ {
+ }
+
+ DebugMarkerObjectNameInfoEXT( VkDebugMarkerObjectNameInfoEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DebugMarkerObjectNameInfoEXT) );
+ }
+
+ DebugMarkerObjectNameInfoEXT& operator=( VkDebugMarkerObjectNameInfoEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DebugMarkerObjectNameInfoEXT) );
+ return *this;
+ }
+
+ DebugMarkerObjectNameInfoEXT& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DebugMarkerObjectNameInfoEXT& setObjectType( DebugReportObjectTypeEXT objectType_ )
+ {
+ objectType = objectType_;
+ return *this;
+ }
+
+ DebugMarkerObjectNameInfoEXT& setObject( uint64_t object_ )
+ {
+ object = object_;
+ return *this;
+ }
+
+ DebugMarkerObjectNameInfoEXT& setPObjectName( const char* pObjectName_ )
+ {
+ pObjectName = pObjectName_;
+ return *this;
+ }
+
+ operator const VkDebugMarkerObjectNameInfoEXT&() const
+ {
+ return *reinterpret_cast<const VkDebugMarkerObjectNameInfoEXT*>(this);
+ }
+
+ bool operator==( DebugMarkerObjectNameInfoEXT const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( objectType == rhs.objectType )
+ && ( object == rhs.object )
+ && ( pObjectName == rhs.pObjectName );
+ }
+
+ bool operator!=( DebugMarkerObjectNameInfoEXT const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ DebugReportObjectTypeEXT objectType;
+ uint64_t object;
+ const char* pObjectName;
+ };
+ static_assert( sizeof( DebugMarkerObjectNameInfoEXT ) == sizeof( VkDebugMarkerObjectNameInfoEXT ), "struct and wrapper have different size!" );
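+
+ // Usage sketch: giving a buffer a human-readable name for debug tooling. Assumes device
+ // is a vk::Device with VK_EXT_debug_marker enabled and buffer is an existing vk::Buffer.
+ //
+ //   vk::DebugMarkerObjectNameInfoEXT nameInfo;
+ //   nameInfo.setObjectType( vk::DebugReportObjectTypeEXT::eBuffer )
+ //           .setObject( (uint64_t)( VkBuffer )buffer )
+ //           .setPObjectName( "vertex buffer" );
+ //   device.debugMarkerSetObjectNameEXT( nameInfo );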
+
+ struct DebugMarkerObjectTagInfoEXT
+ {
+ DebugMarkerObjectTagInfoEXT( DebugReportObjectTypeEXT objectType_ = DebugReportObjectTypeEXT::eUnknown, uint64_t object_ = 0, uint64_t tagName_ = 0, size_t tagSize_ = 0, const void* pTag_ = nullptr )
+ : sType( StructureType::eDebugMarkerObjectTagInfoEXT )
+ , pNext( nullptr )
+ , objectType( objectType_ )
+ , object( object_ )
+ , tagName( tagName_ )
+ , tagSize( tagSize_ )
+ , pTag( pTag_ )
+ {
+ }
+
+ DebugMarkerObjectTagInfoEXT( VkDebugMarkerObjectTagInfoEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DebugMarkerObjectTagInfoEXT) );
+ }
+
+ DebugMarkerObjectTagInfoEXT& operator=( VkDebugMarkerObjectTagInfoEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DebugMarkerObjectTagInfoEXT) );
+ return *this;
+ }
+
+ DebugMarkerObjectTagInfoEXT& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DebugMarkerObjectTagInfoEXT& setObjectType( DebugReportObjectTypeEXT objectType_ )
+ {
+ objectType = objectType_;
+ return *this;
+ }
+
+ DebugMarkerObjectTagInfoEXT& setObject( uint64_t object_ )
+ {
+ object = object_;
+ return *this;
+ }
+
+ DebugMarkerObjectTagInfoEXT& setTagName( uint64_t tagName_ )
+ {
+ tagName = tagName_;
+ return *this;
+ }
+
+ DebugMarkerObjectTagInfoEXT& setTagSize( size_t tagSize_ )
+ {
+ tagSize = tagSize_;
+ return *this;
+ }
+
+ DebugMarkerObjectTagInfoEXT& setPTag( const void* pTag_ )
+ {
+ pTag = pTag_;
+ return *this;
+ }
+
+ operator const VkDebugMarkerObjectTagInfoEXT&() const
+ {
+ return *reinterpret_cast<const VkDebugMarkerObjectTagInfoEXT*>(this);
+ }
+
+ bool operator==( DebugMarkerObjectTagInfoEXT const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( objectType == rhs.objectType )
+ && ( object == rhs.object )
+ && ( tagName == rhs.tagName )
+ && ( tagSize == rhs.tagSize )
+ && ( pTag == rhs.pTag );
+ }
+
+ bool operator!=( DebugMarkerObjectTagInfoEXT const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ DebugReportObjectTypeEXT objectType;
+ uint64_t object;
+ uint64_t tagName;
+ size_t tagSize;
+ const void* pTag;
+ };
+ static_assert( sizeof( DebugMarkerObjectTagInfoEXT ) == sizeof( VkDebugMarkerObjectTagInfoEXT ), "struct and wrapper have different size!" );
+
+ enum class DebugReportErrorEXT
+ {
+ eNone = VK_DEBUG_REPORT_ERROR_NONE_EXT,
+ eCallbackRef = VK_DEBUG_REPORT_ERROR_CALLBACK_REF_EXT
+ };
+
+ enum class RasterizationOrderAMD
+ {
+ eStrict = VK_RASTERIZATION_ORDER_STRICT_AMD,
+ eRelaxed = VK_RASTERIZATION_ORDER_RELAXED_AMD
+ };
+
+ struct PipelineRasterizationStateRasterizationOrderAMD
+ {
+ PipelineRasterizationStateRasterizationOrderAMD( RasterizationOrderAMD rasterizationOrder_ = RasterizationOrderAMD::eStrict )
+ : sType( StructureType::ePipelineRasterizationStateRasterizationOrderAMD )
+ , pNext( nullptr )
+ , rasterizationOrder( rasterizationOrder_ )
+ {
+ }
+
+ PipelineRasterizationStateRasterizationOrderAMD( VkPipelineRasterizationStateRasterizationOrderAMD const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineRasterizationStateRasterizationOrderAMD) );
+ }
+
+ PipelineRasterizationStateRasterizationOrderAMD& operator=( VkPipelineRasterizationStateRasterizationOrderAMD const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineRasterizationStateRasterizationOrderAMD) );
+ return *this;
+ }
+
+ PipelineRasterizationStateRasterizationOrderAMD& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PipelineRasterizationStateRasterizationOrderAMD& setRasterizationOrder( RasterizationOrderAMD rasterizationOrder_ )
+ {
+ rasterizationOrder = rasterizationOrder_;
+ return *this;
+ }
+
+ operator const VkPipelineRasterizationStateRasterizationOrderAMD&() const
+ {
+ return *reinterpret_cast<const VkPipelineRasterizationStateRasterizationOrderAMD*>(this);
+ }
+
+ bool operator==( PipelineRasterizationStateRasterizationOrderAMD const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( rasterizationOrder == rhs.rasterizationOrder );
+ }
+
+ bool operator!=( PipelineRasterizationStateRasterizationOrderAMD const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ RasterizationOrderAMD rasterizationOrder;
+ };
+ static_assert( sizeof( PipelineRasterizationStateRasterizationOrderAMD ) == sizeof( VkPipelineRasterizationStateRasterizationOrderAMD ), "struct and wrapper have different size!" );
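+
+ // Usage sketch: opting into relaxed rasterization order through the pNext chain.
+ // Assumes rasterState is a vk::PipelineRasterizationStateCreateInfo filled in elsewhere
+ // and the VK_AMD_rasterization_order extension is enabled.
+ //
+ //   vk::PipelineRasterizationStateRasterizationOrderAMD rasterOrder( vk::RasterizationOrderAMD::eRelaxed );
+ //   rasterState.setPNext( &rasterOrder );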
+
+ enum class ExternalMemoryHandleTypeFlagBitsNV
+ {
+ eOpaqueWin32 = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_NV,
+ eOpaqueWin32Kmt = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_NV,
+ eD3D11Image = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_BIT_NV,
+ eD3D11ImageKmt = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_KMT_BIT_NV
+ };
+
+ using ExternalMemoryHandleTypeFlagsNV = Flags<ExternalMemoryHandleTypeFlagBitsNV, VkExternalMemoryHandleTypeFlagsNV>;
+
+ VULKAN_HPP_INLINE ExternalMemoryHandleTypeFlagsNV operator|( ExternalMemoryHandleTypeFlagBitsNV bit0, ExternalMemoryHandleTypeFlagBitsNV bit1 )
+ {
+ return ExternalMemoryHandleTypeFlagsNV( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE ExternalMemoryHandleTypeFlagsNV operator~( ExternalMemoryHandleTypeFlagBitsNV bits )
+ {
+ return ~( ExternalMemoryHandleTypeFlagsNV( bits ) );
+ }
+
+ template <> struct FlagTraits<ExternalMemoryHandleTypeFlagBitsNV>
+ {
+ enum
+ {
+ allFlags = VkFlags(ExternalMemoryHandleTypeFlagBitsNV::eOpaqueWin32) | VkFlags(ExternalMemoryHandleTypeFlagBitsNV::eOpaqueWin32Kmt) | VkFlags(ExternalMemoryHandleTypeFlagBitsNV::eD3D11Image) | VkFlags(ExternalMemoryHandleTypeFlagBitsNV::eD3D11ImageKmt)
+ };
+ };
+
+ struct ExternalMemoryImageCreateInfoNV
+ {
+ ExternalMemoryImageCreateInfoNV( ExternalMemoryHandleTypeFlagsNV handleTypes_ = ExternalMemoryHandleTypeFlagsNV() )
+ : sType( StructureType::eExternalMemoryImageCreateInfoNV )
+ , pNext( nullptr )
+ , handleTypes( handleTypes_ )
+ {
+ }
+
+ ExternalMemoryImageCreateInfoNV( VkExternalMemoryImageCreateInfoNV const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ExternalMemoryImageCreateInfoNV) );
+ }
+
+ ExternalMemoryImageCreateInfoNV& operator=( VkExternalMemoryImageCreateInfoNV const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ExternalMemoryImageCreateInfoNV) );
+ return *this;
+ }
+
+ ExternalMemoryImageCreateInfoNV& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ ExternalMemoryImageCreateInfoNV& setHandleTypes( ExternalMemoryHandleTypeFlagsNV handleTypes_ )
+ {
+ handleTypes = handleTypes_;
+ return *this;
+ }
+
+ operator const VkExternalMemoryImageCreateInfoNV&() const
+ {
+ return *reinterpret_cast<const VkExternalMemoryImageCreateInfoNV*>(this);
+ }
+
+ bool operator==( ExternalMemoryImageCreateInfoNV const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( handleTypes == rhs.handleTypes );
+ }
+
+ bool operator!=( ExternalMemoryImageCreateInfoNV const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ ExternalMemoryHandleTypeFlagsNV handleTypes;
+ };
+ static_assert( sizeof( ExternalMemoryImageCreateInfoNV ) == sizeof( VkExternalMemoryImageCreateInfoNV ), "struct and wrapper have different size!" );
+
+ struct ExportMemoryAllocateInfoNV
+ {
+ ExportMemoryAllocateInfoNV( ExternalMemoryHandleTypeFlagsNV handleTypes_ = ExternalMemoryHandleTypeFlagsNV() )
+ : sType( StructureType::eExportMemoryAllocateInfoNV )
+ , pNext( nullptr )
+ , handleTypes( handleTypes_ )
+ {
+ }
+
+ ExportMemoryAllocateInfoNV( VkExportMemoryAllocateInfoNV const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ExportMemoryAllocateInfoNV) );
+ }
+
+ ExportMemoryAllocateInfoNV& operator=( VkExportMemoryAllocateInfoNV const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ExportMemoryAllocateInfoNV) );
+ return *this;
+ }
+
+ ExportMemoryAllocateInfoNV& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ ExportMemoryAllocateInfoNV& setHandleTypes( ExternalMemoryHandleTypeFlagsNV handleTypes_ )
+ {
+ handleTypes = handleTypes_;
+ return *this;
+ }
+
+ operator const VkExportMemoryAllocateInfoNV&() const
+ {
+ return *reinterpret_cast<const VkExportMemoryAllocateInfoNV*>(this);
+ }
+
+ bool operator==( ExportMemoryAllocateInfoNV const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( handleTypes == rhs.handleTypes );
+ }
+
+ bool operator!=( ExportMemoryAllocateInfoNV const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ ExternalMemoryHandleTypeFlagsNV handleTypes;
+ };
+ static_assert( sizeof( ExportMemoryAllocateInfoNV ) == sizeof( VkExportMemoryAllocateInfoNV ), "struct and wrapper have different size!" );
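+
+ // Usage sketch: asking for exportable memory on the NV external-memory path.
+ // Assumes allocInfo is a vk::MemoryAllocateInfo prepared elsewhere and the relevant
+ // VK_NV_external_memory extensions are enabled.
+ //
+ //   vk::ExportMemoryAllocateInfoNV exportInfo( vk::ExternalMemoryHandleTypeFlagBitsNV::eOpaqueWin32 );
+ //   allocInfo.setPNext( &exportInfo );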
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ struct ImportMemoryWin32HandleInfoNV
+ {
+ ImportMemoryWin32HandleInfoNV( ExternalMemoryHandleTypeFlagsNV handleType_ = ExternalMemoryHandleTypeFlagsNV(), HANDLE handle_ = 0 )
+ : sType( StructureType::eImportMemoryWin32HandleInfoNV )
+ , pNext( nullptr )
+ , handleType( handleType_ )
+ , handle( handle_ )
+ {
+ }
+
+ ImportMemoryWin32HandleInfoNV( VkImportMemoryWin32HandleInfoNV const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImportMemoryWin32HandleInfoNV) );
+ }
+
+ ImportMemoryWin32HandleInfoNV& operator=( VkImportMemoryWin32HandleInfoNV const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImportMemoryWin32HandleInfoNV) );
+ return *this;
+ }
+
+ ImportMemoryWin32HandleInfoNV& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ ImportMemoryWin32HandleInfoNV& setHandleType( ExternalMemoryHandleTypeFlagsNV handleType_ )
+ {
+ handleType = handleType_;
+ return *this;
+ }
+
+ ImportMemoryWin32HandleInfoNV& setHandle( HANDLE handle_ )
+ {
+ handle = handle_;
+ return *this;
+ }
+
+ operator const VkImportMemoryWin32HandleInfoNV&() const
+ {
+ return *reinterpret_cast<const VkImportMemoryWin32HandleInfoNV*>(this);
+ }
+
+ bool operator==( ImportMemoryWin32HandleInfoNV const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( handleType == rhs.handleType )
+ && ( handle == rhs.handle );
+ }
+
+ bool operator!=( ImportMemoryWin32HandleInfoNV const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ ExternalMemoryHandleTypeFlagsNV handleType;
+ HANDLE handle;
+ };
+ static_assert( sizeof( ImportMemoryWin32HandleInfoNV ) == sizeof( VkImportMemoryWin32HandleInfoNV ), "struct and wrapper have different size!" );
+#endif /*VK_USE_PLATFORM_WIN32_KHR*/
+
+ enum class ExternalMemoryFeatureFlagBitsNV
+ {
+ eDedicatedOnly = VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_NV,
+ eExportable = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_NV,
+ eImportable = VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_NV
+ };
+
+ using ExternalMemoryFeatureFlagsNV = Flags<ExternalMemoryFeatureFlagBitsNV, VkExternalMemoryFeatureFlagsNV>;
+
+ VULKAN_HPP_INLINE ExternalMemoryFeatureFlagsNV operator|( ExternalMemoryFeatureFlagBitsNV bit0, ExternalMemoryFeatureFlagBitsNV bit1 )
+ {
+ return ExternalMemoryFeatureFlagsNV( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE ExternalMemoryFeatureFlagsNV operator~( ExternalMemoryFeatureFlagBitsNV bits )
+ {
+ return ~( ExternalMemoryFeatureFlagsNV( bits ) );
+ }
+
+ template <> struct FlagTraits<ExternalMemoryFeatureFlagBitsNV>
+ {
+ enum
+ {
+ allFlags = VkFlags(ExternalMemoryFeatureFlagBitsNV::eDedicatedOnly) | VkFlags(ExternalMemoryFeatureFlagBitsNV::eExportable) | VkFlags(ExternalMemoryFeatureFlagBitsNV::eImportable)
+ };
+ };
+
+ struct ExternalImageFormatPropertiesNV
+ {
+ operator const VkExternalImageFormatPropertiesNV&() const
+ {
+ return *reinterpret_cast<const VkExternalImageFormatPropertiesNV*>(this);
+ }
+
+ bool operator==( ExternalImageFormatPropertiesNV const& rhs ) const
+ {
+ return ( imageFormatProperties == rhs.imageFormatProperties )
+ && ( externalMemoryFeatures == rhs.externalMemoryFeatures )
+ && ( exportFromImportedHandleTypes == rhs.exportFromImportedHandleTypes )
+ && ( compatibleHandleTypes == rhs.compatibleHandleTypes );
+ }
+
+ bool operator!=( ExternalImageFormatPropertiesNV const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ ImageFormatProperties imageFormatProperties;
+ ExternalMemoryFeatureFlagsNV externalMemoryFeatures;
+ ExternalMemoryHandleTypeFlagsNV exportFromImportedHandleTypes;
+ ExternalMemoryHandleTypeFlagsNV compatibleHandleTypes;
+ };
+ static_assert( sizeof( ExternalImageFormatPropertiesNV ) == sizeof( VkExternalImageFormatPropertiesNV ), "struct and wrapper have different size!" );
+
+ enum class ValidationCheckEXT
+ {
+ eAll = VK_VALIDATION_CHECK_ALL_EXT
+ };
+
+ struct ValidationFlagsEXT
+ {
+ ValidationFlagsEXT( uint32_t disabledValidationCheckCount_ = 0, ValidationCheckEXT* pDisabledValidationChecks_ = nullptr )
+ : sType( StructureType::eValidationFlagsEXT )
+ , pNext( nullptr )
+ , disabledValidationCheckCount( disabledValidationCheckCount_ )
+ , pDisabledValidationChecks( pDisabledValidationChecks_ )
+ {
+ }
+
+ ValidationFlagsEXT( VkValidationFlagsEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ValidationFlagsEXT) );
+ }
+
+ ValidationFlagsEXT& operator=( VkValidationFlagsEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ValidationFlagsEXT) );
+ return *this;
+ }
+
+ ValidationFlagsEXT& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ ValidationFlagsEXT& setDisabledValidationCheckCount( uint32_t disabledValidationCheckCount_ )
+ {
+ disabledValidationCheckCount = disabledValidationCheckCount_;
+ return *this;
+ }
+
+ ValidationFlagsEXT& setPDisabledValidationChecks( ValidationCheckEXT* pDisabledValidationChecks_ )
+ {
+ pDisabledValidationChecks = pDisabledValidationChecks_;
+ return *this;
+ }
+
+ operator const VkValidationFlagsEXT&() const
+ {
+ return *reinterpret_cast<const VkValidationFlagsEXT*>(this);
+ }
+
+ bool operator==( ValidationFlagsEXT const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( disabledValidationCheckCount == rhs.disabledValidationCheckCount )
+ && ( pDisabledValidationChecks == rhs.pDisabledValidationChecks );
+ }
+
+ bool operator!=( ValidationFlagsEXT const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ uint32_t disabledValidationCheckCount;
+ ValidationCheckEXT* pDisabledValidationChecks;
+ };
+ static_assert( sizeof( ValidationFlagsEXT ) == sizeof( VkValidationFlagsEXT ), "struct and wrapper have different size!" );
+
+ enum class IndirectCommandsLayoutUsageFlagBitsNVX
+ {
+ eUnorderedSequences = VK_INDIRECT_COMMANDS_LAYOUT_USAGE_UNORDERED_SEQUENCES_BIT_NVX,
+ eSparseSequences = VK_INDIRECT_COMMANDS_LAYOUT_USAGE_SPARSE_SEQUENCES_BIT_NVX,
+ eEmptyExecutions = VK_INDIRECT_COMMANDS_LAYOUT_USAGE_EMPTY_EXECUTIONS_BIT_NVX,
+ eIndexedSequences = VK_INDIRECT_COMMANDS_LAYOUT_USAGE_INDEXED_SEQUENCES_BIT_NVX
+ };
+
+ using IndirectCommandsLayoutUsageFlagsNVX = Flags<IndirectCommandsLayoutUsageFlagBitsNVX, VkIndirectCommandsLayoutUsageFlagsNVX>;
+
+ VULKAN_HPP_INLINE IndirectCommandsLayoutUsageFlagsNVX operator|( IndirectCommandsLayoutUsageFlagBitsNVX bit0, IndirectCommandsLayoutUsageFlagBitsNVX bit1 )
+ {
+ return IndirectCommandsLayoutUsageFlagsNVX( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE IndirectCommandsLayoutUsageFlagsNVX operator~( IndirectCommandsLayoutUsageFlagBitsNVX bits )
+ {
+ return ~( IndirectCommandsLayoutUsageFlagsNVX( bits ) );
+ }
+
+ template <> struct FlagTraits<IndirectCommandsLayoutUsageFlagBitsNVX>
+ {
+ enum
+ {
+ allFlags = VkFlags(IndirectCommandsLayoutUsageFlagBitsNVX::eUnorderedSequences) | VkFlags(IndirectCommandsLayoutUsageFlagBitsNVX::eSparseSequences) | VkFlags(IndirectCommandsLayoutUsageFlagBitsNVX::eEmptyExecutions) | VkFlags(IndirectCommandsLayoutUsageFlagBitsNVX::eIndexedSequences)
+ };
+ };
+
+ enum class ObjectEntryUsageFlagBitsNVX
+ {
+ eGraphics = VK_OBJECT_ENTRY_USAGE_GRAPHICS_BIT_NVX,
+ eCompute = VK_OBJECT_ENTRY_USAGE_COMPUTE_BIT_NVX
+ };
+
+ using ObjectEntryUsageFlagsNVX = Flags<ObjectEntryUsageFlagBitsNVX, VkObjectEntryUsageFlagsNVX>;
+
+ VULKAN_HPP_INLINE ObjectEntryUsageFlagsNVX operator|( ObjectEntryUsageFlagBitsNVX bit0, ObjectEntryUsageFlagBitsNVX bit1 )
+ {
+ return ObjectEntryUsageFlagsNVX( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE ObjectEntryUsageFlagsNVX operator~( ObjectEntryUsageFlagBitsNVX bits )
+ {
+ return ~( ObjectEntryUsageFlagsNVX( bits ) );
+ }
+
+ template <> struct FlagTraits<ObjectEntryUsageFlagBitsNVX>
+ {
+ enum
+ {
+ allFlags = VkFlags(ObjectEntryUsageFlagBitsNVX::eGraphics) | VkFlags(ObjectEntryUsageFlagBitsNVX::eCompute)
+ };
+ };
+
+ enum class IndirectCommandsTokenTypeNVX
+ {
+ eVkIndirectCommandsTokenPipeline = VK_INDIRECT_COMMANDS_TOKEN_PIPELINE_NVX,
+ eVkIndirectCommandsTokenDescriptorSet = VK_INDIRECT_COMMANDS_TOKEN_DESCRIPTOR_SET_NVX,
+ eVkIndirectCommandsTokenIndexBuffer = VK_INDIRECT_COMMANDS_TOKEN_INDEX_BUFFER_NVX,
+ eVkIndirectCommandsTokenVertexBuffer = VK_INDIRECT_COMMANDS_TOKEN_VERTEX_BUFFER_NVX,
+ eVkIndirectCommandsTokenPushConstant = VK_INDIRECT_COMMANDS_TOKEN_PUSH_CONSTANT_NVX,
+ eVkIndirectCommandsTokenDrawIndexed = VK_INDIRECT_COMMANDS_TOKEN_DRAW_INDEXED_NVX,
+ eVkIndirectCommandsTokenDraw = VK_INDIRECT_COMMANDS_TOKEN_DRAW_NVX,
+ eVkIndirectCommandsTokenDispatch = VK_INDIRECT_COMMANDS_TOKEN_DISPATCH_NVX
+ };
+
+ struct IndirectCommandsTokenNVX
+ {
+ IndirectCommandsTokenNVX( IndirectCommandsTokenTypeNVX tokenType_ = IndirectCommandsTokenTypeNVX::eVkIndirectCommandsTokenPipeline, Buffer buffer_ = Buffer(), DeviceSize offset_ = 0 )
+ : tokenType( tokenType_ )
+ , buffer( buffer_ )
+ , offset( offset_ )
+ {
+ }
+
+ IndirectCommandsTokenNVX( VkIndirectCommandsTokenNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(IndirectCommandsTokenNVX) );
+ }
+
+ IndirectCommandsTokenNVX& operator=( VkIndirectCommandsTokenNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(IndirectCommandsTokenNVX) );
+ return *this;
+ }
+
+ IndirectCommandsTokenNVX& setTokenType( IndirectCommandsTokenTypeNVX tokenType_ )
+ {
+ tokenType = tokenType_;
+ return *this;
+ }
+
+ IndirectCommandsTokenNVX& setBuffer( Buffer buffer_ )
+ {
+ buffer = buffer_;
+ return *this;
+ }
+
+ IndirectCommandsTokenNVX& setOffset( DeviceSize offset_ )
+ {
+ offset = offset_;
+ return *this;
+ }
+
+ operator const VkIndirectCommandsTokenNVX&() const
+ {
+ return *reinterpret_cast<const VkIndirectCommandsTokenNVX*>(this);
+ }
+
+ bool operator==( IndirectCommandsTokenNVX const& rhs ) const
+ {
+ return ( tokenType == rhs.tokenType )
+ && ( buffer == rhs.buffer )
+ && ( offset == rhs.offset );
+ }
+
+ bool operator!=( IndirectCommandsTokenNVX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ IndirectCommandsTokenTypeNVX tokenType;
+ Buffer buffer;
+ DeviceSize offset;
+ };
+ static_assert( sizeof( IndirectCommandsTokenNVX ) == sizeof( VkIndirectCommandsTokenNVX ), "struct and wrapper have different size!" );
+
+ struct IndirectCommandsLayoutTokenNVX
+ {
+ IndirectCommandsLayoutTokenNVX( IndirectCommandsTokenTypeNVX tokenType_ = IndirectCommandsTokenTypeNVX::eVkIndirectCommandsTokenPipeline, uint32_t bindingUnit_ = 0, uint32_t dynamicCount_ = 0, uint32_t divisor_ = 0 )
+ : tokenType( tokenType_ )
+ , bindingUnit( bindingUnit_ )
+ , dynamicCount( dynamicCount_ )
+ , divisor( divisor_ )
+ {
+ }
+
+ IndirectCommandsLayoutTokenNVX( VkIndirectCommandsLayoutTokenNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(IndirectCommandsLayoutTokenNVX) );
+ }
+
+ IndirectCommandsLayoutTokenNVX& operator=( VkIndirectCommandsLayoutTokenNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(IndirectCommandsLayoutTokenNVX) );
+ return *this;
+ }
+
+ IndirectCommandsLayoutTokenNVX& setTokenType( IndirectCommandsTokenTypeNVX tokenType_ )
+ {
+ tokenType = tokenType_;
+ return *this;
+ }
+
+ IndirectCommandsLayoutTokenNVX& setBindingUnit( uint32_t bindingUnit_ )
+ {
+ bindingUnit = bindingUnit_;
+ return *this;
+ }
+
+ IndirectCommandsLayoutTokenNVX& setDynamicCount( uint32_t dynamicCount_ )
+ {
+ dynamicCount = dynamicCount_;
+ return *this;
+ }
+
+ IndirectCommandsLayoutTokenNVX& setDivisor( uint32_t divisor_ )
+ {
+ divisor = divisor_;
+ return *this;
+ }
+
+ operator const VkIndirectCommandsLayoutTokenNVX&() const
+ {
+ return *reinterpret_cast<const VkIndirectCommandsLayoutTokenNVX*>(this);
+ }
+
+ bool operator==( IndirectCommandsLayoutTokenNVX const& rhs ) const
+ {
+ return ( tokenType == rhs.tokenType )
+ && ( bindingUnit == rhs.bindingUnit )
+ && ( dynamicCount == rhs.dynamicCount )
+ && ( divisor == rhs.divisor );
+ }
+
+ bool operator!=( IndirectCommandsLayoutTokenNVX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ IndirectCommandsTokenTypeNVX tokenType;
+ uint32_t bindingUnit;
+ uint32_t dynamicCount;
+ uint32_t divisor;
+ };
+ static_assert( sizeof( IndirectCommandsLayoutTokenNVX ) == sizeof( VkIndirectCommandsLayoutTokenNVX ), "struct and wrapper have different size!" );
+
+ struct IndirectCommandsLayoutCreateInfoNVX
+ {
+ IndirectCommandsLayoutCreateInfoNVX( PipelineBindPoint pipelineBindPoint_ = PipelineBindPoint::eGraphics, IndirectCommandsLayoutUsageFlagsNVX flags_ = IndirectCommandsLayoutUsageFlagsNVX(), uint32_t tokenCount_ = 0, const IndirectCommandsLayoutTokenNVX* pTokens_ = nullptr )
+ : sType( StructureType::eIndirectCommandsLayoutCreateInfoNVX )
+ , pNext( nullptr )
+ , pipelineBindPoint( pipelineBindPoint_ )
+ , flags( flags_ )
+ , tokenCount( tokenCount_ )
+ , pTokens( pTokens_ )
+ {
+ }
+
+ IndirectCommandsLayoutCreateInfoNVX( VkIndirectCommandsLayoutCreateInfoNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(IndirectCommandsLayoutCreateInfoNVX) );
+ }
+
+ IndirectCommandsLayoutCreateInfoNVX& operator=( VkIndirectCommandsLayoutCreateInfoNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(IndirectCommandsLayoutCreateInfoNVX) );
+ return *this;
+ }
+
+ IndirectCommandsLayoutCreateInfoNVX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ IndirectCommandsLayoutCreateInfoNVX& setPipelineBindPoint( PipelineBindPoint pipelineBindPoint_ )
+ {
+ pipelineBindPoint = pipelineBindPoint_;
+ return *this;
+ }
+
+ IndirectCommandsLayoutCreateInfoNVX& setFlags( IndirectCommandsLayoutUsageFlagsNVX flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ IndirectCommandsLayoutCreateInfoNVX& setTokenCount( uint32_t tokenCount_ )
+ {
+ tokenCount = tokenCount_;
+ return *this;
+ }
+
+ IndirectCommandsLayoutCreateInfoNVX& setPTokens( const IndirectCommandsLayoutTokenNVX* pTokens_ )
+ {
+ pTokens = pTokens_;
+ return *this;
+ }
+
+ operator const VkIndirectCommandsLayoutCreateInfoNVX&() const
+ {
+ return *reinterpret_cast<const VkIndirectCommandsLayoutCreateInfoNVX*>(this);
+ }
+
+ bool operator==( IndirectCommandsLayoutCreateInfoNVX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( pipelineBindPoint == rhs.pipelineBindPoint )
+ && ( flags == rhs.flags )
+ && ( tokenCount == rhs.tokenCount )
+ && ( pTokens == rhs.pTokens );
+ }
+
+ bool operator!=( IndirectCommandsLayoutCreateInfoNVX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ PipelineBindPoint pipelineBindPoint;
+ IndirectCommandsLayoutUsageFlagsNVX flags;
+ uint32_t tokenCount;
+ const IndirectCommandsLayoutTokenNVX* pTokens;
+ };
+ static_assert( sizeof( IndirectCommandsLayoutCreateInfoNVX ) == sizeof( VkIndirectCommandsLayoutCreateInfoNVX ), "struct and wrapper have different size!" );
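+
+ // Usage sketch: describing a two-token generated-commands layout for the NVX
+ // device-generated-commands path; the token order and divisors are illustrative.
+ //
+ //   vk::IndirectCommandsLayoutTokenNVX tokens[2] = {
+ //     vk::IndirectCommandsLayoutTokenNVX( vk::IndirectCommandsTokenTypeNVX::eVkIndirectCommandsTokenVertexBuffer, 0, 0, 1 ),
+ //     vk::IndirectCommandsLayoutTokenNVX( vk::IndirectCommandsTokenTypeNVX::eVkIndirectCommandsTokenDraw, 0, 0, 1 )
+ //   };
+ //   vk::IndirectCommandsLayoutCreateInfoNVX layoutInfo( vk::PipelineBindPoint::eGraphics,
+ //     vk::IndirectCommandsLayoutUsageFlagBitsNVX::eUnorderedSequences, 2, tokens );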
+
+ enum class ObjectEntryTypeNVX
+ {
+ eVkObjectEntryDescriptorSet = VK_OBJECT_ENTRY_DESCRIPTOR_SET_NVX,
+ eVkObjectEntryPipeline = VK_OBJECT_ENTRY_PIPELINE_NVX,
+ eVkObjectEntryIndexBuffer = VK_OBJECT_ENTRY_INDEX_BUFFER_NVX,
+ eVkObjectEntryVertexBuffer = VK_OBJECT_ENTRY_VERTEX_BUFFER_NVX,
+ eVkObjectEntryPushConstant = VK_OBJECT_ENTRY_PUSH_CONSTANT_NVX
+ };
+
+ struct ObjectTableCreateInfoNVX
+ {
+ ObjectTableCreateInfoNVX( uint32_t objectCount_ = 0, const ObjectEntryTypeNVX* pObjectEntryTypes_ = nullptr, const uint32_t* pObjectEntryCounts_ = nullptr, const ObjectEntryUsageFlagsNVX* pObjectEntryUsageFlags_ = nullptr, uint32_t maxUniformBuffersPerDescriptor_ = 0, uint32_t maxStorageBuffersPerDescriptor_ = 0, uint32_t maxStorageImagesPerDescriptor_ = 0, uint32_t maxSampledImagesPerDescriptor_ = 0, uint32_t maxPipelineLayouts_ = 0 )
+ : sType( StructureType::eObjectTableCreateInfoNVX )
+ , pNext( nullptr )
+ , objectCount( objectCount_ )
+ , pObjectEntryTypes( pObjectEntryTypes_ )
+ , pObjectEntryCounts( pObjectEntryCounts_ )
+ , pObjectEntryUsageFlags( pObjectEntryUsageFlags_ )
+ , maxUniformBuffersPerDescriptor( maxUniformBuffersPerDescriptor_ )
+ , maxStorageBuffersPerDescriptor( maxStorageBuffersPerDescriptor_ )
+ , maxStorageImagesPerDescriptor( maxStorageImagesPerDescriptor_ )
+ , maxSampledImagesPerDescriptor( maxSampledImagesPerDescriptor_ )
+ , maxPipelineLayouts( maxPipelineLayouts_ )
+ {
+ }
+
+ ObjectTableCreateInfoNVX( VkObjectTableCreateInfoNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ObjectTableCreateInfoNVX) );
+ }
+
+ ObjectTableCreateInfoNVX& operator=( VkObjectTableCreateInfoNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ObjectTableCreateInfoNVX) );
+ return *this;
+ }
+
+ ObjectTableCreateInfoNVX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ ObjectTableCreateInfoNVX& setObjectCount( uint32_t objectCount_ )
+ {
+ objectCount = objectCount_;
+ return *this;
+ }
+
+ ObjectTableCreateInfoNVX& setPObjectEntryTypes( const ObjectEntryTypeNVX* pObjectEntryTypes_ )
+ {
+ pObjectEntryTypes = pObjectEntryTypes_;
+ return *this;
+ }
+
+ ObjectTableCreateInfoNVX& setPObjectEntryCounts( const uint32_t* pObjectEntryCounts_ )
+ {
+ pObjectEntryCounts = pObjectEntryCounts_;
+ return *this;
+ }
+
+ ObjectTableCreateInfoNVX& setPObjectEntryUsageFlags( const ObjectEntryUsageFlagsNVX* pObjectEntryUsageFlags_ )
+ {
+ pObjectEntryUsageFlags = pObjectEntryUsageFlags_;
+ return *this;
+ }
+
+ ObjectTableCreateInfoNVX& setMaxUniformBuffersPerDescriptor( uint32_t maxUniformBuffersPerDescriptor_ )
+ {
+ maxUniformBuffersPerDescriptor = maxUniformBuffersPerDescriptor_;
+ return *this;
+ }
+
+ ObjectTableCreateInfoNVX& setMaxStorageBuffersPerDescriptor( uint32_t maxStorageBuffersPerDescriptor_ )
+ {
+ maxStorageBuffersPerDescriptor = maxStorageBuffersPerDescriptor_;
+ return *this;
+ }
+
+ ObjectTableCreateInfoNVX& setMaxStorageImagesPerDescriptor( uint32_t maxStorageImagesPerDescriptor_ )
+ {
+ maxStorageImagesPerDescriptor = maxStorageImagesPerDescriptor_;
+ return *this;
+ }
+
+ ObjectTableCreateInfoNVX& setMaxSampledImagesPerDescriptor( uint32_t maxSampledImagesPerDescriptor_ )
+ {
+ maxSampledImagesPerDescriptor = maxSampledImagesPerDescriptor_;
+ return *this;
+ }
+
+ ObjectTableCreateInfoNVX& setMaxPipelineLayouts( uint32_t maxPipelineLayouts_ )
+ {
+ maxPipelineLayouts = maxPipelineLayouts_;
+ return *this;
+ }
+
+ operator const VkObjectTableCreateInfoNVX&() const
+ {
+ return *reinterpret_cast<const VkObjectTableCreateInfoNVX*>(this);
+ }
+
+ bool operator==( ObjectTableCreateInfoNVX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( objectCount == rhs.objectCount )
+ && ( pObjectEntryTypes == rhs.pObjectEntryTypes )
+ && ( pObjectEntryCounts == rhs.pObjectEntryCounts )
+ && ( pObjectEntryUsageFlags == rhs.pObjectEntryUsageFlags )
+ && ( maxUniformBuffersPerDescriptor == rhs.maxUniformBuffersPerDescriptor )
+ && ( maxStorageBuffersPerDescriptor == rhs.maxStorageBuffersPerDescriptor )
+ && ( maxStorageImagesPerDescriptor == rhs.maxStorageImagesPerDescriptor )
+ && ( maxSampledImagesPerDescriptor == rhs.maxSampledImagesPerDescriptor )
+ && ( maxPipelineLayouts == rhs.maxPipelineLayouts );
+ }
+
+ bool operator!=( ObjectTableCreateInfoNVX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ uint32_t objectCount;
+ const ObjectEntryTypeNVX* pObjectEntryTypes;
+ const uint32_t* pObjectEntryCounts;
+ const ObjectEntryUsageFlagsNVX* pObjectEntryUsageFlags;
+ uint32_t maxUniformBuffersPerDescriptor;
+ uint32_t maxStorageBuffersPerDescriptor;
+ uint32_t maxStorageImagesPerDescriptor;
+ uint32_t maxSampledImagesPerDescriptor;
+ uint32_t maxPipelineLayouts;
+ };
+ static_assert( sizeof( ObjectTableCreateInfoNVX ) == sizeof( VkObjectTableCreateInfoNVX ), "struct and wrapper have different size!" );
+
+ struct ObjectTableEntryNVX
+ {
+ ObjectTableEntryNVX( ObjectEntryTypeNVX type_ = ObjectEntryTypeNVX::eVkObjectEntryDescriptorSet, ObjectEntryUsageFlagsNVX flags_ = ObjectEntryUsageFlagsNVX() )
+ : type( type_ )
+ , flags( flags_ )
+ {
+ }
+
+ ObjectTableEntryNVX( VkObjectTableEntryNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ObjectTableEntryNVX) );
+ }
+
+ ObjectTableEntryNVX& operator=( VkObjectTableEntryNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ObjectTableEntryNVX) );
+ return *this;
+ }
+
+ ObjectTableEntryNVX& setType( ObjectEntryTypeNVX type_ )
+ {
+ type = type_;
+ return *this;
+ }
+
+ ObjectTableEntryNVX& setFlags( ObjectEntryUsageFlagsNVX flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ operator const VkObjectTableEntryNVX&() const
+ {
+ return *reinterpret_cast<const VkObjectTableEntryNVX*>(this);
+ }
+
+ bool operator==( ObjectTableEntryNVX const& rhs ) const
+ {
+ return ( type == rhs.type )
+ && ( flags == rhs.flags );
+ }
+
+ bool operator!=( ObjectTableEntryNVX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ ObjectEntryTypeNVX type;
+ ObjectEntryUsageFlagsNVX flags;
+ };
+ static_assert( sizeof( ObjectTableEntryNVX ) == sizeof( VkObjectTableEntryNVX ), "struct and wrapper have different size!" );
+
+ struct ObjectTablePipelineEntryNVX
+ {
+ ObjectTablePipelineEntryNVX( ObjectEntryTypeNVX type_ = ObjectEntryTypeNVX::eVkObjectEntryDescriptorSet, ObjectEntryUsageFlagsNVX flags_ = ObjectEntryUsageFlagsNVX(), Pipeline pipeline_ = Pipeline() )
+ : type( type_ )
+ , flags( flags_ )
+ , pipeline( pipeline_ )
+ {
+ }
+
+ ObjectTablePipelineEntryNVX( VkObjectTablePipelineEntryNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ObjectTablePipelineEntryNVX) );
+ }
+
+ ObjectTablePipelineEntryNVX& operator=( VkObjectTablePipelineEntryNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ObjectTablePipelineEntryNVX) );
+ return *this;
+ }
+
+ ObjectTablePipelineEntryNVX& setType( ObjectEntryTypeNVX type_ )
+ {
+ type = type_;
+ return *this;
+ }
+
+ ObjectTablePipelineEntryNVX& setFlags( ObjectEntryUsageFlagsNVX flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ ObjectTablePipelineEntryNVX& setPipeline( Pipeline pipeline_ )
+ {
+ pipeline = pipeline_;
+ return *this;
+ }
+
+ operator const VkObjectTablePipelineEntryNVX&() const
+ {
+ return *reinterpret_cast<const VkObjectTablePipelineEntryNVX*>(this);
+ }
+
+ bool operator==( ObjectTablePipelineEntryNVX const& rhs ) const
+ {
+ return ( type == rhs.type )
+ && ( flags == rhs.flags )
+ && ( pipeline == rhs.pipeline );
+ }
+
+ bool operator!=( ObjectTablePipelineEntryNVX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ ObjectEntryTypeNVX type;
+ ObjectEntryUsageFlagsNVX flags;
+ Pipeline pipeline;
+ };
+ static_assert( sizeof( ObjectTablePipelineEntryNVX ) == sizeof( VkObjectTablePipelineEntryNVX ), "struct and wrapper have different size!" );
+
+ struct ObjectTableDescriptorSetEntryNVX
+ {
+ ObjectTableDescriptorSetEntryNVX( ObjectEntryTypeNVX type_ = ObjectEntryTypeNVX::eVkObjectEntryDescriptorSet, ObjectEntryUsageFlagsNVX flags_ = ObjectEntryUsageFlagsNVX(), PipelineLayout pipelineLayout_ = PipelineLayout(), DescriptorSet descriptorSet_ = DescriptorSet() )
+ : type( type_ )
+ , flags( flags_ )
+ , pipelineLayout( pipelineLayout_ )
+ , descriptorSet( descriptorSet_ )
+ {
+ }
+
+ ObjectTableDescriptorSetEntryNVX( VkObjectTableDescriptorSetEntryNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ObjectTableDescriptorSetEntryNVX) );
+ }
+
+ ObjectTableDescriptorSetEntryNVX& operator=( VkObjectTableDescriptorSetEntryNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ObjectTableDescriptorSetEntryNVX) );
+ return *this;
+ }
+
+ ObjectTableDescriptorSetEntryNVX& setType( ObjectEntryTypeNVX type_ )
+ {
+ type = type_;
+ return *this;
+ }
+
+ ObjectTableDescriptorSetEntryNVX& setFlags( ObjectEntryUsageFlagsNVX flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ ObjectTableDescriptorSetEntryNVX& setPipelineLayout( PipelineLayout pipelineLayout_ )
+ {
+ pipelineLayout = pipelineLayout_;
+ return *this;
+ }
+
+ ObjectTableDescriptorSetEntryNVX& setDescriptorSet( DescriptorSet descriptorSet_ )
+ {
+ descriptorSet = descriptorSet_;
+ return *this;
+ }
+
+ operator const VkObjectTableDescriptorSetEntryNVX&() const
+ {
+ return *reinterpret_cast<const VkObjectTableDescriptorSetEntryNVX*>(this);
+ }
+
+ bool operator==( ObjectTableDescriptorSetEntryNVX const& rhs ) const
+ {
+ return ( type == rhs.type )
+ && ( flags == rhs.flags )
+ && ( pipelineLayout == rhs.pipelineLayout )
+ && ( descriptorSet == rhs.descriptorSet );
+ }
+
+ bool operator!=( ObjectTableDescriptorSetEntryNVX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ ObjectEntryTypeNVX type;
+ ObjectEntryUsageFlagsNVX flags;
+ PipelineLayout pipelineLayout;
+ DescriptorSet descriptorSet;
+ };
+ static_assert( sizeof( ObjectTableDescriptorSetEntryNVX ) == sizeof( VkObjectTableDescriptorSetEntryNVX ), "struct and wrapper have different size!" );
+
+ struct ObjectTableVertexBufferEntryNVX
+ {
+ ObjectTableVertexBufferEntryNVX( ObjectEntryTypeNVX type_ = ObjectEntryTypeNVX::eVkObjectEntryDescriptorSet, ObjectEntryUsageFlagsNVX flags_ = ObjectEntryUsageFlagsNVX(), Buffer buffer_ = Buffer() )
+ : type( type_ )
+ , flags( flags_ )
+ , buffer( buffer_ )
+ {
+ }
+
+ ObjectTableVertexBufferEntryNVX( VkObjectTableVertexBufferEntryNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ObjectTableVertexBufferEntryNVX) );
+ }
+
+ ObjectTableVertexBufferEntryNVX& operator=( VkObjectTableVertexBufferEntryNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ObjectTableVertexBufferEntryNVX) );
+ return *this;
+ }
+
+ ObjectTableVertexBufferEntryNVX& setType( ObjectEntryTypeNVX type_ )
+ {
+ type = type_;
+ return *this;
+ }
+
+ ObjectTableVertexBufferEntryNVX& setFlags( ObjectEntryUsageFlagsNVX flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ ObjectTableVertexBufferEntryNVX& setBuffer( Buffer buffer_ )
+ {
+ buffer = buffer_;
+ return *this;
+ }
+
+ operator const VkObjectTableVertexBufferEntryNVX&() const
+ {
+ return *reinterpret_cast<const VkObjectTableVertexBufferEntryNVX*>(this);
+ }
+
+ bool operator==( ObjectTableVertexBufferEntryNVX const& rhs ) const
+ {
+ return ( type == rhs.type )
+ && ( flags == rhs.flags )
+ && ( buffer == rhs.buffer );
+ }
+
+ bool operator!=( ObjectTableVertexBufferEntryNVX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ ObjectEntryTypeNVX type;
+ ObjectEntryUsageFlagsNVX flags;
+ Buffer buffer;
+ };
+ static_assert( sizeof( ObjectTableVertexBufferEntryNVX ) == sizeof( VkObjectTableVertexBufferEntryNVX ), "struct and wrapper have different size!" );
+
+ struct ObjectTableIndexBufferEntryNVX
+ {
+ ObjectTableIndexBufferEntryNVX( ObjectEntryTypeNVX type_ = ObjectEntryTypeNVX::eVkObjectEntryDescriptorSet, ObjectEntryUsageFlagsNVX flags_ = ObjectEntryUsageFlagsNVX(), Buffer buffer_ = Buffer(), IndexType indexType_ = IndexType::eUint16 )
+ : type( type_ )
+ , flags( flags_ )
+ , buffer( buffer_ )
+ , indexType( indexType_ )
+ {
+ }
+
+ ObjectTableIndexBufferEntryNVX( VkObjectTableIndexBufferEntryNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ObjectTableIndexBufferEntryNVX) );
+ }
+
+ ObjectTableIndexBufferEntryNVX& operator=( VkObjectTableIndexBufferEntryNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ObjectTableIndexBufferEntryNVX) );
+ return *this;
+ }
+
+ ObjectTableIndexBufferEntryNVX& setType( ObjectEntryTypeNVX type_ )
+ {
+ type = type_;
+ return *this;
+ }
+
+ ObjectTableIndexBufferEntryNVX& setFlags( ObjectEntryUsageFlagsNVX flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ ObjectTableIndexBufferEntryNVX& setBuffer( Buffer buffer_ )
+ {
+ buffer = buffer_;
+ return *this;
+ }
+
+ ObjectTableIndexBufferEntryNVX& setIndexType( IndexType indexType_ )
+ {
+ indexType = indexType_;
+ return *this;
+ }
+
+ operator const VkObjectTableIndexBufferEntryNVX&() const
+ {
+ return *reinterpret_cast<const VkObjectTableIndexBufferEntryNVX*>(this);
+ }
+
+ bool operator==( ObjectTableIndexBufferEntryNVX const& rhs ) const
+ {
+ return ( type == rhs.type )
+ && ( flags == rhs.flags )
+ && ( buffer == rhs.buffer )
+ && ( indexType == rhs.indexType );
+ }
+
+ bool operator!=( ObjectTableIndexBufferEntryNVX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ ObjectEntryTypeNVX type;
+ ObjectEntryUsageFlagsNVX flags;
+ Buffer buffer;
+ IndexType indexType;
+ };
+ static_assert( sizeof( ObjectTableIndexBufferEntryNVX ) == sizeof( VkObjectTableIndexBufferEntryNVX ), "struct and wrapper have different size!" );
+
+ struct ObjectTablePushConstantEntryNVX
+ {
+ ObjectTablePushConstantEntryNVX( ObjectEntryTypeNVX type_ = ObjectEntryTypeNVX::eVkObjectEntryDescriptorSet, ObjectEntryUsageFlagsNVX flags_ = ObjectEntryUsageFlagsNVX(), PipelineLayout pipelineLayout_ = PipelineLayout(), ShaderStageFlags stageFlags_ = ShaderStageFlags() )
+ : type( type_ )
+ , flags( flags_ )
+ , pipelineLayout( pipelineLayout_ )
+ , stageFlags( stageFlags_ )
+ {
+ }
+
+ ObjectTablePushConstantEntryNVX( VkObjectTablePushConstantEntryNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ObjectTablePushConstantEntryNVX) );
+ }
+
+ ObjectTablePushConstantEntryNVX& operator=( VkObjectTablePushConstantEntryNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ObjectTablePushConstantEntryNVX) );
+ return *this;
+ }
+
+ ObjectTablePushConstantEntryNVX& setType( ObjectEntryTypeNVX type_ )
+ {
+ type = type_;
+ return *this;
+ }
+
+ ObjectTablePushConstantEntryNVX& setFlags( ObjectEntryUsageFlagsNVX flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ ObjectTablePushConstantEntryNVX& setPipelineLayout( PipelineLayout pipelineLayout_ )
+ {
+ pipelineLayout = pipelineLayout_;
+ return *this;
+ }
+
+ ObjectTablePushConstantEntryNVX& setStageFlags( ShaderStageFlags stageFlags_ )
+ {
+ stageFlags = stageFlags_;
+ return *this;
+ }
+
+ operator const VkObjectTablePushConstantEntryNVX&() const
+ {
+ return *reinterpret_cast<const VkObjectTablePushConstantEntryNVX*>(this);
+ }
+
+ bool operator==( ObjectTablePushConstantEntryNVX const& rhs ) const
+ {
+ return ( type == rhs.type )
+ && ( flags == rhs.flags )
+ && ( pipelineLayout == rhs.pipelineLayout )
+ && ( stageFlags == rhs.stageFlags );
+ }
+
+ bool operator!=( ObjectTablePushConstantEntryNVX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ ObjectEntryTypeNVX type;
+ ObjectEntryUsageFlagsNVX flags;
+ PipelineLayout pipelineLayout;
+ ShaderStageFlags stageFlags;
+ };
+ static_assert( sizeof( ObjectTablePushConstantEntryNVX ) == sizeof( VkObjectTablePushConstantEntryNVX ), "struct and wrapper have different size!" );
+
+ enum class DescriptorSetLayoutCreateFlagBits
+ {
+ ePushDescriptorKHR = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR
+ };
+
+ using DescriptorSetLayoutCreateFlags = Flags<DescriptorSetLayoutCreateFlagBits, VkDescriptorSetLayoutCreateFlags>;
+
+ VULKAN_HPP_INLINE DescriptorSetLayoutCreateFlags operator|( DescriptorSetLayoutCreateFlagBits bit0, DescriptorSetLayoutCreateFlagBits bit1 )
+ {
+ return DescriptorSetLayoutCreateFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE DescriptorSetLayoutCreateFlags operator~( DescriptorSetLayoutCreateFlagBits bits )
+ {
+ return ~( DescriptorSetLayoutCreateFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<DescriptorSetLayoutCreateFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(DescriptorSetLayoutCreateFlagBits::ePushDescriptorKHR)
+ };
+ };
+
+ struct DescriptorSetLayoutCreateInfo
+ {
+ DescriptorSetLayoutCreateInfo( DescriptorSetLayoutCreateFlags flags_ = DescriptorSetLayoutCreateFlags(), uint32_t bindingCount_ = 0, const DescriptorSetLayoutBinding* pBindings_ = nullptr )
+ : sType( StructureType::eDescriptorSetLayoutCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , bindingCount( bindingCount_ )
+ , pBindings( pBindings_ )
+ {
+ }
+
+ DescriptorSetLayoutCreateInfo( VkDescriptorSetLayoutCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DescriptorSetLayoutCreateInfo) );
+ }
+
+ DescriptorSetLayoutCreateInfo& operator=( VkDescriptorSetLayoutCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DescriptorSetLayoutCreateInfo) );
+ return *this;
+ }
+
+ DescriptorSetLayoutCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DescriptorSetLayoutCreateInfo& setFlags( DescriptorSetLayoutCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ DescriptorSetLayoutCreateInfo& setBindingCount( uint32_t bindingCount_ )
+ {
+ bindingCount = bindingCount_;
+ return *this;
+ }
+
+ DescriptorSetLayoutCreateInfo& setPBindings( const DescriptorSetLayoutBinding* pBindings_ )
+ {
+ pBindings = pBindings_;
+ return *this;
+ }
+
+ operator const VkDescriptorSetLayoutCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkDescriptorSetLayoutCreateInfo*>(this);
+ }
+
+ bool operator==( DescriptorSetLayoutCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( bindingCount == rhs.bindingCount )
+ && ( pBindings == rhs.pBindings );
+ }
+
+ bool operator!=( DescriptorSetLayoutCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ DescriptorSetLayoutCreateFlags flags;
+ uint32_t bindingCount;
+ const DescriptorSetLayoutBinding* pBindings;
+ };
+ static_assert( sizeof( DescriptorSetLayoutCreateInfo ) == sizeof( VkDescriptorSetLayoutCreateInfo ), "struct and wrapper have different size!" );
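+
+ // Usage sketch (illustrative only, not part of the original header), assuming a vk::Device named device:
+ //   DescriptorSetLayoutBinding binding( 0, DescriptorType::eUniformBuffer, 1, ShaderStageFlagBits::eVertex );
+ //   DescriptorSetLayoutCreateInfo layoutInfo;
+ //   layoutInfo.setBindingCount( 1 ).setPBindings( &binding );
+ //   DescriptorSetLayout layout = device.createDescriptorSetLayout( layoutInfo );
+ // When VK_KHR_push_descriptor is enabled, the ePushDescriptorKHR flag above may additionally be passed via setFlags().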
+
+ enum class ExternalMemoryHandleTypeFlagBitsKHX
+ {
+ eOpaqueFd = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHX,
+ eOpaqueWin32 = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHX,
+ eOpaqueWin32Kmt = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHX,
+ eD3D11Texture = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT_KHX,
+ eD3D11TextureKmt = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT_KHX,
+ eD3D12Heap = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT_KHX,
+ eD3D12Resource = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT_KHX
+ };
+
+ using ExternalMemoryHandleTypeFlagsKHX = Flags<ExternalMemoryHandleTypeFlagBitsKHX, VkExternalMemoryHandleTypeFlagsKHX>;
+
+ VULKAN_HPP_INLINE ExternalMemoryHandleTypeFlagsKHX operator|( ExternalMemoryHandleTypeFlagBitsKHX bit0, ExternalMemoryHandleTypeFlagBitsKHX bit1 )
+ {
+ return ExternalMemoryHandleTypeFlagsKHX( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE ExternalMemoryHandleTypeFlagsKHX operator~( ExternalMemoryHandleTypeFlagBitsKHX bits )
+ {
+ return ~( ExternalMemoryHandleTypeFlagsKHX( bits ) );
+ }
+
+ template <> struct FlagTraits<ExternalMemoryHandleTypeFlagBitsKHX>
+ {
+ enum
+ {
+ allFlags = VkFlags(ExternalMemoryHandleTypeFlagBitsKHX::eOpaqueFd) | VkFlags(ExternalMemoryHandleTypeFlagBitsKHX::eOpaqueWin32) | VkFlags(ExternalMemoryHandleTypeFlagBitsKHX::eOpaqueWin32Kmt) | VkFlags(ExternalMemoryHandleTypeFlagBitsKHX::eD3D11Texture) | VkFlags(ExternalMemoryHandleTypeFlagBitsKHX::eD3D11TextureKmt) | VkFlags(ExternalMemoryHandleTypeFlagBitsKHX::eD3D12Heap) | VkFlags(ExternalMemoryHandleTypeFlagBitsKHX::eD3D12Resource)
+ };
+ };
+
+ struct PhysicalDeviceExternalImageFormatInfoKHX
+ {
+ PhysicalDeviceExternalImageFormatInfoKHX( ExternalMemoryHandleTypeFlagBitsKHX handleType_ = ExternalMemoryHandleTypeFlagBitsKHX::eOpaqueFd )
+ : sType( StructureType::ePhysicalDeviceExternalImageFormatInfoKHX )
+ , pNext( nullptr )
+ , handleType( handleType_ )
+ {
+ }
+
+ PhysicalDeviceExternalImageFormatInfoKHX( VkPhysicalDeviceExternalImageFormatInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PhysicalDeviceExternalImageFormatInfoKHX) );
+ }
+
+ PhysicalDeviceExternalImageFormatInfoKHX& operator=( VkPhysicalDeviceExternalImageFormatInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PhysicalDeviceExternalImageFormatInfoKHX) );
+ return *this;
+ }
+
+ PhysicalDeviceExternalImageFormatInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PhysicalDeviceExternalImageFormatInfoKHX& setHandleType( ExternalMemoryHandleTypeFlagBitsKHX handleType_ )
+ {
+ handleType = handleType_;
+ return *this;
+ }
+
+ operator const VkPhysicalDeviceExternalImageFormatInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkPhysicalDeviceExternalImageFormatInfoKHX*>(this);
+ }
+
+ bool operator==( PhysicalDeviceExternalImageFormatInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( handleType == rhs.handleType );
+ }
+
+ bool operator!=( PhysicalDeviceExternalImageFormatInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ ExternalMemoryHandleTypeFlagBitsKHX handleType;
+ };
+ static_assert( sizeof( PhysicalDeviceExternalImageFormatInfoKHX ) == sizeof( VkPhysicalDeviceExternalImageFormatInfoKHX ), "struct and wrapper have different size!" );
+
+ struct PhysicalDeviceExternalBufferInfoKHX
+ {
+ PhysicalDeviceExternalBufferInfoKHX( BufferCreateFlags flags_ = BufferCreateFlags(), BufferUsageFlags usage_ = BufferUsageFlags(), ExternalMemoryHandleTypeFlagBitsKHX handleType_ = ExternalMemoryHandleTypeFlagBitsKHX::eOpaqueFd )
+ : sType( StructureType::ePhysicalDeviceExternalBufferInfoKHX )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , usage( usage_ )
+ , handleType( handleType_ )
+ {
+ }
+
+ PhysicalDeviceExternalBufferInfoKHX( VkPhysicalDeviceExternalBufferInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PhysicalDeviceExternalBufferInfoKHX) );
+ }
+
+ PhysicalDeviceExternalBufferInfoKHX& operator=( VkPhysicalDeviceExternalBufferInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PhysicalDeviceExternalBufferInfoKHX) );
+ return *this;
+ }
+
+ PhysicalDeviceExternalBufferInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PhysicalDeviceExternalBufferInfoKHX& setFlags( BufferCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ PhysicalDeviceExternalBufferInfoKHX& setUsage( BufferUsageFlags usage_ )
+ {
+ usage = usage_;
+ return *this;
+ }
+
+ PhysicalDeviceExternalBufferInfoKHX& setHandleType( ExternalMemoryHandleTypeFlagBitsKHX handleType_ )
+ {
+ handleType = handleType_;
+ return *this;
+ }
+
+ operator const VkPhysicalDeviceExternalBufferInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkPhysicalDeviceExternalBufferInfoKHX*>(this);
+ }
+
+ bool operator==( PhysicalDeviceExternalBufferInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( usage == rhs.usage )
+ && ( handleType == rhs.handleType );
+ }
+
+ bool operator!=( PhysicalDeviceExternalBufferInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ BufferCreateFlags flags;
+ BufferUsageFlags usage;
+ ExternalMemoryHandleTypeFlagBitsKHX handleType;
+ };
+ static_assert( sizeof( PhysicalDeviceExternalBufferInfoKHX ) == sizeof( VkPhysicalDeviceExternalBufferInfoKHX ), "struct and wrapper have different size!" );
+
+ struct ExternalMemoryImageCreateInfoKHX
+ {
+ ExternalMemoryImageCreateInfoKHX( ExternalMemoryHandleTypeFlagsKHX handleTypes_ = ExternalMemoryHandleTypeFlagsKHX() )
+ : sType( StructureType::eExternalMemoryImageCreateInfoKHX )
+ , pNext( nullptr )
+ , handleTypes( handleTypes_ )
+ {
+ }
+
+ ExternalMemoryImageCreateInfoKHX( VkExternalMemoryImageCreateInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ExternalMemoryImageCreateInfoKHX) );
+ }
+
+ ExternalMemoryImageCreateInfoKHX& operator=( VkExternalMemoryImageCreateInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ExternalMemoryImageCreateInfoKHX) );
+ return *this;
+ }
+
+ ExternalMemoryImageCreateInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ ExternalMemoryImageCreateInfoKHX& setHandleTypes( ExternalMemoryHandleTypeFlagsKHX handleTypes_ )
+ {
+ handleTypes = handleTypes_;
+ return *this;
+ }
+
+ operator const VkExternalMemoryImageCreateInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkExternalMemoryImageCreateInfoKHX*>(this);
+ }
+
+ bool operator==( ExternalMemoryImageCreateInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( handleTypes == rhs.handleTypes );
+ }
+
+ bool operator!=( ExternalMemoryImageCreateInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ ExternalMemoryHandleTypeFlagsKHX handleTypes;
+ };
+ static_assert( sizeof( ExternalMemoryImageCreateInfoKHX ) == sizeof( VkExternalMemoryImageCreateInfoKHX ), "struct and wrapper have different size!" );
+
+ struct ExternalMemoryBufferCreateInfoKHX
+ {
+ ExternalMemoryBufferCreateInfoKHX( ExternalMemoryHandleTypeFlagsKHX handleTypes_ = ExternalMemoryHandleTypeFlagsKHX() )
+ : sType( StructureType::eExternalMemoryBufferCreateInfoKHX )
+ , pNext( nullptr )
+ , handleTypes( handleTypes_ )
+ {
+ }
+
+ ExternalMemoryBufferCreateInfoKHX( VkExternalMemoryBufferCreateInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ExternalMemoryBufferCreateInfoKHX) );
+ }
+
+ ExternalMemoryBufferCreateInfoKHX& operator=( VkExternalMemoryBufferCreateInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ExternalMemoryBufferCreateInfoKHX) );
+ return *this;
+ }
+
+ ExternalMemoryBufferCreateInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ ExternalMemoryBufferCreateInfoKHX& setHandleTypes( ExternalMemoryHandleTypeFlagsKHX handleTypes_ )
+ {
+ handleTypes = handleTypes_;
+ return *this;
+ }
+
+ operator const VkExternalMemoryBufferCreateInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkExternalMemoryBufferCreateInfoKHX*>(this);
+ }
+
+ bool operator==( ExternalMemoryBufferCreateInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( handleTypes == rhs.handleTypes );
+ }
+
+ bool operator!=( ExternalMemoryBufferCreateInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ ExternalMemoryHandleTypeFlagsKHX handleTypes;
+ };
+ static_assert( sizeof( ExternalMemoryBufferCreateInfoKHX ) == sizeof( VkExternalMemoryBufferCreateInfoKHX ), "struct and wrapper have different size!" );
+
+ struct ExportMemoryAllocateInfoKHX
+ {
+ ExportMemoryAllocateInfoKHX( ExternalMemoryHandleTypeFlagsKHX handleTypes_ = ExternalMemoryHandleTypeFlagsKHX() )
+ : sType( StructureType::eExportMemoryAllocateInfoKHX )
+ , pNext( nullptr )
+ , handleTypes( handleTypes_ )
+ {
+ }
+
+ ExportMemoryAllocateInfoKHX( VkExportMemoryAllocateInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ExportMemoryAllocateInfoKHX) );
+ }
+
+ ExportMemoryAllocateInfoKHX& operator=( VkExportMemoryAllocateInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ExportMemoryAllocateInfoKHX) );
+ return *this;
+ }
+
+ ExportMemoryAllocateInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ ExportMemoryAllocateInfoKHX& setHandleTypes( ExternalMemoryHandleTypeFlagsKHX handleTypes_ )
+ {
+ handleTypes = handleTypes_;
+ return *this;
+ }
+
+ operator const VkExportMemoryAllocateInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkExportMemoryAllocateInfoKHX*>(this);
+ }
+
+ bool operator==( ExportMemoryAllocateInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( handleTypes == rhs.handleTypes );
+ }
+
+ bool operator!=( ExportMemoryAllocateInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ ExternalMemoryHandleTypeFlagsKHX handleTypes;
+ };
+ static_assert( sizeof( ExportMemoryAllocateInfoKHX ) == sizeof( VkExportMemoryAllocateInfoKHX ), "struct and wrapper have different size!" );
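+
+ // Usage sketch (illustrative only): an exportable allocation chains this struct into MemoryAllocateInfo
+ // via pNext, assuming a vk::Device named device and a previously chosen size and memoryTypeIndex:
+ //   ExportMemoryAllocateInfoKHX exportInfo( ExternalMemoryHandleTypeFlagBitsKHX::eOpaqueFd );
+ //   MemoryAllocateInfo allocInfo( size, memoryTypeIndex );
+ //   allocInfo.setPNext( &exportInfo );
+ //   DeviceMemory memory = device.allocateMemory( allocInfo );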
+
+#ifdef VK_USE_PLATFORM_WIN32_KHX
+ struct ImportMemoryWin32HandleInfoKHX
+ {
+ ImportMemoryWin32HandleInfoKHX( ExternalMemoryHandleTypeFlagBitsKHX handleType_ = ExternalMemoryHandleTypeFlagBitsKHX::eOpaqueFd, HANDLE handle_ = 0 )
+ : sType( StructureType::eImportMemoryWin32HandleInfoKHX )
+ , pNext( nullptr )
+ , handleType( handleType_ )
+ , handle( handle_ )
+ {
+ }
+
+ ImportMemoryWin32HandleInfoKHX( VkImportMemoryWin32HandleInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImportMemoryWin32HandleInfoKHX) );
+ }
+
+ ImportMemoryWin32HandleInfoKHX& operator=( VkImportMemoryWin32HandleInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImportMemoryWin32HandleInfoKHX) );
+ return *this;
+ }
+
+ ImportMemoryWin32HandleInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ ImportMemoryWin32HandleInfoKHX& setHandleType( ExternalMemoryHandleTypeFlagBitsKHX handleType_ )
+ {
+ handleType = handleType_;
+ return *this;
+ }
+
+ ImportMemoryWin32HandleInfoKHX& setHandle( HANDLE handle_ )
+ {
+ handle = handle_;
+ return *this;
+ }
+
+ operator const VkImportMemoryWin32HandleInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkImportMemoryWin32HandleInfoKHX*>(this);
+ }
+
+ bool operator==( ImportMemoryWin32HandleInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( handleType == rhs.handleType )
+ && ( handle == rhs.handle );
+ }
+
+ bool operator!=( ImportMemoryWin32HandleInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ ExternalMemoryHandleTypeFlagBitsKHX handleType;
+ HANDLE handle;
+ };
+ static_assert( sizeof( ImportMemoryWin32HandleInfoKHX ) == sizeof( VkImportMemoryWin32HandleInfoKHX ), "struct and wrapper have different size!" );
+#endif /*VK_USE_PLATFORM_WIN32_KHX*/
+
+ struct ImportMemoryFdInfoKHX
+ {
+ ImportMemoryFdInfoKHX( ExternalMemoryHandleTypeFlagBitsKHX handleType_ = ExternalMemoryHandleTypeFlagBitsKHX::eOpaqueFd, int fd_ = 0 )
+ : sType( StructureType::eImportMemoryFdInfoKHX )
+ , pNext( nullptr )
+ , handleType( handleType_ )
+ , fd( fd_ )
+ {
+ }
+
+ ImportMemoryFdInfoKHX( VkImportMemoryFdInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImportMemoryFdInfoKHX) );
+ }
+
+ ImportMemoryFdInfoKHX& operator=( VkImportMemoryFdInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImportMemoryFdInfoKHX) );
+ return *this;
+ }
+
+ ImportMemoryFdInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ ImportMemoryFdInfoKHX& setHandleType( ExternalMemoryHandleTypeFlagBitsKHX handleType_ )
+ {
+ handleType = handleType_;
+ return *this;
+ }
+
+ ImportMemoryFdInfoKHX& setFd( int fd_ )
+ {
+ fd = fd_;
+ return *this;
+ }
+
+ operator const VkImportMemoryFdInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkImportMemoryFdInfoKHX*>(this);
+ }
+
+ bool operator==( ImportMemoryFdInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( handleType == rhs.handleType )
+ && ( fd == rhs.fd );
+ }
+
+ bool operator!=( ImportMemoryFdInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ ExternalMemoryHandleTypeFlagBitsKHX handleType;
+ int fd;
+ };
+ static_assert( sizeof( ImportMemoryFdInfoKHX ) == sizeof( VkImportMemoryFdInfoKHX ), "struct and wrapper have different size!" );
+
+ enum class ExternalMemoryFeatureFlagBitsKHX
+ {
+ eDedicatedOnly = VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHX,
+ eExportable = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHX,
+ eImportable = VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHX
+ };
+
+ using ExternalMemoryFeatureFlagsKHX = Flags<ExternalMemoryFeatureFlagBitsKHX, VkExternalMemoryFeatureFlagsKHX>;
+
+ VULKAN_HPP_INLINE ExternalMemoryFeatureFlagsKHX operator|( ExternalMemoryFeatureFlagBitsKHX bit0, ExternalMemoryFeatureFlagBitsKHX bit1 )
+ {
+ return ExternalMemoryFeatureFlagsKHX( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE ExternalMemoryFeatureFlagsKHX operator~( ExternalMemoryFeatureFlagBitsKHX bits )
+ {
+ return ~( ExternalMemoryFeatureFlagsKHX( bits ) );
+ }
+
+ template <> struct FlagTraits<ExternalMemoryFeatureFlagBitsKHX>
+ {
+ enum
+ {
+ allFlags = VkFlags(ExternalMemoryFeatureFlagBitsKHX::eDedicatedOnly) | VkFlags(ExternalMemoryFeatureFlagBitsKHX::eExportable) | VkFlags(ExternalMemoryFeatureFlagBitsKHX::eImportable)
+ };
+ };
+
+ struct ExternalMemoryPropertiesKHX
+ {
+ operator const VkExternalMemoryPropertiesKHX&() const
+ {
+ return *reinterpret_cast<const VkExternalMemoryPropertiesKHX*>(this);
+ }
+
+ bool operator==( ExternalMemoryPropertiesKHX const& rhs ) const
+ {
+ return ( externalMemoryFeatures == rhs.externalMemoryFeatures )
+ && ( exportFromImportedHandleTypes == rhs.exportFromImportedHandleTypes )
+ && ( compatibleHandleTypes == rhs.compatibleHandleTypes );
+ }
+
+ bool operator!=( ExternalMemoryPropertiesKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ ExternalMemoryFeatureFlagsKHX externalMemoryFeatures;
+ ExternalMemoryHandleTypeFlagsKHX exportFromImportedHandleTypes;
+ ExternalMemoryHandleTypeFlagsKHX compatibleHandleTypes;
+ };
+ static_assert( sizeof( ExternalMemoryPropertiesKHX ) == sizeof( VkExternalMemoryPropertiesKHX ), "struct and wrapper have different size!" );
+
+ struct ExternalImageFormatPropertiesKHX
+ {
+ operator const VkExternalImageFormatPropertiesKHX&() const
+ {
+ return *reinterpret_cast<const VkExternalImageFormatPropertiesKHX*>(this);
+ }
+
+ bool operator==( ExternalImageFormatPropertiesKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( externalMemoryProperties == rhs.externalMemoryProperties );
+ }
+
+ bool operator!=( ExternalImageFormatPropertiesKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ void* pNext;
+ ExternalMemoryPropertiesKHX externalMemoryProperties;
+ };
+ static_assert( sizeof( ExternalImageFormatPropertiesKHX ) == sizeof( VkExternalImageFormatPropertiesKHX ), "struct and wrapper have different size!" );
+
+ struct ExternalBufferPropertiesKHX
+ {
+ operator const VkExternalBufferPropertiesKHX&() const
+ {
+ return *reinterpret_cast<const VkExternalBufferPropertiesKHX*>(this);
+ }
+
+ bool operator==( ExternalBufferPropertiesKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( externalMemoryProperties == rhs.externalMemoryProperties );
+ }
+
+ bool operator!=( ExternalBufferPropertiesKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ void* pNext;
+ ExternalMemoryPropertiesKHX externalMemoryProperties;
+ };
+ static_assert( sizeof( ExternalBufferPropertiesKHX ) == sizeof( VkExternalBufferPropertiesKHX ), "struct and wrapper have different size!" );
+
+ enum class ExternalSemaphoreHandleTypeFlagBitsKHX
+ {
+ eOpaqueFd = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHX,
+ eOpaqueWin32 = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHX,
+ eOpaqueWin32Kmt = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHX,
+ eD3D12Fence = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT_KHX,
+ eFenceFd = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_FENCE_FD_BIT_KHX
+ };
+
+ using ExternalSemaphoreHandleTypeFlagsKHX = Flags<ExternalSemaphoreHandleTypeFlagBitsKHX, VkExternalSemaphoreHandleTypeFlagsKHX>;
+
+ VULKAN_HPP_INLINE ExternalSemaphoreHandleTypeFlagsKHX operator|( ExternalSemaphoreHandleTypeFlagBitsKHX bit0, ExternalSemaphoreHandleTypeFlagBitsKHX bit1 )
+ {
+ return ExternalSemaphoreHandleTypeFlagsKHX( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE ExternalSemaphoreHandleTypeFlagsKHX operator~( ExternalSemaphoreHandleTypeFlagBitsKHX bits )
+ {
+ return ~( ExternalSemaphoreHandleTypeFlagsKHX( bits ) );
+ }
+
+ template <> struct FlagTraits<ExternalSemaphoreHandleTypeFlagBitsKHX>
+ {
+ enum
+ {
+ allFlags = VkFlags(ExternalSemaphoreHandleTypeFlagBitsKHX::eOpaqueFd) | VkFlags(ExternalSemaphoreHandleTypeFlagBitsKHX::eOpaqueWin32) | VkFlags(ExternalSemaphoreHandleTypeFlagBitsKHX::eOpaqueWin32Kmt) | VkFlags(ExternalSemaphoreHandleTypeFlagBitsKHX::eD3D12Fence) | VkFlags(ExternalSemaphoreHandleTypeFlagBitsKHX::eFenceFd)
+ };
+ };
+
+ struct PhysicalDeviceExternalSemaphoreInfoKHX
+ {
+ PhysicalDeviceExternalSemaphoreInfoKHX( ExternalSemaphoreHandleTypeFlagBitsKHX handleType_ = ExternalSemaphoreHandleTypeFlagBitsKHX::eOpaqueFd )
+ : sType( StructureType::ePhysicalDeviceExternalSemaphoreInfoKHX )
+ , pNext( nullptr )
+ , handleType( handleType_ )
+ {
+ }
+
+ PhysicalDeviceExternalSemaphoreInfoKHX( VkPhysicalDeviceExternalSemaphoreInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PhysicalDeviceExternalSemaphoreInfoKHX) );
+ }
+
+ PhysicalDeviceExternalSemaphoreInfoKHX& operator=( VkPhysicalDeviceExternalSemaphoreInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PhysicalDeviceExternalSemaphoreInfoKHX) );
+ return *this;
+ }
+
+ PhysicalDeviceExternalSemaphoreInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PhysicalDeviceExternalSemaphoreInfoKHX& setHandleType( ExternalSemaphoreHandleTypeFlagBitsKHX handleType_ )
+ {
+ handleType = handleType_;
+ return *this;
+ }
+
+ operator const VkPhysicalDeviceExternalSemaphoreInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkPhysicalDeviceExternalSemaphoreInfoKHX*>(this);
+ }
+
+ bool operator==( PhysicalDeviceExternalSemaphoreInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( handleType == rhs.handleType );
+ }
+
+ bool operator!=( PhysicalDeviceExternalSemaphoreInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ ExternalSemaphoreHandleTypeFlagBitsKHX handleType;
+ };
+ static_assert( sizeof( PhysicalDeviceExternalSemaphoreInfoKHX ) == sizeof( VkPhysicalDeviceExternalSemaphoreInfoKHX ), "struct and wrapper have different size!" );
+
+ struct ExportSemaphoreCreateInfoKHX
+ {
+ ExportSemaphoreCreateInfoKHX( ExternalSemaphoreHandleTypeFlagsKHX handleTypes_ = ExternalSemaphoreHandleTypeFlagsKHX() )
+ : sType( StructureType::eExportSemaphoreCreateInfoKHX )
+ , pNext( nullptr )
+ , handleTypes( handleTypes_ )
+ {
+ }
+
+ ExportSemaphoreCreateInfoKHX( VkExportSemaphoreCreateInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ExportSemaphoreCreateInfoKHX) );
+ }
+
+ ExportSemaphoreCreateInfoKHX& operator=( VkExportSemaphoreCreateInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ExportSemaphoreCreateInfoKHX) );
+ return *this;
+ }
+
+ ExportSemaphoreCreateInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ ExportSemaphoreCreateInfoKHX& setHandleTypes( ExternalSemaphoreHandleTypeFlagsKHX handleTypes_ )
+ {
+ handleTypes = handleTypes_;
+ return *this;
+ }
+
+ operator const VkExportSemaphoreCreateInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkExportSemaphoreCreateInfoKHX*>(this);
+ }
+
+ bool operator==( ExportSemaphoreCreateInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( handleTypes == rhs.handleTypes );
+ }
+
+ bool operator!=( ExportSemaphoreCreateInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ ExternalSemaphoreHandleTypeFlagsKHX handleTypes;
+ };
+ static_assert( sizeof( ExportSemaphoreCreateInfoKHX ) == sizeof( VkExportSemaphoreCreateInfoKHX ), "struct and wrapper have different size!" );
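+
+ // Usage sketch (illustrative only): an exportable semaphore chains this struct into SemaphoreCreateInfo,
+ // assuming a vk::Device named device:
+ //   ExportSemaphoreCreateInfoKHX exportSem( ExternalSemaphoreHandleTypeFlagBitsKHX::eOpaqueFd );
+ //   SemaphoreCreateInfo semInfo;
+ //   semInfo.setPNext( &exportSem );
+ //   Semaphore semaphore = device.createSemaphore( semInfo );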
+
+#ifdef VK_USE_PLATFORM_WIN32_KHX
+ struct ImportSemaphoreWin32HandleInfoKHX
+ {
+ ImportSemaphoreWin32HandleInfoKHX( Semaphore semaphore_ = Semaphore(), ExternalSemaphoreHandleTypeFlagsKHX handleType_ = ExternalSemaphoreHandleTypeFlagsKHX(), HANDLE handle_ = 0 )
+ : sType( StructureType::eImportSemaphoreWin32HandleInfoKHX )
+ , pNext( nullptr )
+ , semaphore( semaphore_ )
+ , handleType( handleType_ )
+ , handle( handle_ )
+ {
+ }
+
+ ImportSemaphoreWin32HandleInfoKHX( VkImportSemaphoreWin32HandleInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImportSemaphoreWin32HandleInfoKHX) );
+ }
+
+ ImportSemaphoreWin32HandleInfoKHX& operator=( VkImportSemaphoreWin32HandleInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImportSemaphoreWin32HandleInfoKHX) );
+ return *this;
+ }
+
+ ImportSemaphoreWin32HandleInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ ImportSemaphoreWin32HandleInfoKHX& setSemaphore( Semaphore semaphore_ )
+ {
+ semaphore = semaphore_;
+ return *this;
+ }
+
+ ImportSemaphoreWin32HandleInfoKHX& setHandleType( ExternalSemaphoreHandleTypeFlagsKHX handleType_ )
+ {
+ handleType = handleType_;
+ return *this;
+ }
+
+ ImportSemaphoreWin32HandleInfoKHX& setHandle( HANDLE handle_ )
+ {
+ handle = handle_;
+ return *this;
+ }
+
+ operator const VkImportSemaphoreWin32HandleInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkImportSemaphoreWin32HandleInfoKHX*>(this);
+ }
+
+ bool operator==( ImportSemaphoreWin32HandleInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( semaphore == rhs.semaphore )
+ && ( handleType == rhs.handleType )
+ && ( handle == rhs.handle );
+ }
+
+ bool operator!=( ImportSemaphoreWin32HandleInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ Semaphore semaphore;
+ ExternalSemaphoreHandleTypeFlagsKHX handleType;
+ HANDLE handle;
+ };
+ static_assert( sizeof( ImportSemaphoreWin32HandleInfoKHX ) == sizeof( VkImportSemaphoreWin32HandleInfoKHX ), "struct and wrapper have different size!" );
+#endif /*VK_USE_PLATFORM_WIN32_KHX*/
+
+ struct ImportSemaphoreFdInfoKHX
+ {
+ ImportSemaphoreFdInfoKHX( Semaphore semaphore_ = Semaphore(), ExternalSemaphoreHandleTypeFlagBitsKHX handleType_ = ExternalSemaphoreHandleTypeFlagBitsKHX::eOpaqueFd, int fd_ = 0 )
+ : sType( StructureType::eImportSemaphoreFdInfoKHX )
+ , pNext( nullptr )
+ , semaphore( semaphore_ )
+ , handleType( handleType_ )
+ , fd( fd_ )
+ {
+ }
+
+ ImportSemaphoreFdInfoKHX( VkImportSemaphoreFdInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImportSemaphoreFdInfoKHX) );
+ }
+
+ ImportSemaphoreFdInfoKHX& operator=( VkImportSemaphoreFdInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ImportSemaphoreFdInfoKHX) );
+ return *this;
+ }
+
+ ImportSemaphoreFdInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ ImportSemaphoreFdInfoKHX& setSemaphore( Semaphore semaphore_ )
+ {
+ semaphore = semaphore_;
+ return *this;
+ }
+
+ ImportSemaphoreFdInfoKHX& setHandleType( ExternalSemaphoreHandleTypeFlagBitsKHX handleType_ )
+ {
+ handleType = handleType_;
+ return *this;
+ }
+
+ ImportSemaphoreFdInfoKHX& setFd( int fd_ )
+ {
+ fd = fd_;
+ return *this;
+ }
+
+ operator const VkImportSemaphoreFdInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkImportSemaphoreFdInfoKHX*>(this);
+ }
+
+ bool operator==( ImportSemaphoreFdInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( semaphore == rhs.semaphore )
+ && ( handleType == rhs.handleType )
+ && ( fd == rhs.fd );
+ }
+
+ bool operator!=( ImportSemaphoreFdInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ Semaphore semaphore;
+ ExternalSemaphoreHandleTypeFlagBitsKHX handleType;
+ int fd;
+ };
+ static_assert( sizeof( ImportSemaphoreFdInfoKHX ) == sizeof( VkImportSemaphoreFdInfoKHX ), "struct and wrapper have different size!" );
+
+ enum class ExternalSemaphoreFeatureFlagBitsKHX
+ {
+ eExportable = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHX,
+ eImportable = VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHX
+ };
+
+ using ExternalSemaphoreFeatureFlagsKHX = Flags<ExternalSemaphoreFeatureFlagBitsKHX, VkExternalSemaphoreFeatureFlagsKHX>;
+
+ VULKAN_HPP_INLINE ExternalSemaphoreFeatureFlagsKHX operator|( ExternalSemaphoreFeatureFlagBitsKHX bit0, ExternalSemaphoreFeatureFlagBitsKHX bit1 )
+ {
+ return ExternalSemaphoreFeatureFlagsKHX( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE ExternalSemaphoreFeatureFlagsKHX operator~( ExternalSemaphoreFeatureFlagBitsKHX bits )
+ {
+ return ~( ExternalSemaphoreFeatureFlagsKHX( bits ) );
+ }
+
+ template <> struct FlagTraits<ExternalSemaphoreFeatureFlagBitsKHX>
+ {
+ enum
+ {
+ allFlags = VkFlags(ExternalSemaphoreFeatureFlagBitsKHX::eExportable) | VkFlags(ExternalSemaphoreFeatureFlagBitsKHX::eImportable)
+ };
+ };
+
+ struct ExternalSemaphorePropertiesKHX
+ {
+ operator const VkExternalSemaphorePropertiesKHX&() const
+ {
+ return *reinterpret_cast<const VkExternalSemaphorePropertiesKHX*>(this);
+ }
+
+ bool operator==( ExternalSemaphorePropertiesKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( exportFromImportedHandleTypes == rhs.exportFromImportedHandleTypes )
+ && ( compatibleHandleTypes == rhs.compatibleHandleTypes )
+ && ( externalSemaphoreFeatures == rhs.externalSemaphoreFeatures );
+ }
+
+ bool operator!=( ExternalSemaphorePropertiesKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ void* pNext;
+ ExternalSemaphoreHandleTypeFlagsKHX exportFromImportedHandleTypes;
+ ExternalSemaphoreHandleTypeFlagsKHX compatibleHandleTypes;
+ ExternalSemaphoreFeatureFlagsKHX externalSemaphoreFeatures;
+ };
+ static_assert( sizeof( ExternalSemaphorePropertiesKHX ) == sizeof( VkExternalSemaphorePropertiesKHX ), "struct and wrapper have different size!" );
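+
+ // Usage sketch (illustrative only): querying support, assuming a vk::PhysicalDevice named physicalDevice and
+ // assuming the header also declares getExternalSemaphorePropertiesKHX (that declaration is outside this excerpt):
+ //   ExternalSemaphorePropertiesKHX props = physicalDevice.getExternalSemaphorePropertiesKHX(
+ //     PhysicalDeviceExternalSemaphoreInfoKHX( ExternalSemaphoreHandleTypeFlagBitsKHX::eOpaqueFd ) );
+ //   bool exportable = !!( props.externalSemaphoreFeatures & ExternalSemaphoreFeatureFlagBitsKHX::eExportable );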
+
+ enum class SurfaceCounterFlagBitsEXT
+ {
+ eVblank = VK_SURFACE_COUNTER_VBLANK_EXT
+ };
+
+ using SurfaceCounterFlagsEXT = Flags<SurfaceCounterFlagBitsEXT, VkSurfaceCounterFlagsEXT>;
+
+ VULKAN_HPP_INLINE SurfaceCounterFlagsEXT operator|( SurfaceCounterFlagBitsEXT bit0, SurfaceCounterFlagBitsEXT bit1 )
+ {
+ return SurfaceCounterFlagsEXT( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE SurfaceCounterFlagsEXT operator~( SurfaceCounterFlagBitsEXT bits )
+ {
+ return ~( SurfaceCounterFlagsEXT( bits ) );
+ }
+
+ template <> struct FlagTraits<SurfaceCounterFlagBitsEXT>
+ {
+ enum
+ {
+ allFlags = VkFlags(SurfaceCounterFlagBitsEXT::eVblank)
+ };
+ };
+
+ struct SurfaceCapabilities2EXT
+ {
+ operator const VkSurfaceCapabilities2EXT&() const
+ {
+ return *reinterpret_cast<const VkSurfaceCapabilities2EXT*>(this);
+ }
+
+ bool operator==( SurfaceCapabilities2EXT const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( minImageCount == rhs.minImageCount )
+ && ( maxImageCount == rhs.maxImageCount )
+ && ( currentExtent == rhs.currentExtent )
+ && ( minImageExtent == rhs.minImageExtent )
+ && ( maxImageExtent == rhs.maxImageExtent )
+ && ( maxImageArrayLayers == rhs.maxImageArrayLayers )
+ && ( supportedTransforms == rhs.supportedTransforms )
+ && ( currentTransform == rhs.currentTransform )
+ && ( supportedCompositeAlpha == rhs.supportedCompositeAlpha )
+ && ( supportedUsageFlags == rhs.supportedUsageFlags )
+ && ( supportedSurfaceCounters == rhs.supportedSurfaceCounters );
+ }
+
+ bool operator!=( SurfaceCapabilities2EXT const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ void* pNext;
+ uint32_t minImageCount;
+ uint32_t maxImageCount;
+ Extent2D currentExtent;
+ Extent2D minImageExtent;
+ Extent2D maxImageExtent;
+ uint32_t maxImageArrayLayers;
+ SurfaceTransformFlagsKHR supportedTransforms;
+ SurfaceTransformFlagBitsKHR currentTransform;
+ CompositeAlphaFlagsKHR supportedCompositeAlpha;
+ ImageUsageFlags supportedUsageFlags;
+ SurfaceCounterFlagsEXT supportedSurfaceCounters;
+ };
+ static_assert( sizeof( SurfaceCapabilities2EXT ) == sizeof( VkSurfaceCapabilities2EXT ), "struct and wrapper have different size!" );
+
+ struct SwapchainCounterCreateInfoEXT
+ {
+ SwapchainCounterCreateInfoEXT( SurfaceCounterFlagsEXT surfaceCounters_ = SurfaceCounterFlagsEXT() )
+ : sType( StructureType::eSwapchainCounterCreateInfoEXT )
+ , pNext( nullptr )
+ , surfaceCounters( surfaceCounters_ )
+ {
+ }
+
+ SwapchainCounterCreateInfoEXT( VkSwapchainCounterCreateInfoEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SwapchainCounterCreateInfoEXT) );
+ }
+
+ SwapchainCounterCreateInfoEXT& operator=( VkSwapchainCounterCreateInfoEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SwapchainCounterCreateInfoEXT) );
+ return *this;
+ }
+
+ SwapchainCounterCreateInfoEXT& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ SwapchainCounterCreateInfoEXT& setSurfaceCounters( SurfaceCounterFlagsEXT surfaceCounters_ )
+ {
+ surfaceCounters = surfaceCounters_;
+ return *this;
+ }
+
+ operator const VkSwapchainCounterCreateInfoEXT&() const
+ {
+ return *reinterpret_cast<const VkSwapchainCounterCreateInfoEXT*>(this);
+ }
+
+ bool operator==( SwapchainCounterCreateInfoEXT const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( surfaceCounters == rhs.surfaceCounters );
+ }
+
+ bool operator!=( SwapchainCounterCreateInfoEXT const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ SurfaceCounterFlagsEXT surfaceCounters;
+ };
+ static_assert( sizeof( SwapchainCounterCreateInfoEXT ) == sizeof( VkSwapchainCounterCreateInfoEXT ), "struct and wrapper have different size!" );
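+
+ // Usage sketch (illustrative only): enabling the vblank counter for a swapchain by chaining this struct into
+ // SwapchainCreateInfoKHR (defined later in this header):
+ //   SwapchainCounterCreateInfoEXT counterInfo( SurfaceCounterFlagBitsEXT::eVblank );
+ //   swapchainCreateInfo.setPNext( &counterInfo );   // swapchainCreateInfo is a SwapchainCreateInfoKHR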
+
+ enum class DisplayPowerStateEXT
+ {
+ eOff = VK_DISPLAY_POWER_STATE_OFF_EXT,
+ eSuspend = VK_DISPLAY_POWER_STATE_SUSPEND_EXT,
+ eOn = VK_DISPLAY_POWER_STATE_ON_EXT
+ };
+
+ struct DisplayPowerInfoEXT
+ {
+ DisplayPowerInfoEXT( DisplayPowerStateEXT powerState_ = DisplayPowerStateEXT::eOff )
+ : sType( StructureType::eDisplayPowerInfoEXT )
+ , pNext( nullptr )
+ , powerState( powerState_ )
+ {
+ }
+
+ DisplayPowerInfoEXT( VkDisplayPowerInfoEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DisplayPowerInfoEXT) );
+ }
+
+ DisplayPowerInfoEXT& operator=( VkDisplayPowerInfoEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DisplayPowerInfoEXT) );
+ return *this;
+ }
+
+ DisplayPowerInfoEXT& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DisplayPowerInfoEXT& setPowerState( DisplayPowerStateEXT powerState_ )
+ {
+ powerState = powerState_;
+ return *this;
+ }
+
+ operator const VkDisplayPowerInfoEXT&() const
+ {
+ return *reinterpret_cast<const VkDisplayPowerInfoEXT*>(this);
+ }
+
+ bool operator==( DisplayPowerInfoEXT const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( powerState == rhs.powerState );
+ }
+
+ bool operator!=( DisplayPowerInfoEXT const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ DisplayPowerStateEXT powerState;
+ };
+ static_assert( sizeof( DisplayPowerInfoEXT ) == sizeof( VkDisplayPowerInfoEXT ), "struct and wrapper have different size!" );
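+
+ // Usage sketch (illustrative only): turning a display off, assuming a vk::Device named device, a DisplayKHR named
+ // display, and assuming the header also declares displayPowerControlEXT (that declaration is outside this excerpt):
+ //   device.displayPowerControlEXT( display, DisplayPowerInfoEXT( DisplayPowerStateEXT::eOff ) );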
+
+ enum class DeviceEventTypeEXT
+ {
+ eDisplayHotplug = VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT
+ };
+
+ struct DeviceEventInfoEXT
+ {
+ DeviceEventInfoEXT( DeviceEventTypeEXT deviceEvent_ = DeviceEventTypeEXT::eDisplayHotplug )
+ : sType( StructureType::eDeviceEventInfoEXT )
+ , pNext( nullptr )
+ , deviceEvent( deviceEvent_ )
+ {
+ }
+
+ DeviceEventInfoEXT( VkDeviceEventInfoEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DeviceEventInfoEXT) );
+ }
+
+ DeviceEventInfoEXT& operator=( VkDeviceEventInfoEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DeviceEventInfoEXT) );
+ return *this;
+ }
+
+ DeviceEventInfoEXT& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DeviceEventInfoEXT& setDeviceEvent( DeviceEventTypeEXT deviceEvent_ )
+ {
+ deviceEvent = deviceEvent_;
+ return *this;
+ }
+
+ operator const VkDeviceEventInfoEXT&() const
+ {
+ return *reinterpret_cast<const VkDeviceEventInfoEXT*>(this);
+ }
+
+ bool operator==( DeviceEventInfoEXT const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( deviceEvent == rhs.deviceEvent );
+ }
+
+ bool operator!=( DeviceEventInfoEXT const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ DeviceEventTypeEXT deviceEvent;
+ };
+ static_assert( sizeof( DeviceEventInfoEXT ) == sizeof( VkDeviceEventInfoEXT ), "struct and wrapper have different size!" );
+
+ enum class DisplayEventTypeEXT
+ {
+ eFirstPixelOut = VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT
+ };
+
+ struct DisplayEventInfoEXT
+ {
+ DisplayEventInfoEXT( DisplayEventTypeEXT displayEvent_ = DisplayEventTypeEXT::eFirstPixelOut )
+ : sType( StructureType::eDisplayEventInfoEXT )
+ , pNext( nullptr )
+ , displayEvent( displayEvent_ )
+ {
+ }
+
+ DisplayEventInfoEXT( VkDisplayEventInfoEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DisplayEventInfoEXT) );
+ }
+
+ DisplayEventInfoEXT& operator=( VkDisplayEventInfoEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DisplayEventInfoEXT) );
+ return *this;
+ }
+
+ DisplayEventInfoEXT& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DisplayEventInfoEXT& setDisplayEvent( DisplayEventTypeEXT displayEvent_ )
+ {
+ displayEvent = displayEvent_;
+ return *this;
+ }
+
+ operator const VkDisplayEventInfoEXT&() const
+ {
+ return *reinterpret_cast<const VkDisplayEventInfoEXT*>(this);
+ }
+
+ bool operator==( DisplayEventInfoEXT const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( displayEvent == rhs.displayEvent );
+ }
+
+ bool operator!=( DisplayEventInfoEXT const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ DisplayEventTypeEXT displayEvent;
+ };
+ static_assert( sizeof( DisplayEventInfoEXT ) == sizeof( VkDisplayEventInfoEXT ), "struct and wrapper have different size!" );
+
+ enum class PeerMemoryFeatureFlagBitsKHX
+ {
+ eCopySrc = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT_KHX,
+ eCopyDst = VK_PEER_MEMORY_FEATURE_COPY_DST_BIT_KHX,
+ eGenericSrc = VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT_KHX,
+ eGenericDst = VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT_KHX
+ };
+
+ using PeerMemoryFeatureFlagsKHX = Flags<PeerMemoryFeatureFlagBitsKHX, VkPeerMemoryFeatureFlagsKHX>;
+
+ VULKAN_HPP_INLINE PeerMemoryFeatureFlagsKHX operator|( PeerMemoryFeatureFlagBitsKHX bit0, PeerMemoryFeatureFlagBitsKHX bit1 )
+ {
+ return PeerMemoryFeatureFlagsKHX( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE PeerMemoryFeatureFlagsKHX operator~( PeerMemoryFeatureFlagBitsKHX bits )
+ {
+ return ~( PeerMemoryFeatureFlagsKHX( bits ) );
+ }
+
+ template <> struct FlagTraits<PeerMemoryFeatureFlagBitsKHX>
+ {
+ enum
+ {
+ allFlags = VkFlags(PeerMemoryFeatureFlagBitsKHX::eCopySrc) | VkFlags(PeerMemoryFeatureFlagBitsKHX::eCopyDst) | VkFlags(PeerMemoryFeatureFlagBitsKHX::eGenericSrc) | VkFlags(PeerMemoryFeatureFlagBitsKHX::eGenericDst)
+ };
+ };
+
+ enum class MemoryAllocateFlagBitsKHX
+ {
+ eDeviceMask = VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT_KHX
+ };
+
+ using MemoryAllocateFlagsKHX = Flags<MemoryAllocateFlagBitsKHX, VkMemoryAllocateFlagsKHX>;
+
+ VULKAN_HPP_INLINE MemoryAllocateFlagsKHX operator|( MemoryAllocateFlagBitsKHX bit0, MemoryAllocateFlagBitsKHX bit1 )
+ {
+ return MemoryAllocateFlagsKHX( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE MemoryAllocateFlagsKHX operator~( MemoryAllocateFlagBitsKHX bits )
+ {
+ return ~( MemoryAllocateFlagsKHX( bits ) );
+ }
+
+ template <> struct FlagTraits<MemoryAllocateFlagBitsKHX>
+ {
+ enum
+ {
+ allFlags = VkFlags(MemoryAllocateFlagBitsKHX::eDeviceMask)
+ };
+ };
+
+ struct MemoryAllocateFlagsInfoKHX
+ {
+ MemoryAllocateFlagsInfoKHX( MemoryAllocateFlagsKHX flags_ = MemoryAllocateFlagsKHX(), uint32_t deviceMask_ = 0 )
+ : sType( StructureType::eMemoryAllocateFlagsInfoKHX )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , deviceMask( deviceMask_ )
+ {
+ }
+
+ MemoryAllocateFlagsInfoKHX( VkMemoryAllocateFlagsInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(MemoryAllocateFlagsInfoKHX) );
+ }
+
+ MemoryAllocateFlagsInfoKHX& operator=( VkMemoryAllocateFlagsInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(MemoryAllocateFlagsInfoKHX) );
+ return *this;
+ }
+
+ MemoryAllocateFlagsInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ MemoryAllocateFlagsInfoKHX& setFlags( MemoryAllocateFlagsKHX flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ MemoryAllocateFlagsInfoKHX& setDeviceMask( uint32_t deviceMask_ )
+ {
+ deviceMask = deviceMask_;
+ return *this;
+ }
+
+ operator const VkMemoryAllocateFlagsInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkMemoryAllocateFlagsInfoKHX*>(this);
+ }
+
+ bool operator==( MemoryAllocateFlagsInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( deviceMask == rhs.deviceMask );
+ }
+
+ bool operator!=( MemoryAllocateFlagsInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ MemoryAllocateFlagsKHX flags;
+ uint32_t deviceMask;
+ };
+ static_assert( sizeof( MemoryAllocateFlagsInfoKHX ) == sizeof( VkMemoryAllocateFlagsInfoKHX ), "struct and wrapper have different size!" );
+
+ enum class DeviceGroupPresentModeFlagBitsKHX
+ {
+ eLocal = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHX,
+ eRemote = VK_DEVICE_GROUP_PRESENT_MODE_REMOTE_BIT_KHX,
+ eSum = VK_DEVICE_GROUP_PRESENT_MODE_SUM_BIT_KHX,
+ eLocalMultiDevice = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_MULTI_DEVICE_BIT_KHX
+ };
+
+ using DeviceGroupPresentModeFlagsKHX = Flags<DeviceGroupPresentModeFlagBitsKHX, VkDeviceGroupPresentModeFlagsKHX>;
+
+ VULKAN_HPP_INLINE DeviceGroupPresentModeFlagsKHX operator|( DeviceGroupPresentModeFlagBitsKHX bit0, DeviceGroupPresentModeFlagBitsKHX bit1 )
+ {
+ return DeviceGroupPresentModeFlagsKHX( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE DeviceGroupPresentModeFlagsKHX operator~( DeviceGroupPresentModeFlagBitsKHX bits )
+ {
+ return ~( DeviceGroupPresentModeFlagsKHX( bits ) );
+ }
+
+ template <> struct FlagTraits<DeviceGroupPresentModeFlagBitsKHX>
+ {
+ enum
+ {
+ allFlags = VkFlags(DeviceGroupPresentModeFlagBitsKHX::eLocal) | VkFlags(DeviceGroupPresentModeFlagBitsKHX::eRemote) | VkFlags(DeviceGroupPresentModeFlagBitsKHX::eSum) | VkFlags(DeviceGroupPresentModeFlagBitsKHX::eLocalMultiDevice)
+ };
+ };
+
+ struct DeviceGroupPresentCapabilitiesKHX
+ {
+ operator const VkDeviceGroupPresentCapabilitiesKHX&() const
+ {
+ return *reinterpret_cast<const VkDeviceGroupPresentCapabilitiesKHX*>(this);
+ }
+
+ bool operator==( DeviceGroupPresentCapabilitiesKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( memcmp( presentMask, rhs.presentMask, VK_MAX_DEVICE_GROUP_SIZE_KHX * sizeof( uint32_t ) ) == 0 )
+ && ( modes == rhs.modes );
+ }
+
+ bool operator!=( DeviceGroupPresentCapabilitiesKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ uint32_t presentMask[VK_MAX_DEVICE_GROUP_SIZE_KHX];
+ DeviceGroupPresentModeFlagsKHX modes;
+ };
+ static_assert( sizeof( DeviceGroupPresentCapabilitiesKHX ) == sizeof( VkDeviceGroupPresentCapabilitiesKHX ), "struct and wrapper have different size!" );
+
+ struct DeviceGroupPresentInfoKHX
+ {
+ DeviceGroupPresentInfoKHX( uint32_t swapchainCount_ = 0, const uint32_t* pDeviceMasks_ = nullptr, DeviceGroupPresentModeFlagBitsKHX mode_ = DeviceGroupPresentModeFlagBitsKHX::eLocal )
+ : sType( StructureType::eDeviceGroupPresentInfoKHX )
+ , pNext( nullptr )
+ , swapchainCount( swapchainCount_ )
+ , pDeviceMasks( pDeviceMasks_ )
+ , mode( mode_ )
+ {
+ }
+
+ DeviceGroupPresentInfoKHX( VkDeviceGroupPresentInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DeviceGroupPresentInfoKHX) );
+ }
+
+ DeviceGroupPresentInfoKHX& operator=( VkDeviceGroupPresentInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DeviceGroupPresentInfoKHX) );
+ return *this;
+ }
+
+ DeviceGroupPresentInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DeviceGroupPresentInfoKHX& setSwapchainCount( uint32_t swapchainCount_ )
+ {
+ swapchainCount = swapchainCount_;
+ return *this;
+ }
+
+ DeviceGroupPresentInfoKHX& setPDeviceMasks( const uint32_t* pDeviceMasks_ )
+ {
+ pDeviceMasks = pDeviceMasks_;
+ return *this;
+ }
+
+ DeviceGroupPresentInfoKHX& setMode( DeviceGroupPresentModeFlagBitsKHX mode_ )
+ {
+ mode = mode_;
+ return *this;
+ }
+
+ operator const VkDeviceGroupPresentInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkDeviceGroupPresentInfoKHX*>(this);
+ }
+
+ bool operator==( DeviceGroupPresentInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( swapchainCount == rhs.swapchainCount )
+ && ( pDeviceMasks == rhs.pDeviceMasks )
+ && ( mode == rhs.mode );
+ }
+
+ bool operator!=( DeviceGroupPresentInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ uint32_t swapchainCount;
+ const uint32_t* pDeviceMasks;
+ DeviceGroupPresentModeFlagBitsKHX mode;
+ };
+ static_assert( sizeof( DeviceGroupPresentInfoKHX ) == sizeof( VkDeviceGroupPresentInfoKHX ), "struct and wrapper have different size!" );
+
+ struct DeviceGroupSwapchainCreateInfoKHX
+ {
+ DeviceGroupSwapchainCreateInfoKHX( DeviceGroupPresentModeFlagsKHX modes_ = DeviceGroupPresentModeFlagsKHX() )
+ : sType( StructureType::eDeviceGroupSwapchainCreateInfoKHX )
+ , pNext( nullptr )
+ , modes( modes_ )
+ {
+ }
+
+ DeviceGroupSwapchainCreateInfoKHX( VkDeviceGroupSwapchainCreateInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DeviceGroupSwapchainCreateInfoKHX) );
+ }
+
+ DeviceGroupSwapchainCreateInfoKHX& operator=( VkDeviceGroupSwapchainCreateInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DeviceGroupSwapchainCreateInfoKHX) );
+ return *this;
+ }
+
+ DeviceGroupSwapchainCreateInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DeviceGroupSwapchainCreateInfoKHX& setModes( DeviceGroupPresentModeFlagsKHX modes_ )
+ {
+ modes = modes_;
+ return *this;
+ }
+
+ operator const VkDeviceGroupSwapchainCreateInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkDeviceGroupSwapchainCreateInfoKHX*>(this);
+ }
+
+ bool operator==( DeviceGroupSwapchainCreateInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( modes == rhs.modes );
+ }
+
+ bool operator!=( DeviceGroupSwapchainCreateInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ DeviceGroupPresentModeFlagsKHX modes;
+ };
+ static_assert( sizeof( DeviceGroupSwapchainCreateInfoKHX ) == sizeof( VkDeviceGroupSwapchainCreateInfoKHX ), "struct and wrapper have different size!" );
+
+ enum class SwapchainCreateFlagBitsKHR
+ {
+ eBindSfrKHX = VK_SWAPCHAIN_CREATE_BIND_SFR_BIT_KHX
+ };
+
+ using SwapchainCreateFlagsKHR = Flags<SwapchainCreateFlagBitsKHR, VkSwapchainCreateFlagsKHR>;
+
+ VULKAN_HPP_INLINE SwapchainCreateFlagsKHR operator|( SwapchainCreateFlagBitsKHR bit0, SwapchainCreateFlagBitsKHR bit1 )
+ {
+ return SwapchainCreateFlagsKHR( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE SwapchainCreateFlagsKHR operator~( SwapchainCreateFlagBitsKHR bits )
+ {
+ return ~( SwapchainCreateFlagsKHR( bits ) );
+ }
+
+ template <> struct FlagTraits<SwapchainCreateFlagBitsKHR>
+ {
+ enum
+ {
+ allFlags = VkFlags(SwapchainCreateFlagBitsKHR::eBindSfrKHX)
+ };
+ };
+
+ struct SwapchainCreateInfoKHR
+ {
+ SwapchainCreateInfoKHR( SwapchainCreateFlagsKHR flags_ = SwapchainCreateFlagsKHR(), SurfaceKHR surface_ = SurfaceKHR(), uint32_t minImageCount_ = 0, Format imageFormat_ = Format::eUndefined, ColorSpaceKHR imageColorSpace_ = ColorSpaceKHR::eSrgbNonlinear, Extent2D imageExtent_ = Extent2D(), uint32_t imageArrayLayers_ = 0, ImageUsageFlags imageUsage_ = ImageUsageFlags(), SharingMode imageSharingMode_ = SharingMode::eExclusive, uint32_t queueFamilyIndexCount_ = 0, const uint32_t* pQueueFamilyIndices_ = nullptr, SurfaceTransformFlagBitsKHR preTransform_ = SurfaceTransformFlagBitsKHR::eIdentity, CompositeAlphaFlagBitsKHR compositeAlpha_ = CompositeAlphaFlagBitsKHR::eOpaque, PresentModeKHR presentMode_ = PresentModeKHR::eImmediate, Bool32 clipped_ = 0, SwapchainKHR oldSwapchain_ = SwapchainKHR() )
+ : sType( StructureType::eSwapchainCreateInfoKHR )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , surface( surface_ )
+ , minImageCount( minImageCount_ )
+ , imageFormat( imageFormat_ )
+ , imageColorSpace( imageColorSpace_ )
+ , imageExtent( imageExtent_ )
+ , imageArrayLayers( imageArrayLayers_ )
+ , imageUsage( imageUsage_ )
+ , imageSharingMode( imageSharingMode_ )
+ , queueFamilyIndexCount( queueFamilyIndexCount_ )
+ , pQueueFamilyIndices( pQueueFamilyIndices_ )
+ , preTransform( preTransform_ )
+ , compositeAlpha( compositeAlpha_ )
+ , presentMode( presentMode_ )
+ , clipped( clipped_ )
+ , oldSwapchain( oldSwapchain_ )
+ {
+ }
+
+ SwapchainCreateInfoKHR( VkSwapchainCreateInfoKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SwapchainCreateInfoKHR) );
+ }
+
+ SwapchainCreateInfoKHR& operator=( VkSwapchainCreateInfoKHR const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SwapchainCreateInfoKHR) );
+ return *this;
+ }
+
+ SwapchainCreateInfoKHR& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ SwapchainCreateInfoKHR& setFlags( SwapchainCreateFlagsKHR flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ SwapchainCreateInfoKHR& setSurface( SurfaceKHR surface_ )
+ {
+ surface = surface_;
+ return *this;
+ }
+
+ SwapchainCreateInfoKHR& setMinImageCount( uint32_t minImageCount_ )
+ {
+ minImageCount = minImageCount_;
+ return *this;
+ }
+
+ SwapchainCreateInfoKHR& setImageFormat( Format imageFormat_ )
+ {
+ imageFormat = imageFormat_;
+ return *this;
+ }
+
+ SwapchainCreateInfoKHR& setImageColorSpace( ColorSpaceKHR imageColorSpace_ )
+ {
+ imageColorSpace = imageColorSpace_;
+ return *this;
+ }
+
+ SwapchainCreateInfoKHR& setImageExtent( Extent2D imageExtent_ )
+ {
+ imageExtent = imageExtent_;
+ return *this;
+ }
+
+ SwapchainCreateInfoKHR& setImageArrayLayers( uint32_t imageArrayLayers_ )
+ {
+ imageArrayLayers = imageArrayLayers_;
+ return *this;
+ }
+
+ SwapchainCreateInfoKHR& setImageUsage( ImageUsageFlags imageUsage_ )
+ {
+ imageUsage = imageUsage_;
+ return *this;
+ }
+
+ SwapchainCreateInfoKHR& setImageSharingMode( SharingMode imageSharingMode_ )
+ {
+ imageSharingMode = imageSharingMode_;
+ return *this;
+ }
+
+ SwapchainCreateInfoKHR& setQueueFamilyIndexCount( uint32_t queueFamilyIndexCount_ )
+ {
+ queueFamilyIndexCount = queueFamilyIndexCount_;
+ return *this;
+ }
+
+ SwapchainCreateInfoKHR& setPQueueFamilyIndices( const uint32_t* pQueueFamilyIndices_ )
+ {
+ pQueueFamilyIndices = pQueueFamilyIndices_;
+ return *this;
+ }
+
+ SwapchainCreateInfoKHR& setPreTransform( SurfaceTransformFlagBitsKHR preTransform_ )
+ {
+ preTransform = preTransform_;
+ return *this;
+ }
+
+ SwapchainCreateInfoKHR& setCompositeAlpha( CompositeAlphaFlagBitsKHR compositeAlpha_ )
+ {
+ compositeAlpha = compositeAlpha_;
+ return *this;
+ }
+
+ SwapchainCreateInfoKHR& setPresentMode( PresentModeKHR presentMode_ )
+ {
+ presentMode = presentMode_;
+ return *this;
+ }
+
+ SwapchainCreateInfoKHR& setClipped( Bool32 clipped_ )
+ {
+ clipped = clipped_;
+ return *this;
+ }
+
+ SwapchainCreateInfoKHR& setOldSwapchain( SwapchainKHR oldSwapchain_ )
+ {
+ oldSwapchain = oldSwapchain_;
+ return *this;
+ }
+
+ operator const VkSwapchainCreateInfoKHR&() const
+ {
+ return *reinterpret_cast<const VkSwapchainCreateInfoKHR*>(this);
+ }
+
+ bool operator==( SwapchainCreateInfoKHR const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( surface == rhs.surface )
+ && ( minImageCount == rhs.minImageCount )
+ && ( imageFormat == rhs.imageFormat )
+ && ( imageColorSpace == rhs.imageColorSpace )
+ && ( imageExtent == rhs.imageExtent )
+ && ( imageArrayLayers == rhs.imageArrayLayers )
+ && ( imageUsage == rhs.imageUsage )
+ && ( imageSharingMode == rhs.imageSharingMode )
+ && ( queueFamilyIndexCount == rhs.queueFamilyIndexCount )
+ && ( pQueueFamilyIndices == rhs.pQueueFamilyIndices )
+ && ( preTransform == rhs.preTransform )
+ && ( compositeAlpha == rhs.compositeAlpha )
+ && ( presentMode == rhs.presentMode )
+ && ( clipped == rhs.clipped )
+ && ( oldSwapchain == rhs.oldSwapchain );
+ }
+
+ bool operator!=( SwapchainCreateInfoKHR const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ SwapchainCreateFlagsKHR flags;
+ SurfaceKHR surface;
+ uint32_t minImageCount;
+ Format imageFormat;
+ ColorSpaceKHR imageColorSpace;
+ Extent2D imageExtent;
+ uint32_t imageArrayLayers;
+ ImageUsageFlags imageUsage;
+ SharingMode imageSharingMode;
+ uint32_t queueFamilyIndexCount;
+ const uint32_t* pQueueFamilyIndices;
+ SurfaceTransformFlagBitsKHR preTransform;
+ CompositeAlphaFlagBitsKHR compositeAlpha;
+ PresentModeKHR presentMode;
+ Bool32 clipped;
+ SwapchainKHR oldSwapchain;
+ };
+ static_assert( sizeof( SwapchainCreateInfoKHR ) == sizeof( VkSwapchainCreateInfoKHR ), "struct and wrapper have different size!" );
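+  // Illustrative sketch (not part of the generated header): these wrapper structs
+  // are intended to be populated by chaining the set*() members, then passed
+  // wherever the corresponding Vk* struct is expected via the implicit conversion
+  // operator. 'surface', 'extent' and 'device' are placeholders the application
+  // obtains elsewhere; values shown are just plausible defaults.
+  //
+  //   vk::SwapchainCreateInfoKHR swapchainInfo = vk::SwapchainCreateInfoKHR()
+  //     .setSurface( surface )
+  //     .setMinImageCount( 2 )
+  //     .setImageFormat( vk::Format::eB8G8R8A8Unorm )
+  //     .setImageColorSpace( vk::ColorSpaceKHR::eSrgbNonlinear )
+  //     .setImageExtent( extent )
+  //     .setImageArrayLayers( 1 )
+  //     .setImageUsage( vk::ImageUsageFlagBits::eColorAttachment )
+  //     .setImageSharingMode( vk::SharingMode::eExclusive )
+  //     .setPreTransform( vk::SurfaceTransformFlagBitsKHR::eIdentity )
+  //     .setCompositeAlpha( vk::CompositeAlphaFlagBitsKHR::eOpaque )
+  //     .setPresentMode( vk::PresentModeKHR::eFifo )
+  //     .setClipped( VK_TRUE );
+  //   vk::SwapchainKHR swapchain = device.createSwapchainKHR( swapchainInfo );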
+
+ enum class ViewportCoordinateSwizzleNV
+ {
+ ePositiveX = VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV,
+ eNegativeX = VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_X_NV,
+ ePositiveY = VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Y_NV,
+ eNegativeY = VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Y_NV,
+ ePositiveZ = VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Z_NV,
+ eNegativeZ = VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Z_NV,
+ ePositiveW = VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_W_NV,
+ eNegativeW = VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV
+ };
+
+ struct ViewportSwizzleNV
+ {
+ ViewportSwizzleNV( ViewportCoordinateSwizzleNV x_ = ViewportCoordinateSwizzleNV::ePositiveX, ViewportCoordinateSwizzleNV y_ = ViewportCoordinateSwizzleNV::ePositiveX, ViewportCoordinateSwizzleNV z_ = ViewportCoordinateSwizzleNV::ePositiveX, ViewportCoordinateSwizzleNV w_ = ViewportCoordinateSwizzleNV::ePositiveX )
+ : x( x_ )
+ , y( y_ )
+ , z( z_ )
+ , w( w_ )
+ {
+ }
+
+ ViewportSwizzleNV( VkViewportSwizzleNV const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ViewportSwizzleNV) );
+ }
+
+ ViewportSwizzleNV& operator=( VkViewportSwizzleNV const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(ViewportSwizzleNV) );
+ return *this;
+ }
+
+ ViewportSwizzleNV& setX( ViewportCoordinateSwizzleNV x_ )
+ {
+ x = x_;
+ return *this;
+ }
+
+ ViewportSwizzleNV& setY( ViewportCoordinateSwizzleNV y_ )
+ {
+ y = y_;
+ return *this;
+ }
+
+ ViewportSwizzleNV& setZ( ViewportCoordinateSwizzleNV z_ )
+ {
+ z = z_;
+ return *this;
+ }
+
+ ViewportSwizzleNV& setW( ViewportCoordinateSwizzleNV w_ )
+ {
+ w = w_;
+ return *this;
+ }
+
+ operator const VkViewportSwizzleNV&() const
+ {
+ return *reinterpret_cast<const VkViewportSwizzleNV*>(this);
+ }
+
+ bool operator==( ViewportSwizzleNV const& rhs ) const
+ {
+ return ( x == rhs.x )
+ && ( y == rhs.y )
+ && ( z == rhs.z )
+ && ( w == rhs.w );
+ }
+
+ bool operator!=( ViewportSwizzleNV const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ ViewportCoordinateSwizzleNV x;
+ ViewportCoordinateSwizzleNV y;
+ ViewportCoordinateSwizzleNV z;
+ ViewportCoordinateSwizzleNV w;
+ };
+ static_assert( sizeof( ViewportSwizzleNV ) == sizeof( VkViewportSwizzleNV ), "struct and wrapper have different size!" );
+
+ struct PipelineViewportSwizzleStateCreateInfoNV
+ {
+ PipelineViewportSwizzleStateCreateInfoNV( PipelineViewportSwizzleStateCreateFlagsNV flags_ = PipelineViewportSwizzleStateCreateFlagsNV(), uint32_t viewportCount_ = 0, const ViewportSwizzleNV* pViewportSwizzles_ = nullptr )
+ : sType( StructureType::ePipelineViewportSwizzleStateCreateInfoNV )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , viewportCount( viewportCount_ )
+ , pViewportSwizzles( pViewportSwizzles_ )
+ {
+ }
+
+ PipelineViewportSwizzleStateCreateInfoNV( VkPipelineViewportSwizzleStateCreateInfoNV const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineViewportSwizzleStateCreateInfoNV) );
+ }
+
+ PipelineViewportSwizzleStateCreateInfoNV& operator=( VkPipelineViewportSwizzleStateCreateInfoNV const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineViewportSwizzleStateCreateInfoNV) );
+ return *this;
+ }
+
+ PipelineViewportSwizzleStateCreateInfoNV& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PipelineViewportSwizzleStateCreateInfoNV& setFlags( PipelineViewportSwizzleStateCreateFlagsNV flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ PipelineViewportSwizzleStateCreateInfoNV& setViewportCount( uint32_t viewportCount_ )
+ {
+ viewportCount = viewportCount_;
+ return *this;
+ }
+
+ PipelineViewportSwizzleStateCreateInfoNV& setPViewportSwizzles( const ViewportSwizzleNV* pViewportSwizzles_ )
+ {
+ pViewportSwizzles = pViewportSwizzles_;
+ return *this;
+ }
+
+ operator const VkPipelineViewportSwizzleStateCreateInfoNV&() const
+ {
+ return *reinterpret_cast<const VkPipelineViewportSwizzleStateCreateInfoNV*>(this);
+ }
+
+ bool operator==( PipelineViewportSwizzleStateCreateInfoNV const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( viewportCount == rhs.viewportCount )
+ && ( pViewportSwizzles == rhs.pViewportSwizzles );
+ }
+
+ bool operator!=( PipelineViewportSwizzleStateCreateInfoNV const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ PipelineViewportSwizzleStateCreateFlagsNV flags;
+ uint32_t viewportCount;
+ const ViewportSwizzleNV* pViewportSwizzles;
+ };
+ static_assert( sizeof( PipelineViewportSwizzleStateCreateInfoNV ) == sizeof( VkPipelineViewportSwizzleStateCreateInfoNV ), "struct and wrapper have different size!" );
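+  // Illustrative sketch (not part of the generated header): per-viewport swizzles
+  // are described with ViewportSwizzleNV entries and referenced from this NV
+  // create-info, which the application chains into its pipeline viewport state
+  // via pNext (assuming VK_NV_viewport_swizzle is enabled on the device).
+  //
+  //   vk::ViewportSwizzleNV swizzles[1] = {
+  //     vk::ViewportSwizzleNV( vk::ViewportCoordinateSwizzleNV::ePositiveX,
+  //                            vk::ViewportCoordinateSwizzleNV::ePositiveY,
+  //                            vk::ViewportCoordinateSwizzleNV::ePositiveZ,
+  //                            vk::ViewportCoordinateSwizzleNV::ePositiveW ) };
+  //   vk::PipelineViewportSwizzleStateCreateInfoNV swizzleState =
+  //     vk::PipelineViewportSwizzleStateCreateInfoNV()
+  //       .setViewportCount( 1 )
+  //       .setPViewportSwizzles( swizzles );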
+
+ enum class DiscardRectangleModeEXT
+ {
+ eInclusive = VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT,
+ eExclusive = VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT
+ };
+
+ struct PipelineDiscardRectangleStateCreateInfoEXT
+ {
+ PipelineDiscardRectangleStateCreateInfoEXT( PipelineDiscardRectangleStateCreateFlagsEXT flags_ = PipelineDiscardRectangleStateCreateFlagsEXT(), DiscardRectangleModeEXT discardRectangleMode_ = DiscardRectangleModeEXT::eInclusive, uint32_t discardRectangleCount_ = 0, const Rect2D* pDiscardRectangles_ = nullptr )
+ : sType( StructureType::ePipelineDiscardRectangleStateCreateInfoEXT )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , discardRectangleMode( discardRectangleMode_ )
+ , discardRectangleCount( discardRectangleCount_ )
+ , pDiscardRectangles( pDiscardRectangles_ )
+ {
+ }
+
+ PipelineDiscardRectangleStateCreateInfoEXT( VkPipelineDiscardRectangleStateCreateInfoEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineDiscardRectangleStateCreateInfoEXT) );
+ }
+
+ PipelineDiscardRectangleStateCreateInfoEXT& operator=( VkPipelineDiscardRectangleStateCreateInfoEXT const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(PipelineDiscardRectangleStateCreateInfoEXT) );
+ return *this;
+ }
+
+ PipelineDiscardRectangleStateCreateInfoEXT& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ PipelineDiscardRectangleStateCreateInfoEXT& setFlags( PipelineDiscardRectangleStateCreateFlagsEXT flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ PipelineDiscardRectangleStateCreateInfoEXT& setDiscardRectangleMode( DiscardRectangleModeEXT discardRectangleMode_ )
+ {
+ discardRectangleMode = discardRectangleMode_;
+ return *this;
+ }
+
+ PipelineDiscardRectangleStateCreateInfoEXT& setDiscardRectangleCount( uint32_t discardRectangleCount_ )
+ {
+ discardRectangleCount = discardRectangleCount_;
+ return *this;
+ }
+
+ PipelineDiscardRectangleStateCreateInfoEXT& setPDiscardRectangles( const Rect2D* pDiscardRectangles_ )
+ {
+ pDiscardRectangles = pDiscardRectangles_;
+ return *this;
+ }
+
+ operator const VkPipelineDiscardRectangleStateCreateInfoEXT&() const
+ {
+ return *reinterpret_cast<const VkPipelineDiscardRectangleStateCreateInfoEXT*>(this);
+ }
+
+ bool operator==( PipelineDiscardRectangleStateCreateInfoEXT const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( discardRectangleMode == rhs.discardRectangleMode )
+ && ( discardRectangleCount == rhs.discardRectangleCount )
+ && ( pDiscardRectangles == rhs.pDiscardRectangles );
+ }
+
+ bool operator!=( PipelineDiscardRectangleStateCreateInfoEXT const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ PipelineDiscardRectangleStateCreateFlagsEXT flags;
+ DiscardRectangleModeEXT discardRectangleMode;
+ uint32_t discardRectangleCount;
+ const Rect2D* pDiscardRectangles;
+ };
+ static_assert( sizeof( PipelineDiscardRectangleStateCreateInfoEXT ) == sizeof( VkPipelineDiscardRectangleStateCreateInfoEXT ), "struct and wrapper have different size!" );
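+  // Illustrative sketch (not part of the generated header): this is an extension
+  // structure that is chained into a graphics pipeline create-info via pNext,
+  // assuming VK_EXT_discard_rectangles is enabled. 'discardRect' is a placeholder
+  // vk::Rect2D supplied by the application.
+  //
+  //   vk::PipelineDiscardRectangleStateCreateInfoEXT discardState =
+  //     vk::PipelineDiscardRectangleStateCreateInfoEXT()
+  //       .setDiscardRectangleMode( vk::DiscardRectangleModeEXT::eInclusive )
+  //       .setDiscardRectangleCount( 1 )
+  //       .setPDiscardRectangles( &discardRect );
+  //   vk::GraphicsPipelineCreateInfo pipelineInfo;
+  //   pipelineInfo.setPNext( &discardState );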
+
+ enum class SubpassDescriptionFlagBits
+ {
+ ePerViewAttributesNVX = VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX,
+ ePerViewPositionXOnlyNVX = VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX
+ };
+
+ using SubpassDescriptionFlags = Flags<SubpassDescriptionFlagBits, VkSubpassDescriptionFlags>;
+
+ VULKAN_HPP_INLINE SubpassDescriptionFlags operator|( SubpassDescriptionFlagBits bit0, SubpassDescriptionFlagBits bit1 )
+ {
+ return SubpassDescriptionFlags( bit0 ) | bit1;
+ }
+
+ VULKAN_HPP_INLINE SubpassDescriptionFlags operator~( SubpassDescriptionFlagBits bits )
+ {
+ return ~( SubpassDescriptionFlags( bits ) );
+ }
+
+ template <> struct FlagTraits<SubpassDescriptionFlagBits>
+ {
+ enum
+ {
+ allFlags = VkFlags(SubpassDescriptionFlagBits::ePerViewAttributesNVX) | VkFlags(SubpassDescriptionFlagBits::ePerViewPositionXOnlyNVX)
+ };
+ };
+
+ struct SubpassDescription
+ {
+ SubpassDescription( SubpassDescriptionFlags flags_ = SubpassDescriptionFlags(), PipelineBindPoint pipelineBindPoint_ = PipelineBindPoint::eGraphics, uint32_t inputAttachmentCount_ = 0, const AttachmentReference* pInputAttachments_ = nullptr, uint32_t colorAttachmentCount_ = 0, const AttachmentReference* pColorAttachments_ = nullptr, const AttachmentReference* pResolveAttachments_ = nullptr, const AttachmentReference* pDepthStencilAttachment_ = nullptr, uint32_t preserveAttachmentCount_ = 0, const uint32_t* pPreserveAttachments_ = nullptr )
+ : flags( flags_ )
+ , pipelineBindPoint( pipelineBindPoint_ )
+ , inputAttachmentCount( inputAttachmentCount_ )
+ , pInputAttachments( pInputAttachments_ )
+ , colorAttachmentCount( colorAttachmentCount_ )
+ , pColorAttachments( pColorAttachments_ )
+ , pResolveAttachments( pResolveAttachments_ )
+ , pDepthStencilAttachment( pDepthStencilAttachment_ )
+ , preserveAttachmentCount( preserveAttachmentCount_ )
+ , pPreserveAttachments( pPreserveAttachments_ )
+ {
+ }
+
+ SubpassDescription( VkSubpassDescription const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SubpassDescription) );
+ }
+
+ SubpassDescription& operator=( VkSubpassDescription const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SubpassDescription) );
+ return *this;
+ }
+
+ SubpassDescription& setFlags( SubpassDescriptionFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ SubpassDescription& setPipelineBindPoint( PipelineBindPoint pipelineBindPoint_ )
+ {
+ pipelineBindPoint = pipelineBindPoint_;
+ return *this;
+ }
+
+ SubpassDescription& setInputAttachmentCount( uint32_t inputAttachmentCount_ )
+ {
+ inputAttachmentCount = inputAttachmentCount_;
+ return *this;
+ }
+
+ SubpassDescription& setPInputAttachments( const AttachmentReference* pInputAttachments_ )
+ {
+ pInputAttachments = pInputAttachments_;
+ return *this;
+ }
+
+ SubpassDescription& setColorAttachmentCount( uint32_t colorAttachmentCount_ )
+ {
+ colorAttachmentCount = colorAttachmentCount_;
+ return *this;
+ }
+
+ SubpassDescription& setPColorAttachments( const AttachmentReference* pColorAttachments_ )
+ {
+ pColorAttachments = pColorAttachments_;
+ return *this;
+ }
+
+ SubpassDescription& setPResolveAttachments( const AttachmentReference* pResolveAttachments_ )
+ {
+ pResolveAttachments = pResolveAttachments_;
+ return *this;
+ }
+
+ SubpassDescription& setPDepthStencilAttachment( const AttachmentReference* pDepthStencilAttachment_ )
+ {
+ pDepthStencilAttachment = pDepthStencilAttachment_;
+ return *this;
+ }
+
+ SubpassDescription& setPreserveAttachmentCount( uint32_t preserveAttachmentCount_ )
+ {
+ preserveAttachmentCount = preserveAttachmentCount_;
+ return *this;
+ }
+
+ SubpassDescription& setPPreserveAttachments( const uint32_t* pPreserveAttachments_ )
+ {
+ pPreserveAttachments = pPreserveAttachments_;
+ return *this;
+ }
+
+ operator const VkSubpassDescription&() const
+ {
+ return *reinterpret_cast<const VkSubpassDescription*>(this);
+ }
+
+ bool operator==( SubpassDescription const& rhs ) const
+ {
+ return ( flags == rhs.flags )
+ && ( pipelineBindPoint == rhs.pipelineBindPoint )
+ && ( inputAttachmentCount == rhs.inputAttachmentCount )
+ && ( pInputAttachments == rhs.pInputAttachments )
+ && ( colorAttachmentCount == rhs.colorAttachmentCount )
+ && ( pColorAttachments == rhs.pColorAttachments )
+ && ( pResolveAttachments == rhs.pResolveAttachments )
+ && ( pDepthStencilAttachment == rhs.pDepthStencilAttachment )
+ && ( preserveAttachmentCount == rhs.preserveAttachmentCount )
+ && ( pPreserveAttachments == rhs.pPreserveAttachments );
+ }
+
+ bool operator!=( SubpassDescription const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ SubpassDescriptionFlags flags;
+ PipelineBindPoint pipelineBindPoint;
+ uint32_t inputAttachmentCount;
+ const AttachmentReference* pInputAttachments;
+ uint32_t colorAttachmentCount;
+ const AttachmentReference* pColorAttachments;
+ const AttachmentReference* pResolveAttachments;
+ const AttachmentReference* pDepthStencilAttachment;
+ uint32_t preserveAttachmentCount;
+ const uint32_t* pPreserveAttachments;
+ };
+ static_assert( sizeof( SubpassDescription ) == sizeof( VkSubpassDescription ), "struct and wrapper have different size!" );
+
+ struct RenderPassCreateInfo
+ {
+ RenderPassCreateInfo( RenderPassCreateFlags flags_ = RenderPassCreateFlags(), uint32_t attachmentCount_ = 0, const AttachmentDescription* pAttachments_ = nullptr, uint32_t subpassCount_ = 0, const SubpassDescription* pSubpasses_ = nullptr, uint32_t dependencyCount_ = 0, const SubpassDependency* pDependencies_ = nullptr )
+ : sType( StructureType::eRenderPassCreateInfo )
+ , pNext( nullptr )
+ , flags( flags_ )
+ , attachmentCount( attachmentCount_ )
+ , pAttachments( pAttachments_ )
+ , subpassCount( subpassCount_ )
+ , pSubpasses( pSubpasses_ )
+ , dependencyCount( dependencyCount_ )
+ , pDependencies( pDependencies_ )
+ {
+ }
+
+ RenderPassCreateInfo( VkRenderPassCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(RenderPassCreateInfo) );
+ }
+
+ RenderPassCreateInfo& operator=( VkRenderPassCreateInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(RenderPassCreateInfo) );
+ return *this;
+ }
+
+ RenderPassCreateInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ RenderPassCreateInfo& setFlags( RenderPassCreateFlags flags_ )
+ {
+ flags = flags_;
+ return *this;
+ }
+
+ RenderPassCreateInfo& setAttachmentCount( uint32_t attachmentCount_ )
+ {
+ attachmentCount = attachmentCount_;
+ return *this;
+ }
+
+ RenderPassCreateInfo& setPAttachments( const AttachmentDescription* pAttachments_ )
+ {
+ pAttachments = pAttachments_;
+ return *this;
+ }
+
+ RenderPassCreateInfo& setSubpassCount( uint32_t subpassCount_ )
+ {
+ subpassCount = subpassCount_;
+ return *this;
+ }
+
+ RenderPassCreateInfo& setPSubpasses( const SubpassDescription* pSubpasses_ )
+ {
+ pSubpasses = pSubpasses_;
+ return *this;
+ }
+
+ RenderPassCreateInfo& setDependencyCount( uint32_t dependencyCount_ )
+ {
+ dependencyCount = dependencyCount_;
+ return *this;
+ }
+
+ RenderPassCreateInfo& setPDependencies( const SubpassDependency* pDependencies_ )
+ {
+ pDependencies = pDependencies_;
+ return *this;
+ }
+
+ operator const VkRenderPassCreateInfo&() const
+ {
+ return *reinterpret_cast<const VkRenderPassCreateInfo*>(this);
+ }
+
+ bool operator==( RenderPassCreateInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( flags == rhs.flags )
+ && ( attachmentCount == rhs.attachmentCount )
+ && ( pAttachments == rhs.pAttachments )
+ && ( subpassCount == rhs.subpassCount )
+ && ( pSubpasses == rhs.pSubpasses )
+ && ( dependencyCount == rhs.dependencyCount )
+ && ( pDependencies == rhs.pDependencies );
+ }
+
+ bool operator!=( RenderPassCreateInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ RenderPassCreateFlags flags;
+ uint32_t attachmentCount;
+ const AttachmentDescription* pAttachments;
+ uint32_t subpassCount;
+ const SubpassDescription* pSubpasses;
+ uint32_t dependencyCount;
+ const SubpassDependency* pDependencies;
+ };
+ static_assert( sizeof( RenderPassCreateInfo ) == sizeof( VkRenderPassCreateInfo ), "struct and wrapper have different size!" );
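+  // Illustrative sketch (not part of the generated header): a minimal render pass
+  // with one color attachment and one graphics subpass, assuming 'colorAttachment'
+  // is an AttachmentDescription already filled in by the application and 'device'
+  // is a vk::Device.
+  //
+  //   vk::AttachmentReference colorRef( 0, vk::ImageLayout::eColorAttachmentOptimal );
+  //   vk::SubpassDescription subpass = vk::SubpassDescription()
+  //     .setPipelineBindPoint( vk::PipelineBindPoint::eGraphics )
+  //     .setColorAttachmentCount( 1 )
+  //     .setPColorAttachments( &colorRef );
+  //   vk::RenderPassCreateInfo renderPassInfo = vk::RenderPassCreateInfo()
+  //     .setAttachmentCount( 1 )
+  //     .setPAttachments( &colorAttachment )
+  //     .setSubpassCount( 1 )
+  //     .setPSubpasses( &subpass );
+  //   vk::RenderPass renderPass = device.createRenderPass( renderPassInfo );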
+
+ Result enumerateInstanceLayerProperties( uint32_t* pPropertyCount, LayerProperties* pProperties );
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<LayerProperties>>
+ typename ResultValueType<std::vector<LayerProperties,Allocator>>::type enumerateInstanceLayerProperties();
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result enumerateInstanceLayerProperties( uint32_t* pPropertyCount, LayerProperties* pProperties )
+ {
+ return static_cast<Result>( vkEnumerateInstanceLayerProperties( pPropertyCount, reinterpret_cast<VkLayerProperties*>( pProperties ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE typename ResultValueType<std::vector<LayerProperties,Allocator>>::type enumerateInstanceLayerProperties()
+ {
+ std::vector<LayerProperties,Allocator> properties;
+ uint32_t propertyCount;
+ Result result;
+ do
+ {
+ result = static_cast<Result>( vkEnumerateInstanceLayerProperties( &propertyCount, nullptr ) );
+ if ( ( result == Result::eSuccess ) && propertyCount )
+ {
+ properties.resize( propertyCount );
+ result = static_cast<Result>( vkEnumerateInstanceLayerProperties( &propertyCount, reinterpret_cast<VkLayerProperties*>( properties.data() ) ) );
+ }
+ } while ( result == Result::eIncomplete );
+ assert( propertyCount <= properties.size() );
+ properties.resize( propertyCount );
+ return createResultValue( result, properties, "vk::enumerateInstanceLayerProperties" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+ Result enumerateInstanceExtensionProperties( const char* pLayerName, uint32_t* pPropertyCount, ExtensionProperties* pProperties );
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<ExtensionProperties>>
+ typename ResultValueType<std::vector<ExtensionProperties,Allocator>>::type enumerateInstanceExtensionProperties( Optional<const std::string> layerName = nullptr );
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result enumerateInstanceExtensionProperties( const char* pLayerName, uint32_t* pPropertyCount, ExtensionProperties* pProperties )
+ {
+ return static_cast<Result>( vkEnumerateInstanceExtensionProperties( pLayerName, pPropertyCount, reinterpret_cast<VkExtensionProperties*>( pProperties ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE typename ResultValueType<std::vector<ExtensionProperties,Allocator>>::type enumerateInstanceExtensionProperties( Optional<const std::string> layerName )
+ {
+ std::vector<ExtensionProperties,Allocator> properties;
+ uint32_t propertyCount;
+ Result result;
+ do
+ {
+ result = static_cast<Result>( vkEnumerateInstanceExtensionProperties( layerName ? layerName->c_str() : nullptr, &propertyCount, nullptr ) );
+ if ( ( result == Result::eSuccess ) && propertyCount )
+ {
+ properties.resize( propertyCount );
+ result = static_cast<Result>( vkEnumerateInstanceExtensionProperties( layerName ? layerName->c_str() : nullptr, &propertyCount, reinterpret_cast<VkExtensionProperties*>( properties.data() ) ) );
+ }
+ } while ( result == Result::eIncomplete );
+ assert( propertyCount <= properties.size() );
+ properties.resize( propertyCount );
+ return createResultValue( result, properties, "vk::enumerateInstanceExtensionProperties" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
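+  // Illustrative sketch (not part of the generated header): in enhanced mode the
+  // free functions above hide the two-call enumerate pattern and return a
+  // std::vector directly, reporting failure through createResultValue (which
+  // throws when exceptions are enabled).
+  //
+  //   std::vector<vk::LayerProperties> layers = vk::enumerateInstanceLayerProperties();
+  //   std::vector<vk::ExtensionProperties> extensions = vk::enumerateInstanceExtensionProperties();
+  //   for ( auto const& ext : extensions )
+  //   {
+  //     // ext.extensionName is a fixed-size char array, e.g. "VK_KHR_surface"
+  //   }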
+
+
+ // forward declarations
+ struct CmdProcessCommandsInfoNVX;
+
+ class CommandBuffer
+ {
+ public:
+ CommandBuffer()
+ : m_commandBuffer(VK_NULL_HANDLE)
+ {}
+
+ CommandBuffer( std::nullptr_t )
+ : m_commandBuffer(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT CommandBuffer(VkCommandBuffer commandBuffer)
+ : m_commandBuffer(commandBuffer)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ CommandBuffer& operator=(VkCommandBuffer commandBuffer)
+ {
+ m_commandBuffer = commandBuffer;
+ return *this;
+ }
+#endif
+
+ CommandBuffer& operator=( std::nullptr_t )
+ {
+ m_commandBuffer = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(CommandBuffer const &rhs) const
+ {
+ return m_commandBuffer == rhs.m_commandBuffer;
+ }
+
+ bool operator!=(CommandBuffer const &rhs) const
+ {
+ return m_commandBuffer != rhs.m_commandBuffer;
+ }
+
+ bool operator<(CommandBuffer const &rhs) const
+ {
+ return m_commandBuffer < rhs.m_commandBuffer;
+ }
+
+ Result begin( const CommandBufferBeginInfo* pBeginInfo ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<void>::type begin( const CommandBufferBeginInfo & beginInfo ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ Result end() const;
+#else
+ ResultValueType<void>::type end() const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ Result reset( CommandBufferResetFlags flags ) const;
+#else
+ ResultValueType<void>::type reset( CommandBufferResetFlags flags ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void bindPipeline( PipelineBindPoint pipelineBindPoint, Pipeline pipeline ) const;
+
+ void setViewport( uint32_t firstViewport, uint32_t viewportCount, const Viewport* pViewports ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void setViewport( uint32_t firstViewport, ArrayProxy<const Viewport> viewports ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void setScissor( uint32_t firstScissor, uint32_t scissorCount, const Rect2D* pScissors ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void setScissor( uint32_t firstScissor, ArrayProxy<const Rect2D> scissors ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void setLineWidth( float lineWidth ) const;
+
+ void setDepthBias( float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor ) const;
+
+ void setBlendConstants( const float blendConstants[4] ) const;
+
+ void setDepthBounds( float minDepthBounds, float maxDepthBounds ) const;
+
+ void setStencilCompareMask( StencilFaceFlags faceMask, uint32_t compareMask ) const;
+
+ void setStencilWriteMask( StencilFaceFlags faceMask, uint32_t writeMask ) const;
+
+ void setStencilReference( StencilFaceFlags faceMask, uint32_t reference ) const;
+
+ void bindDescriptorSets( PipelineBindPoint pipelineBindPoint, PipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const DescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void bindDescriptorSets( PipelineBindPoint pipelineBindPoint, PipelineLayout layout, uint32_t firstSet, ArrayProxy<const DescriptorSet> descriptorSets, ArrayProxy<const uint32_t> dynamicOffsets ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void bindIndexBuffer( Buffer buffer, DeviceSize offset, IndexType indexType ) const;
+
+ void bindVertexBuffers( uint32_t firstBinding, uint32_t bindingCount, const Buffer* pBuffers, const DeviceSize* pOffsets ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void bindVertexBuffers( uint32_t firstBinding, ArrayProxy<const Buffer> buffers, ArrayProxy<const DeviceSize> offsets ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void draw( uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance ) const;
+
+ void drawIndexed( uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance ) const;
+
+ void drawIndirect( Buffer buffer, DeviceSize offset, uint32_t drawCount, uint32_t stride ) const;
+
+ void drawIndexedIndirect( Buffer buffer, DeviceSize offset, uint32_t drawCount, uint32_t stride ) const;
+
+ void dispatch( uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ ) const;
+
+ void dispatchIndirect( Buffer buffer, DeviceSize offset ) const;
+
+ void copyBuffer( Buffer srcBuffer, Buffer dstBuffer, uint32_t regionCount, const BufferCopy* pRegions ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void copyBuffer( Buffer srcBuffer, Buffer dstBuffer, ArrayProxy<const BufferCopy> regions ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void copyImage( Image srcImage, ImageLayout srcImageLayout, Image dstImage, ImageLayout dstImageLayout, uint32_t regionCount, const ImageCopy* pRegions ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void copyImage( Image srcImage, ImageLayout srcImageLayout, Image dstImage, ImageLayout dstImageLayout, ArrayProxy<const ImageCopy> regions ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void blitImage( Image srcImage, ImageLayout srcImageLayout, Image dstImage, ImageLayout dstImageLayout, uint32_t regionCount, const ImageBlit* pRegions, Filter filter ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void blitImage( Image srcImage, ImageLayout srcImageLayout, Image dstImage, ImageLayout dstImageLayout, ArrayProxy<const ImageBlit> regions, Filter filter ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void copyBufferToImage( Buffer srcBuffer, Image dstImage, ImageLayout dstImageLayout, uint32_t regionCount, const BufferImageCopy* pRegions ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void copyBufferToImage( Buffer srcBuffer, Image dstImage, ImageLayout dstImageLayout, ArrayProxy<const BufferImageCopy> regions ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void copyImageToBuffer( Image srcImage, ImageLayout srcImageLayout, Buffer dstBuffer, uint32_t regionCount, const BufferImageCopy* pRegions ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void copyImageToBuffer( Image srcImage, ImageLayout srcImageLayout, Buffer dstBuffer, ArrayProxy<const BufferImageCopy> regions ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void updateBuffer( Buffer dstBuffer, DeviceSize dstOffset, DeviceSize dataSize, const void* pData ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename T>
+ void updateBuffer( Buffer dstBuffer, DeviceSize dstOffset, ArrayProxy<const T> data ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void fillBuffer( Buffer dstBuffer, DeviceSize dstOffset, DeviceSize size, uint32_t data ) const;
+
+ void clearColorImage( Image image, ImageLayout imageLayout, const ClearColorValue* pColor, uint32_t rangeCount, const ImageSubresourceRange* pRanges ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void clearColorImage( Image image, ImageLayout imageLayout, const ClearColorValue & color, ArrayProxy<const ImageSubresourceRange> ranges ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void clearDepthStencilImage( Image image, ImageLayout imageLayout, const ClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const ImageSubresourceRange* pRanges ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void clearDepthStencilImage( Image image, ImageLayout imageLayout, const ClearDepthStencilValue & depthStencil, ArrayProxy<const ImageSubresourceRange> ranges ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void clearAttachments( uint32_t attachmentCount, const ClearAttachment* pAttachments, uint32_t rectCount, const ClearRect* pRects ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void clearAttachments( ArrayProxy<const ClearAttachment> attachments, ArrayProxy<const ClearRect> rects ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void resolveImage( Image srcImage, ImageLayout srcImageLayout, Image dstImage, ImageLayout dstImageLayout, uint32_t regionCount, const ImageResolve* pRegions ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void resolveImage( Image srcImage, ImageLayout srcImageLayout, Image dstImage, ImageLayout dstImageLayout, ArrayProxy<const ImageResolve> regions ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void setEvent( Event event, PipelineStageFlags stageMask ) const;
+
+ void resetEvent( Event event, PipelineStageFlags stageMask ) const;
+
+ void waitEvents( uint32_t eventCount, const Event* pEvents, PipelineStageFlags srcStageMask, PipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const MemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const BufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const ImageMemoryBarrier* pImageMemoryBarriers ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void waitEvents( ArrayProxy<const Event> events, PipelineStageFlags srcStageMask, PipelineStageFlags dstStageMask, ArrayProxy<const MemoryBarrier> memoryBarriers, ArrayProxy<const BufferMemoryBarrier> bufferMemoryBarriers, ArrayProxy<const ImageMemoryBarrier> imageMemoryBarriers ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void pipelineBarrier( PipelineStageFlags srcStageMask, PipelineStageFlags dstStageMask, DependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const MemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const BufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const ImageMemoryBarrier* pImageMemoryBarriers ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void pipelineBarrier( PipelineStageFlags srcStageMask, PipelineStageFlags dstStageMask, DependencyFlags dependencyFlags, ArrayProxy<const MemoryBarrier> memoryBarriers, ArrayProxy<const BufferMemoryBarrier> bufferMemoryBarriers, ArrayProxy<const ImageMemoryBarrier> imageMemoryBarriers ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void beginQuery( QueryPool queryPool, uint32_t query, QueryControlFlags flags ) const;
+
+ void endQuery( QueryPool queryPool, uint32_t query ) const;
+
+ void resetQueryPool( QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount ) const;
+
+ void writeTimestamp( PipelineStageFlagBits pipelineStage, QueryPool queryPool, uint32_t query ) const;
+
+ void copyQueryPoolResults( QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Buffer dstBuffer, DeviceSize dstOffset, DeviceSize stride, QueryResultFlags flags ) const;
+
+ void pushConstants( PipelineLayout layout, ShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename T>
+ void pushConstants( PipelineLayout layout, ShaderStageFlags stageFlags, uint32_t offset, ArrayProxy<const T> values ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void beginRenderPass( const RenderPassBeginInfo* pRenderPassBegin, SubpassContents contents ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void beginRenderPass( const RenderPassBeginInfo & renderPassBegin, SubpassContents contents ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void nextSubpass( SubpassContents contents ) const;
+
+ void endRenderPass() const;
+
+ void executeCommands( uint32_t commandBufferCount, const CommandBuffer* pCommandBuffers ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void executeCommands( ArrayProxy<const CommandBuffer> commandBuffers ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void debugMarkerBeginEXT( DebugMarkerMarkerInfoEXT* pMarkerInfo ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ DebugMarkerMarkerInfoEXT debugMarkerBeginEXT() const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void debugMarkerEndEXT() const;
+
+ void debugMarkerInsertEXT( DebugMarkerMarkerInfoEXT* pMarkerInfo ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ DebugMarkerMarkerInfoEXT debugMarkerInsertEXT() const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void drawIndirectCountAMD( Buffer buffer, DeviceSize offset, Buffer countBuffer, DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride ) const;
+
+ void drawIndexedIndirectCountAMD( Buffer buffer, DeviceSize offset, Buffer countBuffer, DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride ) const;
+
+ void processCommandsNVX( const CmdProcessCommandsInfoNVX* pProcessCommandsInfo ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void processCommandsNVX( const CmdProcessCommandsInfoNVX & processCommandsInfo ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void reserveSpaceForCommandsNVX( const CmdReserveSpaceForCommandsInfoNVX* pReserveSpaceInfo ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void reserveSpaceForCommandsNVX( const CmdReserveSpaceForCommandsInfoNVX & reserveSpaceInfo ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void pushDescriptorSetKHR( PipelineBindPoint pipelineBindPoint, PipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const WriteDescriptorSet* pDescriptorWrites ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void pushDescriptorSetKHR( PipelineBindPoint pipelineBindPoint, PipelineLayout layout, uint32_t set, ArrayProxy<const WriteDescriptorSet> descriptorWrites ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void setDeviceMaskKHX( uint32_t deviceMask ) const;
+
+ void dispatchBaseKHX( uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ ) const;
+
+ void pushDescriptorSetWithTemplateKHR( DescriptorUpdateTemplateKHR descriptorUpdateTemplate, PipelineLayout layout, uint32_t set, const void* pData ) const;
+
+ void setViewportWScalingNV( uint32_t firstViewport, uint32_t viewportCount, const ViewportWScalingNV* pViewportWScalings ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void setViewportWScalingNV( uint32_t firstViewport, ArrayProxy<const ViewportWScalingNV> viewportWScalings ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void setDiscardRectangleEXT( uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const Rect2D* pDiscardRectangles ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void setDiscardRectangleEXT( uint32_t firstDiscardRectangle, ArrayProxy<const Rect2D> discardRectangles ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkCommandBuffer() const
+ {
+ return m_commandBuffer;
+ }
+
+ explicit operator bool() const
+ {
+ return m_commandBuffer != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_commandBuffer == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkCommandBuffer m_commandBuffer;
+ };
+ static_assert( sizeof( CommandBuffer ) == sizeof( VkCommandBuffer ), "handle and wrapper have different size!" );
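+  // Illustrative sketch (not part of the generated header): recording a command
+  // buffer with the enhanced-mode overloads declared above. 'cmd',
+  // 'renderPassBeginInfo' and 'pipeline' are placeholders the application obtains
+  // elsewhere (e.g. from Device::allocateCommandBuffers and createGraphicsPipeline).
+  //
+  //   cmd.begin( vk::CommandBufferBeginInfo( vk::CommandBufferUsageFlagBits::eOneTimeSubmit ) );
+  //   cmd.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline );
+  //   cmd.bindPipeline( vk::PipelineBindPoint::eGraphics, pipeline );
+  //   cmd.draw( 3, 1, 0, 0 );
+  //   cmd.endRenderPass();
+  //   cmd.end();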
+
+ VULKAN_HPP_INLINE Result CommandBuffer::begin( const CommandBufferBeginInfo* pBeginInfo ) const
+ {
+ return static_cast<Result>( vkBeginCommandBuffer( m_commandBuffer, reinterpret_cast<const VkCommandBufferBeginInfo*>( pBeginInfo ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<void>::type CommandBuffer::begin( const CommandBufferBeginInfo & beginInfo ) const
+ {
+ Result result = static_cast<Result>( vkBeginCommandBuffer( m_commandBuffer, reinterpret_cast<const VkCommandBufferBeginInfo*>( &beginInfo ) ) );
+ return createResultValue( result, "vk::CommandBuffer::begin" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE Result CommandBuffer::end() const
+ {
+ return static_cast<Result>( vkEndCommandBuffer( m_commandBuffer ) );
+ }
+#else
+ VULKAN_HPP_INLINE ResultValueType<void>::type CommandBuffer::end() const
+ {
+ Result result = static_cast<Result>( vkEndCommandBuffer( m_commandBuffer ) );
+ return createResultValue( result, "vk::CommandBuffer::end" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE Result CommandBuffer::reset( CommandBufferResetFlags flags ) const
+ {
+ return static_cast<Result>( vkResetCommandBuffer( m_commandBuffer, static_cast<VkCommandBufferResetFlags>( flags ) ) );
+ }
+#else
+ VULKAN_HPP_INLINE ResultValueType<void>::type CommandBuffer::reset( CommandBufferResetFlags flags ) const
+ {
+ Result result = static_cast<Result>( vkResetCommandBuffer( m_commandBuffer, static_cast<VkCommandBufferResetFlags>( flags ) ) );
+ return createResultValue( result, "vk::CommandBuffer::reset" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::bindPipeline( PipelineBindPoint pipelineBindPoint, Pipeline pipeline ) const
+ {
+ vkCmdBindPipeline( m_commandBuffer, static_cast<VkPipelineBindPoint>( pipelineBindPoint ), static_cast<VkPipeline>( pipeline ) );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::setViewport( uint32_t firstViewport, uint32_t viewportCount, const Viewport* pViewports ) const
+ {
+ vkCmdSetViewport( m_commandBuffer, firstViewport, viewportCount, reinterpret_cast<const VkViewport*>( pViewports ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void CommandBuffer::setViewport( uint32_t firstViewport, ArrayProxy<const Viewport> viewports ) const
+ {
+ vkCmdSetViewport( m_commandBuffer, firstViewport, viewports.size() , reinterpret_cast<const VkViewport*>( viewports.data() ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::setScissor( uint32_t firstScissor, uint32_t scissorCount, const Rect2D* pScissors ) const
+ {
+ vkCmdSetScissor( m_commandBuffer, firstScissor, scissorCount, reinterpret_cast<const VkRect2D*>( pScissors ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void CommandBuffer::setScissor( uint32_t firstScissor, ArrayProxy<const Rect2D> scissors ) const
+ {
+ vkCmdSetScissor( m_commandBuffer, firstScissor, scissors.size() , reinterpret_cast<const VkRect2D*>( scissors.data() ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::setLineWidth( float lineWidth ) const
+ {
+ vkCmdSetLineWidth( m_commandBuffer, lineWidth );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::setDepthBias( float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor ) const
+ {
+ vkCmdSetDepthBias( m_commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::setBlendConstants( const float blendConstants[4] ) const
+ {
+ vkCmdSetBlendConstants( m_commandBuffer, blendConstants );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::setDepthBounds( float minDepthBounds, float maxDepthBounds ) const
+ {
+ vkCmdSetDepthBounds( m_commandBuffer, minDepthBounds, maxDepthBounds );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::setStencilCompareMask( StencilFaceFlags faceMask, uint32_t compareMask ) const
+ {
+ vkCmdSetStencilCompareMask( m_commandBuffer, static_cast<VkStencilFaceFlags>( faceMask ), compareMask );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::setStencilWriteMask( StencilFaceFlags faceMask, uint32_t writeMask ) const
+ {
+ vkCmdSetStencilWriteMask( m_commandBuffer, static_cast<VkStencilFaceFlags>( faceMask ), writeMask );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::setStencilReference( StencilFaceFlags faceMask, uint32_t reference ) const
+ {
+ vkCmdSetStencilReference( m_commandBuffer, static_cast<VkStencilFaceFlags>( faceMask ), reference );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::bindDescriptorSets( PipelineBindPoint pipelineBindPoint, PipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const DescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets ) const
+ {
+ vkCmdBindDescriptorSets( m_commandBuffer, static_cast<VkPipelineBindPoint>( pipelineBindPoint ), static_cast<VkPipelineLayout>( layout ), firstSet, descriptorSetCount, reinterpret_cast<const VkDescriptorSet*>( pDescriptorSets ), dynamicOffsetCount, pDynamicOffsets );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void CommandBuffer::bindDescriptorSets( PipelineBindPoint pipelineBindPoint, PipelineLayout layout, uint32_t firstSet, ArrayProxy<const DescriptorSet> descriptorSets, ArrayProxy<const uint32_t> dynamicOffsets ) const
+ {
+ vkCmdBindDescriptorSets( m_commandBuffer, static_cast<VkPipelineBindPoint>( pipelineBindPoint ), static_cast<VkPipelineLayout>( layout ), firstSet, descriptorSets.size() , reinterpret_cast<const VkDescriptorSet*>( descriptorSets.data() ), dynamicOffsets.size() , dynamicOffsets.data() );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::bindIndexBuffer( Buffer buffer, DeviceSize offset, IndexType indexType ) const
+ {
+ vkCmdBindIndexBuffer( m_commandBuffer, static_cast<VkBuffer>( buffer ), offset, static_cast<VkIndexType>( indexType ) );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::bindVertexBuffers( uint32_t firstBinding, uint32_t bindingCount, const Buffer* pBuffers, const DeviceSize* pOffsets ) const
+ {
+ vkCmdBindVertexBuffers( m_commandBuffer, firstBinding, bindingCount, reinterpret_cast<const VkBuffer*>( pBuffers ), pOffsets );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void CommandBuffer::bindVertexBuffers( uint32_t firstBinding, ArrayProxy<const Buffer> buffers, ArrayProxy<const DeviceSize> offsets ) const
+ {
+#ifdef VULKAN_HPP_NO_EXCEPTIONS
+ assert( buffers.size() == offsets.size() );
+#else
+ if ( buffers.size() != offsets.size() )
+ {
+ throw std::logic_error( "vk::CommandBuffer::bindVertexBuffers: buffers.size() != offsets.size()" );
+ }
+#endif // VULKAN_HPP_NO_EXCEPTIONS
+ vkCmdBindVertexBuffers( m_commandBuffer, firstBinding, buffers.size() , reinterpret_cast<const VkBuffer*>( buffers.data() ), offsets.data() );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::draw( uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance ) const
+ {
+ vkCmdDraw( m_commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::drawIndexed( uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance ) const
+ {
+ vkCmdDrawIndexed( m_commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::drawIndirect( Buffer buffer, DeviceSize offset, uint32_t drawCount, uint32_t stride ) const
+ {
+ vkCmdDrawIndirect( m_commandBuffer, static_cast<VkBuffer>( buffer ), offset, drawCount, stride );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::drawIndexedIndirect( Buffer buffer, DeviceSize offset, uint32_t drawCount, uint32_t stride ) const
+ {
+ vkCmdDrawIndexedIndirect( m_commandBuffer, static_cast<VkBuffer>( buffer ), offset, drawCount, stride );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::dispatch( uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ ) const
+ {
+ vkCmdDispatch( m_commandBuffer, groupCountX, groupCountY, groupCountZ );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::dispatchIndirect( Buffer buffer, DeviceSize offset ) const
+ {
+ vkCmdDispatchIndirect( m_commandBuffer, static_cast<VkBuffer>( buffer ), offset );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::copyBuffer( Buffer srcBuffer, Buffer dstBuffer, uint32_t regionCount, const BufferCopy* pRegions ) const
+ {
+ vkCmdCopyBuffer( m_commandBuffer, static_cast<VkBuffer>( srcBuffer ), static_cast<VkBuffer>( dstBuffer ), regionCount, reinterpret_cast<const VkBufferCopy*>( pRegions ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void CommandBuffer::copyBuffer( Buffer srcBuffer, Buffer dstBuffer, ArrayProxy<const BufferCopy> regions ) const
+ {
+ vkCmdCopyBuffer( m_commandBuffer, static_cast<VkBuffer>( srcBuffer ), static_cast<VkBuffer>( dstBuffer ), regions.size() , reinterpret_cast<const VkBufferCopy*>( regions.data() ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::copyImage( Image srcImage, ImageLayout srcImageLayout, Image dstImage, ImageLayout dstImageLayout, uint32_t regionCount, const ImageCopy* pRegions ) const
+ {
+ vkCmdCopyImage( m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkImage>( dstImage ), static_cast<VkImageLayout>( dstImageLayout ), regionCount, reinterpret_cast<const VkImageCopy*>( pRegions ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void CommandBuffer::copyImage( Image srcImage, ImageLayout srcImageLayout, Image dstImage, ImageLayout dstImageLayout, ArrayProxy<const ImageCopy> regions ) const
+ {
+ vkCmdCopyImage( m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkImage>( dstImage ), static_cast<VkImageLayout>( dstImageLayout ), regions.size() , reinterpret_cast<const VkImageCopy*>( regions.data() ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::blitImage( Image srcImage, ImageLayout srcImageLayout, Image dstImage, ImageLayout dstImageLayout, uint32_t regionCount, const ImageBlit* pRegions, Filter filter ) const
+ {
+ vkCmdBlitImage( m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkImage>( dstImage ), static_cast<VkImageLayout>( dstImageLayout ), regionCount, reinterpret_cast<const VkImageBlit*>( pRegions ), static_cast<VkFilter>( filter ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void CommandBuffer::blitImage( Image srcImage, ImageLayout srcImageLayout, Image dstImage, ImageLayout dstImageLayout, ArrayProxy<const ImageBlit> regions, Filter filter ) const
+ {
+ vkCmdBlitImage( m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkImage>( dstImage ), static_cast<VkImageLayout>( dstImageLayout ), regions.size() , reinterpret_cast<const VkImageBlit*>( regions.data() ), static_cast<VkFilter>( filter ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::copyBufferToImage( Buffer srcBuffer, Image dstImage, ImageLayout dstImageLayout, uint32_t regionCount, const BufferImageCopy* pRegions ) const
+ {
+ vkCmdCopyBufferToImage( m_commandBuffer, static_cast<VkBuffer>( srcBuffer ), static_cast<VkImage>( dstImage ), static_cast<VkImageLayout>( dstImageLayout ), regionCount, reinterpret_cast<const VkBufferImageCopy*>( pRegions ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void CommandBuffer::copyBufferToImage( Buffer srcBuffer, Image dstImage, ImageLayout dstImageLayout, ArrayProxy<const BufferImageCopy> regions ) const
+ {
+ vkCmdCopyBufferToImage( m_commandBuffer, static_cast<VkBuffer>( srcBuffer ), static_cast<VkImage>( dstImage ), static_cast<VkImageLayout>( dstImageLayout ), regions.size() , reinterpret_cast<const VkBufferImageCopy*>( regions.data() ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::copyImageToBuffer( Image srcImage, ImageLayout srcImageLayout, Buffer dstBuffer, uint32_t regionCount, const BufferImageCopy* pRegions ) const
+ {
+ vkCmdCopyImageToBuffer( m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkBuffer>( dstBuffer ), regionCount, reinterpret_cast<const VkBufferImageCopy*>( pRegions ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void CommandBuffer::copyImageToBuffer( Image srcImage, ImageLayout srcImageLayout, Buffer dstBuffer, ArrayProxy<const BufferImageCopy> regions ) const
+ {
+ vkCmdCopyImageToBuffer( m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkBuffer>( dstBuffer ), regions.size() , reinterpret_cast<const VkBufferImageCopy*>( regions.data() ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::updateBuffer( Buffer dstBuffer, DeviceSize dstOffset, DeviceSize dataSize, const void* pData ) const
+ {
+ vkCmdUpdateBuffer( m_commandBuffer, static_cast<VkBuffer>( dstBuffer ), dstOffset, dataSize, pData );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename T>
+ VULKAN_HPP_INLINE void CommandBuffer::updateBuffer( Buffer dstBuffer, DeviceSize dstOffset, ArrayProxy<const T> data ) const
+ {
+ vkCmdUpdateBuffer( m_commandBuffer, static_cast<VkBuffer>( dstBuffer ), dstOffset, data.size() * sizeof( T ) , reinterpret_cast<const void*>( data.data() ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::fillBuffer( Buffer dstBuffer, DeviceSize dstOffset, DeviceSize size, uint32_t data ) const
+ {
+ vkCmdFillBuffer( m_commandBuffer, static_cast<VkBuffer>( dstBuffer ), dstOffset, size, data );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::clearColorImage( Image image, ImageLayout imageLayout, const ClearColorValue* pColor, uint32_t rangeCount, const ImageSubresourceRange* pRanges ) const
+ {
+ vkCmdClearColorImage( m_commandBuffer, static_cast<VkImage>( image ), static_cast<VkImageLayout>( imageLayout ), reinterpret_cast<const VkClearColorValue*>( pColor ), rangeCount, reinterpret_cast<const VkImageSubresourceRange*>( pRanges ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void CommandBuffer::clearColorImage( Image image, ImageLayout imageLayout, const ClearColorValue & color, ArrayProxy<const ImageSubresourceRange> ranges ) const
+ {
+ vkCmdClearColorImage( m_commandBuffer, static_cast<VkImage>( image ), static_cast<VkImageLayout>( imageLayout ), reinterpret_cast<const VkClearColorValue*>( &color ), ranges.size() , reinterpret_cast<const VkImageSubresourceRange*>( ranges.data() ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::clearDepthStencilImage( Image image, ImageLayout imageLayout, const ClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const ImageSubresourceRange* pRanges ) const
+ {
+ vkCmdClearDepthStencilImage( m_commandBuffer, static_cast<VkImage>( image ), static_cast<VkImageLayout>( imageLayout ), reinterpret_cast<const VkClearDepthStencilValue*>( pDepthStencil ), rangeCount, reinterpret_cast<const VkImageSubresourceRange*>( pRanges ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void CommandBuffer::clearDepthStencilImage( Image image, ImageLayout imageLayout, const ClearDepthStencilValue & depthStencil, ArrayProxy<const ImageSubresourceRange> ranges ) const
+ {
+ vkCmdClearDepthStencilImage( m_commandBuffer, static_cast<VkImage>( image ), static_cast<VkImageLayout>( imageLayout ), reinterpret_cast<const VkClearDepthStencilValue*>( &depthStencil ), ranges.size() , reinterpret_cast<const VkImageSubresourceRange*>( ranges.data() ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::clearAttachments( uint32_t attachmentCount, const ClearAttachment* pAttachments, uint32_t rectCount, const ClearRect* pRects ) const
+ {
+ vkCmdClearAttachments( m_commandBuffer, attachmentCount, reinterpret_cast<const VkClearAttachment*>( pAttachments ), rectCount, reinterpret_cast<const VkClearRect*>( pRects ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void CommandBuffer::clearAttachments( ArrayProxy<const ClearAttachment> attachments, ArrayProxy<const ClearRect> rects ) const
+ {
+ vkCmdClearAttachments( m_commandBuffer, attachments.size() , reinterpret_cast<const VkClearAttachment*>( attachments.data() ), rects.size() , reinterpret_cast<const VkClearRect*>( rects.data() ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::resolveImage( Image srcImage, ImageLayout srcImageLayout, Image dstImage, ImageLayout dstImageLayout, uint32_t regionCount, const ImageResolve* pRegions ) const
+ {
+ vkCmdResolveImage( m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkImage>( dstImage ), static_cast<VkImageLayout>( dstImageLayout ), regionCount, reinterpret_cast<const VkImageResolve*>( pRegions ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void CommandBuffer::resolveImage( Image srcImage, ImageLayout srcImageLayout, Image dstImage, ImageLayout dstImageLayout, ArrayProxy<const ImageResolve> regions ) const
+ {
+ vkCmdResolveImage( m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkImage>( dstImage ), static_cast<VkImageLayout>( dstImageLayout ), regions.size() , reinterpret_cast<const VkImageResolve*>( regions.data() ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::setEvent( Event event, PipelineStageFlags stageMask ) const
+ {
+ vkCmdSetEvent( m_commandBuffer, static_cast<VkEvent>( event ), static_cast<VkPipelineStageFlags>( stageMask ) );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::resetEvent( Event event, PipelineStageFlags stageMask ) const
+ {
+ vkCmdResetEvent( m_commandBuffer, static_cast<VkEvent>( event ), static_cast<VkPipelineStageFlags>( stageMask ) );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::waitEvents( uint32_t eventCount, const Event* pEvents, PipelineStageFlags srcStageMask, PipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const MemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const BufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const ImageMemoryBarrier* pImageMemoryBarriers ) const
+ {
+ vkCmdWaitEvents( m_commandBuffer, eventCount, reinterpret_cast<const VkEvent*>( pEvents ), static_cast<VkPipelineStageFlags>( srcStageMask ), static_cast<VkPipelineStageFlags>( dstStageMask ), memoryBarrierCount, reinterpret_cast<const VkMemoryBarrier*>( pMemoryBarriers ), bufferMemoryBarrierCount, reinterpret_cast<const VkBufferMemoryBarrier*>( pBufferMemoryBarriers ), imageMemoryBarrierCount, reinterpret_cast<const VkImageMemoryBarrier*>( pImageMemoryBarriers ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void CommandBuffer::waitEvents( ArrayProxy<const Event> events, PipelineStageFlags srcStageMask, PipelineStageFlags dstStageMask, ArrayProxy<const MemoryBarrier> memoryBarriers, ArrayProxy<const BufferMemoryBarrier> bufferMemoryBarriers, ArrayProxy<const ImageMemoryBarrier> imageMemoryBarriers ) const
+ {
+ vkCmdWaitEvents( m_commandBuffer, events.size() , reinterpret_cast<const VkEvent*>( events.data() ), static_cast<VkPipelineStageFlags>( srcStageMask ), static_cast<VkPipelineStageFlags>( dstStageMask ), memoryBarriers.size() , reinterpret_cast<const VkMemoryBarrier*>( memoryBarriers.data() ), bufferMemoryBarriers.size() , reinterpret_cast<const VkBufferMemoryBarrier*>( bufferMemoryBarriers.data() ), imageMemoryBarriers.size() , reinterpret_cast<const VkImageMemoryBarrier*>( imageMemoryBarriers.data() ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::pipelineBarrier( PipelineStageFlags srcStageMask, PipelineStageFlags dstStageMask, DependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const MemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const BufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const ImageMemoryBarrier* pImageMemoryBarriers ) const
+ {
+ vkCmdPipelineBarrier( m_commandBuffer, static_cast<VkPipelineStageFlags>( srcStageMask ), static_cast<VkPipelineStageFlags>( dstStageMask ), static_cast<VkDependencyFlags>( dependencyFlags ), memoryBarrierCount, reinterpret_cast<const VkMemoryBarrier*>( pMemoryBarriers ), bufferMemoryBarrierCount, reinterpret_cast<const VkBufferMemoryBarrier*>( pBufferMemoryBarriers ), imageMemoryBarrierCount, reinterpret_cast<const VkImageMemoryBarrier*>( pImageMemoryBarriers ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void CommandBuffer::pipelineBarrier( PipelineStageFlags srcStageMask, PipelineStageFlags dstStageMask, DependencyFlags dependencyFlags, ArrayProxy<const MemoryBarrier> memoryBarriers, ArrayProxy<const BufferMemoryBarrier> bufferMemoryBarriers, ArrayProxy<const ImageMemoryBarrier> imageMemoryBarriers ) const
+ {
+ vkCmdPipelineBarrier( m_commandBuffer, static_cast<VkPipelineStageFlags>( srcStageMask ), static_cast<VkPipelineStageFlags>( dstStageMask ), static_cast<VkDependencyFlags>( dependencyFlags ), memoryBarriers.size() , reinterpret_cast<const VkMemoryBarrier*>( memoryBarriers.data() ), bufferMemoryBarriers.size() , reinterpret_cast<const VkBufferMemoryBarrier*>( bufferMemoryBarriers.data() ), imageMemoryBarriers.size() , reinterpret_cast<const VkImageMemoryBarrier*>( imageMemoryBarriers.data() ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::beginQuery( QueryPool queryPool, uint32_t query, QueryControlFlags flags ) const
+ {
+ vkCmdBeginQuery( m_commandBuffer, static_cast<VkQueryPool>( queryPool ), query, static_cast<VkQueryControlFlags>( flags ) );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::endQuery( QueryPool queryPool, uint32_t query ) const
+ {
+ vkCmdEndQuery( m_commandBuffer, static_cast<VkQueryPool>( queryPool ), query );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::resetQueryPool( QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount ) const
+ {
+ vkCmdResetQueryPool( m_commandBuffer, static_cast<VkQueryPool>( queryPool ), firstQuery, queryCount );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::writeTimestamp( PipelineStageFlagBits pipelineStage, QueryPool queryPool, uint32_t query ) const
+ {
+ vkCmdWriteTimestamp( m_commandBuffer, static_cast<VkPipelineStageFlagBits>( pipelineStage ), static_cast<VkQueryPool>( queryPool ), query );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::copyQueryPoolResults( QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Buffer dstBuffer, DeviceSize dstOffset, DeviceSize stride, QueryResultFlags flags ) const
+ {
+ vkCmdCopyQueryPoolResults( m_commandBuffer, static_cast<VkQueryPool>( queryPool ), firstQuery, queryCount, static_cast<VkBuffer>( dstBuffer ), dstOffset, stride, static_cast<VkQueryResultFlags>( flags ) );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::pushConstants( PipelineLayout layout, ShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues ) const
+ {
+ vkCmdPushConstants( m_commandBuffer, static_cast<VkPipelineLayout>( layout ), static_cast<VkShaderStageFlags>( stageFlags ), offset, size, pValues );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename T>
+ VULKAN_HPP_INLINE void CommandBuffer::pushConstants( PipelineLayout layout, ShaderStageFlags stageFlags, uint32_t offset, ArrayProxy<const T> values ) const
+ {
+ vkCmdPushConstants( m_commandBuffer, static_cast<VkPipelineLayout>( layout ), static_cast<VkShaderStageFlags>( stageFlags ), offset, values.size() * sizeof( T ) , reinterpret_cast<const void*>( values.data() ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::beginRenderPass( const RenderPassBeginInfo* pRenderPassBegin, SubpassContents contents ) const
+ {
+ vkCmdBeginRenderPass( m_commandBuffer, reinterpret_cast<const VkRenderPassBeginInfo*>( pRenderPassBegin ), static_cast<VkSubpassContents>( contents ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void CommandBuffer::beginRenderPass( const RenderPassBeginInfo & renderPassBegin, SubpassContents contents ) const
+ {
+ vkCmdBeginRenderPass( m_commandBuffer, reinterpret_cast<const VkRenderPassBeginInfo*>( &renderPassBegin ), static_cast<VkSubpassContents>( contents ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::nextSubpass( SubpassContents contents ) const
+ {
+ vkCmdNextSubpass( m_commandBuffer, static_cast<VkSubpassContents>( contents ) );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::endRenderPass() const
+ {
+ vkCmdEndRenderPass( m_commandBuffer );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::executeCommands( uint32_t commandBufferCount, const CommandBuffer* pCommandBuffers ) const
+ {
+ vkCmdExecuteCommands( m_commandBuffer, commandBufferCount, reinterpret_cast<const VkCommandBuffer*>( pCommandBuffers ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void CommandBuffer::executeCommands( ArrayProxy<const CommandBuffer> commandBuffers ) const
+ {
+ vkCmdExecuteCommands( m_commandBuffer, commandBuffers.size() , reinterpret_cast<const VkCommandBuffer*>( commandBuffers.data() ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::debugMarkerBeginEXT( DebugMarkerMarkerInfoEXT* pMarkerInfo ) const
+ {
+ vkCmdDebugMarkerBeginEXT( m_commandBuffer, reinterpret_cast<VkDebugMarkerMarkerInfoEXT*>( pMarkerInfo ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE DebugMarkerMarkerInfoEXT CommandBuffer::debugMarkerBeginEXT() const
+ {
+ DebugMarkerMarkerInfoEXT markerInfo;
+ vkCmdDebugMarkerBeginEXT( m_commandBuffer, reinterpret_cast<VkDebugMarkerMarkerInfoEXT*>( &markerInfo ) );
+ return markerInfo;
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::debugMarkerEndEXT() const
+ {
+ vkCmdDebugMarkerEndEXT( m_commandBuffer );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::debugMarkerInsertEXT( DebugMarkerMarkerInfoEXT* pMarkerInfo ) const
+ {
+ vkCmdDebugMarkerInsertEXT( m_commandBuffer, reinterpret_cast<VkDebugMarkerMarkerInfoEXT*>( pMarkerInfo ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE DebugMarkerMarkerInfoEXT CommandBuffer::debugMarkerInsertEXT() const
+ {
+ DebugMarkerMarkerInfoEXT markerInfo;
+ vkCmdDebugMarkerInsertEXT( m_commandBuffer, reinterpret_cast<VkDebugMarkerMarkerInfoEXT*>( &markerInfo ) );
+ return markerInfo;
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::drawIndirectCountAMD( Buffer buffer, DeviceSize offset, Buffer countBuffer, DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride ) const
+ {
+ vkCmdDrawIndirectCountAMD( m_commandBuffer, static_cast<VkBuffer>( buffer ), offset, static_cast<VkBuffer>( countBuffer ), countBufferOffset, maxDrawCount, stride );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::drawIndexedIndirectCountAMD( Buffer buffer, DeviceSize offset, Buffer countBuffer, DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride ) const
+ {
+ vkCmdDrawIndexedIndirectCountAMD( m_commandBuffer, static_cast<VkBuffer>( buffer ), offset, static_cast<VkBuffer>( countBuffer ), countBufferOffset, maxDrawCount, stride );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::processCommandsNVX( const CmdProcessCommandsInfoNVX* pProcessCommandsInfo ) const
+ {
+ vkCmdProcessCommandsNVX( m_commandBuffer, reinterpret_cast<const VkCmdProcessCommandsInfoNVX*>( pProcessCommandsInfo ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void CommandBuffer::processCommandsNVX( const CmdProcessCommandsInfoNVX & processCommandsInfo ) const
+ {
+ vkCmdProcessCommandsNVX( m_commandBuffer, reinterpret_cast<const VkCmdProcessCommandsInfoNVX*>( &processCommandsInfo ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::reserveSpaceForCommandsNVX( const CmdReserveSpaceForCommandsInfoNVX* pReserveSpaceInfo ) const
+ {
+ vkCmdReserveSpaceForCommandsNVX( m_commandBuffer, reinterpret_cast<const VkCmdReserveSpaceForCommandsInfoNVX*>( pReserveSpaceInfo ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void CommandBuffer::reserveSpaceForCommandsNVX( const CmdReserveSpaceForCommandsInfoNVX & reserveSpaceInfo ) const
+ {
+ vkCmdReserveSpaceForCommandsNVX( m_commandBuffer, reinterpret_cast<const VkCmdReserveSpaceForCommandsInfoNVX*>( &reserveSpaceInfo ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSetKHR( PipelineBindPoint pipelineBindPoint, PipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const WriteDescriptorSet* pDescriptorWrites ) const
+ {
+ vkCmdPushDescriptorSetKHR( m_commandBuffer, static_cast<VkPipelineBindPoint>( pipelineBindPoint ), static_cast<VkPipelineLayout>( layout ), set, descriptorWriteCount, reinterpret_cast<const VkWriteDescriptorSet*>( pDescriptorWrites ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSetKHR( PipelineBindPoint pipelineBindPoint, PipelineLayout layout, uint32_t set, ArrayProxy<const WriteDescriptorSet> descriptorWrites ) const
+ {
+ vkCmdPushDescriptorSetKHR( m_commandBuffer, static_cast<VkPipelineBindPoint>( pipelineBindPoint ), static_cast<VkPipelineLayout>( layout ), set, descriptorWrites.size() , reinterpret_cast<const VkWriteDescriptorSet*>( descriptorWrites.data() ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::setDeviceMaskKHX( uint32_t deviceMask ) const
+ {
+ vkCmdSetDeviceMaskKHX( m_commandBuffer, deviceMask );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::dispatchBaseKHX( uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ ) const
+ {
+ vkCmdDispatchBaseKHX( m_commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSetWithTemplateKHR( DescriptorUpdateTemplateKHR descriptorUpdateTemplate, PipelineLayout layout, uint32_t set, const void* pData ) const
+ {
+ vkCmdPushDescriptorSetWithTemplateKHR( m_commandBuffer, static_cast<VkDescriptorUpdateTemplateKHR>( descriptorUpdateTemplate ), static_cast<VkPipelineLayout>( layout ), set, pData );
+ }
+
+ VULKAN_HPP_INLINE void CommandBuffer::setViewportWScalingNV( uint32_t firstViewport, uint32_t viewportCount, const ViewportWScalingNV* pViewportWScalings ) const
+ {
+ vkCmdSetViewportWScalingNV( m_commandBuffer, firstViewport, viewportCount, reinterpret_cast<const VkViewportWScalingNV*>( pViewportWScalings ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void CommandBuffer::setViewportWScalingNV( uint32_t firstViewport, ArrayProxy<const ViewportWScalingNV> viewportWScalings ) const
+ {
+ vkCmdSetViewportWScalingNV( m_commandBuffer, firstViewport, viewportWScalings.size() , reinterpret_cast<const VkViewportWScalingNV*>( viewportWScalings.data() ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void CommandBuffer::setDiscardRectangleEXT( uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const Rect2D* pDiscardRectangles ) const
+ {
+ vkCmdSetDiscardRectangleEXT( m_commandBuffer, firstDiscardRectangle, discardRectangleCount, reinterpret_cast<const VkRect2D*>( pDiscardRectangles ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void CommandBuffer::setDiscardRectangleEXT( uint32_t firstDiscardRectangle, ArrayProxy<const Rect2D> discardRectangles ) const
+ {
+ vkCmdSetDiscardRectangleEXT( m_commandBuffer, firstDiscardRectangle, discardRectangles.size() , reinterpret_cast<const VkRect2D*>( discardRectangles.data() ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
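+  // --- Editorial usage sketch (illustrative only, not part of the generated header) ---
+  // The enhanced-mode overloads above take ArrayProxy arguments, so single elements,
+  // initializer lists and std::vectors can be passed without explicit counts. Assuming
+  // a recording vk::CommandBuffer 'cmd' and a vk::Image 'image' (both hypothetical),
+  // an image layout transition might be expressed roughly as:
+  //
+  //   vk::ImageMemoryBarrier barrier;                        // hypothetical, minimal setup
+  //   barrier.setImage( image )
+  //          .setOldLayout( vk::ImageLayout::eUndefined )
+  //          .setNewLayout( vk::ImageLayout::eTransferDstOptimal )
+  //          .setSubresourceRange( { vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 } );
+  //   cmd.pipelineBarrier( vk::PipelineStageFlagBits::eTopOfPipe,
+  //                        vk::PipelineStageFlagBits::eTransfer,
+  //                        vk::DependencyFlags(),
+  //                        nullptr, nullptr, barrier );       // single barrier via ArrayProxy
+  // -------------------------------------------------------------------------------------
+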
+ struct SubmitInfo
+ {
+ SubmitInfo( uint32_t waitSemaphoreCount_ = 0, const Semaphore* pWaitSemaphores_ = nullptr, const PipelineStageFlags* pWaitDstStageMask_ = nullptr, uint32_t commandBufferCount_ = 0, const CommandBuffer* pCommandBuffers_ = nullptr, uint32_t signalSemaphoreCount_ = 0, const Semaphore* pSignalSemaphores_ = nullptr )
+ : sType( StructureType::eSubmitInfo )
+ , pNext( nullptr )
+ , waitSemaphoreCount( waitSemaphoreCount_ )
+ , pWaitSemaphores( pWaitSemaphores_ )
+ , pWaitDstStageMask( pWaitDstStageMask_ )
+ , commandBufferCount( commandBufferCount_ )
+ , pCommandBuffers( pCommandBuffers_ )
+ , signalSemaphoreCount( signalSemaphoreCount_ )
+ , pSignalSemaphores( pSignalSemaphores_ )
+ {
+ }
+
+ SubmitInfo( VkSubmitInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SubmitInfo) );
+ }
+
+ SubmitInfo& operator=( VkSubmitInfo const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(SubmitInfo) );
+ return *this;
+ }
+
+ SubmitInfo& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ SubmitInfo& setWaitSemaphoreCount( uint32_t waitSemaphoreCount_ )
+ {
+ waitSemaphoreCount = waitSemaphoreCount_;
+ return *this;
+ }
+
+ SubmitInfo& setPWaitSemaphores( const Semaphore* pWaitSemaphores_ )
+ {
+ pWaitSemaphores = pWaitSemaphores_;
+ return *this;
+ }
+
+ SubmitInfo& setPWaitDstStageMask( const PipelineStageFlags* pWaitDstStageMask_ )
+ {
+ pWaitDstStageMask = pWaitDstStageMask_;
+ return *this;
+ }
+
+ SubmitInfo& setCommandBufferCount( uint32_t commandBufferCount_ )
+ {
+ commandBufferCount = commandBufferCount_;
+ return *this;
+ }
+
+ SubmitInfo& setPCommandBuffers( const CommandBuffer* pCommandBuffers_ )
+ {
+ pCommandBuffers = pCommandBuffers_;
+ return *this;
+ }
+
+ SubmitInfo& setSignalSemaphoreCount( uint32_t signalSemaphoreCount_ )
+ {
+ signalSemaphoreCount = signalSemaphoreCount_;
+ return *this;
+ }
+
+ SubmitInfo& setPSignalSemaphores( const Semaphore* pSignalSemaphores_ )
+ {
+ pSignalSemaphores = pSignalSemaphores_;
+ return *this;
+ }
+
+ operator const VkSubmitInfo&() const
+ {
+ return *reinterpret_cast<const VkSubmitInfo*>(this);
+ }
+
+ bool operator==( SubmitInfo const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( waitSemaphoreCount == rhs.waitSemaphoreCount )
+ && ( pWaitSemaphores == rhs.pWaitSemaphores )
+ && ( pWaitDstStageMask == rhs.pWaitDstStageMask )
+ && ( commandBufferCount == rhs.commandBufferCount )
+ && ( pCommandBuffers == rhs.pCommandBuffers )
+ && ( signalSemaphoreCount == rhs.signalSemaphoreCount )
+ && ( pSignalSemaphores == rhs.pSignalSemaphores );
+ }
+
+ bool operator!=( SubmitInfo const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ uint32_t waitSemaphoreCount;
+ const Semaphore* pWaitSemaphores;
+ const PipelineStageFlags* pWaitDstStageMask;
+ uint32_t commandBufferCount;
+ const CommandBuffer* pCommandBuffers;
+ uint32_t signalSemaphoreCount;
+ const Semaphore* pSignalSemaphores;
+ };
+ static_assert( sizeof( SubmitInfo ) == sizeof( VkSubmitInfo ), "struct and wrapper have different size!" );
+
+ class Queue
+ {
+ public:
+ Queue()
+ : m_queue(VK_NULL_HANDLE)
+ {}
+
+ Queue( std::nullptr_t )
+ : m_queue(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT Queue(VkQueue queue)
+ : m_queue(queue)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ Queue& operator=(VkQueue queue)
+ {
+ m_queue = queue;
+ return *this;
+ }
+#endif
+
+ Queue& operator=( std::nullptr_t )
+ {
+ m_queue = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(Queue const &rhs) const
+ {
+ return m_queue == rhs.m_queue;
+ }
+
+ bool operator!=(Queue const &rhs) const
+ {
+ return m_queue != rhs.m_queue;
+ }
+
+ bool operator<(Queue const &rhs) const
+ {
+ return m_queue < rhs.m_queue;
+ }
+
+ Result submit( uint32_t submitCount, const SubmitInfo* pSubmits, Fence fence ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<void>::type submit( ArrayProxy<const SubmitInfo> submits, Fence fence ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ Result waitIdle() const;
+#else
+ ResultValueType<void>::type waitIdle() const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result bindSparse( uint32_t bindInfoCount, const BindSparseInfo* pBindInfo, Fence fence ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<void>::type bindSparse( ArrayProxy<const BindSparseInfo> bindInfo, Fence fence ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result presentKHR( const PresentInfoKHR* pPresentInfo ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ Result presentKHR( const PresentInfoKHR & presentInfo ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkQueue() const
+ {
+ return m_queue;
+ }
+
+ explicit operator bool() const
+ {
+ return m_queue != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_queue == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkQueue m_queue;
+ };
+ static_assert( sizeof( Queue ) == sizeof( VkQueue ), "handle and wrapper have different size!" );
+
+ VULKAN_HPP_INLINE Result Queue::submit( uint32_t submitCount, const SubmitInfo* pSubmits, Fence fence ) const
+ {
+ return static_cast<Result>( vkQueueSubmit( m_queue, submitCount, reinterpret_cast<const VkSubmitInfo*>( pSubmits ), static_cast<VkFence>( fence ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<void>::type Queue::submit( ArrayProxy<const SubmitInfo> submits, Fence fence ) const
+ {
+ Result result = static_cast<Result>( vkQueueSubmit( m_queue, submits.size() , reinterpret_cast<const VkSubmitInfo*>( submits.data() ), static_cast<VkFence>( fence ) ) );
+ return createResultValue( result, "vk::Queue::submit" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE Result Queue::waitIdle() const
+ {
+ return static_cast<Result>( vkQueueWaitIdle( m_queue ) );
+ }
+#else
+ VULKAN_HPP_INLINE ResultValueType<void>::type Queue::waitIdle() const
+ {
+ Result result = static_cast<Result>( vkQueueWaitIdle( m_queue ) );
+ return createResultValue( result, "vk::Queue::waitIdle" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Queue::bindSparse( uint32_t bindInfoCount, const BindSparseInfo* pBindInfo, Fence fence ) const
+ {
+ return static_cast<Result>( vkQueueBindSparse( m_queue, bindInfoCount, reinterpret_cast<const VkBindSparseInfo*>( pBindInfo ), static_cast<VkFence>( fence ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<void>::type Queue::bindSparse( ArrayProxy<const BindSparseInfo> bindInfo, Fence fence ) const
+ {
+ Result result = static_cast<Result>( vkQueueBindSparse( m_queue, bindInfo.size() , reinterpret_cast<const VkBindSparseInfo*>( bindInfo.data() ), static_cast<VkFence>( fence ) ) );
+ return createResultValue( result, "vk::Queue::bindSparse" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Queue::presentKHR( const PresentInfoKHR* pPresentInfo ) const
+ {
+ return static_cast<Result>( vkQueuePresentKHR( m_queue, reinterpret_cast<const VkPresentInfoKHR*>( pPresentInfo ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE Result Queue::presentKHR( const PresentInfoKHR & presentInfo ) const
+ {
+ Result result = static_cast<Result>( vkQueuePresentKHR( m_queue, reinterpret_cast<const VkPresentInfoKHR*>( &presentInfo ) ) );
+ return createResultValue( result, "vk::Queue::presentKHR", { Result::eSuccess, Result::eSuboptimalKHR } );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
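+  // --- Editorial usage sketch (illustrative only, not part of the generated header) ---
+  // A typical enhanced-mode submission built with the SubmitInfo setters above, assuming
+  // a valid vk::Queue 'queue', vk::CommandBuffer 'commandBuffer', vk::Fence 'fence' and
+  // hypothetical semaphores obtained elsewhere, might look like:
+  //
+  //   vk::PipelineStageFlags waitStage = vk::PipelineStageFlagBits::eColorAttachmentOutput;
+  //   vk::SubmitInfo submitInfo;
+  //   submitInfo.setWaitSemaphoreCount( 1 )
+  //             .setPWaitSemaphores( &imageAcquiredSemaphore )    // hypothetical semaphore
+  //             .setPWaitDstStageMask( &waitStage )
+  //             .setCommandBufferCount( 1 )
+  //             .setPCommandBuffers( &commandBuffer )
+  //             .setSignalSemaphoreCount( 1 )
+  //             .setPSignalSemaphores( &renderFinishedSemaphore ); // hypothetical semaphore
+  //   queue.submit( submitInfo, fence );   // ArrayProxy accepts a single element
+  //   queue.waitIdle();                    // in enhanced mode, failure is reported by createResultValue
+  // -------------------------------------------------------------------------------------
+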
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ class BufferDeleter;
+ using UniqueBuffer = UniqueHandle<Buffer, BufferDeleter>;
+ class BufferViewDeleter;
+ using UniqueBufferView = UniqueHandle<BufferView, BufferViewDeleter>;
+ class CommandBufferDeleter;
+ using UniqueCommandBuffer = UniqueHandle<CommandBuffer, CommandBufferDeleter>;
+ class CommandPoolDeleter;
+ using UniqueCommandPool = UniqueHandle<CommandPool, CommandPoolDeleter>;
+ class DescriptorPoolDeleter;
+ using UniqueDescriptorPool = UniqueHandle<DescriptorPool, DescriptorPoolDeleter>;
+ class DescriptorSetDeleter;
+ using UniqueDescriptorSet = UniqueHandle<DescriptorSet, DescriptorSetDeleter>;
+ class DescriptorSetLayoutDeleter;
+ using UniqueDescriptorSetLayout = UniqueHandle<DescriptorSetLayout, DescriptorSetLayoutDeleter>;
+ class DescriptorUpdateTemplateKHRDeleter;
+ using UniqueDescriptorUpdateTemplateKHR = UniqueHandle<DescriptorUpdateTemplateKHR, DescriptorUpdateTemplateKHRDeleter>;
+ class DeviceMemoryDeleter;
+ using UniqueDeviceMemory = UniqueHandle<DeviceMemory, DeviceMemoryDeleter>;
+ class EventDeleter;
+ using UniqueEvent = UniqueHandle<Event, EventDeleter>;
+ class FenceDeleter;
+ using UniqueFence = UniqueHandle<Fence, FenceDeleter>;
+ class FramebufferDeleter;
+ using UniqueFramebuffer = UniqueHandle<Framebuffer, FramebufferDeleter>;
+ class ImageDeleter;
+ using UniqueImage = UniqueHandle<Image, ImageDeleter>;
+ class ImageViewDeleter;
+ using UniqueImageView = UniqueHandle<ImageView, ImageViewDeleter>;
+ class IndirectCommandsLayoutNVXDeleter;
+ using UniqueIndirectCommandsLayoutNVX = UniqueHandle<IndirectCommandsLayoutNVX, IndirectCommandsLayoutNVXDeleter>;
+ class ObjectTableNVXDeleter;
+ using UniqueObjectTableNVX = UniqueHandle<ObjectTableNVX, ObjectTableNVXDeleter>;
+ class PipelineDeleter;
+ using UniquePipeline = UniqueHandle<Pipeline, PipelineDeleter>;
+ class PipelineCacheDeleter;
+ using UniquePipelineCache = UniqueHandle<PipelineCache, PipelineCacheDeleter>;
+ class PipelineLayoutDeleter;
+ using UniquePipelineLayout = UniqueHandle<PipelineLayout, PipelineLayoutDeleter>;
+ class QueryPoolDeleter;
+ using UniqueQueryPool = UniqueHandle<QueryPool, QueryPoolDeleter>;
+ class RenderPassDeleter;
+ using UniqueRenderPass = UniqueHandle<RenderPass, RenderPassDeleter>;
+ class SamplerDeleter;
+ using UniqueSampler = UniqueHandle<Sampler, SamplerDeleter>;
+ class SemaphoreDeleter;
+ using UniqueSemaphore = UniqueHandle<Semaphore, SemaphoreDeleter>;
+ class ShaderModuleDeleter;
+ using UniqueShaderModule = UniqueHandle<ShaderModule, ShaderModuleDeleter>;
+ class SwapchainKHRDeleter;
+ using UniqueSwapchainKHR = UniqueHandle<SwapchainKHR, SwapchainKHRDeleter>;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+
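+  // --- Editorial usage sketch (illustrative only, not part of the generated header) ---
+  // The Unique* aliases above wrap each handle in a UniqueHandle that destroys the object
+  // when it goes out of scope. Assuming a valid vk::Device 'device' (hypothetical), a
+  // buffer created through the Unique variant declared on Device below needs no explicit
+  // destroy call:
+  //
+  //   vk::BufferCreateInfo bufferInfo;                        // hypothetical create info
+  //   bufferInfo.setSize( 1024 ).setUsage( vk::BufferUsageFlagBits::eUniformBuffer );
+  //   vk::UniqueBuffer buffer = device.createBufferUnique( bufferInfo );
+  //   // vkDestroyBuffer is invoked automatically when 'buffer' goes out of scope.
+  // -------------------------------------------------------------------------------------
+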
+ class Device
+ {
+ public:
+ Device()
+ : m_device(VK_NULL_HANDLE)
+ {}
+
+ Device( std::nullptr_t )
+ : m_device(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT Device(VkDevice device)
+ : m_device(device)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ Device& operator=(VkDevice device)
+ {
+ m_device = device;
+ return *this;
+ }
+#endif
+
+ Device& operator=( std::nullptr_t )
+ {
+ m_device = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(Device const &rhs) const
+ {
+ return m_device == rhs.m_device;
+ }
+
+ bool operator!=(Device const &rhs) const
+ {
+ return m_device != rhs.m_device;
+ }
+
+ bool operator<(Device const &rhs) const
+ {
+ return m_device < rhs.m_device;
+ }
+
+ PFN_vkVoidFunction getProcAddr( const char* pName ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ PFN_vkVoidFunction getProcAddr( const std::string & name ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void destroy( const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroy( Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void getQueue( uint32_t queueFamilyIndex, uint32_t queueIndex, Queue* pQueue ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ Queue getQueue( uint32_t queueFamilyIndex, uint32_t queueIndex ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ Result waitIdle() const;
+#else
+ ResultValueType<void>::type waitIdle() const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result allocateMemory( const MemoryAllocateInfo* pAllocateInfo, const AllocationCallbacks* pAllocator, DeviceMemory* pMemory ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<DeviceMemory>::type allocateMemory( const MemoryAllocateInfo & allocateInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueDeviceMemory allocateMemoryUnique( const MemoryAllocateInfo & allocateInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void freeMemory( DeviceMemory memory, const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void freeMemory( DeviceMemory memory, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result mapMemory( DeviceMemory memory, DeviceSize offset, DeviceSize size, MemoryMapFlags flags, void** ppData ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<void*>::type mapMemory( DeviceMemory memory, DeviceSize offset, DeviceSize size, MemoryMapFlags flags = MemoryMapFlags() ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void unmapMemory( DeviceMemory memory ) const;
+
+ Result flushMappedMemoryRanges( uint32_t memoryRangeCount, const MappedMemoryRange* pMemoryRanges ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<void>::type flushMappedMemoryRanges( ArrayProxy<const MappedMemoryRange> memoryRanges ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result invalidateMappedMemoryRanges( uint32_t memoryRangeCount, const MappedMemoryRange* pMemoryRanges ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<void>::type invalidateMappedMemoryRanges( ArrayProxy<const MappedMemoryRange> memoryRanges ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void getMemoryCommitment( DeviceMemory memory, DeviceSize* pCommittedMemoryInBytes ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ DeviceSize getMemoryCommitment( DeviceMemory memory ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void getBufferMemoryRequirements( Buffer buffer, MemoryRequirements* pMemoryRequirements ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ MemoryRequirements getBufferMemoryRequirements( Buffer buffer ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ Result bindBufferMemory( Buffer buffer, DeviceMemory memory, DeviceSize memoryOffset ) const;
+#else
+ ResultValueType<void>::type bindBufferMemory( Buffer buffer, DeviceMemory memory, DeviceSize memoryOffset ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void getImageMemoryRequirements( Image image, MemoryRequirements* pMemoryRequirements ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ MemoryRequirements getImageMemoryRequirements( Image image ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ Result bindImageMemory( Image image, DeviceMemory memory, DeviceSize memoryOffset ) const;
+#else
+ ResultValueType<void>::type bindImageMemory( Image image, DeviceMemory memory, DeviceSize memoryOffset ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void getImageSparseMemoryRequirements( Image image, uint32_t* pSparseMemoryRequirementCount, SparseImageMemoryRequirements* pSparseMemoryRequirements ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<SparseImageMemoryRequirements>>
+ std::vector<SparseImageMemoryRequirements,Allocator> getImageSparseMemoryRequirements( Image image ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result createFence( const FenceCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Fence* pFence ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<Fence>::type createFence( const FenceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueFence createFenceUnique( const FenceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void destroyFence( Fence fence, const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroyFence( Fence fence, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result resetFences( uint32_t fenceCount, const Fence* pFences ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<void>::type resetFences( ArrayProxy<const Fence> fences ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result getFenceStatus( Fence fence ) const;
+
+ Result waitForFences( uint32_t fenceCount, const Fence* pFences, Bool32 waitAll, uint64_t timeout ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ Result waitForFences( ArrayProxy<const Fence> fences, Bool32 waitAll, uint64_t timeout ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result createSemaphore( const SemaphoreCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Semaphore* pSemaphore ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<Semaphore>::type createSemaphore( const SemaphoreCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueSemaphore createSemaphoreUnique( const SemaphoreCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void destroySemaphore( Semaphore semaphore, const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroySemaphore( Semaphore semaphore, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result createEvent( const EventCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Event* pEvent ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<Event>::type createEvent( const EventCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueEvent createEventUnique( const EventCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void destroyEvent( Event event, const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroyEvent( Event event, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result getEventStatus( Event event ) const;
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ Result setEvent( Event event ) const;
+#else
+ ResultValueType<void>::type setEvent( Event event ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ Result resetEvent( Event event ) const;
+#else
+ ResultValueType<void>::type resetEvent( Event event ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result createQueryPool( const QueryPoolCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, QueryPool* pQueryPool ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<QueryPool>::type createQueryPool( const QueryPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueQueryPool createQueryPoolUnique( const QueryPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void destroyQueryPool( QueryPool queryPool, const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroyQueryPool( QueryPool queryPool, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result getQueryPoolResults( QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, DeviceSize stride, QueryResultFlags flags ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename T>
+ Result getQueryPoolResults( QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, ArrayProxy<T> data, DeviceSize stride, QueryResultFlags flags ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result createBuffer( const BufferCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Buffer* pBuffer ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<Buffer>::type createBuffer( const BufferCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueBuffer createBufferUnique( const BufferCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void destroyBuffer( Buffer buffer, const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroyBuffer( Buffer buffer, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result createBufferView( const BufferViewCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, BufferView* pView ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<BufferView>::type createBufferView( const BufferViewCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueBufferView createBufferViewUnique( const BufferViewCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void destroyBufferView( BufferView bufferView, const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroyBufferView( BufferView bufferView, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result createImage( const ImageCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Image* pImage ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<Image>::type createImage( const ImageCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueImage createImageUnique( const ImageCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void destroyImage( Image image, const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroyImage( Image image, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void getImageSubresourceLayout( Image image, const ImageSubresource* pSubresource, SubresourceLayout* pLayout ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ SubresourceLayout getImageSubresourceLayout( Image image, const ImageSubresource & subresource ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result createImageView( const ImageViewCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, ImageView* pView ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<ImageView>::type createImageView( const ImageViewCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueImageView createImageViewUnique( const ImageViewCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void destroyImageView( ImageView imageView, const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroyImageView( ImageView imageView, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result createShaderModule( const ShaderModuleCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, ShaderModule* pShaderModule ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<ShaderModule>::type createShaderModule( const ShaderModuleCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueShaderModule createShaderModuleUnique( const ShaderModuleCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void destroyShaderModule( ShaderModule shaderModule, const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroyShaderModule( ShaderModule shaderModule, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result createPipelineCache( const PipelineCacheCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, PipelineCache* pPipelineCache ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<PipelineCache>::type createPipelineCache( const PipelineCacheCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniquePipelineCache createPipelineCacheUnique( const PipelineCacheCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void destroyPipelineCache( PipelineCache pipelineCache, const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroyPipelineCache( PipelineCache pipelineCache, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result getPipelineCacheData( PipelineCache pipelineCache, size_t* pDataSize, void* pData ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<uint8_t>>
+ typename ResultValueType<std::vector<uint8_t,Allocator>>::type getPipelineCacheData( PipelineCache pipelineCache ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result mergePipelineCaches( PipelineCache dstCache, uint32_t srcCacheCount, const PipelineCache* pSrcCaches ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<void>::type mergePipelineCaches( PipelineCache dstCache, ArrayProxy<const PipelineCache> srcCaches ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result createGraphicsPipelines( PipelineCache pipelineCache, uint32_t createInfoCount, const GraphicsPipelineCreateInfo* pCreateInfos, const AllocationCallbacks* pAllocator, Pipeline* pPipelines ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<Pipeline>>
+ typename ResultValueType<std::vector<Pipeline,Allocator>>::type createGraphicsPipelines( PipelineCache pipelineCache, ArrayProxy<const GraphicsPipelineCreateInfo> createInfos, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+ ResultValueType<Pipeline>::type createGraphicsPipeline( PipelineCache pipelineCache, const GraphicsPipelineCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ template <typename Allocator = std::allocator<Pipeline>>
+ std::vector<UniquePipeline> createGraphicsPipelinesUnique( PipelineCache pipelineCache, ArrayProxy<const GraphicsPipelineCreateInfo> createInfos, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+ UniquePipeline createGraphicsPipelineUnique( PipelineCache pipelineCache, const GraphicsPipelineCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result createComputePipelines( PipelineCache pipelineCache, uint32_t createInfoCount, const ComputePipelineCreateInfo* pCreateInfos, const AllocationCallbacks* pAllocator, Pipeline* pPipelines ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<Pipeline>>
+ typename ResultValueType<std::vector<Pipeline,Allocator>>::type createComputePipelines( PipelineCache pipelineCache, ArrayProxy<const ComputePipelineCreateInfo> createInfos, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+ ResultValueType<Pipeline>::type createComputePipeline( PipelineCache pipelineCache, const ComputePipelineCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ template <typename Allocator = std::allocator<Pipeline>>
+ std::vector<UniquePipeline> createComputePipelinesUnique( PipelineCache pipelineCache, ArrayProxy<const ComputePipelineCreateInfo> createInfos, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+ UniquePipeline createComputePipelineUnique( PipelineCache pipelineCache, const ComputePipelineCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void destroyPipeline( Pipeline pipeline, const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroyPipeline( Pipeline pipeline, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result createPipelineLayout( const PipelineLayoutCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, PipelineLayout* pPipelineLayout ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<PipelineLayout>::type createPipelineLayout( const PipelineLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniquePipelineLayout createPipelineLayoutUnique( const PipelineLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void destroyPipelineLayout( PipelineLayout pipelineLayout, const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroyPipelineLayout( PipelineLayout pipelineLayout, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result createSampler( const SamplerCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Sampler* pSampler ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<Sampler>::type createSampler( const SamplerCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueSampler createSamplerUnique( const SamplerCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void destroySampler( Sampler sampler, const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroySampler( Sampler sampler, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result createDescriptorSetLayout( const DescriptorSetLayoutCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, DescriptorSetLayout* pSetLayout ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<DescriptorSetLayout>::type createDescriptorSetLayout( const DescriptorSetLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueDescriptorSetLayout createDescriptorSetLayoutUnique( const DescriptorSetLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void destroyDescriptorSetLayout( DescriptorSetLayout descriptorSetLayout, const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroyDescriptorSetLayout( DescriptorSetLayout descriptorSetLayout, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result createDescriptorPool( const DescriptorPoolCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, DescriptorPool* pDescriptorPool ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<DescriptorPool>::type createDescriptorPool( const DescriptorPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueDescriptorPool createDescriptorPoolUnique( const DescriptorPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void destroyDescriptorPool( DescriptorPool descriptorPool, const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroyDescriptorPool( DescriptorPool descriptorPool, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ Result resetDescriptorPool( DescriptorPool descriptorPool, DescriptorPoolResetFlags flags ) const;
+#else
+ ResultValueType<void>::type resetDescriptorPool( DescriptorPool descriptorPool, DescriptorPoolResetFlags flags = DescriptorPoolResetFlags() ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result allocateDescriptorSets( const DescriptorSetAllocateInfo* pAllocateInfo, DescriptorSet* pDescriptorSets ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<DescriptorSet>>
+ typename ResultValueType<std::vector<DescriptorSet,Allocator>>::type allocateDescriptorSets( const DescriptorSetAllocateInfo & allocateInfo ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ template <typename Allocator = std::allocator<DescriptorSet>>
+ std::vector<UniqueDescriptorSet> allocateDescriptorSetsUnique( const DescriptorSetAllocateInfo & allocateInfo ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result freeDescriptorSets( DescriptorPool descriptorPool, uint32_t descriptorSetCount, const DescriptorSet* pDescriptorSets ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<void>::type freeDescriptorSets( DescriptorPool descriptorPool, ArrayProxy<const DescriptorSet> descriptorSets ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void updateDescriptorSets( uint32_t descriptorWriteCount, const WriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const CopyDescriptorSet* pDescriptorCopies ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void updateDescriptorSets( ArrayProxy<const WriteDescriptorSet> descriptorWrites, ArrayProxy<const CopyDescriptorSet> descriptorCopies ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result createFramebuffer( const FramebufferCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Framebuffer* pFramebuffer ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<Framebuffer>::type createFramebuffer( const FramebufferCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueFramebuffer createFramebufferUnique( const FramebufferCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void destroyFramebuffer( Framebuffer framebuffer, const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroyFramebuffer( Framebuffer framebuffer, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result createRenderPass( const RenderPassCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, RenderPass* pRenderPass ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<RenderPass>::type createRenderPass( const RenderPassCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueRenderPass createRenderPassUnique( const RenderPassCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void destroyRenderPass( RenderPass renderPass, const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroyRenderPass( RenderPass renderPass, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void getRenderAreaGranularity( RenderPass renderPass, Extent2D* pGranularity ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ Extent2D getRenderAreaGranularity( RenderPass renderPass ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result createCommandPool( const CommandPoolCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, CommandPool* pCommandPool ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<CommandPool>::type createCommandPool( const CommandPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueCommandPool createCommandPoolUnique( const CommandPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void destroyCommandPool( CommandPool commandPool, const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroyCommandPool( CommandPool commandPool, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ Result resetCommandPool( CommandPool commandPool, CommandPoolResetFlags flags ) const;
+#else
+ ResultValueType<void>::type resetCommandPool( CommandPool commandPool, CommandPoolResetFlags flags ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result allocateCommandBuffers( const CommandBufferAllocateInfo* pAllocateInfo, CommandBuffer* pCommandBuffers ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<CommandBuffer>>
+ typename ResultValueType<std::vector<CommandBuffer,Allocator>>::type allocateCommandBuffers( const CommandBufferAllocateInfo & allocateInfo ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ template <typename Allocator = std::allocator<CommandBuffer>>
+ std::vector<UniqueCommandBuffer> allocateCommandBuffersUnique( const CommandBufferAllocateInfo & allocateInfo ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void freeCommandBuffers( CommandPool commandPool, uint32_t commandBufferCount, const CommandBuffer* pCommandBuffers ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void freeCommandBuffers( CommandPool commandPool, ArrayProxy<const CommandBuffer> commandBuffers ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result createSharedSwapchainsKHR( uint32_t swapchainCount, const SwapchainCreateInfoKHR* pCreateInfos, const AllocationCallbacks* pAllocator, SwapchainKHR* pSwapchains ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<SwapchainKHR>>
+ typename ResultValueType<std::vector<SwapchainKHR,Allocator>>::type createSharedSwapchainsKHR( ArrayProxy<const SwapchainCreateInfoKHR> createInfos, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+ ResultValueType<SwapchainKHR>::type createSharedSwapchainKHR( const SwapchainCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ template <typename Allocator = std::allocator<SwapchainKHR>>
+ std::vector<UniqueSwapchainKHR> createSharedSwapchainsKHRUnique( ArrayProxy<const SwapchainCreateInfoKHR> createInfos, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+ UniqueSwapchainKHR createSharedSwapchainKHRUnique( const SwapchainCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result createSwapchainKHR( const SwapchainCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SwapchainKHR* pSwapchain ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<SwapchainKHR>::type createSwapchainKHR( const SwapchainCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueSwapchainKHR createSwapchainKHRUnique( const SwapchainCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void destroySwapchainKHR( SwapchainKHR swapchain, const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroySwapchainKHR( SwapchainKHR swapchain, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result getSwapchainImagesKHR( SwapchainKHR swapchain, uint32_t* pSwapchainImageCount, Image* pSwapchainImages ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<Image>>
+ typename ResultValueType<std::vector<Image,Allocator>>::type getSwapchainImagesKHR( SwapchainKHR swapchain ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result acquireNextImageKHR( SwapchainKHR swapchain, uint64_t timeout, Semaphore semaphore, Fence fence, uint32_t* pImageIndex ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValue<uint32_t> acquireNextImageKHR( SwapchainKHR swapchain, uint64_t timeout, Semaphore semaphore, Fence fence ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
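+    // Usage sketch (illustrative comment, not generated code): in enhanced mode acquireNextImageKHR
+    // returns a ResultValue<uint32_t> pairing the Result with the acquired image index, so the caller
+    // checks the code before using the index. 'swapchain' and 'imageAvailable' are assumed handles:
+    //   vk::ResultValue<uint32_t> acquired = device.acquireNextImageKHR( swapchain, UINT64_MAX, imageAvailable, vk::Fence() );
+    //   if ( acquired.result == vk::Result::eSuccess )
+    //   {
+    //     uint32_t imageIndex = acquired.value;
+    //   }
+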
+ Result debugMarkerSetObjectNameEXT( DebugMarkerObjectNameInfoEXT* pNameInfo ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<DebugMarkerObjectNameInfoEXT>::type debugMarkerSetObjectNameEXT() const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result debugMarkerSetObjectTagEXT( DebugMarkerObjectTagInfoEXT* pTagInfo ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<DebugMarkerObjectTagInfoEXT>::type debugMarkerSetObjectTagEXT() const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ Result getMemoryWin32HandleNV( DeviceMemory memory, ExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<HANDLE>::type getMemoryWin32HandleNV( DeviceMemory memory, ExternalMemoryHandleTypeFlagsNV handleType ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_WIN32_KHR*/
+
+ Result createIndirectCommandsLayoutNVX( const IndirectCommandsLayoutCreateInfoNVX* pCreateInfo, const AllocationCallbacks* pAllocator, IndirectCommandsLayoutNVX* pIndirectCommandsLayout ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<IndirectCommandsLayoutNVX>::type createIndirectCommandsLayoutNVX( const IndirectCommandsLayoutCreateInfoNVX & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueIndirectCommandsLayoutNVX createIndirectCommandsLayoutNVXUnique( const IndirectCommandsLayoutCreateInfoNVX & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void destroyIndirectCommandsLayoutNVX( IndirectCommandsLayoutNVX indirectCommandsLayout, const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroyIndirectCommandsLayoutNVX( IndirectCommandsLayoutNVX indirectCommandsLayout, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result createObjectTableNVX( const ObjectTableCreateInfoNVX* pCreateInfo, const AllocationCallbacks* pAllocator, ObjectTableNVX* pObjectTable ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<ObjectTableNVX>::type createObjectTableNVX( const ObjectTableCreateInfoNVX & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueObjectTableNVX createObjectTableNVXUnique( const ObjectTableCreateInfoNVX & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void destroyObjectTableNVX( ObjectTableNVX objectTable, const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroyObjectTableNVX( ObjectTableNVX objectTable, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result registerObjectsNVX( ObjectTableNVX objectTable, uint32_t objectCount, const ObjectTableEntryNVX* const* ppObjectTableEntries, const uint32_t* pObjectIndices ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<void>::type registerObjectsNVX( ObjectTableNVX objectTable, ArrayProxy<const ObjectTableEntryNVX* const> pObjectTableEntries, ArrayProxy<const uint32_t> objectIndices ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result unregisterObjectsNVX( ObjectTableNVX objectTable, uint32_t objectCount, const ObjectEntryTypeNVX* pObjectEntryTypes, const uint32_t* pObjectIndices ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<void>::type unregisterObjectsNVX( ObjectTableNVX objectTable, ArrayProxy<const ObjectEntryTypeNVX> objectEntryTypes, ArrayProxy<const uint32_t> objectIndices ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void trimCommandPoolKHR( CommandPool commandPool, CommandPoolTrimFlagsKHR flags ) const;
+#else
+ void trimCommandPoolKHR( CommandPool commandPool, CommandPoolTrimFlagsKHR flags = CommandPoolTrimFlagsKHR() ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VK_USE_PLATFORM_WIN32_KHX
+ Result getMemoryWin32HandleKHX( DeviceMemory memory, ExternalMemoryHandleTypeFlagBitsKHX handleType, HANDLE* pHandle ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<HANDLE>::type getMemoryWin32HandleKHX( DeviceMemory memory, ExternalMemoryHandleTypeFlagBitsKHX handleType ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_WIN32_KHX*/
+
+#ifdef VK_USE_PLATFORM_WIN32_KHX
+ Result getMemoryWin32HandlePropertiesKHX( ExternalMemoryHandleTypeFlagBitsKHX handleType, HANDLE handle, MemoryWin32HandlePropertiesKHX* pMemoryWin32HandleProperties ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<MemoryWin32HandlePropertiesKHX>::type getMemoryWin32HandlePropertiesKHX( ExternalMemoryHandleTypeFlagBitsKHX handleType, HANDLE handle ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_WIN32_KHX*/
+
+ Result getMemoryFdKHX( DeviceMemory memory, ExternalMemoryHandleTypeFlagBitsKHX handleType, int* pFd ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<int>::type getMemoryFdKHX( DeviceMemory memory, ExternalMemoryHandleTypeFlagBitsKHX handleType ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result getMemoryFdPropertiesKHX( ExternalMemoryHandleTypeFlagBitsKHX handleType, int fd, MemoryFdPropertiesKHX* pMemoryFdProperties ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<MemoryFdPropertiesKHX>::type getMemoryFdPropertiesKHX( ExternalMemoryHandleTypeFlagBitsKHX handleType, int fd ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VK_USE_PLATFORM_WIN32_KHX
+ Result getSemaphoreWin32HandleKHX( Semaphore semaphore, ExternalSemaphoreHandleTypeFlagBitsKHX handleType, HANDLE* pHandle ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<HANDLE>::type getSemaphoreWin32HandleKHX( Semaphore semaphore, ExternalSemaphoreHandleTypeFlagBitsKHX handleType ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_WIN32_KHX*/
+
+#ifdef VK_USE_PLATFORM_WIN32_KHX
+ Result importSemaphoreWin32HandleKHX( const ImportSemaphoreWin32HandleInfoKHX* pImportSemaphoreWin32HandleInfo ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<void>::type importSemaphoreWin32HandleKHX( const ImportSemaphoreWin32HandleInfoKHX & importSemaphoreWin32HandleInfo ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_WIN32_KHX*/
+
+ Result getSemaphoreFdKHX( Semaphore semaphore, ExternalSemaphoreHandleTypeFlagBitsKHX handleType, int* pFd ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<int>::type getSemaphoreFdKHX( Semaphore semaphore, ExternalSemaphoreHandleTypeFlagBitsKHX handleType ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result importSemaphoreFdKHX( const ImportSemaphoreFdInfoKHX* pImportSemaphoreFdInfo ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<void>::type importSemaphoreFdKHX( const ImportSemaphoreFdInfoKHX & importSemaphoreFdInfo ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result displayPowerControlEXT( DisplayKHR display, const DisplayPowerInfoEXT* pDisplayPowerInfo ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<void>::type displayPowerControlEXT( DisplayKHR display, const DisplayPowerInfoEXT & displayPowerInfo ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result registerEventEXT( const DeviceEventInfoEXT* pDeviceEventInfo, const AllocationCallbacks* pAllocator, Fence* pFence ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<Fence>::type registerEventEXT( const DeviceEventInfoEXT & deviceEventInfo, const AllocationCallbacks & allocator ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result registerDisplayEventEXT( DisplayKHR display, const DisplayEventInfoEXT* pDisplayEventInfo, const AllocationCallbacks* pAllocator, Fence* pFence ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<Fence>::type registerDisplayEventEXT( DisplayKHR display, const DisplayEventInfoEXT & displayEventInfo, const AllocationCallbacks & allocator ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result getSwapchainCounterEXT( SwapchainKHR swapchain, SurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValue<uint64_t> getSwapchainCounterEXT( SwapchainKHR swapchain, SurfaceCounterFlagBitsEXT counter ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void getGroupPeerMemoryFeaturesKHX( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, PeerMemoryFeatureFlagsKHX* pPeerMemoryFeatures ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ PeerMemoryFeatureFlagsKHX getGroupPeerMemoryFeaturesKHX( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result bindBufferMemory2KHX( uint32_t bindInfoCount, const BindBufferMemoryInfoKHX* pBindInfos ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<void>::type bindBufferMemory2KHX( ArrayProxy<const BindBufferMemoryInfoKHX> bindInfos ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result bindImageMemory2KHX( uint32_t bindInfoCount, const BindImageMemoryInfoKHX* pBindInfos ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<void>::type bindImageMemory2KHX( ArrayProxy<const BindImageMemoryInfoKHX> bindInfos ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result getGroupPresentCapabilitiesKHX( DeviceGroupPresentCapabilitiesKHX* pDeviceGroupPresentCapabilities ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<DeviceGroupPresentCapabilitiesKHX>::type getGroupPresentCapabilitiesKHX() const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result getGroupSurfacePresentModesKHX( SurfaceKHR surface, DeviceGroupPresentModeFlagsKHX* pModes ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<DeviceGroupPresentModeFlagsKHX>::type getGroupSurfacePresentModesKHX( SurfaceKHR surface ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result acquireNextImage2KHX( const AcquireNextImageInfoKHX* pAcquireInfo, uint32_t* pImageIndex ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValue<uint32_t> acquireNextImage2KHX( const AcquireNextImageInfoKHX & acquireInfo ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result createDescriptorUpdateTemplateKHR( const DescriptorUpdateTemplateCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, DescriptorUpdateTemplateKHR* pDescriptorUpdateTemplate ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<DescriptorUpdateTemplateKHR>::type createDescriptorUpdateTemplateKHR( const DescriptorUpdateTemplateCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueDescriptorUpdateTemplateKHR createDescriptorUpdateTemplateKHRUnique( const DescriptorUpdateTemplateCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void destroyDescriptorUpdateTemplateKHR( DescriptorUpdateTemplateKHR descriptorUpdateTemplate, const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroyDescriptorUpdateTemplateKHR( DescriptorUpdateTemplateKHR descriptorUpdateTemplate, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void updateDescriptorSetWithTemplateKHR( DescriptorSet descriptorSet, DescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void* pData ) const;
+
+ void setHdrMetadataEXT( uint32_t swapchainCount, const SwapchainKHR* pSwapchains, const HdrMetadataEXT* pMetadata ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void setHdrMetadataEXT( ArrayProxy<const SwapchainKHR> swapchains, ArrayProxy<const HdrMetadataEXT> metadata ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result getSwapchainStatusKHR( SwapchainKHR swapchain ) const;
+
+ Result getRefreshCycleDurationGOOGLE( SwapchainKHR swapchain, RefreshCycleDurationGOOGLE* pDisplayTimingProperties ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<RefreshCycleDurationGOOGLE>::type getRefreshCycleDurationGOOGLE( SwapchainKHR swapchain ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result getPastPresentationTimingGOOGLE( SwapchainKHR swapchain, uint32_t* pPresentationTimingCount, PastPresentationTimingGOOGLE* pPresentationTimings ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<PastPresentationTimingGOOGLE>>
+ typename ResultValueType<std::vector<PastPresentationTimingGOOGLE,Allocator>>::type getPastPresentationTimingGOOGLE( SwapchainKHR swapchain ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDevice() const
+ {
+ return m_device;
+ }
+
+ explicit operator bool() const
+ {
+ return m_device != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_device == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkDevice m_device;
+ };
+ static_assert( sizeof( Device ) == sizeof( VkDevice ), "handle and wrapper have different size!" );
+
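+  // Usage sketch (illustrative comment, not part of the generated header): most Device members above come
+  // in three flavors - a C-style overload taking pointers and returning vk::Result, an "enhanced" overload
+  // (unless VULKAN_HPP_DISABLE_ENHANCED_MODE is defined) that takes references/ArrayProxy arguments and
+  // throws on failure, and a *Unique overload (unless VULKAN_HPP_NO_SMART_HANDLE is defined) returning a
+  // smart handle. Assuming an already-created vk::Device 'device':
+  //   vk::FenceCreateInfo fenceInfo( vk::FenceCreateFlagBits::eSignaled );
+  //   vk::Fence fence = device.createFence( fenceInfo );                    // enhanced: throws on error
+  //   vk::UniqueFence scoped = device.createFenceUnique( fenceInfo );       // smart handle: destroyed automatically
+  //   vk::Fence raw;
+  //   vk::Result r = device.createFence( &fenceInfo, nullptr, &raw );       // C-style: caller checks r, destroys raw
+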
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ class BufferDeleter
+ {
+ public:
+ BufferDeleter( Device device = Device(), Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_device( device )
+ , m_allocator( allocator )
+ {}
+
+ void operator()( Buffer buffer )
+ {
+ m_device.destroyBuffer( buffer, m_allocator );
+ }
+
+ private:
+ Device m_device;
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+
+ class BufferViewDeleter
+ {
+ public:
+ BufferViewDeleter( Device device = Device(), Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_device( device )
+ , m_allocator( allocator )
+ {}
+
+ void operator()( BufferView bufferView )
+ {
+ m_device.destroyBufferView( bufferView, m_allocator );
+ }
+
+ private:
+ Device m_device;
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+
+ class CommandBufferDeleter
+ {
+ public:
+ CommandBufferDeleter( Device device = Device(), CommandPool commandPool = CommandPool() )
+ : m_device( device )
+ , m_commandPool( commandPool )
+ {}
+
+ void operator()( CommandBuffer commandBuffer )
+ {
+ m_device.freeCommandBuffers( m_commandPool, commandBuffer );
+ }
+
+ private:
+ Device m_device;
+ CommandPool m_commandPool;
+ };
+
+ class CommandPoolDeleter
+ {
+ public:
+ CommandPoolDeleter( Device device = Device(), Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_device( device )
+ , m_allocator( allocator )
+ {}
+
+ void operator()( CommandPool commandPool )
+ {
+ m_device.destroyCommandPool( commandPool, m_allocator );
+ }
+
+ private:
+ Device m_device;
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+
+ class DescriptorPoolDeleter
+ {
+ public:
+ DescriptorPoolDeleter( Device device = Device(), Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_device( device )
+ , m_allocator( allocator )
+ {}
+
+ void operator()( DescriptorPool descriptorPool )
+ {
+ m_device.destroyDescriptorPool( descriptorPool, m_allocator );
+ }
+
+ private:
+ Device m_device;
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+
+ class DescriptorSetDeleter
+ {
+ public:
+ DescriptorSetDeleter( Device device = Device(), DescriptorPool descriptorPool = DescriptorPool() )
+ : m_device( device )
+ , m_descriptorPool( descriptorPool )
+ {}
+
+ void operator()( DescriptorSet descriptorSet )
+ {
+ m_device.freeDescriptorSets( m_descriptorPool, descriptorSet );
+ }
+
+ private:
+ Device m_device;
+ DescriptorPool m_descriptorPool;
+ };
+
+ class DescriptorSetLayoutDeleter
+ {
+ public:
+ DescriptorSetLayoutDeleter( Device device = Device(), Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_device( device )
+ , m_allocator( allocator )
+ {}
+
+ void operator()( DescriptorSetLayout descriptorSetLayout )
+ {
+ m_device.destroyDescriptorSetLayout( descriptorSetLayout, m_allocator );
+ }
+
+ private:
+ Device m_device;
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+
+ class DescriptorUpdateTemplateKHRDeleter
+ {
+ public:
+ DescriptorUpdateTemplateKHRDeleter( Device device = Device(), Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_device( device )
+ , m_allocator( allocator )
+ {}
+
+ void operator()( DescriptorUpdateTemplateKHR descriptorUpdateTemplateKHR )
+ {
+ m_device.destroyDescriptorUpdateTemplateKHR( descriptorUpdateTemplateKHR, m_allocator );
+ }
+
+ private:
+ Device m_device;
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+
+ class DeviceMemoryDeleter
+ {
+ public:
+ DeviceMemoryDeleter( Device device = Device(), Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_device( device )
+ , m_allocator( allocator )
+ {}
+
+ void operator()( DeviceMemory deviceMemory )
+ {
+ m_device.freeMemory( deviceMemory, m_allocator );
+ }
+
+ private:
+ Device m_device;
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+
+ class EventDeleter
+ {
+ public:
+ EventDeleter( Device device = Device(), Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_device( device )
+ , m_allocator( allocator )
+ {}
+
+ void operator()( Event event )
+ {
+ m_device.destroyEvent( event, m_allocator );
+ }
+
+ private:
+ Device m_device;
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+
+ class FenceDeleter
+ {
+ public:
+ FenceDeleter( Device device = Device(), Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_device( device )
+ , m_allocator( allocator )
+ {}
+
+ void operator()( Fence fence )
+ {
+ m_device.destroyFence( fence, m_allocator );
+ }
+
+ private:
+ Device m_device;
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+
+ class FramebufferDeleter
+ {
+ public:
+ FramebufferDeleter( Device device = Device(), Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_device( device )
+ , m_allocator( allocator )
+ {}
+
+ void operator()( Framebuffer framebuffer )
+ {
+ m_device.destroyFramebuffer( framebuffer, m_allocator );
+ }
+
+ private:
+ Device m_device;
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+
+ class ImageDeleter
+ {
+ public:
+ ImageDeleter( Device device = Device(), Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_device( device )
+ , m_allocator( allocator )
+ {}
+
+ void operator()( Image image )
+ {
+ m_device.destroyImage( image, m_allocator );
+ }
+
+ private:
+ Device m_device;
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+
+ class ImageViewDeleter
+ {
+ public:
+ ImageViewDeleter( Device device = Device(), Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_device( device )
+ , m_allocator( allocator )
+ {}
+
+ void operator()( ImageView imageView )
+ {
+ m_device.destroyImageView( imageView, m_allocator );
+ }
+
+ private:
+ Device m_device;
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+
+ class IndirectCommandsLayoutNVXDeleter
+ {
+ public:
+ IndirectCommandsLayoutNVXDeleter( Device device = Device(), Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_device( device )
+ , m_allocator( allocator )
+ {}
+
+ void operator()( IndirectCommandsLayoutNVX indirectCommandsLayoutNVX )
+ {
+ m_device.destroyIndirectCommandsLayoutNVX( indirectCommandsLayoutNVX, m_allocator );
+ }
+
+ private:
+ Device m_device;
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+
+ class ObjectTableNVXDeleter
+ {
+ public:
+ ObjectTableNVXDeleter( Device device = Device(), Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_device( device )
+ , m_allocator( allocator )
+ {}
+
+ void operator()( ObjectTableNVX objectTableNVX )
+ {
+ m_device.destroyObjectTableNVX( objectTableNVX, m_allocator );
+ }
+
+ private:
+ Device m_device;
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+
+ class PipelineDeleter
+ {
+ public:
+ PipelineDeleter( Device device = Device(), Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_device( device )
+ , m_allocator( allocator )
+ {}
+
+ void operator()( Pipeline pipeline )
+ {
+ m_device.destroyPipeline( pipeline, m_allocator );
+ }
+
+ private:
+ Device m_device;
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+
+ class PipelineCacheDeleter
+ {
+ public:
+ PipelineCacheDeleter( Device device = Device(), Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_device( device )
+ , m_allocator( allocator )
+ {}
+
+ void operator()( PipelineCache pipelineCache )
+ {
+ m_device.destroyPipelineCache( pipelineCache, m_allocator );
+ }
+
+ private:
+ Device m_device;
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+
+ class PipelineLayoutDeleter
+ {
+ public:
+ PipelineLayoutDeleter( Device device = Device(), Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_device( device )
+ , m_allocator( allocator )
+ {}
+
+ void operator()( PipelineLayout pipelineLayout )
+ {
+ m_device.destroyPipelineLayout( pipelineLayout, m_allocator );
+ }
+
+ private:
+ Device m_device;
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+
+ class QueryPoolDeleter
+ {
+ public:
+ QueryPoolDeleter( Device device = Device(), Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_device( device )
+ , m_allocator( allocator )
+ {}
+
+ void operator()( QueryPool queryPool )
+ {
+ m_device.destroyQueryPool( queryPool, m_allocator );
+ }
+
+ private:
+ Device m_device;
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+
+ class RenderPassDeleter
+ {
+ public:
+ RenderPassDeleter( Device device = Device(), Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_device( device )
+ , m_allocator( allocator )
+ {}
+
+ void operator()( RenderPass renderPass )
+ {
+ m_device.destroyRenderPass( renderPass, m_allocator );
+ }
+
+ private:
+ Device m_device;
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+
+ class SamplerDeleter
+ {
+ public:
+ SamplerDeleter( Device device = Device(), Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_device( device )
+ , m_allocator( allocator )
+ {}
+
+ void operator()( Sampler sampler )
+ {
+ m_device.destroySampler( sampler, m_allocator );
+ }
+
+ private:
+ Device m_device;
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+
+ class SemaphoreDeleter
+ {
+ public:
+ SemaphoreDeleter( Device device = Device(), Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_device( device )
+ , m_allocator( allocator )
+ {}
+
+ void operator()( Semaphore semaphore )
+ {
+ m_device.destroySemaphore( semaphore, m_allocator );
+ }
+
+ private:
+ Device m_device;
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+
+ class ShaderModuleDeleter
+ {
+ public:
+ ShaderModuleDeleter( Device device = Device(), Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_device( device )
+ , m_allocator( allocator )
+ {}
+
+ void operator()( ShaderModule shaderModule )
+ {
+ m_device.destroyShaderModule( shaderModule, m_allocator );
+ }
+
+ private:
+ Device m_device;
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+
+ class SwapchainKHRDeleter
+ {
+ public:
+ SwapchainKHRDeleter( Device device = Device(), Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_device( device )
+ , m_allocator( allocator )
+ {}
+
+ void operator()( SwapchainKHR swapchainKHR )
+ {
+ m_device.destroySwapchainKHR( swapchainKHR, m_allocator );
+ }
+
+ private:
+ Device m_device;
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+
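+  // Usage sketch (illustrative comment, not generated code): the deleter classes above are what the
+  // Unique* handles invoke on destruction; each captures the owning Device plus either the allocation
+  // callbacks or the owning pool, so teardown is routed to the matching destroy/free call. Assuming an
+  // existing vk::Device 'device', vk::CommandPool 'pool' and a filled-in vk::BufferCreateInfo 'bufferInfo':
+  //   {
+  //     vk::UniqueBuffer staging = device.createBufferUnique( bufferInfo );          // uses BufferDeleter( device, nullptr )
+  //     vk::CommandBufferAllocateInfo cbInfo( pool, vk::CommandBufferLevel::ePrimary, 1 );
+  //     std::vector<vk::UniqueCommandBuffer> cbs = device.allocateCommandBuffersUnique( cbInfo );
+  //   } // scope exit: buffer destroyed via destroyBuffer, command buffers freed back into 'pool'
+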
+ VULKAN_HPP_INLINE PFN_vkVoidFunction Device::getProcAddr( const char* pName ) const
+ {
+ return vkGetDeviceProcAddr( m_device, pName );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE PFN_vkVoidFunction Device::getProcAddr( const std::string & name ) const
+ {
+ return vkGetDeviceProcAddr( m_device, name.c_str() );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::destroy( const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroyDevice( m_device, reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::destroy( Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroyDevice( m_device, reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::getQueue( uint32_t queueFamilyIndex, uint32_t queueIndex, Queue* pQueue ) const
+ {
+ vkGetDeviceQueue( m_device, queueFamilyIndex, queueIndex, reinterpret_cast<VkQueue*>( pQueue ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE Queue Device::getQueue( uint32_t queueFamilyIndex, uint32_t queueIndex ) const
+ {
+ Queue queue;
+ vkGetDeviceQueue( m_device, queueFamilyIndex, queueIndex, reinterpret_cast<VkQueue*>( &queue ) );
+ return queue;
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE Result Device::waitIdle() const
+ {
+ return static_cast<Result>( vkDeviceWaitIdle( m_device ) );
+ }
+#else
+ VULKAN_HPP_INLINE ResultValueType<void>::type Device::waitIdle() const
+ {
+ Result result = static_cast<Result>( vkDeviceWaitIdle( m_device ) );
+ return createResultValue( result, "vk::Device::waitIdle" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::allocateMemory( const MemoryAllocateInfo* pAllocateInfo, const AllocationCallbacks* pAllocator, DeviceMemory* pMemory ) const
+ {
+ return static_cast<Result>( vkAllocateMemory( m_device, reinterpret_cast<const VkMemoryAllocateInfo*>( pAllocateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkDeviceMemory*>( pMemory ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<DeviceMemory>::type Device::allocateMemory( const MemoryAllocateInfo & allocateInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ DeviceMemory memory;
+ Result result = static_cast<Result>( vkAllocateMemory( m_device, reinterpret_cast<const VkMemoryAllocateInfo*>( &allocateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDeviceMemory*>( &memory ) ) );
+ return createResultValue( result, memory, "vk::Device::allocateMemory" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueDeviceMemory Device::allocateMemoryUnique( const MemoryAllocateInfo & allocateInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ DeviceMemoryDeleter deleter( *this, allocator );
+ return UniqueDeviceMemory( allocateMemory( allocateInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::freeMemory( DeviceMemory memory, const AllocationCallbacks* pAllocator ) const
+ {
+ vkFreeMemory( m_device, static_cast<VkDeviceMemory>( memory ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::freeMemory( DeviceMemory memory, Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkFreeMemory( m_device, static_cast<VkDeviceMemory>( memory ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::mapMemory( DeviceMemory memory, DeviceSize offset, DeviceSize size, MemoryMapFlags flags, void** ppData ) const
+ {
+ return static_cast<Result>( vkMapMemory( m_device, static_cast<VkDeviceMemory>( memory ), offset, size, static_cast<VkMemoryMapFlags>( flags ), ppData ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<void*>::type Device::mapMemory( DeviceMemory memory, DeviceSize offset, DeviceSize size, MemoryMapFlags flags ) const
+ {
+ void* pData;
+ Result result = static_cast<Result>( vkMapMemory( m_device, static_cast<VkDeviceMemory>( memory ), offset, size, static_cast<VkMemoryMapFlags>( flags ), &pData ) );
+ return createResultValue( result, pData, "vk::Device::mapMemory" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::unmapMemory( DeviceMemory memory ) const
+ {
+ vkUnmapMemory( m_device, static_cast<VkDeviceMemory>( memory ) );
+ }
+
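+  // Usage sketch (illustrative comment, not generated code): in enhanced mode allocateMemory and mapMemory
+  // return the DeviceMemory handle and mapped pointer directly, throwing on failure. 'memoryRequirements',
+  // 'memoryTypeIndex' and 'srcBytes' are assumed to have been obtained elsewhere:
+  //   vk::MemoryAllocateInfo allocInfo( memoryRequirements.size, memoryTypeIndex );
+  //   vk::DeviceMemory memory = device.allocateMemory( allocInfo );
+  //   void* data = device.mapMemory( memory, 0, memoryRequirements.size, vk::MemoryMapFlags() );
+  //   memcpy( data, srcBytes, static_cast<size_t>( memoryRequirements.size ) );
+  //   device.unmapMemory( memory );
+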
+ VULKAN_HPP_INLINE Result Device::flushMappedMemoryRanges( uint32_t memoryRangeCount, const MappedMemoryRange* pMemoryRanges ) const
+ {
+ return static_cast<Result>( vkFlushMappedMemoryRanges( m_device, memoryRangeCount, reinterpret_cast<const VkMappedMemoryRange*>( pMemoryRanges ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<void>::type Device::flushMappedMemoryRanges( ArrayProxy<const MappedMemoryRange> memoryRanges ) const
+ {
+ Result result = static_cast<Result>( vkFlushMappedMemoryRanges( m_device, memoryRanges.size() , reinterpret_cast<const VkMappedMemoryRange*>( memoryRanges.data() ) ) );
+ return createResultValue( result, "vk::Device::flushMappedMemoryRanges" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::invalidateMappedMemoryRanges( uint32_t memoryRangeCount, const MappedMemoryRange* pMemoryRanges ) const
+ {
+ return static_cast<Result>( vkInvalidateMappedMemoryRanges( m_device, memoryRangeCount, reinterpret_cast<const VkMappedMemoryRange*>( pMemoryRanges ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<void>::type Device::invalidateMappedMemoryRanges( ArrayProxy<const MappedMemoryRange> memoryRanges ) const
+ {
+ Result result = static_cast<Result>( vkInvalidateMappedMemoryRanges( m_device, memoryRanges.size() , reinterpret_cast<const VkMappedMemoryRange*>( memoryRanges.data() ) ) );
+ return createResultValue( result, "vk::Device::invalidateMappedMemoryRanges" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::getMemoryCommitment( DeviceMemory memory, DeviceSize* pCommittedMemoryInBytes ) const
+ {
+ vkGetDeviceMemoryCommitment( m_device, static_cast<VkDeviceMemory>( memory ), pCommittedMemoryInBytes );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE DeviceSize Device::getMemoryCommitment( DeviceMemory memory ) const
+ {
+ DeviceSize committedMemoryInBytes;
+ vkGetDeviceMemoryCommitment( m_device, static_cast<VkDeviceMemory>( memory ), &committedMemoryInBytes );
+ return committedMemoryInBytes;
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::getBufferMemoryRequirements( Buffer buffer, MemoryRequirements* pMemoryRequirements ) const
+ {
+ vkGetBufferMemoryRequirements( m_device, static_cast<VkBuffer>( buffer ), reinterpret_cast<VkMemoryRequirements*>( pMemoryRequirements ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE MemoryRequirements Device::getBufferMemoryRequirements( Buffer buffer ) const
+ {
+ MemoryRequirements memoryRequirements;
+ vkGetBufferMemoryRequirements( m_device, static_cast<VkBuffer>( buffer ), reinterpret_cast<VkMemoryRequirements*>( &memoryRequirements ) );
+ return memoryRequirements;
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE Result Device::bindBufferMemory( Buffer buffer, DeviceMemory memory, DeviceSize memoryOffset ) const
+ {
+ return static_cast<Result>( vkBindBufferMemory( m_device, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceMemory>( memory ), memoryOffset ) );
+ }
+#else
+ VULKAN_HPP_INLINE ResultValueType<void>::type Device::bindBufferMemory( Buffer buffer, DeviceMemory memory, DeviceSize memoryOffset ) const
+ {
+ Result result = static_cast<Result>( vkBindBufferMemory( m_device, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceMemory>( memory ), memoryOffset ) );
+ return createResultValue( result, "vk::Device::bindBufferMemory" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::getImageMemoryRequirements( Image image, MemoryRequirements* pMemoryRequirements ) const
+ {
+ vkGetImageMemoryRequirements( m_device, static_cast<VkImage>( image ), reinterpret_cast<VkMemoryRequirements*>( pMemoryRequirements ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE MemoryRequirements Device::getImageMemoryRequirements( Image image ) const
+ {
+ MemoryRequirements memoryRequirements;
+ vkGetImageMemoryRequirements( m_device, static_cast<VkImage>( image ), reinterpret_cast<VkMemoryRequirements*>( &memoryRequirements ) );
+ return memoryRequirements;
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE Result Device::bindImageMemory( Image image, DeviceMemory memory, DeviceSize memoryOffset ) const
+ {
+ return static_cast<Result>( vkBindImageMemory( m_device, static_cast<VkImage>( image ), static_cast<VkDeviceMemory>( memory ), memoryOffset ) );
+ }
+#else
+ VULKAN_HPP_INLINE ResultValueType<void>::type Device::bindImageMemory( Image image, DeviceMemory memory, DeviceSize memoryOffset ) const
+ {
+ Result result = static_cast<Result>( vkBindImageMemory( m_device, static_cast<VkImage>( image ), static_cast<VkDeviceMemory>( memory ), memoryOffset ) );
+ return createResultValue( result, "vk::Device::bindImageMemory" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::getImageSparseMemoryRequirements( Image image, uint32_t* pSparseMemoryRequirementCount, SparseImageMemoryRequirements* pSparseMemoryRequirements ) const
+ {
+ vkGetImageSparseMemoryRequirements( m_device, static_cast<VkImage>( image ), pSparseMemoryRequirementCount, reinterpret_cast<VkSparseImageMemoryRequirements*>( pSparseMemoryRequirements ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE std::vector<SparseImageMemoryRequirements,Allocator> Device::getImageSparseMemoryRequirements( Image image ) const
+ {
+ std::vector<SparseImageMemoryRequirements,Allocator> sparseMemoryRequirements;
+ uint32_t sparseMemoryRequirementCount;
+ vkGetImageSparseMemoryRequirements( m_device, static_cast<VkImage>( image ), &sparseMemoryRequirementCount, nullptr );
+ sparseMemoryRequirements.resize( sparseMemoryRequirementCount );
+ vkGetImageSparseMemoryRequirements( m_device, static_cast<VkImage>( image ), &sparseMemoryRequirementCount, reinterpret_cast<VkSparseImageMemoryRequirements*>( sparseMemoryRequirements.data() ) );
+ return sparseMemoryRequirements;
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::createFence( const FenceCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Fence* pFence ) const
+ {
+ return static_cast<Result>( vkCreateFence( m_device, reinterpret_cast<const VkFenceCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkFence*>( pFence ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<Fence>::type Device::createFence( const FenceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ Fence fence;
+ Result result = static_cast<Result>( vkCreateFence( m_device, reinterpret_cast<const VkFenceCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkFence*>( &fence ) ) );
+ return createResultValue( result, fence, "vk::Device::createFence" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueFence Device::createFenceUnique( const FenceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ FenceDeleter deleter( *this, allocator );
+ return UniqueFence( createFence( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::destroyFence( Fence fence, const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroyFence( m_device, static_cast<VkFence>( fence ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::destroyFence( Fence fence, Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroyFence( m_device, static_cast<VkFence>( fence ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::resetFences( uint32_t fenceCount, const Fence* pFences ) const
+ {
+ return static_cast<Result>( vkResetFences( m_device, fenceCount, reinterpret_cast<const VkFence*>( pFences ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<void>::type Device::resetFences( ArrayProxy<const Fence> fences ) const
+ {
+ Result result = static_cast<Result>( vkResetFences( m_device, fences.size() , reinterpret_cast<const VkFence*>( fences.data() ) ) );
+ return createResultValue( result, "vk::Device::resetFences" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE Result Device::getFenceStatus( Fence fence ) const
+ {
+ return static_cast<Result>( vkGetFenceStatus( m_device, static_cast<VkFence>( fence ) ) );
+ }
+#else
+ VULKAN_HPP_INLINE Result Device::getFenceStatus( Fence fence ) const
+ {
+ Result result = static_cast<Result>( vkGetFenceStatus( m_device, static_cast<VkFence>( fence ) ) );
+ return createResultValue( result, "vk::Device::getFenceStatus", { Result::eSuccess, Result::eNotReady } );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::waitForFences( uint32_t fenceCount, const Fence* pFences, Bool32 waitAll, uint64_t timeout ) const
+ {
+ return static_cast<Result>( vkWaitForFences( m_device, fenceCount, reinterpret_cast<const VkFence*>( pFences ), waitAll, timeout ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE Result Device::waitForFences( ArrayProxy<const Fence> fences, Bool32 waitAll, uint64_t timeout ) const
+ {
+ Result result = static_cast<Result>( vkWaitForFences( m_device, fences.size() , reinterpret_cast<const VkFence*>( fences.data() ), waitAll, timeout ) );
+ return createResultValue( result, "vk::Device::waitForFences", { Result::eSuccess, Result::eTimeout } );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
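+  // Usage sketch (illustrative comment, not generated code): waitForFences keeps returning a vk::Result even
+  // in enhanced mode because eTimeout is an expected outcome (see the success-code list above); any other
+  // failure code throws. Assuming a fence signalled by previously submitted work:
+  //   vk::Fence fence = device.createFence( vk::FenceCreateInfo() );
+  //   // ... submit work that signals 'fence' ...
+  //   if ( device.waitForFences( fence, VK_TRUE, 100000000 ) == vk::Result::eTimeout )
+  //   {
+  //     // still unsignalled after ~100 ms
+  //   }
+  //   device.resetFences( fence );
+  //   device.destroyFence( fence );
+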
+ VULKAN_HPP_INLINE Result Device::createSemaphore( const SemaphoreCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Semaphore* pSemaphore ) const
+ {
+ return static_cast<Result>( vkCreateSemaphore( m_device, reinterpret_cast<const VkSemaphoreCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSemaphore*>( pSemaphore ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<Semaphore>::type Device::createSemaphore( const SemaphoreCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ Semaphore semaphore;
+ Result result = static_cast<Result>( vkCreateSemaphore( m_device, reinterpret_cast<const VkSemaphoreCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSemaphore*>( &semaphore ) ) );
+ return createResultValue( result, semaphore, "vk::Device::createSemaphore" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueSemaphore Device::createSemaphoreUnique( const SemaphoreCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SemaphoreDeleter deleter( *this, allocator );
+ return UniqueSemaphore( createSemaphore( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::destroySemaphore( Semaphore semaphore, const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroySemaphore( m_device, static_cast<VkSemaphore>( semaphore ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::destroySemaphore( Semaphore semaphore, Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroySemaphore( m_device, static_cast<VkSemaphore>( semaphore ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::createEvent( const EventCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Event* pEvent ) const
+ {
+ return static_cast<Result>( vkCreateEvent( m_device, reinterpret_cast<const VkEventCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkEvent*>( pEvent ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<Event>::type Device::createEvent( const EventCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ Event event;
+ Result result = static_cast<Result>( vkCreateEvent( m_device, reinterpret_cast<const VkEventCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkEvent*>( &event ) ) );
+ return createResultValue( result, event, "vk::Device::createEvent" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueEvent Device::createEventUnique( const EventCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ EventDeleter deleter( *this, allocator );
+ return UniqueEvent( createEvent( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::destroyEvent( Event event, const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroyEvent( m_device, static_cast<VkEvent>( event ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::destroyEvent( Event event, Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroyEvent( m_device, static_cast<VkEvent>( event ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE Result Device::getEventStatus( Event event ) const
+ {
+ return static_cast<Result>( vkGetEventStatus( m_device, static_cast<VkEvent>( event ) ) );
+ }
+#else
+ VULKAN_HPP_INLINE Result Device::getEventStatus( Event event ) const
+ {
+ Result result = static_cast<Result>( vkGetEventStatus( m_device, static_cast<VkEvent>( event ) ) );
+ return createResultValue( result, "vk::Device::getEventStatus", { Result::eEventSet, Result::eEventReset } );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE Result Device::setEvent( Event event ) const
+ {
+ return static_cast<Result>( vkSetEvent( m_device, static_cast<VkEvent>( event ) ) );
+ }
+#else
+ VULKAN_HPP_INLINE ResultValueType<void>::type Device::setEvent( Event event ) const
+ {
+ Result result = static_cast<Result>( vkSetEvent( m_device, static_cast<VkEvent>( event ) ) );
+ return createResultValue( result, "vk::Device::setEvent" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE Result Device::resetEvent( Event event ) const
+ {
+ return static_cast<Result>( vkResetEvent( m_device, static_cast<VkEvent>( event ) ) );
+ }
+#else
+ VULKAN_HPP_INLINE ResultValueType<void>::type Device::resetEvent( Event event ) const
+ {
+ Result result = static_cast<Result>( vkResetEvent( m_device, static_cast<VkEvent>( event ) ) );
+ return createResultValue( result, "vk::Device::resetEvent" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::createQueryPool( const QueryPoolCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, QueryPool* pQueryPool ) const
+ {
+ return static_cast<Result>( vkCreateQueryPool( m_device, reinterpret_cast<const VkQueryPoolCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkQueryPool*>( pQueryPool ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<QueryPool>::type Device::createQueryPool( const QueryPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ QueryPool queryPool;
+ Result result = static_cast<Result>( vkCreateQueryPool( m_device, reinterpret_cast<const VkQueryPoolCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkQueryPool*>( &queryPool ) ) );
+ return createResultValue( result, queryPool, "vk::Device::createQueryPool" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueQueryPool Device::createQueryPoolUnique( const QueryPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ QueryPoolDeleter deleter( *this, allocator );
+ return UniqueQueryPool( createQueryPool( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::destroyQueryPool( QueryPool queryPool, const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroyQueryPool( m_device, static_cast<VkQueryPool>( queryPool ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::destroyQueryPool( QueryPool queryPool, Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroyQueryPool( m_device, static_cast<VkQueryPool>( queryPool ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::getQueryPoolResults( QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, DeviceSize stride, QueryResultFlags flags ) const
+ {
+ return static_cast<Result>( vkGetQueryPoolResults( m_device, static_cast<VkQueryPool>( queryPool ), firstQuery, queryCount, dataSize, pData, stride, static_cast<VkQueryResultFlags>( flags ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename T>
+ VULKAN_HPP_INLINE Result Device::getQueryPoolResults( QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, ArrayProxy<T> data, DeviceSize stride, QueryResultFlags flags ) const
+ {
+ Result result = static_cast<Result>( vkGetQueryPoolResults( m_device, static_cast<VkQueryPool>( queryPool ), firstQuery, queryCount, data.size() * sizeof( T ) , reinterpret_cast<void*>( data.data() ), stride, static_cast<VkQueryResultFlags>( flags ) ) );
+ return createResultValue( result, "vk::Device::getQueryPoolResults", { Result::eSuccess, Result::eNotReady } );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::createBuffer( const BufferCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Buffer* pBuffer ) const
+ {
+ return static_cast<Result>( vkCreateBuffer( m_device, reinterpret_cast<const VkBufferCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkBuffer*>( pBuffer ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<Buffer>::type Device::createBuffer( const BufferCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ Buffer buffer;
+ Result result = static_cast<Result>( vkCreateBuffer( m_device, reinterpret_cast<const VkBufferCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkBuffer*>( &buffer ) ) );
+ return createResultValue( result, buffer, "vk::Device::createBuffer" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueBuffer Device::createBufferUnique( const BufferCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ BufferDeleter deleter( *this, allocator );
+ return UniqueBuffer( createBuffer( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::destroyBuffer( Buffer buffer, const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroyBuffer( m_device, static_cast<VkBuffer>( buffer ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::destroyBuffer( Buffer buffer, Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroyBuffer( m_device, static_cast<VkBuffer>( buffer ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::createBufferView( const BufferViewCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, BufferView* pView ) const
+ {
+ return static_cast<Result>( vkCreateBufferView( m_device, reinterpret_cast<const VkBufferViewCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkBufferView*>( pView ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<BufferView>::type Device::createBufferView( const BufferViewCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ BufferView view;
+ Result result = static_cast<Result>( vkCreateBufferView( m_device, reinterpret_cast<const VkBufferViewCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkBufferView*>( &view ) ) );
+ return createResultValue( result, view, "vk::Device::createBufferView" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueBufferView Device::createBufferViewUnique( const BufferViewCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ BufferViewDeleter deleter( *this, allocator );
+ return UniqueBufferView( createBufferView( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::destroyBufferView( BufferView bufferView, const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroyBufferView( m_device, static_cast<VkBufferView>( bufferView ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::destroyBufferView( BufferView bufferView, Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroyBufferView( m_device, static_cast<VkBufferView>( bufferView ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::createImage( const ImageCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Image* pImage ) const
+ {
+ return static_cast<Result>( vkCreateImage( m_device, reinterpret_cast<const VkImageCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkImage*>( pImage ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<Image>::type Device::createImage( const ImageCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ Image image;
+ Result result = static_cast<Result>( vkCreateImage( m_device, reinterpret_cast<const VkImageCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkImage*>( &image ) ) );
+ return createResultValue( result, image, "vk::Device::createImage" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueImage Device::createImageUnique( const ImageCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ ImageDeleter deleter( *this, allocator );
+ return UniqueImage( createImage( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::destroyImage( Image image, const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroyImage( m_device, static_cast<VkImage>( image ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::destroyImage( Image image, Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroyImage( m_device, static_cast<VkImage>( image ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::getImageSubresourceLayout( Image image, const ImageSubresource* pSubresource, SubresourceLayout* pLayout ) const
+ {
+ vkGetImageSubresourceLayout( m_device, static_cast<VkImage>( image ), reinterpret_cast<const VkImageSubresource*>( pSubresource ), reinterpret_cast<VkSubresourceLayout*>( pLayout ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE SubresourceLayout Device::getImageSubresourceLayout( Image image, const ImageSubresource & subresource ) const
+ {
+ SubresourceLayout layout;
+ vkGetImageSubresourceLayout( m_device, static_cast<VkImage>( image ), reinterpret_cast<const VkImageSubresource*>( &subresource ), reinterpret_cast<VkSubresourceLayout*>( &layout ) );
+ return layout;
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::createImageView( const ImageViewCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, ImageView* pView ) const
+ {
+ return static_cast<Result>( vkCreateImageView( m_device, reinterpret_cast<const VkImageViewCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkImageView*>( pView ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<ImageView>::type Device::createImageView( const ImageViewCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ ImageView view;
+ Result result = static_cast<Result>( vkCreateImageView( m_device, reinterpret_cast<const VkImageViewCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkImageView*>( &view ) ) );
+ return createResultValue( result, view, "vk::Device::createImageView" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueImageView Device::createImageViewUnique( const ImageViewCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ ImageViewDeleter deleter( *this, allocator );
+ return UniqueImageView( createImageView( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::destroyImageView( ImageView imageView, const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroyImageView( m_device, static_cast<VkImageView>( imageView ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::destroyImageView( ImageView imageView, Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroyImageView( m_device, static_cast<VkImageView>( imageView ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::createShaderModule( const ShaderModuleCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, ShaderModule* pShaderModule ) const
+ {
+ return static_cast<Result>( vkCreateShaderModule( m_device, reinterpret_cast<const VkShaderModuleCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkShaderModule*>( pShaderModule ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<ShaderModule>::type Device::createShaderModule( const ShaderModuleCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ ShaderModule shaderModule;
+ Result result = static_cast<Result>( vkCreateShaderModule( m_device, reinterpret_cast<const VkShaderModuleCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkShaderModule*>( &shaderModule ) ) );
+ return createResultValue( result, shaderModule, "vk::Device::createShaderModule" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueShaderModule Device::createShaderModuleUnique( const ShaderModuleCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ ShaderModuleDeleter deleter( *this, allocator );
+ return UniqueShaderModule( createShaderModule( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::destroyShaderModule( ShaderModule shaderModule, const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroyShaderModule( m_device, static_cast<VkShaderModule>( shaderModule ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::destroyShaderModule( ShaderModule shaderModule, Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroyShaderModule( m_device, static_cast<VkShaderModule>( shaderModule ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::createPipelineCache( const PipelineCacheCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, PipelineCache* pPipelineCache ) const
+ {
+ return static_cast<Result>( vkCreatePipelineCache( m_device, reinterpret_cast<const VkPipelineCacheCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkPipelineCache*>( pPipelineCache ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<PipelineCache>::type Device::createPipelineCache( const PipelineCacheCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ PipelineCache pipelineCache;
+ Result result = static_cast<Result>( vkCreatePipelineCache( m_device, reinterpret_cast<const VkPipelineCacheCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipelineCache*>( &pipelineCache ) ) );
+ return createResultValue( result, pipelineCache, "vk::Device::createPipelineCache" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniquePipelineCache Device::createPipelineCacheUnique( const PipelineCacheCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ PipelineCacheDeleter deleter( *this, allocator );
+ return UniquePipelineCache( createPipelineCache( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::destroyPipelineCache( PipelineCache pipelineCache, const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroyPipelineCache( m_device, static_cast<VkPipelineCache>( pipelineCache ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::destroyPipelineCache( PipelineCache pipelineCache, Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroyPipelineCache( m_device, static_cast<VkPipelineCache>( pipelineCache ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::getPipelineCacheData( PipelineCache pipelineCache, size_t* pDataSize, void* pData ) const
+ {
+ return static_cast<Result>( vkGetPipelineCacheData( m_device, static_cast<VkPipelineCache>( pipelineCache ), pDataSize, pData ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE typename ResultValueType<std::vector<uint8_t,Allocator>>::type Device::getPipelineCacheData( PipelineCache pipelineCache ) const
+ {
+ std::vector<uint8_t,Allocator> data;
+ size_t dataSize;
+ Result result;
+ do
+ {
+ result = static_cast<Result>( vkGetPipelineCacheData( m_device, static_cast<VkPipelineCache>( pipelineCache ), &dataSize, nullptr ) );
+ if ( ( result == Result::eSuccess ) && dataSize )
+ {
+ data.resize( dataSize );
+ result = static_cast<Result>( vkGetPipelineCacheData( m_device, static_cast<VkPipelineCache>( pipelineCache ), &dataSize, reinterpret_cast<void*>( data.data() ) ) );
+ }
+ } while ( result == Result::eIncomplete );
+ assert( dataSize <= data.size() );
+ data.resize( dataSize );
+ return createResultValue( result, data, "vk::Device::getPipelineCacheData" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
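+
+  // Illustrative usage sketch, not part of the generated header: with enhanced mode
+  // enabled, the templated getPipelineCacheData overload above handles the
+  // size-query / resize / eIncomplete retry loop itself and returns the whole blob.
+  // The handles `device` and `pipelineCache` below are assumed to be valid.
+  //
+  //   std::vector<uint8_t> blob = device.getPipelineCacheData( pipelineCache );
+  //   // `blob` can be persisted and fed back later through
+  //   // vk::PipelineCacheCreateInfo::pInitialData / initialDataSize.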
+
+ VULKAN_HPP_INLINE Result Device::mergePipelineCaches( PipelineCache dstCache, uint32_t srcCacheCount, const PipelineCache* pSrcCaches ) const
+ {
+ return static_cast<Result>( vkMergePipelineCaches( m_device, static_cast<VkPipelineCache>( dstCache ), srcCacheCount, reinterpret_cast<const VkPipelineCache*>( pSrcCaches ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<void>::type Device::mergePipelineCaches( PipelineCache dstCache, ArrayProxy<const PipelineCache> srcCaches ) const
+ {
+ Result result = static_cast<Result>( vkMergePipelineCaches( m_device, static_cast<VkPipelineCache>( dstCache ), srcCaches.size() , reinterpret_cast<const VkPipelineCache*>( srcCaches.data() ) ) );
+ return createResultValue( result, "vk::Device::mergePipelineCaches" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::createGraphicsPipelines( PipelineCache pipelineCache, uint32_t createInfoCount, const GraphicsPipelineCreateInfo* pCreateInfos, const AllocationCallbacks* pAllocator, Pipeline* pPipelines ) const
+ {
+ return static_cast<Result>( vkCreateGraphicsPipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfoCount, reinterpret_cast<const VkGraphicsPipelineCreateInfo*>( pCreateInfos ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkPipeline*>( pPipelines ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE typename ResultValueType<std::vector<Pipeline,Allocator>>::type Device::createGraphicsPipelines( PipelineCache pipelineCache, ArrayProxy<const GraphicsPipelineCreateInfo> createInfos, Optional<const AllocationCallbacks> allocator ) const
+ {
+ std::vector<Pipeline,Allocator> pipelines( createInfos.size() );
+ Result result = static_cast<Result>( vkCreateGraphicsPipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size() , reinterpret_cast<const VkGraphicsPipelineCreateInfo*>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>( pipelines.data() ) ) );
+ return createResultValue( result, pipelines, "vk::Device::createGraphicsPipelines" );
+ }
+ VULKAN_HPP_INLINE ResultValueType<Pipeline>::type Device::createGraphicsPipeline( PipelineCache pipelineCache, const GraphicsPipelineCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ Pipeline pipeline;
+ Result result = static_cast<Result>( vkCreateGraphicsPipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), 1 , reinterpret_cast<const VkGraphicsPipelineCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>( &pipeline ) ) );
+ return createResultValue( result, pipeline, "vk::Device::createGraphicsPipeline" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE std::vector<UniquePipeline> Device::createGraphicsPipelinesUnique( PipelineCache pipelineCache, ArrayProxy<const GraphicsPipelineCreateInfo> createInfos, Optional<const AllocationCallbacks> allocator ) const
+ {
+ PipelineDeleter deleter( *this, allocator );
+ std::vector<Pipeline,Allocator> pipelines = createGraphicsPipelines( pipelineCache, createInfos, allocator );
+ std::vector<UniquePipeline> uniquePipelines;
+ uniquePipelines.reserve( pipelines.size() );
+ for ( auto pipeline : pipelines )
+ {
+ uniquePipelines.push_back( UniquePipeline( pipeline, deleter ) );
+ }
+ return uniquePipelines;
+ }
+ VULKAN_HPP_INLINE UniquePipeline Device::createGraphicsPipelineUnique( PipelineCache pipelineCache, const GraphicsPipelineCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ PipelineDeleter deleter( *this, allocator );
+ return UniquePipeline( createGraphicsPipeline( pipelineCache, createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
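+
+  // Illustrative usage sketch, not part of the generated header: the singular
+  // createGraphicsPipeline / createGraphicsPipelineUnique wrappers above cover the
+  // common one-create-info case. `device`, `cache`, and `pipelineInfo`
+  // (a vk::GraphicsPipelineCreateInfo) are assumed to be set up by the caller.
+  //
+  //   vk::UniquePipeline pipeline =
+  //       device.createGraphicsPipelineUnique( cache, pipelineInfo );
+  //   // The underlying VkPipeline is destroyed when `pipeline` leaves scope.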
+
+ VULKAN_HPP_INLINE Result Device::createComputePipelines( PipelineCache pipelineCache, uint32_t createInfoCount, const ComputePipelineCreateInfo* pCreateInfos, const AllocationCallbacks* pAllocator, Pipeline* pPipelines ) const
+ {
+ return static_cast<Result>( vkCreateComputePipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfoCount, reinterpret_cast<const VkComputePipelineCreateInfo*>( pCreateInfos ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkPipeline*>( pPipelines ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE typename ResultValueType<std::vector<Pipeline,Allocator>>::type Device::createComputePipelines( PipelineCache pipelineCache, ArrayProxy<const ComputePipelineCreateInfo> createInfos, Optional<const AllocationCallbacks> allocator ) const
+ {
+ std::vector<Pipeline,Allocator> pipelines( createInfos.size() );
+ Result result = static_cast<Result>( vkCreateComputePipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size() , reinterpret_cast<const VkComputePipelineCreateInfo*>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>( pipelines.data() ) ) );
+ return createResultValue( result, pipelines, "vk::Device::createComputePipelines" );
+ }
+ VULKAN_HPP_INLINE ResultValueType<Pipeline>::type Device::createComputePipeline( PipelineCache pipelineCache, const ComputePipelineCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ Pipeline pipeline;
+ Result result = static_cast<Result>( vkCreateComputePipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), 1 , reinterpret_cast<const VkComputePipelineCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>( &pipeline ) ) );
+ return createResultValue( result, pipeline, "vk::Device::createComputePipeline" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE std::vector<UniquePipeline> Device::createComputePipelinesUnique( PipelineCache pipelineCache, ArrayProxy<const ComputePipelineCreateInfo> createInfos, Optional<const AllocationCallbacks> allocator ) const
+ {
+ PipelineDeleter deleter( *this, allocator );
+ std::vector<Pipeline,Allocator> pipelines = createComputePipelines( pipelineCache, createInfos, allocator );
+ std::vector<UniquePipeline> uniquePipelines;
+ uniquePipelines.reserve( pipelines.size() );
+ for ( auto pipeline : pipelines )
+ {
+ uniquePipelines.push_back( UniquePipeline( pipeline, deleter ) );
+ }
+ return uniquePipelines;
+ }
+ VULKAN_HPP_INLINE UniquePipeline Device::createComputePipelineUnique( PipelineCache pipelineCache, const ComputePipelineCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ PipelineDeleter deleter( *this, allocator );
+ return UniquePipeline( createComputePipeline( pipelineCache, createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::destroyPipeline( Pipeline pipeline, const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroyPipeline( m_device, static_cast<VkPipeline>( pipeline ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::destroyPipeline( Pipeline pipeline, Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroyPipeline( m_device, static_cast<VkPipeline>( pipeline ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::createPipelineLayout( const PipelineLayoutCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, PipelineLayout* pPipelineLayout ) const
+ {
+ return static_cast<Result>( vkCreatePipelineLayout( m_device, reinterpret_cast<const VkPipelineLayoutCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkPipelineLayout*>( pPipelineLayout ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<PipelineLayout>::type Device::createPipelineLayout( const PipelineLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ PipelineLayout pipelineLayout;
+ Result result = static_cast<Result>( vkCreatePipelineLayout( m_device, reinterpret_cast<const VkPipelineLayoutCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipelineLayout*>( &pipelineLayout ) ) );
+ return createResultValue( result, pipelineLayout, "vk::Device::createPipelineLayout" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniquePipelineLayout Device::createPipelineLayoutUnique( const PipelineLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ PipelineLayoutDeleter deleter( *this, allocator );
+ return UniquePipelineLayout( createPipelineLayout( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::destroyPipelineLayout( PipelineLayout pipelineLayout, const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroyPipelineLayout( m_device, static_cast<VkPipelineLayout>( pipelineLayout ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::destroyPipelineLayout( PipelineLayout pipelineLayout, Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroyPipelineLayout( m_device, static_cast<VkPipelineLayout>( pipelineLayout ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::createSampler( const SamplerCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Sampler* pSampler ) const
+ {
+ return static_cast<Result>( vkCreateSampler( m_device, reinterpret_cast<const VkSamplerCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSampler*>( pSampler ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<Sampler>::type Device::createSampler( const SamplerCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ Sampler sampler;
+ Result result = static_cast<Result>( vkCreateSampler( m_device, reinterpret_cast<const VkSamplerCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSampler*>( &sampler ) ) );
+ return createResultValue( result, sampler, "vk::Device::createSampler" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueSampler Device::createSamplerUnique( const SamplerCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SamplerDeleter deleter( *this, allocator );
+ return UniqueSampler( createSampler( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::destroySampler( Sampler sampler, const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroySampler( m_device, static_cast<VkSampler>( sampler ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::destroySampler( Sampler sampler, Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroySampler( m_device, static_cast<VkSampler>( sampler ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::createDescriptorSetLayout( const DescriptorSetLayoutCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, DescriptorSetLayout* pSetLayout ) const
+ {
+ return static_cast<Result>( vkCreateDescriptorSetLayout( m_device, reinterpret_cast<const VkDescriptorSetLayoutCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkDescriptorSetLayout*>( pSetLayout ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<DescriptorSetLayout>::type Device::createDescriptorSetLayout( const DescriptorSetLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ DescriptorSetLayout setLayout;
+ Result result = static_cast<Result>( vkCreateDescriptorSetLayout( m_device, reinterpret_cast<const VkDescriptorSetLayoutCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDescriptorSetLayout*>( &setLayout ) ) );
+ return createResultValue( result, setLayout, "vk::Device::createDescriptorSetLayout" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueDescriptorSetLayout Device::createDescriptorSetLayoutUnique( const DescriptorSetLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ DescriptorSetLayoutDeleter deleter( *this, allocator );
+ return UniqueDescriptorSetLayout( createDescriptorSetLayout( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::destroyDescriptorSetLayout( DescriptorSetLayout descriptorSetLayout, const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroyDescriptorSetLayout( m_device, static_cast<VkDescriptorSetLayout>( descriptorSetLayout ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::destroyDescriptorSetLayout( DescriptorSetLayout descriptorSetLayout, Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroyDescriptorSetLayout( m_device, static_cast<VkDescriptorSetLayout>( descriptorSetLayout ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::createDescriptorPool( const DescriptorPoolCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, DescriptorPool* pDescriptorPool ) const
+ {
+ return static_cast<Result>( vkCreateDescriptorPool( m_device, reinterpret_cast<const VkDescriptorPoolCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkDescriptorPool*>( pDescriptorPool ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<DescriptorPool>::type Device::createDescriptorPool( const DescriptorPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ DescriptorPool descriptorPool;
+ Result result = static_cast<Result>( vkCreateDescriptorPool( m_device, reinterpret_cast<const VkDescriptorPoolCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDescriptorPool*>( &descriptorPool ) ) );
+ return createResultValue( result, descriptorPool, "vk::Device::createDescriptorPool" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueDescriptorPool Device::createDescriptorPoolUnique( const DescriptorPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ DescriptorPoolDeleter deleter( *this, allocator );
+ return UniqueDescriptorPool( createDescriptorPool( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::destroyDescriptorPool( DescriptorPool descriptorPool, const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroyDescriptorPool( m_device, static_cast<VkDescriptorPool>( descriptorPool ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::destroyDescriptorPool( DescriptorPool descriptorPool, Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroyDescriptorPool( m_device, static_cast<VkDescriptorPool>( descriptorPool ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE Result Device::resetDescriptorPool( DescriptorPool descriptorPool, DescriptorPoolResetFlags flags ) const
+ {
+ return static_cast<Result>( vkResetDescriptorPool( m_device, static_cast<VkDescriptorPool>( descriptorPool ), static_cast<VkDescriptorPoolResetFlags>( flags ) ) );
+ }
+#else
+ VULKAN_HPP_INLINE ResultValueType<void>::type Device::resetDescriptorPool( DescriptorPool descriptorPool, DescriptorPoolResetFlags flags ) const
+ {
+ Result result = static_cast<Result>( vkResetDescriptorPool( m_device, static_cast<VkDescriptorPool>( descriptorPool ), static_cast<VkDescriptorPoolResetFlags>( flags ) ) );
+ return createResultValue( result, "vk::Device::resetDescriptorPool" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::allocateDescriptorSets( const DescriptorSetAllocateInfo* pAllocateInfo, DescriptorSet* pDescriptorSets ) const
+ {
+ return static_cast<Result>( vkAllocateDescriptorSets( m_device, reinterpret_cast<const VkDescriptorSetAllocateInfo*>( pAllocateInfo ), reinterpret_cast<VkDescriptorSet*>( pDescriptorSets ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE typename ResultValueType<std::vector<DescriptorSet,Allocator>>::type Device::allocateDescriptorSets( const DescriptorSetAllocateInfo & allocateInfo ) const
+ {
+ std::vector<DescriptorSet,Allocator> descriptorSets( allocateInfo.descriptorSetCount );
+ Result result = static_cast<Result>( vkAllocateDescriptorSets( m_device, reinterpret_cast<const VkDescriptorSetAllocateInfo*>( &allocateInfo ), reinterpret_cast<VkDescriptorSet*>( descriptorSets.data() ) ) );
+ return createResultValue( result, descriptorSets, "vk::Device::allocateDescriptorSets" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE std::vector<UniqueDescriptorSet> Device::allocateDescriptorSetsUnique( const DescriptorSetAllocateInfo & allocateInfo ) const
+ {
+ DescriptorSetDeleter deleter( *this, allocateInfo.descriptorPool );
+ std::vector<DescriptorSet,Allocator> descriptorSets = allocateDescriptorSets( allocateInfo );
+ std::vector<UniqueDescriptorSet> uniqueDescriptorSets;
+ uniqueDescriptorSets.reserve( descriptorSets.size() );
+ for ( auto descriptorSet : descriptorSets )
+ {
+ uniqueDescriptorSets.push_back( UniqueDescriptorSet( descriptorSet, deleter ) );
+ }
+ return uniqueDescriptorSets;
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
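+
+  // Illustrative usage sketch, not part of the generated header: the Unique variant
+  // above returns descriptor sets that are freed back to allocateInfo.descriptorPool
+  // on destruction. `device` and `allocInfo` (a vk::DescriptorSetAllocateInfo) are
+  // assumed; the pool must have been created with the free-descriptor-set flag for
+  // the per-set free to be valid.
+  //
+  //   std::vector<vk::UniqueDescriptorSet> sets =
+  //       device.allocateDescriptorSetsUnique( allocInfo );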
+
+ VULKAN_HPP_INLINE Result Device::freeDescriptorSets( DescriptorPool descriptorPool, uint32_t descriptorSetCount, const DescriptorSet* pDescriptorSets ) const
+ {
+ return static_cast<Result>( vkFreeDescriptorSets( m_device, static_cast<VkDescriptorPool>( descriptorPool ), descriptorSetCount, reinterpret_cast<const VkDescriptorSet*>( pDescriptorSets ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<void>::type Device::freeDescriptorSets( DescriptorPool descriptorPool, ArrayProxy<const DescriptorSet> descriptorSets ) const
+ {
+ Result result = static_cast<Result>( vkFreeDescriptorSets( m_device, static_cast<VkDescriptorPool>( descriptorPool ), descriptorSets.size() , reinterpret_cast<const VkDescriptorSet*>( descriptorSets.data() ) ) );
+ return createResultValue( result, "vk::Device::freeDescriptorSets" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::updateDescriptorSets( uint32_t descriptorWriteCount, const WriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const CopyDescriptorSet* pDescriptorCopies ) const
+ {
+ vkUpdateDescriptorSets( m_device, descriptorWriteCount, reinterpret_cast<const VkWriteDescriptorSet*>( pDescriptorWrites ), descriptorCopyCount, reinterpret_cast<const VkCopyDescriptorSet*>( pDescriptorCopies ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::updateDescriptorSets( ArrayProxy<const WriteDescriptorSet> descriptorWrites, ArrayProxy<const CopyDescriptorSet> descriptorCopies ) const
+ {
+ vkUpdateDescriptorSets( m_device, descriptorWrites.size() , reinterpret_cast<const VkWriteDescriptorSet*>( descriptorWrites.data() ), descriptorCopies.size() , reinterpret_cast<const VkCopyDescriptorSet*>( descriptorCopies.data() ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
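+
+  // Illustrative usage sketch, not part of the generated header: the ArrayProxy
+  // overload above accepts a single element, an initializer list, or a std::vector
+  // without passing counts explicitly; nullptr stands in for an empty copy list.
+  // `device` and `write` (a vk::WriteDescriptorSet) are assumed.
+  //
+  //   device.updateDescriptorSets( write, nullptr );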
+
+ VULKAN_HPP_INLINE Result Device::createFramebuffer( const FramebufferCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Framebuffer* pFramebuffer ) const
+ {
+ return static_cast<Result>( vkCreateFramebuffer( m_device, reinterpret_cast<const VkFramebufferCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkFramebuffer*>( pFramebuffer ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<Framebuffer>::type Device::createFramebuffer( const FramebufferCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ Framebuffer framebuffer;
+ Result result = static_cast<Result>( vkCreateFramebuffer( m_device, reinterpret_cast<const VkFramebufferCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkFramebuffer*>( &framebuffer ) ) );
+ return createResultValue( result, framebuffer, "vk::Device::createFramebuffer" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueFramebuffer Device::createFramebufferUnique( const FramebufferCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ FramebufferDeleter deleter( *this, allocator );
+ return UniqueFramebuffer( createFramebuffer( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::destroyFramebuffer( Framebuffer framebuffer, const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroyFramebuffer( m_device, static_cast<VkFramebuffer>( framebuffer ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::destroyFramebuffer( Framebuffer framebuffer, Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroyFramebuffer( m_device, static_cast<VkFramebuffer>( framebuffer ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::createRenderPass( const RenderPassCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, RenderPass* pRenderPass ) const
+ {
+ return static_cast<Result>( vkCreateRenderPass( m_device, reinterpret_cast<const VkRenderPassCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkRenderPass*>( pRenderPass ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<RenderPass>::type Device::createRenderPass( const RenderPassCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ RenderPass renderPass;
+ Result result = static_cast<Result>( vkCreateRenderPass( m_device, reinterpret_cast<const VkRenderPassCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkRenderPass*>( &renderPass ) ) );
+ return createResultValue( result, renderPass, "vk::Device::createRenderPass" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueRenderPass Device::createRenderPassUnique( const RenderPassCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ RenderPassDeleter deleter( *this, allocator );
+ return UniqueRenderPass( createRenderPass( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::destroyRenderPass( RenderPass renderPass, const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroyRenderPass( m_device, static_cast<VkRenderPass>( renderPass ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::destroyRenderPass( RenderPass renderPass, Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroyRenderPass( m_device, static_cast<VkRenderPass>( renderPass ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::getRenderAreaGranularity( RenderPass renderPass, Extent2D* pGranularity ) const
+ {
+ vkGetRenderAreaGranularity( m_device, static_cast<VkRenderPass>( renderPass ), reinterpret_cast<VkExtent2D*>( pGranularity ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE Extent2D Device::getRenderAreaGranularity( RenderPass renderPass ) const
+ {
+ Extent2D granularity;
+ vkGetRenderAreaGranularity( m_device, static_cast<VkRenderPass>( renderPass ), reinterpret_cast<VkExtent2D*>( &granularity ) );
+ return granularity;
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::createCommandPool( const CommandPoolCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, CommandPool* pCommandPool ) const
+ {
+ return static_cast<Result>( vkCreateCommandPool( m_device, reinterpret_cast<const VkCommandPoolCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkCommandPool*>( pCommandPool ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<CommandPool>::type Device::createCommandPool( const CommandPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ CommandPool commandPool;
+ Result result = static_cast<Result>( vkCreateCommandPool( m_device, reinterpret_cast<const VkCommandPoolCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkCommandPool*>( &commandPool ) ) );
+ return createResultValue( result, commandPool, "vk::Device::createCommandPool" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueCommandPool Device::createCommandPoolUnique( const CommandPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ CommandPoolDeleter deleter( *this, allocator );
+ return UniqueCommandPool( createCommandPool( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::destroyCommandPool( CommandPool commandPool, const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroyCommandPool( m_device, static_cast<VkCommandPool>( commandPool ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::destroyCommandPool( CommandPool commandPool, Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroyCommandPool( m_device, static_cast<VkCommandPool>( commandPool ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE Result Device::resetCommandPool( CommandPool commandPool, CommandPoolResetFlags flags ) const
+ {
+ return static_cast<Result>( vkResetCommandPool( m_device, static_cast<VkCommandPool>( commandPool ), static_cast<VkCommandPoolResetFlags>( flags ) ) );
+ }
+#else
+ VULKAN_HPP_INLINE ResultValueType<void>::type Device::resetCommandPool( CommandPool commandPool, CommandPoolResetFlags flags ) const
+ {
+ Result result = static_cast<Result>( vkResetCommandPool( m_device, static_cast<VkCommandPool>( commandPool ), static_cast<VkCommandPoolResetFlags>( flags ) ) );
+ return createResultValue( result, "vk::Device::resetCommandPool" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::allocateCommandBuffers( const CommandBufferAllocateInfo* pAllocateInfo, CommandBuffer* pCommandBuffers ) const
+ {
+ return static_cast<Result>( vkAllocateCommandBuffers( m_device, reinterpret_cast<const VkCommandBufferAllocateInfo*>( pAllocateInfo ), reinterpret_cast<VkCommandBuffer*>( pCommandBuffers ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE typename ResultValueType<std::vector<CommandBuffer,Allocator>>::type Device::allocateCommandBuffers( const CommandBufferAllocateInfo & allocateInfo ) const
+ {
+ std::vector<CommandBuffer,Allocator> commandBuffers( allocateInfo.commandBufferCount );
+ Result result = static_cast<Result>( vkAllocateCommandBuffers( m_device, reinterpret_cast<const VkCommandBufferAllocateInfo*>( &allocateInfo ), reinterpret_cast<VkCommandBuffer*>( commandBuffers.data() ) ) );
+ return createResultValue( result, commandBuffers, "vk::Device::allocateCommandBuffers" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE std::vector<UniqueCommandBuffer> Device::allocateCommandBuffersUnique( const CommandBufferAllocateInfo & allocateInfo ) const
+ {
+ CommandBufferDeleter deleter( *this, allocateInfo.commandPool );
+ std::vector<CommandBuffer,Allocator> commandBuffers = allocateCommandBuffers( allocateInfo );
+ std::vector<UniqueCommandBuffer> uniqueCommandBuffers;
+ uniqueCommandBuffers.reserve( commandBuffers.size() );
+ for ( auto commandBuffer : commandBuffers )
+ {
+ uniqueCommandBuffers.push_back( UniqueCommandBuffer( commandBuffer, deleter ) );
+ }
+ return uniqueCommandBuffers;
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
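+
+  // Illustrative usage sketch, not part of the generated header:
+  // allocateCommandBuffersUnique returns command buffers that are freed back to
+  // allocateInfo.commandPool on destruction. `device` and `cbAllocInfo`
+  // (a vk::CommandBufferAllocateInfo) are assumed to be valid.
+  //
+  //   std::vector<vk::UniqueCommandBuffer> cmdBufs =
+  //       device.allocateCommandBuffersUnique( cbAllocInfo );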
+
+ VULKAN_HPP_INLINE void Device::freeCommandBuffers( CommandPool commandPool, uint32_t commandBufferCount, const CommandBuffer* pCommandBuffers ) const
+ {
+ vkFreeCommandBuffers( m_device, static_cast<VkCommandPool>( commandPool ), commandBufferCount, reinterpret_cast<const VkCommandBuffer*>( pCommandBuffers ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::freeCommandBuffers( CommandPool commandPool, ArrayProxy<const CommandBuffer> commandBuffers ) const
+ {
+ vkFreeCommandBuffers( m_device, static_cast<VkCommandPool>( commandPool ), commandBuffers.size() , reinterpret_cast<const VkCommandBuffer*>( commandBuffers.data() ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::createSharedSwapchainsKHR( uint32_t swapchainCount, const SwapchainCreateInfoKHR* pCreateInfos, const AllocationCallbacks* pAllocator, SwapchainKHR* pSwapchains ) const
+ {
+ return static_cast<Result>( vkCreateSharedSwapchainsKHR( m_device, swapchainCount, reinterpret_cast<const VkSwapchainCreateInfoKHR*>( pCreateInfos ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSwapchainKHR*>( pSwapchains ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE typename ResultValueType<std::vector<SwapchainKHR,Allocator>>::type Device::createSharedSwapchainsKHR( ArrayProxy<const SwapchainCreateInfoKHR> createInfos, Optional<const AllocationCallbacks> allocator ) const
+ {
+ std::vector<SwapchainKHR,Allocator> swapchains( createInfos.size() );
+ Result result = static_cast<Result>( vkCreateSharedSwapchainsKHR( m_device, createInfos.size() , reinterpret_cast<const VkSwapchainCreateInfoKHR*>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSwapchainKHR*>( swapchains.data() ) ) );
+ return createResultValue( result, swapchains, "vk::Device::createSharedSwapchainsKHR" );
+ }
+ VULKAN_HPP_INLINE ResultValueType<SwapchainKHR>::type Device::createSharedSwapchainKHR( const SwapchainCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SwapchainKHR swapchain;
+ Result result = static_cast<Result>( vkCreateSharedSwapchainsKHR( m_device, 1 , reinterpret_cast<const VkSwapchainCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSwapchainKHR*>( &swapchain ) ) );
+ return createResultValue( result, swapchain, "vk::Device::createSharedSwapchainKHR" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE std::vector<UniqueSwapchainKHR> Device::createSharedSwapchainsKHRUnique( ArrayProxy<const SwapchainCreateInfoKHR> createInfos, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SwapchainKHRDeleter deleter( *this, allocator );
+ std::vector<SwapchainKHR,Allocator> swapchainKHRs = createSharedSwapchainsKHR( createInfos, allocator );
+ std::vector<UniqueSwapchainKHR> uniqueSwapchainKHRs;
+ uniqueSwapchainKHRs.reserve( swapchainKHRs.size() );
+ for ( auto swapchainKHR : swapchainKHRs )
+ {
+ uniqueSwapchainKHRs.push_back( UniqueSwapchainKHR( swapchainKHR, deleter ) );
+ }
+ return uniqueSwapchainKHRs;
+ }
+ VULKAN_HPP_INLINE UniqueSwapchainKHR Device::createSharedSwapchainKHRUnique( const SwapchainCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SwapchainKHRDeleter deleter( *this, allocator );
+ return UniqueSwapchainKHR( createSharedSwapchainKHR( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::createSwapchainKHR( const SwapchainCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SwapchainKHR* pSwapchain ) const
+ {
+ return static_cast<Result>( vkCreateSwapchainKHR( m_device, reinterpret_cast<const VkSwapchainCreateInfoKHR*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSwapchainKHR*>( pSwapchain ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<SwapchainKHR>::type Device::createSwapchainKHR( const SwapchainCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SwapchainKHR swapchain;
+ Result result = static_cast<Result>( vkCreateSwapchainKHR( m_device, reinterpret_cast<const VkSwapchainCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSwapchainKHR*>( &swapchain ) ) );
+ return createResultValue( result, swapchain, "vk::Device::createSwapchainKHR" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueSwapchainKHR Device::createSwapchainKHRUnique( const SwapchainCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SwapchainKHRDeleter deleter( *this, allocator );
+ return UniqueSwapchainKHR( createSwapchainKHR( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::destroySwapchainKHR( SwapchainKHR swapchain, const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroySwapchainKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::destroySwapchainKHR( SwapchainKHR swapchain, Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroySwapchainKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::getSwapchainImagesKHR( SwapchainKHR swapchain, uint32_t* pSwapchainImageCount, Image* pSwapchainImages ) const
+ {
+ return static_cast<Result>( vkGetSwapchainImagesKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ), pSwapchainImageCount, reinterpret_cast<VkImage*>( pSwapchainImages ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE typename ResultValueType<std::vector<Image,Allocator>>::type Device::getSwapchainImagesKHR( SwapchainKHR swapchain ) const
+ {
+ std::vector<Image,Allocator> swapchainImages;
+ uint32_t swapchainImageCount;
+ Result result;
+ do
+ {
+ result = static_cast<Result>( vkGetSwapchainImagesKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ), &swapchainImageCount, nullptr ) );
+ if ( ( result == Result::eSuccess ) && swapchainImageCount )
+ {
+ swapchainImages.resize( swapchainImageCount );
+ result = static_cast<Result>( vkGetSwapchainImagesKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ), &swapchainImageCount, reinterpret_cast<VkImage*>( swapchainImages.data() ) ) );
+ }
+ } while ( result == Result::eIncomplete );
+ assert( swapchainImageCount <= swapchainImages.size() );
+ swapchainImages.resize( swapchainImageCount );
+ return createResultValue( result, swapchainImages, "vk::Device::getSwapchainImagesKHR" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
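+
+  // Illustrative usage sketch, not part of the generated header: the enhanced
+  // getSwapchainImagesKHR overload above loops on eIncomplete internally, so a single
+  // call yields the complete image list. `device` and `swapchain` are assumed.
+  //
+  //   std::vector<vk::Image> swapImages = device.getSwapchainImagesKHR( swapchain );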
+
+ VULKAN_HPP_INLINE Result Device::acquireNextImageKHR( SwapchainKHR swapchain, uint64_t timeout, Semaphore semaphore, Fence fence, uint32_t* pImageIndex ) const
+ {
+ return static_cast<Result>( vkAcquireNextImageKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ), timeout, static_cast<VkSemaphore>( semaphore ), static_cast<VkFence>( fence ), pImageIndex ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValue<uint32_t> Device::acquireNextImageKHR( SwapchainKHR swapchain, uint64_t timeout, Semaphore semaphore, Fence fence ) const
+ {
+ uint32_t imageIndex;
+ Result result = static_cast<Result>( vkAcquireNextImageKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ), timeout, static_cast<VkSemaphore>( semaphore ), static_cast<VkFence>( fence ), &imageIndex ) );
+ return createResultValue( result, imageIndex, "vk::Device::acquireNextImageKHR", { Result::eSuccess, Result::eTimeout, Result::eNotReady, Result::eSuboptimalKHR } );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
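+
+  // Illustrative usage sketch, not part of the generated header: the enhanced
+  // acquireNextImageKHR returns a ResultValue<uint32_t> rather than throwing on
+  // eTimeout / eNotReady / eSuboptimalKHR, since those are listed as success codes
+  // above. `device`, `swapchain`, and `imageAvailable` (a vk::Semaphore) are assumed.
+  //
+  //   vk::ResultValue<uint32_t> acquired =
+  //       device.acquireNextImageKHR( swapchain, UINT64_MAX, imageAvailable, vk::Fence() );
+  //   uint32_t imageIndex = acquired.value;  // check acquired.result for eSuboptimalKHR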
+
+ VULKAN_HPP_INLINE Result Device::debugMarkerSetObjectNameEXT( DebugMarkerObjectNameInfoEXT* pNameInfo ) const
+ {
+ return static_cast<Result>( vkDebugMarkerSetObjectNameEXT( m_device, reinterpret_cast<VkDebugMarkerObjectNameInfoEXT*>( pNameInfo ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<DebugMarkerObjectNameInfoEXT>::type Device::debugMarkerSetObjectNameEXT() const
+ {
+ DebugMarkerObjectNameInfoEXT nameInfo;
+ Result result = static_cast<Result>( vkDebugMarkerSetObjectNameEXT( m_device, reinterpret_cast<VkDebugMarkerObjectNameInfoEXT*>( &nameInfo ) ) );
+ return createResultValue( result, nameInfo, "vk::Device::debugMarkerSetObjectNameEXT" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::debugMarkerSetObjectTagEXT( DebugMarkerObjectTagInfoEXT* pTagInfo ) const
+ {
+ return static_cast<Result>( vkDebugMarkerSetObjectTagEXT( m_device, reinterpret_cast<VkDebugMarkerObjectTagInfoEXT*>( pTagInfo ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<DebugMarkerObjectTagInfoEXT>::type Device::debugMarkerSetObjectTagEXT() const
+ {
+ DebugMarkerObjectTagInfoEXT tagInfo;
+ Result result = static_cast<Result>( vkDebugMarkerSetObjectTagEXT( m_device, reinterpret_cast<VkDebugMarkerObjectTagInfoEXT*>( &tagInfo ) ) );
+ return createResultValue( result, tagInfo, "vk::Device::debugMarkerSetObjectTagEXT" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ VULKAN_HPP_INLINE Result Device::getMemoryWin32HandleNV( DeviceMemory memory, ExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle ) const
+ {
+ return static_cast<Result>( vkGetMemoryWin32HandleNV( m_device, static_cast<VkDeviceMemory>( memory ), static_cast<VkExternalMemoryHandleTypeFlagsNV>( handleType ), pHandle ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<HANDLE>::type Device::getMemoryWin32HandleNV( DeviceMemory memory, ExternalMemoryHandleTypeFlagsNV handleType ) const
+ {
+ HANDLE handle;
+ Result result = static_cast<Result>( vkGetMemoryWin32HandleNV( m_device, static_cast<VkDeviceMemory>( memory ), static_cast<VkExternalMemoryHandleTypeFlagsNV>( handleType ), &handle ) );
+ return createResultValue( result, handle, "vk::Device::getMemoryWin32HandleNV" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_WIN32_KHR*/
+
+ VULKAN_HPP_INLINE Result Device::createIndirectCommandsLayoutNVX( const IndirectCommandsLayoutCreateInfoNVX* pCreateInfo, const AllocationCallbacks* pAllocator, IndirectCommandsLayoutNVX* pIndirectCommandsLayout ) const
+ {
+ return static_cast<Result>( vkCreateIndirectCommandsLayoutNVX( m_device, reinterpret_cast<const VkIndirectCommandsLayoutCreateInfoNVX*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkIndirectCommandsLayoutNVX*>( pIndirectCommandsLayout ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<IndirectCommandsLayoutNVX>::type Device::createIndirectCommandsLayoutNVX( const IndirectCommandsLayoutCreateInfoNVX & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ IndirectCommandsLayoutNVX indirectCommandsLayout;
+ Result result = static_cast<Result>( vkCreateIndirectCommandsLayoutNVX( m_device, reinterpret_cast<const VkIndirectCommandsLayoutCreateInfoNVX*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkIndirectCommandsLayoutNVX*>( &indirectCommandsLayout ) ) );
+ return createResultValue( result, indirectCommandsLayout, "vk::Device::createIndirectCommandsLayoutNVX" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueIndirectCommandsLayoutNVX Device::createIndirectCommandsLayoutNVXUnique( const IndirectCommandsLayoutCreateInfoNVX & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ IndirectCommandsLayoutNVXDeleter deleter( *this, allocator );
+ return UniqueIndirectCommandsLayoutNVX( createIndirectCommandsLayoutNVX( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::destroyIndirectCommandsLayoutNVX( IndirectCommandsLayoutNVX indirectCommandsLayout, const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroyIndirectCommandsLayoutNVX( m_device, static_cast<VkIndirectCommandsLayoutNVX>( indirectCommandsLayout ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::destroyIndirectCommandsLayoutNVX( IndirectCommandsLayoutNVX indirectCommandsLayout, Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroyIndirectCommandsLayoutNVX( m_device, static_cast<VkIndirectCommandsLayoutNVX>( indirectCommandsLayout ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::createObjectTableNVX( const ObjectTableCreateInfoNVX* pCreateInfo, const AllocationCallbacks* pAllocator, ObjectTableNVX* pObjectTable ) const
+ {
+ return static_cast<Result>( vkCreateObjectTableNVX( m_device, reinterpret_cast<const VkObjectTableCreateInfoNVX*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkObjectTableNVX*>( pObjectTable ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<ObjectTableNVX>::type Device::createObjectTableNVX( const ObjectTableCreateInfoNVX & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ ObjectTableNVX objectTable;
+ Result result = static_cast<Result>( vkCreateObjectTableNVX( m_device, reinterpret_cast<const VkObjectTableCreateInfoNVX*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkObjectTableNVX*>( &objectTable ) ) );
+ return createResultValue( result, objectTable, "vk::Device::createObjectTableNVX" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueObjectTableNVX Device::createObjectTableNVXUnique( const ObjectTableCreateInfoNVX & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ ObjectTableNVXDeleter deleter( *this, allocator );
+ return UniqueObjectTableNVX( createObjectTableNVX( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::destroyObjectTableNVX( ObjectTableNVX objectTable, const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroyObjectTableNVX( m_device, static_cast<VkObjectTableNVX>( objectTable ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::destroyObjectTableNVX( ObjectTableNVX objectTable, Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroyObjectTableNVX( m_device, static_cast<VkObjectTableNVX>( objectTable ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::registerObjectsNVX( ObjectTableNVX objectTable, uint32_t objectCount, const ObjectTableEntryNVX* const* ppObjectTableEntries, const uint32_t* pObjectIndices ) const
+ {
+ return static_cast<Result>( vkRegisterObjectsNVX( m_device, static_cast<VkObjectTableNVX>( objectTable ), objectCount, reinterpret_cast<const VkObjectTableEntryNVX* const*>( ppObjectTableEntries ), pObjectIndices ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<void>::type Device::registerObjectsNVX( ObjectTableNVX objectTable, ArrayProxy<const ObjectTableEntryNVX* const> pObjectTableEntries, ArrayProxy<const uint32_t> objectIndices ) const
+ {
+#ifdef VULKAN_HPP_NO_EXCEPTIONS
+ assert( pObjectTableEntries.size() == objectIndices.size() );
+#else
+ if ( pObjectTableEntries.size() != objectIndices.size() )
+ {
+ throw std::logic_error( "vk::Device::registerObjectsNVX: pObjectTableEntries.size() != objectIndices.size()" );
+ }
+#endif // VULKAN_HPP_NO_EXCEPTIONS
+ Result result = static_cast<Result>( vkRegisterObjectsNVX( m_device, static_cast<VkObjectTableNVX>( objectTable ), pObjectTableEntries.size() , reinterpret_cast<const VkObjectTableEntryNVX* const*>( pObjectTableEntries.data() ), objectIndices.data() ) );
+ return createResultValue( result, "vk::Device::registerObjectsNVX" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::unregisterObjectsNVX( ObjectTableNVX objectTable, uint32_t objectCount, const ObjectEntryTypeNVX* pObjectEntryTypes, const uint32_t* pObjectIndices ) const
+ {
+ return static_cast<Result>( vkUnregisterObjectsNVX( m_device, static_cast<VkObjectTableNVX>( objectTable ), objectCount, reinterpret_cast<const VkObjectEntryTypeNVX*>( pObjectEntryTypes ), pObjectIndices ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<void>::type Device::unregisterObjectsNVX( ObjectTableNVX objectTable, ArrayProxy<const ObjectEntryTypeNVX> objectEntryTypes, ArrayProxy<const uint32_t> objectIndices ) const
+ {
+#ifdef VULKAN_HPP_NO_EXCEPTIONS
+ assert( objectEntryTypes.size() == objectIndices.size() );
+#else
+ if ( objectEntryTypes.size() != objectIndices.size() )
+ {
+ throw std::logic_error( "vk::Device::unregisterObjectsNVX: objectEntryTypes.size() != objectIndices.size()" );
+ }
+#endif // VULKAN_HPP_NO_EXCEPTIONS
+ Result result = static_cast<Result>( vkUnregisterObjectsNVX( m_device, static_cast<VkObjectTableNVX>( objectTable ), objectEntryTypes.size() , reinterpret_cast<const VkObjectEntryTypeNVX*>( objectEntryTypes.data() ), objectIndices.data() ) );
+ return createResultValue( result, "vk::Device::unregisterObjectsNVX" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::trimCommandPoolKHR( CommandPool commandPool, CommandPoolTrimFlagsKHR flags ) const
+ {
+ vkTrimCommandPoolKHR( m_device, static_cast<VkCommandPool>( commandPool ), static_cast<VkCommandPoolTrimFlagsKHR>( flags ) );
+ }
+
+#ifdef VK_USE_PLATFORM_WIN32_KHX
+ VULKAN_HPP_INLINE Result Device::getMemoryWin32HandleKHX( DeviceMemory memory, ExternalMemoryHandleTypeFlagBitsKHX handleType, HANDLE* pHandle ) const
+ {
+ return static_cast<Result>( vkGetMemoryWin32HandleKHX( m_device, static_cast<VkDeviceMemory>( memory ), static_cast<VkExternalMemoryHandleTypeFlagBitsKHX>( handleType ), pHandle ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<HANDLE>::type Device::getMemoryWin32HandleKHX( DeviceMemory memory, ExternalMemoryHandleTypeFlagBitsKHX handleType ) const
+ {
+ HANDLE handle;
+ Result result = static_cast<Result>( vkGetMemoryWin32HandleKHX( m_device, static_cast<VkDeviceMemory>( memory ), static_cast<VkExternalMemoryHandleTypeFlagBitsKHX>( handleType ), &handle ) );
+ return createResultValue( result, handle, "vk::Device::getMemoryWin32HandleKHX" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_WIN32_KHX*/
+
+#ifdef VK_USE_PLATFORM_WIN32_KHX
+ VULKAN_HPP_INLINE Result Device::getMemoryWin32HandlePropertiesKHX( ExternalMemoryHandleTypeFlagBitsKHX handleType, HANDLE handle, MemoryWin32HandlePropertiesKHX* pMemoryWin32HandleProperties ) const
+ {
+ return static_cast<Result>( vkGetMemoryWin32HandlePropertiesKHX( m_device, static_cast<VkExternalMemoryHandleTypeFlagBitsKHX>( handleType ), handle, reinterpret_cast<VkMemoryWin32HandlePropertiesKHX*>( pMemoryWin32HandleProperties ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<MemoryWin32HandlePropertiesKHX>::type Device::getMemoryWin32HandlePropertiesKHX( ExternalMemoryHandleTypeFlagBitsKHX handleType, HANDLE handle ) const
+ {
+ MemoryWin32HandlePropertiesKHX memoryWin32HandleProperties;
+ Result result = static_cast<Result>( vkGetMemoryWin32HandlePropertiesKHX( m_device, static_cast<VkExternalMemoryHandleTypeFlagBitsKHX>( handleType ), handle, reinterpret_cast<VkMemoryWin32HandlePropertiesKHX*>( &memoryWin32HandleProperties ) ) );
+ return createResultValue( result, memoryWin32HandleProperties, "vk::Device::getMemoryWin32HandlePropertiesKHX" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_WIN32_KHX*/
+
+ VULKAN_HPP_INLINE Result Device::getMemoryFdKHX( DeviceMemory memory, ExternalMemoryHandleTypeFlagBitsKHX handleType, int* pFd ) const
+ {
+ return static_cast<Result>( vkGetMemoryFdKHX( m_device, static_cast<VkDeviceMemory>( memory ), static_cast<VkExternalMemoryHandleTypeFlagBitsKHX>( handleType ), pFd ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<int>::type Device::getMemoryFdKHX( DeviceMemory memory, ExternalMemoryHandleTypeFlagBitsKHX handleType ) const
+ {
+ int fd;
+ Result result = static_cast<Result>( vkGetMemoryFdKHX( m_device, static_cast<VkDeviceMemory>( memory ), static_cast<VkExternalMemoryHandleTypeFlagBitsKHX>( handleType ), &fd ) );
+ return createResultValue( result, fd, "vk::Device::getMemoryFdKHX" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::getMemoryFdPropertiesKHX( ExternalMemoryHandleTypeFlagBitsKHX handleType, int fd, MemoryFdPropertiesKHX* pMemoryFdProperties ) const
+ {
+ return static_cast<Result>( vkGetMemoryFdPropertiesKHX( m_device, static_cast<VkExternalMemoryHandleTypeFlagBitsKHX>( handleType ), fd, reinterpret_cast<VkMemoryFdPropertiesKHX*>( pMemoryFdProperties ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<MemoryFdPropertiesKHX>::type Device::getMemoryFdPropertiesKHX( ExternalMemoryHandleTypeFlagBitsKHX handleType, int fd ) const
+ {
+ MemoryFdPropertiesKHX memoryFdProperties;
+ Result result = static_cast<Result>( vkGetMemoryFdPropertiesKHX( m_device, static_cast<VkExternalMemoryHandleTypeFlagBitsKHX>( handleType ), fd, reinterpret_cast<VkMemoryFdPropertiesKHX*>( &memoryFdProperties ) ) );
+ return createResultValue( result, memoryFdProperties, "vk::Device::getMemoryFdPropertiesKHX" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VK_USE_PLATFORM_WIN32_KHX
+ VULKAN_HPP_INLINE Result Device::getSemaphoreWin32HandleKHX( Semaphore semaphore, ExternalSemaphoreHandleTypeFlagBitsKHX handleType, HANDLE* pHandle ) const
+ {
+ return static_cast<Result>( vkGetSemaphoreWin32HandleKHX( m_device, static_cast<VkSemaphore>( semaphore ), static_cast<VkExternalSemaphoreHandleTypeFlagBitsKHX>( handleType ), pHandle ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<HANDLE>::type Device::getSemaphoreWin32HandleKHX( Semaphore semaphore, ExternalSemaphoreHandleTypeFlagBitsKHX handleType ) const
+ {
+ HANDLE handle;
+ Result result = static_cast<Result>( vkGetSemaphoreWin32HandleKHX( m_device, static_cast<VkSemaphore>( semaphore ), static_cast<VkExternalSemaphoreHandleTypeFlagBitsKHX>( handleType ), &handle ) );
+ return createResultValue( result, handle, "vk::Device::getSemaphoreWin32HandleKHX" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_WIN32_KHX*/
+
+#ifdef VK_USE_PLATFORM_WIN32_KHX
+ VULKAN_HPP_INLINE Result Device::importSemaphoreWin32HandleKHX( const ImportSemaphoreWin32HandleInfoKHX* pImportSemaphoreWin32HandleInfo ) const
+ {
+ return static_cast<Result>( vkImportSemaphoreWin32HandleKHX( m_device, reinterpret_cast<const VkImportSemaphoreWin32HandleInfoKHX*>( pImportSemaphoreWin32HandleInfo ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<void>::type Device::importSemaphoreWin32HandleKHX( const ImportSemaphoreWin32HandleInfoKHX & importSemaphoreWin32HandleInfo ) const
+ {
+ Result result = static_cast<Result>( vkImportSemaphoreWin32HandleKHX( m_device, reinterpret_cast<const VkImportSemaphoreWin32HandleInfoKHX*>( &importSemaphoreWin32HandleInfo ) ) );
+ return createResultValue( result, "vk::Device::importSemaphoreWin32HandleKHX" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_WIN32_KHX*/
+
+ VULKAN_HPP_INLINE Result Device::getSemaphoreFdKHX( Semaphore semaphore, ExternalSemaphoreHandleTypeFlagBitsKHX handleType, int* pFd ) const
+ {
+ return static_cast<Result>( vkGetSemaphoreFdKHX( m_device, static_cast<VkSemaphore>( semaphore ), static_cast<VkExternalSemaphoreHandleTypeFlagBitsKHX>( handleType ), pFd ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<int>::type Device::getSemaphoreFdKHX( Semaphore semaphore, ExternalSemaphoreHandleTypeFlagBitsKHX handleType ) const
+ {
+ int fd;
+ Result result = static_cast<Result>( vkGetSemaphoreFdKHX( m_device, static_cast<VkSemaphore>( semaphore ), static_cast<VkExternalSemaphoreHandleTypeFlagBitsKHX>( handleType ), &fd ) );
+ return createResultValue( result, fd, "vk::Device::getSemaphoreFdKHX" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::importSemaphoreFdKHX( const ImportSemaphoreFdInfoKHX* pImportSemaphoreFdInfo ) const
+ {
+ return static_cast<Result>( vkImportSemaphoreFdKHX( m_device, reinterpret_cast<const VkImportSemaphoreFdInfoKHX*>( pImportSemaphoreFdInfo ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<void>::type Device::importSemaphoreFdKHX( const ImportSemaphoreFdInfoKHX & importSemaphoreFdInfo ) const
+ {
+ Result result = static_cast<Result>( vkImportSemaphoreFdKHX( m_device, reinterpret_cast<const VkImportSemaphoreFdInfoKHX*>( &importSemaphoreFdInfo ) ) );
+ return createResultValue( result, "vk::Device::importSemaphoreFdKHX" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::displayPowerControlEXT( DisplayKHR display, const DisplayPowerInfoEXT* pDisplayPowerInfo ) const
+ {
+ return static_cast<Result>( vkDisplayPowerControlEXT( m_device, static_cast<VkDisplayKHR>( display ), reinterpret_cast<const VkDisplayPowerInfoEXT*>( pDisplayPowerInfo ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<void>::type Device::displayPowerControlEXT( DisplayKHR display, const DisplayPowerInfoEXT & displayPowerInfo ) const
+ {
+ Result result = static_cast<Result>( vkDisplayPowerControlEXT( m_device, static_cast<VkDisplayKHR>( display ), reinterpret_cast<const VkDisplayPowerInfoEXT*>( &displayPowerInfo ) ) );
+ return createResultValue( result, "vk::Device::displayPowerControlEXT" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::registerEventEXT( const DeviceEventInfoEXT* pDeviceEventInfo, const AllocationCallbacks* pAllocator, Fence* pFence ) const
+ {
+ return static_cast<Result>( vkRegisterDeviceEventEXT( m_device, reinterpret_cast<const VkDeviceEventInfoEXT*>( pDeviceEventInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkFence*>( pFence ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<Fence>::type Device::registerEventEXT( const DeviceEventInfoEXT & deviceEventInfo, const AllocationCallbacks & allocator ) const
+ {
+ Fence fence;
+ Result result = static_cast<Result>( vkRegisterDeviceEventEXT( m_device, reinterpret_cast<const VkDeviceEventInfoEXT*>( &deviceEventInfo ), reinterpret_cast<const VkAllocationCallbacks*>( &allocator ), reinterpret_cast<VkFence*>( &fence ) ) );
+ return createResultValue( result, fence, "vk::Device::registerEventEXT" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::registerDisplayEventEXT( DisplayKHR display, const DisplayEventInfoEXT* pDisplayEventInfo, const AllocationCallbacks* pAllocator, Fence* pFence ) const
+ {
+ return static_cast<Result>( vkRegisterDisplayEventEXT( m_device, static_cast<VkDisplayKHR>( display ), reinterpret_cast<const VkDisplayEventInfoEXT*>( pDisplayEventInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkFence*>( pFence ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<Fence>::type Device::registerDisplayEventEXT( DisplayKHR display, const DisplayEventInfoEXT & displayEventInfo, const AllocationCallbacks & allocator ) const
+ {
+ Fence fence;
+ Result result = static_cast<Result>( vkRegisterDisplayEventEXT( m_device, static_cast<VkDisplayKHR>( display ), reinterpret_cast<const VkDisplayEventInfoEXT*>( &displayEventInfo ), reinterpret_cast<const VkAllocationCallbacks*>( &allocator ), reinterpret_cast<VkFence*>( &fence ) ) );
+ return createResultValue( result, fence, "vk::Device::registerDisplayEventEXT" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::getSwapchainCounterEXT( SwapchainKHR swapchain, SurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue ) const
+ {
+ return static_cast<Result>( vkGetSwapchainCounterEXT( m_device, static_cast<VkSwapchainKHR>( swapchain ), static_cast<VkSurfaceCounterFlagBitsEXT>( counter ), pCounterValue ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValue<uint64_t> Device::getSwapchainCounterEXT( SwapchainKHR swapchain, SurfaceCounterFlagBitsEXT counter ) const
+ {
+ uint64_t counterValue;
+ Result result = static_cast<Result>( vkGetSwapchainCounterEXT( m_device, static_cast<VkSwapchainKHR>( swapchain ), static_cast<VkSurfaceCounterFlagBitsEXT>( counter ), &counterValue ) );
+ return createResultValue( result, counterValue, "vk::Device::getSwapchainCounterEXT", { Result::eSuccess, Result::eErrorDeviceLost, Result::eErrorOutOfDateKHR } );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
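Because vkGetSwapchainCounterEXT can legitimately come back with eErrorDeviceLost or eErrorOutOfDateKHR, the enhanced overload above returns a vk::ResultValue<uint64_t> (result plus value) instead of throwing on those codes. A minimal, hedged sketch of how a caller might poll it; the function and handle names are placeholders, not part of this commit, and assume a swapchain created with a surface counter enabled:

    void pollVblankCounter( vk::Device device, vk::SwapchainKHR swapchain )
    {
      vk::ResultValue<uint64_t> counter =
          device.getSwapchainCounterEXT( swapchain, vk::SurfaceCounterFlagBitsEXT::eVblank );
      if ( counter.result == vk::Result::eSuccess )
      {
        // counter.value holds the surface counter sampled for this swapchain.
      }
      // eErrorOutOfDateKHR / eErrorDeviceLost come back in counter.result rather
      // than as exceptions, mirroring the success-code list passed above.
    }
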
+ VULKAN_HPP_INLINE void Device::getGroupPeerMemoryFeaturesKHX( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, PeerMemoryFeatureFlagsKHX* pPeerMemoryFeatures ) const
+ {
+ vkGetDeviceGroupPeerMemoryFeaturesKHX( m_device, heapIndex, localDeviceIndex, remoteDeviceIndex, reinterpret_cast<VkPeerMemoryFeatureFlagsKHX*>( pPeerMemoryFeatures ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE PeerMemoryFeatureFlagsKHX Device::getGroupPeerMemoryFeaturesKHX( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex ) const
+ {
+ PeerMemoryFeatureFlagsKHX peerMemoryFeatures;
+ vkGetDeviceGroupPeerMemoryFeaturesKHX( m_device, heapIndex, localDeviceIndex, remoteDeviceIndex, reinterpret_cast<VkPeerMemoryFeatureFlagsKHX*>( &peerMemoryFeatures ) );
+ return peerMemoryFeatures;
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::bindBufferMemory2KHX( uint32_t bindInfoCount, const BindBufferMemoryInfoKHX* pBindInfos ) const
+ {
+ return static_cast<Result>( vkBindBufferMemory2KHX( m_device, bindInfoCount, reinterpret_cast<const VkBindBufferMemoryInfoKHX*>( pBindInfos ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<void>::type Device::bindBufferMemory2KHX( ArrayProxy<const BindBufferMemoryInfoKHX> bindInfos ) const
+ {
+ Result result = static_cast<Result>( vkBindBufferMemory2KHX( m_device, bindInfos.size() , reinterpret_cast<const VkBindBufferMemoryInfoKHX*>( bindInfos.data() ) ) );
+ return createResultValue( result, "vk::Device::bindBufferMemory2KHX" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::bindImageMemory2KHX( uint32_t bindInfoCount, const BindImageMemoryInfoKHX* pBindInfos ) const
+ {
+ return static_cast<Result>( vkBindImageMemory2KHX( m_device, bindInfoCount, reinterpret_cast<const VkBindImageMemoryInfoKHX*>( pBindInfos ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<void>::type Device::bindImageMemory2KHX( ArrayProxy<const BindImageMemoryInfoKHX> bindInfos ) const
+ {
+ Result result = static_cast<Result>( vkBindImageMemory2KHX( m_device, bindInfos.size() , reinterpret_cast<const VkBindImageMemoryInfoKHX*>( bindInfos.data() ) ) );
+ return createResultValue( result, "vk::Device::bindImageMemory2KHX" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::getGroupPresentCapabilitiesKHX( DeviceGroupPresentCapabilitiesKHX* pDeviceGroupPresentCapabilities ) const
+ {
+ return static_cast<Result>( vkGetDeviceGroupPresentCapabilitiesKHX( m_device, reinterpret_cast<VkDeviceGroupPresentCapabilitiesKHX*>( pDeviceGroupPresentCapabilities ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<DeviceGroupPresentCapabilitiesKHX>::type Device::getGroupPresentCapabilitiesKHX() const
+ {
+ DeviceGroupPresentCapabilitiesKHX deviceGroupPresentCapabilities;
+ Result result = static_cast<Result>( vkGetDeviceGroupPresentCapabilitiesKHX( m_device, reinterpret_cast<VkDeviceGroupPresentCapabilitiesKHX*>( &deviceGroupPresentCapabilities ) ) );
+ return createResultValue( result, deviceGroupPresentCapabilities, "vk::Device::getGroupPresentCapabilitiesKHX" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::getGroupSurfacePresentModesKHX( SurfaceKHR surface, DeviceGroupPresentModeFlagsKHX* pModes ) const
+ {
+ return static_cast<Result>( vkGetDeviceGroupSurfacePresentModesKHX( m_device, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<VkDeviceGroupPresentModeFlagsKHX*>( pModes ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<DeviceGroupPresentModeFlagsKHX>::type Device::getGroupSurfacePresentModesKHX( SurfaceKHR surface ) const
+ {
+ DeviceGroupPresentModeFlagsKHX modes;
+ Result result = static_cast<Result>( vkGetDeviceGroupSurfacePresentModesKHX( m_device, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<VkDeviceGroupPresentModeFlagsKHX*>( &modes ) ) );
+ return createResultValue( result, modes, "vk::Device::getGroupSurfacePresentModesKHX" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::acquireNextImage2KHX( const AcquireNextImageInfoKHX* pAcquireInfo, uint32_t* pImageIndex ) const
+ {
+ return static_cast<Result>( vkAcquireNextImage2KHX( m_device, reinterpret_cast<const VkAcquireNextImageInfoKHX*>( pAcquireInfo ), pImageIndex ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValue<uint32_t> Device::acquireNextImage2KHX( const AcquireNextImageInfoKHX & acquireInfo ) const
+ {
+ uint32_t imageIndex;
+ Result result = static_cast<Result>( vkAcquireNextImage2KHX( m_device, reinterpret_cast<const VkAcquireNextImageInfoKHX*>( &acquireInfo ), &imageIndex ) );
+ return createResultValue( result, imageIndex, "vk::Device::acquireNextImage2KHX", { Result::eSuccess, Result::eTimeout, Result::eNotReady, Result::eSuboptimalKHR } );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::createDescriptorUpdateTemplateKHR( const DescriptorUpdateTemplateCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, DescriptorUpdateTemplateKHR* pDescriptorUpdateTemplate ) const
+ {
+ return static_cast<Result>( vkCreateDescriptorUpdateTemplateKHR( m_device, reinterpret_cast<const VkDescriptorUpdateTemplateCreateInfoKHR*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkDescriptorUpdateTemplateKHR*>( pDescriptorUpdateTemplate ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<DescriptorUpdateTemplateKHR>::type Device::createDescriptorUpdateTemplateKHR( const DescriptorUpdateTemplateCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ DescriptorUpdateTemplateKHR descriptorUpdateTemplate;
+ Result result = static_cast<Result>( vkCreateDescriptorUpdateTemplateKHR( m_device, reinterpret_cast<const VkDescriptorUpdateTemplateCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDescriptorUpdateTemplateKHR*>( &descriptorUpdateTemplate ) ) );
+ return createResultValue( result, descriptorUpdateTemplate, "vk::Device::createDescriptorUpdateTemplateKHR" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueDescriptorUpdateTemplateKHR Device::createDescriptorUpdateTemplateKHRUnique( const DescriptorUpdateTemplateCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ DescriptorUpdateTemplateKHRDeleter deleter( *this, allocator );
+ return UniqueDescriptorUpdateTemplateKHR( createDescriptorUpdateTemplateKHR( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::destroyDescriptorUpdateTemplateKHR( DescriptorUpdateTemplateKHR descriptorUpdateTemplate, const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroyDescriptorUpdateTemplateKHR( m_device, static_cast<VkDescriptorUpdateTemplateKHR>( descriptorUpdateTemplate ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::destroyDescriptorUpdateTemplateKHR( DescriptorUpdateTemplateKHR descriptorUpdateTemplate, Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroyDescriptorUpdateTemplateKHR( m_device, static_cast<VkDescriptorUpdateTemplateKHR>( descriptorUpdateTemplate ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Device::updateDescriptorSetWithTemplateKHR( DescriptorSet descriptorSet, DescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void* pData ) const
+ {
+ vkUpdateDescriptorSetWithTemplateKHR( m_device, static_cast<VkDescriptorSet>( descriptorSet ), static_cast<VkDescriptorUpdateTemplateKHR>( descriptorUpdateTemplate ), pData );
+ }
+
+ VULKAN_HPP_INLINE void Device::setHdrMetadataEXT( uint32_t swapchainCount, const SwapchainKHR* pSwapchains, const HdrMetadataEXT* pMetadata ) const
+ {
+ vkSetHdrMetadataEXT( m_device, swapchainCount, reinterpret_cast<const VkSwapchainKHR*>( pSwapchains ), reinterpret_cast<const VkHdrMetadataEXT*>( pMetadata ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Device::setHdrMetadataEXT( ArrayProxy<const SwapchainKHR> swapchains, ArrayProxy<const HdrMetadataEXT> metadata ) const
+ {
+#ifdef VULKAN_HPP_NO_EXCEPTIONS
+ assert( swapchains.size() == metadata.size() );
+#else
+ if ( swapchains.size() != metadata.size() )
+ {
+ throw std::logic_error( "vk::Device::setHdrMetadataEXT: swapchains.size() != metadata.size()" );
+ }
+#endif // VULKAN_HPP_NO_EXCEPTIONS
+ vkSetHdrMetadataEXT( m_device, swapchains.size() , reinterpret_cast<const VkSwapchainKHR*>( swapchains.data() ), reinterpret_cast<const VkHdrMetadataEXT*>( metadata.data() ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
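The enhanced setHdrMetadataEXT overload above pairs two ArrayProxy ranges and rejects mismatched lengths with std::logic_error (or an assert when VULKAN_HPP_NO_EXCEPTIONS is defined). A hedged illustration only; the handle names and luminance values below are placeholders, not taken from this commit:

    void setExampleHdrMetadata( vk::Device device, vk::SwapchainKHR swapchain )
    {
      vk::HdrMetadataEXT metadata;
      metadata.maxLuminance = 1000.0f;   // example mastering luminance, in nits
      metadata.minLuminance = 0.01f;
      // A single handle and a single struct each convert to a one-element
      // ArrayProxy, so the size check above is trivially satisfied.
      device.setHdrMetadataEXT( swapchain, metadata );
    }
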
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE Result Device::getSwapchainStatusKHR( SwapchainKHR swapchain ) const
+ {
+ return static_cast<Result>( vkGetSwapchainStatusKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ) ) );
+ }
+#else
+ VULKAN_HPP_INLINE Result Device::getSwapchainStatusKHR( SwapchainKHR swapchain ) const
+ {
+ Result result = static_cast<Result>( vkGetSwapchainStatusKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ) ) );
+ return createResultValue( result, "vk::Device::getSwapchainStatusKHR", { Result::eSuccess, Result::eSuboptimalKHR } );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::getRefreshCycleDurationGOOGLE( SwapchainKHR swapchain, RefreshCycleDurationGOOGLE* pDisplayTimingProperties ) const
+ {
+ return static_cast<Result>( vkGetRefreshCycleDurationGOOGLE( m_device, static_cast<VkSwapchainKHR>( swapchain ), reinterpret_cast<VkRefreshCycleDurationGOOGLE*>( pDisplayTimingProperties ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<RefreshCycleDurationGOOGLE>::type Device::getRefreshCycleDurationGOOGLE( SwapchainKHR swapchain ) const
+ {
+ RefreshCycleDurationGOOGLE displayTimingProperties;
+ Result result = static_cast<Result>( vkGetRefreshCycleDurationGOOGLE( m_device, static_cast<VkSwapchainKHR>( swapchain ), reinterpret_cast<VkRefreshCycleDurationGOOGLE*>( &displayTimingProperties ) ) );
+ return createResultValue( result, displayTimingProperties, "vk::Device::getRefreshCycleDurationGOOGLE" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Device::getPastPresentationTimingGOOGLE( SwapchainKHR swapchain, uint32_t* pPresentationTimingCount, PastPresentationTimingGOOGLE* pPresentationTimings ) const
+ {
+ return static_cast<Result>( vkGetPastPresentationTimingGOOGLE( m_device, static_cast<VkSwapchainKHR>( swapchain ), pPresentationTimingCount, reinterpret_cast<VkPastPresentationTimingGOOGLE*>( pPresentationTimings ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE typename ResultValueType<std::vector<PastPresentationTimingGOOGLE,Allocator>>::type Device::getPastPresentationTimingGOOGLE( SwapchainKHR swapchain ) const
+ {
+ std::vector<PastPresentationTimingGOOGLE,Allocator> presentationTimings;
+ uint32_t presentationTimingCount;
+ Result result = static_cast<Result>( vkGetPastPresentationTimingGOOGLE( m_device, static_cast<VkSwapchainKHR>( swapchain ), &presentationTimingCount, nullptr ) );
+ if ( ( result == Result::eSuccess ) && presentationTimingCount )
+ {
+ presentationTimings.resize( presentationTimingCount );
+ result = static_cast<Result>( vkGetPastPresentationTimingGOOGLE( m_device, static_cast<VkSwapchainKHR>( swapchain ), &presentationTimingCount, reinterpret_cast<VkPastPresentationTimingGOOGLE*>( presentationTimings.data() ) ) );
+ }
+ return createResultValue( result, presentationTimings, "vk::Device::getPastPresentationTimingGOOGLE" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
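The two VK_GOOGLE_display_timing wrappers above follow the usual enhanced-mode shape: the refresh-cycle query returns its struct directly and reports failure through createResultValue, while the presentation-timing query hides the count/resize/second-call dance inside a returned std::vector. A hedged usage sketch, assuming the extension is enabled and <vector> is available; names are placeholders:

    void inspectDisplayTiming( vk::Device device, vk::SwapchainKHR swapchain )
    {
      vk::RefreshCycleDurationGOOGLE refresh = device.getRefreshCycleDurationGOOGLE( swapchain );
      // refresh.refreshDuration is the display's refresh period in nanoseconds.

      std::vector<vk::PastPresentationTimingGOOGLE> timings =
          device.getPastPresentationTimingGOOGLE( swapchain );
      for ( vk::PastPresentationTimingGOOGLE const & t : timings )
      {
        // t.presentID, t.desiredPresentTime and t.actualPresentTime describe one
        // past present request; use them to tune future present timing.
        (void)t;
      }
      (void)refresh;
    }
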
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ class DeviceDeleter;
+ using UniqueDevice = UniqueHandle<Device, DeviceDeleter>;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+
+ class PhysicalDevice
+ {
+ public:
+ PhysicalDevice()
+ : m_physicalDevice(VK_NULL_HANDLE)
+ {}
+
+ PhysicalDevice( std::nullptr_t )
+ : m_physicalDevice(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT PhysicalDevice(VkPhysicalDevice physicalDevice)
+ : m_physicalDevice(physicalDevice)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ PhysicalDevice& operator=(VkPhysicalDevice physicalDevice)
+ {
+ m_physicalDevice = physicalDevice;
+ return *this;
+ }
+#endif
+
+ PhysicalDevice& operator=( std::nullptr_t )
+ {
+ m_physicalDevice = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(PhysicalDevice const &rhs) const
+ {
+ return m_physicalDevice == rhs.m_physicalDevice;
+ }
+
+ bool operator!=(PhysicalDevice const &rhs) const
+ {
+ return m_physicalDevice != rhs.m_physicalDevice;
+ }
+
+ bool operator<(PhysicalDevice const &rhs) const
+ {
+ return m_physicalDevice < rhs.m_physicalDevice;
+ }
+
+ void getProperties( PhysicalDeviceProperties* pProperties ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ PhysicalDeviceProperties getProperties() const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void getQueueFamilyProperties( uint32_t* pQueueFamilyPropertyCount, QueueFamilyProperties* pQueueFamilyProperties ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<QueueFamilyProperties>>
+ std::vector<QueueFamilyProperties,Allocator> getQueueFamilyProperties() const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void getMemoryProperties( PhysicalDeviceMemoryProperties* pMemoryProperties ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ PhysicalDeviceMemoryProperties getMemoryProperties() const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void getFeatures( PhysicalDeviceFeatures* pFeatures ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ PhysicalDeviceFeatures getFeatures() const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void getFormatProperties( Format format, FormatProperties* pFormatProperties ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ FormatProperties getFormatProperties( Format format ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result getImageFormatProperties( Format format, ImageType type, ImageTiling tiling, ImageUsageFlags usage, ImageCreateFlags flags, ImageFormatProperties* pImageFormatProperties ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<ImageFormatProperties>::type getImageFormatProperties( Format format, ImageType type, ImageTiling tiling, ImageUsageFlags usage, ImageCreateFlags flags ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result createDevice( const DeviceCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Device* pDevice ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<Device>::type createDevice( const DeviceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueDevice createDeviceUnique( const DeviceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result enumerateDeviceLayerProperties( uint32_t* pPropertyCount, LayerProperties* pProperties ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<LayerProperties>>
+ typename ResultValueType<std::vector<LayerProperties,Allocator>>::type enumerateDeviceLayerProperties() const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result enumerateDeviceExtensionProperties( const char* pLayerName, uint32_t* pPropertyCount, ExtensionProperties* pProperties ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<ExtensionProperties>>
+ typename ResultValueType<std::vector<ExtensionProperties,Allocator>>::type enumerateDeviceExtensionProperties( Optional<const std::string> layerName = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void getSparseImageFormatProperties( Format format, ImageType type, SampleCountFlagBits samples, ImageUsageFlags usage, ImageTiling tiling, uint32_t* pPropertyCount, SparseImageFormatProperties* pProperties ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<SparseImageFormatProperties>>
+ std::vector<SparseImageFormatProperties,Allocator> getSparseImageFormatProperties( Format format, ImageType type, SampleCountFlagBits samples, ImageUsageFlags usage, ImageTiling tiling ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result getDisplayPropertiesKHR( uint32_t* pPropertyCount, DisplayPropertiesKHR* pProperties ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<DisplayPropertiesKHR>>
+ typename ResultValueType<std::vector<DisplayPropertiesKHR,Allocator>>::type getDisplayPropertiesKHR() const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result getDisplayPlanePropertiesKHR( uint32_t* pPropertyCount, DisplayPlanePropertiesKHR* pProperties ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<DisplayPlanePropertiesKHR>>
+ typename ResultValueType<std::vector<DisplayPlanePropertiesKHR,Allocator>>::type getDisplayPlanePropertiesKHR() const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex, uint32_t* pDisplayCount, DisplayKHR* pDisplays ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<DisplayKHR>>
+ typename ResultValueType<std::vector<DisplayKHR,Allocator>>::type getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result getDisplayModePropertiesKHR( DisplayKHR display, uint32_t* pPropertyCount, DisplayModePropertiesKHR* pProperties ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<DisplayModePropertiesKHR>>
+ typename ResultValueType<std::vector<DisplayModePropertiesKHR,Allocator>>::type getDisplayModePropertiesKHR( DisplayKHR display ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result createDisplayModeKHR( DisplayKHR display, const DisplayModeCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, DisplayModeKHR* pMode ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<DisplayModeKHR>::type createDisplayModeKHR( DisplayKHR display, const DisplayModeCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result getDisplayPlaneCapabilitiesKHR( DisplayModeKHR mode, uint32_t planeIndex, DisplayPlaneCapabilitiesKHR* pCapabilities ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<DisplayPlaneCapabilitiesKHR>::type getDisplayPlaneCapabilitiesKHR( DisplayModeKHR mode, uint32_t planeIndex ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VK_USE_PLATFORM_MIR_KHR
+ Bool32 getMirPresentationSupportKHR( uint32_t queueFamilyIndex, MirConnection* connection ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ Bool32 getMirPresentationSupportKHR( uint32_t queueFamilyIndex, MirConnection & connection ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_MIR_KHR*/
+
+ Result getSurfaceSupportKHR( uint32_t queueFamilyIndex, SurfaceKHR surface, Bool32* pSupported ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<Bool32>::type getSurfaceSupportKHR( uint32_t queueFamilyIndex, SurfaceKHR surface ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result getSurfaceCapabilitiesKHR( SurfaceKHR surface, SurfaceCapabilitiesKHR* pSurfaceCapabilities ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<SurfaceCapabilitiesKHR>::type getSurfaceCapabilitiesKHR( SurfaceKHR surface ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result getSurfaceFormatsKHR( SurfaceKHR surface, uint32_t* pSurfaceFormatCount, SurfaceFormatKHR* pSurfaceFormats ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<SurfaceFormatKHR>>
+ typename ResultValueType<std::vector<SurfaceFormatKHR,Allocator>>::type getSurfaceFormatsKHR( SurfaceKHR surface ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result getSurfacePresentModesKHR( SurfaceKHR surface, uint32_t* pPresentModeCount, PresentModeKHR* pPresentModes ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<PresentModeKHR>>
+ typename ResultValueType<std::vector<PresentModeKHR,Allocator>>::type getSurfacePresentModesKHR( SurfaceKHR surface ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+ Bool32 getWaylandPresentationSupportKHR( uint32_t queueFamilyIndex, struct wl_display* display ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ Bool32 getWaylandPresentationSupportKHR( uint32_t queueFamilyIndex, struct wl_display & display ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ Bool32 getWin32PresentationSupportKHR( uint32_t queueFamilyIndex ) const;
+#endif /*VK_USE_PLATFORM_WIN32_KHR*/
+
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+ Bool32 getXlibPresentationSupportKHR( uint32_t queueFamilyIndex, Display* dpy, VisualID visualID ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ Bool32 getXlibPresentationSupportKHR( uint32_t queueFamilyIndex, Display & dpy, VisualID visualID ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_XLIB_KHR*/
+
+#ifdef VK_USE_PLATFORM_XCB_KHR
+ Bool32 getXcbPresentationSupportKHR( uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ Bool32 getXcbPresentationSupportKHR( uint32_t queueFamilyIndex, xcb_connection_t & connection, xcb_visualid_t visual_id ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_XCB_KHR*/
+
+ Result getExternalImageFormatPropertiesNV( Format format, ImageType type, ImageTiling tiling, ImageUsageFlags usage, ImageCreateFlags flags, ExternalMemoryHandleTypeFlagsNV externalHandleType, ExternalImageFormatPropertiesNV* pExternalImageFormatProperties ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<ExternalImageFormatPropertiesNV>::type getExternalImageFormatPropertiesNV( Format format, ImageType type, ImageTiling tiling, ImageUsageFlags usage, ImageCreateFlags flags, ExternalMemoryHandleTypeFlagsNV externalHandleType ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void getGeneratedCommandsPropertiesNVX( DeviceGeneratedCommandsFeaturesNVX* pFeatures, DeviceGeneratedCommandsLimitsNVX* pLimits ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ DeviceGeneratedCommandsLimitsNVX getGeneratedCommandsPropertiesNVX( DeviceGeneratedCommandsFeaturesNVX & features ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void getFeatures2KHR( PhysicalDeviceFeatures2KHR* pFeatures ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ PhysicalDeviceFeatures2KHR getFeatures2KHR() const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void getProperties2KHR( PhysicalDeviceProperties2KHR* pProperties ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ PhysicalDeviceProperties2KHR getProperties2KHR() const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void getFormatProperties2KHR( Format format, FormatProperties2KHR* pFormatProperties ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ FormatProperties2KHR getFormatProperties2KHR( Format format ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result getImageFormatProperties2KHR( const PhysicalDeviceImageFormatInfo2KHR* pImageFormatInfo, ImageFormatProperties2KHR* pImageFormatProperties ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<ImageFormatProperties2KHR>::type getImageFormatProperties2KHR( const PhysicalDeviceImageFormatInfo2KHR & imageFormatInfo ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void getQueueFamilyProperties2KHR( uint32_t* pQueueFamilyPropertyCount, QueueFamilyProperties2KHR* pQueueFamilyProperties ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<QueueFamilyProperties2KHR>>
+ std::vector<QueueFamilyProperties2KHR,Allocator> getQueueFamilyProperties2KHR() const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void getMemoryProperties2KHR( PhysicalDeviceMemoryProperties2KHR* pMemoryProperties ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ PhysicalDeviceMemoryProperties2KHR getMemoryProperties2KHR() const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void getSparseImageFormatProperties2KHR( const PhysicalDeviceSparseImageFormatInfo2KHR* pFormatInfo, uint32_t* pPropertyCount, SparseImageFormatProperties2KHR* pProperties ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<SparseImageFormatProperties2KHR>>
+ std::vector<SparseImageFormatProperties2KHR,Allocator> getSparseImageFormatProperties2KHR( const PhysicalDeviceSparseImageFormatInfo2KHR & formatInfo ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void getExternalBufferPropertiesKHX( const PhysicalDeviceExternalBufferInfoKHX* pExternalBufferInfo, ExternalBufferPropertiesKHX* pExternalBufferProperties ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ExternalBufferPropertiesKHX getExternalBufferPropertiesKHX( const PhysicalDeviceExternalBufferInfoKHX & externalBufferInfo ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void getExternalSemaphorePropertiesKHX( const PhysicalDeviceExternalSemaphoreInfoKHX* pExternalSemaphoreInfo, ExternalSemaphorePropertiesKHX* pExternalSemaphoreProperties ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ExternalSemaphorePropertiesKHX getExternalSemaphorePropertiesKHX( const PhysicalDeviceExternalSemaphoreInfoKHX & externalSemaphoreInfo ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ Result releaseDisplayEXT( DisplayKHR display ) const;
+#else
+ ResultValueType<void>::type releaseDisplayEXT( DisplayKHR display ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
+ Result acquireXlibDisplayEXT( Display* dpy, DisplayKHR display ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<Display>::type acquireXlibDisplayEXT( DisplayKHR display ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_XLIB_XRANDR_EXT*/
+
+#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
+ Result getRandROutputDisplayEXT( Display* dpy, RROutput rrOutput, DisplayKHR* pDisplay ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<DisplayKHR>::type getRandROutputDisplayEXT( Display & dpy, RROutput rrOutput ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_XLIB_XRANDR_EXT*/
+
+ Result getSurfaceCapabilities2EXT( SurfaceKHR surface, SurfaceCapabilities2EXT* pSurfaceCapabilities ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<SurfaceCapabilities2EXT>::type getSurfaceCapabilities2EXT( SurfaceKHR surface ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result getPresentRectanglesKHX( SurfaceKHR surface, uint32_t* pRectCount, Rect2D* pRects ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<Rect2D>>
+ typename ResultValueType<std::vector<Rect2D,Allocator>>::type getPresentRectanglesKHX( SurfaceKHR surface ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result getSurfaceCapabilities2KHR( const PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, SurfaceCapabilities2KHR* pSurfaceCapabilities ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ Result getSurfaceCapabilities2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result getSurfaceFormats2KHR( const PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, SurfaceFormat2KHR* pSurfaceFormats ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<SurfaceFormat2KHR>>
+ typename ResultValueType<std::vector<SurfaceFormat2KHR,Allocator>>::type getSurfaceFormats2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkPhysicalDevice() const
+ {
+ return m_physicalDevice;
+ }
+
+ explicit operator bool() const
+ {
+ return m_physicalDevice != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_physicalDevice == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkPhysicalDevice m_physicalDevice;
+ };
+ static_assert( sizeof( PhysicalDevice ) == sizeof( VkPhysicalDevice ), "handle and wrapper have different size!" );
+
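Like Device above, PhysicalDevice is a handle-sized wrapper: each query exists once as a C-style overload taking output pointers and once (outside VULKAN_HPP_DISABLE_ENHANCED_MODE) as an overload returning the value. A hedged sketch of the value-returning style, assuming <cstdio> and <vector> are available and a vk::PhysicalDevice has already been obtained from the instance; the function name is a placeholder:

    void describeAdapter( vk::PhysicalDevice gpu )
    {
      vk::PhysicalDeviceProperties props = gpu.getProperties();
      vk::PhysicalDeviceMemoryProperties memory = gpu.getMemoryProperties();
      std::vector<vk::QueueFamilyProperties> queueFamilies = gpu.getQueueFamilyProperties();

      // deviceName and the counts come straight from the underlying Vk structs.
      printf( "%s: %u queue families, %u memory heaps\n",
              props.deviceName,
              static_cast<uint32_t>( queueFamilies.size() ),
              memory.memoryHeapCount );
    }
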
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ class DeviceDeleter
+ {
+ public:
+ DeviceDeleter( Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_allocator( allocator )
+ {}
+
+ void operator()( Device device )
+ {
+ device.destroy( m_allocator );
+ }
+
+ private:
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+
+ VULKAN_HPP_INLINE void PhysicalDevice::getProperties( PhysicalDeviceProperties* pProperties ) const
+ {
+ vkGetPhysicalDeviceProperties( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceProperties*>( pProperties ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE PhysicalDeviceProperties PhysicalDevice::getProperties() const
+ {
+ PhysicalDeviceProperties properties;
+ vkGetPhysicalDeviceProperties( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceProperties*>( &properties ) );
+ return properties;
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void PhysicalDevice::getQueueFamilyProperties( uint32_t* pQueueFamilyPropertyCount, QueueFamilyProperties* pQueueFamilyProperties ) const
+ {
+ vkGetPhysicalDeviceQueueFamilyProperties( m_physicalDevice, pQueueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties*>( pQueueFamilyProperties ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE std::vector<QueueFamilyProperties,Allocator> PhysicalDevice::getQueueFamilyProperties() const
+ {
+ std::vector<QueueFamilyProperties,Allocator> queueFamilyProperties;
+ uint32_t queueFamilyPropertyCount;
+ vkGetPhysicalDeviceQueueFamilyProperties( m_physicalDevice, &queueFamilyPropertyCount, nullptr );
+ queueFamilyProperties.resize( queueFamilyPropertyCount );
+ vkGetPhysicalDeviceQueueFamilyProperties( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties*>( queueFamilyProperties.data() ) );
+ return queueFamilyProperties;
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void PhysicalDevice::getMemoryProperties( PhysicalDeviceMemoryProperties* pMemoryProperties ) const
+ {
+ vkGetPhysicalDeviceMemoryProperties( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceMemoryProperties*>( pMemoryProperties ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE PhysicalDeviceMemoryProperties PhysicalDevice::getMemoryProperties() const
+ {
+ PhysicalDeviceMemoryProperties memoryProperties;
+ vkGetPhysicalDeviceMemoryProperties( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceMemoryProperties*>( &memoryProperties ) );
+ return memoryProperties;
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void PhysicalDevice::getFeatures( PhysicalDeviceFeatures* pFeatures ) const
+ {
+ vkGetPhysicalDeviceFeatures( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceFeatures*>( pFeatures ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE PhysicalDeviceFeatures PhysicalDevice::getFeatures() const
+ {
+ PhysicalDeviceFeatures features;
+ vkGetPhysicalDeviceFeatures( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceFeatures*>( &features ) );
+ return features;
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void PhysicalDevice::getFormatProperties( Format format, FormatProperties* pFormatProperties ) const
+ {
+ vkGetPhysicalDeviceFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), reinterpret_cast<VkFormatProperties*>( pFormatProperties ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE FormatProperties PhysicalDevice::getFormatProperties( Format format ) const
+ {
+ FormatProperties formatProperties;
+ vkGetPhysicalDeviceFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), reinterpret_cast<VkFormatProperties*>( &formatProperties ) );
+ return formatProperties;
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result PhysicalDevice::getImageFormatProperties( Format format, ImageType type, ImageTiling tiling, ImageUsageFlags usage, ImageCreateFlags flags, ImageFormatProperties* pImageFormatProperties ) const
+ {
+ return static_cast<Result>( vkGetPhysicalDeviceImageFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkImageTiling>( tiling ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageCreateFlags>( flags ), reinterpret_cast<VkImageFormatProperties*>( pImageFormatProperties ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<ImageFormatProperties>::type PhysicalDevice::getImageFormatProperties( Format format, ImageType type, ImageTiling tiling, ImageUsageFlags usage, ImageCreateFlags flags ) const
+ {
+ ImageFormatProperties imageFormatProperties;
+ Result result = static_cast<Result>( vkGetPhysicalDeviceImageFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkImageTiling>( tiling ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageCreateFlags>( flags ), reinterpret_cast<VkImageFormatProperties*>( &imageFormatProperties ) ) );
+ return createResultValue( result, imageFormatProperties, "vk::PhysicalDevice::getImageFormatProperties" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result PhysicalDevice::createDevice( const DeviceCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Device* pDevice ) const
+ {
+ return static_cast<Result>( vkCreateDevice( m_physicalDevice, reinterpret_cast<const VkDeviceCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkDevice*>( pDevice ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<Device>::type PhysicalDevice::createDevice( const DeviceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ Device device;
+ Result result = static_cast<Result>( vkCreateDevice( m_physicalDevice, reinterpret_cast<const VkDeviceCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDevice*>( &device ) ) );
+ return createResultValue( result, device, "vk::PhysicalDevice::createDevice" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueDevice PhysicalDevice::createDeviceUnique( const DeviceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ DeviceDeleter deleter( allocator );
+ return UniqueDevice( createDevice( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result PhysicalDevice::enumerateDeviceLayerProperties( uint32_t* pPropertyCount, LayerProperties* pProperties ) const
+ {
+ return static_cast<Result>( vkEnumerateDeviceLayerProperties( m_physicalDevice, pPropertyCount, reinterpret_cast<VkLayerProperties*>( pProperties ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE typename ResultValueType<std::vector<LayerProperties,Allocator>>::type PhysicalDevice::enumerateDeviceLayerProperties() const
+ {
+ std::vector<LayerProperties,Allocator> properties;
+ uint32_t propertyCount;
+ Result result;
+ do
+ {
+ result = static_cast<Result>( vkEnumerateDeviceLayerProperties( m_physicalDevice, &propertyCount, nullptr ) );
+ if ( ( result == Result::eSuccess ) && propertyCount )
+ {
+ properties.resize( propertyCount );
+ result = static_cast<Result>( vkEnumerateDeviceLayerProperties( m_physicalDevice, &propertyCount, reinterpret_cast<VkLayerProperties*>( properties.data() ) ) );
+ }
+ } while ( result == Result::eIncomplete );
+ assert( propertyCount <= properties.size() );
+ properties.resize( propertyCount );
+ return createResultValue( result, properties, "vk::PhysicalDevice::enumerateDeviceLayerProperties" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result PhysicalDevice::enumerateDeviceExtensionProperties( const char* pLayerName, uint32_t* pPropertyCount, ExtensionProperties* pProperties ) const
+ {
+ return static_cast<Result>( vkEnumerateDeviceExtensionProperties( m_physicalDevice, pLayerName, pPropertyCount, reinterpret_cast<VkExtensionProperties*>( pProperties ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE typename ResultValueType<std::vector<ExtensionProperties,Allocator>>::type PhysicalDevice::enumerateDeviceExtensionProperties( Optional<const std::string> layerName ) const
+ {
+ std::vector<ExtensionProperties,Allocator> properties;
+ uint32_t propertyCount;
+ Result result;
+ do
+ {
+ result = static_cast<Result>( vkEnumerateDeviceExtensionProperties( m_physicalDevice, layerName ? layerName->c_str() : nullptr, &propertyCount, nullptr ) );
+ if ( ( result == Result::eSuccess ) && propertyCount )
+ {
+ properties.resize( propertyCount );
+ result = static_cast<Result>( vkEnumerateDeviceExtensionProperties( m_physicalDevice, layerName ? layerName->c_str() : nullptr, &propertyCount, reinterpret_cast<VkExtensionProperties*>( properties.data() ) ) );
+ }
+ } while ( result == Result::eIncomplete );
+ assert( propertyCount <= properties.size() );
+ properties.resize( propertyCount );
+ return createResultValue( result, properties, "vk::PhysicalDevice::enumerateDeviceExtensionProperties" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
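enumerateDeviceLayerProperties and enumerateDeviceExtensionProperties above both wrap the standard two-call Vulkan enumeration pattern, looping while the implementation reports Result::eIncomplete because the count changed between calls. A hedged usage sketch (the swapchain check is an illustration, not something this commit performs), assuming <cstring> and <vector> are available:

    bool supportsSwapchain( vk::PhysicalDevice gpu )
    {
      // The enhanced overload hides the count/resize/retry loop shown above.
      std::vector<vk::ExtensionProperties> extensions = gpu.enumerateDeviceExtensionProperties();
      for ( vk::ExtensionProperties const & ext : extensions )
      {
        if ( strcmp( ext.extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME ) == 0 )
        {
          return true;
        }
      }
      return false;
    }
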
+ VULKAN_HPP_INLINE void PhysicalDevice::getSparseImageFormatProperties( Format format, ImageType type, SampleCountFlagBits samples, ImageUsageFlags usage, ImageTiling tiling, uint32_t* pPropertyCount, SparseImageFormatProperties* pProperties ) const
+ {
+ vkGetPhysicalDeviceSparseImageFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkSampleCountFlagBits>( samples ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageTiling>( tiling ), pPropertyCount, reinterpret_cast<VkSparseImageFormatProperties*>( pProperties ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE std::vector<SparseImageFormatProperties,Allocator> PhysicalDevice::getSparseImageFormatProperties( Format format, ImageType type, SampleCountFlagBits samples, ImageUsageFlags usage, ImageTiling tiling ) const
+ {
+ std::vector<SparseImageFormatProperties,Allocator> properties;
+ uint32_t propertyCount;
+ vkGetPhysicalDeviceSparseImageFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkSampleCountFlagBits>( samples ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageTiling>( tiling ), &propertyCount, nullptr );
+ properties.resize( propertyCount );
+ vkGetPhysicalDeviceSparseImageFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkSampleCountFlagBits>( samples ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageTiling>( tiling ), &propertyCount, reinterpret_cast<VkSparseImageFormatProperties*>( properties.data() ) );
+ return properties;
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPropertiesKHR( uint32_t* pPropertyCount, DisplayPropertiesKHR* pProperties ) const
+ {
+ return static_cast<Result>( vkGetPhysicalDeviceDisplayPropertiesKHR( m_physicalDevice, pPropertyCount, reinterpret_cast<VkDisplayPropertiesKHR*>( pProperties ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayPropertiesKHR,Allocator>>::type PhysicalDevice::getDisplayPropertiesKHR() const
+ {
+ std::vector<DisplayPropertiesKHR,Allocator> properties;
+ uint32_t propertyCount;
+ Result result;
+ do
+ {
+ result = static_cast<Result>( vkGetPhysicalDeviceDisplayPropertiesKHR( m_physicalDevice, &propertyCount, nullptr ) );
+ if ( ( result == Result::eSuccess ) && propertyCount )
+ {
+ properties.resize( propertyCount );
+ result = static_cast<Result>( vkGetPhysicalDeviceDisplayPropertiesKHR( m_physicalDevice, &propertyCount, reinterpret_cast<VkDisplayPropertiesKHR*>( properties.data() ) ) );
+ }
+ } while ( result == Result::eIncomplete );
+ assert( propertyCount <= properties.size() );
+ properties.resize( propertyCount );
+ return createResultValue( result, properties, "vk::PhysicalDevice::getDisplayPropertiesKHR" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPlanePropertiesKHR( uint32_t* pPropertyCount, DisplayPlanePropertiesKHR* pProperties ) const
+ {
+ return static_cast<Result>( vkGetPhysicalDeviceDisplayPlanePropertiesKHR( m_physicalDevice, pPropertyCount, reinterpret_cast<VkDisplayPlanePropertiesKHR*>( pProperties ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayPlanePropertiesKHR,Allocator>>::type PhysicalDevice::getDisplayPlanePropertiesKHR() const
+ {
+ std::vector<DisplayPlanePropertiesKHR,Allocator> properties;
+ uint32_t propertyCount;
+ Result result;
+ do
+ {
+ result = static_cast<Result>( vkGetPhysicalDeviceDisplayPlanePropertiesKHR( m_physicalDevice, &propertyCount, nullptr ) );
+ if ( ( result == Result::eSuccess ) && propertyCount )
+ {
+ properties.resize( propertyCount );
+ result = static_cast<Result>( vkGetPhysicalDeviceDisplayPlanePropertiesKHR( m_physicalDevice, &propertyCount, reinterpret_cast<VkDisplayPlanePropertiesKHR*>( properties.data() ) ) );
+ }
+ } while ( result == Result::eIncomplete );
+ assert( propertyCount <= properties.size() );
+ properties.resize( propertyCount );
+ return createResultValue( result, properties, "vk::PhysicalDevice::getDisplayPlanePropertiesKHR" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex, uint32_t* pDisplayCount, DisplayKHR* pDisplays ) const
+ {
+ return static_cast<Result>( vkGetDisplayPlaneSupportedDisplaysKHR( m_physicalDevice, planeIndex, pDisplayCount, reinterpret_cast<VkDisplayKHR*>( pDisplays ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayKHR,Allocator>>::type PhysicalDevice::getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex ) const
+ {
+ std::vector<DisplayKHR,Allocator> displays;
+ uint32_t displayCount;
+ Result result;
+ do
+ {
+ result = static_cast<Result>( vkGetDisplayPlaneSupportedDisplaysKHR( m_physicalDevice, planeIndex, &displayCount, nullptr ) );
+ if ( ( result == Result::eSuccess ) && displayCount )
+ {
+ displays.resize( displayCount );
+ result = static_cast<Result>( vkGetDisplayPlaneSupportedDisplaysKHR( m_physicalDevice, planeIndex, &displayCount, reinterpret_cast<VkDisplayKHR*>( displays.data() ) ) );
+ }
+ } while ( result == Result::eIncomplete );
+ assert( displayCount <= displays.size() );
+ displays.resize( displayCount );
+ return createResultValue( result, displays, "vk::PhysicalDevice::getDisplayPlaneSupportedDisplaysKHR" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayModePropertiesKHR( DisplayKHR display, uint32_t* pPropertyCount, DisplayModePropertiesKHR* pProperties ) const
+ {
+ return static_cast<Result>( vkGetDisplayModePropertiesKHR( m_physicalDevice, static_cast<VkDisplayKHR>( display ), pPropertyCount, reinterpret_cast<VkDisplayModePropertiesKHR*>( pProperties ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayModePropertiesKHR,Allocator>>::type PhysicalDevice::getDisplayModePropertiesKHR( DisplayKHR display ) const
+ {
+ std::vector<DisplayModePropertiesKHR,Allocator> properties;
+ uint32_t propertyCount;
+ Result result;
+ do
+ {
+ result = static_cast<Result>( vkGetDisplayModePropertiesKHR( m_physicalDevice, static_cast<VkDisplayKHR>( display ), &propertyCount, nullptr ) );
+ if ( ( result == Result::eSuccess ) && propertyCount )
+ {
+ properties.resize( propertyCount );
+ result = static_cast<Result>( vkGetDisplayModePropertiesKHR( m_physicalDevice, static_cast<VkDisplayKHR>( display ), &propertyCount, reinterpret_cast<VkDisplayModePropertiesKHR*>( properties.data() ) ) );
+ }
+ } while ( result == Result::eIncomplete );
+ assert( propertyCount <= properties.size() );
+ properties.resize( propertyCount );
+ return createResultValue( result, properties, "vk::PhysicalDevice::getDisplayModePropertiesKHR" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result PhysicalDevice::createDisplayModeKHR( DisplayKHR display, const DisplayModeCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, DisplayModeKHR* pMode ) const
+ {
+ return static_cast<Result>( vkCreateDisplayModeKHR( m_physicalDevice, static_cast<VkDisplayKHR>( display ), reinterpret_cast<const VkDisplayModeCreateInfoKHR*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkDisplayModeKHR*>( pMode ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<DisplayModeKHR>::type PhysicalDevice::createDisplayModeKHR( DisplayKHR display, const DisplayModeCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ DisplayModeKHR mode;
+ Result result = static_cast<Result>( vkCreateDisplayModeKHR( m_physicalDevice, static_cast<VkDisplayKHR>( display ), reinterpret_cast<const VkDisplayModeCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDisplayModeKHR*>( &mode ) ) );
+ return createResultValue( result, mode, "vk::PhysicalDevice::createDisplayModeKHR" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPlaneCapabilitiesKHR( DisplayModeKHR mode, uint32_t planeIndex, DisplayPlaneCapabilitiesKHR* pCapabilities ) const
+ {
+ return static_cast<Result>( vkGetDisplayPlaneCapabilitiesKHR( m_physicalDevice, static_cast<VkDisplayModeKHR>( mode ), planeIndex, reinterpret_cast<VkDisplayPlaneCapabilitiesKHR*>( pCapabilities ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<DisplayPlaneCapabilitiesKHR>::type PhysicalDevice::getDisplayPlaneCapabilitiesKHR( DisplayModeKHR mode, uint32_t planeIndex ) const
+ {
+ DisplayPlaneCapabilitiesKHR capabilities;
+ Result result = static_cast<Result>( vkGetDisplayPlaneCapabilitiesKHR( m_physicalDevice, static_cast<VkDisplayModeKHR>( mode ), planeIndex, reinterpret_cast<VkDisplayPlaneCapabilitiesKHR*>( &capabilities ) ) );
+ return createResultValue( result, capabilities, "vk::PhysicalDevice::getDisplayPlaneCapabilitiesKHR" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VK_USE_PLATFORM_MIR_KHR
+ VULKAN_HPP_INLINE Bool32 PhysicalDevice::getMirPresentationSupportKHR( uint32_t queueFamilyIndex, MirConnection* connection ) const
+ {
+ return vkGetPhysicalDeviceMirPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, connection );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE Bool32 PhysicalDevice::getMirPresentationSupportKHR( uint32_t queueFamilyIndex, MirConnection & connection ) const
+ {
+ return vkGetPhysicalDeviceMirPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, &connection );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_MIR_KHR*/
+
+ VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceSupportKHR( uint32_t queueFamilyIndex, SurfaceKHR surface, Bool32* pSupported ) const
+ {
+ return static_cast<Result>( vkGetPhysicalDeviceSurfaceSupportKHR( m_physicalDevice, queueFamilyIndex, static_cast<VkSurfaceKHR>( surface ), pSupported ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<Bool32>::type PhysicalDevice::getSurfaceSupportKHR( uint32_t queueFamilyIndex, SurfaceKHR surface ) const
+ {
+ Bool32 supported;
+ Result result = static_cast<Result>( vkGetPhysicalDeviceSurfaceSupportKHR( m_physicalDevice, queueFamilyIndex, static_cast<VkSurfaceKHR>( surface ), &supported ) );
+ return createResultValue( result, supported, "vk::PhysicalDevice::getSurfaceSupportKHR" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceCapabilitiesKHR( SurfaceKHR surface, SurfaceCapabilitiesKHR* pSurfaceCapabilities ) const
+ {
+ return static_cast<Result>( vkGetPhysicalDeviceSurfaceCapabilitiesKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<VkSurfaceCapabilitiesKHR*>( pSurfaceCapabilities ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<SurfaceCapabilitiesKHR>::type PhysicalDevice::getSurfaceCapabilitiesKHR( SurfaceKHR surface ) const
+ {
+ SurfaceCapabilitiesKHR surfaceCapabilities;
+ Result result = static_cast<Result>( vkGetPhysicalDeviceSurfaceCapabilitiesKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<VkSurfaceCapabilitiesKHR*>( &surfaceCapabilities ) ) );
+ return createResultValue( result, surfaceCapabilities, "vk::PhysicalDevice::getSurfaceCapabilitiesKHR" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceFormatsKHR( SurfaceKHR surface, uint32_t* pSurfaceFormatCount, SurfaceFormatKHR* pSurfaceFormats ) const
+ {
+ return static_cast<Result>( vkGetPhysicalDeviceSurfaceFormatsKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), pSurfaceFormatCount, reinterpret_cast<VkSurfaceFormatKHR*>( pSurfaceFormats ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE typename ResultValueType<std::vector<SurfaceFormatKHR,Allocator>>::type PhysicalDevice::getSurfaceFormatsKHR( SurfaceKHR surface ) const
+ {
+ std::vector<SurfaceFormatKHR,Allocator> surfaceFormats;
+ uint32_t surfaceFormatCount;
+ Result result;
+ do
+ {
+ result = static_cast<Result>( vkGetPhysicalDeviceSurfaceFormatsKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), &surfaceFormatCount, nullptr ) );
+ if ( ( result == Result::eSuccess ) && surfaceFormatCount )
+ {
+ surfaceFormats.resize( surfaceFormatCount );
+ result = static_cast<Result>( vkGetPhysicalDeviceSurfaceFormatsKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), &surfaceFormatCount, reinterpret_cast<VkSurfaceFormatKHR*>( surfaceFormats.data() ) ) );
+ }
+ } while ( result == Result::eIncomplete );
+ assert( surfaceFormatCount <= surfaceFormats.size() );
+ surfaceFormats.resize( surfaceFormatCount );
+ return createResultValue( result, surfaceFormats, "vk::PhysicalDevice::getSurfaceFormatsKHR" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result PhysicalDevice::getSurfacePresentModesKHR( SurfaceKHR surface, uint32_t* pPresentModeCount, PresentModeKHR* pPresentModes ) const
+ {
+ return static_cast<Result>( vkGetPhysicalDeviceSurfacePresentModesKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), pPresentModeCount, reinterpret_cast<VkPresentModeKHR*>( pPresentModes ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE typename ResultValueType<std::vector<PresentModeKHR,Allocator>>::type PhysicalDevice::getSurfacePresentModesKHR( SurfaceKHR surface ) const
+ {
+ std::vector<PresentModeKHR,Allocator> presentModes;
+ uint32_t presentModeCount;
+ Result result;
+ do
+ {
+ result = static_cast<Result>( vkGetPhysicalDeviceSurfacePresentModesKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), &presentModeCount, nullptr ) );
+ if ( ( result == Result::eSuccess ) && presentModeCount )
+ {
+ presentModes.resize( presentModeCount );
+ result = static_cast<Result>( vkGetPhysicalDeviceSurfacePresentModesKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), &presentModeCount, reinterpret_cast<VkPresentModeKHR*>( presentModes.data() ) ) );
+ }
+ } while ( result == Result::eIncomplete );
+ assert( presentModeCount <= presentModes.size() );
+ presentModes.resize( presentModeCount );
+ return createResultValue( result, presentModes, "vk::PhysicalDevice::getSurfacePresentModesKHR" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
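+
+ // The enhanced-mode getters above wrap the usual two-call enumeration pattern
+ // (query the count, resize the vector, fetch, and retry while the result is eIncomplete).
+ // A minimal usage sketch, assuming a valid vk::PhysicalDevice named physicalDevice and a
+ // vk::SurfaceKHR named surface obtained elsewhere (both names are placeholders):
+ //
+ //   std::vector<vk::SurfaceFormatKHR> formats = physicalDevice.getSurfaceFormatsKHR( surface );
+ //   std::vector<vk::PresentModeKHR> presentModes = physicalDevice.getSurfacePresentModesKHR( surface );
+ //
+ // On failure these calls report the error through createResultValue rather than
+ // returning a raw Result, so the caller only sees the filled vectors on success.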
+
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+ VULKAN_HPP_INLINE Bool32 PhysicalDevice::getWaylandPresentationSupportKHR( uint32_t queueFamilyIndex, struct wl_display* display ) const
+ {
+ return vkGetPhysicalDeviceWaylandPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, display );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE Bool32 PhysicalDevice::getWaylandPresentationSupportKHR( uint32_t queueFamilyIndex, struct wl_display & display ) const
+ {
+ return vkGetPhysicalDeviceWaylandPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, &display );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ VULKAN_HPP_INLINE Bool32 PhysicalDevice::getWin32PresentationSupportKHR( uint32_t queueFamilyIndex ) const
+ {
+ return vkGetPhysicalDeviceWin32PresentationSupportKHR( m_physicalDevice, queueFamilyIndex );
+ }
+#endif /*VK_USE_PLATFORM_WIN32_KHR*/
+
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+ VULKAN_HPP_INLINE Bool32 PhysicalDevice::getXlibPresentationSupportKHR( uint32_t queueFamilyIndex, Display* dpy, VisualID visualID ) const
+ {
+ return vkGetPhysicalDeviceXlibPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, dpy, visualID );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE Bool32 PhysicalDevice::getXlibPresentationSupportKHR( uint32_t queueFamilyIndex, Display & dpy, VisualID visualID ) const
+ {
+ return vkGetPhysicalDeviceXlibPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, &dpy, visualID );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_XLIB_KHR*/
+
+#ifdef VK_USE_PLATFORM_XCB_KHR
+ VULKAN_HPP_INLINE Bool32 PhysicalDevice::getXcbPresentationSupportKHR( uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id ) const
+ {
+ return vkGetPhysicalDeviceXcbPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, connection, visual_id );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE Bool32 PhysicalDevice::getXcbPresentationSupportKHR( uint32_t queueFamilyIndex, xcb_connection_t & connection, xcb_visualid_t visual_id ) const
+ {
+ return vkGetPhysicalDeviceXcbPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, &connection, visual_id );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_XCB_KHR*/
+
+ VULKAN_HPP_INLINE Result PhysicalDevice::getExternalImageFormatPropertiesNV( Format format, ImageType type, ImageTiling tiling, ImageUsageFlags usage, ImageCreateFlags flags, ExternalMemoryHandleTypeFlagsNV externalHandleType, ExternalImageFormatPropertiesNV* pExternalImageFormatProperties ) const
+ {
+ return static_cast<Result>( vkGetPhysicalDeviceExternalImageFormatPropertiesNV( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkImageTiling>( tiling ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageCreateFlags>( flags ), static_cast<VkExternalMemoryHandleTypeFlagsNV>( externalHandleType ), reinterpret_cast<VkExternalImageFormatPropertiesNV*>( pExternalImageFormatProperties ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<ExternalImageFormatPropertiesNV>::type PhysicalDevice::getExternalImageFormatPropertiesNV( Format format, ImageType type, ImageTiling tiling, ImageUsageFlags usage, ImageCreateFlags flags, ExternalMemoryHandleTypeFlagsNV externalHandleType ) const
+ {
+ ExternalImageFormatPropertiesNV externalImageFormatProperties;
+ Result result = static_cast<Result>( vkGetPhysicalDeviceExternalImageFormatPropertiesNV( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkImageTiling>( tiling ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageCreateFlags>( flags ), static_cast<VkExternalMemoryHandleTypeFlagsNV>( externalHandleType ), reinterpret_cast<VkExternalImageFormatPropertiesNV*>( &externalImageFormatProperties ) ) );
+ return createResultValue( result, externalImageFormatProperties, "vk::PhysicalDevice::getExternalImageFormatPropertiesNV" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void PhysicalDevice::getGeneratedCommandsPropertiesNVX( DeviceGeneratedCommandsFeaturesNVX* pFeatures, DeviceGeneratedCommandsLimitsNVX* pLimits ) const
+ {
+ vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX( m_physicalDevice, reinterpret_cast<VkDeviceGeneratedCommandsFeaturesNVX*>( pFeatures ), reinterpret_cast<VkDeviceGeneratedCommandsLimitsNVX*>( pLimits ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE DeviceGeneratedCommandsLimitsNVX PhysicalDevice::getGeneratedCommandsPropertiesNVX( DeviceGeneratedCommandsFeaturesNVX & features ) const
+ {
+ DeviceGeneratedCommandsLimitsNVX limits;
+ vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX( m_physicalDevice, reinterpret_cast<VkDeviceGeneratedCommandsFeaturesNVX*>( &features ), reinterpret_cast<VkDeviceGeneratedCommandsLimitsNVX*>( &limits ) );
+ return limits;
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void PhysicalDevice::getFeatures2KHR( PhysicalDeviceFeatures2KHR* pFeatures ) const
+ {
+ vkGetPhysicalDeviceFeatures2KHR( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceFeatures2KHR*>( pFeatures ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE PhysicalDeviceFeatures2KHR PhysicalDevice::getFeatures2KHR() const
+ {
+ PhysicalDeviceFeatures2KHR features;
+ vkGetPhysicalDeviceFeatures2KHR( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceFeatures2KHR*>( &features ) );
+ return features;
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void PhysicalDevice::getProperties2KHR( PhysicalDeviceProperties2KHR* pProperties ) const
+ {
+ vkGetPhysicalDeviceProperties2KHR( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceProperties2KHR*>( pProperties ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE PhysicalDeviceProperties2KHR PhysicalDevice::getProperties2KHR() const
+ {
+ PhysicalDeviceProperties2KHR properties;
+ vkGetPhysicalDeviceProperties2KHR( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceProperties2KHR*>( &properties ) );
+ return properties;
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void PhysicalDevice::getFormatProperties2KHR( Format format, FormatProperties2KHR* pFormatProperties ) const
+ {
+ vkGetPhysicalDeviceFormatProperties2KHR( m_physicalDevice, static_cast<VkFormat>( format ), reinterpret_cast<VkFormatProperties2KHR*>( pFormatProperties ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE FormatProperties2KHR PhysicalDevice::getFormatProperties2KHR( Format format ) const
+ {
+ FormatProperties2KHR formatProperties;
+ vkGetPhysicalDeviceFormatProperties2KHR( m_physicalDevice, static_cast<VkFormat>( format ), reinterpret_cast<VkFormatProperties2KHR*>( &formatProperties ) );
+ return formatProperties;
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result PhysicalDevice::getImageFormatProperties2KHR( const PhysicalDeviceImageFormatInfo2KHR* pImageFormatInfo, ImageFormatProperties2KHR* pImageFormatProperties ) const
+ {
+ return static_cast<Result>( vkGetPhysicalDeviceImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceImageFormatInfo2KHR*>( pImageFormatInfo ), reinterpret_cast<VkImageFormatProperties2KHR*>( pImageFormatProperties ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<ImageFormatProperties2KHR>::type PhysicalDevice::getImageFormatProperties2KHR( const PhysicalDeviceImageFormatInfo2KHR & imageFormatInfo ) const
+ {
+ ImageFormatProperties2KHR imageFormatProperties;
+ Result result = static_cast<Result>( vkGetPhysicalDeviceImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceImageFormatInfo2KHR*>( &imageFormatInfo ), reinterpret_cast<VkImageFormatProperties2KHR*>( &imageFormatProperties ) ) );
+ return createResultValue( result, imageFormatProperties, "vk::PhysicalDevice::getImageFormatProperties2KHR" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void PhysicalDevice::getQueueFamilyProperties2KHR( uint32_t* pQueueFamilyPropertyCount, QueueFamilyProperties2KHR* pQueueFamilyProperties ) const
+ {
+ vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, pQueueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties2KHR*>( pQueueFamilyProperties ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE std::vector<QueueFamilyProperties2KHR,Allocator> PhysicalDevice::getQueueFamilyProperties2KHR() const
+ {
+ std::vector<QueueFamilyProperties2KHR,Allocator> queueFamilyProperties;
+ uint32_t queueFamilyPropertyCount;
+ vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, &queueFamilyPropertyCount, nullptr );
+ queueFamilyProperties.resize( queueFamilyPropertyCount );
+ vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties2KHR*>( queueFamilyProperties.data() ) );
+ return queueFamilyProperties;
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void PhysicalDevice::getMemoryProperties2KHR( PhysicalDeviceMemoryProperties2KHR* pMemoryProperties ) const
+ {
+ vkGetPhysicalDeviceMemoryProperties2KHR( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceMemoryProperties2KHR*>( pMemoryProperties ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE PhysicalDeviceMemoryProperties2KHR PhysicalDevice::getMemoryProperties2KHR() const
+ {
+ PhysicalDeviceMemoryProperties2KHR memoryProperties;
+ vkGetPhysicalDeviceMemoryProperties2KHR( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceMemoryProperties2KHR*>( &memoryProperties ) );
+ return memoryProperties;
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void PhysicalDevice::getSparseImageFormatProperties2KHR( const PhysicalDeviceSparseImageFormatInfo2KHR* pFormatInfo, uint32_t* pPropertyCount, SparseImageFormatProperties2KHR* pProperties ) const
+ {
+ vkGetPhysicalDeviceSparseImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2KHR*>( pFormatInfo ), pPropertyCount, reinterpret_cast<VkSparseImageFormatProperties2KHR*>( pProperties ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE std::vector<SparseImageFormatProperties2KHR,Allocator> PhysicalDevice::getSparseImageFormatProperties2KHR( const PhysicalDeviceSparseImageFormatInfo2KHR & formatInfo ) const
+ {
+ std::vector<SparseImageFormatProperties2KHR,Allocator> properties;
+ uint32_t propertyCount;
+ vkGetPhysicalDeviceSparseImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2KHR*>( &formatInfo ), &propertyCount, nullptr );
+ properties.resize( propertyCount );
+ vkGetPhysicalDeviceSparseImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2KHR*>( &formatInfo ), &propertyCount, reinterpret_cast<VkSparseImageFormatProperties2KHR*>( properties.data() ) );
+ return properties;
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void PhysicalDevice::getExternalBufferPropertiesKHX( const PhysicalDeviceExternalBufferInfoKHX* pExternalBufferInfo, ExternalBufferPropertiesKHX* pExternalBufferProperties ) const
+ {
+ vkGetPhysicalDeviceExternalBufferPropertiesKHX( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalBufferInfoKHX*>( pExternalBufferInfo ), reinterpret_cast<VkExternalBufferPropertiesKHX*>( pExternalBufferProperties ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ExternalBufferPropertiesKHX PhysicalDevice::getExternalBufferPropertiesKHX( const PhysicalDeviceExternalBufferInfoKHX & externalBufferInfo ) const
+ {
+ ExternalBufferPropertiesKHX externalBufferProperties;
+ vkGetPhysicalDeviceExternalBufferPropertiesKHX( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalBufferInfoKHX*>( &externalBufferInfo ), reinterpret_cast<VkExternalBufferPropertiesKHX*>( &externalBufferProperties ) );
+ return externalBufferProperties;
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void PhysicalDevice::getExternalSemaphorePropertiesKHX( const PhysicalDeviceExternalSemaphoreInfoKHX* pExternalSemaphoreInfo, ExternalSemaphorePropertiesKHX* pExternalSemaphoreProperties ) const
+ {
+ vkGetPhysicalDeviceExternalSemaphorePropertiesKHX( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalSemaphoreInfoKHX*>( pExternalSemaphoreInfo ), reinterpret_cast<VkExternalSemaphorePropertiesKHX*>( pExternalSemaphoreProperties ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ExternalSemaphorePropertiesKHX PhysicalDevice::getExternalSemaphorePropertiesKHX( const PhysicalDeviceExternalSemaphoreInfoKHX & externalSemaphoreInfo ) const
+ {
+ ExternalSemaphorePropertiesKHX externalSemaphoreProperties;
+ vkGetPhysicalDeviceExternalSemaphorePropertiesKHX( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalSemaphoreInfoKHX*>( &externalSemaphoreInfo ), reinterpret_cast<VkExternalSemaphorePropertiesKHX*>( &externalSemaphoreProperties ) );
+ return externalSemaphoreProperties;
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE Result PhysicalDevice::releaseDisplayEXT( DisplayKHR display ) const
+ {
+ return static_cast<Result>( vkReleaseDisplayEXT( m_physicalDevice, static_cast<VkDisplayKHR>( display ) ) );
+ }
+#else
+ VULKAN_HPP_INLINE ResultValueType<void>::type PhysicalDevice::releaseDisplayEXT( DisplayKHR display ) const
+ {
+ Result result = static_cast<Result>( vkReleaseDisplayEXT( m_physicalDevice, static_cast<VkDisplayKHR>( display ) ) );
+ return createResultValue( result, "vk::PhysicalDevice::releaseDisplayEXT" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
+ VULKAN_HPP_INLINE Result PhysicalDevice::acquireXlibDisplayEXT( Display* dpy, DisplayKHR display ) const
+ {
+ return static_cast<Result>( vkAcquireXlibDisplayEXT( m_physicalDevice, dpy, static_cast<VkDisplayKHR>( display ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<Display>::type PhysicalDevice::acquireXlibDisplayEXT( DisplayKHR display ) const
+ {
+ Display dpy;
+ Result result = static_cast<Result>( vkAcquireXlibDisplayEXT( m_physicalDevice, &dpy, static_cast<VkDisplayKHR>( display ) ) );
+ return createResultValue( result, dpy, "vk::PhysicalDevice::acquireXlibDisplayEXT" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_XLIB_XRANDR_EXT*/
+
+#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
+ VULKAN_HPP_INLINE Result PhysicalDevice::getRandROutputDisplayEXT( Display* dpy, RROutput rrOutput, DisplayKHR* pDisplay ) const
+ {
+ return static_cast<Result>( vkGetRandROutputDisplayEXT( m_physicalDevice, dpy, rrOutput, reinterpret_cast<VkDisplayKHR*>( pDisplay ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<DisplayKHR>::type PhysicalDevice::getRandROutputDisplayEXT( Display & dpy, RROutput rrOutput ) const
+ {
+ DisplayKHR display;
+ Result result = static_cast<Result>( vkGetRandROutputDisplayEXT( m_physicalDevice, &dpy, rrOutput, reinterpret_cast<VkDisplayKHR*>( &display ) ) );
+ return createResultValue( result, display, "vk::PhysicalDevice::getRandROutputDisplayEXT" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_XLIB_XRANDR_EXT*/
+
+ VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceCapabilities2EXT( SurfaceKHR surface, SurfaceCapabilities2EXT* pSurfaceCapabilities ) const
+ {
+ return static_cast<Result>( vkGetPhysicalDeviceSurfaceCapabilities2EXT( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<VkSurfaceCapabilities2EXT*>( pSurfaceCapabilities ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<SurfaceCapabilities2EXT>::type PhysicalDevice::getSurfaceCapabilities2EXT( SurfaceKHR surface ) const
+ {
+ SurfaceCapabilities2EXT surfaceCapabilities;
+ Result result = static_cast<Result>( vkGetPhysicalDeviceSurfaceCapabilities2EXT( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<VkSurfaceCapabilities2EXT*>( &surfaceCapabilities ) ) );
+ return createResultValue( result, surfaceCapabilities, "vk::PhysicalDevice::getSurfaceCapabilities2EXT" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result PhysicalDevice::getPresentRectanglesKHX( SurfaceKHR surface, uint32_t* pRectCount, Rect2D* pRects ) const
+ {
+ return static_cast<Result>( vkGetPhysicalDevicePresentRectanglesKHX( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), pRectCount, reinterpret_cast<VkRect2D*>( pRects ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE typename ResultValueType<std::vector<Rect2D,Allocator>>::type PhysicalDevice::getPresentRectanglesKHX( SurfaceKHR surface ) const
+ {
+ std::vector<Rect2D,Allocator> rects;
+ uint32_t rectCount;
+ Result result;
+ do
+ {
+ result = static_cast<Result>( vkGetPhysicalDevicePresentRectanglesKHX( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), &rectCount, nullptr ) );
+ if ( ( result == Result::eSuccess ) && rectCount )
+ {
+ rects.resize( rectCount );
+ result = static_cast<Result>( vkGetPhysicalDevicePresentRectanglesKHX( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), &rectCount, reinterpret_cast<VkRect2D*>( rects.data() ) ) );
+ }
+ } while ( result == Result::eIncomplete );
+ assert( rectCount <= rects.size() );
+ rects.resize( rectCount );
+ return createResultValue( result, rects, "vk::PhysicalDevice::getPresentRectanglesKHX" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceCapabilities2KHR( const PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, SurfaceCapabilities2KHR* pSurfaceCapabilities ) const
+ {
+ return static_cast<Result>( vkGetPhysicalDeviceSurfaceCapabilities2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR*>( pSurfaceInfo ), reinterpret_cast<VkSurfaceCapabilities2KHR*>( pSurfaceCapabilities ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+#if 0 // LunarG 1.0.48 header update workaround
+ VULKAN_HPP_INLINE ResultValueType<SurfaceCapabilities2KHR>::type PhysicalDevice::getSurfaceCapabilities2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo ) const
+ {
+ SurfaceCapabilities2KHR surfaceCapabilities;
+ Result result = static_cast<Result>( vkGetPhysicalDeviceSurfaceCapabilities2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR*>( &surfaceInfo ), reinterpret_cast<VkSurfaceCapabilities2KHR*>( &surfaceCapabilities ) ) );
+ return createResultValue( result, surfaceCapabilities, "vk::PhysicalDevice::getSurfaceCapabilities2KHR" );
+ }
+#endif
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceFormats2KHR( const PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, SurfaceFormat2KHR* pSurfaceFormats ) const
+ {
+ return static_cast<Result>( vkGetPhysicalDeviceSurfaceFormats2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR*>( pSurfaceInfo ), pSurfaceFormatCount, reinterpret_cast<VkSurfaceFormat2KHR*>( pSurfaceFormats ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE typename ResultValueType<std::vector<SurfaceFormat2KHR,Allocator>>::type PhysicalDevice::getSurfaceFormats2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo ) const
+ {
+ std::vector<SurfaceFormat2KHR,Allocator> surfaceFormats;
+ uint32_t surfaceFormatCount;
+ Result result;
+ do
+ {
+ result = static_cast<Result>( vkGetPhysicalDeviceSurfaceFormats2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR*>( &surfaceInfo ), &surfaceFormatCount, nullptr ) );
+ if ( ( result == Result::eSuccess ) && surfaceFormatCount )
+ {
+ surfaceFormats.resize( surfaceFormatCount );
+ result = static_cast<Result>( vkGetPhysicalDeviceSurfaceFormats2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR*>( &surfaceInfo ), &surfaceFormatCount, reinterpret_cast<VkSurfaceFormat2KHR*>( surfaceFormats.data() ) ) );
+ }
+ } while ( result == Result::eIncomplete );
+ assert( surfaceFormatCount <= surfaceFormats.size() );
+ surfaceFormats.resize( surfaceFormatCount );
+ return createResultValue( result, surfaceFormats, "vk::PhysicalDevice::getSurfaceFormats2KHR" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ struct CmdProcessCommandsInfoNVX
+ {
+ CmdProcessCommandsInfoNVX( ObjectTableNVX objectTable_ = ObjectTableNVX(), IndirectCommandsLayoutNVX indirectCommandsLayout_ = IndirectCommandsLayoutNVX(), uint32_t indirectCommandsTokenCount_ = 0, const IndirectCommandsTokenNVX* pIndirectCommandsTokens_ = nullptr, uint32_t maxSequencesCount_ = 0, CommandBuffer targetCommandBuffer_ = CommandBuffer(), Buffer sequencesCountBuffer_ = Buffer(), DeviceSize sequencesCountOffset_ = 0, Buffer sequencesIndexBuffer_ = Buffer(), DeviceSize sequencesIndexOffset_ = 0 )
+ : sType( StructureType::eCmdProcessCommandsInfoNVX )
+ , pNext( nullptr )
+ , objectTable( objectTable_ )
+ , indirectCommandsLayout( indirectCommandsLayout_ )
+ , indirectCommandsTokenCount( indirectCommandsTokenCount_ )
+ , pIndirectCommandsTokens( pIndirectCommandsTokens_ )
+ , maxSequencesCount( maxSequencesCount_ )
+ , targetCommandBuffer( targetCommandBuffer_ )
+ , sequencesCountBuffer( sequencesCountBuffer_ )
+ , sequencesCountOffset( sequencesCountOffset_ )
+ , sequencesIndexBuffer( sequencesIndexBuffer_ )
+ , sequencesIndexOffset( sequencesIndexOffset_ )
+ {
+ }
+
+ CmdProcessCommandsInfoNVX( VkCmdProcessCommandsInfoNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(CmdProcessCommandsInfoNVX) );
+ }
+
+ CmdProcessCommandsInfoNVX& operator=( VkCmdProcessCommandsInfoNVX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(CmdProcessCommandsInfoNVX) );
+ return *this;
+ }
+
+ CmdProcessCommandsInfoNVX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ CmdProcessCommandsInfoNVX& setObjectTable( ObjectTableNVX objectTable_ )
+ {
+ objectTable = objectTable_;
+ return *this;
+ }
+
+ CmdProcessCommandsInfoNVX& setIndirectCommandsLayout( IndirectCommandsLayoutNVX indirectCommandsLayout_ )
+ {
+ indirectCommandsLayout = indirectCommandsLayout_;
+ return *this;
+ }
+
+ CmdProcessCommandsInfoNVX& setIndirectCommandsTokenCount( uint32_t indirectCommandsTokenCount_ )
+ {
+ indirectCommandsTokenCount = indirectCommandsTokenCount_;
+ return *this;
+ }
+
+ CmdProcessCommandsInfoNVX& setPIndirectCommandsTokens( const IndirectCommandsTokenNVX* pIndirectCommandsTokens_ )
+ {
+ pIndirectCommandsTokens = pIndirectCommandsTokens_;
+ return *this;
+ }
+
+ CmdProcessCommandsInfoNVX& setMaxSequencesCount( uint32_t maxSequencesCount_ )
+ {
+ maxSequencesCount = maxSequencesCount_;
+ return *this;
+ }
+
+ CmdProcessCommandsInfoNVX& setTargetCommandBuffer( CommandBuffer targetCommandBuffer_ )
+ {
+ targetCommandBuffer = targetCommandBuffer_;
+ return *this;
+ }
+
+ CmdProcessCommandsInfoNVX& setSequencesCountBuffer( Buffer sequencesCountBuffer_ )
+ {
+ sequencesCountBuffer = sequencesCountBuffer_;
+ return *this;
+ }
+
+ CmdProcessCommandsInfoNVX& setSequencesCountOffset( DeviceSize sequencesCountOffset_ )
+ {
+ sequencesCountOffset = sequencesCountOffset_;
+ return *this;
+ }
+
+ CmdProcessCommandsInfoNVX& setSequencesIndexBuffer( Buffer sequencesIndexBuffer_ )
+ {
+ sequencesIndexBuffer = sequencesIndexBuffer_;
+ return *this;
+ }
+
+ CmdProcessCommandsInfoNVX& setSequencesIndexOffset( DeviceSize sequencesIndexOffset_ )
+ {
+ sequencesIndexOffset = sequencesIndexOffset_;
+ return *this;
+ }
+
+ operator const VkCmdProcessCommandsInfoNVX&() const
+ {
+ return *reinterpret_cast<const VkCmdProcessCommandsInfoNVX*>(this);
+ }
+
+ bool operator==( CmdProcessCommandsInfoNVX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( objectTable == rhs.objectTable )
+ && ( indirectCommandsLayout == rhs.indirectCommandsLayout )
+ && ( indirectCommandsTokenCount == rhs.indirectCommandsTokenCount )
+ && ( pIndirectCommandsTokens == rhs.pIndirectCommandsTokens )
+ && ( maxSequencesCount == rhs.maxSequencesCount )
+ && ( targetCommandBuffer == rhs.targetCommandBuffer )
+ && ( sequencesCountBuffer == rhs.sequencesCountBuffer )
+ && ( sequencesCountOffset == rhs.sequencesCountOffset )
+ && ( sequencesIndexBuffer == rhs.sequencesIndexBuffer )
+ && ( sequencesIndexOffset == rhs.sequencesIndexOffset );
+ }
+
+ bool operator!=( CmdProcessCommandsInfoNVX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ ObjectTableNVX objectTable;
+ IndirectCommandsLayoutNVX indirectCommandsLayout;
+ uint32_t indirectCommandsTokenCount;
+ const IndirectCommandsTokenNVX* pIndirectCommandsTokens;
+ uint32_t maxSequencesCount;
+ CommandBuffer targetCommandBuffer;
+ Buffer sequencesCountBuffer;
+ DeviceSize sequencesCountOffset;
+ Buffer sequencesIndexBuffer;
+ DeviceSize sequencesIndexOffset;
+ };
+ static_assert( sizeof( CmdProcessCommandsInfoNVX ) == sizeof( VkCmdProcessCommandsInfoNVX ), "struct and wrapper have different size!" );
+
+ struct PhysicalDeviceGroupPropertiesKHX
+ {
+ operator const VkPhysicalDeviceGroupPropertiesKHX&() const
+ {
+ return *reinterpret_cast<const VkPhysicalDeviceGroupPropertiesKHX*>(this);
+ }
+
+ bool operator==( PhysicalDeviceGroupPropertiesKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( physicalDeviceCount == rhs.physicalDeviceCount )
+ && ( memcmp( physicalDevices, rhs.physicalDevices, VK_MAX_DEVICE_GROUP_SIZE_KHX * sizeof( PhysicalDevice ) ) == 0 )
+ && ( subsetAllocation == rhs.subsetAllocation );
+ }
+
+ bool operator!=( PhysicalDeviceGroupPropertiesKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ void* pNext;
+ uint32_t physicalDeviceCount;
+ PhysicalDevice physicalDevices[VK_MAX_DEVICE_GROUP_SIZE_KHX];
+ Bool32 subsetAllocation;
+ };
+ static_assert( sizeof( PhysicalDeviceGroupPropertiesKHX ) == sizeof( VkPhysicalDeviceGroupPropertiesKHX ), "struct and wrapper have different size!" );
+
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ class DebugReportCallbackEXTDeleter;
+ using UniqueDebugReportCallbackEXT = UniqueHandle<DebugReportCallbackEXT, DebugReportCallbackEXTDeleter>;
+ class SurfaceKHRDeleter;
+ using UniqueSurfaceKHR = UniqueHandle<SurfaceKHR, SurfaceKHRDeleter>;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+
+ class Instance
+ {
+ public:
+ Instance()
+ : m_instance(VK_NULL_HANDLE)
+ {}
+
+ Instance( std::nullptr_t )
+ : m_instance(VK_NULL_HANDLE)
+ {}
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT Instance(VkInstance instance)
+ : m_instance(instance)
+ {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+ Instance& operator=(VkInstance instance)
+ {
+ m_instance = instance;
+ return *this;
+ }
+#endif
+
+ Instance& operator=( std::nullptr_t )
+ {
+ m_instance = VK_NULL_HANDLE;
+ return *this;
+ }
+
+ bool operator==(Instance const &rhs) const
+ {
+ return m_instance == rhs.m_instance;
+ }
+
+ bool operator!=(Instance const &rhs) const
+ {
+ return m_instance != rhs.m_instance;
+ }
+
+ bool operator<(Instance const &rhs) const
+ {
+ return m_instance < rhs.m_instance;
+ }
+
+ void destroy( const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroy( Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result enumeratePhysicalDevices( uint32_t* pPhysicalDeviceCount, PhysicalDevice* pPhysicalDevices ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<PhysicalDevice>>
+ typename ResultValueType<std::vector<PhysicalDevice,Allocator>>::type enumeratePhysicalDevices() const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ PFN_vkVoidFunction getProcAddr( const char* pName ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ PFN_vkVoidFunction getProcAddr( const std::string & name ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+ Result createAndroidSurfaceKHR( const AndroidSurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<SurfaceKHR>::type createAndroidSurfaceKHR( const AndroidSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueSurfaceKHR createAndroidSurfaceKHRUnique( const AndroidSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_ANDROID_KHR*/
+
+ Result createDisplayPlaneSurfaceKHR( const DisplaySurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<SurfaceKHR>::type createDisplayPlaneSurfaceKHR( const DisplaySurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueSurfaceKHR createDisplayPlaneSurfaceKHRUnique( const DisplaySurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VK_USE_PLATFORM_MIR_KHR
+ Result createMirSurfaceKHR( const MirSurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<SurfaceKHR>::type createMirSurfaceKHR( const MirSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueSurfaceKHR createMirSurfaceKHRUnique( const MirSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_MIR_KHR*/
+
+ void destroySurfaceKHR( SurfaceKHR surface, const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroySurfaceKHR( SurfaceKHR surface, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VK_USE_PLATFORM_VI_NN
+ Result createViSurfaceNN( const ViSurfaceCreateInfoNN* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<SurfaceKHR>::type createViSurfaceNN( const ViSurfaceCreateInfoNN & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueSurfaceKHR createViSurfaceNNUnique( const ViSurfaceCreateInfoNN & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_VI_NN*/
+
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+ Result createWaylandSurfaceKHR( const WaylandSurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<SurfaceKHR>::type createWaylandSurfaceKHR( const WaylandSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueSurfaceKHR createWaylandSurfaceKHRUnique( const WaylandSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ Result createWin32SurfaceKHR( const Win32SurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<SurfaceKHR>::type createWin32SurfaceKHR( const Win32SurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueSurfaceKHR createWin32SurfaceKHRUnique( const Win32SurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_WIN32_KHR*/
+
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+ Result createXlibSurfaceKHR( const XlibSurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<SurfaceKHR>::type createXlibSurfaceKHR( const XlibSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueSurfaceKHR createXlibSurfaceKHRUnique( const XlibSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_XLIB_KHR*/
+
+#ifdef VK_USE_PLATFORM_XCB_KHR
+ Result createXcbSurfaceKHR( const XcbSurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<SurfaceKHR>::type createXcbSurfaceKHR( const XcbSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueSurfaceKHR createXcbSurfaceKHRUnique( const XcbSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_XCB_KHR*/
+
+ Result createDebugReportCallbackEXT( const DebugReportCallbackCreateInfoEXT* pCreateInfo, const AllocationCallbacks* pAllocator, DebugReportCallbackEXT* pCallback ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<DebugReportCallbackEXT>::type createDebugReportCallbackEXT( const DebugReportCallbackCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueDebugReportCallbackEXT createDebugReportCallbackEXTUnique( const DebugReportCallbackCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void destroyDebugReportCallbackEXT( DebugReportCallbackEXT callback, const AllocationCallbacks* pAllocator ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void destroyDebugReportCallbackEXT( DebugReportCallbackEXT callback, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ void debugReportMessageEXT( DebugReportFlagsEXT flags, DebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ void debugReportMessageEXT( DebugReportFlagsEXT flags, DebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const std::string & layerPrefix, const std::string & message ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ Result enumeratePhysicalDeviceGroupsKHX( uint32_t* pPhysicalDeviceGroupCount, PhysicalDeviceGroupPropertiesKHX* pPhysicalDeviceGroupProperties ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator = std::allocator<PhysicalDeviceGroupPropertiesKHX>>
+ typename ResultValueType<std::vector<PhysicalDeviceGroupPropertiesKHX,Allocator>>::type enumeratePhysicalDeviceGroupsKHX() const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VK_USE_PLATFORM_IOS_MVK
+ Result createIOSSurfaceMVK( const IOSSurfaceCreateInfoMVK* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<SurfaceKHR>::type createIOSSurfaceMVK( const IOSSurfaceCreateInfoMVK & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueSurfaceKHR createIOSSurfaceMVKUnique( const IOSSurfaceCreateInfoMVK & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_IOS_MVK*/
+
+#ifdef VK_USE_PLATFORM_MACOS_MVK
+ Result createMacOSSurfaceMVK( const MacOSSurfaceCreateInfoMVK* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface ) const;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<SurfaceKHR>::type createMacOSSurfaceMVK( const MacOSSurfaceCreateInfoMVK & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueSurfaceKHR createMacOSSurfaceMVKUnique( const MacOSSurfaceCreateInfoMVK & createInfo, Optional<const AllocationCallbacks> allocator = nullptr ) const;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_MACOS_MVK*/
+
+ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkInstance() const
+ {
+ return m_instance;
+ }
+
+ explicit operator bool() const
+ {
+ return m_instance != VK_NULL_HANDLE;
+ }
+
+ bool operator!() const
+ {
+ return m_instance == VK_NULL_HANDLE;
+ }
+
+ private:
+ VkInstance m_instance;
+ };
+ static_assert( sizeof( Instance ) == sizeof( VkInstance ), "handle and wrapper have different size!" );
+
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ class DebugReportCallbackEXTDeleter
+ {
+ public:
+ DebugReportCallbackEXTDeleter( Instance instance = Instance(), Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_instance( instance )
+ , m_allocator( allocator )
+ {}
+
+ void operator()( DebugReportCallbackEXT debugReportCallbackEXT )
+ {
+ m_instance.destroyDebugReportCallbackEXT( debugReportCallbackEXT, m_allocator );
+ }
+
+ private:
+ Instance m_instance;
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+
+ class SurfaceKHRDeleter
+ {
+ public:
+ SurfaceKHRDeleter( Instance instance = Instance(), Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_instance( instance )
+ , m_allocator( allocator )
+ {}
+
+ void operator()( SurfaceKHR surfaceKHR )
+ {
+ m_instance.destroySurfaceKHR( surfaceKHR, m_allocator );
+ }
+
+ private:
+ Instance m_instance;
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+
+ VULKAN_HPP_INLINE void Instance::destroy( const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroyInstance( m_instance, reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Instance::destroy( Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroyInstance( m_instance, reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE Result Instance::enumeratePhysicalDevices( uint32_t* pPhysicalDeviceCount, PhysicalDevice* pPhysicalDevices ) const
+ {
+ return static_cast<Result>( vkEnumeratePhysicalDevices( m_instance, pPhysicalDeviceCount, reinterpret_cast<VkPhysicalDevice*>( pPhysicalDevices ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE typename ResultValueType<std::vector<PhysicalDevice,Allocator>>::type Instance::enumeratePhysicalDevices() const
+ {
+ std::vector<PhysicalDevice,Allocator> physicalDevices;
+ uint32_t physicalDeviceCount;
+ Result result;
+ do
+ {
+ result = static_cast<Result>( vkEnumeratePhysicalDevices( m_instance, &physicalDeviceCount, nullptr ) );
+ if ( ( result == Result::eSuccess ) && physicalDeviceCount )
+ {
+ physicalDevices.resize( physicalDeviceCount );
+ result = static_cast<Result>( vkEnumeratePhysicalDevices( m_instance, &physicalDeviceCount, reinterpret_cast<VkPhysicalDevice*>( physicalDevices.data() ) ) );
+ }
+ } while ( result == Result::eIncomplete );
+ assert( physicalDeviceCount <= physicalDevices.size() );
+ physicalDevices.resize( physicalDeviceCount );
+ return createResultValue( result, physicalDevices, "vk::Instance::enumeratePhysicalDevices" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
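+
+ // A minimal sketch of the enhanced-mode path above, assuming a vk::Instance named
+ // instance has already been created (the variable name is a placeholder):
+ //
+ //   std::vector<vk::PhysicalDevice> physicalDevices = instance.enumeratePhysicalDevices();
+ //   for ( vk::PhysicalDevice const & pd : physicalDevices )
+ //   {
+ //     vk::PhysicalDeviceProperties properties = pd.getProperties();
+ //     // inspect properties.deviceName / properties.deviceType to choose an adapter
+ //   }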
+
+ VULKAN_HPP_INLINE PFN_vkVoidFunction Instance::getProcAddr( const char* pName ) const
+ {
+ return vkGetInstanceProcAddr( m_instance, pName );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE PFN_vkVoidFunction Instance::getProcAddr( const std::string & name ) const
+ {
+ return vkGetInstanceProcAddr( m_instance, name.c_str() );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+ VULKAN_HPP_INLINE Result Instance::createAndroidSurfaceKHR( const AndroidSurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface ) const
+ {
+ return static_cast<Result>( vkCreateAndroidSurfaceKHR( m_instance, reinterpret_cast<const VkAndroidSurfaceCreateInfoKHR*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSurfaceKHR*>( pSurface ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<SurfaceKHR>::type Instance::createAndroidSurfaceKHR( const AndroidSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SurfaceKHR surface;
+ Result result = static_cast<Result>( vkCreateAndroidSurfaceKHR( m_instance, reinterpret_cast<const VkAndroidSurfaceCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) );
+ return createResultValue( result, surface, "vk::Instance::createAndroidSurfaceKHR" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueSurfaceKHR Instance::createAndroidSurfaceKHRUnique( const AndroidSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SurfaceKHRDeleter deleter( *this, allocator );
+ return UniqueSurfaceKHR( createAndroidSurfaceKHR( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_ANDROID_KHR*/
+
+ VULKAN_HPP_INLINE Result Instance::createDisplayPlaneSurfaceKHR( const DisplaySurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface ) const
+ {
+ return static_cast<Result>( vkCreateDisplayPlaneSurfaceKHR( m_instance, reinterpret_cast<const VkDisplaySurfaceCreateInfoKHR*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSurfaceKHR*>( pSurface ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<SurfaceKHR>::type Instance::createDisplayPlaneSurfaceKHR( const DisplaySurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SurfaceKHR surface;
+ Result result = static_cast<Result>( vkCreateDisplayPlaneSurfaceKHR( m_instance, reinterpret_cast<const VkDisplaySurfaceCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) );
+ return createResultValue( result, surface, "vk::Instance::createDisplayPlaneSurfaceKHR" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueSurfaceKHR Instance::createDisplayPlaneSurfaceKHRUnique( const DisplaySurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SurfaceKHRDeleter deleter( *this, allocator );
+ return UniqueSurfaceKHR( createDisplayPlaneSurfaceKHR( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VK_USE_PLATFORM_MIR_KHR
+ VULKAN_HPP_INLINE Result Instance::createMirSurfaceKHR( const MirSurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface ) const
+ {
+ return static_cast<Result>( vkCreateMirSurfaceKHR( m_instance, reinterpret_cast<const VkMirSurfaceCreateInfoKHR*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSurfaceKHR*>( pSurface ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<SurfaceKHR>::type Instance::createMirSurfaceKHR( const MirSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SurfaceKHR surface;
+ Result result = static_cast<Result>( vkCreateMirSurfaceKHR( m_instance, reinterpret_cast<const VkMirSurfaceCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) );
+ return createResultValue( result, surface, "vk::Instance::createMirSurfaceKHR" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueSurfaceKHR Instance::createMirSurfaceKHRUnique( const MirSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SurfaceKHRDeleter deleter( *this, allocator );
+ return UniqueSurfaceKHR( createMirSurfaceKHR( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_MIR_KHR*/
+
+ VULKAN_HPP_INLINE void Instance::destroySurfaceKHR( SurfaceKHR surface, const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroySurfaceKHR( m_instance, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Instance::destroySurfaceKHR( SurfaceKHR surface, Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroySurfaceKHR( m_instance, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VK_USE_PLATFORM_VI_NN
+ VULKAN_HPP_INLINE Result Instance::createViSurfaceNN( const ViSurfaceCreateInfoNN* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface ) const
+ {
+ return static_cast<Result>( vkCreateViSurfaceNN( m_instance, reinterpret_cast<const VkViSurfaceCreateInfoNN*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSurfaceKHR*>( pSurface ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<SurfaceKHR>::type Instance::createViSurfaceNN( const ViSurfaceCreateInfoNN & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SurfaceKHR surface;
+ Result result = static_cast<Result>( vkCreateViSurfaceNN( m_instance, reinterpret_cast<const VkViSurfaceCreateInfoNN*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) );
+ return createResultValue( result, surface, "vk::Instance::createViSurfaceNN" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueSurfaceKHR Instance::createViSurfaceNNUnique( const ViSurfaceCreateInfoNN & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SurfaceKHRDeleter deleter( *this, allocator );
+ return UniqueSurfaceKHR( createViSurfaceNN( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_VI_NN*/
+
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+ VULKAN_HPP_INLINE Result Instance::createWaylandSurfaceKHR( const WaylandSurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface ) const
+ {
+ return static_cast<Result>( vkCreateWaylandSurfaceKHR( m_instance, reinterpret_cast<const VkWaylandSurfaceCreateInfoKHR*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSurfaceKHR*>( pSurface ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<SurfaceKHR>::type Instance::createWaylandSurfaceKHR( const WaylandSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SurfaceKHR surface;
+ Result result = static_cast<Result>( vkCreateWaylandSurfaceKHR( m_instance, reinterpret_cast<const VkWaylandSurfaceCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) );
+ return createResultValue( result, surface, "vk::Instance::createWaylandSurfaceKHR" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueSurfaceKHR Instance::createWaylandSurfaceKHRUnique( const WaylandSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SurfaceKHRDeleter deleter( *this, allocator );
+ return UniqueSurfaceKHR( createWaylandSurfaceKHR( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ VULKAN_HPP_INLINE Result Instance::createWin32SurfaceKHR( const Win32SurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface ) const
+ {
+ return static_cast<Result>( vkCreateWin32SurfaceKHR( m_instance, reinterpret_cast<const VkWin32SurfaceCreateInfoKHR*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSurfaceKHR*>( pSurface ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<SurfaceKHR>::type Instance::createWin32SurfaceKHR( const Win32SurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SurfaceKHR surface;
+ Result result = static_cast<Result>( vkCreateWin32SurfaceKHR( m_instance, reinterpret_cast<const VkWin32SurfaceCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) );
+ return createResultValue( result, surface, "vk::Instance::createWin32SurfaceKHR" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueSurfaceKHR Instance::createWin32SurfaceKHRUnique( const Win32SurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SurfaceKHRDeleter deleter( *this, allocator );
+ return UniqueSurfaceKHR( createWin32SurfaceKHR( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_WIN32_KHR*/
+
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+ VULKAN_HPP_INLINE Result Instance::createXlibSurfaceKHR( const XlibSurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface ) const
+ {
+ return static_cast<Result>( vkCreateXlibSurfaceKHR( m_instance, reinterpret_cast<const VkXlibSurfaceCreateInfoKHR*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSurfaceKHR*>( pSurface ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<SurfaceKHR>::type Instance::createXlibSurfaceKHR( const XlibSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SurfaceKHR surface;
+ Result result = static_cast<Result>( vkCreateXlibSurfaceKHR( m_instance, reinterpret_cast<const VkXlibSurfaceCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) );
+ return createResultValue( result, surface, "vk::Instance::createXlibSurfaceKHR" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueSurfaceKHR Instance::createXlibSurfaceKHRUnique( const XlibSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SurfaceKHRDeleter deleter( *this, allocator );
+ return UniqueSurfaceKHR( createXlibSurfaceKHR( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_XLIB_KHR*/
+
+#ifdef VK_USE_PLATFORM_XCB_KHR
+ VULKAN_HPP_INLINE Result Instance::createXcbSurfaceKHR( const XcbSurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface ) const
+ {
+ return static_cast<Result>( vkCreateXcbSurfaceKHR( m_instance, reinterpret_cast<const VkXcbSurfaceCreateInfoKHR*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSurfaceKHR*>( pSurface ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<SurfaceKHR>::type Instance::createXcbSurfaceKHR( const XcbSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SurfaceKHR surface;
+ Result result = static_cast<Result>( vkCreateXcbSurfaceKHR( m_instance, reinterpret_cast<const VkXcbSurfaceCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) );
+ return createResultValue( result, surface, "vk::Instance::createXcbSurfaceKHR" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueSurfaceKHR Instance::createXcbSurfaceKHRUnique( const XcbSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SurfaceKHRDeleter deleter( *this, allocator );
+ return UniqueSurfaceKHR( createXcbSurfaceKHR( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_XCB_KHR*/
+
+ VULKAN_HPP_INLINE Result Instance::createDebugReportCallbackEXT( const DebugReportCallbackCreateInfoEXT* pCreateInfo, const AllocationCallbacks* pAllocator, DebugReportCallbackEXT* pCallback ) const
+ {
+ return static_cast<Result>( vkCreateDebugReportCallbackEXT( m_instance, reinterpret_cast<const VkDebugReportCallbackCreateInfoEXT*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkDebugReportCallbackEXT*>( pCallback ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<DebugReportCallbackEXT>::type Instance::createDebugReportCallbackEXT( const DebugReportCallbackCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ DebugReportCallbackEXT callback;
+ Result result = static_cast<Result>( vkCreateDebugReportCallbackEXT( m_instance, reinterpret_cast<const VkDebugReportCallbackCreateInfoEXT*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDebugReportCallbackEXT*>( &callback ) ) );
+ return createResultValue( result, callback, "vk::Instance::createDebugReportCallbackEXT" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueDebugReportCallbackEXT Instance::createDebugReportCallbackEXTUnique( const DebugReportCallbackCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ DebugReportCallbackEXTDeleter deleter( *this, allocator );
+ return UniqueDebugReportCallbackEXT( createDebugReportCallbackEXT( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Instance::destroyDebugReportCallbackEXT( DebugReportCallbackEXT callback, const AllocationCallbacks* pAllocator ) const
+ {
+ vkDestroyDebugReportCallbackEXT( m_instance, static_cast<VkDebugReportCallbackEXT>( callback ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Instance::destroyDebugReportCallbackEXT( DebugReportCallbackEXT callback, Optional<const AllocationCallbacks> allocator ) const
+ {
+ vkDestroyDebugReportCallbackEXT( m_instance, static_cast<VkDebugReportCallbackEXT>( callback ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+ VULKAN_HPP_INLINE void Instance::debugReportMessageEXT( DebugReportFlagsEXT flags, DebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage ) const
+ {
+ vkDebugReportMessageEXT( m_instance, static_cast<VkDebugReportFlagsEXT>( flags ), static_cast<VkDebugReportObjectTypeEXT>( objectType ), object, location, messageCode, pLayerPrefix, pMessage );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE void Instance::debugReportMessageEXT( DebugReportFlagsEXT flags, DebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const std::string & layerPrefix, const std::string & message ) const
+ {
+#ifdef VULKAN_HPP_NO_EXCEPTIONS
+ assert( layerPrefix.size() == message.size() );
+#else
+ if ( layerPrefix.size() != message.size() )
+ {
+ throw std::logic_error( "vk::Instance::debugReportMessageEXT: layerPrefix.size() != message.size()" );
+ }
+#endif // VULKAN_HPP_NO_EXCEPTIONS
+ vkDebugReportMessageEXT( m_instance, static_cast<VkDebugReportFlagsEXT>( flags ), static_cast<VkDebugReportObjectTypeEXT>( objectType ), object, location, messageCode, layerPrefix.c_str(), message.c_str() );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
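+
+  // Usage sketch (illustrative only, not part of the generated header): installing a
+  // debug report callback through the wrappers above. The callback function and the
+  // 'instance' handle are assumed to be supplied by the application, and
+  // vkCreateDebugReportCallbackEXT is assumed to have been resolved (e.g. via
+  // vkGetInstanceProcAddr), since the loader does not export it directly.
+  //
+  //   static VKAPI_ATTR VkBool32 VKAPI_CALL myDebugCallback(
+  //     VkDebugReportFlagsEXT, VkDebugReportObjectTypeEXT, uint64_t, size_t,
+  //     int32_t, const char* pLayerPrefix, const char* pMessage, void* )
+  //   {
+  //     std::cerr << "[" << pLayerPrefix << "] " << pMessage << std::endl;
+  //     return VK_FALSE;
+  //   }
+  //
+  //   vk::DebugReportCallbackCreateInfoEXT info(
+  //     vk::DebugReportFlagBitsEXT::eError | vk::DebugReportFlagBitsEXT::eWarning,
+  //     myDebugCallback );
+  //   vk::UniqueDebugReportCallbackEXT callback =
+  //     instance.createDebugReportCallbackEXTUnique( info );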
+
+ VULKAN_HPP_INLINE Result Instance::enumeratePhysicalDeviceGroupsKHX( uint32_t* pPhysicalDeviceGroupCount, PhysicalDeviceGroupPropertiesKHX* pPhysicalDeviceGroupProperties ) const
+ {
+ return static_cast<Result>( vkEnumeratePhysicalDeviceGroupsKHX( m_instance, pPhysicalDeviceGroupCount, reinterpret_cast<VkPhysicalDeviceGroupPropertiesKHX*>( pPhysicalDeviceGroupProperties ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ template <typename Allocator>
+ VULKAN_HPP_INLINE typename ResultValueType<std::vector<PhysicalDeviceGroupPropertiesKHX,Allocator>>::type Instance::enumeratePhysicalDeviceGroupsKHX() const
+ {
+ std::vector<PhysicalDeviceGroupPropertiesKHX,Allocator> physicalDeviceGroupProperties;
+ uint32_t physicalDeviceGroupCount;
+ Result result;
+ do
+ {
+ result = static_cast<Result>( vkEnumeratePhysicalDeviceGroupsKHX( m_instance, &physicalDeviceGroupCount, nullptr ) );
+ if ( ( result == Result::eSuccess ) && physicalDeviceGroupCount )
+ {
+ physicalDeviceGroupProperties.resize( physicalDeviceGroupCount );
+ result = static_cast<Result>( vkEnumeratePhysicalDeviceGroupsKHX( m_instance, &physicalDeviceGroupCount, reinterpret_cast<VkPhysicalDeviceGroupPropertiesKHX*>( physicalDeviceGroupProperties.data() ) ) );
+ }
+ } while ( result == Result::eIncomplete );
+ assert( physicalDeviceGroupCount <= physicalDeviceGroupProperties.size() );
+ physicalDeviceGroupProperties.resize( physicalDeviceGroupCount );
+ return createResultValue( result, physicalDeviceGroupProperties, "vk::Instance::enumeratePhysicalDeviceGroupsKHX" );
+ }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
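+
+  // Usage sketch (illustrative only, not part of the generated header): the
+  // enhanced-mode overload above wraps the usual count/fill enumeration loop, so
+  // device groups can be queried in one call (KHX marks an experimental extension).
+  // This assumes the member declaration provides the usual default allocator
+  // template argument, as elsewhere in this header.
+  //
+  //   std::vector<vk::PhysicalDeviceGroupPropertiesKHX> groups =
+  //     instance.enumeratePhysicalDeviceGroupsKHX();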
+
+#ifdef VK_USE_PLATFORM_IOS_MVK
+ VULKAN_HPP_INLINE Result Instance::createIOSSurfaceMVK( const IOSSurfaceCreateInfoMVK* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface ) const
+ {
+ return static_cast<Result>( vkCreateIOSSurfaceMVK( m_instance, reinterpret_cast<const VkIOSSurfaceCreateInfoMVK*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSurfaceKHR*>( pSurface ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<SurfaceKHR>::type Instance::createIOSSurfaceMVK( const IOSSurfaceCreateInfoMVK & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SurfaceKHR surface;
+ Result result = static_cast<Result>( vkCreateIOSSurfaceMVK( m_instance, reinterpret_cast<const VkIOSSurfaceCreateInfoMVK*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) );
+ return createResultValue( result, surface, "vk::Instance::createIOSSurfaceMVK" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueSurfaceKHR Instance::createIOSSurfaceMVKUnique( const IOSSurfaceCreateInfoMVK & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SurfaceKHRDeleter deleter( *this, allocator );
+ return UniqueSurfaceKHR( createIOSSurfaceMVK( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_IOS_MVK*/
+
+#ifdef VK_USE_PLATFORM_MACOS_MVK
+ VULKAN_HPP_INLINE Result Instance::createMacOSSurfaceMVK( const MacOSSurfaceCreateInfoMVK* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface ) const
+ {
+ return static_cast<Result>( vkCreateMacOSSurfaceMVK( m_instance, reinterpret_cast<const VkMacOSSurfaceCreateInfoMVK*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSurfaceKHR*>( pSurface ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<SurfaceKHR>::type Instance::createMacOSSurfaceMVK( const MacOSSurfaceCreateInfoMVK & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SurfaceKHR surface;
+ Result result = static_cast<Result>( vkCreateMacOSSurfaceMVK( m_instance, reinterpret_cast<const VkMacOSSurfaceCreateInfoMVK*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) );
+ return createResultValue( result, surface, "vk::Instance::createMacOSSurfaceMVK" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueSurfaceKHR Instance::createMacOSSurfaceMVKUnique( const MacOSSurfaceCreateInfoMVK & createInfo, Optional<const AllocationCallbacks> allocator ) const
+ {
+ SurfaceKHRDeleter deleter( *this, allocator );
+ return UniqueSurfaceKHR( createMacOSSurfaceMVK( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_MACOS_MVK*/
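+
+  // Usage sketch (illustrative only, not part of the generated header): creating a
+  // presentation surface through the enhanced-mode wrappers above, shown for the
+  // Win32 path. 'instance' and the hInstance/hWnd window handles are assumed to come
+  // from the application; the other platforms follow the same pattern with their
+  // respective *SurfaceCreateInfo* structs.
+  //
+  //   vk::Win32SurfaceCreateInfoKHR surfaceInfo( vk::Win32SurfaceCreateFlagsKHR(), hInstance, hWnd );
+  //   vk::SurfaceKHR surface = instance.createWin32SurfaceKHR( surfaceInfo );
+  //   // or, with smart handles enabled:
+  //   vk::UniqueSurfaceKHR uniqueSurface = instance.createWin32SurfaceKHRUnique( surfaceInfo );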
+
+ struct DeviceGroupDeviceCreateInfoKHX
+ {
+ DeviceGroupDeviceCreateInfoKHX( uint32_t physicalDeviceCount_ = 0, const PhysicalDevice* pPhysicalDevices_ = nullptr )
+ : sType( StructureType::eDeviceGroupDeviceCreateInfoKHX )
+ , pNext( nullptr )
+ , physicalDeviceCount( physicalDeviceCount_ )
+ , pPhysicalDevices( pPhysicalDevices_ )
+ {
+ }
+
+ DeviceGroupDeviceCreateInfoKHX( VkDeviceGroupDeviceCreateInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DeviceGroupDeviceCreateInfoKHX) );
+ }
+
+ DeviceGroupDeviceCreateInfoKHX& operator=( VkDeviceGroupDeviceCreateInfoKHX const & rhs )
+ {
+ memcpy( this, &rhs, sizeof(DeviceGroupDeviceCreateInfoKHX) );
+ return *this;
+ }
+
+ DeviceGroupDeviceCreateInfoKHX& setPNext( const void* pNext_ )
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ DeviceGroupDeviceCreateInfoKHX& setPhysicalDeviceCount( uint32_t physicalDeviceCount_ )
+ {
+ physicalDeviceCount = physicalDeviceCount_;
+ return *this;
+ }
+
+ DeviceGroupDeviceCreateInfoKHX& setPPhysicalDevices( const PhysicalDevice* pPhysicalDevices_ )
+ {
+ pPhysicalDevices = pPhysicalDevices_;
+ return *this;
+ }
+
+ operator const VkDeviceGroupDeviceCreateInfoKHX&() const
+ {
+ return *reinterpret_cast<const VkDeviceGroupDeviceCreateInfoKHX*>(this);
+ }
+
+ bool operator==( DeviceGroupDeviceCreateInfoKHX const& rhs ) const
+ {
+ return ( sType == rhs.sType )
+ && ( pNext == rhs.pNext )
+ && ( physicalDeviceCount == rhs.physicalDeviceCount )
+ && ( pPhysicalDevices == rhs.pPhysicalDevices );
+ }
+
+ bool operator!=( DeviceGroupDeviceCreateInfoKHX const& rhs ) const
+ {
+ return !operator==( rhs );
+ }
+
+ private:
+ StructureType sType;
+
+ public:
+ const void* pNext;
+ uint32_t physicalDeviceCount;
+ const PhysicalDevice* pPhysicalDevices;
+ };
+ static_assert( sizeof( DeviceGroupDeviceCreateInfoKHX ) == sizeof( VkDeviceGroupDeviceCreateInfoKHX ), "struct and wrapper have different size!" );
+
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ class InstanceDeleter;
+ using UniqueInstance = UniqueHandle<Instance, InstanceDeleter>;
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+
+ Result createInstance( const InstanceCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Instance* pInstance );
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ ResultValueType<Instance>::type createInstance( const InstanceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr );
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ UniqueInstance createInstanceUnique( const InstanceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator = nullptr );
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ class InstanceDeleter
+ {
+ public:
+ InstanceDeleter( Optional<const AllocationCallbacks> allocator = nullptr )
+ : m_allocator( allocator )
+ {}
+
+ void operator()( Instance instance )
+ {
+ instance.destroy( m_allocator );
+ }
+
+ private:
+ Optional<const AllocationCallbacks> m_allocator;
+ };
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+
+ VULKAN_HPP_INLINE Result createInstance( const InstanceCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Instance* pInstance )
+ {
+ return static_cast<Result>( vkCreateInstance( reinterpret_cast<const VkInstanceCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkInstance*>( pInstance ) ) );
+ }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+ VULKAN_HPP_INLINE ResultValueType<Instance>::type createInstance( const InstanceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator )
+ {
+ Instance instance;
+ Result result = static_cast<Result>( vkCreateInstance( reinterpret_cast<const VkInstanceCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkInstance*>( &instance ) ) );
+ return createResultValue( result, instance, "vk::createInstance" );
+ }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+ VULKAN_HPP_INLINE UniqueInstance createInstanceUnique( const InstanceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator )
+ {
+ InstanceDeleter deleter( allocator );
+ return UniqueInstance( createInstance( createInfo, allocator ), deleter );
+ }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
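+
+  // Usage sketch (illustrative only, not part of the generated header): creating an
+  // instance with the free functions above. The application/engine names and versions
+  // are placeholders; with exceptions enabled, the enhanced-mode overloads return the
+  // created handle directly and throw on failure.
+  //
+  //   vk::ApplicationInfo appInfo( "MyApp", 1, "MyEngine", 1, VK_API_VERSION_1_0 );
+  //   vk::InstanceCreateInfo instanceInfo( vk::InstanceCreateFlags(), &appInfo );
+  //   vk::UniqueInstance instance = vk::createInstanceUnique( instanceInfo );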
+
+
+ VULKAN_HPP_INLINE std::string to_string(FramebufferCreateFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(FramebufferCreateFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(QueryPoolCreateFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(QueryPoolCreateFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(RenderPassCreateFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(RenderPassCreateFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(SamplerCreateFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(SamplerCreateFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineLayoutCreateFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineLayoutCreateFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineCacheCreateFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineCacheCreateFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineDepthStencilStateCreateFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineDepthStencilStateCreateFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineDynamicStateCreateFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineDynamicStateCreateFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineColorBlendStateCreateFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineColorBlendStateCreateFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineMultisampleStateCreateFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineMultisampleStateCreateFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineRasterizationStateCreateFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineRasterizationStateCreateFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineViewportStateCreateFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineViewportStateCreateFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineTessellationStateCreateFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineTessellationStateCreateFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineInputAssemblyStateCreateFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineInputAssemblyStateCreateFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineVertexInputStateCreateFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineVertexInputStateCreateFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineShaderStageCreateFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineShaderStageCreateFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(BufferViewCreateFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(BufferViewCreateFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(InstanceCreateFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(InstanceCreateFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DeviceCreateFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DeviceCreateFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DeviceQueueCreateFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DeviceQueueCreateFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ImageViewCreateFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ImageViewCreateFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(SemaphoreCreateFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(SemaphoreCreateFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ShaderModuleCreateFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ShaderModuleCreateFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(EventCreateFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(EventCreateFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(MemoryMapFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(MemoryMapFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DescriptorPoolResetFlagBits)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DescriptorPoolResetFlags)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DescriptorUpdateTemplateCreateFlagBitsKHR)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DescriptorUpdateTemplateCreateFlagsKHR)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DisplayModeCreateFlagBitsKHR)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DisplayModeCreateFlagsKHR)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DisplaySurfaceCreateFlagBitsKHR)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DisplaySurfaceCreateFlagsKHR)
+ {
+ return "{}";
+ }
+
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+ VULKAN_HPP_INLINE std::string to_string(AndroidSurfaceCreateFlagBitsKHR)
+ {
+ return "(void)";
+ }
+#endif /*VK_USE_PLATFORM_ANDROID_KHR*/
+
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+ VULKAN_HPP_INLINE std::string to_string(AndroidSurfaceCreateFlagsKHR)
+ {
+ return "{}";
+ }
+#endif /*VK_USE_PLATFORM_ANDROID_KHR*/
+
+#ifdef VK_USE_PLATFORM_MIR_KHR
+ VULKAN_HPP_INLINE std::string to_string(MirSurfaceCreateFlagBitsKHR)
+ {
+ return "(void)";
+ }
+#endif /*VK_USE_PLATFORM_MIR_KHR*/
+
+#ifdef VK_USE_PLATFORM_MIR_KHR
+ VULKAN_HPP_INLINE std::string to_string(MirSurfaceCreateFlagsKHR)
+ {
+ return "{}";
+ }
+#endif /*VK_USE_PLATFORM_MIR_KHR*/
+
+#ifdef VK_USE_PLATFORM_VI_NN
+ VULKAN_HPP_INLINE std::string to_string(ViSurfaceCreateFlagBitsNN)
+ {
+ return "(void)";
+ }
+#endif /*VK_USE_PLATFORM_VI_NN*/
+
+#ifdef VK_USE_PLATFORM_VI_NN
+ VULKAN_HPP_INLINE std::string to_string(ViSurfaceCreateFlagsNN)
+ {
+ return "{}";
+ }
+#endif /*VK_USE_PLATFORM_VI_NN*/
+
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+ VULKAN_HPP_INLINE std::string to_string(WaylandSurfaceCreateFlagBitsKHR)
+ {
+ return "(void)";
+ }
+#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/
+
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+ VULKAN_HPP_INLINE std::string to_string(WaylandSurfaceCreateFlagsKHR)
+ {
+ return "{}";
+ }
+#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ VULKAN_HPP_INLINE std::string to_string(Win32SurfaceCreateFlagBitsKHR)
+ {
+ return "(void)";
+ }
+#endif /*VK_USE_PLATFORM_WIN32_KHR*/
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ VULKAN_HPP_INLINE std::string to_string(Win32SurfaceCreateFlagsKHR)
+ {
+ return "{}";
+ }
+#endif /*VK_USE_PLATFORM_WIN32_KHR*/
+
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+ VULKAN_HPP_INLINE std::string to_string(XlibSurfaceCreateFlagBitsKHR)
+ {
+ return "(void)";
+ }
+#endif /*VK_USE_PLATFORM_XLIB_KHR*/
+
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+ VULKAN_HPP_INLINE std::string to_string(XlibSurfaceCreateFlagsKHR)
+ {
+ return "{}";
+ }
+#endif /*VK_USE_PLATFORM_XLIB_KHR*/
+
+#ifdef VK_USE_PLATFORM_XCB_KHR
+ VULKAN_HPP_INLINE std::string to_string(XcbSurfaceCreateFlagBitsKHR)
+ {
+ return "(void)";
+ }
+#endif /*VK_USE_PLATFORM_XCB_KHR*/
+
+#ifdef VK_USE_PLATFORM_XCB_KHR
+ VULKAN_HPP_INLINE std::string to_string(XcbSurfaceCreateFlagsKHR)
+ {
+ return "{}";
+ }
+#endif /*VK_USE_PLATFORM_XCB_KHR*/
+
+#ifdef VK_USE_PLATFORM_IOS_MVK
+ VULKAN_HPP_INLINE std::string to_string(IOSSurfaceCreateFlagBitsMVK)
+ {
+ return "(void)";
+ }
+#endif /*VK_USE_PLATFORM_IOS_MVK*/
+
+#ifdef VK_USE_PLATFORM_IOS_MVK
+ VULKAN_HPP_INLINE std::string to_string(IOSSurfaceCreateFlagsMVK)
+ {
+ return "{}";
+ }
+#endif /*VK_USE_PLATFORM_IOS_MVK*/
+
+#ifdef VK_USE_PLATFORM_MACOS_MVK
+ VULKAN_HPP_INLINE std::string to_string(MacOSSurfaceCreateFlagBitsMVK)
+ {
+ return "(void)";
+ }
+#endif /*VK_USE_PLATFORM_MACOS_MVK*/
+
+#ifdef VK_USE_PLATFORM_MACOS_MVK
+ VULKAN_HPP_INLINE std::string to_string(MacOSSurfaceCreateFlagsMVK)
+ {
+ return "{}";
+ }
+#endif /*VK_USE_PLATFORM_MACOS_MVK*/
+
+ VULKAN_HPP_INLINE std::string to_string(CommandPoolTrimFlagBitsKHR)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(CommandPoolTrimFlagsKHR)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineViewportSwizzleStateCreateFlagBitsNV)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineViewportSwizzleStateCreateFlagsNV)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineDiscardRectangleStateCreateFlagBitsEXT)
+ {
+ return "(void)";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineDiscardRectangleStateCreateFlagsEXT)
+ {
+ return "{}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ImageLayout value)
+ {
+ switch (value)
+ {
+ case ImageLayout::eUndefined: return "Undefined";
+ case ImageLayout::eGeneral: return "General";
+ case ImageLayout::eColorAttachmentOptimal: return "ColorAttachmentOptimal";
+ case ImageLayout::eDepthStencilAttachmentOptimal: return "DepthStencilAttachmentOptimal";
+ case ImageLayout::eDepthStencilReadOnlyOptimal: return "DepthStencilReadOnlyOptimal";
+ case ImageLayout::eShaderReadOnlyOptimal: return "ShaderReadOnlyOptimal";
+ case ImageLayout::eTransferSrcOptimal: return "TransferSrcOptimal";
+ case ImageLayout::eTransferDstOptimal: return "TransferDstOptimal";
+ case ImageLayout::ePreinitialized: return "Preinitialized";
+ case ImageLayout::ePresentSrcKHR: return "PresentSrcKHR";
+ case ImageLayout::eSharedPresentKHR: return "SharedPresentKHR";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(AttachmentLoadOp value)
+ {
+ switch (value)
+ {
+ case AttachmentLoadOp::eLoad: return "Load";
+ case AttachmentLoadOp::eClear: return "Clear";
+ case AttachmentLoadOp::eDontCare: return "DontCare";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(AttachmentStoreOp value)
+ {
+ switch (value)
+ {
+ case AttachmentStoreOp::eStore: return "Store";
+ case AttachmentStoreOp::eDontCare: return "DontCare";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ImageType value)
+ {
+ switch (value)
+ {
+ case ImageType::e1D: return "1D";
+ case ImageType::e2D: return "2D";
+ case ImageType::e3D: return "3D";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ImageTiling value)
+ {
+ switch (value)
+ {
+ case ImageTiling::eOptimal: return "Optimal";
+ case ImageTiling::eLinear: return "Linear";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ImageViewType value)
+ {
+ switch (value)
+ {
+ case ImageViewType::e1D: return "1D";
+ case ImageViewType::e2D: return "2D";
+ case ImageViewType::e3D: return "3D";
+ case ImageViewType::eCube: return "Cube";
+ case ImageViewType::e1DArray: return "1DArray";
+ case ImageViewType::e2DArray: return "2DArray";
+ case ImageViewType::eCubeArray: return "CubeArray";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(CommandBufferLevel value)
+ {
+ switch (value)
+ {
+ case CommandBufferLevel::ePrimary: return "Primary";
+ case CommandBufferLevel::eSecondary: return "Secondary";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ComponentSwizzle value)
+ {
+ switch (value)
+ {
+ case ComponentSwizzle::eIdentity: return "Identity";
+ case ComponentSwizzle::eZero: return "Zero";
+ case ComponentSwizzle::eOne: return "One";
+ case ComponentSwizzle::eR: return "R";
+ case ComponentSwizzle::eG: return "G";
+ case ComponentSwizzle::eB: return "B";
+ case ComponentSwizzle::eA: return "A";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DescriptorType value)
+ {
+ switch (value)
+ {
+ case DescriptorType::eSampler: return "Sampler";
+ case DescriptorType::eCombinedImageSampler: return "CombinedImageSampler";
+ case DescriptorType::eSampledImage: return "SampledImage";
+ case DescriptorType::eStorageImage: return "StorageImage";
+ case DescriptorType::eUniformTexelBuffer: return "UniformTexelBuffer";
+ case DescriptorType::eStorageTexelBuffer: return "StorageTexelBuffer";
+ case DescriptorType::eUniformBuffer: return "UniformBuffer";
+ case DescriptorType::eStorageBuffer: return "StorageBuffer";
+ case DescriptorType::eUniformBufferDynamic: return "UniformBufferDynamic";
+ case DescriptorType::eStorageBufferDynamic: return "StorageBufferDynamic";
+ case DescriptorType::eInputAttachment: return "InputAttachment";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(QueryType value)
+ {
+ switch (value)
+ {
+ case QueryType::eOcclusion: return "Occlusion";
+ case QueryType::ePipelineStatistics: return "PipelineStatistics";
+ case QueryType::eTimestamp: return "Timestamp";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(BorderColor value)
+ {
+ switch (value)
+ {
+ case BorderColor::eFloatTransparentBlack: return "FloatTransparentBlack";
+ case BorderColor::eIntTransparentBlack: return "IntTransparentBlack";
+ case BorderColor::eFloatOpaqueBlack: return "FloatOpaqueBlack";
+ case BorderColor::eIntOpaqueBlack: return "IntOpaqueBlack";
+ case BorderColor::eFloatOpaqueWhite: return "FloatOpaqueWhite";
+ case BorderColor::eIntOpaqueWhite: return "IntOpaqueWhite";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineBindPoint value)
+ {
+ switch (value)
+ {
+ case PipelineBindPoint::eGraphics: return "Graphics";
+ case PipelineBindPoint::eCompute: return "Compute";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineCacheHeaderVersion value)
+ {
+ switch (value)
+ {
+ case PipelineCacheHeaderVersion::eOne: return "One";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PrimitiveTopology value)
+ {
+ switch (value)
+ {
+ case PrimitiveTopology::ePointList: return "PointList";
+ case PrimitiveTopology::eLineList: return "LineList";
+ case PrimitiveTopology::eLineStrip: return "LineStrip";
+ case PrimitiveTopology::eTriangleList: return "TriangleList";
+ case PrimitiveTopology::eTriangleStrip: return "TriangleStrip";
+ case PrimitiveTopology::eTriangleFan: return "TriangleFan";
+ case PrimitiveTopology::eLineListWithAdjacency: return "LineListWithAdjacency";
+ case PrimitiveTopology::eLineStripWithAdjacency: return "LineStripWithAdjacency";
+ case PrimitiveTopology::eTriangleListWithAdjacency: return "TriangleListWithAdjacency";
+ case PrimitiveTopology::eTriangleStripWithAdjacency: return "TriangleStripWithAdjacency";
+ case PrimitiveTopology::ePatchList: return "PatchList";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(SharingMode value)
+ {
+ switch (value)
+ {
+ case SharingMode::eExclusive: return "Exclusive";
+ case SharingMode::eConcurrent: return "Concurrent";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(IndexType value)
+ {
+ switch (value)
+ {
+ case IndexType::eUint16: return "Uint16";
+ case IndexType::eUint32: return "Uint32";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(Filter value)
+ {
+ switch (value)
+ {
+ case Filter::eNearest: return "Nearest";
+ case Filter::eLinear: return "Linear";
+ case Filter::eCubicIMG: return "CubicIMG";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(SamplerMipmapMode value)
+ {
+ switch (value)
+ {
+ case SamplerMipmapMode::eNearest: return "Nearest";
+ case SamplerMipmapMode::eLinear: return "Linear";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(SamplerAddressMode value)
+ {
+ switch (value)
+ {
+ case SamplerAddressMode::eRepeat: return "Repeat";
+ case SamplerAddressMode::eMirroredRepeat: return "MirroredRepeat";
+ case SamplerAddressMode::eClampToEdge: return "ClampToEdge";
+ case SamplerAddressMode::eClampToBorder: return "ClampToBorder";
+ case SamplerAddressMode::eMirrorClampToEdge: return "MirrorClampToEdge";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(CompareOp value)
+ {
+ switch (value)
+ {
+ case CompareOp::eNever: return "Never";
+ case CompareOp::eLess: return "Less";
+ case CompareOp::eEqual: return "Equal";
+ case CompareOp::eLessOrEqual: return "LessOrEqual";
+ case CompareOp::eGreater: return "Greater";
+ case CompareOp::eNotEqual: return "NotEqual";
+ case CompareOp::eGreaterOrEqual: return "GreaterOrEqual";
+ case CompareOp::eAlways: return "Always";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PolygonMode value)
+ {
+ switch (value)
+ {
+ case PolygonMode::eFill: return "Fill";
+ case PolygonMode::eLine: return "Line";
+ case PolygonMode::ePoint: return "Point";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(CullModeFlagBits value)
+ {
+ switch (value)
+ {
+ case CullModeFlagBits::eNone: return "None";
+ case CullModeFlagBits::eFront: return "Front";
+ case CullModeFlagBits::eBack: return "Back";
+ case CullModeFlagBits::eFrontAndBack: return "FrontAndBack";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(CullModeFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & CullModeFlagBits::eNone) result += "None | ";
+ if (value & CullModeFlagBits::eFront) result += "Front | ";
+ if (value & CullModeFlagBits::eBack) result += "Back | ";
+ if (value & CullModeFlagBits::eFrontAndBack) result += "FrontAndBack | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
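+
+  // Usage sketch (illustrative only, not part of the generated header): the to_string
+  // helpers defined here are convenient for logging; an enum value prints its
+  // enumerator name and a flags value prints the set bits.
+  //
+  //   std::cout << vk::to_string( vk::CullModeFlagBits::eBack )                        // "Back"
+  //             << " " << vk::to_string( vk::CullModeFlags( vk::CullModeFlagBits::eBack ) )  // "{Back}"
+  //             << std::endl;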
+
+ VULKAN_HPP_INLINE std::string to_string(FrontFace value)
+ {
+ switch (value)
+ {
+ case FrontFace::eCounterClockwise: return "CounterClockwise";
+ case FrontFace::eClockwise: return "Clockwise";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(BlendFactor value)
+ {
+ switch (value)
+ {
+ case BlendFactor::eZero: return "Zero";
+ case BlendFactor::eOne: return "One";
+ case BlendFactor::eSrcColor: return "SrcColor";
+ case BlendFactor::eOneMinusSrcColor: return "OneMinusSrcColor";
+ case BlendFactor::eDstColor: return "DstColor";
+ case BlendFactor::eOneMinusDstColor: return "OneMinusDstColor";
+ case BlendFactor::eSrcAlpha: return "SrcAlpha";
+ case BlendFactor::eOneMinusSrcAlpha: return "OneMinusSrcAlpha";
+ case BlendFactor::eDstAlpha: return "DstAlpha";
+ case BlendFactor::eOneMinusDstAlpha: return "OneMinusDstAlpha";
+ case BlendFactor::eConstantColor: return "ConstantColor";
+ case BlendFactor::eOneMinusConstantColor: return "OneMinusConstantColor";
+ case BlendFactor::eConstantAlpha: return "ConstantAlpha";
+ case BlendFactor::eOneMinusConstantAlpha: return "OneMinusConstantAlpha";
+ case BlendFactor::eSrcAlphaSaturate: return "SrcAlphaSaturate";
+ case BlendFactor::eSrc1Color: return "Src1Color";
+ case BlendFactor::eOneMinusSrc1Color: return "OneMinusSrc1Color";
+ case BlendFactor::eSrc1Alpha: return "Src1Alpha";
+ case BlendFactor::eOneMinusSrc1Alpha: return "OneMinusSrc1Alpha";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(BlendOp value)
+ {
+ switch (value)
+ {
+ case BlendOp::eAdd: return "Add";
+ case BlendOp::eSubtract: return "Subtract";
+ case BlendOp::eReverseSubtract: return "ReverseSubtract";
+ case BlendOp::eMin: return "Min";
+ case BlendOp::eMax: return "Max";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(StencilOp value)
+ {
+ switch (value)
+ {
+ case StencilOp::eKeep: return "Keep";
+ case StencilOp::eZero: return "Zero";
+ case StencilOp::eReplace: return "Replace";
+ case StencilOp::eIncrementAndClamp: return "IncrementAndClamp";
+ case StencilOp::eDecrementAndClamp: return "DecrementAndClamp";
+ case StencilOp::eInvert: return "Invert";
+ case StencilOp::eIncrementAndWrap: return "IncrementAndWrap";
+ case StencilOp::eDecrementAndWrap: return "DecrementAndWrap";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(LogicOp value)
+ {
+ switch (value)
+ {
+ case LogicOp::eClear: return "Clear";
+ case LogicOp::eAnd: return "And";
+ case LogicOp::eAndReverse: return "AndReverse";
+ case LogicOp::eCopy: return "Copy";
+ case LogicOp::eAndInverted: return "AndInverted";
+ case LogicOp::eNoOp: return "NoOp";
+ case LogicOp::eXor: return "Xor";
+ case LogicOp::eOr: return "Or";
+ case LogicOp::eNor: return "Nor";
+ case LogicOp::eEquivalent: return "Equivalent";
+ case LogicOp::eInvert: return "Invert";
+ case LogicOp::eOrReverse: return "OrReverse";
+ case LogicOp::eCopyInverted: return "CopyInverted";
+ case LogicOp::eOrInverted: return "OrInverted";
+ case LogicOp::eNand: return "Nand";
+ case LogicOp::eSet: return "Set";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(InternalAllocationType value)
+ {
+ switch (value)
+ {
+ case InternalAllocationType::eExecutable: return "Executable";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(SystemAllocationScope value)
+ {
+ switch (value)
+ {
+ case SystemAllocationScope::eCommand: return "Command";
+ case SystemAllocationScope::eObject: return "Object";
+ case SystemAllocationScope::eCache: return "Cache";
+ case SystemAllocationScope::eDevice: return "Device";
+ case SystemAllocationScope::eInstance: return "Instance";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PhysicalDeviceType value)
+ {
+ switch (value)
+ {
+ case PhysicalDeviceType::eOther: return "Other";
+ case PhysicalDeviceType::eIntegratedGpu: return "IntegratedGpu";
+ case PhysicalDeviceType::eDiscreteGpu: return "DiscreteGpu";
+ case PhysicalDeviceType::eVirtualGpu: return "VirtualGpu";
+ case PhysicalDeviceType::eCpu: return "Cpu";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(VertexInputRate value)
+ {
+ switch (value)
+ {
+ case VertexInputRate::eVertex: return "Vertex";
+ case VertexInputRate::eInstance: return "Instance";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(Format value)
+ {
+ switch (value)
+ {
+ case Format::eUndefined: return "Undefined";
+ case Format::eR4G4UnormPack8: return "R4G4UnormPack8";
+ case Format::eR4G4B4A4UnormPack16: return "R4G4B4A4UnormPack16";
+ case Format::eB4G4R4A4UnormPack16: return "B4G4R4A4UnormPack16";
+ case Format::eR5G6B5UnormPack16: return "R5G6B5UnormPack16";
+ case Format::eB5G6R5UnormPack16: return "B5G6R5UnormPack16";
+ case Format::eR5G5B5A1UnormPack16: return "R5G5B5A1UnormPack16";
+ case Format::eB5G5R5A1UnormPack16: return "B5G5R5A1UnormPack16";
+ case Format::eA1R5G5B5UnormPack16: return "A1R5G5B5UnormPack16";
+ case Format::eR8Unorm: return "R8Unorm";
+ case Format::eR8Snorm: return "R8Snorm";
+ case Format::eR8Uscaled: return "R8Uscaled";
+ case Format::eR8Sscaled: return "R8Sscaled";
+ case Format::eR8Uint: return "R8Uint";
+ case Format::eR8Sint: return "R8Sint";
+ case Format::eR8Srgb: return "R8Srgb";
+ case Format::eR8G8Unorm: return "R8G8Unorm";
+ case Format::eR8G8Snorm: return "R8G8Snorm";
+ case Format::eR8G8Uscaled: return "R8G8Uscaled";
+ case Format::eR8G8Sscaled: return "R8G8Sscaled";
+ case Format::eR8G8Uint: return "R8G8Uint";
+ case Format::eR8G8Sint: return "R8G8Sint";
+ case Format::eR8G8Srgb: return "R8G8Srgb";
+ case Format::eR8G8B8Unorm: return "R8G8B8Unorm";
+ case Format::eR8G8B8Snorm: return "R8G8B8Snorm";
+ case Format::eR8G8B8Uscaled: return "R8G8B8Uscaled";
+ case Format::eR8G8B8Sscaled: return "R8G8B8Sscaled";
+ case Format::eR8G8B8Uint: return "R8G8B8Uint";
+ case Format::eR8G8B8Sint: return "R8G8B8Sint";
+ case Format::eR8G8B8Srgb: return "R8G8B8Srgb";
+ case Format::eB8G8R8Unorm: return "B8G8R8Unorm";
+ case Format::eB8G8R8Snorm: return "B8G8R8Snorm";
+ case Format::eB8G8R8Uscaled: return "B8G8R8Uscaled";
+ case Format::eB8G8R8Sscaled: return "B8G8R8Sscaled";
+ case Format::eB8G8R8Uint: return "B8G8R8Uint";
+ case Format::eB8G8R8Sint: return "B8G8R8Sint";
+ case Format::eB8G8R8Srgb: return "B8G8R8Srgb";
+ case Format::eR8G8B8A8Unorm: return "R8G8B8A8Unorm";
+ case Format::eR8G8B8A8Snorm: return "R8G8B8A8Snorm";
+ case Format::eR8G8B8A8Uscaled: return "R8G8B8A8Uscaled";
+ case Format::eR8G8B8A8Sscaled: return "R8G8B8A8Sscaled";
+ case Format::eR8G8B8A8Uint: return "R8G8B8A8Uint";
+ case Format::eR8G8B8A8Sint: return "R8G8B8A8Sint";
+ case Format::eR8G8B8A8Srgb: return "R8G8B8A8Srgb";
+ case Format::eB8G8R8A8Unorm: return "B8G8R8A8Unorm";
+ case Format::eB8G8R8A8Snorm: return "B8G8R8A8Snorm";
+ case Format::eB8G8R8A8Uscaled: return "B8G8R8A8Uscaled";
+ case Format::eB8G8R8A8Sscaled: return "B8G8R8A8Sscaled";
+ case Format::eB8G8R8A8Uint: return "B8G8R8A8Uint";
+ case Format::eB8G8R8A8Sint: return "B8G8R8A8Sint";
+ case Format::eB8G8R8A8Srgb: return "B8G8R8A8Srgb";
+ case Format::eA8B8G8R8UnormPack32: return "A8B8G8R8UnormPack32";
+ case Format::eA8B8G8R8SnormPack32: return "A8B8G8R8SnormPack32";
+ case Format::eA8B8G8R8UscaledPack32: return "A8B8G8R8UscaledPack32";
+ case Format::eA8B8G8R8SscaledPack32: return "A8B8G8R8SscaledPack32";
+ case Format::eA8B8G8R8UintPack32: return "A8B8G8R8UintPack32";
+ case Format::eA8B8G8R8SintPack32: return "A8B8G8R8SintPack32";
+ case Format::eA8B8G8R8SrgbPack32: return "A8B8G8R8SrgbPack32";
+ case Format::eA2R10G10B10UnormPack32: return "A2R10G10B10UnormPack32";
+ case Format::eA2R10G10B10SnormPack32: return "A2R10G10B10SnormPack32";
+ case Format::eA2R10G10B10UscaledPack32: return "A2R10G10B10UscaledPack32";
+ case Format::eA2R10G10B10SscaledPack32: return "A2R10G10B10SscaledPack32";
+ case Format::eA2R10G10B10UintPack32: return "A2R10G10B10UintPack32";
+ case Format::eA2R10G10B10SintPack32: return "A2R10G10B10SintPack32";
+ case Format::eA2B10G10R10UnormPack32: return "A2B10G10R10UnormPack32";
+ case Format::eA2B10G10R10SnormPack32: return "A2B10G10R10SnormPack32";
+ case Format::eA2B10G10R10UscaledPack32: return "A2B10G10R10UscaledPack32";
+ case Format::eA2B10G10R10SscaledPack32: return "A2B10G10R10SscaledPack32";
+ case Format::eA2B10G10R10UintPack32: return "A2B10G10R10UintPack32";
+ case Format::eA2B10G10R10SintPack32: return "A2B10G10R10SintPack32";
+ case Format::eR16Unorm: return "R16Unorm";
+ case Format::eR16Snorm: return "R16Snorm";
+ case Format::eR16Uscaled: return "R16Uscaled";
+ case Format::eR16Sscaled: return "R16Sscaled";
+ case Format::eR16Uint: return "R16Uint";
+ case Format::eR16Sint: return "R16Sint";
+ case Format::eR16Sfloat: return "R16Sfloat";
+ case Format::eR16G16Unorm: return "R16G16Unorm";
+ case Format::eR16G16Snorm: return "R16G16Snorm";
+ case Format::eR16G16Uscaled: return "R16G16Uscaled";
+ case Format::eR16G16Sscaled: return "R16G16Sscaled";
+ case Format::eR16G16Uint: return "R16G16Uint";
+ case Format::eR16G16Sint: return "R16G16Sint";
+ case Format::eR16G16Sfloat: return "R16G16Sfloat";
+ case Format::eR16G16B16Unorm: return "R16G16B16Unorm";
+ case Format::eR16G16B16Snorm: return "R16G16B16Snorm";
+ case Format::eR16G16B16Uscaled: return "R16G16B16Uscaled";
+ case Format::eR16G16B16Sscaled: return "R16G16B16Sscaled";
+ case Format::eR16G16B16Uint: return "R16G16B16Uint";
+ case Format::eR16G16B16Sint: return "R16G16B16Sint";
+ case Format::eR16G16B16Sfloat: return "R16G16B16Sfloat";
+ case Format::eR16G16B16A16Unorm: return "R16G16B16A16Unorm";
+ case Format::eR16G16B16A16Snorm: return "R16G16B16A16Snorm";
+ case Format::eR16G16B16A16Uscaled: return "R16G16B16A16Uscaled";
+ case Format::eR16G16B16A16Sscaled: return "R16G16B16A16Sscaled";
+ case Format::eR16G16B16A16Uint: return "R16G16B16A16Uint";
+ case Format::eR16G16B16A16Sint: return "R16G16B16A16Sint";
+ case Format::eR16G16B16A16Sfloat: return "R16G16B16A16Sfloat";
+ case Format::eR32Uint: return "R32Uint";
+ case Format::eR32Sint: return "R32Sint";
+ case Format::eR32Sfloat: return "R32Sfloat";
+ case Format::eR32G32Uint: return "R32G32Uint";
+ case Format::eR32G32Sint: return "R32G32Sint";
+ case Format::eR32G32Sfloat: return "R32G32Sfloat";
+ case Format::eR32G32B32Uint: return "R32G32B32Uint";
+ case Format::eR32G32B32Sint: return "R32G32B32Sint";
+ case Format::eR32G32B32Sfloat: return "R32G32B32Sfloat";
+ case Format::eR32G32B32A32Uint: return "R32G32B32A32Uint";
+ case Format::eR32G32B32A32Sint: return "R32G32B32A32Sint";
+ case Format::eR32G32B32A32Sfloat: return "R32G32B32A32Sfloat";
+ case Format::eR64Uint: return "R64Uint";
+ case Format::eR64Sint: return "R64Sint";
+ case Format::eR64Sfloat: return "R64Sfloat";
+ case Format::eR64G64Uint: return "R64G64Uint";
+ case Format::eR64G64Sint: return "R64G64Sint";
+ case Format::eR64G64Sfloat: return "R64G64Sfloat";
+ case Format::eR64G64B64Uint: return "R64G64B64Uint";
+ case Format::eR64G64B64Sint: return "R64G64B64Sint";
+ case Format::eR64G64B64Sfloat: return "R64G64B64Sfloat";
+ case Format::eR64G64B64A64Uint: return "R64G64B64A64Uint";
+ case Format::eR64G64B64A64Sint: return "R64G64B64A64Sint";
+ case Format::eR64G64B64A64Sfloat: return "R64G64B64A64Sfloat";
+ case Format::eB10G11R11UfloatPack32: return "B10G11R11UfloatPack32";
+ case Format::eE5B9G9R9UfloatPack32: return "E5B9G9R9UfloatPack32";
+ case Format::eD16Unorm: return "D16Unorm";
+ case Format::eX8D24UnormPack32: return "X8D24UnormPack32";
+ case Format::eD32Sfloat: return "D32Sfloat";
+ case Format::eS8Uint: return "S8Uint";
+ case Format::eD16UnormS8Uint: return "D16UnormS8Uint";
+ case Format::eD24UnormS8Uint: return "D24UnormS8Uint";
+ case Format::eD32SfloatS8Uint: return "D32SfloatS8Uint";
+ case Format::eBc1RgbUnormBlock: return "Bc1RgbUnormBlock";
+ case Format::eBc1RgbSrgbBlock: return "Bc1RgbSrgbBlock";
+ case Format::eBc1RgbaUnormBlock: return "Bc1RgbaUnormBlock";
+ case Format::eBc1RgbaSrgbBlock: return "Bc1RgbaSrgbBlock";
+ case Format::eBc2UnormBlock: return "Bc2UnormBlock";
+ case Format::eBc2SrgbBlock: return "Bc2SrgbBlock";
+ case Format::eBc3UnormBlock: return "Bc3UnormBlock";
+ case Format::eBc3SrgbBlock: return "Bc3SrgbBlock";
+ case Format::eBc4UnormBlock: return "Bc4UnormBlock";
+ case Format::eBc4SnormBlock: return "Bc4SnormBlock";
+ case Format::eBc5UnormBlock: return "Bc5UnormBlock";
+ case Format::eBc5SnormBlock: return "Bc5SnormBlock";
+ case Format::eBc6HUfloatBlock: return "Bc6HUfloatBlock";
+ case Format::eBc6HSfloatBlock: return "Bc6HSfloatBlock";
+ case Format::eBc7UnormBlock: return "Bc7UnormBlock";
+ case Format::eBc7SrgbBlock: return "Bc7SrgbBlock";
+ case Format::eEtc2R8G8B8UnormBlock: return "Etc2R8G8B8UnormBlock";
+ case Format::eEtc2R8G8B8SrgbBlock: return "Etc2R8G8B8SrgbBlock";
+ case Format::eEtc2R8G8B8A1UnormBlock: return "Etc2R8G8B8A1UnormBlock";
+ case Format::eEtc2R8G8B8A1SrgbBlock: return "Etc2R8G8B8A1SrgbBlock";
+ case Format::eEtc2R8G8B8A8UnormBlock: return "Etc2R8G8B8A8UnormBlock";
+ case Format::eEtc2R8G8B8A8SrgbBlock: return "Etc2R8G8B8A8SrgbBlock";
+ case Format::eEacR11UnormBlock: return "EacR11UnormBlock";
+ case Format::eEacR11SnormBlock: return "EacR11SnormBlock";
+ case Format::eEacR11G11UnormBlock: return "EacR11G11UnormBlock";
+ case Format::eEacR11G11SnormBlock: return "EacR11G11SnormBlock";
+ case Format::eAstc4x4UnormBlock: return "Astc4x4UnormBlock";
+ case Format::eAstc4x4SrgbBlock: return "Astc4x4SrgbBlock";
+ case Format::eAstc5x4UnormBlock: return "Astc5x4UnormBlock";
+ case Format::eAstc5x4SrgbBlock: return "Astc5x4SrgbBlock";
+ case Format::eAstc5x5UnormBlock: return "Astc5x5UnormBlock";
+ case Format::eAstc5x5SrgbBlock: return "Astc5x5SrgbBlock";
+ case Format::eAstc6x5UnormBlock: return "Astc6x5UnormBlock";
+ case Format::eAstc6x5SrgbBlock: return "Astc6x5SrgbBlock";
+ case Format::eAstc6x6UnormBlock: return "Astc6x6UnormBlock";
+ case Format::eAstc6x6SrgbBlock: return "Astc6x6SrgbBlock";
+ case Format::eAstc8x5UnormBlock: return "Astc8x5UnormBlock";
+ case Format::eAstc8x5SrgbBlock: return "Astc8x5SrgbBlock";
+ case Format::eAstc8x6UnormBlock: return "Astc8x6UnormBlock";
+ case Format::eAstc8x6SrgbBlock: return "Astc8x6SrgbBlock";
+ case Format::eAstc8x8UnormBlock: return "Astc8x8UnormBlock";
+ case Format::eAstc8x8SrgbBlock: return "Astc8x8SrgbBlock";
+ case Format::eAstc10x5UnormBlock: return "Astc10x5UnormBlock";
+ case Format::eAstc10x5SrgbBlock: return "Astc10x5SrgbBlock";
+ case Format::eAstc10x6UnormBlock: return "Astc10x6UnormBlock";
+ case Format::eAstc10x6SrgbBlock: return "Astc10x6SrgbBlock";
+ case Format::eAstc10x8UnormBlock: return "Astc10x8UnormBlock";
+ case Format::eAstc10x8SrgbBlock: return "Astc10x8SrgbBlock";
+ case Format::eAstc10x10UnormBlock: return "Astc10x10UnormBlock";
+ case Format::eAstc10x10SrgbBlock: return "Astc10x10SrgbBlock";
+ case Format::eAstc12x10UnormBlock: return "Astc12x10UnormBlock";
+ case Format::eAstc12x10SrgbBlock: return "Astc12x10SrgbBlock";
+ case Format::eAstc12x12UnormBlock: return "Astc12x12UnormBlock";
+ case Format::eAstc12x12SrgbBlock: return "Astc12x12SrgbBlock";
+ case Format::ePvrtc12BppUnormBlockIMG: return "Pvrtc12BppUnormBlockIMG";
+ case Format::ePvrtc14BppUnormBlockIMG: return "Pvrtc14BppUnormBlockIMG";
+ case Format::ePvrtc22BppUnormBlockIMG: return "Pvrtc22BppUnormBlockIMG";
+ case Format::ePvrtc24BppUnormBlockIMG: return "Pvrtc24BppUnormBlockIMG";
+ case Format::ePvrtc12BppSrgbBlockIMG: return "Pvrtc12BppSrgbBlockIMG";
+ case Format::ePvrtc14BppSrgbBlockIMG: return "Pvrtc14BppSrgbBlockIMG";
+ case Format::ePvrtc22BppSrgbBlockIMG: return "Pvrtc22BppSrgbBlockIMG";
+ case Format::ePvrtc24BppSrgbBlockIMG: return "Pvrtc24BppSrgbBlockIMG";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(StructureType value)
+ {
+ switch (value)
+ {
+ case StructureType::eApplicationInfo: return "ApplicationInfo";
+ case StructureType::eInstanceCreateInfo: return "InstanceCreateInfo";
+ case StructureType::eDeviceQueueCreateInfo: return "DeviceQueueCreateInfo";
+ case StructureType::eDeviceCreateInfo: return "DeviceCreateInfo";
+ case StructureType::eSubmitInfo: return "SubmitInfo";
+ case StructureType::eMemoryAllocateInfo: return "MemoryAllocateInfo";
+ case StructureType::eMappedMemoryRange: return "MappedMemoryRange";
+ case StructureType::eBindSparseInfo: return "BindSparseInfo";
+ case StructureType::eFenceCreateInfo: return "FenceCreateInfo";
+ case StructureType::eSemaphoreCreateInfo: return "SemaphoreCreateInfo";
+ case StructureType::eEventCreateInfo: return "EventCreateInfo";
+ case StructureType::eQueryPoolCreateInfo: return "QueryPoolCreateInfo";
+ case StructureType::eBufferCreateInfo: return "BufferCreateInfo";
+ case StructureType::eBufferViewCreateInfo: return "BufferViewCreateInfo";
+ case StructureType::eImageCreateInfo: return "ImageCreateInfo";
+ case StructureType::eImageViewCreateInfo: return "ImageViewCreateInfo";
+ case StructureType::eShaderModuleCreateInfo: return "ShaderModuleCreateInfo";
+ case StructureType::ePipelineCacheCreateInfo: return "PipelineCacheCreateInfo";
+ case StructureType::ePipelineShaderStageCreateInfo: return "PipelineShaderStageCreateInfo";
+ case StructureType::ePipelineVertexInputStateCreateInfo: return "PipelineVertexInputStateCreateInfo";
+ case StructureType::ePipelineInputAssemblyStateCreateInfo: return "PipelineInputAssemblyStateCreateInfo";
+ case StructureType::ePipelineTessellationStateCreateInfo: return "PipelineTessellationStateCreateInfo";
+ case StructureType::ePipelineViewportStateCreateInfo: return "PipelineViewportStateCreateInfo";
+ case StructureType::ePipelineRasterizationStateCreateInfo: return "PipelineRasterizationStateCreateInfo";
+ case StructureType::ePipelineMultisampleStateCreateInfo: return "PipelineMultisampleStateCreateInfo";
+ case StructureType::ePipelineDepthStencilStateCreateInfo: return "PipelineDepthStencilStateCreateInfo";
+ case StructureType::ePipelineColorBlendStateCreateInfo: return "PipelineColorBlendStateCreateInfo";
+ case StructureType::ePipelineDynamicStateCreateInfo: return "PipelineDynamicStateCreateInfo";
+ case StructureType::eGraphicsPipelineCreateInfo: return "GraphicsPipelineCreateInfo";
+ case StructureType::eComputePipelineCreateInfo: return "ComputePipelineCreateInfo";
+ case StructureType::ePipelineLayoutCreateInfo: return "PipelineLayoutCreateInfo";
+ case StructureType::eSamplerCreateInfo: return "SamplerCreateInfo";
+ case StructureType::eDescriptorSetLayoutCreateInfo: return "DescriptorSetLayoutCreateInfo";
+ case StructureType::eDescriptorPoolCreateInfo: return "DescriptorPoolCreateInfo";
+ case StructureType::eDescriptorSetAllocateInfo: return "DescriptorSetAllocateInfo";
+ case StructureType::eWriteDescriptorSet: return "WriteDescriptorSet";
+ case StructureType::eCopyDescriptorSet: return "CopyDescriptorSet";
+ case StructureType::eFramebufferCreateInfo: return "FramebufferCreateInfo";
+ case StructureType::eRenderPassCreateInfo: return "RenderPassCreateInfo";
+ case StructureType::eCommandPoolCreateInfo: return "CommandPoolCreateInfo";
+ case StructureType::eCommandBufferAllocateInfo: return "CommandBufferAllocateInfo";
+ case StructureType::eCommandBufferInheritanceInfo: return "CommandBufferInheritanceInfo";
+ case StructureType::eCommandBufferBeginInfo: return "CommandBufferBeginInfo";
+ case StructureType::eRenderPassBeginInfo: return "RenderPassBeginInfo";
+ case StructureType::eBufferMemoryBarrier: return "BufferMemoryBarrier";
+ case StructureType::eImageMemoryBarrier: return "ImageMemoryBarrier";
+ case StructureType::eMemoryBarrier: return "MemoryBarrier";
+ case StructureType::eLoaderInstanceCreateInfo: return "LoaderInstanceCreateInfo";
+ case StructureType::eLoaderDeviceCreateInfo: return "LoaderDeviceCreateInfo";
+ case StructureType::eSwapchainCreateInfoKHR: return "SwapchainCreateInfoKHR";
+ case StructureType::ePresentInfoKHR: return "PresentInfoKHR";
+ case StructureType::eDisplayModeCreateInfoKHR: return "DisplayModeCreateInfoKHR";
+ case StructureType::eDisplaySurfaceCreateInfoKHR: return "DisplaySurfaceCreateInfoKHR";
+ case StructureType::eDisplayPresentInfoKHR: return "DisplayPresentInfoKHR";
+ case StructureType::eXlibSurfaceCreateInfoKHR: return "XlibSurfaceCreateInfoKHR";
+ case StructureType::eXcbSurfaceCreateInfoKHR: return "XcbSurfaceCreateInfoKHR";
+ case StructureType::eWaylandSurfaceCreateInfoKHR: return "WaylandSurfaceCreateInfoKHR";
+ case StructureType::eMirSurfaceCreateInfoKHR: return "MirSurfaceCreateInfoKHR";
+ case StructureType::eAndroidSurfaceCreateInfoKHR: return "AndroidSurfaceCreateInfoKHR";
+ case StructureType::eWin32SurfaceCreateInfoKHR: return "Win32SurfaceCreateInfoKHR";
+ case StructureType::eDebugReportCallbackCreateInfoEXT: return "DebugReportCallbackCreateInfoEXT";
+ case StructureType::ePipelineRasterizationStateRasterizationOrderAMD: return "PipelineRasterizationStateRasterizationOrderAMD";
+ case StructureType::eDebugMarkerObjectNameInfoEXT: return "DebugMarkerObjectNameInfoEXT";
+ case StructureType::eDebugMarkerObjectTagInfoEXT: return "DebugMarkerObjectTagInfoEXT";
+ case StructureType::eDebugMarkerMarkerInfoEXT: return "DebugMarkerMarkerInfoEXT";
+ case StructureType::eDedicatedAllocationImageCreateInfoNV: return "DedicatedAllocationImageCreateInfoNV";
+ case StructureType::eDedicatedAllocationBufferCreateInfoNV: return "DedicatedAllocationBufferCreateInfoNV";
+ case StructureType::eDedicatedAllocationMemoryAllocateInfoNV: return "DedicatedAllocationMemoryAllocateInfoNV";
+ case StructureType::eRenderPassMultiviewCreateInfoKHX: return "RenderPassMultiviewCreateInfoKHX";
+ case StructureType::ePhysicalDeviceMultiviewFeaturesKHX: return "PhysicalDeviceMultiviewFeaturesKHX";
+ case StructureType::ePhysicalDeviceMultiviewPropertiesKHX: return "PhysicalDeviceMultiviewPropertiesKHX";
+ case StructureType::eExternalMemoryImageCreateInfoNV: return "ExternalMemoryImageCreateInfoNV";
+ case StructureType::eExportMemoryAllocateInfoNV: return "ExportMemoryAllocateInfoNV";
+ case StructureType::eImportMemoryWin32HandleInfoNV: return "ImportMemoryWin32HandleInfoNV";
+ case StructureType::eExportMemoryWin32HandleInfoNV: return "ExportMemoryWin32HandleInfoNV";
+ case StructureType::eWin32KeyedMutexAcquireReleaseInfoNV: return "Win32KeyedMutexAcquireReleaseInfoNV";
+ case StructureType::ePhysicalDeviceFeatures2KHR: return "PhysicalDeviceFeatures2KHR";
+ case StructureType::ePhysicalDeviceProperties2KHR: return "PhysicalDeviceProperties2KHR";
+ case StructureType::eFormatProperties2KHR: return "FormatProperties2KHR";
+ case StructureType::eImageFormatProperties2KHR: return "ImageFormatProperties2KHR";
+ case StructureType::ePhysicalDeviceImageFormatInfo2KHR: return "PhysicalDeviceImageFormatInfo2KHR";
+ case StructureType::eQueueFamilyProperties2KHR: return "QueueFamilyProperties2KHR";
+ case StructureType::ePhysicalDeviceMemoryProperties2KHR: return "PhysicalDeviceMemoryProperties2KHR";
+ case StructureType::eSparseImageFormatProperties2KHR: return "SparseImageFormatProperties2KHR";
+ case StructureType::ePhysicalDeviceSparseImageFormatInfo2KHR: return "PhysicalDeviceSparseImageFormatInfo2KHR";
+ case StructureType::eMemoryAllocateFlagsInfoKHX: return "MemoryAllocateFlagsInfoKHX";
+ case StructureType::eBindBufferMemoryInfoKHX: return "BindBufferMemoryInfoKHX";
+ case StructureType::eBindImageMemoryInfoKHX: return "BindImageMemoryInfoKHX";
+ case StructureType::eDeviceGroupRenderPassBeginInfoKHX: return "DeviceGroupRenderPassBeginInfoKHX";
+ case StructureType::eDeviceGroupCommandBufferBeginInfoKHX: return "DeviceGroupCommandBufferBeginInfoKHX";
+ case StructureType::eDeviceGroupSubmitInfoKHX: return "DeviceGroupSubmitInfoKHX";
+ case StructureType::eDeviceGroupBindSparseInfoKHX: return "DeviceGroupBindSparseInfoKHX";
+ case StructureType::eDeviceGroupPresentCapabilitiesKHX: return "DeviceGroupPresentCapabilitiesKHX";
+ case StructureType::eImageSwapchainCreateInfoKHX: return "ImageSwapchainCreateInfoKHX";
+ case StructureType::eBindImageMemorySwapchainInfoKHX: return "BindImageMemorySwapchainInfoKHX";
+ case StructureType::eAcquireNextImageInfoKHX: return "AcquireNextImageInfoKHX";
+ case StructureType::eDeviceGroupPresentInfoKHX: return "DeviceGroupPresentInfoKHX";
+ case StructureType::eDeviceGroupSwapchainCreateInfoKHX: return "DeviceGroupSwapchainCreateInfoKHX";
+ case StructureType::eValidationFlagsEXT: return "ValidationFlagsEXT";
+ case StructureType::eViSurfaceCreateInfoNN: return "ViSurfaceCreateInfoNN";
+ case StructureType::ePhysicalDeviceGroupPropertiesKHX: return "PhysicalDeviceGroupPropertiesKHX";
+ case StructureType::eDeviceGroupDeviceCreateInfoKHX: return "DeviceGroupDeviceCreateInfoKHX";
+ case StructureType::ePhysicalDeviceExternalImageFormatInfoKHX: return "PhysicalDeviceExternalImageFormatInfoKHX";
+ case StructureType::eExternalImageFormatPropertiesKHX: return "ExternalImageFormatPropertiesKHX";
+ case StructureType::ePhysicalDeviceExternalBufferInfoKHX: return "PhysicalDeviceExternalBufferInfoKHX";
+ case StructureType::eExternalBufferPropertiesKHX: return "ExternalBufferPropertiesKHX";
+ case StructureType::ePhysicalDeviceIdPropertiesKHX: return "PhysicalDeviceIdPropertiesKHX";
+ case StructureType::eExternalMemoryBufferCreateInfoKHX: return "ExternalMemoryBufferCreateInfoKHX";
+ case StructureType::eExternalMemoryImageCreateInfoKHX: return "ExternalMemoryImageCreateInfoKHX";
+ case StructureType::eExportMemoryAllocateInfoKHX: return "ExportMemoryAllocateInfoKHX";
+ case StructureType::eImportMemoryWin32HandleInfoKHX: return "ImportMemoryWin32HandleInfoKHX";
+ case StructureType::eExportMemoryWin32HandleInfoKHX: return "ExportMemoryWin32HandleInfoKHX";
+ case StructureType::eMemoryWin32HandlePropertiesKHX: return "MemoryWin32HandlePropertiesKHX";
+ case StructureType::eImportMemoryFdInfoKHX: return "ImportMemoryFdInfoKHX";
+ case StructureType::eMemoryFdPropertiesKHX: return "MemoryFdPropertiesKHX";
+ case StructureType::eWin32KeyedMutexAcquireReleaseInfoKHX: return "Win32KeyedMutexAcquireReleaseInfoKHX";
+ case StructureType::ePhysicalDeviceExternalSemaphoreInfoKHX: return "PhysicalDeviceExternalSemaphoreInfoKHX";
+ case StructureType::eExternalSemaphorePropertiesKHX: return "ExternalSemaphorePropertiesKHX";
+ case StructureType::eExportSemaphoreCreateInfoKHX: return "ExportSemaphoreCreateInfoKHX";
+ case StructureType::eImportSemaphoreWin32HandleInfoKHX: return "ImportSemaphoreWin32HandleInfoKHX";
+ case StructureType::eExportSemaphoreWin32HandleInfoKHX: return "ExportSemaphoreWin32HandleInfoKHX";
+ case StructureType::eD3D12FenceSubmitInfoKHX: return "D3D12FenceSubmitInfoKHX";
+ case StructureType::eImportSemaphoreFdInfoKHX: return "ImportSemaphoreFdInfoKHX";
+ case StructureType::ePhysicalDevicePushDescriptorPropertiesKHR: return "PhysicalDevicePushDescriptorPropertiesKHR";
+ case StructureType::ePresentRegionsKHR: return "PresentRegionsKHR";
+ case StructureType::eDescriptorUpdateTemplateCreateInfoKHR: return "DescriptorUpdateTemplateCreateInfoKHR";
+ case StructureType::eObjectTableCreateInfoNVX: return "ObjectTableCreateInfoNVX";
+ case StructureType::eIndirectCommandsLayoutCreateInfoNVX: return "IndirectCommandsLayoutCreateInfoNVX";
+ case StructureType::eCmdProcessCommandsInfoNVX: return "CmdProcessCommandsInfoNVX";
+ case StructureType::eCmdReserveSpaceForCommandsInfoNVX: return "CmdReserveSpaceForCommandsInfoNVX";
+ case StructureType::eDeviceGeneratedCommandsLimitsNVX: return "DeviceGeneratedCommandsLimitsNVX";
+ case StructureType::eDeviceGeneratedCommandsFeaturesNVX: return "DeviceGeneratedCommandsFeaturesNVX";
+ case StructureType::ePipelineViewportWScalingStateCreateInfoNV: return "PipelineViewportWScalingStateCreateInfoNV";
+ case StructureType::eSurfaceCapabilities2EXT: return "SurfaceCapabilities2EXT";
+ case StructureType::eDisplayPowerInfoEXT: return "DisplayPowerInfoEXT";
+ case StructureType::eDeviceEventInfoEXT: return "DeviceEventInfoEXT";
+ case StructureType::eDisplayEventInfoEXT: return "DisplayEventInfoEXT";
+ case StructureType::eSwapchainCounterCreateInfoEXT: return "SwapchainCounterCreateInfoEXT";
+ case StructureType::ePresentTimesInfoGOOGLE: return "PresentTimesInfoGOOGLE";
+ case StructureType::ePhysicalDeviceMultiviewPerViewAttributesPropertiesNVX: return "PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX";
+ case StructureType::ePipelineViewportSwizzleStateCreateInfoNV: return "PipelineViewportSwizzleStateCreateInfoNV";
+ case StructureType::ePhysicalDeviceDiscardRectanglePropertiesEXT: return "PhysicalDeviceDiscardRectanglePropertiesEXT";
+ case StructureType::ePipelineDiscardRectangleStateCreateInfoEXT: return "PipelineDiscardRectangleStateCreateInfoEXT";
+ case StructureType::eHdrMetadataEXT: return "HdrMetadataEXT";
+ case StructureType::eSharedPresentSurfaceCapabilitiesKHR: return "SharedPresentSurfaceCapabilitiesKHR";
+ case StructureType::ePhysicalDeviceSurfaceInfo2KHR: return "PhysicalDeviceSurfaceInfo2KHR";
+ case StructureType::eSurfaceCapabilities2KHR: return "SurfaceCapabilities2KHR";
+ case StructureType::eSurfaceFormat2KHR: return "SurfaceFormat2KHR";
+ case StructureType::eIosSurfaceCreateInfoMVK: return "IosSurfaceCreateInfoMVK";
+ case StructureType::eMacosSurfaceCreateInfoMVK: return "MacosSurfaceCreateInfoMVK";
+ default: return "invalid";
+ }
+ }
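+
+  // Illustrative note: every enum overload in this header falls back to "invalid" for values
+  // outside the known set, e.g. to_string(static_cast<StructureType>(-1)) yields "invalid".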
+
+ VULKAN_HPP_INLINE std::string to_string(SubpassContents value)
+ {
+ switch (value)
+ {
+ case SubpassContents::eInline: return "Inline";
+ case SubpassContents::eSecondaryCommandBuffers: return "SecondaryCommandBuffers";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DynamicState value)
+ {
+ switch (value)
+ {
+ case DynamicState::eViewport: return "Viewport";
+ case DynamicState::eScissor: return "Scissor";
+ case DynamicState::eLineWidth: return "LineWidth";
+ case DynamicState::eDepthBias: return "DepthBias";
+ case DynamicState::eBlendConstants: return "BlendConstants";
+ case DynamicState::eDepthBounds: return "DepthBounds";
+ case DynamicState::eStencilCompareMask: return "StencilCompareMask";
+ case DynamicState::eStencilWriteMask: return "StencilWriteMask";
+ case DynamicState::eStencilReference: return "StencilReference";
+ case DynamicState::eViewportWScalingNV: return "ViewportWScalingNV";
+ case DynamicState::eDiscardRectangleEXT: return "DiscardRectangleEXT";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DescriptorUpdateTemplateTypeKHR value)
+ {
+ switch (value)
+ {
+ case DescriptorUpdateTemplateTypeKHR::eDescriptorSet: return "DescriptorSet";
+ case DescriptorUpdateTemplateTypeKHR::ePushDescriptors: return "PushDescriptors";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ObjectType value)
+ {
+ switch (value)
+ {
+ case ObjectType::eUnknown: return "Unknown";
+ case ObjectType::eInstance: return "Instance";
+ case ObjectType::ePhysicalDevice: return "PhysicalDevice";
+ case ObjectType::eDevice: return "Device";
+ case ObjectType::eQueue: return "Queue";
+ case ObjectType::eSemaphore: return "Semaphore";
+ case ObjectType::eCommandBuffer: return "CommandBuffer";
+ case ObjectType::eFence: return "Fence";
+ case ObjectType::eDeviceMemory: return "DeviceMemory";
+ case ObjectType::eBuffer: return "Buffer";
+ case ObjectType::eImage: return "Image";
+ case ObjectType::eEvent: return "Event";
+ case ObjectType::eQueryPool: return "QueryPool";
+ case ObjectType::eBufferView: return "BufferView";
+ case ObjectType::eImageView: return "ImageView";
+ case ObjectType::eShaderModule: return "ShaderModule";
+ case ObjectType::ePipelineCache: return "PipelineCache";
+ case ObjectType::ePipelineLayout: return "PipelineLayout";
+ case ObjectType::eRenderPass: return "RenderPass";
+ case ObjectType::ePipeline: return "Pipeline";
+ case ObjectType::eDescriptorSetLayout: return "DescriptorSetLayout";
+ case ObjectType::eSampler: return "Sampler";
+ case ObjectType::eDescriptorPool: return "DescriptorPool";
+ case ObjectType::eDescriptorSet: return "DescriptorSet";
+ case ObjectType::eFramebuffer: return "Framebuffer";
+ case ObjectType::eCommandPool: return "CommandPool";
+ case ObjectType::eSurfaceKHR: return "SurfaceKHR";
+ case ObjectType::eSwapchainKHR: return "SwapchainKHR";
+ case ObjectType::eDisplayKHR: return "DisplayKHR";
+ case ObjectType::eDisplayModeKHR: return "DisplayModeKHR";
+ case ObjectType::eDebugReportCallbackEXT: return "DebugReportCallbackEXT";
+ case ObjectType::eDescriptorUpdateTemplateKHR: return "DescriptorUpdateTemplateKHR";
+ case ObjectType::eObjectTableNVX: return "ObjectTableNVX";
+ case ObjectType::eIndirectCommandsLayoutNVX: return "IndirectCommandsLayoutNVX";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(QueueFlagBits value)
+ {
+ switch (value)
+ {
+ case QueueFlagBits::eGraphics: return "Graphics";
+ case QueueFlagBits::eCompute: return "Compute";
+ case QueueFlagBits::eTransfer: return "Transfer";
+ case QueueFlagBits::eSparseBinding: return "SparseBinding";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(QueueFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & QueueFlagBits::eGraphics) result += "Graphics | ";
+ if (value & QueueFlagBits::eCompute) result += "Compute | ";
+ if (value & QueueFlagBits::eTransfer) result += "Transfer | ";
+ if (value & QueueFlagBits::eSparseBinding) result += "SparseBinding | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
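+
+  // Illustrative usage (hypothetical call): to_string(QueueFlagBits::eGraphics | QueueFlagBits::eCompute)
+  // returns "{Graphics | Compute}" via the flags overload above; an empty mask returns "{}".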
+
+ VULKAN_HPP_INLINE std::string to_string(MemoryPropertyFlagBits value)
+ {
+ switch (value)
+ {
+ case MemoryPropertyFlagBits::eDeviceLocal: return "DeviceLocal";
+ case MemoryPropertyFlagBits::eHostVisible: return "HostVisible";
+ case MemoryPropertyFlagBits::eHostCoherent: return "HostCoherent";
+ case MemoryPropertyFlagBits::eHostCached: return "HostCached";
+ case MemoryPropertyFlagBits::eLazilyAllocated: return "LazilyAllocated";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(MemoryPropertyFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & MemoryPropertyFlagBits::eDeviceLocal) result += "DeviceLocal | ";
+ if (value & MemoryPropertyFlagBits::eHostVisible) result += "HostVisible | ";
+ if (value & MemoryPropertyFlagBits::eHostCoherent) result += "HostCoherent | ";
+ if (value & MemoryPropertyFlagBits::eHostCached) result += "HostCached | ";
+ if (value & MemoryPropertyFlagBits::eLazilyAllocated) result += "LazilyAllocated | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(MemoryHeapFlagBits value)
+ {
+ switch (value)
+ {
+ case MemoryHeapFlagBits::eDeviceLocal: return "DeviceLocal";
+ case MemoryHeapFlagBits::eMultiInstanceKHX: return "MultiInstanceKHX";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(MemoryHeapFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & MemoryHeapFlagBits::eDeviceLocal) result += "DeviceLocal | ";
+ if (value & MemoryHeapFlagBits::eMultiInstanceKHX) result += "MultiInstanceKHX | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(AccessFlagBits value)
+ {
+ switch (value)
+ {
+ case AccessFlagBits::eIndirectCommandRead: return "IndirectCommandRead";
+ case AccessFlagBits::eIndexRead: return "IndexRead";
+ case AccessFlagBits::eVertexAttributeRead: return "VertexAttributeRead";
+ case AccessFlagBits::eUniformRead: return "UniformRead";
+ case AccessFlagBits::eInputAttachmentRead: return "InputAttachmentRead";
+ case AccessFlagBits::eShaderRead: return "ShaderRead";
+ case AccessFlagBits::eShaderWrite: return "ShaderWrite";
+ case AccessFlagBits::eColorAttachmentRead: return "ColorAttachmentRead";
+ case AccessFlagBits::eColorAttachmentWrite: return "ColorAttachmentWrite";
+ case AccessFlagBits::eDepthStencilAttachmentRead: return "DepthStencilAttachmentRead";
+ case AccessFlagBits::eDepthStencilAttachmentWrite: return "DepthStencilAttachmentWrite";
+ case AccessFlagBits::eTransferRead: return "TransferRead";
+ case AccessFlagBits::eTransferWrite: return "TransferWrite";
+ case AccessFlagBits::eHostRead: return "HostRead";
+ case AccessFlagBits::eHostWrite: return "HostWrite";
+ case AccessFlagBits::eMemoryRead: return "MemoryRead";
+ case AccessFlagBits::eMemoryWrite: return "MemoryWrite";
+ case AccessFlagBits::eCommandProcessReadNVX: return "CommandProcessReadNVX";
+ case AccessFlagBits::eCommandProcessWriteNVX: return "CommandProcessWriteNVX";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(AccessFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & AccessFlagBits::eIndirectCommandRead) result += "IndirectCommandRead | ";
+ if (value & AccessFlagBits::eIndexRead) result += "IndexRead | ";
+ if (value & AccessFlagBits::eVertexAttributeRead) result += "VertexAttributeRead | ";
+ if (value & AccessFlagBits::eUniformRead) result += "UniformRead | ";
+ if (value & AccessFlagBits::eInputAttachmentRead) result += "InputAttachmentRead | ";
+ if (value & AccessFlagBits::eShaderRead) result += "ShaderRead | ";
+ if (value & AccessFlagBits::eShaderWrite) result += "ShaderWrite | ";
+ if (value & AccessFlagBits::eColorAttachmentRead) result += "ColorAttachmentRead | ";
+ if (value & AccessFlagBits::eColorAttachmentWrite) result += "ColorAttachmentWrite | ";
+ if (value & AccessFlagBits::eDepthStencilAttachmentRead) result += "DepthStencilAttachmentRead | ";
+ if (value & AccessFlagBits::eDepthStencilAttachmentWrite) result += "DepthStencilAttachmentWrite | ";
+ if (value & AccessFlagBits::eTransferRead) result += "TransferRead | ";
+ if (value & AccessFlagBits::eTransferWrite) result += "TransferWrite | ";
+ if (value & AccessFlagBits::eHostRead) result += "HostRead | ";
+ if (value & AccessFlagBits::eHostWrite) result += "HostWrite | ";
+ if (value & AccessFlagBits::eMemoryRead) result += "MemoryRead | ";
+ if (value & AccessFlagBits::eMemoryWrite) result += "MemoryWrite | ";
+ if (value & AccessFlagBits::eCommandProcessReadNVX) result += "CommandProcessReadNVX | ";
+ if (value & AccessFlagBits::eCommandProcessWriteNVX) result += "CommandProcessWriteNVX | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(BufferUsageFlagBits value)
+ {
+ switch (value)
+ {
+ case BufferUsageFlagBits::eTransferSrc: return "TransferSrc";
+ case BufferUsageFlagBits::eTransferDst: return "TransferDst";
+ case BufferUsageFlagBits::eUniformTexelBuffer: return "UniformTexelBuffer";
+ case BufferUsageFlagBits::eStorageTexelBuffer: return "StorageTexelBuffer";
+ case BufferUsageFlagBits::eUniformBuffer: return "UniformBuffer";
+ case BufferUsageFlagBits::eStorageBuffer: return "StorageBuffer";
+ case BufferUsageFlagBits::eIndexBuffer: return "IndexBuffer";
+ case BufferUsageFlagBits::eVertexBuffer: return "VertexBuffer";
+ case BufferUsageFlagBits::eIndirectBuffer: return "IndirectBuffer";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(BufferUsageFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & BufferUsageFlagBits::eTransferSrc) result += "TransferSrc | ";
+ if (value & BufferUsageFlagBits::eTransferDst) result += "TransferDst | ";
+ if (value & BufferUsageFlagBits::eUniformTexelBuffer) result += "UniformTexelBuffer | ";
+ if (value & BufferUsageFlagBits::eStorageTexelBuffer) result += "StorageTexelBuffer | ";
+ if (value & BufferUsageFlagBits::eUniformBuffer) result += "UniformBuffer | ";
+ if (value & BufferUsageFlagBits::eStorageBuffer) result += "StorageBuffer | ";
+ if (value & BufferUsageFlagBits::eIndexBuffer) result += "IndexBuffer | ";
+ if (value & BufferUsageFlagBits::eVertexBuffer) result += "VertexBuffer | ";
+ if (value & BufferUsageFlagBits::eIndirectBuffer) result += "IndirectBuffer | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(BufferCreateFlagBits value)
+ {
+ switch (value)
+ {
+ case BufferCreateFlagBits::eSparseBinding: return "SparseBinding";
+ case BufferCreateFlagBits::eSparseResidency: return "SparseResidency";
+ case BufferCreateFlagBits::eSparseAliased: return "SparseAliased";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(BufferCreateFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & BufferCreateFlagBits::eSparseBinding) result += "SparseBinding | ";
+ if (value & BufferCreateFlagBits::eSparseResidency) result += "SparseResidency | ";
+ if (value & BufferCreateFlagBits::eSparseAliased) result += "SparseAliased | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ShaderStageFlagBits value)
+ {
+ switch (value)
+ {
+ case ShaderStageFlagBits::eVertex: return "Vertex";
+ case ShaderStageFlagBits::eTessellationControl: return "TessellationControl";
+ case ShaderStageFlagBits::eTessellationEvaluation: return "TessellationEvaluation";
+ case ShaderStageFlagBits::eGeometry: return "Geometry";
+ case ShaderStageFlagBits::eFragment: return "Fragment";
+ case ShaderStageFlagBits::eCompute: return "Compute";
+ case ShaderStageFlagBits::eAllGraphics: return "AllGraphics";
+ case ShaderStageFlagBits::eAll: return "All";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ShaderStageFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & ShaderStageFlagBits::eVertex) result += "Vertex | ";
+ if (value & ShaderStageFlagBits::eTessellationControl) result += "TessellationControl | ";
+ if (value & ShaderStageFlagBits::eTessellationEvaluation) result += "TessellationEvaluation | ";
+ if (value & ShaderStageFlagBits::eGeometry) result += "Geometry | ";
+ if (value & ShaderStageFlagBits::eFragment) result += "Fragment | ";
+ if (value & ShaderStageFlagBits::eCompute) result += "Compute | ";
+ if (value & ShaderStageFlagBits::eAllGraphics) result += "AllGraphics | ";
+ if (value & ShaderStageFlagBits::eAll) result += "All | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ImageUsageFlagBits value)
+ {
+ switch (value)
+ {
+ case ImageUsageFlagBits::eTransferSrc: return "TransferSrc";
+ case ImageUsageFlagBits::eTransferDst: return "TransferDst";
+ case ImageUsageFlagBits::eSampled: return "Sampled";
+ case ImageUsageFlagBits::eStorage: return "Storage";
+ case ImageUsageFlagBits::eColorAttachment: return "ColorAttachment";
+ case ImageUsageFlagBits::eDepthStencilAttachment: return "DepthStencilAttachment";
+ case ImageUsageFlagBits::eTransientAttachment: return "TransientAttachment";
+ case ImageUsageFlagBits::eInputAttachment: return "InputAttachment";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ImageUsageFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & ImageUsageFlagBits::eTransferSrc) result += "TransferSrc | ";
+ if (value & ImageUsageFlagBits::eTransferDst) result += "TransferDst | ";
+ if (value & ImageUsageFlagBits::eSampled) result += "Sampled | ";
+ if (value & ImageUsageFlagBits::eStorage) result += "Storage | ";
+ if (value & ImageUsageFlagBits::eColorAttachment) result += "ColorAttachment | ";
+ if (value & ImageUsageFlagBits::eDepthStencilAttachment) result += "DepthStencilAttachment | ";
+ if (value & ImageUsageFlagBits::eTransientAttachment) result += "TransientAttachment | ";
+ if (value & ImageUsageFlagBits::eInputAttachment) result += "InputAttachment | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ImageCreateFlagBits value)
+ {
+ switch (value)
+ {
+ case ImageCreateFlagBits::eSparseBinding: return "SparseBinding";
+ case ImageCreateFlagBits::eSparseResidency: return "SparseResidency";
+ case ImageCreateFlagBits::eSparseAliased: return "SparseAliased";
+ case ImageCreateFlagBits::eMutableFormat: return "MutableFormat";
+ case ImageCreateFlagBits::eCubeCompatible: return "CubeCompatible";
+ case ImageCreateFlagBits::eBindSfrKHX: return "BindSfrKHX";
+ case ImageCreateFlagBits::e2DArrayCompatibleKHR: return "2DArrayCompatibleKHR";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ImageCreateFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & ImageCreateFlagBits::eSparseBinding) result += "SparseBinding | ";
+ if (value & ImageCreateFlagBits::eSparseResidency) result += "SparseResidency | ";
+ if (value & ImageCreateFlagBits::eSparseAliased) result += "SparseAliased | ";
+ if (value & ImageCreateFlagBits::eMutableFormat) result += "MutableFormat | ";
+ if (value & ImageCreateFlagBits::eCubeCompatible) result += "CubeCompatible | ";
+ if (value & ImageCreateFlagBits::eBindSfrKHX) result += "BindSfrKHX | ";
+ if (value & ImageCreateFlagBits::e2DArrayCompatibleKHR) result += "2DArrayCompatibleKHR | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineCreateFlagBits value)
+ {
+ switch (value)
+ {
+ case PipelineCreateFlagBits::eDisableOptimization: return "DisableOptimization";
+ case PipelineCreateFlagBits::eAllowDerivatives: return "AllowDerivatives";
+ case PipelineCreateFlagBits::eDerivative: return "Derivative";
+ case PipelineCreateFlagBits::eViewIndexFromDeviceIndexKHX: return "ViewIndexFromDeviceIndexKHX";
+ case PipelineCreateFlagBits::eDispatchBaseKHX: return "DispatchBaseKHX";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineCreateFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & PipelineCreateFlagBits::eDisableOptimization) result += "DisableOptimization | ";
+ if (value & PipelineCreateFlagBits::eAllowDerivatives) result += "AllowDerivatives | ";
+ if (value & PipelineCreateFlagBits::eDerivative) result += "Derivative | ";
+ if (value & PipelineCreateFlagBits::eViewIndexFromDeviceIndexKHX) result += "ViewIndexFromDeviceIndexKHX | ";
+ if (value & PipelineCreateFlagBits::eDispatchBaseKHX) result += "DispatchBaseKHX | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ColorComponentFlagBits value)
+ {
+ switch (value)
+ {
+ case ColorComponentFlagBits::eR: return "R";
+ case ColorComponentFlagBits::eG: return "G";
+ case ColorComponentFlagBits::eB: return "B";
+ case ColorComponentFlagBits::eA: return "A";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ColorComponentFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & ColorComponentFlagBits::eR) result += "R | ";
+ if (value & ColorComponentFlagBits::eG) result += "G | ";
+ if (value & ColorComponentFlagBits::eB) result += "B | ";
+ if (value & ColorComponentFlagBits::eA) result += "A | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(FenceCreateFlagBits value)
+ {
+ switch (value)
+ {
+ case FenceCreateFlagBits::eSignaled: return "Signaled";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(FenceCreateFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & FenceCreateFlagBits::eSignaled) result += "Signaled | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(FormatFeatureFlagBits value)
+ {
+ switch (value)
+ {
+ case FormatFeatureFlagBits::eSampledImage: return "SampledImage";
+ case FormatFeatureFlagBits::eStorageImage: return "StorageImage";
+ case FormatFeatureFlagBits::eStorageImageAtomic: return "StorageImageAtomic";
+ case FormatFeatureFlagBits::eUniformTexelBuffer: return "UniformTexelBuffer";
+ case FormatFeatureFlagBits::eStorageTexelBuffer: return "StorageTexelBuffer";
+ case FormatFeatureFlagBits::eStorageTexelBufferAtomic: return "StorageTexelBufferAtomic";
+ case FormatFeatureFlagBits::eVertexBuffer: return "VertexBuffer";
+ case FormatFeatureFlagBits::eColorAttachment: return "ColorAttachment";
+ case FormatFeatureFlagBits::eColorAttachmentBlend: return "ColorAttachmentBlend";
+ case FormatFeatureFlagBits::eDepthStencilAttachment: return "DepthStencilAttachment";
+ case FormatFeatureFlagBits::eBlitSrc: return "BlitSrc";
+ case FormatFeatureFlagBits::eBlitDst: return "BlitDst";
+ case FormatFeatureFlagBits::eSampledImageFilterLinear: return "SampledImageFilterLinear";
+ case FormatFeatureFlagBits::eSampledImageFilterCubicIMG: return "SampledImageFilterCubicIMG";
+ case FormatFeatureFlagBits::eTransferSrcKHR: return "TransferSrcKHR";
+ case FormatFeatureFlagBits::eTransferDstKHR: return "TransferDstKHR";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(FormatFeatureFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & FormatFeatureFlagBits::eSampledImage) result += "SampledImage | ";
+ if (value & FormatFeatureFlagBits::eStorageImage) result += "StorageImage | ";
+ if (value & FormatFeatureFlagBits::eStorageImageAtomic) result += "StorageImageAtomic | ";
+ if (value & FormatFeatureFlagBits::eUniformTexelBuffer) result += "UniformTexelBuffer | ";
+ if (value & FormatFeatureFlagBits::eStorageTexelBuffer) result += "StorageTexelBuffer | ";
+ if (value & FormatFeatureFlagBits::eStorageTexelBufferAtomic) result += "StorageTexelBufferAtomic | ";
+ if (value & FormatFeatureFlagBits::eVertexBuffer) result += "VertexBuffer | ";
+ if (value & FormatFeatureFlagBits::eColorAttachment) result += "ColorAttachment | ";
+ if (value & FormatFeatureFlagBits::eColorAttachmentBlend) result += "ColorAttachmentBlend | ";
+ if (value & FormatFeatureFlagBits::eDepthStencilAttachment) result += "DepthStencilAttachment | ";
+ if (value & FormatFeatureFlagBits::eBlitSrc) result += "BlitSrc | ";
+ if (value & FormatFeatureFlagBits::eBlitDst) result += "BlitDst | ";
+ if (value & FormatFeatureFlagBits::eSampledImageFilterLinear) result += "SampledImageFilterLinear | ";
+ if (value & FormatFeatureFlagBits::eSampledImageFilterCubicIMG) result += "SampledImageFilterCubicIMG | ";
+ if (value & FormatFeatureFlagBits::eTransferSrcKHR) result += "TransferSrcKHR | ";
+ if (value & FormatFeatureFlagBits::eTransferDstKHR) result += "TransferDstKHR | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(QueryControlFlagBits value)
+ {
+ switch (value)
+ {
+ case QueryControlFlagBits::ePrecise: return "Precise";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(QueryControlFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & QueryControlFlagBits::ePrecise) result += "Precise | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(QueryResultFlagBits value)
+ {
+ switch (value)
+ {
+ case QueryResultFlagBits::e64: return "64";
+ case QueryResultFlagBits::eWait: return "Wait";
+ case QueryResultFlagBits::eWithAvailability: return "WithAvailability";
+ case QueryResultFlagBits::ePartial: return "Partial";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(QueryResultFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & QueryResultFlagBits::e64) result += "64 | ";
+ if (value & QueryResultFlagBits::eWait) result += "Wait | ";
+ if (value & QueryResultFlagBits::eWithAvailability) result += "WithAvailability | ";
+ if (value & QueryResultFlagBits::ePartial) result += "Partial | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(CommandBufferUsageFlagBits value)
+ {
+ switch (value)
+ {
+ case CommandBufferUsageFlagBits::eOneTimeSubmit: return "OneTimeSubmit";
+ case CommandBufferUsageFlagBits::eRenderPassContinue: return "RenderPassContinue";
+ case CommandBufferUsageFlagBits::eSimultaneousUse: return "SimultaneousUse";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(CommandBufferUsageFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & CommandBufferUsageFlagBits::eOneTimeSubmit) result += "OneTimeSubmit | ";
+ if (value & CommandBufferUsageFlagBits::eRenderPassContinue) result += "RenderPassContinue | ";
+ if (value & CommandBufferUsageFlagBits::eSimultaneousUse) result += "SimultaneousUse | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(QueryPipelineStatisticFlagBits value)
+ {
+ switch (value)
+ {
+ case QueryPipelineStatisticFlagBits::eInputAssemblyVertices: return "InputAssemblyVertices";
+ case QueryPipelineStatisticFlagBits::eInputAssemblyPrimitives: return "InputAssemblyPrimitives";
+ case QueryPipelineStatisticFlagBits::eVertexShaderInvocations: return "VertexShaderInvocations";
+ case QueryPipelineStatisticFlagBits::eGeometryShaderInvocations: return "GeometryShaderInvocations";
+ case QueryPipelineStatisticFlagBits::eGeometryShaderPrimitives: return "GeometryShaderPrimitives";
+ case QueryPipelineStatisticFlagBits::eClippingInvocations: return "ClippingInvocations";
+ case QueryPipelineStatisticFlagBits::eClippingPrimitives: return "ClippingPrimitives";
+ case QueryPipelineStatisticFlagBits::eFragmentShaderInvocations: return "FragmentShaderInvocations";
+ case QueryPipelineStatisticFlagBits::eTessellationControlShaderPatches: return "TessellationControlShaderPatches";
+ case QueryPipelineStatisticFlagBits::eTessellationEvaluationShaderInvocations: return "TessellationEvaluationShaderInvocations";
+ case QueryPipelineStatisticFlagBits::eComputeShaderInvocations: return "ComputeShaderInvocations";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(QueryPipelineStatisticFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & QueryPipelineStatisticFlagBits::eInputAssemblyVertices) result += "InputAssemblyVertices | ";
+ if (value & QueryPipelineStatisticFlagBits::eInputAssemblyPrimitives) result += "InputAssemblyPrimitives | ";
+ if (value & QueryPipelineStatisticFlagBits::eVertexShaderInvocations) result += "VertexShaderInvocations | ";
+ if (value & QueryPipelineStatisticFlagBits::eGeometryShaderInvocations) result += "GeometryShaderInvocations | ";
+ if (value & QueryPipelineStatisticFlagBits::eGeometryShaderPrimitives) result += "GeometryShaderPrimitives | ";
+ if (value & QueryPipelineStatisticFlagBits::eClippingInvocations) result += "ClippingInvocations | ";
+ if (value & QueryPipelineStatisticFlagBits::eClippingPrimitives) result += "ClippingPrimitives | ";
+ if (value & QueryPipelineStatisticFlagBits::eFragmentShaderInvocations) result += "FragmentShaderInvocations | ";
+ if (value & QueryPipelineStatisticFlagBits::eTessellationControlShaderPatches) result += "TessellationControlShaderPatches | ";
+ if (value & QueryPipelineStatisticFlagBits::eTessellationEvaluationShaderInvocations) result += "TessellationEvaluationShaderInvocations | ";
+ if (value & QueryPipelineStatisticFlagBits::eComputeShaderInvocations) result += "ComputeShaderInvocations | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ImageAspectFlagBits value)
+ {
+ switch (value)
+ {
+ case ImageAspectFlagBits::eColor: return "Color";
+ case ImageAspectFlagBits::eDepth: return "Depth";
+ case ImageAspectFlagBits::eStencil: return "Stencil";
+ case ImageAspectFlagBits::eMetadata: return "Metadata";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ImageAspectFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & ImageAspectFlagBits::eColor) result += "Color | ";
+ if (value & ImageAspectFlagBits::eDepth) result += "Depth | ";
+ if (value & ImageAspectFlagBits::eStencil) result += "Stencil | ";
+ if (value & ImageAspectFlagBits::eMetadata) result += "Metadata | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(SparseImageFormatFlagBits value)
+ {
+ switch (value)
+ {
+ case SparseImageFormatFlagBits::eSingleMiptail: return "SingleMiptail";
+ case SparseImageFormatFlagBits::eAlignedMipSize: return "AlignedMipSize";
+ case SparseImageFormatFlagBits::eNonstandardBlockSize: return "NonstandardBlockSize";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(SparseImageFormatFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & SparseImageFormatFlagBits::eSingleMiptail) result += "SingleMiptail | ";
+ if (value & SparseImageFormatFlagBits::eAlignedMipSize) result += "AlignedMipSize | ";
+ if (value & SparseImageFormatFlagBits::eNonstandardBlockSize) result += "NonstandardBlockSize | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(SparseMemoryBindFlagBits value)
+ {
+ switch (value)
+ {
+ case SparseMemoryBindFlagBits::eMetadata: return "Metadata";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(SparseMemoryBindFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & SparseMemoryBindFlagBits::eMetadata) result += "Metadata | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineStageFlagBits value)
+ {
+ switch (value)
+ {
+ case PipelineStageFlagBits::eTopOfPipe: return "TopOfPipe";
+ case PipelineStageFlagBits::eDrawIndirect: return "DrawIndirect";
+ case PipelineStageFlagBits::eVertexInput: return "VertexInput";
+ case PipelineStageFlagBits::eVertexShader: return "VertexShader";
+ case PipelineStageFlagBits::eTessellationControlShader: return "TessellationControlShader";
+ case PipelineStageFlagBits::eTessellationEvaluationShader: return "TessellationEvaluationShader";
+ case PipelineStageFlagBits::eGeometryShader: return "GeometryShader";
+ case PipelineStageFlagBits::eFragmentShader: return "FragmentShader";
+ case PipelineStageFlagBits::eEarlyFragmentTests: return "EarlyFragmentTests";
+ case PipelineStageFlagBits::eLateFragmentTests: return "LateFragmentTests";
+ case PipelineStageFlagBits::eColorAttachmentOutput: return "ColorAttachmentOutput";
+ case PipelineStageFlagBits::eComputeShader: return "ComputeShader";
+ case PipelineStageFlagBits::eTransfer: return "Transfer";
+ case PipelineStageFlagBits::eBottomOfPipe: return "BottomOfPipe";
+ case PipelineStageFlagBits::eHost: return "Host";
+ case PipelineStageFlagBits::eAllGraphics: return "AllGraphics";
+ case PipelineStageFlagBits::eAllCommands: return "AllCommands";
+ case PipelineStageFlagBits::eCommandProcessNVX: return "CommandProcessNVX";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PipelineStageFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & PipelineStageFlagBits::eTopOfPipe) result += "TopOfPipe | ";
+ if (value & PipelineStageFlagBits::eDrawIndirect) result += "DrawIndirect | ";
+ if (value & PipelineStageFlagBits::eVertexInput) result += "VertexInput | ";
+ if (value & PipelineStageFlagBits::eVertexShader) result += "VertexShader | ";
+ if (value & PipelineStageFlagBits::eTessellationControlShader) result += "TessellationControlShader | ";
+ if (value & PipelineStageFlagBits::eTessellationEvaluationShader) result += "TessellationEvaluationShader | ";
+ if (value & PipelineStageFlagBits::eGeometryShader) result += "GeometryShader | ";
+ if (value & PipelineStageFlagBits::eFragmentShader) result += "FragmentShader | ";
+ if (value & PipelineStageFlagBits::eEarlyFragmentTests) result += "EarlyFragmentTests | ";
+ if (value & PipelineStageFlagBits::eLateFragmentTests) result += "LateFragmentTests | ";
+ if (value & PipelineStageFlagBits::eColorAttachmentOutput) result += "ColorAttachmentOutput | ";
+ if (value & PipelineStageFlagBits::eComputeShader) result += "ComputeShader | ";
+ if (value & PipelineStageFlagBits::eTransfer) result += "Transfer | ";
+ if (value & PipelineStageFlagBits::eBottomOfPipe) result += "BottomOfPipe | ";
+ if (value & PipelineStageFlagBits::eHost) result += "Host | ";
+ if (value & PipelineStageFlagBits::eAllGraphics) result += "AllGraphics | ";
+ if (value & PipelineStageFlagBits::eAllCommands) result += "AllCommands | ";
+ if (value & PipelineStageFlagBits::eCommandProcessNVX) result += "CommandProcessNVX | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(CommandPoolCreateFlagBits value)
+ {
+ switch (value)
+ {
+ case CommandPoolCreateFlagBits::eTransient: return "Transient";
+ case CommandPoolCreateFlagBits::eResetCommandBuffer: return "ResetCommandBuffer";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(CommandPoolCreateFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & CommandPoolCreateFlagBits::eTransient) result += "Transient | ";
+ if (value & CommandPoolCreateFlagBits::eResetCommandBuffer) result += "ResetCommandBuffer | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(CommandPoolResetFlagBits value)
+ {
+ switch (value)
+ {
+ case CommandPoolResetFlagBits::eReleaseResources: return "ReleaseResources";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(CommandPoolResetFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & CommandPoolResetFlagBits::eReleaseResources) result += "ReleaseResources | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(CommandBufferResetFlagBits value)
+ {
+ switch (value)
+ {
+ case CommandBufferResetFlagBits::eReleaseResources: return "ReleaseResources";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(CommandBufferResetFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & CommandBufferResetFlagBits::eReleaseResources) result += "ReleaseResources | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(SampleCountFlagBits value)
+ {
+ switch (value)
+ {
+ case SampleCountFlagBits::e1: return "1";
+ case SampleCountFlagBits::e2: return "2";
+ case SampleCountFlagBits::e4: return "4";
+ case SampleCountFlagBits::e8: return "8";
+ case SampleCountFlagBits::e16: return "16";
+ case SampleCountFlagBits::e32: return "32";
+ case SampleCountFlagBits::e64: return "64";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(SampleCountFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & SampleCountFlagBits::e1) result += "1 | ";
+ if (value & SampleCountFlagBits::e2) result += "2 | ";
+ if (value & SampleCountFlagBits::e4) result += "4 | ";
+ if (value & SampleCountFlagBits::e8) result += "8 | ";
+ if (value & SampleCountFlagBits::e16) result += "16 | ";
+ if (value & SampleCountFlagBits::e32) result += "32 | ";
+ if (value & SampleCountFlagBits::e64) result += "64 | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
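+
+  // Illustrative usage (hypothetical values): to_string(SampleCountFlagBits::e4) returns "4", and
+  // to_string(SampleCountFlagBits::e1 | SampleCountFlagBits::e4) returns "{1 | 4}".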
+
+ VULKAN_HPP_INLINE std::string to_string(AttachmentDescriptionFlagBits value)
+ {
+ switch (value)
+ {
+ case AttachmentDescriptionFlagBits::eMayAlias: return "MayAlias";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(AttachmentDescriptionFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & AttachmentDescriptionFlagBits::eMayAlias) result += "MayAlias | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(StencilFaceFlagBits value)
+ {
+ switch (value)
+ {
+ case StencilFaceFlagBits::eFront: return "Front";
+ case StencilFaceFlagBits::eBack: return "Back";
+ case StencilFaceFlagBits::eVkStencilFrontAndBack: return "VkStencilFrontAndBack";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(StencilFaceFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & StencilFaceFlagBits::eFront) result += "Front | ";
+ if (value & StencilFaceFlagBits::eBack) result += "Back | ";
+ if (value & StencilFaceFlagBits::eVkStencilFrontAndBack) result += "VkStencilFrontAndBack | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DescriptorPoolCreateFlagBits value)
+ {
+ switch (value)
+ {
+ case DescriptorPoolCreateFlagBits::eFreeDescriptorSet: return "FreeDescriptorSet";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DescriptorPoolCreateFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & DescriptorPoolCreateFlagBits::eFreeDescriptorSet) result += "FreeDescriptorSet | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DependencyFlagBits value)
+ {
+ switch (value)
+ {
+ case DependencyFlagBits::eByRegion: return "ByRegion";
+ case DependencyFlagBits::eViewLocalKHX: return "ViewLocalKHX";
+ case DependencyFlagBits::eDeviceGroupKHX: return "DeviceGroupKHX";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DependencyFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & DependencyFlagBits::eByRegion) result += "ByRegion | ";
+ if (value & DependencyFlagBits::eViewLocalKHX) result += "ViewLocalKHX | ";
+ if (value & DependencyFlagBits::eDeviceGroupKHX) result += "DeviceGroupKHX | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PresentModeKHR value)
+ {
+ switch (value)
+ {
+ case PresentModeKHR::eImmediate: return "Immediate";
+ case PresentModeKHR::eMailbox: return "Mailbox";
+ case PresentModeKHR::eFifo: return "Fifo";
+ case PresentModeKHR::eFifoRelaxed: return "FifoRelaxed";
+ case PresentModeKHR::eSharedDemandRefresh: return "SharedDemandRefresh";
+ case PresentModeKHR::eSharedContinuousRefresh: return "SharedContinuousRefresh";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ColorSpaceKHR value)
+ {
+ switch (value)
+ {
+ case ColorSpaceKHR::eSrgbNonlinear: return "SrgbNonlinear";
+ case ColorSpaceKHR::eDisplayP3NonlinearEXT: return "DisplayP3NonlinearEXT";
+ case ColorSpaceKHR::eExtendedSrgbLinearEXT: return "ExtendedSrgbLinearEXT";
+ case ColorSpaceKHR::eDciP3LinearEXT: return "DciP3LinearEXT";
+ case ColorSpaceKHR::eDciP3NonlinearEXT: return "DciP3NonlinearEXT";
+ case ColorSpaceKHR::eBt709LinearEXT: return "Bt709LinearEXT";
+ case ColorSpaceKHR::eBt709NonlinearEXT: return "Bt709NonlinearEXT";
+ case ColorSpaceKHR::eBt2020LinearEXT: return "Bt2020LinearEXT";
+ case ColorSpaceKHR::eHdr10St2084EXT: return "Hdr10St2084EXT";
+ case ColorSpaceKHR::eDolbyvisionEXT: return "DolbyvisionEXT";
+ case ColorSpaceKHR::eHdr10HlgEXT: return "Hdr10HlgEXT";
+ case ColorSpaceKHR::eAdobergbLinearEXT: return "AdobergbLinearEXT";
+ case ColorSpaceKHR::eAdobergbNonlinearEXT: return "AdobergbNonlinearEXT";
+ case ColorSpaceKHR::ePassThroughEXT: return "PassThroughEXT";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DisplayPlaneAlphaFlagBitsKHR value)
+ {
+ switch (value)
+ {
+ case DisplayPlaneAlphaFlagBitsKHR::eOpaque: return "Opaque";
+ case DisplayPlaneAlphaFlagBitsKHR::eGlobal: return "Global";
+ case DisplayPlaneAlphaFlagBitsKHR::ePerPixel: return "PerPixel";
+ case DisplayPlaneAlphaFlagBitsKHR::ePerPixelPremultiplied: return "PerPixelPremultiplied";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DisplayPlaneAlphaFlagsKHR value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & DisplayPlaneAlphaFlagBitsKHR::eOpaque) result += "Opaque | ";
+ if (value & DisplayPlaneAlphaFlagBitsKHR::eGlobal) result += "Global | ";
+ if (value & DisplayPlaneAlphaFlagBitsKHR::ePerPixel) result += "PerPixel | ";
+ if (value & DisplayPlaneAlphaFlagBitsKHR::ePerPixelPremultiplied) result += "PerPixelPremultiplied | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(CompositeAlphaFlagBitsKHR value)
+ {
+ switch (value)
+ {
+ case CompositeAlphaFlagBitsKHR::eOpaque: return "Opaque";
+ case CompositeAlphaFlagBitsKHR::ePreMultiplied: return "PreMultiplied";
+ case CompositeAlphaFlagBitsKHR::ePostMultiplied: return "PostMultiplied";
+ case CompositeAlphaFlagBitsKHR::eInherit: return "Inherit";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(CompositeAlphaFlagsKHR value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & CompositeAlphaFlagBitsKHR::eOpaque) result += "Opaque | ";
+ if (value & CompositeAlphaFlagBitsKHR::ePreMultiplied) result += "PreMultiplied | ";
+ if (value & CompositeAlphaFlagBitsKHR::ePostMultiplied) result += "PostMultiplied | ";
+ if (value & CompositeAlphaFlagBitsKHR::eInherit) result += "Inherit | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(SurfaceTransformFlagBitsKHR value)
+ {
+ switch (value)
+ {
+ case SurfaceTransformFlagBitsKHR::eIdentity: return "Identity";
+ case SurfaceTransformFlagBitsKHR::eRotate90: return "Rotate90";
+ case SurfaceTransformFlagBitsKHR::eRotate180: return "Rotate180";
+ case SurfaceTransformFlagBitsKHR::eRotate270: return "Rotate270";
+ case SurfaceTransformFlagBitsKHR::eHorizontalMirror: return "HorizontalMirror";
+ case SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate90: return "HorizontalMirrorRotate90";
+ case SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate180: return "HorizontalMirrorRotate180";
+ case SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate270: return "HorizontalMirrorRotate270";
+ case SurfaceTransformFlagBitsKHR::eInherit: return "Inherit";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(SurfaceTransformFlagsKHR value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & SurfaceTransformFlagBitsKHR::eIdentity) result += "Identity | ";
+ if (value & SurfaceTransformFlagBitsKHR::eRotate90) result += "Rotate90 | ";
+ if (value & SurfaceTransformFlagBitsKHR::eRotate180) result += "Rotate180 | ";
+ if (value & SurfaceTransformFlagBitsKHR::eRotate270) result += "Rotate270 | ";
+ if (value & SurfaceTransformFlagBitsKHR::eHorizontalMirror) result += "HorizontalMirror | ";
+ if (value & SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate90) result += "HorizontalMirrorRotate90 | ";
+ if (value & SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate180) result += "HorizontalMirrorRotate180 | ";
+ if (value & SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate270) result += "HorizontalMirrorRotate270 | ";
+ if (value & SurfaceTransformFlagBitsKHR::eInherit) result += "Inherit | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DebugReportFlagBitsEXT value)
+ {
+ switch (value)
+ {
+ case DebugReportFlagBitsEXT::eInformation: return "Information";
+ case DebugReportFlagBitsEXT::eWarning: return "Warning";
+ case DebugReportFlagBitsEXT::ePerformanceWarning: return "PerformanceWarning";
+ case DebugReportFlagBitsEXT::eError: return "Error";
+ case DebugReportFlagBitsEXT::eDebug: return "Debug";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DebugReportFlagsEXT value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & DebugReportFlagBitsEXT::eInformation) result += "Information | ";
+ if (value & DebugReportFlagBitsEXT::eWarning) result += "Warning | ";
+ if (value & DebugReportFlagBitsEXT::ePerformanceWarning) result += "PerformanceWarning | ";
+ if (value & DebugReportFlagBitsEXT::eError) result += "Error | ";
+ if (value & DebugReportFlagBitsEXT::eDebug) result += "Debug | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
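+
+  // Illustrative usage (hypothetical mask): to_string(DebugReportFlagBitsEXT::eWarning | DebugReportFlagBitsEXT::eError)
+  // returns "{Warning | Error}", since flag names are appended in the order of the checks above.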
+
+ VULKAN_HPP_INLINE std::string to_string(DebugReportObjectTypeEXT value)
+ {
+ switch (value)
+ {
+ case DebugReportObjectTypeEXT::eUnknown: return "Unknown";
+ case DebugReportObjectTypeEXT::eInstance: return "Instance";
+ case DebugReportObjectTypeEXT::ePhysicalDevice: return "PhysicalDevice";
+ case DebugReportObjectTypeEXT::eDevice: return "Device";
+ case DebugReportObjectTypeEXT::eQueue: return "Queue";
+ case DebugReportObjectTypeEXT::eSemaphore: return "Semaphore";
+ case DebugReportObjectTypeEXT::eCommandBuffer: return "CommandBuffer";
+ case DebugReportObjectTypeEXT::eFence: return "Fence";
+ case DebugReportObjectTypeEXT::eDeviceMemory: return "DeviceMemory";
+ case DebugReportObjectTypeEXT::eBuffer: return "Buffer";
+ case DebugReportObjectTypeEXT::eImage: return "Image";
+ case DebugReportObjectTypeEXT::eEvent: return "Event";
+ case DebugReportObjectTypeEXT::eQueryPool: return "QueryPool";
+ case DebugReportObjectTypeEXT::eBufferView: return "BufferView";
+ case DebugReportObjectTypeEXT::eImageView: return "ImageView";
+ case DebugReportObjectTypeEXT::eShaderModule: return "ShaderModule";
+ case DebugReportObjectTypeEXT::ePipelineCache: return "PipelineCache";
+ case DebugReportObjectTypeEXT::ePipelineLayout: return "PipelineLayout";
+ case DebugReportObjectTypeEXT::eRenderPass: return "RenderPass";
+ case DebugReportObjectTypeEXT::ePipeline: return "Pipeline";
+ case DebugReportObjectTypeEXT::eDescriptorSetLayout: return "DescriptorSetLayout";
+ case DebugReportObjectTypeEXT::eSampler: return "Sampler";
+ case DebugReportObjectTypeEXT::eDescriptorPool: return "DescriptorPool";
+ case DebugReportObjectTypeEXT::eDescriptorSet: return "DescriptorSet";
+ case DebugReportObjectTypeEXT::eFramebuffer: return "Framebuffer";
+ case DebugReportObjectTypeEXT::eCommandPool: return "CommandPool";
+ case DebugReportObjectTypeEXT::eSurfaceKhr: return "SurfaceKhr";
+ case DebugReportObjectTypeEXT::eSwapchainKhr: return "SwapchainKhr";
+ case DebugReportObjectTypeEXT::eDebugReport: return "DebugReport";
+ case DebugReportObjectTypeEXT::eDisplayKhr: return "DisplayKhr";
+ case DebugReportObjectTypeEXT::eDisplayModeKhr: return "DisplayModeKhr";
+ case DebugReportObjectTypeEXT::eObjectTableNvx: return "ObjectTableNvx";
+ case DebugReportObjectTypeEXT::eIndirectCommandsLayoutNvx: return "IndirectCommandsLayoutNvx";
+ case DebugReportObjectTypeEXT::eDescriptorUpdateTemplateKHR: return "DescriptorUpdateTemplateKHR";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DebugReportErrorEXT value)
+ {
+ switch (value)
+ {
+ case DebugReportErrorEXT::eNone: return "None";
+ case DebugReportErrorEXT::eCallbackRef: return "CallbackRef";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(RasterizationOrderAMD value)
+ {
+ switch (value)
+ {
+ case RasterizationOrderAMD::eStrict: return "Strict";
+ case RasterizationOrderAMD::eRelaxed: return "Relaxed";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ExternalMemoryHandleTypeFlagBitsNV value)
+ {
+ switch (value)
+ {
+ case ExternalMemoryHandleTypeFlagBitsNV::eOpaqueWin32: return "OpaqueWin32";
+ case ExternalMemoryHandleTypeFlagBitsNV::eOpaqueWin32Kmt: return "OpaqueWin32Kmt";
+ case ExternalMemoryHandleTypeFlagBitsNV::eD3D11Image: return "D3D11Image";
+ case ExternalMemoryHandleTypeFlagBitsNV::eD3D11ImageKmt: return "D3D11ImageKmt";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ExternalMemoryHandleTypeFlagsNV value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & ExternalMemoryHandleTypeFlagBitsNV::eOpaqueWin32) result += "OpaqueWin32 | ";
+ if (value & ExternalMemoryHandleTypeFlagBitsNV::eOpaqueWin32Kmt) result += "OpaqueWin32Kmt | ";
+ if (value & ExternalMemoryHandleTypeFlagBitsNV::eD3D11Image) result += "D3D11Image | ";
+ if (value & ExternalMemoryHandleTypeFlagBitsNV::eD3D11ImageKmt) result += "D3D11ImageKmt | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ExternalMemoryFeatureFlagBitsNV value)
+ {
+ switch (value)
+ {
+ case ExternalMemoryFeatureFlagBitsNV::eDedicatedOnly: return "DedicatedOnly";
+ case ExternalMemoryFeatureFlagBitsNV::eExportable: return "Exportable";
+ case ExternalMemoryFeatureFlagBitsNV::eImportable: return "Importable";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ExternalMemoryFeatureFlagsNV value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & ExternalMemoryFeatureFlagBitsNV::eDedicatedOnly) result += "DedicatedOnly | ";
+ if (value & ExternalMemoryFeatureFlagBitsNV::eExportable) result += "Exportable | ";
+ if (value & ExternalMemoryFeatureFlagBitsNV::eImportable) result += "Importable | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ValidationCheckEXT value)
+ {
+ switch (value)
+ {
+ case ValidationCheckEXT::eAll: return "All";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(IndirectCommandsLayoutUsageFlagBitsNVX value)
+ {
+ switch (value)
+ {
+ case IndirectCommandsLayoutUsageFlagBitsNVX::eUnorderedSequences: return "UnorderedSequences";
+ case IndirectCommandsLayoutUsageFlagBitsNVX::eSparseSequences: return "SparseSequences";
+ case IndirectCommandsLayoutUsageFlagBitsNVX::eEmptyExecutions: return "EmptyExecutions";
+ case IndirectCommandsLayoutUsageFlagBitsNVX::eIndexedSequences: return "IndexedSequences";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(IndirectCommandsLayoutUsageFlagsNVX value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & IndirectCommandsLayoutUsageFlagBitsNVX::eUnorderedSequences) result += "UnorderedSequences | ";
+ if (value & IndirectCommandsLayoutUsageFlagBitsNVX::eSparseSequences) result += "SparseSequences | ";
+ if (value & IndirectCommandsLayoutUsageFlagBitsNVX::eEmptyExecutions) result += "EmptyExecutions | ";
+ if (value & IndirectCommandsLayoutUsageFlagBitsNVX::eIndexedSequences) result += "IndexedSequences | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ObjectEntryUsageFlagBitsNVX value)
+ {
+ switch (value)
+ {
+ case ObjectEntryUsageFlagBitsNVX::eGraphics: return "Graphics";
+ case ObjectEntryUsageFlagBitsNVX::eCompute: return "Compute";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ObjectEntryUsageFlagsNVX value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & ObjectEntryUsageFlagBitsNVX::eGraphics) result += "Graphics | ";
+ if (value & ObjectEntryUsageFlagBitsNVX::eCompute) result += "Compute | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(IndirectCommandsTokenTypeNVX value)
+ {
+ switch (value)
+ {
+ case IndirectCommandsTokenTypeNVX::eVkIndirectCommandsTokenPipeline: return "VkIndirectCommandsTokenPipeline";
+ case IndirectCommandsTokenTypeNVX::eVkIndirectCommandsTokenDescriptorSet: return "VkIndirectCommandsTokenDescriptorSet";
+ case IndirectCommandsTokenTypeNVX::eVkIndirectCommandsTokenIndexBuffer: return "VkIndirectCommandsTokenIndexBuffer";
+ case IndirectCommandsTokenTypeNVX::eVkIndirectCommandsTokenVertexBuffer: return "VkIndirectCommandsTokenVertexBuffer";
+ case IndirectCommandsTokenTypeNVX::eVkIndirectCommandsTokenPushConstant: return "VkIndirectCommandsTokenPushConstant";
+ case IndirectCommandsTokenTypeNVX::eVkIndirectCommandsTokenDrawIndexed: return "VkIndirectCommandsTokenDrawIndexed";
+ case IndirectCommandsTokenTypeNVX::eVkIndirectCommandsTokenDraw: return "VkIndirectCommandsTokenDraw";
+ case IndirectCommandsTokenTypeNVX::eVkIndirectCommandsTokenDispatch: return "VkIndirectCommandsTokenDispatch";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ObjectEntryTypeNVX value)
+ {
+ switch (value)
+ {
+ case ObjectEntryTypeNVX::eVkObjectEntryDescriptorSet: return "VkObjectEntryDescriptorSet";
+ case ObjectEntryTypeNVX::eVkObjectEntryPipeline: return "VkObjectEntryPipeline";
+ case ObjectEntryTypeNVX::eVkObjectEntryIndexBuffer: return "VkObjectEntryIndexBuffer";
+ case ObjectEntryTypeNVX::eVkObjectEntryVertexBuffer: return "VkObjectEntryVertexBuffer";
+ case ObjectEntryTypeNVX::eVkObjectEntryPushConstant: return "VkObjectEntryPushConstant";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DescriptorSetLayoutCreateFlagBits value)
+ {
+ switch (value)
+ {
+ case DescriptorSetLayoutCreateFlagBits::ePushDescriptorKHR: return "PushDescriptorKHR";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DescriptorSetLayoutCreateFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & DescriptorSetLayoutCreateFlagBits::ePushDescriptorKHR) result += "PushDescriptorKHR | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ExternalMemoryHandleTypeFlagBitsKHX value)
+ {
+ switch (value)
+ {
+ case ExternalMemoryHandleTypeFlagBitsKHX::eOpaqueFd: return "OpaqueFd";
+ case ExternalMemoryHandleTypeFlagBitsKHX::eOpaqueWin32: return "OpaqueWin32";
+ case ExternalMemoryHandleTypeFlagBitsKHX::eOpaqueWin32Kmt: return "OpaqueWin32Kmt";
+ case ExternalMemoryHandleTypeFlagBitsKHX::eD3D11Texture: return "D3D11Texture";
+ case ExternalMemoryHandleTypeFlagBitsKHX::eD3D11TextureKmt: return "D3D11TextureKmt";
+ case ExternalMemoryHandleTypeFlagBitsKHX::eD3D12Heap: return "D3D12Heap";
+ case ExternalMemoryHandleTypeFlagBitsKHX::eD3D12Resource: return "D3D12Resource";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ExternalMemoryHandleTypeFlagsKHX value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & ExternalMemoryHandleTypeFlagBitsKHX::eOpaqueFd) result += "OpaqueFd | ";
+ if (value & ExternalMemoryHandleTypeFlagBitsKHX::eOpaqueWin32) result += "OpaqueWin32 | ";
+ if (value & ExternalMemoryHandleTypeFlagBitsKHX::eOpaqueWin32Kmt) result += "OpaqueWin32Kmt | ";
+ if (value & ExternalMemoryHandleTypeFlagBitsKHX::eD3D11Texture) result += "D3D11Texture | ";
+ if (value & ExternalMemoryHandleTypeFlagBitsKHX::eD3D11TextureKmt) result += "D3D11TextureKmt | ";
+ if (value & ExternalMemoryHandleTypeFlagBitsKHX::eD3D12Heap) result += "D3D12Heap | ";
+ if (value & ExternalMemoryHandleTypeFlagBitsKHX::eD3D12Resource) result += "D3D12Resource | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ExternalMemoryFeatureFlagBitsKHX value)
+ {
+ switch (value)
+ {
+ case ExternalMemoryFeatureFlagBitsKHX::eDedicatedOnly: return "DedicatedOnly";
+ case ExternalMemoryFeatureFlagBitsKHX::eExportable: return "Exportable";
+ case ExternalMemoryFeatureFlagBitsKHX::eImportable: return "Importable";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ExternalMemoryFeatureFlagsKHX value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & ExternalMemoryFeatureFlagBitsKHX::eDedicatedOnly) result += "DedicatedOnly | ";
+ if (value & ExternalMemoryFeatureFlagBitsKHX::eExportable) result += "Exportable | ";
+ if (value & ExternalMemoryFeatureFlagBitsKHX::eImportable) result += "Importable | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ExternalSemaphoreHandleTypeFlagBitsKHX value)
+ {
+ switch (value)
+ {
+ case ExternalSemaphoreHandleTypeFlagBitsKHX::eOpaqueFd: return "OpaqueFd";
+ case ExternalSemaphoreHandleTypeFlagBitsKHX::eOpaqueWin32: return "OpaqueWin32";
+ case ExternalSemaphoreHandleTypeFlagBitsKHX::eOpaqueWin32Kmt: return "OpaqueWin32Kmt";
+ case ExternalSemaphoreHandleTypeFlagBitsKHX::eD3D12Fence: return "D3D12Fence";
+ case ExternalSemaphoreHandleTypeFlagBitsKHX::eFenceFd: return "FenceFd";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ExternalSemaphoreHandleTypeFlagsKHX value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & ExternalSemaphoreHandleTypeFlagBitsKHX::eOpaqueFd) result += "OpaqueFd | ";
+ if (value & ExternalSemaphoreHandleTypeFlagBitsKHX::eOpaqueWin32) result += "OpaqueWin32 | ";
+ if (value & ExternalSemaphoreHandleTypeFlagBitsKHX::eOpaqueWin32Kmt) result += "OpaqueWin32Kmt | ";
+ if (value & ExternalSemaphoreHandleTypeFlagBitsKHX::eD3D12Fence) result += "D3D12Fence | ";
+ if (value & ExternalSemaphoreHandleTypeFlagBitsKHX::eFenceFd) result += "FenceFd | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ExternalSemaphoreFeatureFlagBitsKHX value)
+ {
+ switch (value)
+ {
+ case ExternalSemaphoreFeatureFlagBitsKHX::eExportable: return "Exportable";
+ case ExternalSemaphoreFeatureFlagBitsKHX::eImportable: return "Importable";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ExternalSemaphoreFeatureFlagsKHX value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & ExternalSemaphoreFeatureFlagBitsKHX::eExportable) result += "Exportable | ";
+ if (value & ExternalSemaphoreFeatureFlagBitsKHX::eImportable) result += "Importable | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(SurfaceCounterFlagBitsEXT value)
+ {
+ switch (value)
+ {
+ case SurfaceCounterFlagBitsEXT::eVblank: return "Vblank";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(SurfaceCounterFlagsEXT value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & SurfaceCounterFlagBitsEXT::eVblank) result += "Vblank | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DisplayPowerStateEXT value)
+ {
+ switch (value)
+ {
+ case DisplayPowerStateEXT::eOff: return "Off";
+ case DisplayPowerStateEXT::eSuspend: return "Suspend";
+ case DisplayPowerStateEXT::eOn: return "On";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DeviceEventTypeEXT value)
+ {
+ switch (value)
+ {
+ case DeviceEventTypeEXT::eDisplayHotplug: return "DisplayHotplug";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DisplayEventTypeEXT value)
+ {
+ switch (value)
+ {
+ case DisplayEventTypeEXT::eFirstPixelOut: return "FirstPixelOut";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PeerMemoryFeatureFlagBitsKHX value)
+ {
+ switch (value)
+ {
+ case PeerMemoryFeatureFlagBitsKHX::eCopySrc: return "CopySrc";
+ case PeerMemoryFeatureFlagBitsKHX::eCopyDst: return "CopyDst";
+ case PeerMemoryFeatureFlagBitsKHX::eGenericSrc: return "GenericSrc";
+ case PeerMemoryFeatureFlagBitsKHX::eGenericDst: return "GenericDst";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(PeerMemoryFeatureFlagsKHX value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & PeerMemoryFeatureFlagBitsKHX::eCopySrc) result += "CopySrc | ";
+ if (value & PeerMemoryFeatureFlagBitsKHX::eCopyDst) result += "CopyDst | ";
+ if (value & PeerMemoryFeatureFlagBitsKHX::eGenericSrc) result += "GenericSrc | ";
+ if (value & PeerMemoryFeatureFlagBitsKHX::eGenericDst) result += "GenericDst | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(MemoryAllocateFlagBitsKHX value)
+ {
+ switch (value)
+ {
+ case MemoryAllocateFlagBitsKHX::eDeviceMask: return "DeviceMask";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(MemoryAllocateFlagsKHX value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & MemoryAllocateFlagBitsKHX::eDeviceMask) result += "DeviceMask | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DeviceGroupPresentModeFlagBitsKHX value)
+ {
+ switch (value)
+ {
+ case DeviceGroupPresentModeFlagBitsKHX::eLocal: return "Local";
+ case DeviceGroupPresentModeFlagBitsKHX::eRemote: return "Remote";
+ case DeviceGroupPresentModeFlagBitsKHX::eSum: return "Sum";
+ case DeviceGroupPresentModeFlagBitsKHX::eLocalMultiDevice: return "LocalMultiDevice";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DeviceGroupPresentModeFlagsKHX value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & DeviceGroupPresentModeFlagBitsKHX::eLocal) result += "Local | ";
+ if (value & DeviceGroupPresentModeFlagBitsKHX::eRemote) result += "Remote | ";
+ if (value & DeviceGroupPresentModeFlagBitsKHX::eSum) result += "Sum | ";
+ if (value & DeviceGroupPresentModeFlagBitsKHX::eLocalMultiDevice) result += "LocalMultiDevice | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(SwapchainCreateFlagBitsKHR value)
+ {
+ switch (value)
+ {
+ case SwapchainCreateFlagBitsKHR::eBindSfrKHX: return "BindSfrKHX";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(SwapchainCreateFlagsKHR value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & SwapchainCreateFlagBitsKHR::eBindSfrKHX) result += "BindSfrKHX | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(ViewportCoordinateSwizzleNV value)
+ {
+ switch (value)
+ {
+ case ViewportCoordinateSwizzleNV::ePositiveX: return "PositiveX";
+ case ViewportCoordinateSwizzleNV::eNegativeX: return "NegativeX";
+ case ViewportCoordinateSwizzleNV::ePositiveY: return "PositiveY";
+ case ViewportCoordinateSwizzleNV::eNegativeY: return "NegativeY";
+ case ViewportCoordinateSwizzleNV::ePositiveZ: return "PositiveZ";
+ case ViewportCoordinateSwizzleNV::eNegativeZ: return "NegativeZ";
+ case ViewportCoordinateSwizzleNV::ePositiveW: return "PositiveW";
+ case ViewportCoordinateSwizzleNV::eNegativeW: return "NegativeW";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(DiscardRectangleModeEXT value)
+ {
+ switch (value)
+ {
+ case DiscardRectangleModeEXT::eInclusive: return "Inclusive";
+ case DiscardRectangleModeEXT::eExclusive: return "Exclusive";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(SubpassDescriptionFlagBits value)
+ {
+ switch (value)
+ {
+ case SubpassDescriptionFlagBits::ePerViewAttributesNVX: return "PerViewAttributesNVX";
+ case SubpassDescriptionFlagBits::ePerViewPositionXOnlyNVX: return "PerViewPositionXOnlyNVX";
+ default: return "invalid";
+ }
+ }
+
+ VULKAN_HPP_INLINE std::string to_string(SubpassDescriptionFlags value)
+ {
+ if (!value) return "{}";
+ std::string result;
+ if (value & SubpassDescriptionFlagBits::ePerViewAttributesNVX) result += "PerViewAttributesNVX | ";
+ if (value & SubpassDescriptionFlagBits::ePerViewPositionXOnlyNVX) result += "PerViewPositionXOnlyNVX | ";
+ return "{" + result.substr(0, result.size() - 3) + "}";
+ }
+
+} // namespace vk
+
+#endif
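
The to_string overloads added above turn vk:: enumerants and flag bitmasks into readable strings, which the Vulkan samples can use for logging and error reporting. A minimal sketch of how they behave follows; it assumes the vendored header from samples/thirdparty/vulkan-1.0.49.0 is on the include path, and the include line and printing are illustrative rather than part of the SDK.

    #include <iostream>
    #include <vulkan/vulkan.hpp>  // vendored vulkan.hpp from samples/thirdparty (assumed include path)

    int main()
    {
        // A single enumerant prints as its bare name, e.g. "Inclusive".
        std::cout << vk::to_string( vk::DiscardRectangleModeEXT::eInclusive ) << "\n";

        // A flag bitmask prints as a braced, pipe-separated list,
        // e.g. "{PerViewAttributesNVX | PerViewPositionXOnlyNVX}".
        vk::SubpassDescriptionFlags flags = vk::SubpassDescriptionFlagBits::ePerViewAttributesNVX
                                          | vk::SubpassDescriptionFlagBits::ePerViewPositionXOnlyNVX;
        std::cout << vk::to_string( flags ) << "\n";

        // An empty bitmask prints as "{}"; unrecognized values print as "invalid".
        std::cout << vk::to_string( vk::SubpassDescriptionFlags() ) << "\n";
        return 0;
    }
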
diff --git a/samples/thirdparty/vulkan-1.0.49.0/lib/linux64/libvulkan.so b/samples/thirdparty/vulkan-1.0.49.0/lib/linux64/libvulkan.so
new file mode 100644
index 0000000..ae4814b
--- /dev/null
+++ b/samples/thirdparty/vulkan-1.0.49.0/lib/linux64/libvulkan.so
Binary files differ
diff --git a/samples/thirdparty/vulkan-1.0.49.0/lib/win32/vulkan-1.lib b/samples/thirdparty/vulkan-1.0.49.0/lib/win32/vulkan-1.lib
new file mode 100644
index 0000000..ffbe23e
--- /dev/null
+++ b/samples/thirdparty/vulkan-1.0.49.0/lib/win32/vulkan-1.lib
Binary files differ
diff --git a/samples/thirdparty/vulkan-1.0.49.0/lib/win64/vulkan-1.lib b/samples/thirdparty/vulkan-1.0.49.0/lib/win64/vulkan-1.lib
new file mode 100644
index 0000000..50aba00
--- /dev/null
+++ b/samples/thirdparty/vulkan-1.0.49.0/lib/win64/vulkan-1.lib
Binary files differ
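
The hmderrors_public.cpp hunks below add English descriptions and symbol names for the init errors introduced in recent releases (VR Monitor, VR Dashboard, VR Home, and compositor screenshot and device-creation failures). Applications generally see these strings through the public vr::VR_GetVRInitErrorAsEnglishDescription and vr::VR_GetVRInitErrorAsSymbol helpers, so no application-side change should be needed. A minimal sketch of reporting an init failure, assuming only the standard openvr.h entry points:

    #include <cstdio>
    #include <openvr.h>

    int main()
    {
        vr::EVRInitError err = vr::VRInitError_None;
        vr::IVRSystem *pSystem = vr::VR_Init( &err, vr::VRApplication_Scene );
        if ( !pSystem )
        {
            // Prints, e.g., "VR Dashboard Not Found (133)" alongside its enum symbol.
            std::printf( "VR_Init failed: %s [%s]\n",
                vr::VR_GetVRInitErrorAsEnglishDescription( err ),
                vr::VR_GetVRInitErrorAsSymbol( err ) );
            return 1;
        }
        vr::VR_Shutdown();
        return 0;
    }
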
diff --git a/src/vrcommon/hmderrors_public.cpp b/src/vrcommon/hmderrors_public.cpp
index 5d9ecbc..bc1ad2b 100644
--- a/src/vrcommon/hmderrors_public.cpp
+++ b/src/vrcommon/hmderrors_public.cpp
@@ -42,6 +42,16 @@ const char *GetEnglishStringForHmdError( vr::EVRInitError eError )
case VRInitError_Init_Internal: return "vrserver internal error (124)";
case VRInitError_Init_HmdDriverIdIsNone: return "Hmd DriverId is invalid (125)";
case VRInitError_Init_HmdNotFoundPresenceFailed: return "Hmd Not Found Presence Failed (126)";
+ case VRInitError_Init_VRMonitorNotFound: return "VR Monitor Not Found (127)";
+ case VRInitError_Init_VRMonitorStartupFailed: return "VR Monitor startup failed (128)";
+ case VRInitError_Init_LowPowerWatchdogNotSupported: return "Low Power Watchdog Not Supported (129)";
+ case VRInitError_Init_InvalidApplicationType: return "Invalid Application Type (130)";
+ case VRInitError_Init_NotAvailableToWatchdogApps: return "Not available to watchdog apps (131)";
+ case VRInitError_Init_WatchdogDisabledInSettings: return "Watchdog disabled in settings (132)";
+ case VRInitError_Init_VRDashboardNotFound: return "VR Dashboard Not Found (133)";
+ case VRInitError_Init_VRDashboardStartupFailed: return "VR Dashboard startup failed (134)";
+ case VRInitError_Init_VRHomeNotFound: return "VR Home Not Found (135)";
+ case VRInitError_Init_VRHomeStartupFailed: return "VR Home startup failed (136)";
case VRInitError_Driver_Failed: return "Driver Failed (200)";
case VRInitError_Driver_Unknown: return "Driver Not Known (201)";
@@ -71,6 +81,8 @@ const char *GetEnglishStringForHmdError( vr::EVRInitError eError )
case VRInitError_Compositor_D3D11HardwareRequired: return "Compositor failed to find DX11 hardware (401)";
case VRInitError_Compositor_FirmwareRequiresUpdate: return "Compositor requires mandatory firmware update (402)";
case VRInitError_Compositor_OverlayInitFailed: return "Compositor initialization succeeded, but overlay init failed (403)";
+ case VRInitError_Compositor_ScreenshotsInitFailed: return "Compositor initialization succeeded, but screenshot init failed (404)";
+ case VRInitError_Compositor_UnableToCreateDevice: return "Compositor unable to create graphics device (405)";
// Oculus
case VRInitError_VendorSpecific_UnableToConnectToOculusRuntime: return "Unable to connect to Oculus Runtime (1000)";
@@ -141,6 +153,10 @@ const char *GetIDForVRInitError( vr::EVRInitError eError )
RETURN_ENUM_AS_STRING( VRInitError_Init_InvalidApplicationType );
RETURN_ENUM_AS_STRING( VRInitError_Init_NotAvailableToWatchdogApps );
RETURN_ENUM_AS_STRING( VRInitError_Init_WatchdogDisabledInSettings );
+ RETURN_ENUM_AS_STRING( VRInitError_Init_VRDashboardNotFound );
+ RETURN_ENUM_AS_STRING( VRInitError_Init_VRDashboardStartupFailed );
+ RETURN_ENUM_AS_STRING( VRInitError_Init_VRHomeNotFound );
+ RETURN_ENUM_AS_STRING( VRInitError_Init_VRHomeStartupFailed );
RETURN_ENUM_AS_STRING( VRInitError_Init_HmdDriverIdIsNone );
RETURN_ENUM_AS_STRING( VRInitError_Init_HmdNotFoundPresenceFailed );
@@ -173,6 +189,8 @@ const char *GetIDForVRInitError( vr::EVRInitError eError )
RETURN_ENUM_AS_STRING( VRInitError_Compositor_D3D11HardwareRequired );
RETURN_ENUM_AS_STRING( VRInitError_Compositor_FirmwareRequiresUpdate );
RETURN_ENUM_AS_STRING( VRInitError_Compositor_OverlayInitFailed );
+ RETURN_ENUM_AS_STRING( VRInitError_Compositor_ScreenshotsInitFailed );
+ RETURN_ENUM_AS_STRING( VRInitError_Compositor_UnableToCreateDevice );
// Oculus
RETURN_ENUM_AS_STRING( VRInitError_VendorSpecific_UnableToConnectToOculusRuntime);