diff options
author | ionescu007 <aionescu+git@gmail.com> | 2016-09-03 18:50:58 +0300 |
---|---|---|
committer | ionescu007 <aionescu+git@gmail.com> | 2016-09-03 18:50:58 +0300 |
commit | 84c5f91b4c2c86438dfa703f514c5dbf05eddf03 (patch) | |
tree | 9cc2c578a70e8add2618a74f70a6eccb3d15bd4c | |
parent | d580e2e5b8bac8b814927242ce773f366db62fde (diff) |
Support VMware/non-EPT systems. More portability support for non-NT systems. Refactoring. Reject NULL/LDT selectors when building VMCS.
Support systems without 1GB HugePage EPT support (such as Vmware) by
simply not enabling EPT for them.
Refactor header files and some definitions again to support non-NT.
Deallocation routines on some platforms need to have a size. Refactor
alloc and free so this is easy to maintain portably.
Make ShvVmxLaunchOnVp return a status code instead of VOID, so we can
actually know if launch failed instead of just relying on CPUID (which,
if the hypervisor was already loaded, misidentified that a second load
failed, and instead assumed success).
Refactor the launch/off-on-failure code into a ShvVmxLaunch routine.
Fix some comments to reflect the new portability layer/refactoring.
Factor out OS-specific load/unload logic
(ShvOsPrepareProcessor/ShvOsUnprepareProcessor)
Fix ShvUtilConvertGdtEntry to reject selectors with the TI bit set, or
NULL selectors, and correctly mark them as unusable.
-rw-r--r-- | ntint.h | 1 | ||||
-rw-r--r-- | shv.h | 111 | ||||
-rw-r--r-- | shv_x.h | 80 | ||||
-rw-r--r-- | shvos.c | 32 | ||||
-rw-r--r-- | shvutil.c | 19 | ||||
-rw-r--r-- | shvvmx.c | 87 | ||||
-rw-r--r-- | shvvmxhv.c | 42 | ||||
-rw-r--r-- | shvvp.c | 49 | ||||
-rw-r--r-- | vmx.h | 40 |
9 files changed, 304 insertions, 157 deletions
@@ -27,7 +27,6 @@ Environment: #define DECLSPEC_NORETURN __declspec(noreturn) #define FORCEINLINE __forceinline #define C_ASSERT(x) static_assert(x, "Error") -#define FIELD_OFFSET offsetof #define UNREFERENCED_PARAMETER(x) (x) #ifndef TRUE @@ -24,58 +24,14 @@ Environment: #pragma warning(disable:4201) #pragma warning(disable:4214) -#ifdef _WIN64 +#ifndef __BASE_H__ #include <basetsd.h> #endif +#define _INC_MALLOC #include <intrin.h> #include "ntint.h" -#include "vmx.h" #include "shv_x.h" -typedef struct _SHV_SPECIAL_REGISTERS -{ - UINT64 Cr0; - UINT64 Cr3; - UINT64 Cr4; - UINT64 MsrGsBase; - UINT16 Tr; - UINT16 Ldtr; - UINT64 DebugControl; - UINT64 KernelDr7; - KDESCRIPTOR Idtr; - KDESCRIPTOR Gdtr; -} SHV_SPECIAL_REGISTERS, *PSHV_SPECIAL_REGISTERS; - -typedef struct _SHV_VP_DATA -{ - union - { - DECLSPEC_ALIGN(PAGE_SIZE) UINT8 ShvStackLimit[KERNEL_STACK_SIZE]; - struct - { - SHV_SPECIAL_REGISTERS SpecialRegisters; - CONTEXT ContextFrame; - UINT64 SystemDirectoryTableBase; - LARGE_INTEGER MsrData[17]; - UINT64 VmxOnPhysicalAddress; - UINT64 VmcsPhysicalAddress; - UINT64 MsrBitmapPhysicalAddress; - UINT64 EptPml4PhysicalAddress; - }; - }; - - DECLSPEC_ALIGN(PAGE_SIZE) UINT8 MsrBitmap[PAGE_SIZE]; - DECLSPEC_ALIGN(PAGE_SIZE) VMX_EPML4E Epml4[PML4E_ENTRY_COUNT]; - DECLSPEC_ALIGN(PAGE_SIZE) VMX_HUGE_PDPTE Epdpt[PDPTE_ENTRY_COUNT]; - - DECLSPEC_ALIGN(PAGE_SIZE) VMX_VMCS VmxOn; - DECLSPEC_ALIGN(PAGE_SIZE) VMX_VMCS Vmcs; -} SHV_VP_DATA, *PSHV_VP_DATA; - -C_ASSERT(sizeof(SHV_VP_DATA) == (KERNEL_STACK_SIZE + 5 * PAGE_SIZE)); -C_ASSERT((FIELD_OFFSET(SHV_VP_DATA, Epml4) % PAGE_SIZE) == 0); -C_ASSERT((FIELD_OFFSET(SHV_VP_DATA, Epdpt) % PAGE_SIZE) == 0); - typedef struct _SHV_VP_STATE { PCONTEXT VpRegs; @@ -86,28 +42,23 @@ typedef struct _SHV_VP_STATE UINT8 ExitVm; } SHV_VP_STATE, *PSHV_VP_STATE; +typedef struct _SHV_CALLBACK_CONTEXT +{ + UINT64 Cr3; + volatile long InitCount; + INT32 FailedCpu; + INT32 FailureStatus; +} SHV_CALLBACK_CONTEXT, *PSHV_CALLBACK_CONTEXT; + 
+SHV_CPU_CALLBACK ShvVpLoadCallback; +SHV_CPU_CALLBACK ShvVpUnloadCallback; VOID ShvVmxEntry ( VOID ); -VOID -_sldt ( - _In_ UINT16* Ldtr - ); - -VOID -_str ( - _In_ UINT16* Tr - ); - -VOID -__lgdt ( - _In_ VOID* Gdtr - ); - -VOID +INT32 ShvVmxLaunchOnVp ( _In_ PSHV_VP_DATA VpData ); @@ -126,7 +77,18 @@ ShvUtilAdjustMsr ( ); PSHV_VP_DATA -ShvVpAllocateGlobalData ( +ShvVpAllocateData ( + _In_ UINT32 CpuCount + ); + +VOID +ShvVpFreeData ( + _In_ PSHV_VP_DATA Data, + _In_ UINT32 CpuCount + ); + +INT32 +ShvVmxLaunch ( VOID ); @@ -161,6 +123,16 @@ ShvOsCaptureContext ( _In_ PCONTEXT ContextRecord ); +VOID +ShvOsUnprepareProcessor ( + _In_ PSHV_VP_DATA VpData + ); + +VOID +ShvOsPrepareProcessor ( + _In_ PSHV_VP_DATA VpData + ); + INT32 ShvOsGetActiveProcessorCount ( VOID @@ -173,7 +145,8 @@ ShvOsGetCurrentProcessorNumber ( VOID ShvOsFreeContiguousAlignedMemory ( - _In_ VOID* BaseAddress + _In_ VOID* BaseAddress, + _In_ size_t Size ); VOID* @@ -186,11 +159,20 @@ ShvOsGetPhysicalAddress ( _In_ VOID* BaseAddress ); +#ifndef __BASE_H__ VOID ShvOsDebugPrint ( _In_ const char* Format, ... ); +#else +VOID +ShvOsDebugPrintWide ( + _In_ const CHAR16* Format, + ... + ); +#define ShvOsDebugPrint(format, ...) 
ShvOsDebugPrintWide(_CRT_WIDE(format), __VA_ARGS__) +#endif VOID ShvOsRunCallbackOnProcessors ( @@ -198,6 +180,5 @@ ShvOsRunCallbackOnProcessors ( _In_opt_ VOID* Context ); - extern PSHV_VP_DATA* ShvGlobalData; @@ -23,26 +23,85 @@ Environment: #pragma once +#include "vmx.h" + #define SHV_STATUS_SUCCESS 0 #define SHV_STATUS_NOT_AVAILABLE -1 #define SHV_STATUS_NO_RESOURCES -2 #define SHV_STATUS_NOT_PRESENT -3 -typedef struct _SHV_CALLBACK_CONTEXT -{ - UINT64 Cr3; - volatile long InitCount; - INT32 FailedCpu; - INT32 FailureStatus; -} SHV_CALLBACK_CONTEXT, *PSHV_CALLBACK_CONTEXT; +struct _SHV_CALLBACK_CONTEXT; typedef void SHV_CPU_CALLBACK ( - _In_ PSHV_CALLBACK_CONTEXT Context + struct _SHV_CALLBACK_CONTEXT* Context ); typedef SHV_CPU_CALLBACK *PSHV_CPU_CALLBACK; +typedef struct _SHV_SPECIAL_REGISTERS +{ + UINT64 Cr0; + UINT64 Cr3; + UINT64 Cr4; + UINT64 MsrGsBase; + UINT16 Tr; + UINT16 Ldtr; + UINT64 DebugControl; + UINT64 KernelDr7; + KDESCRIPTOR Idtr; + KDESCRIPTOR Gdtr; +} SHV_SPECIAL_REGISTERS, *PSHV_SPECIAL_REGISTERS; + +typedef struct _SHV_VP_DATA +{ + union + { + DECLSPEC_ALIGN(PAGE_SIZE) UINT8 ShvStackLimit[KERNEL_STACK_SIZE]; + struct + { + SHV_SPECIAL_REGISTERS SpecialRegisters; + CONTEXT ContextFrame; + UINT64 SystemDirectoryTableBase; + LARGE_INTEGER MsrData[17]; + UINT64 VmxOnPhysicalAddress; + UINT64 VmcsPhysicalAddress; + UINT64 MsrBitmapPhysicalAddress; + UINT64 EptPml4PhysicalAddress; + UINT32 EptControls; + }; + }; + + DECLSPEC_ALIGN(PAGE_SIZE) UINT8 MsrBitmap[PAGE_SIZE]; + DECLSPEC_ALIGN(PAGE_SIZE) VMX_EPML4E Epml4[PML4E_ENTRY_COUNT]; + DECLSPEC_ALIGN(PAGE_SIZE) VMX_HUGE_PDPTE Epdpt[PDPTE_ENTRY_COUNT]; + + DECLSPEC_ALIGN(PAGE_SIZE) VMX_VMCS VmxOn; + DECLSPEC_ALIGN(PAGE_SIZE) VMX_VMCS Vmcs; +} SHV_VP_DATA, *PSHV_VP_DATA; + +C_ASSERT(sizeof(SHV_VP_DATA) == (KERNEL_STACK_SIZE + 5 * PAGE_SIZE)); + +VOID +_sldt ( + _In_ UINT16* Ldtr + ); + +VOID +_ltr ( + _In_ UINT16 Tr + ); + +VOID +_str ( + _In_ UINT16* Tr + ); + +VOID +__lgdt ( + _In_ VOID* Gdtr + 
); + INT32 ShvLoad ( VOID @@ -51,7 +110,4 @@ ShvLoad ( VOID ShvUnload ( VOID - ); - -SHV_CPU_CALLBACK ShvVpLoadCallback; -SHV_CPU_CALLBACK ShvVpUnloadCallback; + );
\ No newline at end of file @@ -62,12 +62,11 @@ RtlRestoreContext ( typedef struct _SHV_DPC_CONTEXT { PSHV_CPU_CALLBACK Routine; - PSHV_CALLBACK_CONTEXT Context; + struct _SHV_CALLBACK_CONTEXT* Context; } SHV_DPC_CONTEXT, *PSHV_DPC_CONTEXT; #define KGDT64_R3_DATA 0x28 #define KGDT64_R3_CMTEB 0x50 -#define RPL_MASK 0x03 VOID ShvVmxCleanup ( @@ -153,6 +152,35 @@ ShvOsDpcRoutine ( } VOID +ShvOsPrepareProcessor ( + _In_ PSHV_VP_DATA VpData + ) +{ + // + // Nothing to do on NT + // + UNREFERENCED_PARAMETER(VpData); + NOTHING; +} + +VOID +ShvOsUnprepareProcessor ( + _In_ PSHV_VP_DATA VpData + ) +{ + // + // When running in VMX root mode, the processor will set limits of the + // GDT and IDT to 0xFFFF (notice that there are no Host VMCS fields to + // set these values). This causes problems with PatchGuard, which will + // believe that the GDTR and IDTR have been modified by malware, and + // eventually crash the system. Since we know what the original state + // of the GDTR and IDTR was, simply restore it now. + // + __lgdt(&VpData->SpecialRegisters.Gdtr.Limit); + __lidt(&VpData->SpecialRegisters.Idtr.Limit); +} + +VOID ShvOsFreeContiguousAlignedMemory ( _In_ PVOID BaseAddress ) @@ -32,9 +32,20 @@ ShvUtilConvertGdtEntry ( PKGDTENTRY64 gdtEntry; // - // Read the GDT entry at the given selector, masking out the RPL bits. x64 - // Windows does not use an LDT for these selectors in kernel, so the TI bit - // should never be set. + // Reject LDT or NULL entries + // + if ((Selector == 0) || + (Selector & SELECTOR_TABLE_INDEX) != 0) + { + VmxGdtEntry->Limit = VmxGdtEntry->AccessRights = 0; + VmxGdtEntry->Base = 0; + VmxGdtEntry->Selector = 0; + VmxGdtEntry->Bits.Unusable = TRUE; + return; + } + + // + // Read the GDT entry at the given selector, masking out the RPL bits. 
// gdtEntry = (PKGDTENTRY64)((uintptr_t)GdtBase + (Selector & ~RPL_MASK)); @@ -59,7 +70,7 @@ ShvUtilConvertGdtEntry ( // VmxGdtEntry->Base = ((gdtEntry->Bytes.BaseHigh << 24) | (gdtEntry->Bytes.BaseMiddle << 16) | - (gdtEntry->BaseLow)) & ULONG_MAX; + (gdtEntry->BaseLow)) & 0xFFFFFFFF; VmxGdtEntry->Base |= ((gdtEntry->Bits.Type & 0x10) == 0) ? ((uintptr_t)gdtEntry->BaseUpper << 32) : 0; @@ -87,11 +87,14 @@ ShvVmxEnterRootModeOnVp ( // // Ensure that EPT is available with the needed features SimpleVisor uses // - if (((VpData->MsrData[12].QuadPart & VMX_EPT_PAGE_WALK_4_BIT) == 0) || - ((VpData->MsrData[12].QuadPart & VMX_EPTP_WB_BIT) == 0) || - ((VpData->MsrData[12].QuadPart & VMX_EPT_1GB_PAGE_BIT) == 0)) + if (((VpData->MsrData[12].QuadPart & VMX_EPT_PAGE_WALK_4_BIT) != 0) && + ((VpData->MsrData[12].QuadPart & VMX_EPTP_WB_BIT) != 0) && + ((VpData->MsrData[12].QuadPart & VMX_EPT_1GB_PAGE_BIT) != 0)) { - return FALSE; + // + // Enable EPT if these features are supported + // + VpData->EptControls = SECONDARY_EXEC_ENABLE_EPT | SECONDARY_EXEC_ENABLE_VPID; } // @@ -172,22 +175,28 @@ ShvVmxSetupVmcsForVp ( __vmx_vmwrite(VMCS_LINK_POINTER, ~0ULL); // - // Configure the EPTP + // Enable EPT features if supported // - vmxEptp.AsUlonglong = 0; - vmxEptp.PageWalkLength = 3; - vmxEptp.Type = MTRR_TYPE_WB; - vmxEptp.PageFrameNumber = VpData->EptPml4PhysicalAddress / PAGE_SIZE; + if (VpData->EptControls != 0) + { + // + // Configure the EPTP + // + vmxEptp.AsUlonglong = 0; + vmxEptp.PageWalkLength = 3; + vmxEptp.Type = MTRR_TYPE_WB; + vmxEptp.PageFrameNumber = VpData->EptPml4PhysicalAddress / PAGE_SIZE; - // - // Load EPT Root Pointer - // - __vmx_vmwrite(EPT_POINTER, vmxEptp.AsUlonglong); + // + // Load EPT Root Pointer + // + __vmx_vmwrite(EPT_POINTER, vmxEptp.AsUlonglong); - // - // Set VPID to one - // - __vmx_vmwrite(VIRTUAL_PROCESSOR_ID, 1); + // + // Set VPID to one + // + __vmx_vmwrite(VIRTUAL_PROCESSOR_ID, 1); + } // // Load the MSR bitmap. 
Unlike other bitmaps, not having an MSR bitmap will @@ -208,9 +217,7 @@ ShvVmxSetupVmcsForVp ( ShvUtilAdjustMsr(VpData->MsrData[11], SECONDARY_EXEC_ENABLE_RDTSCP | SECONDARY_EXEC_XSAVES | - SECONDARY_EXEC_ENABLE_EPT | - SECONDARY_EXEC_ENABLE_VPID - )); + VpData->EptControls)); // // Enable no pin-based options ourselves, but there may be some required by @@ -254,7 +261,7 @@ ShvVmxSetupVmcsForVp ( __vmx_vmwrite(GUEST_CS_AR_BYTES, vmxGdtEntry.AccessRights); __vmx_vmwrite(GUEST_CS_BASE, vmxGdtEntry.Base); __vmx_vmwrite(HOST_CS_SELECTOR, context->SegCs & ~RPL_MASK); - + // // Load the SS Segment (Ring 0 Data) // @@ -264,7 +271,7 @@ ShvVmxSetupVmcsForVp ( __vmx_vmwrite(GUEST_SS_AR_BYTES, vmxGdtEntry.AccessRights); __vmx_vmwrite(GUEST_SS_BASE, vmxGdtEntry.Base); __vmx_vmwrite(HOST_SS_SELECTOR, context->SegSs & ~RPL_MASK); - + // // Load the DS Segment (Ring 3 Data) // @@ -306,7 +313,7 @@ ShvVmxSetupVmcsForVp ( __vmx_vmwrite(GUEST_GS_BASE, state->MsrGsBase); __vmx_vmwrite(HOST_GS_BASE, state->MsrGsBase); __vmx_vmwrite(HOST_GS_SELECTOR, context->SegGs & ~RPL_MASK); - + // // Load the Task Register (Ring 0 TSS) // @@ -435,7 +442,7 @@ ShvVmxProbe ( return TRUE; } -VOID +INT32 ShvVmxLaunchOnVp ( _In_ PSHV_VP_DATA VpData ) @@ -458,26 +465,24 @@ ShvVmxLaunchOnVp ( // // Attempt to enter VMX root mode on this processor. // - if (ShvVmxEnterRootModeOnVp(VpData)) + if (ShvVmxEnterRootModeOnVp(VpData) == FALSE) { // - // Initialize the VMCS, both guest and host state. + // We could not enter VMX Root mode // - ShvVmxSetupVmcsForVp(VpData); + return SHV_STATUS_NOT_AVAILABLE; + } - // - // Launch the VMCS, based on the guest data that was loaded into the - // various VMCS fields by ShvVmxSetupVmcsForVp. This will cause the - // processor to jump to the return address of RtlCaptureContext in - // ShvVpInitialize, which called us. - // - __vmx_vmlaunch(); + // + // Initialize the VMCS, both guest and host state. 
+ // + ShvVmxSetupVmcsForVp(VpData); - // - // If we got here, either VMCS setup failed in some way, or the launch - // did not proceed as planned. Because VmxEnabled is not set to 1, this - // will correctly register as a failure. - // - __vmx_off(); - } + // + // Launch the VMCS, based on the guest data that was loaded into the + // various VMCS fields by ShvVmxSetupVmcsForVp. This will cause the + // processor to jump to ShvVpRestoreAfterLaunch on success, or return + // back to the caller on failure. + // + return ShvVmxLaunch(); } @@ -54,6 +54,31 @@ ShvVmxRead ( return FieldData; } +INT32 +ShvVmxLaunch ( + VOID + ) +{ + INT32 failureCode; + + // + // Launch the VMCS + // + __vmx_vmlaunch(); + + // + // If we got here, either VMCS setup failed in some way, or the launch + // did not proceed as planned. + // + failureCode = (INT32)ShvVmxRead(VM_INSTRUCTION_ERROR); + __vmx_off(); + + // + // Return the error back to the caller + // + return failureCode; +} + VOID ShvVmxHandleInvd ( VOID @@ -132,6 +157,7 @@ ShvVmxHandleXsetbv ( // // Simply issue the XSETBV instruction on the native logical processor. // + _xsetbv((UINT32)VpState->VpRegs->Rcx, VpState->VpRegs->Rdx << 32 | VpState->VpRegs->Rax); @@ -211,7 +237,7 @@ ShvVmxEntryHandler ( PSHV_VP_DATA vpData; // - // Because we had to use RCX when calling RtlCaptureContext, its true value + // Because we had to use RCX when calling ShvOsCaptureContext, its value // was actually pushed on the stack right before the call. Go dig into the // stack to find it, and overwrite the bogus value that's there now. // @@ -254,19 +280,13 @@ ShvVmxEntryHandler ( Context->Rbx = (uintptr_t)vpData & 0xFFFFFFFF; // - // When running in VMX root mode, the processor will set limits of the - // GDT and IDT to 0xFFFF (notice that there are no Host VMCS fields to - // set these values). This causes problems with PatchGuard, which will - // believe that the GDTR and IDTR have been modified by malware, and - // eventually crash the system. 
Since we know what the original state - // of the GDTR and IDTR was, simply restore it now. + // Perform any OS-specific CPU uninitialization work // - __lgdt(&vpData->SpecialRegisters.Gdtr.Limit); - __lidt(&vpData->SpecialRegisters.Idtr.Limit); + ShvOsUnprepareProcessor(vpData); // - // Our DPC routine may have interrupted an arbitrary user process, and - // not an idle or system thread as usually happens on an idle system. + // Our callback routine may have interrupted an arbitrary user process, + // and therefore not a thread running with a systemwide page directory. // Therefore if we return back to the original caller after turning off // VMX, it will keep our current "host" CR3 value which we set on entry // to the PML4 of the SYSTEM process. We want to return back with the @@ -73,7 +73,7 @@ ShvCaptureSpecialRegisters ( __sidt(&SpecialRegisters->Idtr.Limit); // - // Use assembly to get these two + // Use OS-specific functions to get these two // _str(&SpecialRegisters->Tr); _sldt(&SpecialRegisters->Ldtr); @@ -109,12 +109,16 @@ ShvVpRestoreAfterLaunch ( ShvOsRestoreContext(&vpData->ContextFrame); } -VOID +INT32 ShvVpInitialize ( _In_ PSHV_VP_DATA Data ) { // + // Prepare any OS-specific CPU data + // + ShvOsPrepareProcessor(Data); + // Read the special control registers for this processor // Note: KeSaveStateForHibernate(&Data->HostState) can be used as a Windows // specific undocumented function that can also get this data. @@ -136,8 +140,13 @@ ShvVpInitialize ( // If the AC bit is not set in EFLAGS, it means that we have not yet // launched the VM. Attempt to initialize VMX on this processor. 
// - ShvVmxLaunchOnVp(Data); + return ShvVmxLaunchOnVp(Data); } + + // + // IF we got here, the hypervisor is running :-) + // + return SHV_STATUS_SUCCESS; } VOID @@ -163,26 +172,27 @@ ShvVpUnloadCallback ( vpData = (PSHV_VP_DATA)((UINT64)cpuInfo[0] << 32 | (UINT32)cpuInfo[1]); if (vpData != NULL) { - ShvOsFreeContiguousAlignedMemory(vpData); + ShvOsFreeContiguousAlignedMemory(vpData, sizeof(*vpData)); } } PSHV_VP_DATA ShvVpAllocateData ( - VOID + _In_ UINT32 CpuCount ) { PSHV_VP_DATA data; // // Allocate a contiguous chunk of RAM to back this allocation - data = ShvOsAllocateContigousAlignedMemory(sizeof(*data)); + // + data = ShvOsAllocateContigousAlignedMemory(sizeof(*data) * CpuCount); if (data != NULL) { // // Zero out the entire data region // - __stosq((UINT64*)data, 0, sizeof(*data) / sizeof(UINT64)); + __stosq((UINT64*)data, 0, (sizeof(*data) / sizeof(UINT64)) * CpuCount); } // @@ -192,6 +202,18 @@ ShvVpAllocateData ( } VOID +ShvVpFreeData ( + _In_ PSHV_VP_DATA Data, + _In_ UINT32 CpuCount + ) +{ + // + // Free the contiguous chunk of RAM + // + ShvOsFreeContiguousAlignedMemory(Data, sizeof(*Data) * CpuCount); +} + +VOID ShvVpLoadCallback ( _In_ PSHV_CALLBACK_CONTEXT Context ) @@ -212,7 +234,7 @@ ShvVpLoadCallback ( // // Allocate the per-VP data for this logical processor // - vpData = ShvVpAllocateData(); + vpData = ShvVpAllocateData(1); if (vpData == NULL) { status = SHV_STATUS_NO_RESOURCES; @@ -229,7 +251,14 @@ ShvVpLoadCallback ( // // Initialize the virtual processor // - ShvVpInitialize(vpData); + status = ShvVpInitialize(vpData); + if (status != SHV_STATUS_SUCCESS) + { + // + // Bail out + // + goto Failure; + } // // Our hypervisor should now be seen as present on this LP, as the SHV @@ -240,7 +269,7 @@ ShvVpLoadCallback ( // // Free the per-processor data // - ShvOsFreeContiguousAlignedMemory(vpData); + ShvVpFreeData(vpData, 1); status = SHV_STATUS_NOT_PRESENT; goto Failure; } @@ -24,14 +24,18 @@ Environment: #pragma warning(disable:4201) #pragma 
warning(disable:4214) -#define DPL_USER 3 -#define DPL_SYSTEM 0 -#define MSR_GS_BASE 0xC0000101 -#define MSR_DEBUG_CTL 0x1D9 -#define RPL_MASK 3 -#define MTRR_TYPE_WB 6 -#define EFLAGS_ALIGN_CHECK 0x40000 -#define PAGE_SIZE 4096 +#define DPL_USER 3 +#define DPL_SYSTEM 0 +#define MSR_GS_BASE 0xC0000101 +#define MSR_DEBUG_CTL 0x1D9 +#define RPL_MASK 3 +#define SELECTOR_TABLE_INDEX 0x04 +#define MTRR_TYPE_WB 6 +#define EFLAGS_ALIGN_CHECK 0x40000 +#define AMD64_TSS 9 +#ifndef PAGE_SIZE +#define PAGE_SIZE 4096 +#endif typedef struct _KDESCRIPTOR { @@ -79,6 +83,20 @@ typedef union _KGDTENTRY64 }; } KGDTENTRY64, *PKGDTENTRY64; +#pragma pack(push,4) +typedef struct _KTSS64 +{ + UINT32 Reserved0; + UINT64 Rsp0; + UINT64 Rsp1; + UINT64 Rsp2; + UINT64 Ist[8]; + UINT64 Reserved1; + UINT16 Reserved2; + UINT16 IoMapBase; +} KTSS64, *PKTSS64; +#pragma pack(pop) + #define CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004 #define CPU_BASED_USE_TSC_OFFSETING 0x00000008 #define CPU_BASED_HLT_EXITING 0x00000080 @@ -410,7 +428,7 @@ enum vmcs_field { typedef struct _VMX_GDTENTRY64 { - uintptr_t Base; + UINT64 Base; UINT32 Limit; union { @@ -511,8 +529,8 @@ typedef struct _VMX_HUGE_PDPTE }; } VMX_HUGE_PDPTE, *PVMX_HUGE_PDPTE; -C_ASSERT(sizeof(VMX_EPTP) == sizeof(UINT64)); -C_ASSERT(sizeof(VMX_EPML4E) == sizeof(UINT64)); +static_assert(sizeof(VMX_EPTP) == sizeof(UINT64), "EPTP Size Mismatch"); +static_assert(sizeof(VMX_EPML4E) == sizeof(UINT64), "EPML4E Size Mismatch"); #define PML4E_ENTRY_COUNT 512 #define PDPTE_ENTRY_COUNT 512 |