mirror-ac/driver/hv.c

#include "hv.h"
#include <intrin.h>
#include "common.h"
#define TOTAL_ITERATION_COUNT 20
#define IA32_APERF_MSR 0x000000E8
/*
 * TODO: Perform the test in a loop and average out the delta, then compare it
 * against an instruction such as FYL2XP1 (source: secret.club), whose average
 * execution time is slightly higher than that of CPUID. If the average time
 * for the CPUID instruction is higher than the average time for the FYL2XP1
 * instruction, it is a dead giveaway that we are running on a virtualized
 * system. (An illustrative sketch of the averaged loop is included below the
 * function.)
 *
 * reference: https://secret.club/2020/01/12/battleye-hypervisor-detection.html
 */
INT APERFMsrTimingCheck()
{
    KAFFINITY new_affinity = { 0 };
    KAFFINITY old_affinity = { 0 };
    ULONG64 old_irql;
    INT cpuid_result[ 4 ];

    /*
     * The first thing we do is lock the current thread to the logical
     * processor it is currently executing on.
     */
    new_affinity = ( KAFFINITY )1 << KeGetCurrentProcessorNumber();
    old_affinity = KeSetSystemAffinityThreadEx( new_affinity );

    /*
     * Once we've locked our thread to the current core, we save the old IRQL
     * and raise to HIGH_LEVEL to ensure the chance our thread is preempted
     * by a thread with a higher IRQL is extremely low.
     */
    old_irql = __readcr8();
    __writecr8( HIGH_LEVEL );

    /*
     * Then we also disable interrupts, once again making sure our thread
     * is not preempted.
     */
    _disable();

    /*
     * Once our thread is ready for the test, we read the APERF counter from
     * its MSR and store it. We then execute a CPUID instruction whose result
     * we don't really care about, and immediately afterwards read the APERF
     * counter again and store it in a separate variable. The value is shifted
     * left by 32 bits, as in the routine described in the referenced article.
     */
    UINT64 aperf_before = __readmsr( IA32_APERF_MSR ) << 32;
    __cpuid( cpuid_result, 1 );
    UINT64 aperf_after = __readmsr( IA32_APERF_MSR ) << 32;

    /*
     * Once we have performed our test, we want to make sure we are not
     * hogging CPU time from other threads, so we reverse the initial
     * preparation process: we first enable interrupts, lower our IRQL to the
     * thread's previous IRQL from before it was raised, and then restore the
     * thread's affinity back to its original value.
     */
    _enable();
    __writecr8( old_irql );
    KeRevertToUserAffinityThreadEx( old_affinity );

    /*
     * The only thing left to do is calculate the change. On some VMs, such as
     * VMware, the APERF value will be 0, meaning the delta will be 0. This is
     * a dead giveaway that we are executing in a VM.
     */
    UINT64 aperf_delta = aperf_after - aperf_before;

    return aperf_delta == 0 ? TRUE : FALSE;
}
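
/*
 * Illustrative sketch only, not part of the original driver: one possible
 * shape for the averaged loop described in the TODO above.
 * APERFMsrTimingCheckAveraged is a hypothetical name and is not referenced
 * anywhere else; TOTAL_ITERATION_COUNT is reused from the define above. The
 * FYL2XP1 comparison from the TODO is omitted, since x64 MSVC has no inline
 * assembly and the x87 instruction would need a separate .asm helper.
 */
static INT APERFMsrTimingCheckAveraged( VOID )
{
    KAFFINITY new_affinity = ( KAFFINITY )1 << KeGetCurrentProcessorNumber();
    KAFFINITY old_affinity = KeSetSystemAffinityThreadEx( new_affinity );
    ULONG64 old_irql = __readcr8();
    INT cpuid_result[ 4 ];
    UINT64 total_delta = 0;

    __writecr8( HIGH_LEVEL );
    _disable();

    for ( UINT32 iteration = 0; iteration < TOTAL_ITERATION_COUNT; iteration++ )
    {
        /* Raw (unshifted) APERF reads are used here so the per-iteration
         * deltas can be meaningfully averaged. */
        UINT64 aperf_before = __readmsr( IA32_APERF_MSR );
        __cpuid( cpuid_result, 1 );
        UINT64 aperf_after = __readmsr( IA32_APERF_MSR );

        total_delta += aperf_after - aperf_before;
    }

    _enable();
    __writecr8( old_irql );
    KeRevertToUserAffinityThreadEx( old_affinity );

    /* An average delta of zero means the counter never advanced across the
     * CPUID executions, which we treat as a virtualization indicator. */
    return ( total_delta / TOTAL_ITERATION_COUNT ) == 0 ? TRUE : FALSE;
}
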
NTSTATUS PerformVirtualizationDetection(
    _In_ PIRP Irp
)
{
    NTSTATUS status = STATUS_SUCCESS;
    HYPERVISOR_DETECTION_REPORT report;

    report.aperf_msr_timing_check = APERFMsrTimingCheck();
    report.invd_emulation_check = TestINVDEmulation();

    Irp->IoStatus.Information = sizeof( HYPERVISOR_DETECTION_REPORT );

    RtlCopyMemory(
        Irp->AssociatedIrp.SystemBuffer,
        &report,
        sizeof( HYPERVISOR_DETECTION_REPORT )
    );

    return status;
}
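
/*
 * For reference, a user-mode client could retrieve the report roughly as
 * follows. This is a hypothetical usage sketch: the device path and the
 * IOCTL_PERFORM_VIRTUALIZATION_CHECK code are assumptions for illustration
 * and are not defined in this file.
 *
 *     HYPERVISOR_DETECTION_REPORT report = { 0 };
 *     DWORD bytes_returned = 0;
 *
 *     HANDLE device = CreateFileW( L"\\\\.\\ExampleAcDevice",
 *                                  GENERIC_READ | GENERIC_WRITE, 0, NULL,
 *                                  OPEN_EXISTING, 0, NULL );
 *
 *     DeviceIoControl( device, IOCTL_PERFORM_VIRTUALIZATION_CHECK, NULL, 0,
 *                      &report, sizeof( report ), &bytes_returned, NULL );
 *
 *     // report.aperf_msr_timing_check and report.invd_emulation_check now
 *     // hold TRUE or FALSE for each check.
 */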