2023-08-21 14:40:40 +02:00
|
|
|
#include "hv.h"
|
|
|
|
|
|
|
|
#include "common.h"
|
2024-08-01 06:21:53 +02:00
|
|
|
#include "imports.h"
|
2024-01-21 08:22:06 +01:00
|
|
|
#include "io.h"
|
2024-07-22 12:43:09 +02:00
|
|
|
#include "lib/stdlib.h"
|
|
|
|
|
2024-08-04 08:30:31 +02:00
|
|
|
#include <intrin.h>
|
|
|
|
|
2023-10-07 17:37:47 +02:00
|
|
|
#ifdef ALLOC_PRAGMA
|
2024-04-13 10:23:14 +02:00
|
|
|
# pragma alloc_text(PAGE, PerformVirtualizationDetection)
|
2023-10-07 17:37:47 +02:00
|
|
|
#endif
|
|
|
|
|
2023-08-21 14:40:40 +02:00
|
|
|
#define TOTAL_ITERATION_COUNT 20
|
|
|
|
|
|
|
|
/*
 * TODO: Perform the test in a loop and average the delta out, then compare it
 * to an instruction such as FYL2XP1 (source: secret.club) which has an average
 * execution time slightly higher than the CPUID instruction, then compare the
 * two. If the average time for the CPUID instruction is higher than the average
 * time for the FYL2XP1 instruction it is a dead giveaway we are running on a
 * virtualized system.
 *
 * reference: https://secret.club/2020/01/12/battleye-hypervisor-detection.html
 */
|
|
|
|
|
2024-04-13 06:40:51 +02:00
|
|
|
BOOLEAN
|
|
|
|
APERFMsrTimingCheck()
|
2023-08-21 14:40:40 +02:00
|
|
|
{
|
2024-04-13 10:23:14 +02:00
|
|
|
KAFFINITY new_affinity = {0};
|
|
|
|
KAFFINITY old_affinity = {0};
|
2024-08-01 06:21:53 +02:00
|
|
|
UINT64 old_irql = 0;
|
2024-08-04 07:15:37 +02:00
|
|
|
UINT64 aperf_delta = 0;
|
|
|
|
UINT64 aperf_before = 0;
|
|
|
|
UINT64 aperf_after = 0;
|
2024-08-01 06:21:53 +02:00
|
|
|
INT cpuid_result[4];
|
2024-04-13 10:23:14 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* First thing we do is we lock the current thread to the logical
|
|
|
|
* processor its executing on.
|
|
|
|
*/
|
|
|
|
new_affinity = (KAFFINITY)(1ull << KeGetCurrentProcessorNumber());
|
|
|
|
old_affinity = ImpKeSetSystemAffinityThreadEx(new_affinity);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Once we've locked our thread to the current core, we save the old
|
|
|
|
* irql and raise to HIGH_LEVEL to ensure the chance our thread is
|
|
|
|
* preempted by a thread with a higher IRQL is extremely low.
|
|
|
|
*/
|
|
|
|
old_irql = __readcr8();
|
|
|
|
__writecr8(HIGH_LEVEL);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Then we also disable interrupts, once again making sure our thread
|
|
|
|
* is not preempted.
|
|
|
|
*/
|
|
|
|
_disable();
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Once our thread is ready for the test, we read the APERF from the
|
|
|
|
* MSR register and store it. We then execute a CPUID instruction
|
|
|
|
* which we don't really care about and immediately after read the APERF
|
|
|
|
* counter once again and store it in a seperate variable.
|
|
|
|
*/
|
2024-08-04 07:15:37 +02:00
|
|
|
aperf_before = __readmsr(IA32_APERF_MSR) << 32;
|
2024-04-13 10:23:14 +02:00
|
|
|
__cpuid(cpuid_result, 1);
|
2024-08-04 07:15:37 +02:00
|
|
|
aperf_after = __readmsr(IA32_APERF_MSR) << 32;
|
2024-04-13 10:23:14 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Once we have performed our test, we want to make sure we are not
|
|
|
|
* hogging the cpu time from other threads, so we reverse the initial
|
|
|
|
* preparation process. i.e we first enable interrupts, lower our irql
|
|
|
|
* to the threads previous irql before it was raised and then restore
|
|
|
|
* the threads affinity back to its original affinity.
|
|
|
|
*/
|
|
|
|
_enable();
|
|
|
|
__writecr8(old_irql);
|
|
|
|
ImpKeRevertToUserAffinityThreadEx(old_affinity);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Now the only thing left to do is calculate the change. Now, on some
|
|
|
|
* VMs such as VMWARE the aperf value will be 0, meaning the change will
|
|
|
|
* be 0. This is a dead giveaway we are executing in a VM.
|
|
|
|
*/
|
2024-08-04 07:15:37 +02:00
|
|
|
aperf_delta = aperf_after - aperf_before;
|
2024-04-13 10:23:14 +02:00
|
|
|
|
|
|
|
return aperf_delta == 0 ? TRUE : FALSE;
|
2023-08-21 17:48:34 +02:00
|
|
|
}
|
|
|
|
|
2023-10-05 08:27:17 +02:00
|
|
|
NTSTATUS
|
2023-12-13 05:06:27 +01:00
|
|
|
PerformVirtualizationDetection(_Inout_ PIRP Irp)
|
2023-08-21 17:48:34 +02:00
|
|
|
{
|
2024-04-13 10:23:14 +02:00
|
|
|
PAGED_CODE();
|
2023-10-09 20:19:51 +02:00
|
|
|
|
2024-08-04 07:15:37 +02:00
|
|
|
NTSTATUS status = STATUS_UNSUCCESSFUL;
|
|
|
|
HYPERVISOR_DETECTION_REPORT report = {0};
|
|
|
|
|
|
|
|
status = ValidateIrpOutputBuffer(Irp, sizeof(HYPERVISOR_DETECTION_REPORT));
|
2023-11-09 08:30:59 +01:00
|
|
|
|
2024-04-13 10:23:14 +02:00
|
|
|
if (!NT_SUCCESS(status)) {
|
|
|
|
DEBUG_ERROR("ValidateIrpOutputBuffer failed with status %x", status);
|
|
|
|
return status;
|
|
|
|
}
|
2023-11-09 08:30:59 +01:00
|
|
|
|
2024-08-01 06:21:53 +02:00
|
|
|
report.aperf_msr_timing_check = APERFMsrTimingCheck();
|
|
|
|
report.invd_emulation_check = TestINVDEmulation();
|
2023-08-21 17:48:34 +02:00
|
|
|
|
2024-04-13 10:23:14 +02:00
|
|
|
Irp->IoStatus.Information = sizeof(HYPERVISOR_DETECTION_REPORT);
|
2023-08-21 14:40:40 +02:00
|
|
|
|
2024-08-01 06:21:53 +02:00
|
|
|
IntCopyMemory(
|
|
|
|
Irp->AssociatedIrp.SystemBuffer,
|
|
|
|
&report,
|
|
|
|
sizeof(HYPERVISOR_DETECTION_REPORT));
|
2023-08-21 14:40:40 +02:00
|
|
|
|
2024-04-13 10:23:14 +02:00
|
|
|
return STATUS_SUCCESS;
|
2023-08-21 14:40:40 +02:00
|
|
|
}
|