diff --git a/docs/documentation.md b/docs/documentation.md
index a7d8d4e6..8d2e601f 100644
--- a/docs/documentation.md
+++ b/docs/documentation.md
@@ -511,90 +511,92 @@ VMAware provides a convenient way to not only check for VMs, but also have the f
| Flag alias | Description | Supported platforms | Certainty | Admin? | 32-bit only? | Notes | Code implementation |
| ---------- | ----------- | ------------------- | --------- | ------ | ------------ | ----- | ------------------- |
-| `VM::VMID` | Check CPUID output of manufacturer ID for known VMs/hypervisors at leaf 0 and 0x40000000-0x40000100 | 🐧🪟🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L4809) |
-| `VM::CPU_BRAND` | Check if CPU brand model contains any VM-specific string snippets | 🐧🪟🍏 | 95% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L4827) |
-| `VM::HYPERVISOR_BIT` | Check if hypervisor feature bit in CPUID ECX bit 31 is enabled (always false for physical CPUs) | 🐧🪟🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L4906) |
-| `VM::HYPERVISOR_STR` | Check for hypervisor brand string length (would be around 2 characters in a host machine) | 🐧🪟🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L4932) |
-| `VM::TIMER` | Check for timing anomalies in the system | 🐧🪟🍏 | 150% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5118) |
-| `VM::THREAD_COUNT` | Check if there are only 1 or 2 threads, which is a common pattern in VMs with default settings, nowadays physical CPUs should have at least 4 threads for modern CPUs | 🐧🪟🍏 | 35% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7788) |
-| `VM::MAC` | Check if mac address starts with certain VM designated values | 🐧 | 20% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5855) |
-| `VM::TEMPERATURE` | Check for device's temperature | 🐧 | 20% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6706) |
-| `VM::SYSTEMD` | Check result from systemd-detect-virt tool | 🐧 | 35% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5736) |
-| `VM::CVENDOR` | Check if the chassis vendor is a VM vendor | 🐧 | 65% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5760) |
-| `VM::CTYPE` | Check if the chassis type is valid (it's very often invalid in VMs) | 🐧 | 20% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5785) |
-| `VM::DOCKERENV` | Check if /.dockerenv or /.dockerinit file is present | 🐧 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5803) |
-| `VM::DMIDECODE` | Check if dmidecode output matches a VM brand | 🐧 | 55% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5818) |
-| `VM::DMESG` | Check if dmesg output matches a VM brand | 🐧 | 55% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5961) |
-| `VM::HWMON` | Check if /sys/class/hwmon/ directory is present. If not, likely a VM | 🐧 | 35% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6002) |
-| `VM::DLL` | Check for VM-specific DLLs | 🪟 | 50% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8088) |
-| `VM::HWMODEL` | Check if the sysctl for the hwmodel does not contain the "Mac" string | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7812) |
-| `VM::WINE_FUNC` | Check if the function "wine_get_unix_file_name" is present and if the OS booted from a VHD container | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8119) |
-| `VM::POWER_CAPABILITIES` | Check what power states are enabled | 🪟 | 45% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8145) |
-| `VM::PROCESSES` | Check for any VM processes that are active | 🐧 | 40% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6717) |
-| `VM::LINUX_USER_HOST` | Check for default VM username and hostname for linux | 🐧 | 10% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6012) |
-| `VM::GAMARUE` | Check for Gamarue ransomware technique which compares VM-specific Window product IDs | 🪟 | 10% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8205) |
-| `VM::BOCHS_CPU` | Check for various Bochs-related emulation oversights through CPU checks | 🐧🪟🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L4960) |
-| `VM::MAC_MEMSIZE` | Check if memory is too low for MacOS system | 🍏 | 15% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7848) |
-| `VM::MAC_IOKIT` | Check MacOS' IO kit registry for VM-specific strings | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7881) |
-| `VM::IOREG_GREP` | Check for VM-strings in ioreg commands for MacOS | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7978) |
-| `VM::MAC_SIP` | Check for the status of System Integrity Protection and hv_mm_present | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8019) |
-| `VM::VPC_INVALID` | Check for official VPC method | 🪟 | 75% | | 32-bit | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8314) |
+| `VM::VMID` | Check CPUID output of manufacturer ID for known VMs/hypervisors at leaf 0 and 0x40000000-0x40000100 | 🐧🪟🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L4777) |
+| `VM::CPU_BRAND` | Check if CPU brand model contains any VM-specific string snippets | 🐧🪟🍏 | 95% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L4795) |
+| `VM::HYPERVISOR_BIT` | Check if hypervisor feature bit in CPUID ECX bit 31 is enabled (always false for physical CPUs) | 🐧🪟🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L4874) |
+| `VM::HYPERVISOR_STR` | Check for hypervisor brand string length (would be around 2 characters in a host machine) | 🐧🪟🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L4907) |
+| `VM::TIMER` | Check for timing anomalies in the system | 🪟 | 150% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5297) |
+| `VM::THREAD_COUNT` | Check if there are only 1 or 2 threads, which is a common pattern in VMs with default settings (modern physical CPUs should have at least 4 threads) | 🐧🪟🍏 | 35% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7699) |
+| `VM::MAC` | Check if mac address starts with certain VM designated values | 🐧 | 20% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5766) |
+| `VM::TEMPERATURE` | Check for device's temperature | 🐧 | 20% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6617) |
+| `VM::SYSTEMD` | Check result from systemd-detect-virt tool | 🐧 | 35% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5647) |
+| `VM::CVENDOR` | Check if the chassis vendor is a VM vendor | 🐧 | 65% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5671) |
+| `VM::CTYPE` | Check if the chassis type is valid (it's very often invalid in VMs) | 🐧 | 20% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5696) |
+| `VM::DOCKERENV` | Check if /.dockerenv or /.dockerinit file is present | 🐧 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5714) |
+| `VM::DMIDECODE` | Check if dmidecode output matches a VM brand | 🐧 | 55% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5729) |
+| `VM::DMESG` | Check if dmesg output matches a VM brand | 🐧 | 55% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5872) |
+| `VM::HWMON` | Check if /sys/class/hwmon/ directory is present. If not, likely a VM | 🐧 | 35% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5913) |
+| `VM::DLL` | Check for VM-specific DLLs | 🪟 | 50% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7999) |
+| `VM::HWMODEL` | Check if the sysctl for the hwmodel does not contain the "Mac" string | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7723) |
+| `VM::WINE` | Check if the function "wine_get_unix_file_name" is present and if the OS booted from a VHD container | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8030) |
+| `VM::POWER_CAPABILITIES` | Check what power states are enabled | 🪟 | 45% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8056) |
+| `VM::PROCESSES` | Check for any VM processes that are active | 🐧 | 40% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6628) |
+| `VM::LINUX_USER_HOST` | Check for default VM username and hostname for linux | 🐧 | 10% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5923) |
+| `VM::GAMARUE` | Check for Gamarue ransomware technique which compares VM-specific Windows product IDs | 🪟 | 10% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8116) |
+| `VM::BOCHS_CPU` | Check for various Bochs-related emulation oversights through CPU checks | 🐧🪟🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L4935) |
+| `VM::MAC_MEMSIZE` | Check if memory is too low for MacOS system | 🍏 | 15% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7759) |
+| `VM::MAC_IOKIT` | Check MacOS' IO kit registry for VM-specific strings | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7792) |
+| `VM::IOREG_GREP` | Check for VM-strings in ioreg commands for MacOS | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7889) |
+| `VM::MAC_SIP` | Check for the status of System Integrity Protection and hv_vmm_present | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7930) |
+| `VM::VPC_INVALID` | Check for official VPC method | 🪟 | 75% | | 32-bit | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8225) |
| `VM::SYSTEM_REGISTERS` | | | 50% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L1) |
-| `VM::VMWARE_IOMEM` | Check for VMware string in /proc/iomem | 🐧 | 65% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6041) |
-| `VM::VMWARE_IOPORTS` | Check for VMware string in /proc/ioports | 🐧 | 70% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6552) |
-| `VM::VMWARE_SCSI` | Check for VMware string in /proc/scsi/scsi | 🐧 | 40% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6350) |
-| `VM::VMWARE_DMESG` | Check for VMware-specific device name in dmesg output | 🐧 | 65% | Admin | | Disabled by default | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6369) |
-| `VM::VMWARE_STR` | Check str assembly instruction method for VMware | 🪟 | 35% | | 32-bit | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8365) |
-| `VM::VMWARE_BACKDOOR` | Check for official VMware io port backdoor technique | 🪟 | 100% | | 32-bit | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8390) |
-| `VM::MUTEX` | Check for mutex strings of VM brands | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8451) |
-| `VM::THREAD_MISMATCH` | Check if the system's thread count matches the expected thread count for the detected CPU model | 🐧🪟🍏 | 50% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5040) |
-| `VM::CUCKOO_DIR` | Check for cuckoo directory using crt and WIN API directory functions | 🪟 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8537) |
-| `VM::CUCKOO_PIPE` | Check for Cuckoo specific piping mechanism | 🪟 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8593) |
-| `VM::AZURE` | Check for default Azure hostname format (Azure uses Hyper-V as their base VM brand) | 🐧🪟 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6953) |
-| `VM::DISPLAY` | Check for display configurations commonly found in VMs | 🪟 | 25% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8647) |
-| `VM::DEVICE_STRING` | Check if bogus device string would be accepted | 🪟 | 25% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8669) |
-| `VM::BLUESTACKS_FOLDERS` | Check for the presence of BlueStacks-specific folders | 🐧 | 5% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6057) |
-| `VM::CPUID_SIGNATURE` | Check for signatures in leaf 0x40000001 in CPUID | 🐧🪟🍏 | 95% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5068) |
-| `VM::KGT_SIGNATURE` | Check for Intel KGT (Trusty branch) hypervisor signature in CPUID | 🐧🪟🍏 | 80% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5094) |
-| `VM::QEMU_VIRTUAL_DMI` | Check for presence of QEMU in the /sys/devices/virtual/dmi/id directory | 🐧 | 40% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6138) |
-| `VM::QEMU_USB` | Check for presence of QEMU in the /sys/kernel/debug/usb/devices directory | 🐧 | 20% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6167) |
-| `VM::HYPERVISOR_DIR` | Check for presence of any files in /sys/hypervisor directory | 🐧 | 20% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6195) |
-| `VM::UML_CPU` | Check for the "UML" string in the CPU brand | 🐧 | 80% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6243) |
-| `VM::KMSG` | Check for any indications of hypervisors in the kernel message logs | 🐧 | 5% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6273) |
-| `VM::VBOX_MODULE` | Check for a VBox kernel module | 🐧 | 15% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6327) |
-| `VM::SYSINFO_PROC` | Check for potential VM info in /proc/sysinfo | 🐧 | 15% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6403) |
-| `VM::DMI_SCAN` | Check for string matches of VM brands in the linux DMI | 🐧 | 50% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6425) |
-| `VM::SMBIOS_VM_BIT` | Check for the VM bit in the SMBIOS data | 🐧 | 50% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6507) |
-| `VM::PODMAN_FILE` | Check for podman file in /run/ | 🐧 | 5% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6537) |
-| `VM::WSL_PROC` | Check for WSL or microsoft indications in /proc/ subdirectories | 🐧 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6569) |
-| `VM::DRIVERS` | Check for VM-specific names for drivers | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8686) |
-| `VM::DISK_SERIAL` | Check for serial numbers of virtual disks | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8784) |
-| `VM::IVSHMEM` | Check for IVSHMEM device presence | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9023) |
-| `VM::GPU_CAPABILITIES` | Check for GPU capabilities related to VMs | 🪟 | 45% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9129) |
-| `VM::HANDLES` | Check for vm-specific devices | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9167) |
-| `VM::QEMU_FW_CFG` | Detect QEMU fw_cfg interface. This first checks the Device Tree for a fw-cfg node or hypervisor tag, then verifies the presence of the qemu_fw_cfg module and firmware directories in sysfs. | 🐧 | 70% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6597) |
-| `VM::VIRTUAL_PROCESSORS` | Check if the number of virtual and logical processors are reported correctly by the system | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9269) |
-| `VM::HYPERVISOR_QUERY` | Check if a call to NtQuerySystemInformation with the 0x9f leaf fills a _SYSTEM_HYPERVISOR_DETAIL_INFORMATION structure | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9299) |
-| `VM::AMD_SEV_MSR` | Check for AMD-SEV MSR running on the system | 🐧🍏 | 50% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6080) |
-| `VM::VIRTUAL_REGISTRY` | Check for particular object directory which is present in Sandboxie virtual environment but not in usual host systems | 🪟 | 90% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9367) |
-| `VM::FIRMWARE` | Check for VM signatures on all firmware tables | 🐧🪟 | 100% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7013) |
-| `VM::FILE_ACCESS_HISTORY` | Check if the number of accessed files are too low for a human-managed environment | 🐧 | 15% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6627) |
-| `VM::AUDIO` | Check if no waveform-audio output devices are present in the system | 🪟 | 25% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9461) |
-| `VM::NSJAIL_PID` | Check if process status matches with nsjail patterns with PID anomalies | 🐧 | 75% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6654) |
-| `VM::DEVICES` | Check for PCI vendor and device IDs that are VM-specific | 🐧🪟 | 95% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7432) |
-| `VM::ACPI_SIGNATURE` | Check for VM-specific ACPI device signatures | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9569) |
-| `VM::TRAP` | Check if after raising two traps at the same RIP, a hypervisor interferes with the instruction pointer delivery | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9714) |
-| `VM::UD` | Check if no waveform-audio output devices are present in the system | 🪟 | 25% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9461) |
-| `VM::BLOCKSTEP` | Check if a hypervisor does not properly restore the interruptibility state after a VM-exit in compatibility mode | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9989) |
-| `VM::DBVM_HYPERCALL` | Check if Dark Byte's VM is present | 🪟 | 150% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L10036) |
-| `VM::BOOT_LOGO` | Check boot logo for known VM images | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L10155) |
-| `VM::MAC_SYS` | Check for VM-strings in system profiler commands for MacOS | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8063) |
-| `VM::KERNEL_OBJECTS` | Check for any signs of VMs in Windows kernel object entities | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L10257) |
-| `VM::NVRAM` | Check for known NVRAM signatures that are present on virtual firmware | 🪟 | 100% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L10447) |
-| `VM::EDID` | Check for non-standard EDID configurations | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L10886) |
-| `VM::CPU_HEURISTIC` | Check whether the CPU is genuine and its reported instruction capabilities are not masked | 🪟 | 90% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L11142) |
-| `VM::CLOCK` | Check the presence of system timers | 🪟 | 45% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L11611) |
-| `VM::MSR` | Check for AMD-SEV MSR running on the system | 🐧🍏 | 100% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6080) |
+| `VM::VMWARE_IOMEM` | Check for VMware string in /proc/iomem | 🐧 | 65% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5952) |
+| `VM::VMWARE_IOPORTS` | Check for VMware string in /proc/ioports | 🐧 | 70% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6463) |
+| `VM::VMWARE_SCSI` | Check for VMware string in /proc/scsi/scsi | 🐧 | 40% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6261) |
+| `VM::VMWARE_DMESG` | Check for VMware-specific device name in dmesg output | 🐧 | 65% | Admin | | Disabled by default | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6280) |
+| `VM::VMWARE_STR` | Check str assembly instruction method for VMware | 🪟 | 35% | | 32-bit | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8276) |
+| `VM::VMWARE_BACKDOOR` | Check for official VMware io port backdoor technique | 🪟 | 100% | | 32-bit | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8301) |
+| `VM::MUTEX` | Check for mutex strings of VM brands | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8362) |
+| `VM::THREAD_MISMATCH` | Check if the system's thread count matches the expected thread count for the detected CPU model | 🐧🪟🍏 | 50% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5015) |
+| `VM::CUCKOO_DIR` | Check for cuckoo directory using crt and WIN API directory functions | 🪟 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8448) |
+| `VM::CUCKOO_PIPE` | Check for Cuckoo specific piping mechanism | 🪟 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8504) |
+| `VM::AZURE` | Check for default Azure hostname format (Azure uses Hyper-V as their base VM brand) | 🐧🪟 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6864) |
+| `VM::DISPLAY` | Check for display configurations commonly found in VMs | 🪟 | 25% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8558) |
+| `VM::DEVICE_STRING` | Check if bogus device string would be accepted | 🪟 | 25% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8580) |
+| `VM::BLUESTACKS_FOLDERS` | Check for the presence of BlueStacks-specific folders | 🐧 | 5% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5968) |
+| `VM::CPUID_SIGNATURE` | Check for CPUID signatures that reveal the presence of a hypervisor | 🐧🪟🍏 | 95% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5151) |
+| `VM::KGT_SIGNATURE` | Check for Intel KGT (Trusty branch) hypervisor signature in CPUID | 🐧🪟🍏 | 80% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5273) |
+| `VM::QEMU_VIRTUAL_DMI` | Check for presence of QEMU in the /sys/devices/virtual/dmi/id directory | 🐧 | 40% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6049) |
+| `VM::QEMU_USB` | Check for presence of QEMU in the /sys/kernel/debug/usb/devices directory | 🐧 | 20% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6078) |
+| `VM::HYPERVISOR_DIR` | Check for presence of any files in /sys/hypervisor directory | 🐧 | 20% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6106) |
+| `VM::UML_CPU` | Check for the "UML" string in the CPU brand | 🐧 | 80% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6154) |
+| `VM::KMSG` | Check for any indications of hypervisors in the kernel message logs | 🐧 | 5% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6184) |
+| `VM::VBOX_MODULE` | Check for a VBox kernel module | 🐧 | 15% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6238) |
+| `VM::SYSINFO_PROC` | Check for potential VM info in /proc/sysinfo | 🐧 | 15% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6314) |
+| `VM::DMI_SCAN` | Check for string matches of VM brands in the linux DMI | 🐧 | 50% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6336) |
+| `VM::SMBIOS_VM_BIT` | Check for the VM bit in the SMBIOS data | 🐧 | 50% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6418) |
+| `VM::PODMAN_FILE` | Check for podman file in /run/ | 🐧 | 5% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6448) |
+| `VM::WSL_PROC` | Check for WSL or microsoft indications in /proc/ subdirectories | 🐧 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6480) |
+| `VM::DRIVERS` | Check for VM-specific names for drivers | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8597) |
+| `VM::DISK_SERIAL` | Check for serial numbers of virtual disks | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8695) |
+| `VM::IVSHMEM` | Check for IVSHMEM device presence | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8934) |
+| `VM::GPU_CAPABILITIES` | Check for GPU capabilities related to VMs | 🪟 | 45% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9040) |
+| `VM::HANDLES` | Check for vm-specific devices | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9078) |
+| `VM::QEMU_FW_CFG` | Detect QEMU fw_cfg interface. This first checks the Device Tree for a fw-cfg node or hypervisor tag, then verifies the presence of the qemu_fw_cfg module and firmware directories in sysfs. | 🐧 | 70% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6508) |
+| `VM::VIRTUAL_PROCESSORS` | Check if the number of virtual and logical processors are reported correctly by the system | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9180) |
+| `VM::HYPERVISOR_QUERY` | Check if a call to NtQuerySystemInformation with the 0x9f leaf fills a _SYSTEM_HYPERVISOR_DETAIL_INFORMATION structure | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9210) |
+| `VM::AMD_SEV_MSR` | Check for AMD-SEV MSR running on the system | 🐧🍏 | 50% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5991) |
+| `VM::VIRTUAL_REGISTRY` | Check for particular object directory which is present in Sandboxie virtual environment but not in usual host systems | 🪟 | 90% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9278) |
+| `VM::FIRMWARE` | Check for VM signatures on all firmware tables | 🐧🪟 | 100% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6924) |
+| `VM::FILE_ACCESS_HISTORY` | Check if the number of accessed files are too low for a human-managed environment | 🐧 | 15% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6538) |
+| `VM::AUDIO` | Check if no waveform-audio output devices are present in the system | 🪟 | 25% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9372) |
+| `VM::NSJAIL_PID` | Check if process status matches with nsjail patterns with PID anomalies | 🐧 | 75% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6565) |
+| `VM::DEVICES` | Check for PCI vendor and device IDs that are VM-specific | 🐧🪟 | 95% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7343) |
+| `VM::ACPI_SIGNATURE` | Check for VM-specific ACPI device signatures | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9480) |
+| `VM::TRAP` | Check if after raising two traps at the same RIP, a hypervisor interferes with the instruction pointer delivery | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9625) |
+| `VM::UD` | Check how the #UD (undefined opcode) exception is delivered for signs of hypervisor interference | 🪟 | 25% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9372) |
+| `VM::BLOCKSTEP` | Check if a hypervisor does not properly restore the interruptibility state after a VM-exit in compatibility mode | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9900) |
+| `VM::DBVM_HYPERCALL` | Check if Dark Byte's VM is present | 🪟 | 150% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9947) |
+| `VM::BOOT_LOGO` | Check boot logo for known VM images | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L10066) |
+| `VM::MAC_SYS` | Check for VM-strings in system profiler commands for MacOS | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7974) |
+| `VM::KERNEL_OBJECTS` | Check for any signs of VMs in Windows kernel object entities | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L10168) |
+| `VM::NVRAM` | Check for known NVRAM signatures that are present on virtual firmware | 🪟 | 100% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L10358) |
+| `VM::EDID` | Check for non-standard EDID configurations | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L10797) |
+| `VM::CPU_HEURISTIC` | Check whether the CPU is genuine and its reported instruction capabilities are not masked | 🪟 | 90% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L11053) |
+| `VM::CLOCK` | Check the presence of system timers | 🪟 | 45% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L11522) |
+| `VM::MSR` | Check for VM-specific model-specific register (MSR) values | 🐧🍏 | 100% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5991) |
+| `VM::KVM_INTERCEPTION` | Check whether KVM attempts to patch a mismatched hypercall instruction | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L11735) |
+| `VM::BREAKPOINT` | Check whether a hypervisor uses EPT/NPT hooking to intercept hardware breakpoints | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L11933) |
diff --git a/src/cli.cpp b/src/cli.cpp
index 888c4c25..cd2782bd 100755
--- a/src/cli.cpp
+++ b/src/cli.cpp
@@ -931,7 +931,7 @@ static void general(
checker(VM::DMESG, "dmesg output");
checker(VM::HWMON, "hwmon presence");
checker(VM::DLL, "DLLs");
- checker(VM::WINE_FUNC, "Wine");
+ checker(VM::WINE, "Wine");
checker(VM::HWMODEL, "hw.model");
checker(VM::PROCESSES, "processes");
checker(VM::LINUX_USER_HOST, "default Linux user/host");
@@ -1001,6 +1001,8 @@ static void general(
checker(VM::CPU_HEURISTIC, "CPU heuristics");
checker(VM::CLOCK, "system timers");
checker(VM::MSR, "model specific registers");
+ checker(VM::KVM_INTERCEPTION, "KVM interception");
+ checker(VM::BREAKPOINT, "EPT/NPT hooking");
// ADD NEW TECHNIQUE CHECKER HERE
@@ -1207,7 +1209,7 @@ static void general(
note <<
" If you found a false positive, please make sure to create\n \
an issue at https://github.com/kernelwernel/VMAware/issues\n\n";
-// ^ do not modify the space above
+ // ^ do not modify the space above
}
}
diff --git a/src/vmaware.hpp b/src/vmaware.hpp
index 42c4c81e..5aa9227b 100644
--- a/src/vmaware.hpp
+++ b/src/vmaware.hpp
@@ -54,14 +54,14 @@
*
*
* ============================== SECTIONS ==================================
- * - enums for publicly accessible techniques => line 556
- * - struct for internal cpu operations => line 804
- * - struct for internal memoization => line 3131
- * - struct for internal utility functions => line 3338
- * - struct for internal core components => line 11827
- * - start of VM detection technique list => line 4804
- * - start of public VM detection functions => line 12192
- * - start of externally defined variables => line 12979
+ * - enums for publicly accessible techniques => line 557
+ * - struct for internal cpu operations => line 807
+ * - struct for internal memoization => line 3117
+ * - struct for internal utility functions => line 3324
+ * - struct for internal core components => line 12053
+ * - start of VM detection technique list => line 4772
+ * - start of public VM detection functions => line 12418
+ * - start of externally defined variables => line 13207
*
*
* ============================== EXAMPLE ===================================
@@ -376,6 +376,7 @@
#include
#include
#include
+#include
#if (WINDOWS)
#include
@@ -569,7 +570,7 @@ struct VM {
DISPLAY,
DLL,
VMWARE_BACKDOOR,
- WINE_FUNC,
+ WINE,
VIRTUAL_REGISTRY,
MUTEX,
DEVICE_STRING,
@@ -589,6 +590,8 @@ struct VM {
CPU_HEURISTIC,
CLOCK,
MSR,
+ KVM_INTERCEPTION,
+ BREAKPOINT,
// Linux and Windows
SYSTEM_REGISTERS,
@@ -1221,16 +1224,14 @@ struct VM {
struct cpu_entry {
u32 hash;
u32 threads;
- double base_clock;
- constexpr cpu_entry(const char* m, u32 t, double c)
- : hash(constexpr_hash::get(m)), threads(t), base_clock(c) {
+ constexpr cpu_entry(const char* m, u32 t)
+ : hash(constexpr_hash::get(m)), threads(t) {
}
};
struct cpu_cache {
u32 expected_threads;
- u32 base_clock_mhz;
bool found;
const char* debug_tag;
std::string model_name;
@@ -1244,7 +1245,7 @@ struct VM {
};
static const cpu_cache& analyze_cpu() {
- static cpu_cache result = { 0, 0, false, "", "" };
+ static cpu_cache result = { 0, false, "", "" };
static bool initialized = false;
if (initialized) return result;
@@ -1320,7 +1321,6 @@ struct VM {
const char* str = result.model_name.c_str();
size_t best_len = 0;
u32 z_series_threads = 0;
- double found_clock = 0.0;
const auto hash_func = hasher::get();
@@ -1371,7 +1371,6 @@ struct VM {
if (current_len > best_len) {
best_len = current_len;
result.expected_threads = db[idx].threads;
- found_clock = db[idx].base_clock;
result.found = true;
}
}
@@ -1386,1031 +1385,1018 @@ struct VM {
result.expected_threads = z_series_threads;
}
- if (result.found) {
- result.base_clock_mhz = static_cast<u32>(found_clock * 1000.0);
- }
-
initialized = true;
return result;
}
- // In C++11, you can define static const arrays inside a function
- // without specifying the size explicitly. The compiler deduces it
- // The data is stored in read-only data just like a global constexpr array
- // We can't also put it outside the VM struct because the compiler complains about "too many initializers"
-
- // we cannot use constexpr on a static array if we do not want to provide the size explicitly inside the class
- // we cant also use another source file or use the C++ 17 inline variable feature because we want to stay C++11 compatible
- // using other structs or std::array would not solve anything, so the ONLY solution to this c++ 11 limitation is to define the function that returns the array
- // just like we had before (intel_thread_mismatch, xeon_thread_mismatch, amd_thread_mismatch) but now inside the cpu struct
inline static void get_intel_core_db(const cpu_entry*& out_ptr, size_t& out_size) {
static const cpu_entry db[] = {
// i3 series
- { "i3-1000G1", 4, 1.10 },
- { "i3-1000G4", 4, 1.10 },
- { "i3-1000NG4", 4, 1.10 },
- { "i3-1005G1", 4, 1.20 },
- { "i3-10100", 8, 3.60 },
- { "i3-10100E", 8, 3.20 },
- { "i3-10100F", 8, 3.60 },
- { "i3-10100T", 8, 3.00 },
- { "i3-10100TE", 8, 2.30 },
- { "i3-10100Y", 4, 1.30 },
- { "i3-10105", 8, 3.70 },
- { "i3-10105F", 8, 3.70 },
- { "i3-10105T", 8, 3.00 },
- { "i3-10110U", 4, 2.10 },
- { "i3-10110Y", 4, 1.00 },
- { "i3-10300", 8, 3.70 },
- { "i3-10300T", 8, 3.00 },
- { "i3-10305", 8, 3.80 },
- { "i3-10305T", 8, 3.00 },
- { "i3-10320", 8, 3.80 },
- { "i3-10325", 8, 3.90 },
- { "i3-11100B", 8, 3.60 },
- { "i3-11100HE", 8, 2.40 },
- { "i3-1110G4", 4, 2.50 },
- { "i3-1115G4E", 4, 3.00 },
- { "i3-1115GRE", 4, 3.00 },
- { "i3-1120G4", 8, 1.10 },
- { "i3-12100", 8, 3.30 },
- { "i3-12100F", 8, 3.30 },
- { "i3-12100T", 8, 2.20 },
- { "i3-1210U", 8, 1.00 },
- { "i3-1215U", 8, 1.20 },
- { "i3-1215UE", 8, 1.20 },
- { "i3-1215UL", 8, 1.20 },
- { "i3-12300", 8, 3.50 },
- { "i3-12300T", 8, 2.30 },
- { "i3-13100", 8, 3.40 },
- { "i3-13100F", 8, 3.40 },
- { "i3-13100T", 8, 2.50 },
- { "i3-1315U", 8, 1.20 },
- { "i3-1315UE", 8, 1.20 },
- { "i3-14100", 8, 3.50 },
- { "i3-14100F", 8, 3.50 },
- { "i3-14100T", 8, 2.70 },
- { "i3-2100", 4, 3.10 },
- { "i3-2100T", 4, 2.50 },
- { "i3-2102", 4, 3.10 },
- { "i3-2105", 4, 3.10 },
- { "i3-2120", 4, 3.30 },
- { "i3-2120T", 4, 2.60 },
- { "i3-2125", 4, 3.30 },
- { "i3-2130", 4, 3.40 },
- { "i3-2308M", 4, 2.10 },
- { "i3-2310E", 4, 2.10 },
- { "i3-2310M", 4, 2.10 },
- { "i3-2312M", 4, 2.10 },
- { "i3-2328M", 4, 2.20 },
- { "i3-2330E", 4, 2.20 },
- { "i3-2330M", 4, 2.20 },
- { "i3-2332M", 4, 2.20 },
- { "i3-2340UE", 4, 1.30 },
- { "i3-2348M", 4, 2.30 },
- { "i3-2350LM", 4, 1.30 },
- { "i3-2350M", 4, 2.30 },
- { "i3-2355M", 4, 1.40 },
- { "i3-2357M", 4, 1.30 },
- { "i3-2365M", 4, 1.40 },
- { "i3-2367M", 4, 1.40 },
- { "i3-2370LM", 4, 1.40 },
- { "i3-2370M", 4, 2.40 },
- { "i3-2375M", 4, 1.50 },
- { "i3-2377M", 4, 1.50 },
- { "i3-2390M", 4, 2.40 },
- { "i3-2393M", 4, 2.50 },
- { "i3-2394M", 4, 2.60 },
- { "i3-2395M", 4, 2.70 },
- { "i3-2397M", 4, 2.80 },
- { "i3-3110M", 4, 2.40 },
- { "i3-3115C", 4, 2.50 },
- { "i3-3120M", 4, 2.50 },
- { "i3-3120ME", 4, 2.40 },
- { "i3-3130M", 4, 2.60 },
- { "i3-3210", 4, 3.20 },
- { "i3-3217U", 4, 1.80 },
- { "i3-3217UE", 4, 1.60 },
- { "i3-3220", 4, 3.30 },
- { "i3-3220T", 4, 2.80 },
- { "i3-3225", 4, 3.30 },
- { "i3-3227U", 4, 1.90 },
- { "i3-3229Y", 4, 1.40 },
- { "i3-3240", 4, 3.40 },
- { "i3-3240T", 4, 2.90 },
- { "i3-3245", 4, 3.40 },
- { "i3-3250", 4, 3.50 },
- { "i3-3250T", 4, 3.00 },
- { "i3-330E", 4, 2.13 },
- { "i3-330M", 4, 2.13 },
- { "i3-330UM", 4, 1.20 },
- { "i3-350M", 4, 2.26 },
- { "i3-370M", 4, 2.40 },
- { "i3-380M", 4, 2.53 },
- { "i3-380UM", 4, 1.33 },
- { "i3-390M", 4, 2.66 },
- { "i3-4000M", 4, 2.40 },
- { "i3-4005U", 4, 1.70 },
- { "i3-4010M", 4, 1.70 },
- { "i3-4010U", 4, 1.70 },
- { "i3-4010Y", 4, 1.30 },
- { "i3-4012Y", 4, 1.50 },
- { "i3-4020Y", 4, 1.50 },
- { "i3-4025U", 4, 1.90 },
- { "i3-4030U", 4, 1.90 },
- { "i3-4030Y", 4, 1.60 },
- { "i3-4100E", 4, 2.40 },
- { "i3-4100M", 4, 2.50 },
- { "i3-4100U", 4, 1.80 },
- { "i3-4102E", 4, 1.60 },
- { "i3-4110E", 4, 2.60 },
- { "i3-4110M", 4, 2.60 },
- { "i3-4112E", 4, 1.80 },
- { "i3-4120U", 4, 2.00 },
- { "i3-4130", 4, 3.40 },
- { "i3-4130T", 4, 2.90 },
- { "i3-4150", 4, 3.50 },
- { "i3-4150T", 4, 3.00 },
- { "i3-4158U", 4, 2.00 },
- { "i3-4160", 4, 3.60 },
- { "i3-4160T", 4, 3.10 },
- { "i3-4170", 4, 3.70 },
- { "i3-4170T", 4, 3.20 },
- { "i3-4330", 4, 3.50 },
- { "i3-4330T", 4, 3.00 },
- { "i3-4330TE", 4, 2.40 },
- { "i3-4340", 4, 3.60 },
- { "i3-4340TE", 4, 2.60 },
- { "i3-4350", 4, 3.60 },
- { "i3-4350T", 4, 3.10 },
- { "i3-4360", 4, 3.70 },
- { "i3-4360T", 4, 3.20 },
- { "i3-4370", 4, 3.80 },
- { "i3-4370T", 4, 3.30 },
- { "i3-5005U", 4, 2.00 },
- { "i3-5010U", 4, 2.10 },
- { "i3-5015U", 4, 2.10 },
- { "i3-5020U", 4, 2.20 },
- { "i3-5157U", 4, 2.50 },
- { "i3-530", 4, 2.93 },
- { "i3-540", 4, 3.06 },
- { "i3-550", 4, 3.20 },
- { "i3-560", 4, 3.33 },
- { "i3-6006U", 4, 2.00 },
- { "i3-6098P", 4, 3.60 },
- { "i3-6100", 4, 3.70 },
- { "i3-6100E", 4, 2.70 },
- { "i3-6100H", 4, 2.70 },
- { "i3-6100T", 4, 3.20 },
- { "i3-6100TE", 4, 2.70 },
- { "i3-6100U", 4, 2.30 },
- { "i3-6102E", 4, 1.90 },
- { "i3-6120T", 4, 3.20 },
- { "i3-6157U", 4, 2.40 },
- { "i3-6167U", 4, 2.70 },
- { "i3-6300", 4, 3.80 },
- { "i3-6300T", 4, 3.30 },
- { "i3-6320", 4, 3.90 },
- { "i3-6320T", 4, 3.40 },
- { "i3-7007U", 4, 2.10 },
- { "i3-7020U", 4, 2.30 },
- { "i3-7100", 4, 3.90 },
- { "i3-7100E", 4, 2.90 },
- { "i3-7100H", 4, 3.00 },
- { "i3-7100T", 4, 3.40 },
- { "i3-7100U", 4, 2.40 },
- { "i3-7101E", 4, 3.90 },
- { "i3-7101TE", 4, 3.40 },
- { "i3-7102E", 4, 2.10 },
- { "i3-7110U", 4, 2.60 },
- { "i3-7120", 4, 4.00 },
- { "i3-7120T", 4, 3.50 },
- { "i3-7130U", 4, 2.70 },
- { "i3-7167U", 4, 2.80 },
- { "i3-7300", 4, 4.00 },
- { "i3-7300T", 4, 3.50 },
- { "i3-7310T", 4, 3.40 },
- { "i3-7310U", 4, 2.40 },
- { "i3-7320", 4, 4.10 },
- { "i3-7320T", 4, 3.50 },
- { "i3-7340", 4, 4.20 },
- { "i3-7350K", 4, 4.20 },
- { "i3-8000", 4, 3.60 },
- { "i3-8000T", 4, 3.10 },
- { "i3-8020", 4, 3.60 },
- { "i3-8020T", 4, 3.10 },
- { "i3-8100", 4, 3.60 },
- { "i3-8100B", 4, 3.60 },
- { "i3-8100F", 4, 3.60 },
- { "i3-8100H", 4, 3.00 },
- { "i3-8100T", 4, 3.10 },
- { "i3-8109U", 4, 3.00 },
- { "i3-8120", 4, 3.60 },
- { "i3-8120T", 4, 3.10 },
- { "i3-8121U", 4, 2.20 },
- { "i3-8130U", 4, 2.20 },
- { "i3-8140U", 4, 2.10 },
- { "i3-8145U", 4, 2.10 },
- { "i3-8145UE", 4, 2.20 },
- { "i3-8300", 4, 3.70 },
- { "i3-8300T", 4, 3.20 },
- { "i3-8320", 4, 3.70 },
- { "i3-8320T", 4, 3.20 },
- { "i3-8350K", 4, 4.00 },
- { "i3-9100", 4, 3.60 },
- { "i3-9100E", 4, 3.10 },
- { "i3-9100F", 4, 3.60 },
- { "i3-9100HL", 4, 1.60 },
- { "i3-9100T", 4, 3.10 },
- { "i3-9100TE", 4, 2.20 },
- { "i3-9300", 4, 3.70 },
- { "i3-9300T", 4, 3.20 },
- { "i3-9320", 4, 3.70 },
- { "i3-9350K", 4, 4.00 },
- { "i3-9350KF", 4, 4.00 },
- { "i3-N300", 8, 0.80 },
- { "i3-N305", 8, 1.80 },
+ { "i3-1000G1", 4 },
+ { "i3-1000G4", 4 },
+ { "i3-1000NG4", 4 },
+ { "i3-1005G1", 4 },
+ { "i3-10100", 8 },
+ { "i3-10100E", 8 },
+ { "i3-10100F", 8 },
+ { "i3-10100T", 8 },
+ { "i3-10100TE", 8 },
+ { "i3-10100Y", 4 },
+ { "i3-10105", 8 },
+ { "i3-10105F", 8 },
+ { "i3-10105T", 8 },
+ { "i3-10110U", 4 },
+ { "i3-10110Y", 4 },
+ { "i3-10300", 8 },
+ { "i3-10300T", 8 },
+ { "i3-10305", 8 },
+ { "i3-10305T", 8 },
+ { "i3-10320", 8 },
+ { "i3-10325", 8 },
+ { "i3-11100B", 8 },
+ { "i3-11100HE", 8 },
+ { "i3-1110G4", 4 },
+ { "i3-1115G4E", 4 },
+ { "i3-1115GRE", 4 },
+ { "i3-1120G4", 8 },
+ { "i3-12100", 8 },
+ { "i3-12100F", 8 },
+ { "i3-12100T", 8 },
+ { "i3-1210U", 8 },
+ { "i3-1215U", 8 },
+ { "i3-1215UE", 8 },
+ { "i3-1215UL", 8 },
+ { "i3-12300", 8 },
+ { "i3-12300T", 8 },
+ { "i3-13100", 8 },
+ { "i3-13100F", 8 },
+ { "i3-13100T", 8 },
+ { "i3-1315U", 8 },
+ { "i3-1315UE", 8 },
+ { "i3-14100", 8 },
+ { "i3-14100F", 8 },
+ { "i3-14100T", 8 },
+ { "i3-2100", 4 },
+ { "i3-2100T", 4 },
+ { "i3-2102", 4 },
+ { "i3-2105", 4 },
+ { "i3-2120", 4 },
+ { "i3-2120T", 4 },
+ { "i3-2125", 4 },
+ { "i3-2130", 4 },
+ { "i3-2308M", 4 },
+ { "i3-2310E", 4 },
+ { "i3-2310M", 4 },
+ { "i3-2312M", 4 },
+ { "i3-2328M", 4 },
+ { "i3-2330E", 4 },
+ { "i3-2330M", 4 },
+ { "i3-2332M", 4 },
+ { "i3-2340UE", 4 },
+ { "i3-2348M", 4 },
+ { "i3-2350LM", 4 },
+ { "i3-2350M", 4 },
+ { "i3-2355M", 4 },
+ { "i3-2357M", 4 },
+ { "i3-2365M", 4 },
+ { "i3-2367M", 4 },
+ { "i3-2370LM", 4 },
+ { "i3-2370M", 4 },
+ { "i3-2375M", 4 },
+ { "i3-2377M", 4 },
+ { "i3-2390M", 4 },
+ { "i3-2393M", 4 },
+ { "i3-2394M", 4 },
+ { "i3-2395M", 4 },
+ { "i3-2397M", 4 },
+ { "i3-3110M", 4 },
+ { "i3-3115C", 4 },
+ { "i3-3120M", 4 },
+ { "i3-3120ME", 4 },
+ { "i3-3130M", 4 },
+ { "i3-3210", 4 },
+ { "i3-3217U", 4 },
+ { "i3-3217UE", 4 },
+ { "i3-3220", 4 },
+ { "i3-3220T", 4 },
+ { "i3-3225", 4 },
+ { "i3-3227U", 4 },
+ { "i3-3229Y", 4 },
+ { "i3-3240", 4 },
+ { "i3-3240T", 4 },
+ { "i3-3245", 4 },
+ { "i3-3250", 4 },
+ { "i3-3250T", 4 },
+ { "i3-330E", 4 },
+ { "i3-330M", 4 },
+ { "i3-330UM", 4 },
+ { "i3-350M", 4 },
+ { "i3-370M", 4 },
+ { "i3-380M", 4 },
+ { "i3-380UM", 4 },
+ { "i3-390M", 4 },
+ { "i3-4000M", 4 },
+ { "i3-4005U", 4 },
+ { "i3-4010M", 4 },
+ { "i3-4010U", 4 },
+ { "i3-4010Y", 4 },
+ { "i3-4012Y", 4 },
+ { "i3-4020Y", 4 },
+ { "i3-4025U", 4 },
+ { "i3-4030U", 4 },
+ { "i3-4030Y", 4 },
+ { "i3-4100E", 4 },
+ { "i3-4100M", 4 },
+ { "i3-4100U", 4 },
+ { "i3-4102E", 4 },
+ { "i3-4110E", 4 },
+ { "i3-4110M", 4 },
+ { "i3-4112E", 4 },
+ { "i3-4120U", 4 },
+ { "i3-4130", 4 },
+ { "i3-4130T", 4 },
+ { "i3-4150", 4 },
+ { "i3-4150T", 4 },
+ { "i3-4158U", 4 },
+ { "i3-4160", 4 },
+ { "i3-4160T", 4 },
+ { "i3-4170", 4 },
+ { "i3-4170T", 4 },
+ { "i3-4330", 4 },
+ { "i3-4330T", 4 },
+ { "i3-4330TE", 4 },
+ { "i3-4340", 4 },
+ { "i3-4340TE", 4 },
+ { "i3-4350", 4 },
+ { "i3-4350T", 4 },
+ { "i3-4360", 4 },
+ { "i3-4360T", 4 },
+ { "i3-4370", 4 },
+ { "i3-4370T", 4 },
+ { "i3-5005U", 4 },
+ { "i3-5010U", 4 },
+ { "i3-5015U", 4 },
+ { "i3-5020U", 4 },
+ { "i3-5157U", 4 },
+ { "i3-530", 4 },
+ { "i3-540", 4 },
+ { "i3-550", 4 },
+ { "i3-560", 4 },
+ { "i3-6006U", 4 },
+ { "i3-6098P", 4 },
+ { "i3-6100", 4 },
+ { "i3-6100E", 4 },
+ { "i3-6100H", 4 },
+ { "i3-6100T", 4 },
+ { "i3-6100TE", 4 },
+ { "i3-6100U", 4 },
+ { "i3-6102E", 4 },
+ { "i3-6120T", 4 },
+ { "i3-6157U", 4 },
+ { "i3-6167U", 4 },
+ { "i3-6300", 4 },
+ { "i3-6300T", 4 },
+ { "i3-6320", 4 },
+ { "i3-6320T", 4 },
+ { "i3-7007U", 4 },
+ { "i3-7020U", 4 },
+ { "i3-7100", 4 },
+ { "i3-7100E", 4 },
+ { "i3-7100H", 4 },
+ { "i3-7100T", 4 },
+ { "i3-7100U", 4 },
+ { "i3-7101E", 4 },
+ { "i3-7101TE", 4 },
+ { "i3-7102E", 4 },
+ { "i3-7110U", 4 },
+ { "i3-7120", 4 },
+ { "i3-7120T", 4 },
+ { "i3-7130U", 4 },
+ { "i3-7167U", 4 },
+ { "i3-7300", 4 },
+ { "i3-7300T", 4 },
+ { "i3-7310T", 4 },
+ { "i3-7310U", 4 },
+ { "i3-7320", 4 },
+ { "i3-7320T", 4 },
+ { "i3-7340", 4 },
+ { "i3-7350K", 4 },
+ { "i3-8000", 4 },
+ { "i3-8000T", 4 },
+ { "i3-8020", 4 },
+ { "i3-8020T", 4 },
+ { "i3-8100", 4 },
+ { "i3-8100B", 4 },
+ { "i3-8100F", 4 },
+ { "i3-8100H", 4 },
+ { "i3-8100T", 4 },
+ { "i3-8109U", 4 },
+ { "i3-8120", 4 },
+ { "i3-8120T", 4 },
+ { "i3-8121U", 4 },
+ { "i3-8130U", 4 },
+ { "i3-8140U", 4 },
+ { "i3-8145U", 4 },
+ { "i3-8145UE", 4 },
+ { "i3-8300", 4 },
+ { "i3-8300T", 4 },
+ { "i3-8320", 4 },
+ { "i3-8320T", 4 },
+ { "i3-8350K", 4 },
+ { "i3-9100", 4 },
+ { "i3-9100E", 4 },
+ { "i3-9100F", 4 },
+ { "i3-9100HL", 4 },
+ { "i3-9100T", 4 },
+ { "i3-9100TE", 4 },
+ { "i3-9300", 4 },
+ { "i3-9300T", 4 },
+ { "i3-9320", 4 },
+ { "i3-9350K", 4 },
+ { "i3-9350KF", 4 },
+ { "i3-N300", 8 },
+ { "i3-N305", 8 },
// i5 series
- { "i5-10200H", 8, 2.40 },
- { "i5-10210U", 8, 1.60 },
- { "i5-10210Y", 8, 1.00 },
- { "i5-10300H", 8, 2.50 },
- { "i5-1030G4", 8, 0.70 },
- { "i5-1030G7", 8, 0.80 },
- { "i5-1030NG7", 8, 1.10 },
- { "i5-10310U", 8, 1.70 },
- { "i5-10310Y", 8, 1.10 },
- { "i5-1035G1", 8, 1.00 },
- { "i5-1035G4", 8, 1.10 },
- { "i5-1035G7", 8, 1.20 },
- { "i5-1038NG7", 8, 2.00 },
- { "i5-10400", 12, 2.90 },
- { "i5-10400F", 12, 2.90 },
- { "i5-10400H", 8, 2.60 },
- { "i5-10400T", 12, 2.00 },
- { "i5-10500", 12, 3.10 },
- { "i5-10500E", 12, 3.10 },
- { "i5-10500H", 12, 2.50 },
- { "i5-10500T", 12, 2.30 },
- { "i5-10500TE", 12, 2.30 },
- { "i5-10505", 12, 3.20 },
- { "i5-10600", 12, 3.30 },
- { "i5-10600K", 12, 4.10 },
- { "i5-10600KF", 12, 4.10 },
- { "i5-10600T", 12, 2.40 },
- { "i5-1115G4", 4, 3.00 },
- { "i5-1125G4", 8, 2.00 },
- { "i5-11260H", 12, 2.60 },
- { "i5-11300H", 8, 3.10 },
- { "i5-1130G7", 8, 1.10 },
- { "i5-11320H", 8, 3.20 },
- { "i5-1135G7", 8, 2.40 },
- { "i5-11400", 12, 2.60 },
- { "i5-11400F", 12, 2.60 },
- { "i5-11400H", 12, 2.70 },
- { "i5-11400T", 12, 1.30 },
- { "i5-1140G7", 8, 1.10 },
- { "i5-1145G7", 8, 2.60 },
- { "i5-1145G7E", 8, 1.50 },
- { "i5-1145GRE", 8, 1.50 },
- { "i5-11500", 12, 2.70 },
- { "i5-11500B", 12, 3.30 },
- { "i5-11500H", 12, 2.90 },
- { "i5-11500HE", 12, 2.60 },
- { "i5-11500T", 12, 1.50 },
- { "i5-1155G7", 8, 2.50 },
- { "i5-11600", 12, 2.80 },
- { "i5-11600K", 12, 3.90 },
- { "i5-11600KF", 12, 3.90 },
- { "i5-11600T", 12, 1.70 },
- { "i5-1230U", 12, 1.00 },
- { "i5-1235U", 12, 1.30 },
- { "i5-12400", 12, 2.50 },
- { "i5-12400F", 12, 2.50 },
- { "i5-12400T", 12, 1.80 },
- { "i5-1240P", 16, 1.70 },
- { "i5-1240U", 12, 1.10 },
- { "i5-1245U", 12, 1.60 },
- { "i5-12490F", 12, 3.00 },
- { "i5-12500", 12, 3.00 },
- { "i5-12500H", 16, 2.50 },
- { "i5-12500HL", 16, 2.50 },
- { "i5-12500T", 12, 2.00 },
- { "i5-1250P", 16, 1.70 },
- { "i5-1250PE", 16, 1.70 },
- { "i5-12600", 12, 3.30 },
- { "i5-12600H", 16, 2.70 },
- { "i5-12600HE", 16, 2.50 },
- { "i5-12600HL", 16, 2.70 },
- { "i5-12600HX", 16, 2.50 },
- { "i5-12600K", 16, 3.70 },
- { "i5-12600KF", 16, 3.70 },
- { "i5-12600T", 12, 2.10 },
- { "i5-13400", 16, 2.50 },
- { "i5-13400F", 16, 2.50 },
- { "i5-13400T", 16, 1.30 },
- { "i5-1340P", 16, 1.90 },
- { "i5-1340PE", 16, 1.80 },
- { "i5-13490F", 16, 2.50 },
- { "i5-13500", 20, 2.50 },
- { "i5-13500H", 16, 2.60 },
- { "i5-13500T", 20, 1.60 },
- { "i5-13505H", 16, 2.60 },
- { "i5-1350P", 16, 1.90 },
- { "i5-1350PE", 16, 1.80 },
- { "i5-13600", 20, 2.70 },
- { "i5-13600H", 16, 2.80 },
- { "i5-13600HE", 16, 2.70 },
- { "i5-13600K", 20, 3.50 },
- { "i5-13600KF", 20, 3.50 },
- { "i5-13600T", 20, 1.80 },
- { "i5-2300", 4, 2.80 },
- { "i5-2310", 4, 2.90 },
- { "i5-2320", 4, 3.00 },
- { "i5-2380P", 4, 3.10 },
- { "i5-2390T", 4, 2.70 },
- { "i5-2400", 4, 3.10 },
- { "i5-2400S", 4, 2.50 },
- { "i5-2405S", 4, 2.50 },
- { "i5-2410M", 4, 2.30 },
- { "i5-2415M", 4, 2.30 },
- { "i5-2430M", 4, 2.40 },
- { "i5-2435M", 4, 2.40 },
- { "i5-2450M", 4, 2.50 },
- { "i5-2450P", 4, 3.20 },
- { "i5-2467M", 4, 1.60 },
- { "i5-2475M", 4, 2.40 },
- { "i5-2477M", 4, 1.80 },
- { "i5-2487M", 4, 1.90 },
- { "i5-2490M", 4, 2.50 },
- { "i5-2497M", 4, 2.30 },
- { "i5-2500", 4, 3.30 },
- { "i5-2500K", 4, 3.30 },
- { "i5-2500S", 4, 2.70 },
- { "i5-2500T", 4, 2.30 },
- { "i5-2510E", 4, 2.50 },
- { "i5-2515E", 4, 2.50 },
- { "i5-2520M", 4, 2.50 },
- { "i5-2537M", 4, 1.40 },
- { "i5-2540LM", 4, 2.60 },
- { "i5-2540M", 4, 2.60 },
- { "i5-2547M", 4, 1.60 },
- { "i5-2550K", 4, 3.40 },
- { "i5-2557M", 4, 1.70 },
- { "i5-2560LM", 4, 2.70 },
- { "i5-2560M", 4, 2.70 },
- { "i5-2580M", 4, 2.90 },
- { "i5-3210M", 4, 2.50 },
- { "i5-3230M", 4, 2.60 },
- { "i5-3317U", 4, 1.70 },
- { "i5-3320M", 4, 2.60 },
- { "i5-3330", 4, 3.00 },
- { "i5-3330S", 4, 2.70 },
- { "i5-3335S", 4, 2.70 },
- { "i5-3337U", 4, 1.80 },
- { "i5-3339Y", 4, 1.50 },
- { "i5-3340", 4, 3.10 },
- { "i5-3340M", 4, 2.70 },
- { "i5-3340S", 4, 2.80 },
- { "i5-3350P", 4, 3.10 },
- { "i5-3360M", 4, 2.80 },
- { "i5-3380M", 4, 2.90 },
- { "i5-3427U", 4, 1.80 },
- { "i5-3437U", 4, 1.90 },
- { "i5-3439Y", 4, 1.50 },
- { "i5-3450", 4, 3.10 },
- { "i5-3450S", 4, 2.80 },
- { "i5-3470", 4, 3.20 },
- { "i5-3470S", 4, 2.90 },
- { "i5-3470T", 4, 2.90 },
- { "i5-3475S", 4, 2.90 },
- { "i5-3550", 4, 3.30 },
- { "i5-3550S", 4, 3.00 },
- { "i5-3570", 4, 3.40 },
- { "i5-3570K", 4, 3.40 },
- { "i5-3570S", 4, 3.10 },
- { "i5-3570T", 4, 2.30 },
- { "i5-3610ME", 4, 2.70 },
- { "i5-4200H", 4, 2.80 },
- { "i5-4200M", 4, 2.50 },
- { "i5-4200U", 4, 1.60 },
- { "i5-4200Y", 4, 1.40 },
- { "i5-4202Y", 4, 1.60 },
- { "i5-4210H", 4, 2.90 },
- { "i5-4210M", 4, 2.60 },
- { "i5-4210U", 4, 1.70 },
- { "i5-4210Y", 4, 1.50 },
- { "i5-4220Y", 4, 1.60 },
- { "i5-4250U", 4, 1.30 },
- { "i5-4258U", 4, 2.40 },
- { "i5-4260U", 4, 1.40 },
- { "i5-4278U", 4, 2.60 },
- { "i5-4288U", 4, 2.60 },
- { "i5-4300M", 4, 2.60 },
- { "i5-4300U", 4, 1.90 },
- { "i5-4300Y", 4, 1.60 },
- { "i5-4302Y", 4, 1.60 },
- { "i5-4308U", 4, 2.80 },
- { "i5-430M", 4, 2.26 },
- { "i5-430UM", 4, 1.20 },
- { "i5-4310M", 4, 2.70 },
- { "i5-4310U", 4, 2.00 },
- { "i5-4330M", 4, 2.80 },
- { "i5-4340M", 4, 2.90 },
- { "i5-4350U", 4, 1.40 },
- { "i5-4360U", 4, 1.50 },
- { "i5-4400E", 4, 2.70 },
- { "i5-4402E", 4, 1.60 },
- { "i5-4402EC", 4, 2.50 },
- { "i5-4410E", 4, 2.90 },
- { "i5-4422E", 4, 1.80 },
- { "i5-4430", 4, 3.00 },
- { "i5-4430S", 4, 2.70 },
- { "i5-4440", 4, 3.10 },
- { "i5-4440S", 4, 2.80 },
- { "i5-4460", 4, 3.20 },
- { "i5-4460S", 4, 2.90 },
- { "i5-4460T", 4, 1.90 },
- { "i5-4470", 4, 3.40 },
- { "i5-450M", 4, 2.40 },
- { "i5-4570", 4, 3.20 },
- { "i5-4570R", 4, 2.70 },
- { "i5-4570S", 4, 2.90 },
- { "i5-4570T", 4, 2.90 },
- { "i5-4570TE", 4, 2.70 },
- { "i5-4590", 4, 3.30 },
- { "i5-4590S", 4, 3.00 },
- { "i5-4590T", 4, 2.00 },
- { "i5-460M", 4, 2.53 },
- { "i5-4670", 4, 3.40 },
- { "i5-4670K", 4, 3.40 },
- { "i5-4670R", 4, 3.00 },
- { "i5-4670S", 4, 3.10 },
- { "i5-4670T", 4, 2.30 },
- { "i5-4690", 4, 3.50 },
- { "i5-4690K", 4, 3.50 },
- { "i5-4690S", 4, 3.20 },
- { "i5-4690T", 4, 2.50 },
- { "i5-470UM", 4, 1.33 },
- { "i5-480M", 4, 2.66 },
- { "i5-5200U", 4, 2.20 },
- { "i5-520E", 4, 2.40 },
- { "i5-520M", 4, 2.40 },
- { "i5-520UM", 4, 1.06 },
- { "i5-5250U", 4, 1.60 },
- { "i5-5257U", 4, 2.70 },
- { "i5-5287U", 4, 2.90 },
- { "i5-5300U", 4, 2.30 },
- { "i5-5350H", 4, 3.00 },
- { "i5-5350U", 4, 1.80 },
- { "i5-540M", 4, 2.53 },
- { "i5-540UM", 4, 1.20 },
- { "i5-5575R", 4, 2.80 },
- { "i5-560M", 4, 2.66 },
- { "i5-560UM", 4, 1.33 },
- { "i5-5675C", 4, 3.10 },
- { "i5-5675R", 4, 3.10 },
- { "i5-580M", 4, 2.66 },
- { "i5-6198DU", 4, 2.30 },
- { "i5-6200U", 4, 2.30 },
- { "i5-6260U", 4, 1.80 },
- { "i5-6267U", 4, 2.90 },
- { "i5-6287U", 4, 3.10 },
- { "i5-6300HQ", 4, 2.30 },
- { "i5-6300U", 4, 2.40 },
- { "i5-6350HQ", 4, 2.30 },
- { "i5-6360U", 4, 2.00 },
- { "i5-6400", 4, 2.70 },
- { "i5-6400T", 4, 2.20 },
- { "i5-6402P", 4, 2.80 },
- { "i5-6440EQ", 4, 2.70 },
- { "i5-6440HQ", 4, 2.60 },
- { "i5-6442EQ", 4, 1.90 },
- { "i5-650", 4, 3.20 },
- { "i5-6500", 4, 3.20 },
- { "i5-6500T", 4, 2.50 },
- { "i5-6500TE", 4, 2.30 },
- { "i5-655K", 4, 3.20 },
- { "i5-6585R", 4, 2.80 },
- { "i5-660", 4, 3.33 },
- { "i5-6600", 4, 3.30 },
- { "i5-6600K", 4, 3.50 },
- { "i5-6600T", 4, 2.70 },
- { "i5-661", 4, 3.33 },
- { "i5-6685R", 4, 3.00 },
- { "i5-670", 4, 3.46 },
- { "i5-680", 4, 3.60 },
- { "i5-7200U", 4, 2.50 },
- { "i5-7210U", 4, 2.50 },
- { "i5-7260U", 4, 2.20 },
- { "i5-7267U", 4, 3.10 },
- { "i5-7287U", 4, 3.30 },
- { "i5-7300HQ", 4, 2.50 },
- { "i5-7300U", 4, 2.60 },
- { "i5-7360U", 4, 2.30 },
- { "i5-7400", 4, 3.00 },
- { "i5-7400T", 4, 2.40 },
- { "i5-7440EQ", 4, 2.90 },
- { "i5-7440HQ", 4, 2.80 },
- { "i5-7442EQ", 4, 2.10 },
- { "i5-750", 4, 2.66 },
- { "i5-7500", 4, 3.40 },
- { "i5-7500T", 4, 2.70 },
- { "i5-750S", 4, 2.40 },
- { "i5-760", 4, 2.80 },
- { "i5-7600", 4, 3.50 },
- { "i5-7600K", 4, 3.80 },
- { "i5-7600T", 4, 2.80 },
- { "i5-7640X", 4, 4.00 },
- { "i5-7Y54", 4, 1.20 },
- { "i5-7Y57", 4, 1.20 },
- { "i5-8200Y", 4, 1.30 },
- { "i5-8210Y", 4, 1.60 },
- { "i5-8250U", 8, 1.60 },
- { "i5-8257U", 8, 1.40 },
- { "i5-8259U", 8, 2.30 },
- { "i5-8260U", 8, 1.60 },
- { "i5-8265U", 8, 1.60 },
- { "i5-8269U", 8, 2.60 },
- { "i5-8279U", 8, 2.40 },
- { "i5-8300H", 8, 2.30 },
- { "i5-8305G", 8, 2.80 },
- { "i5-8310Y", 4, 1.60 },
- { "i5-8350U", 8, 1.70 },
- { "i5-8365U", 8, 1.60 },
- { "i5-8365UE", 8, 1.60 },
- { "i5-8400", 6, 2.80 },
- { "i5-8400B", 6, 2.80 },
- { "i5-8400H", 8, 2.50 },
- { "i5-8400T", 6, 1.70 },
- { "i5-8420", 6, 2.80 },
- { "i5-8420T", 6, 1.70 },
- { "i5-8500", 6, 3.00 },
- { "i5-8500B", 6, 3.00 },
- { "i5-8500T", 6, 2.10 },
- { "i5-8550", 6, 2.50 },
- { "i5-8600", 6, 3.10 },
- { "i5-8600K", 6, 3.60 },
- { "i5-8600T", 6, 2.30 },
- { "i5-8650", 6, 2.90 },
- { "i5-9300H", 8, 2.40 },
- { "i5-9300HF", 8, 2.40 },
- { "i5-9400", 6, 2.90 },
- { "i5-9400F", 6, 2.90 },
- { "i5-9400H", 8, 2.50 },
- { "i5-9400T", 6, 1.80 },
- { "i5-9500", 6, 3.00 },
- { "i5-9500E", 6, 3.00 },
- { "i5-9500F", 6, 3.00 },
- { "i5-9500T", 6, 2.20 },
- { "i5-9500TE", 6, 2.20 },
- { "i5-9600", 6, 3.10 },
- { "i5-9600K", 6, 3.70 },
- { "i5-9600KF", 6, 3.70 },
- { "i5-9600T", 6, 2.30 },
- { "i5-12450H", 12, 2.00 },
- { "i5-12450HX", 12, 2.40 },
- { "i5-12650H", 16, 2.30 },
- { "i5-13420H", 12, 2.10 },
- { "i5-13450HX", 16, 2.40 },
- { "i5-13500HX", 20, 2.50 },
- { "i5-13600HX", 20, 2.60 },
- { "i5-14400", 16, 2.50 },
- { "i5-14400F", 16, 2.50 },
- { "i5-14400T", 16, 1.50 },
- { "i5-14450HX", 16, 2.40 },
- { "i5-14490F", 16, 2.80 },
- { "i5-14500", 20, 2.60 },
- { "i5-14500GX", 20, 2.60 },
- { "i5-14500HX", 20, 2.60 },
- { "i5-14500T", 20, 1.70 },
- { "i5-14500TE", 20, 1.20 },
- { "i5-14600", 20, 2.70 },
- { "i5-14600K", 20, 3.50 },
- { "i5-14600KF", 20, 3.50 },
- { "i5-14600T", 20, 1.80 },
+ { "i5-10200H", 8 },
+ { "i5-10210U", 8 },
+ { "i5-10210Y", 8 },
+ { "i5-10300H", 8 },
+ { "i5-1030G4", 8 },
+ { "i5-1030G7", 8 },
+ { "i5-1030NG7", 8 },
+ { "i5-10310U", 8 },
+ { "i5-10310Y", 8 },
+ { "i5-1035G1", 8 },
+ { "i5-1035G4", 8 },
+ { "i5-1035G7", 8 },
+ { "i5-1038NG7", 8 },
+ { "i5-10400", 12 },
+ { "i5-10400F", 12 },
+ { "i5-10400H", 8 },
+ { "i5-10400T", 12 },
+ { "i5-10500", 12 },
+ { "i5-10500E", 12 },
+ { "i5-10500H", 12 },
+ { "i5-10500T", 12 },
+ { "i5-10500TE", 12 },
+ { "i5-10505", 12 },
+ { "i5-10600", 12 },
+ { "i5-10600K", 12 },
+ { "i5-10600KF", 12 },
+ { "i5-10600T", 12 },
+ { "i5-1115G4", 4 },
+ { "i5-1125G4", 8 },
+ { "i5-11260H", 12 },
+ { "i5-11300H", 8 },
+ { "i5-1130G7", 8 },
+ { "i5-11320H", 8 },
+ { "i5-1135G7", 8 },
+ { "i5-11400", 12 },
+ { "i5-11400F", 12 },
+ { "i5-11400H", 12 },
+ { "i5-11400T", 12 },
+ { "i5-1140G7", 8 },
+ { "i5-1145G7", 8 },
+ { "i5-1145G7E", 8 },
+ { "i5-1145GRE", 8 },
+ { "i5-11500", 12 },
+ { "i5-11500B", 12 },
+ { "i5-11500H", 12 },
+ { "i5-11500HE", 12 },
+ { "i5-11500T", 12 },
+ { "i5-1155G7", 8 },
+ { "i5-11600", 12 },
+ { "i5-11600K", 12 },
+ { "i5-11600KF", 12 },
+ { "i5-11600T", 12 },
+ { "i5-1230U", 12 },
+ { "i5-1235U", 12 },
+ { "i5-12400", 12 },
+ { "i5-12400F", 12 },
+ { "i5-12400T", 12 },
+ { "i5-1240P", 16 },
+ { "i5-1240U", 12 },
+ { "i5-1245U", 12 },
+ { "i5-12490F", 12 },
+ { "i5-12500", 12 },
+ { "i5-12500H", 16 },
+ { "i5-12500HL", 16 },
+ { "i5-12500T", 12 },
+ { "i5-1250P", 16 },
+ { "i5-1250PE", 16 },
+ { "i5-12600", 12 },
+ { "i5-12600H", 16 },
+ { "i5-12600HE", 16 },
+ { "i5-12600HL", 16 },
+ { "i5-12600HX", 16 },
+ { "i5-12600K", 16 },
+ { "i5-12600KF", 16 },
+ { "i5-12600T", 12 },
+ { "i5-13400", 16 },
+ { "i5-13400F", 16 },
+ { "i5-13400T", 16 },
+ { "i5-1340P", 16 },
+ { "i5-1340PE", 16 },
+ { "i5-13490F", 16 },
+ { "i5-13500", 20 },
+ { "i5-13500H", 16 },
+ { "i5-13500T", 20 },
+ { "i5-13505H", 16 },
+ { "i5-1350P", 16 },
+ { "i5-1350PE", 16 },
+ { "i5-13600", 20 },
+ { "i5-13600H", 16 },
+ { "i5-13600HE", 16 },
+ { "i5-13600K", 20 },
+ { "i5-13600KF", 20 },
+ { "i5-13600T", 20 },
+ { "i5-2300", 4 },
+ { "i5-2310", 4 },
+ { "i5-2320", 4 },
+ { "i5-2380P", 4 },
+ { "i5-2390T", 4 },
+ { "i5-2400", 4 },
+ { "i5-2400S", 4 },
+ { "i5-2405S", 4 },
+ { "i5-2410M", 4 },
+ { "i5-2415M", 4 },
+ { "i5-2430M", 4 },
+ { "i5-2435M", 4 },
+ { "i5-2450M", 4 },
+ { "i5-2450P", 4 },
+ { "i5-2467M", 4 },
+ { "i5-2475M", 4 },
+ { "i5-2477M", 4 },
+ { "i5-2487M", 4 },
+ { "i5-2490M", 4 },
+ { "i5-2497M", 4 },
+ { "i5-2500", 4 },
+ { "i5-2500K", 4 },
+ { "i5-2500S", 4 },
+ { "i5-2500T", 4 },
+ { "i5-2510E", 4 },
+ { "i5-2515E", 4 },
+ { "i5-2520M", 4 },
+ { "i5-2537M", 4 },
+ { "i5-2540LM", 4 },
+ { "i5-2540M", 4 },
+ { "i5-2547M", 4 },
+ { "i5-2550K", 4 },
+ { "i5-2557M", 4 },
+ { "i5-2560LM", 4 },
+ { "i5-2560M", 4 },
+ { "i5-2580M", 4 },
+ { "i5-3210M", 4 },
+ { "i5-3230M", 4 },
+ { "i5-3317U", 4 },
+ { "i5-3320M", 4 },
+ { "i5-3330", 4 },
+ { "i5-3330S", 4 },
+ { "i5-3335S", 4 },
+ { "i5-3337U", 4 },
+ { "i5-3339Y", 4 },
+ { "i5-3340", 4 },
+ { "i5-3340M", 4 },
+ { "i5-3340S", 4 },
+ { "i5-3350P", 4 },
+ { "i5-3360M", 4 },
+ { "i5-3380M", 4 },
+ { "i5-3427U", 4 },
+ { "i5-3437U", 4 },
+ { "i5-3439Y", 4 },
+ { "i5-3450", 4 },
+ { "i5-3450S", 4 },
+ { "i5-3470", 4 },
+ { "i5-3470S", 4 },
+ { "i5-3470T", 4 },
+ { "i5-3475S", 4 },
+ { "i5-3550", 4 },
+ { "i5-3550S", 4 },
+ { "i5-3570", 4 },
+ { "i5-3570K", 4 },
+ { "i5-3570S", 4 },
+ { "i5-3570T", 4 },
+ { "i5-3610ME", 4 },
+ { "i5-4200H", 4 },
+ { "i5-4200M", 4 },
+ { "i5-4200U", 4 },
+ { "i5-4200Y", 4 },
+ { "i5-4202Y", 4 },
+ { "i5-4210H", 4 },
+ { "i5-4210M", 4 },
+ { "i5-4210U", 4 },
+ { "i5-4210Y", 4 },
+ { "i5-4220Y", 4 },
+ { "i5-4250U", 4 },
+ { "i5-4258U", 4 },
+ { "i5-4260U", 4 },
+ { "i5-4278U", 4 },
+ { "i5-4288U", 4 },
+ { "i5-4300M", 4 },
+ { "i5-4300U", 4 },
+ { "i5-4300Y", 4 },
+ { "i5-4302Y", 4 },
+ { "i5-4308U", 4 },
+ { "i5-430M", 4 },
+ { "i5-430UM", 4 },
+ { "i5-4310M", 4 },
+ { "i5-4310U", 4 },
+ { "i5-4330M", 4 },
+ { "i5-4340M", 4 },
+ { "i5-4350U", 4 },
+ { "i5-4360U", 4 },
+ { "i5-4400E", 4 },
+ { "i5-4402E", 4 },
+ { "i5-4402EC", 4 },
+ { "i5-4410E", 4 },
+ { "i5-4422E", 4 },
+ { "i5-4430", 4 },
+ { "i5-4430S", 4 },
+ { "i5-4440", 4 },
+ { "i5-4440S", 4 },
+ { "i5-4460", 4 },
+ { "i5-4460S", 4 },
+ { "i5-4460T", 4 },
+ { "i5-4470", 4 },
+ { "i5-450M", 4 },
+ { "i5-4570", 4 },
+ { "i5-4570R", 4 },
+ { "i5-4570S", 4 },
+ { "i5-4570T", 4 },
+ { "i5-4570TE", 4 },
+ { "i5-4590", 4 },
+ { "i5-4590S", 4 },
+ { "i5-4590T", 4 },
+ { "i5-460M", 4 },
+ { "i5-4670", 4 },
+ { "i5-4670K", 4 },
+ { "i5-4670R", 4 },
+ { "i5-4670S", 4 },
+ { "i5-4670T", 4 },
+ { "i5-4690", 4 },
+ { "i5-4690K", 4 },
+ { "i5-4690S", 4 },
+ { "i5-4690T", 4 },
+ { "i5-470UM", 4 },
+ { "i5-480M", 4 },
+ { "i5-5200U", 4 },
+ { "i5-520E", 4 },
+ { "i5-520M", 4 },
+ { "i5-520UM", 4 },
+ { "i5-5250U", 4 },
+ { "i5-5257U", 4 },
+ { "i5-5287U", 4 },
+ { "i5-5300U", 4 },
+ { "i5-5350H", 4 },
+ { "i5-5350U", 4 },
+ { "i5-540M", 4 },
+ { "i5-540UM", 4 },
+ { "i5-5575R", 4 },
+ { "i5-560M", 4 },
+ { "i5-560UM", 4 },
+ { "i5-5675C", 4 },
+ { "i5-5675R", 4 },
+ { "i5-580M", 4 },
+ { "i5-6198DU", 4 },
+ { "i5-6200U", 4 },
+ { "i5-6260U", 4 },
+ { "i5-6267U", 4 },
+ { "i5-6287U", 4 },
+ { "i5-6300HQ", 4 },
+ { "i5-6300U", 4 },
+ { "i5-6350HQ", 4 },
+ { "i5-6360U", 4 },
+ { "i5-6400", 4 },
+ { "i5-6400T", 4 },
+ { "i5-6402P", 4 },
+ { "i5-6440EQ", 4 },
+ { "i5-6440HQ", 4 },
+ { "i5-6442EQ", 4 },
+ { "i5-650", 4 },
+ { "i5-6500", 4 },
+ { "i5-6500T", 4 },
+ { "i5-6500TE", 4 },
+ { "i5-655K", 4 },
+ { "i5-6585R", 4 },
+ { "i5-660", 4 },
+ { "i5-6600", 4 },
+ { "i5-6600K", 4 },
+ { "i5-6600T", 4 },
+ { "i5-661", 4 },
+ { "i5-6685R", 4 },
+ { "i5-670", 4 },
+ { "i5-680", 4 },
+ { "i5-7200U", 4 },
+ { "i5-7210U", 4 },
+ { "i5-7260U", 4 },
+ { "i5-7267U", 4 },
+ { "i5-7287U", 4 },
+ { "i5-7300HQ", 4 },
+ { "i5-7300U", 4 },
+ { "i5-7360U", 4 },
+ { "i5-7400", 4 },
+ { "i5-7400T", 4 },
+ { "i5-7440EQ", 4 },
+ { "i5-7440HQ", 4 },
+ { "i5-7442EQ", 4 },
+ { "i5-750", 4 },
+ { "i5-7500", 4 },
+ { "i5-7500T", 4 },
+ { "i5-750S", 4 },
+ { "i5-760", 4 },
+ { "i5-7600", 4 },
+ { "i5-7600K", 4 },
+ { "i5-7600T", 4 },
+ { "i5-7640X", 4 },
+ { "i5-7Y54", 4 },
+ { "i5-7Y57", 4 },
+ { "i5-8200Y", 4 },
+ { "i5-8210Y", 4 },
+ { "i5-8250U", 8 },
+ { "i5-8257U", 8 },
+ { "i5-8259U", 8 },
+ { "i5-8260U", 8 },
+ { "i5-8265U", 8 },
+ { "i5-8269U", 8 },
+ { "i5-8279U", 8 },
+ { "i5-8300H", 8 },
+ { "i5-8305G", 8 },
+ { "i5-8310Y", 4 },
+ { "i5-8350U", 8 },
+ { "i5-8365U", 8 },
+ { "i5-8365UE", 8 },
+ { "i5-8400", 6 },
+ { "i5-8400B", 6 },
+ { "i5-8400H", 8 },
+ { "i5-8400T", 6 },
+ { "i5-8420", 6 },
+ { "i5-8420T", 6 },
+ { "i5-8500", 6 },
+ { "i5-8500B", 6 },
+ { "i5-8500T", 6 },
+ { "i5-8550", 6 },
+ { "i5-8600", 6 },
+ { "i5-8600K", 6 },
+ { "i5-8600T", 6 },
+ { "i5-8650", 6 },
+ { "i5-9300H", 8 },
+ { "i5-9300HF", 8 },
+ { "i5-9400", 6 },
+ { "i5-9400F", 6 },
+ { "i5-9400H", 8 },
+ { "i5-9400T", 6 },
+ { "i5-9500", 6 },
+ { "i5-9500E", 6 },
+ { "i5-9500F", 6 },
+ { "i5-9500T", 6 },
+ { "i5-9500TE", 6 },
+ { "i5-9600", 6 },
+ { "i5-9600K", 6 },
+ { "i5-9600KF", 6 },
+ { "i5-9600T", 6 },
+ { "i5-12450H", 12 },
+ { "i5-12450HX", 12 },
+ { "i5-12650H", 16 },
+ { "i5-13420H", 12 },
+ { "i5-13450HX", 16 },
+ { "i5-13500HX", 20 },
+ { "i5-13600HX", 20 },
+ { "i5-14400", 16 },
+ { "i5-14400F", 16 },
+ { "i5-14400T", 16 },
+ { "i5-14450HX", 16 },
+ { "i5-14490F", 16 },
+ { "i5-14500", 20 },
+ { "i5-14500GX", 20 },
+ { "i5-14500HX", 20 },
+ { "i5-14500T", 20 },
+ { "i5-14500TE", 20 },
+ { "i5-14600", 20 },
+ { "i5-14600K", 20 },
+ { "i5-14600KF", 20 },
+ { "i5-14600T", 20 },
// i7 series
- { "i7-10510U", 8, 1.80 },
- { "i7-10510Y", 8, 1.20 },
- { "i7-1060G7", 8, 1.00 },
- { "i7-10610U", 8, 1.80 },
- { "i7-1065G7", 8, 1.30 },
- { "i7-1068G7", 8, 2.30 },
- { "i7-1068NG7", 8, 2.30 },
- { "i7-10700", 16, 2.90 },
- { "i7-10700E", 16, 2.90 },
- { "i7-10700F", 16, 2.90 },
- { "i7-10700K", 16, 3.80 },
- { "i7-10700KF", 16, 3.80 },
- { "i7-10700T", 16, 2.00 },
- { "i7-10700TE", 16, 2.00 },
- { "i7-10710U", 12, 1.10 },
- { "i7-10750H", 12, 2.60 },
- { "i7-10810U", 12, 1.10 },
- { "i7-10850H", 12, 2.70 },
- { "i7-10870H", 16, 2.20 },
- { "i7-10875H", 16, 2.30 },
- { "i7-11370H", 8, 3.30 },
- { "i7-11375H", 8, 3.30 },
- { "i7-11390H", 8, 3.40 },
- { "i7-11600H", 12, 2.90 },
- { "i7-1160G7", 8, 1.20 },
- { "i7-1165G7", 8, 2.80 },
- { "i7-11700", 16, 2.50 },
- { "i7-11700B", 16, 3.20 },
- { "i7-11700F", 16, 2.50 },
- { "i7-11700K", 16, 3.60 },
- { "i7-11700KF", 16, 3.60 },
- { "i7-11700T", 16, 1.40 },
- { "i7-11800H", 16, 2.30 },
- { "i7-1180G7", 8, 1.30 },
- { "i7-11850H", 16, 2.50 },
- { "i7-11850HE", 16, 2.60 },
- { "i7-1185G7", 8, 3.00 },
- { "i7-1185G7E", 8, 1.80 },
- { "i7-1185GRE", 8, 1.80 },
- { "i7-1195G7", 8, 2.90 },
- { "i7-1250U", 12, 1.10 },
- { "i7-1255U", 12, 1.70 },
- { "i7-1260P", 16, 2.10 },
- { "i7-1260U", 12, 1.10 },
- { "i7-1265U", 12, 1.80 },
- { "i7-12700", 20, 2.10 },
- { "i7-12700F", 20, 2.10 },
- { "i7-12700K", 20, 3.60 },
- { "i7-12700KF", 20, 3.60 },
- { "i7-12700T", 20, 1.40 },
- { "i7-12700H", 20, 2.30 },
- { "i7-1270P", 16, 2.20 },
- { "i7-1270PE", 16, 2.20 },
- { "i7-1360P", 16, 2.20 },
- { "i7-13700", 24, 2.10 },
- { "i7-13700F", 24, 2.10 },
- { "i7-13700K", 24, 3.40 },
- { "i7-13700KF", 24, 3.40 },
- { "i7-13700T", 24, 1.40 },
- { "i7-13790F", 24, 2.10 },
- { "i7-2535QM", 8, 2.40 },
- { "i7-2570QM", 8, 2.70 },
- { "i7-2600", 8, 3.40 },
- { "i7-2600K", 8, 3.40 },
- { "i7-2600S", 8, 2.80 },
- { "i7-2610UE", 4, 1.50 },
- { "i7-2617M", 4, 1.50 },
- { "i7-2620M", 4, 2.70 },
- { "i7-2627M", 4, 1.50 },
- { "i7-2629M", 4, 2.10 },
- { "i7-2630QM", 8, 2.00 },
- { "i7-2635QM", 8, 2.00 },
- { "i7-2637M", 4, 1.70 },
- { "i7-2640M", 4, 2.80 },
- { "i7-2649M", 4, 2.30 },
- { "i7-2655LE", 4, 2.20 },
- { "i7-2655QM", 8, 2.40 },
- { "i7-2657M", 4, 1.60 },
- { "i7-2660M", 4, 2.20 },
- { "i7-2667M", 4, 1.80 },
- { "i7-2669M", 4, 2.10 },
- { "i7-2670QM", 8, 2.20 },
- { "i7-2675QM", 8, 2.20 },
- { "i7-2677M", 4, 1.80 },
- { "i7-2685QM", 8, 2.50 },
- { "i7-2689M", 4, 2.00 },
- { "i7-2700K", 8, 3.50 },
- { "i7-2710QE", 8, 2.10 },
- { "i7-2715QE", 8, 2.10 },
- { "i7-2720QM", 8, 2.20 },
- { "i7-2740QM", 8, 2.40 },
- { "i7-2760QM", 8, 2.40 },
- { "i7-2820QM", 8, 2.30 },
- { "i7-2840QM", 8, 2.40 },
- { "i7-2860QM", 8, 2.50 },
- { "i7-2920XM", 8, 2.50 },
- { "i7-2960XM", 8, 2.70 },
- { "i7-3517U", 4, 1.90 },
- { "i7-3517UE", 4, 1.70 },
- { "i7-3520M", 4, 2.90 },
- { "i7-3537U", 4, 2.00 },
- { "i7-3540M", 4, 3.00 },
- { "i7-3555LE", 4, 2.50 },
- { "i7-3610QE", 8, 2.30 },
- { "i7-3610QM", 8, 2.30 },
- { "i7-3612QE", 8, 2.10 },
- { "i7-3612QM", 8, 2.10 },
- { "i7-3615QE", 8, 2.30 },
- { "i7-3615QM", 8, 2.30 },
- { "i7-3630QM", 8, 2.40 },
- { "i7-3632QM", 8, 2.20 },
- { "i7-3635QM", 8, 2.40 },
- { "i7-3667U", 4, 2.00 },
- { "i7-3687U", 4, 2.10 },
- { "i7-3689Y", 4, 1.50 },
- { "i7-3720QM", 8, 2.60 },
- { "i7-3740QM", 8, 2.70 },
- { "i7-3770", 8, 3.40 },
- { "i7-3770K", 8, 3.50 },
- { "i7-3770S", 8, 3.10 },
- { "i7-3770T", 8, 2.50 },
- { "i7-3820", 8, 3.60 },
- { "i7-3820QM", 8, 2.70 },
- { "i7-3840QM", 8, 2.80 },
- { "i7-3920XM", 8, 2.90 },
- { "i7-3930K", 12, 3.20 },
- { "i7-3940XM", 8, 3.00 },
- { "i7-3960X", 12, 3.30 },
- { "i7-3970X", 12, 3.50 },
- { "i7-4500U", 4, 1.80 },
- { "i7-4510U", 4, 2.00 },
- { "i7-4550U", 4, 1.50 },
- { "i7-4558U", 4, 2.80 },
- { "i7-4578U", 4, 3.00 },
- { "i7-4600M", 4, 2.90 },
- { "i7-4600U", 4, 2.10 },
- { "i7-4610M", 4, 3.00 },
- { "i7-4610Y", 4, 1.70 },
- { "i7-4650U", 4, 1.70 },
- { "i7-4700EC", 8, 2.70 },
- { "i7-4700EQ", 8, 2.40 },
- { "i7-4700HQ", 8, 2.40 },
- { "i7-4700MQ", 8, 2.40 },
- { "i7-4701EQ", 8, 2.40 },
- { "i7-4702EC", 8, 2.00 },
- { "i7-4702HQ", 8, 2.20 },
- { "i7-4702MQ", 8, 2.20 },
- { "i7-4710HQ", 8, 2.50 },
- { "i7-4710MQ", 8, 2.50 },
- { "i7-4712HQ", 8, 2.30 },
- { "i7-4712MQ", 8, 2.30 },
- { "i7-4720HQ", 8, 2.60 },
- { "i7-4722HQ", 8, 2.40 },
- { "i7-4750HQ", 8, 2.00 },
- { "i7-4760HQ", 8, 2.10 },
- { "i7-4765T", 8, 2.00 },
- { "i7-4770", 8, 3.40 },
- { "i7-4770HQ", 8, 2.20 },
- { "i7-4770K", 8, 3.50 },
- { "i7-4770R", 8, 3.20 },
- { "i7-4770S", 8, 3.10 },
- { "i7-4770T", 8, 2.50 },
- { "i7-4770TE", 8, 2.30 },
- { "i7-4771", 8, 3.50 },
- { "i7-4785T", 8, 2.20 },
- { "i7-4790", 8, 3.60 },
- { "i7-4790K", 8, 4.00 },
- { "i7-4790S", 8, 3.20 },
- { "i7-4790T", 8, 2.70 },
- { "i7-4800MQ", 8, 2.70 },
- { "i7-4810MQ", 8, 2.80 },
- { "i7-4820K", 8, 3.70 },
- { "i7-4850EQ", 8, 1.60 },
- { "i7-4850HQ", 8, 2.30 },
- { "i7-4860EQ", 8, 1.80 },
- { "i7-4860HQ", 8, 2.40 },
- { "i7-4870HQ", 8, 2.50 },
- { "i7-4900MQ", 8, 2.80 },
- { "i7-4910MQ", 8, 2.90 },
- { "i7-4930K", 12, 3.40 },
- { "i7-4930MX", 8, 3.00 },
- { "i7-4940MX", 8, 3.10 },
- { "i7-4950HQ", 8, 2.40 },
- { "i7-4960HQ", 8, 2.60 },
- { "i7-4960X", 12, 3.60 },
- { "i7-4980HQ", 8, 2.80 },
- { "i7-5500U", 4, 2.40 },
- { "i7-5550U", 4, 2.00 },
- { "i7-5557U", 4, 3.10 },
- { "i7-5600U", 4, 2.60 },
- { "i7-5650U", 4, 2.20 },
- { "i7-5700EQ", 8, 2.60 },
- { "i7-5700HQ", 8, 2.70 },
- { "i7-5750HQ", 8, 2.50 },
- { "i7-5775C", 8, 3.30 },
- { "i7-5775R", 8, 3.30 },
- { "i7-5820K", 12, 3.30 },
- { "i7-5850EQ", 8, 2.70 },
- { "i7-5850HQ", 8, 2.70 },
- { "i7-5930K", 12, 3.50 },
- { "i7-5950HQ", 8, 2.90 },
- { "i7-5960X", 16, 3.00 },
- { "i7-610E", 4, 2.53 },
- { "i7-620LE", 4, 2.00 },
- { "i7-620LM", 4, 2.00 },
- { "i7-620M", 4, 2.66 },
- { "i7-620UE", 4, 1.06 },
- { "i7-620UM", 4, 1.20 },
- { "i7-640LM", 4, 2.13 },
- { "i7-640M", 4, 2.80 },
- { "i7-640UM", 4, 1.20 },
- { "i7-6498DU", 4, 2.50 },
- { "i7-6500U", 4, 2.50 },
- { "i7-6560U", 4, 2.20 },
- { "i7-6567U", 4, 3.30 },
- { "i7-6600U", 4, 2.60 },
- { "i7-660LM", 4, 2.26 },
- { "i7-660UE", 4, 1.33 },
- { "i7-660UM", 4, 1.33 },
- { "i7-6650U", 4, 2.20 },
- { "i7-6660U", 4, 2.40 },
- { "i7-6700", 8, 3.40 },
- { "i7-6700HQ", 8, 2.60 },
- { "i7-6700K", 8, 4.00 },
- { "i7-6700T", 8, 2.80 },
- { "i7-6700TE", 8, 2.40 },
- { "i7-6770HQ", 8, 2.60 },
- { "i7-6785R", 8, 3.30 },
- { "i7-6800K", 12, 3.40 },
- { "i7-680UM", 4, 1.46 },
- { "i7-6820EQ", 8, 2.80 },
- { "i7-6820HK", 8, 2.70 },
- { "i7-6820HQ", 8, 2.70 },
- { "i7-6822EQ", 8, 2.00 },
- { "i7-6850K", 12, 3.60 },
- { "i7-6870HQ", 8, 2.70 },
- { "i7-6900K", 16, 3.20 },
- { "i7-6920HQ", 8, 2.90 },
- { "i7-6950X", 20, 3.00 },
- { "i7-6970HQ", 8, 2.80 },
- { "i7-720QM", 8, 1.60 },
- { "i7-740QM", 8, 1.73 },
- { "i7-7500U", 4, 2.70 },
- { "i7-7510U", 4, 1.80 },
- { "i7-7560U", 4, 2.40 },
- { "i7-7567U", 4, 3.50 },
- { "i7-7600U", 4, 2.80 },
- { "i7-7660U", 4, 2.50 },
- { "i7-7700", 8, 3.60 },
- { "i7-7700HQ", 8, 2.80 },
- { "i7-7700K", 8, 4.20 },
- { "i7-7700T", 8, 2.90 },
- { "i7-7740X", 8, 4.30 },
- { "i7-7800X", 12, 3.50 },
- { "i7-7820EQ", 8, 3.00 },
- { "i7-7820HK", 8, 2.90 },
- { "i7-7820HQ", 8, 2.90 },
- { "i7-7820X", 16, 3.60 },
- { "i7-7920HQ", 8, 3.10 },
- { "i7-7Y75", 4, 1.30 },
- { "i7-8086K", 12, 4.00 },
- { "i7-820QM", 8, 1.73 },
- { "i7-840QM", 8, 1.86 },
- { "i7-8500Y", 4, 1.50 },
- { "i7-8550U", 8, 1.80 },
- { "i7-8557U", 8, 1.70 },
- { "i7-8559U", 8, 2.70 },
- { "i7-8565U", 8, 1.80 },
- { "i7-8569U", 8, 2.80 },
- { "i7-860", 8, 2.80 },
- { "i7-860S", 8, 2.53 },
- { "i7-8650U", 8, 1.90 },
- { "i7-8665U", 8, 1.90 },
- { "i7-8665UE", 8, 1.70 },
- { "i7-8670", 12, 2.90 },
- { "i7-8670T", 12, 2.20 },
- { "i7-870", 8, 2.93 },
- { "i7-8700", 12, 3.20 },
- { "i7-8700B", 12, 3.20 },
- { "i7-8700K", 12, 3.70 },
- { "i7-8700T", 12, 2.40 },
- { "i7-8705G", 8, 3.10 },
- { "i7-8706G", 8, 3.10 },
- { "i7-8709G", 8, 3.10 },
- { "i7-870S", 8, 2.66 },
- { "i7-8750H", 12, 2.20 },
- { "i7-875K", 8, 2.93 },
- { "i7-880", 8, 3.06 },
- { "i7-8809G", 8, 3.10 },
- { "i7-8850H", 12, 2.60 },
- { "i7-920", 8, 2.66 },
- { "i7-920XM", 8, 2.00 },
- { "i7-930", 8, 2.80 },
- { "i7-940", 8, 2.93 },
- { "i7-940XM", 8, 2.13 },
- { "i7-950", 8, 3.06 },
- { "i7-960", 8, 3.20 },
- { "i7-965", 8, 3.20 },
- { "i7-970", 12, 3.20 },
- { "i7-9700", 8, 3.00 },
- { "i7-9700E", 8, 2.60 },
- { "i7-9700F", 8, 3.00 },
- { "i7-9700K", 8, 3.60 },
- { "i7-9700KF", 8, 3.60 },
- { "i7-9700T", 8, 2.00 },
- { "i7-9700TE", 8, 1.80 },
- { "i7-975", 8, 3.33 },
- { "i7-9750H", 12, 2.60 },
- { "i7-9750HF", 12, 2.60 },
- { "i7-980", 12, 3.33 },
- { "i7-9800X", 16, 3.80 },
- { "i7-980X", 12, 3.33 },
- { "i7-9850H", 12, 2.60 },
- { "i7-9850HE", 12, 2.70 },
- { "i7-9850HL", 12, 1.90 },
- { "i7-990X", 12, 3.46 },
- { "i7-12650H", 16, 2.30 },
- { "i7-12800H", 20, 2.40 },
- { "i7-12800HE", 20, 2.40 },
- { "i7-12800HX", 24, 2.00 },
- { "i7-12850HX", 24, 2.10 },
- { "i7-13620H", 16, 2.40 },
- { "i7-13650HX", 20, 2.60 },
- { "i7-13700H", 20, 2.40 },
- { "i7-13700HX", 24, 2.10 },
- { "i7-13705H", 20, 2.40 },
- { "i7-13800H", 20, 2.50 },
- { "i7-13850HX", 28, 2.10 },
- { "i7-14650HX", 24, 2.20 },
- { "i7-14700", 28, 2.10 },
- { "i7-14700F", 28, 2.10 },
- { "i7-14700H", 28, 2.30 },
- { "i7-14700HX", 28, 2.10 },
- { "i7-14700K", 28, 3.40 },
- { "i7-14700KF", 28, 3.40 },
- { "i7-14700T", 28, 1.30 },
- { "i7-14790F", 24, 2.10 },
- { "i7-14950HX", 24, 2.20 },
+ { "i7-10510U", 8 },
+ { "i7-10510Y", 8 },
+ { "i7-1060G7", 8 },
+ { "i7-10610U", 8 },
+ { "i7-1065G7", 8 },
+ { "i7-1068G7", 8 },
+ { "i7-1068NG7", 8 },
+ { "i7-10700", 16 },
+ { "i7-10700E", 16 },
+ { "i7-10700F", 16 },
+ { "i7-10700K", 16 },
+ { "i7-10700KF", 16 },
+ { "i7-10700T", 16 },
+ { "i7-10700TE", 16 },
+ { "i7-10710U", 12 },
+ { "i7-10750H", 12 },
+ { "i7-10810U", 12 },
+ { "i7-10850H", 12 },
+ { "i7-10870H", 16 },
+ { "i7-10875H", 16 },
+ { "i7-11370H", 8 },
+ { "i7-11375H", 8 },
+ { "i7-11390H", 8 },
+ { "i7-11600H", 12 },
+ { "i7-1160G7", 8 },
+ { "i7-1165G7", 8 },
+ { "i7-11700", 16 },
+ { "i7-11700B", 16 },
+ { "i7-11700F", 16 },
+ { "i7-11700K", 16 },
+ { "i7-11700KF", 16 },
+ { "i7-11700T", 16 },
+ { "i7-11800H", 16 },
+ { "i7-1180G7", 8 },
+ { "i7-11850H", 16 },
+ { "i7-11850HE", 16 },
+ { "i7-1185G7", 8 },
+ { "i7-1185G7E", 8 },
+ { "i7-1185GRE", 8 },
+ { "i7-1195G7", 8 },
+ { "i7-1250U", 12 },
+ { "i7-1255U", 12 },
+ { "i7-1260P", 16 },
+ { "i7-1260U", 12 },
+ { "i7-1265U", 12 },
+ { "i7-12700", 20 },
+ { "i7-12700F", 20 },
+ { "i7-12700K", 20 },
+ { "i7-12700KF", 20 },
+ { "i7-12700T", 20 },
+ { "i7-12700H", 20 },
+ { "i7-1270P", 16 },
+ { "i7-1270PE", 16 },
+ { "i7-1360P", 16 },
+ { "i7-13700", 24 },
+ { "i7-13700F", 24 },
+ { "i7-13700K", 24 },
+ { "i7-13700KF", 24 },
+ { "i7-13700T", 24 },
+ { "i7-13790F", 24 },
+ { "i7-2535QM", 8 },
+ { "i7-2570QM", 8 },
+ { "i7-2600", 8 },
+ { "i7-2600K", 8 },
+ { "i7-2600S", 8 },
+ { "i7-2610UE", 4 },
+ { "i7-2617M", 4 },
+ { "i7-2620M", 4 },
+ { "i7-2627M", 4 },
+ { "i7-2629M", 4 },
+ { "i7-2630QM", 8 },
+ { "i7-2635QM", 8 },
+ { "i7-2637M", 4 },
+ { "i7-2640M", 4 },
+ { "i7-2649M", 4 },
+ { "i7-2655LE", 4 },
+ { "i7-2655QM", 8 },
+ { "i7-2657M", 4 },
+ { "i7-2660M", 4 },
+ { "i7-2667M", 4 },
+ { "i7-2669M", 4 },
+ { "i7-2670QM", 8 },
+ { "i7-2675QM", 8 },
+ { "i7-2677M", 4 },
+ { "i7-2685QM", 8 },
+ { "i7-2689M", 4 },
+ { "i7-2700K", 8 },
+ { "i7-2710QE", 8 },
+ { "i7-2715QE", 8 },
+ { "i7-2720QM", 8 },
+ { "i7-2740QM", 8 },
+ { "i7-2760QM", 8 },
+ { "i7-2820QM", 8 },
+ { "i7-2840QM", 8 },
+ { "i7-2860QM", 8 },
+ { "i7-2920XM", 8 },
+ { "i7-2960XM", 8 },
+ { "i7-3517U", 4 },
+ { "i7-3517UE", 4 },
+ { "i7-3520M", 4 },
+ { "i7-3537U", 4 },
+ { "i7-3540M", 4 },
+ { "i7-3555LE", 4 },
+ { "i7-3610QE", 8 },
+ { "i7-3610QM", 8 },
+ { "i7-3612QE", 8 },
+ { "i7-3612QM", 8 },
+ { "i7-3615QE", 8 },
+ { "i7-3615QM", 8 },
+ { "i7-3630QM", 8 },
+ { "i7-3632QM", 8 },
+ { "i7-3635QM", 8 },
+ { "i7-3667U", 4 },
+ { "i7-3687U", 4 },
+ { "i7-3689Y", 4 },
+ { "i7-3720QM", 8 },
+ { "i7-3740QM", 8 },
+ { "i7-3770", 8 },
+ { "i7-3770K", 8 },
+ { "i7-3770S", 8 },
+ { "i7-3770T", 8 },
+ { "i7-3820", 8 },
+ { "i7-3820QM", 8 },
+ { "i7-3840QM", 8 },
+ { "i7-3920XM", 8 },
+ { "i7-3930K", 12 },
+ { "i7-3940XM", 8 },
+ { "i7-3960X", 12 },
+ { "i7-3970X", 12 },
+ { "i7-4500U", 4 },
+ { "i7-4510U", 4 },
+ { "i7-4550U", 4 },
+ { "i7-4558U", 4 },
+ { "i7-4578U", 4 },
+ { "i7-4600M", 4 },
+ { "i7-4600U", 4 },
+ { "i7-4610M", 4 },
+ { "i7-4610Y", 4 },
+ { "i7-4650U", 4 },
+ { "i7-4700EC", 8 },
+ { "i7-4700EQ", 8 },
+ { "i7-4700HQ", 8 },
+ { "i7-4700MQ", 8 },
+ { "i7-4701EQ", 8 },
+ { "i7-4702EC", 8 },
+ { "i7-4702HQ", 8 },
+ { "i7-4702MQ", 8 },
+ { "i7-4710HQ", 8 },
+ { "i7-4710MQ", 8 },
+ { "i7-4712HQ", 8 },
+ { "i7-4712MQ", 8 },
+ { "i7-4720HQ", 8 },
+ { "i7-4722HQ", 8 },
+ { "i7-4750HQ", 8 },
+ { "i7-4760HQ", 8 },
+ { "i7-4765T", 8 },
+ { "i7-4770", 8 },
+ { "i7-4770HQ", 8 },
+ { "i7-4770K", 8 },
+ { "i7-4770R", 8 },
+ { "i7-4770S", 8 },
+ { "i7-4770T", 8 },
+ { "i7-4770TE", 8 },
+ { "i7-4771", 8 },
+ { "i7-4785T", 8 },
+ { "i7-4790", 8 },
+ { "i7-4790K", 8 },
+ { "i7-4790S", 8 },
+ { "i7-4790T", 8 },
+ { "i7-4800MQ", 8 },
+ { "i7-4810MQ", 8 },
+ { "i7-4820K", 8 },
+ { "i7-4850EQ", 8 },
+ { "i7-4850HQ", 8 },
+ { "i7-4860EQ", 8 },
+ { "i7-4860HQ", 8 },
+ { "i7-4870HQ", 8 },
+ { "i7-4900MQ", 8 },
+ { "i7-4910MQ", 8 },
+ { "i7-4930K", 12 },
+ { "i7-4930MX", 8 },
+ { "i7-4940MX", 8 },
+ { "i7-4950HQ", 8 },
+ { "i7-4960HQ", 8 },
+ { "i7-4960X", 12 },
+ { "i7-4980HQ", 8 },
+ { "i7-5500U", 4 },
+ { "i7-5550U", 4 },
+ { "i7-5557U", 4 },
+ { "i7-5600U", 4 },
+ { "i7-5650U", 4 },
+ { "i7-5700EQ", 8 },
+ { "i7-5700HQ", 8 },
+ { "i7-5750HQ", 8 },
+ { "i7-5775C", 8 },
+ { "i7-5775R", 8 },
+ { "i7-5820K", 12 },
+ { "i7-5850EQ", 8 },
+ { "i7-5850HQ", 8 },
+ { "i7-5930K", 12 },
+ { "i7-5950HQ", 8 },
+ { "i7-5960X", 16 },
+ { "i7-610E", 4 },
+ { "i7-620LE", 4 },
+ { "i7-620LM", 4 },
+ { "i7-620M", 4 },
+ { "i7-620UE", 4 },
+ { "i7-620UM", 4 },
+ { "i7-640LM", 4 },
+ { "i7-640M", 4 },
+ { "i7-640UM", 4 },
+ { "i7-6498DU", 4 },
+ { "i7-6500U", 4 },
+ { "i7-6560U", 4 },
+ { "i7-6567U", 4 },
+ { "i7-6600U", 4 },
+ { "i7-660LM", 4 },
+ { "i7-660UE", 4 },
+ { "i7-660UM", 4 },
+ { "i7-6650U", 4 },
+ { "i7-6660U", 4 },
+ { "i7-6700", 8 },
+ { "i7-6700HQ", 8 },
+ { "i7-6700K", 8 },
+ { "i7-6700T", 8 },
+ { "i7-6700TE", 8 },
+ { "i7-6770HQ", 8 },
+ { "i7-6785R", 8 },
+ { "i7-6800K", 12 },
+ { "i7-680UM", 4 },
+ { "i7-6820EQ", 8 },
+ { "i7-6820HK", 8 },
+ { "i7-6820HQ", 8 },
+ { "i7-6822EQ", 8 },
+ { "i7-6850K", 12 },
+ { "i7-6870HQ", 8 },
+ { "i7-6900K", 16 },
+ { "i7-6920HQ", 8 },
+ { "i7-6950X", 20 },
+ { "i7-6970HQ", 8 },
+ { "i7-720QM", 8 },
+ { "i7-740QM", 8 },
+ { "i7-7500U", 4 },
+ { "i7-7510U", 4 },
+ { "i7-7560U", 4 },
+ { "i7-7567U", 4 },
+ { "i7-7600U", 4 },
+ { "i7-7660U", 4 },
+ { "i7-7700", 8 },
+ { "i7-7700HQ", 8 },
+ { "i7-7700K", 8 },
+ { "i7-7700T", 8 },
+ { "i7-7740X", 8 },
+ { "i7-7800X", 12 },
+ { "i7-7820EQ", 8 },
+ { "i7-7820HK", 8 },
+ { "i7-7820HQ", 8 },
+ { "i7-7820X", 16 },
+ { "i7-7920HQ", 8 },
+ { "i7-7Y75", 4 },
+ { "i7-8086K", 12 },
+ { "i7-820QM", 8 },
+ { "i7-840QM", 8 },
+ { "i7-8500Y", 4 },
+ { "i7-8550U", 8 },
+ { "i7-8557U", 8 },
+ { "i7-8559U", 8 },
+ { "i7-8565U", 8 },
+ { "i7-8569U", 8 },
+ { "i7-860", 8 },
+ { "i7-860S", 8 },
+ { "i7-8650U", 8 },
+ { "i7-8665U", 8 },
+ { "i7-8665UE", 8 },
+ { "i7-8670", 12 },
+ { "i7-8670T", 12 },
+ { "i7-870", 8 },
+ { "i7-8700", 12 },
+ { "i7-8700B", 12 },
+ { "i7-8700K", 12 },
+ { "i7-8700T", 12 },
+ { "i7-8705G", 8 },
+ { "i7-8706G", 8 },
+ { "i7-8709G", 8 },
+ { "i7-870S", 8 },
+ { "i7-8750H", 12 },
+ { "i7-875K", 8 },
+ { "i7-880", 8 },
+ { "i7-8809G", 8 },
+ { "i7-8850H", 12 },
+ { "i7-920", 8 },
+ { "i7-920XM", 8 },
+ { "i7-930", 8 },
+ { "i7-940", 8 },
+ { "i7-940XM", 8 },
+ { "i7-950", 8 },
+ { "i7-960", 8 },
+ { "i7-965", 8 },
+ { "i7-970", 12 },
+ { "i7-9700", 8 },
+ { "i7-9700E", 8 },
+ { "i7-9700F", 8 },
+ { "i7-9700K", 8 },
+ { "i7-9700KF", 8 },
+ { "i7-9700T", 8 },
+ { "i7-9700TE", 8 },
+ { "i7-975", 8 },
+ { "i7-9750H", 12 },
+ { "i7-9750HF", 12 },
+ { "i7-980", 12 },
+ { "i7-9800X", 16 },
+ { "i7-980X", 12 },
+ { "i7-9850H", 12 },
+ { "i7-9850HE", 12 },
+ { "i7-9850HL", 12 },
+ { "i7-990X", 12 },
+ { "i7-12650H", 16 },
+ { "i7-12800H", 20 },
+ { "i7-12800HE", 20 },
+ { "i7-12800HX", 24 },
+ { "i7-12850HX", 24 },
+ { "i7-13620H", 16 },
+ { "i7-13650HX", 20 },
+ { "i7-13700H", 20 },
+ { "i7-13700HX", 24 },
+ { "i7-13705H", 20 },
+ { "i7-13800H", 20 },
+ { "i7-13850HX", 28 },
+ { "i7-14650HX", 24 },
+ { "i7-14700", 28 },
+ { "i7-14700F", 28 },
+ { "i7-14700H", 28 },
+ { "i7-14700HX", 28 },
+ { "i7-14700K", 28 },
+ { "i7-14700KF", 28 },
+ { "i7-14700T", 28 },
+ { "i7-14790F", 24 },
+ { "i7-14950HX", 24 },
// i9 series
- { "i9-10850K", 20, 3.60 },
- { "i9-10885H", 16, 2.40 },
- { "i9-10900", 20, 2.80 },
- { "i9-10900E", 20, 2.80 },
- { "i9-10900F", 20, 2.80 },
- { "i9-10900K", 20, 3.70 },
- { "i9-10900KF", 20, 3.70 },
- { "i9-10900T", 20, 1.90 },
- { "i9-10900TE", 20, 1.80 },
- { "i9-10900X", 20, 3.70 },
- { "i9-10910", 20, 3.60 },
- { "i9-10920X", 24, 3.50 },
- { "i9-10940X", 28, 3.30 },
- { "i9-10980HK", 16, 2.40 },
- { "i9-10980XE", 36, 3.00 },
- { "i9-11900", 16, 2.50 },
- { "i9-11900F", 16, 2.50 },
- { "i9-11900H", 16, 2.50 },
- { "i9-11900K", 16, 3.50 },
- { "i9-11900KB", 16, 3.30 },
- { "i9-11900KF", 16, 3.50 },
- { "i9-11900T", 16, 1.50 },
- { "i9-11950H", 16, 2.60 },
- { "i9-11980HK", 16, 2.60 },
- { "i9-12900", 24, 2.40 },
- { "i9-12900F", 24, 2.40 },
- { "i9-12900H", 20, 2.50 },
- { "i9-12900K", 24, 3.20 },
- { "i9-12900KF", 24, 3.20 },
- { "i9-12900KS", 24, 3.40 },
- { "i9-12900T", 24, 1.40 },
- { "i9-13900", 32, 2.00 },
- { "i9-13900E", 32, 1.80 },
- { "i9-13900F", 32, 2.00 },
- { "i9-13900HX", 32, 2.20 },
- { "i9-13900K", 32, 3.00 },
- { "i9-13900KF", 32, 3.00 },
- { "i9-13900KS", 32, 3.20 },
- { "i9-13900T", 32, 1.10 },
- { "i9-13900TE", 32, 1.00 },
- { "i9-13950HX", 32, 2.20 },
- { "i9-13980HX", 32, 2.20 },
- { "i9-14900", 32, 2.00 },
- { "i9-14900F", 32, 2.00 },
- { "i9-14900HX", 32, 2.20 },
- { "i9-14900K", 32, 3.20 },
- { "i9-14900KF", 32, 3.20 },
- { "i9-14900KS", 32, 3.20 },
- { "i9-14900T", 32, 1.10 },
- { "i9-7900X", 20, 3.30 },
- { "i9-7920X", 24, 2.90 },
- { "i9-7940X", 28, 3.10 },
- { "i9-7960X", 32, 2.80 },
- { "i9-7980XE", 36, 2.60 },
- { "i9-8950HK", 12, 2.90 },
- { "i9-9820X", 20, 3.30 },
- { "i9-9880H", 16, 2.30 },
- { "i9-9900", 16, 3.10 },
- { "i9-9900K", 16, 3.60 },
- { "i9-9900KF", 16, 3.60 },
- { "i9-9900KS", 16, 4.00 },
- { "i9-9900T", 16, 2.10 },
- { "i9-9900X", 20, 3.50 },
- { "i9-9920X", 24, 3.50 },
- { "i9-9940X", 28, 3.30 },
- { "i9-9960X", 32, 3.10 },
- { "i9-9980HK", 16, 2.40 },
- { "i9-9980XE", 36, 3.00 },
- { "i9-9990XE", 28, 4.00 },
- { "i9-12900E", 24, 2.30 },
- { "i9-12900HK", 20, 2.50 },
- { "i9-12900HX", 24, 2.30 },
- { "i9-12900TE", 24, 1.10 },
- { "i9-12950HX", 24, 2.30 },
- { "i9-13900H", 20, 2.60 },
- { "i9-13900HK", 20, 2.60 },
- { "i9-13905H", 20, 2.60 },
- { "i9-14900H", 32, 2.20 },
- { "i9-14901KE", 16, 3.80 }
+ { "i9-10850K", 20 },
+ { "i9-10885H", 16 },
+ { "i9-10900", 20 },
+ { "i9-10900E", 20 },
+ { "i9-10900F", 20 },
+ { "i9-10900K", 20 },
+ { "i9-10900KF", 20 },
+ { "i9-10900T", 20 },
+ { "i9-10900TE", 20 },
+ { "i9-10900X", 20 },
+ { "i9-10910", 20 },
+ { "i9-10920X", 24 },
+ { "i9-10940X", 28 },
+ { "i9-10980HK", 16 },
+ { "i9-10980XE", 36 },
+ { "i9-11900", 16 },
+ { "i9-11900F", 16 },
+ { "i9-11900H", 16 },
+ { "i9-11900K", 16 },
+ { "i9-11900KB", 16 },
+ { "i9-11900KF", 16 },
+ { "i9-11900T", 16 },
+ { "i9-11950H", 16 },
+ { "i9-11980HK", 16 },
+ { "i9-12900", 24 },
+ { "i9-12900F", 24 },
+ { "i9-12900H", 20 },
+ { "i9-12900K", 24 },
+ { "i9-12900KF", 24 },
+ { "i9-12900KS", 24 },
+ { "i9-12900T", 24 },
+ { "i9-13900", 32 },
+ { "i9-13900E", 32 },
+ { "i9-13900F", 32 },
+ { "i9-13900HX", 32 },
+ { "i9-13900K", 32 },
+ { "i9-13900KF", 32 },
+ { "i9-13900KS", 32 },
+ { "i9-13900T", 32 },
+ { "i9-13900TE", 32 },
+ { "i9-13950HX", 32 },
+ { "i9-13980HX", 32 },
+ { "i9-14900", 32 },
+ { "i9-14900F", 32 },
+ { "i9-14900HX", 32 },
+ { "i9-14900K", 32 },
+ { "i9-14900KF", 32 },
+ { "i9-14900KS", 32 },
+ { "i9-14900T", 32 },
+ { "i9-7900X", 20 },
+ { "i9-7920X", 24 },
+ { "i9-7940X", 28 },
+ { "i9-7960X", 32 },
+ { "i9-7980XE", 36 },
+ { "i9-8950HK", 12 },
+ { "i9-9820X", 20 },
+ { "i9-9880H", 16 },
+ { "i9-9900", 16 },
+ { "i9-9900K", 16 },
+ { "i9-9900KF", 16 },
+ { "i9-9900KS", 16 },
+ { "i9-9900T", 16 },
+ { "i9-9900X", 20 },
+ { "i9-9920X", 24 },
+ { "i9-9940X", 28 },
+ { "i9-9960X", 32 },
+ { "i9-9980HK", 16 },
+ { "i9-9980XE", 36 },
+ { "i9-9990XE", 28 },
+ { "i9-12900E", 24 },
+ { "i9-12900HK", 20 },
+ { "i9-12900HX", 24 },
+ { "i9-12900TE", 24 },
+ { "i9-12950HX", 24 },
+ { "i9-13900H", 20 },
+ { "i9-13900HK", 20 },
+ { "i9-13905H", 20 },
+ { "i9-14900H", 32 },
+ { "i9-14901KE", 16 }
};
out_ptr = db;
out_size = sizeof(db) / sizeof(cpu_entry);
@@ -2418,138 +2404,138 @@ struct VM {
inline static void get_intel_xeon_db(const cpu_entry*& out_ptr, size_t& out_size) {
static const cpu_entry db[] = {
- { "D-1518", 8, 2.20 },
- { "D-1520", 8, 2.20 },
- { "D-1521", 8, 2.40 },
- { "D-1527", 8, 2.20 },
- { "D-1528", 12, 1.90 },
- { "D-1529", 8, 1.30 },
- { "D-1531", 12, 2.20 },
- { "D-1537", 16, 1.70 },
- { "D-1539", 16, 1.60 },
- { "D-1540", 16, 2.00 },
- { "D-1541", 16, 2.10 },
- { "D-1548", 16, 2.00 },
- { "D-1557", 24, 1.50 },
- { "D-1559", 24, 1.50 },
- { "D-1567", 24, 2.10 },
- { "D-1571", 32, 1.30 },
- { "D-1577", 32, 1.30 },
- { "D-1581", 32, 1.80 },
- { "D-1587", 32, 1.70 },
- { "D-1513N", 8, 1.60 },
- { "D-1523N", 8, 2.00 },
- { "D-1533N", 12, 2.10 },
- { "D-1543N", 16, 1.90 },
- { "D-1553N", 16, 2.30 },
- { "D-1602", 4, 2.50 },
- { "D-1612", 8, 1.50 },
- { "D-1622", 8, 2.60 },
- { "D-1627", 8, 2.90 },
- { "D-1632", 16, 1.50 },
- { "D-1637", 12, 2.90 },
- { "D-1623N", 8, 2.40 },
- { "D-1633N", 12, 2.50 },
- { "D-1649N", 16, 2.30 },
- { "D-1653N", 16, 2.80 },
- { "D-2141I", 16, 2.20 },
- { "D-2161I", 24, 2.20 },
- { "D-2191", 36, 1.60 },
- { "D-2123IT", 8, 2.20 },
- { "D-2142IT", 16, 1.90 },
- { "D-2143IT", 16, 2.20 },
- { "D-2163IT", 24, 2.10 },
- { "D-2173IT", 28, 1.70 },
- { "D-2183IT", 32, 2.20 },
- { "D-2145NT", 16, 1.90 },
- { "D-2146NT", 16, 2.30 },
- { "D-2166NT", 24, 2.00 },
- { "D-2177NT", 28, 1.90 },
- { "D-2187NT", 32, 2.00 },
+ { "D-1518", 8 },
+ { "D-1520", 8 },
+ { "D-1521", 8 },
+ { "D-1527", 8 },
+ { "D-1528", 12 },
+ { "D-1529", 8 },
+ { "D-1531", 12 },
+ { "D-1537", 16 },
+ { "D-1539", 16 },
+ { "D-1540", 16 },
+ { "D-1541", 16 },
+ { "D-1548", 16 },
+ { "D-1557", 24 },
+ { "D-1559", 24 },
+ { "D-1567", 24 },
+ { "D-1571", 32 },
+ { "D-1577", 32 },
+ { "D-1581", 32 },
+ { "D-1587", 32 },
+ { "D-1513N", 8 },
+ { "D-1523N", 8 },
+ { "D-1533N", 12 },
+ { "D-1543N", 16 },
+ { "D-1553N", 16 },
+ { "D-1602", 4 },
+ { "D-1612", 8 },
+ { "D-1622", 8 },
+ { "D-1627", 8 },
+ { "D-1632", 16 },
+ { "D-1637", 12 },
+ { "D-1623N", 8 },
+ { "D-1633N", 12 },
+ { "D-1649N", 16 },
+ { "D-1653N", 16 },
+ { "D-2141I", 16 },
+ { "D-2161I", 24 },
+ { "D-2191", 36 },
+ { "D-2123IT", 8 },
+ { "D-2142IT", 16 },
+ { "D-2143IT", 16 },
+ { "D-2163IT", 24 },
+ { "D-2173IT", 28 },
+ { "D-2183IT", 32 },
+ { "D-2145NT", 16 },
+ { "D-2146NT", 16 },
+ { "D-2166NT", 24 },
+ { "D-2177NT", 28 },
+ { "D-2187NT", 32 },
// Xeon E
- { "E-2104G", 4, 3.20 },
- { "E-2124", 4, 3.30 },
- { "E-2124G", 4, 3.40 },
- { "E-2126G", 6, 3.30 },
- { "E-2134", 8, 3.50 },
- { "E-2136", 12, 3.30 },
- { "E-2144G", 8, 3.60 },
- { "E-2146G", 12, 3.50 },
- { "E-2174G", 8, 3.80 },
- { "E-2176G", 12, 3.70 },
- { "E-2186G", 12, 3.80 },
- { "E-2176M", 12, 2.70 },
- { "E-2186M", 12, 2.90 },
- { "E-2224", 4, 3.40 },
- { "E-2224G", 4, 3.50 },
- { "E-2226G", 6, 3.40 },
- { "E-2234", 8, 3.60 },
- { "E-2236", 12, 3.40 },
- { "E-2244G", 8, 3.80 },
- { "E-2246G", 12, 3.60 },
- { "E-2274G", 8, 4.00 },
- { "E-2276G", 12, 3.80 },
- { "E-2278G", 16, 3.40 },
- { "E-2286G", 12, 4.00 },
- { "E-2288G", 16, 3.70 },
- { "E-2276M", 12, 2.80 },
- { "E-2286M", 16, 2.40 },
+ { "E-2104G", 4 },
+ { "E-2124", 4 },
+ { "E-2124G", 4 },
+ { "E-2126G", 6 },
+ { "E-2134", 8 },
+ { "E-2136", 12 },
+ { "E-2144G", 8 },
+ { "E-2146G", 12 },
+ { "E-2174G", 8 },
+ { "E-2176G", 12 },
+ { "E-2186G", 12 },
+ { "E-2176M", 12 },
+ { "E-2186M", 12 },
+ { "E-2224", 4 },
+ { "E-2224G", 4 },
+ { "E-2226G", 6 },
+ { "E-2234", 8 },
+ { "E-2236", 12 },
+ { "E-2244G", 8 },
+ { "E-2246G", 12 },
+ { "E-2274G", 8 },
+ { "E-2276G", 12 },
+ { "E-2278G", 16 },
+ { "E-2286G", 12 },
+ { "E-2288G", 16 },
+ { "E-2276M", 12 },
+ { "E-2286M", 16 },
// Xeon W
- { "W-2102", 4, 2.90 },
- { "W-2104", 4, 3.20 },
- { "W-2123", 8, 3.60 },
- { "W-2125", 8, 4.00 },
- { "W-2133", 12, 3.60 },
- { "W-2135", 12, 3.70 },
- { "W-2140B", 16, 3.20 },
- { "W-2145", 16, 3.70 },
- { "W-2150B", 20, 3.00 },
- { "W-2155", 20, 3.30 },
- { "W-2170B", 28, 2.50 },
- { "W-2175", 28, 2.50 },
- { "W-2191B", 36, 2.30 },
- { "W-2195", 36, 2.30 },
- { "W-3175X", 56, 3.10 },
- { "W-3223", 16, 3.50 },
- { "W-3225", 16, 3.70 },
- { "W-3235", 24, 3.30 },
- { "W-3245", 32, 3.20 },
- { "W-3245M", 32, 3.20 },
- { "W-3265", 48, 2.70 },
- { "W-3265M", 48, 2.70 },
- { "W-3275", 56, 2.50 },
- { "W-3275M", 56, 2.50 },
- { "w3-2423", 12, 2.10 },
- { "w3-2425", 12, 3.00 },
- { "w3-2435", 16, 3.10 },
- { "w5-2445", 20, 3.10 },
- { "w5-2455X", 24, 3.20 },
- { "w5-2465X", 32, 3.10 },
- { "w7-2475X", 40, 2.60 },
- { "w7-2495X", 48, 2.50 },
- { "w5-3425", 24, 3.20 },
- { "w5-3435X", 32, 3.10 },
- { "w7-3445", 40, 2.60 },
- { "w7-3455", 48, 2.50 },
- { "w7-3465X", 56, 2.50 },
- { "w9-3475X", 72, 2.20 },
- { "w9-3495X", 112, 1.90 },
- { "w3-2525", 16, 3.50 },
- { "w3-2535", 20, 3.50 },
- { "w5-2545", 24, 3.50 },
- { "w5-2555X", 28, 3.30 },
- { "w5-2565X", 36, 3.20 },
- { "w7-2575X", 44, 3.00 },
- { "w7-2595X", 52, 2.80 },
- { "w5-3525", 32, 3.20 },
- { "w5-3535X", 40, 3.20 },
- { "w7-3545", 48, 2.70 },
- { "w7-3555", 56, 2.70 },
- { "w7-3565X", 64, 2.60 },
- { "w9-3575X", 88, 2.20 },
- { "w9-3595X", 120, 2.00 }
+ { "W-2102", 4 },
+ { "W-2104", 4 },
+ { "W-2123", 8 },
+ { "W-2125", 8 },
+ { "W-2133", 12 },
+ { "W-2135", 12 },
+ { "W-2140B", 16 },
+ { "W-2145", 16 },
+ { "W-2150B", 20 },
+ { "W-2155", 20 },
+ { "W-2170B", 28 },
+ { "W-2175", 28 },
+ { "W-2191B", 36 },
+ { "W-2195", 36 },
+ { "W-3175X", 56 },
+ { "W-3223", 16 },
+ { "W-3225", 16 },
+ { "W-3235", 24 },
+ { "W-3245", 32 },
+ { "W-3245M", 32 },
+ { "W-3265", 48 },
+ { "W-3265M", 48 },
+ { "W-3275", 56 },
+ { "W-3275M", 56 },
+ { "w3-2423", 12 },
+ { "w3-2425", 12 },
+ { "w3-2435", 16 },
+ { "w5-2445", 20 },
+ { "w5-2455X", 24 },
+ { "w5-2465X", 32 },
+ { "w7-2475X", 40 },
+ { "w7-2495X", 48 },
+ { "w5-3425", 24 },
+ { "w5-3435X", 32 },
+ { "w7-3445", 40 },
+ { "w7-3455", 48 },
+ { "w7-3465X", 56 },
+ { "w9-3475X", 72 },
+ { "w9-3495X", 112 },
+ { "w3-2525", 16 },
+ { "w3-2535", 20 },
+ { "w5-2545", 24 },
+ { "w5-2555X", 28 },
+ { "w5-2565X", 36 },
+ { "w7-2575X", 44 },
+ { "w7-2595X", 52 },
+ { "w5-3525", 32 },
+ { "w5-3535X", 40 },
+ { "w7-3545", 48 },
+ { "w7-3555", 56 },
+ { "w7-3565X", 64 },
+ { "w9-3575X", 88 },
+ { "w9-3595X", 120 }
};
out_ptr = db;
out_size = sizeof(db) / sizeof(cpu_entry);
@@ -2558,32 +2544,32 @@ struct VM {
inline static void get_intel_ultra_db(const cpu_entry*& db, size_t& size) {
static const cpu_entry intel_ultra[] = {
// Series 2 (Arrow Lake - Desktop/Mobile) - No HT on P-Cores
- { "285K", 24, 3.70 },
- { "265K", 20, 3.90 },
- { "265KF", 20, 3.90 },
- { "245K", 14, 4.20 },
- { "245KF", 14, 4.20 },
+ { "285K", 24 },
+ { "265K", 20 },
+ { "265KF", 20 },
+ { "245K", 14 },
+ { "245KF", 14 },
// Series 2 (Lunar Lake - Mobile)
- { "288V", 8, 3.30 },
- { "268V", 8, 3.30 },
- { "258V", 8, 2.20 },
+ { "288V", 8 },
+ { "268V", 8 },
+ { "258V", 8 },
// Series 1 (Meteor Lake - Mobile) - P-Cores have HT
// 6P + 8E + 2LP = 16 Cores. Threads = (6*2) + 8 + 2 = 22 Threads
- { "185H", 22, 2.30 },
- { "165H", 22, 1.40 },
- { "155H", 22, 1.40 },
+ { "185H", 22 },
+ { "165H", 22 },
+ { "155H", 22 },
// 4P + 8E + 2LP = 14 Cores. Threads = (4*2) + 8 + 2 = 18 Threads
- { "135H", 18, 1.70 },
- { "125H", 18, 1.20 },
+ { "135H", 18 },
+ { "125H", 18 },
// 2P + 8E + 2LP = 12 Cores. Threads = (2*2) + 8 + 2 = 14 Threads
- { "165U", 14, 1.70 },
- { "155U", 14, 1.70 },
- { "135U", 14, 1.60 },
- { "125U", 14, 1.30 },
+ { "165U", 14 },
+ { "155U", 14 },
+ { "135U", 14 },
+ { "125U", 14 },
};
db = intel_ultra;
size = sizeof(intel_ultra) / sizeof(cpu_entry);
@@ -2592,506 +2578,506 @@ struct VM {
inline static void get_amd_ryzen_db(const cpu_entry*& out_ptr, size_t& out_size) {
static const cpu_entry db[] = {
// 3015/3020
- { "3015ce", 4, 1.20 },
- { "3015e", 4, 1.20 },
- { "3020e", 2, 1.20 },
+ { "3015ce", 4 },
+ { "3015e", 4 },
+ { "3020e", 2 },
// Athlon/Ax suffixes
- { "860k", 4, 3.70 },
- { "870k", 4, 3.90 },
- { "pro-7350b", 4, 3.10 },
- { "pro-7800b", 4, 3.50 },
- { "pro-7850b", 4, 3.70 },
- { "a10-6700", 4, 3.70 },
- { "a10-6700t", 4, 2.50 },
- { "a10-6790b", 4, 4.00 },
- { "a10-6790k", 4, 4.00 },
- { "a10-6800b", 4, 4.10 },
- { "a10-6800k", 4, 4.10 },
- { "a10-7300", 4, 1.90 },
- { "a10-7400p", 4, 2.50 },
- { "a10-7700k", 4, 3.40 },
- { "a10-7800", 4, 3.50 },
- { "a10-7850k", 4, 3.70 },
- { "a10-7860k", 4, 3.60 },
- { "a10-7870k", 4, 3.90 },
- { "a10-8700b", 4, 1.80 },
- { "a10-8700p", 4, 1.80 },
- { "a10-8750b", 4, 3.60 },
- { "a10-8850b", 4, 3.90 },
- { "a12-8800b", 4, 2.10 },
- { "micro-6400t", 4, 1.00 },
- { "pro-3340b", 4, 2.20 },
- { "pro-3350b", 4, 2.20 },
- { "pro-7300b", 2, 1.90 },
- { "a4-5000", 4, 1.50 },
- { "a4-5100", 4, 1.55 },
- { "a4-6210", 4, 1.80 },
- { "a4-6300", 2, 3.70 },
- { "a4-6320", 2, 3.80 },
- { "a4-7210", 4, 1.80 },
- { "a4-7300", 2, 3.80 },
- { "a4-8350b", 2, 3.50 },
- { "a4-9120c", 2, 1.60 },
- { "pro-7050b", 2, 2.20 },
- { "pro-7400b", 2, 3.50 },
- { "a6-5200", 4, 2.00 },
- { "a6-5200m", 4, 2.00 },
- { "a6-5350m", 2, 2.90 },
- { "a6-6310", 4, 1.80 },
- { "a6-6400b", 2, 3.90 },
- { "a6-6400k", 2, 3.90 },
- { "a6-6420b", 2, 4.00 },
- { "a6-6420k", 2, 4.00 },
- { "a6-7000", 2, 2.20 },
- { "a6-7310", 4, 2.00 },
- { "a6-7400k", 2, 3.50 },
- { "a6-8500b", 2, 1.60 },
- { "a6-8500p", 2, 1.60 },
- { "a6-8550b", 2, 3.70 },
- { "a6-9220c", 2, 1.80 },
- { "pro-7150b", 4, 1.90 },
- { "pro-7600b", 4, 3.10 },
- { "a8-6410", 4, 2.00 },
- { "a8-6500", 4, 3.50 },
- { "a8-6500b", 4, 3.50 },
- { "a8-6500t", 4, 2.10 },
- { "a8-6600k", 4, 3.90 },
- { "a8-7100", 4, 1.80 },
- { "a8-7200p", 4, 2.40 },
- { "a8-7410", 4, 2.20 },
- { "a8-7600", 4, 3.10 },
- { "a8-7650k", 4, 3.30 },
- { "a8-7670k", 4, 3.60 },
- { "a8-8600b", 4, 1.60 },
- { "a8-8600p", 4, 1.60 },
- { "a8-8650b", 4, 3.20 },
+ { "860k", 4 },
+ { "870k", 4 },
+ { "pro-7350b", 4 },
+ { "pro-7800b", 4 },
+ { "pro-7850b", 4 },
+ { "a10-6700", 4 },
+ { "a10-6700t", 4 },
+ { "a10-6790b", 4 },
+ { "a10-6790k", 4 },
+ { "a10-6800b", 4 },
+ { "a10-6800k", 4 },
+ { "a10-7300", 4 },
+ { "a10-7400p", 4 },
+ { "a10-7700k", 4 },
+ { "a10-7800", 4 },
+ { "a10-7850k", 4 },
+ { "a10-7860k", 4 },
+ { "a10-7870k", 4 },
+ { "a10-8700b", 4 },
+ { "a10-8700p", 4 },
+ { "a10-8750b", 4 },
+ { "a10-8850b", 4 },
+ { "a12-8800b", 4 },
+ { "micro-6400t", 4 },
+ { "pro-3340b", 4 },
+ { "pro-3350b", 4 },
+ { "pro-7300b", 2 },
+ { "a4-5000", 4 },
+ { "a4-5100", 4 },
+ { "a4-6210", 4 },
+ { "a4-6300", 2 },
+ { "a4-6320", 2 },
+ { "a4-7210", 4 },
+ { "a4-7300", 2 },
+ { "a4-8350b", 2 },
+ { "a4-9120c", 2 },
+ { "pro-7050b", 2 },
+ { "pro-7400b", 2 },
+ { "a6-5200", 4 },
+ { "a6-5200m", 4 },
+ { "a6-5350m", 2 },
+ { "a6-6310", 4 },
+ { "a6-6400b", 2 },
+ { "a6-6400k", 2 },
+ { "a6-6420b", 2 },
+ { "a6-6420k", 2 },
+ { "a6-7000", 2 },
+ { "a6-7310", 4 },
+ { "a6-7400k", 2 },
+ { "a6-8500b", 2 },
+ { "a6-8500p", 2 },
+ { "a6-8550b", 2 },
+ { "a6-9220c", 2 },
+ { "pro-7150b", 4 },
+ { "pro-7600b", 4 },
+ { "a8-6410", 4 },
+ { "a8-6500", 4 },
+ { "a8-6500b", 4 },
+ { "a8-6500t", 4 },
+ { "a8-6600k", 4 },
+ { "a8-7100", 4 },
+ { "a8-7200p", 4 },
+ { "a8-7410", 4 },
+ { "a8-7600", 4 },
+ { "a8-7650k", 4 },
+ { "a8-7670k", 4 },
+ { "a8-8600b", 4 },
+ { "a8-8600p", 4 },
+ { "a8-8650b", 4 },
// AI Series (Strix Point)
- { "365", 20, 2.00 }, // Ryzen AI 7 365
- { "370", 24, 2.00 }, // Ryzen AI 9 HX 370
- { "375", 24, 2.00 }, // Ryzen AI 9 HX 375
+ { "365", 20 }, // Ryzen AI 7 365
+ { "370", 24 }, // Ryzen AI 9 HX 370
+ { "375", 24 }, // Ryzen AI 9 HX 375
// Athlon
- { "3050c", 2, 2.30 },
- { "200ge", 4, 3.20 },
- { "220ge", 4, 3.40 },
- { "240ge", 4, 3.50 },
- { "255e", 2, 3.10 },
- { "3000g", 4, 3.50 },
- { "300ge", 4, 3.40 },
- { "300u", 4, 2.40 },
- { "320ge", 4, 3.50 },
- { "425e", 3, 2.70 },
- { "460", 3, 3.40 },
- { "5150", 4, 1.60 },
- { "5350", 4, 2.05 },
- { "5370", 4, 2.20 },
- { "620e", 4, 2.70 },
- { "631", 4, 2.60 },
- { "638", 4, 2.70 },
- { "641", 4, 2.80 },
- { "740", 4, 3.20 },
- { "750k", 4, 3.40 },
- { "760k", 4, 3.80 },
- { "3150c", 4, 2.40 },
- { "3150g", 4, 3.50 },
- { "3150ge", 4, 3.30 },
- { "3150u", 4, 2.40 },
- { "7220c", 4, 2.40 },
- { "7220u", 4, 2.40 },
- { "3045b", 2, 2.30 },
- { "3145b", 4, 2.40 },
- { "3050e", 4, 1.40 },
- { "3050ge", 4, 3.40 },
- { "3050u", 2, 2.30 },
- { "7120c", 2, 2.40 },
- { "7120u", 2, 2.40 },
- { "3125ge", 4, 3.40 },
- { "940", 4, 3.00 },
- { "950", 4, 3.50 },
- { "970", 4, 3.80 },
+ { "3050c", 2 },
+ { "200ge", 4 },
+ { "220ge", 4 },
+ { "240ge", 4 },
+ { "255e", 2 },
+ { "3000g", 4 },
+ { "300ge", 4 },
+ { "300u", 4 },
+ { "320ge", 4 },
+ { "425e", 3 },
+ { "460", 3 },
+ { "5150", 4 },
+ { "5350", 4 },
+ { "5370", 4 },
+ { "620e", 4 },
+ { "631", 4 },
+ { "638", 4 },
+ { "641", 4 },
+ { "740", 4 },
+ { "750k", 4 },
+ { "760k", 4 },
+ { "3150c", 4 },
+ { "3150g", 4 },
+ { "3150ge", 4 },
+ { "3150u", 4 },
+ { "7220c", 4 },
+ { "7220u", 4 },
+ { "3045b", 2 },
+ { "3145b", 4 },
+ { "3050e", 4 },
+ { "3050ge", 4 },
+ { "3050u", 2 },
+ { "7120c", 2 },
+ { "7120u", 2 },
+ { "3125ge", 4 },
+ { "940", 4 },
+ { "950", 4 },
+ { "970", 4 },
// Business Class
- { "b57", 2, 3.20 },
- { "b59", 2, 3.40 },
- { "b60", 2, 3.50 },
- { "b75", 3, 3.00 },
- { "b77", 3, 3.20 },
- { "b97", 4, 3.20 },
- { "b99", 4, 3.30 },
+ { "b57", 2 },
+ { "b59", 2 },
+ { "b60", 2 },
+ { "b75", 3 },
+ { "b77", 3 },
+ { "b97", 4 },
+ { "b99", 4 },
// E-Series
- { "micro-6200t", 2, 1.00 },
- { "e1-2100", 2, 1.00 },
- { "e1-2200", 2, 1.05 },
- { "e1-2500", 2, 1.40 },
- { "e1-6010", 2, 1.35 },
- { "e1-7010", 2, 1.50 },
- { "e2-3000", 2, 1.65 },
- { "e2-3800", 4, 1.30 },
- { "e2-6110", 4, 1.50 },
- { "e2-7110", 4, 1.80 },
+ { "micro-6200t", 2 },
+ { "e1-2100", 2 },
+ { "e1-2200", 2 },
+ { "e1-2500", 2 },
+ { "e1-6010", 2 },
+ { "e1-7010", 2 },
+ { "e2-3000", 2 },
+ { "e2-3800", 4 },
+ { "e2-6110", 4 },
+ { "e2-7110", 4 },
// FX
- { "fx-4100", 4, 3.60 },
- { "fx-4130", 4, 3.80 },
- { "fx-4170", 4, 4.20 },
- { "fx-4300", 4, 3.80 },
- { "fx-4320", 4, 4.00 },
- { "fx-4350", 4, 4.20 },
- { "fx-6200", 6, 3.80 },
- { "fx-6300", 6, 3.50 },
- { "fx-6350", 6, 3.90 },
- { "fx-7500", 4, 2.10 },
- { "fx-7600p", 4, 2.70 },
- { "fx-8120", 8, 3.10 },
- { "fx-8150", 8, 3.60 },
- { "fx-8300", 8, 3.30 },
- { "fx-8310", 8, 3.40 },
- { "fx-8320", 8, 3.50 },
- { "fx-8320e", 8, 3.20 },
- { "fx-8350", 8, 4.00 },
- { "fx-8370", 8, 4.00 },
- { "fx-8370e", 8, 3.30 },
- { "fx-8800p", 4, 2.10 },
- { "fx-9370", 8, 4.40 },
- { "fx-9590", 8, 4.70 },
+ { "fx-4100", 4 },
+ { "fx-4130", 4 },
+ { "fx-4170", 4 },
+ { "fx-4300", 4 },
+ { "fx-4320", 4 },
+ { "fx-4350", 4 },
+ { "fx-6200", 6 },
+ { "fx-6300", 6 },
+ { "fx-6350", 6 },
+ { "fx-7500", 4 },
+ { "fx-7600p", 4 },
+ { "fx-8120", 8 },
+ { "fx-8150", 8 },
+ { "fx-8300", 8 },
+ { "fx-8310", 8 },
+ { "fx-8320", 8 },
+ { "fx-8320e", 8 },
+ { "fx-8350", 8 },
+ { "fx-8370", 8 },
+ { "fx-8370e", 8 },
+ { "fx-8800p", 4 },
+ { "fx-9370", 8 },
+ { "fx-9590", 8 },
// Misc
- { "micro-6700t", 4, 1.20 },
- { "n640", 2, 2.90 },
- { "n660", 2, 3.00 },
- { "n870", 3, 2.30 },
- { "n960", 4, 1.80 },
- { "n970", 4, 2.20 },
- { "p650", 2, 2.60 },
- { "p860", 3, 2.00 },
+ { "micro-6700t", 4 },
+ { "n640", 2 },
+ { "n660", 2 },
+ { "n870", 3 },
+ { "n960", 4 },
+ { "n970", 4 },
+ { "p650", 2 },
+ { "p860", 3 },
// Phenom II
- { "1075t", 6, 3.00 },
- { "555", 2, 3.20 },
- { "565", 2, 3.40 },
- { "570", 2, 3.50 },
- { "840", 4, 3.20 },
- { "850", 4, 3.30 },
- { "960t", 4, 3.00 },
- { "965", 4, 3.40 },
- { "975", 4, 3.60 },
- { "980", 4, 3.70 },
+ { "1075t", 6 },
+ { "555", 2 },
+ { "565", 2 },
+ { "570", 2 },
+ { "840", 4 },
+ { "850", 4 },
+ { "960t", 4 },
+ { "965", 4 },
+ { "975", 4 },
+ { "980", 4 },
// Ryzen Suffixes (3/5/7/9/Threadripper consolidated)
- { "1200", 4, 3.10 },
- { "1300x", 4, 3.50 },
+ { "1200", 4 },
+ { "1300x", 4 },
// "210" mapped to Ryzen 5 1400 (First Gen 4c/8t)
- { "210", 8, 3.20 },
- { "2200g", 4, 3.50 },
- { "2200ge", 4, 3.20 },
- { "2200u", 4, 2.50 },
- { "2300u", 4, 2.00 },
- { "2300x", 4, 3.50 },
- { "3100", 8, 3.60 },
- { "3200g", 4, 3.60 },
- { "3200ge", 4, 3.30 },
- { "3200u", 4, 2.60 },
- { "3250c", 4, 2.60 },
- { "3250u", 4, 2.60 },
- { "3300u", 4, 2.10 },
- { "3300x", 8, 3.80 },
- { "3350u", 4, 2.10 },
- { "4100", 8, 3.80 },
- { "4300g", 8, 3.80 },
- { "4300ge", 8, 3.50 },
- { "4300u", 4, 2.70 },
- { "5125c", 4, 3.00 },
- { "5300g", 8, 4.00 },
- { "5300ge", 8, 3.60 },
- { "5300u", 8, 2.60 },
- { "5305g", 8, 4.00 },
- { "5305ge", 8, 3.60 },
- { "5400u", 8, 2.60 },
- { "5425c", 8, 2.70 },
- { "5425u", 8, 2.70 },
- { "7320c", 8, 2.40 },
- { "7320u", 8, 2.40 },
- { "7330u", 8, 2.30 },
- { "7335u", 8, 3.00 },
- { "7440u", 8, 3.00 },
- { "8300g", 8, 3.40 },
- { "8300ge", 8, 3.40 },
- { "8440u", 8, 3.00 },
- { "1300", 4, 3.50 },
- { "4350g", 8, 3.80 },
- { "4350ge", 8, 3.50 },
- { "4355g", 8, 3.80 },
- { "4355ge", 8, 3.50 },
- { "4450u", 8, 2.50 },
- { "5350g", 8, 4.00 },
- { "5350ge", 8, 3.60 },
- { "5355g", 8, 4.00 },
- { "5355ge", 8, 3.60 },
- { "5450u", 8, 2.60 },
- { "5475u", 8, 2.70 },
- { "1400", 8, 3.20 },
- { "1500x", 8, 3.50 },
- { "1600", 12, 3.20 },
- { "1600x", 12, 3.60 },
+ { "210", 8 },
+ { "2200g", 4 },
+ { "2200ge", 4 },
+ { "2200u", 4 },
+ { "2300u", 4 },
+ { "2300x", 4 },
+ { "3100", 8 },
+ { "3200g", 4 },
+ { "3200ge", 4 },
+ { "3200u", 4 },
+ { "3250c", 4 },
+ { "3250u", 4 },
+ { "3300u", 4 },
+ { "3300x", 8 },
+ { "3350u", 4 },
+ { "4100", 8 },
+ { "4300g", 8 },
+ { "4300ge", 8 },
+ { "4300u", 4 },
+ { "5125c", 4 },
+ { "5300g", 8 },
+ { "5300ge", 8 },
+ { "5300u", 8 },
+ { "5305g", 8 },
+ { "5305ge", 8 },
+ { "5400u", 8 },
+ { "5425c", 8 },
+ { "5425u", 8 },
+ { "7320c", 8 },
+ { "7320u", 8 },
+ { "7330u", 8 },
+ { "7335u", 8 },
+ { "7440u", 8 },
+ { "8300g", 8 },
+ { "8300ge", 8 },
+ { "8440u", 8 },
+ { "1300", 4 },
+ { "4350g", 8 },
+ { "4350ge", 8 },
+ { "4355g", 8 },
+ { "4355ge", 8 },
+ { "4450u", 8 },
+ { "5350g", 8 },
+ { "5350ge", 8 },
+ { "5355g", 8 },
+ { "5355ge", 8 },
+ { "5450u", 8 },
+ { "5475u", 8 },
+ { "1400", 8 },
+ { "1500x", 8 },
+ { "1600", 12 },
+ { "1600x", 12 },
// "220" mapped to Ryzen 5 1600 (First Gen 6c/12t)
- { "220", 12, 3.20 },
+ { "220", 12 },
// "230" mapped to Ryzen 5 2600 (Second Gen 6c/12t)
- { "230", 12, 3.40 },
+ { "230", 12 },
// "240" mapped to Ryzen 5 3600 (Third Gen 6c/12t)
- { "240", 12, 3.60 },
- { "2400g", 8, 3.60 },
- { "2400ge", 8, 3.20 },
- { "2500u", 8, 2.00 },
- { "2500x", 8, 3.60 },
- { "2600", 12, 3.40 },
- { "2600e", 12, 3.10 },
- { "2600h", 8, 3.20 },
- { "2600x", 12, 3.60 },
- { "3400g", 8, 3.70 },
- { "3400ge", 8, 3.30 },
- { "3450u", 8, 2.10 },
- { "3500", 6, 3.60 },
- { "3500c", 8, 2.10 },
- { "3500u", 8, 2.10 },
- { "3550h", 8, 2.10 },
- { "3580u", 8, 2.10 },
- { "3600", 12, 3.60 },
- { "3600x", 12, 3.80 },
- { "3600xt", 12, 3.80 },
- { "4500", 12, 3.60 },
- { "4500u", 6, 2.30 },
- { "4600g", 12, 3.70 },
- { "4600ge", 12, 3.30 },
- { "4600h", 12, 3.00 },
- { "4600u", 12, 2.10 },
- { "4680u", 12, 2.10 },
- { "5500", 12, 3.60 },
- { "5500gt", 12, 3.60 },
- { "5500h", 8, 3.30 },
- { "5500u", 12, 2.10 },
- { "5560u", 12, 2.30 },
- { "5600", 12, 3.50 },
- { "5600g", 12, 3.90 },
- { "5600ge", 12, 3.40 },
- { "5600gt", 12, 3.60 },
- { "5600h", 12, 3.30 },
- { "5600hs", 12, 3.00 },
- { "5600t", 12, 3.50 },
- { "5600u", 12, 2.30 },
- { "5600x", 12, 3.70 },
- { "5600x3d", 12, 3.30 },
- { "5600xt", 12, 3.80 },
- { "5605g", 12, 3.90 },
- { "5605ge", 12, 3.40 },
- { "5625c", 12, 2.30 },
- { "5625u", 12, 2.30 },
- { "6600h", 12, 3.30 },
- { "6600hs", 12, 3.30 },
- { "6600u", 12, 2.90 },
- { "7235hs", 8, 3.20 },
- { "7400f", 12, 3.70 },
- { "7430u", 12, 2.30 },
- { "7500f", 12, 3.70 },
- { "7520c", 8, 2.80 },
- { "7520u", 8, 2.80 },
- { "7530u", 12, 2.00 },
- { "7535hs", 12, 3.30 },
- { "7535u", 12, 2.90 },
- { "7540u", 12, 3.20 },
- { "7545u", 12, 3.20 },
- { "7600", 12, 3.80 },
- { "7600x", 12, 4.70 },
- { "7600x3d", 12, 4.10 },
- { "7640hs", 12, 4.30 },
- { "7640u", 12, 3.50 },
- { "7645hx", 12, 4.00 },
- { "8400f", 12, 4.20 },
- { "8500g", 12, 4.10 }, // Zen 4 base
- { "8500ge", 12, 3.40 },
- { "8540u", 12, 3.20 },
- { "8600g", 12, 4.30 },
- { "8640hs", 12, 3.50 },
- { "8640u", 12, 3.50 },
- { "8645hs", 12, 4.30 },
- { "9600", 12, 3.90 },
- { "9600x", 12, 3.90 },
- { "1500", 8, 3.00 },
- { "3350g", 8, 3.60 },
- { "3350ge", 8, 3.30 },
- { "4650g", 12, 3.70 },
- { "4650ge", 12, 3.30 },
- { "4650u", 12, 2.10 },
- { "4655g", 12, 3.70 },
- { "4655ge", 12, 3.30 },
- { "5645", 12, 3.70 },
- { "5650g", 12, 3.90 },
- { "5650ge", 12, 3.40 },
- { "5650u", 12, 2.30 },
- { "5655g", 12, 3.90 },
- { "5655ge", 12, 3.40 },
- { "5675u", 12, 2.30 },
- { "6650h", 12, 3.30 },
- { "6650hs", 12, 3.30 },
- { "6650u", 12, 2.90 },
- { "1700", 16, 3.00 },
- { "1700x", 16, 3.40 },
- { "1800x", 16, 3.60 },
+ { "240", 12 },
+ { "2400g", 8 },
+ { "2400ge", 8 },
+ { "2500u", 8 },
+ { "2500x", 8 },
+ { "2600", 12 },
+ { "2600e", 12 },
+ { "2600h", 8 },
+ { "2600x", 12 },
+ { "3400g", 8 },
+ { "3400ge", 8 },
+ { "3450u", 8 },
+ { "3500", 6 },
+ { "3500c", 8 },
+ { "3500u", 8 },
+ { "3550h", 8 },
+ { "3580u", 8 },
+ { "3600", 12 },
+ { "3600x", 12 },
+ { "3600xt", 12 },
+ { "4500", 12 },
+ { "4500u", 6 },
+ { "4600g", 12 },
+ { "4600ge", 12 },
+ { "4600h", 12 },
+ { "4600u", 12 },
+ { "4680u", 12 },
+ { "5500", 12 },
+ { "5500gt", 12 },
+ { "5500h", 8 },
+ { "5500u", 12 },
+ { "5560u", 12 },
+ { "5600", 12 },
+ { "5600g", 12 },
+ { "5600ge", 12 },
+ { "5600gt", 12 },
+ { "5600h", 12 },
+ { "5600hs", 12 },
+ { "5600t", 12 },
+ { "5600u", 12 },
+ { "5600x", 12 },
+ { "5600x3d", 12 },
+ { "5600xt", 12 },
+ { "5605g", 12 },
+ { "5605ge", 12 },
+ { "5625c", 12 },
+ { "5625u", 12 },
+ { "6600h", 12 },
+ { "6600hs", 12 },
+ { "6600u", 12 },
+ { "7235hs", 8 },
+ { "7400f", 12 },
+ { "7430u", 12 },
+ { "7500f", 12 },
+ { "7520c", 8 },
+ { "7520u", 8 },
+ { "7530u", 12 },
+ { "7535hs", 12 },
+ { "7535u", 12 },
+ { "7540u", 12 },
+ { "7545u", 12 },
+ { "7600", 12 },
+ { "7600x", 12 },
+ { "7600x3d", 12 },
+ { "7640hs", 12 },
+ { "7640u", 12 },
+ { "7645hx", 12 },
+ { "8400f", 12 },
+ { "8500g", 12 }, // Zen 4 base
+ { "8500ge", 12 },
+ { "8540u", 12 },
+ { "8600g", 12 },
+ { "8640hs", 12 },
+ { "8640u", 12 },
+ { "8645hs", 12 },
+ { "9600", 12 },
+ { "9600x", 12 },
+ { "1500", 8 },
+ { "3350g", 8 },
+ { "3350ge", 8 },
+ { "4650g", 12 },
+ { "4650ge", 12 },
+ { "4650u", 12 },
+ { "4655g", 12 },
+ { "4655ge", 12 },
+ { "5645", 12 },
+ { "5650g", 12 },
+ { "5650ge", 12 },
+ { "5650u", 12 },
+ { "5655g", 12 },
+ { "5655ge", 12 },
+ { "5675u", 12 },
+ { "6650h", 12 },
+ { "6650hs", 12 },
+ { "6650u", 12 },
+ { "1700", 16 },
+ { "1700x", 16 },
+ { "1800x", 16 },
// "250" mapped to Ryzen 7 1700 (First Gen 8c/16t)
- { "250", 16, 3.00 },
+ { "250", 16 },
// "260" mapped to Ryzen 7 2700 (Second Gen 8c/16t)
- { "260", 16, 3.20 },
- { "2700", 16, 3.20 },
- { "2700e", 16, 2.80 },
- { "2700u", 8, 2.20 },
- { "2700x", 16, 3.70 },
- { "2800h", 8, 3.30 },
- { "3700c", 8, 2.30 },
- { "3700u", 8, 2.30 },
- { "3700x", 16, 3.60 },
- { "3750h", 8, 2.30 },
- { "3780u", 8, 2.30 },
- { "3800x", 16, 3.90 },
- { "3800xt", 16, 3.90 },
- { "4700g", 16, 3.60 },
- { "4700ge", 16, 3.10 },
- { "4700u", 8, 2.00 },
- { "4800h", 16, 2.90 },
- { "4800hs", 16, 2.90 },
- { "4800u", 16, 1.80 },
- { "4980u", 16, 2.00 },
- { "5700", 16, 3.70 },
- { "5700g", 16, 3.80 },
- { "5700ge", 16, 3.20 },
- { "5700u", 16, 1.80 },
- { "5700x", 16, 3.40 },
- { "5700x3d", 16, 3.00 },
- { "5705g", 16, 3.80 },
- { "5705ge", 16, 3.20 },
- { "5800", 16, 3.40 },
- { "5800h", 16, 3.20 },
- { "5800hs", 16, 2.80 },
- { "5800u", 16, 1.90 },
- { "5800x", 16, 3.80 },
- { "5800x3d", 16, 3.40 },
- { "5800xt", 16, 3.80 },
- { "5825c", 16, 2.00 },
- { "5825u", 16, 2.00 },
- { "6800h", 16, 3.20 },
- { "6800hs", 16, 3.20 },
- { "6800u", 16, 2.70 },
- { "7435hs", 16, 3.10 },
- { "7700", 16, 3.80 },
- { "7700x", 16, 4.50 },
- { "7730u", 16, 2.00 },
- { "7735hs", 16, 3.20 },
- { "7735u", 16, 2.70 },
- { "7736u", 16, 2.70 },
- { "7745hx", 16, 3.60 },
- { "7800x3d", 16, 4.20 },
- { "7840hs", 16, 3.80 },
- { "7840hx", 24, 3.00 },
- { "7840u", 16, 3.30 },
- { "8700f", 16, 4.10 },
- { "8700g", 16, 4.20 },
- { "8840hs", 16, 3.30 },
- { "8840u", 16, 3.30 },
- { "8845hs", 16, 3.80 },
- { "9700x", 16, 3.80 },
- { "9800x3d", 16, 4.70 },
- { "4750g", 16, 3.60 },
- { "4750ge", 16, 3.10 },
- { "4750u", 16, 1.70 },
- { "5750g", 16, 3.80 },
- { "5750ge", 16, 3.20 },
- { "5755g", 16, 3.80 },
- { "5755ge", 16, 3.20 },
- { "5845", 16, 3.40 },
- { "5850u", 16, 1.90 },
- { "5875u", 16, 2.00 },
- { "6850h", 16, 3.20 },
- { "6850hs", 16, 3.20 },
- { "6850u", 16, 2.70 },
- { "6860z", 16, 2.70 },
- { "7745", 16, 3.60 },
+ { "260", 16 },
+ { "2700", 16 },
+ { "2700e", 16 },
+ { "2700u", 8 },
+ { "2700x", 16 },
+ { "2800h", 8 },
+ { "3700c", 8 },
+ { "3700u", 8 },
+ { "3700x", 16 },
+ { "3750h", 8 },
+ { "3780u", 8 },
+ { "3800x", 16 },
+ { "3800xt", 16 },
+ { "4700g", 16 },
+ { "4700ge", 16 },
+ { "4700u", 8 },
+ { "4800h", 16 },
+ { "4800hs", 16 },
+ { "4800u", 16 },
+ { "4980u", 16 },
+ { "5700", 16 },
+ { "5700g", 16 },
+ { "5700ge", 16 },
+ { "5700u", 16 },
+ { "5700x", 16 },
+ { "5700x3d", 16 },
+ { "5705g", 16 },
+ { "5705ge", 16 },
+ { "5800", 16 },
+ { "5800h", 16 },
+ { "5800hs", 16 },
+ { "5800u", 16 },
+ { "5800x", 16 },
+ { "5800x3d", 16 },
+ { "5800xt", 16 },
+ { "5825c", 16 },
+ { "5825u", 16 },
+ { "6800h", 16 },
+ { "6800hs", 16 },
+ { "6800u", 16 },
+ { "7435hs", 16 },
+ { "7700", 16 },
+ { "7700x", 16 },
+ { "7730u", 16 },
+ { "7735hs", 16 },
+ { "7735u", 16 },
+ { "7736u", 16 },
+ { "7745hx", 16 },
+ { "7800x3d", 16 },
+ { "7840hs", 16 },
+ { "7840hx", 24 },
+ { "7840u", 16 },
+ { "8700f", 16 },
+ { "8700g", 16 },
+ { "8840hs", 16 },
+ { "8840u", 16 },
+ { "8845hs", 16 },
+ { "9700x", 16 },
+ { "9800x3d", 16 },
+ { "4750g", 16 },
+ { "4750ge", 16 },
+ { "4750u", 16 },
+ { "5750g", 16 },
+ { "5750ge", 16 },
+ { "5755g", 16 },
+ { "5755ge", 16 },
+ { "5845", 16 },
+ { "5850u", 16 },
+ { "5875u", 16 },
+ { "6850h", 16 },
+ { "6850hs", 16 },
+ { "6850u", 16 },
+ { "6860z", 16 },
+ { "7745", 16 },
// "270" mapped to Ryzen 7 3700X (Third Gen 8c/16t)
- { "270", 16, 3.60 },
- { "3900", 24, 3.10 },
- { "3900x", 24, 3.80 },
- { "3900xt", 24, 3.80 },
- { "3950x", 32, 3.50 },
- { "4900h", 16, 3.30 },
- { "4900hs", 16, 3.00 },
- { "5900", 24, 3.00 },
- { "5900hs", 16, 3.00 },
- { "5900hx", 16, 3.30 },
- { "5900x", 24, 3.70 },
- { "5900xt", 32, 3.30 },
- { "5950x", 32, 3.40 },
- { "5980hs", 16, 3.00 },
- { "5980hx", 16, 3.30 },
- { "6900hs", 16, 3.30 },
- { "6900hx", 16, 3.30 },
- { "6980hs", 16, 3.30 },
- { "6980hx", 16, 3.30 },
- { "7845hx", 24, 3.00 },
- { "7900", 24, 3.70 },
- { "7900x", 24, 4.70 },
- { "7900x3d", 24, 4.40 },
- { "7940hs", 16, 4.00 },
- { "7940hx", 32, 2.40 },
- { "7945hx", 32, 2.50 },
- { "7945hx3d", 32, 2.30 },
- { "7950x", 32, 4.50 },
- { "7950x3d", 32, 4.20 },
- { "8945hs", 16, 4.00 },
- { "9850hx", 24, 2.40 },
- { "9900x", 24, 4.40 },
- { "9900x3d", 24, 4.40 },
- { "9950x", 32, 4.30 },
- { "9950x3d", 32, 4.30 },
- { "9955hx", 32, 2.40 },
- { "5945", 24, 4.10 },
- { "6950h", 16, 3.30 },
- { "6950hs", 16, 3.30 },
- { "7945", 24, 4.70 },
- { "1900x", 16, 3.80 },
- { "1920x", 24, 3.50 },
- { "1950x", 32, 3.40 },
- { "2920x", 24, 3.50 },
- { "2950x", 32, 3.50 },
- { "2970wx", 48, 3.00 },
- { "2990wx", 64, 3.00 },
- { "3960x", 48, 3.80 },
- { "3970x", 64, 3.70 },
- { "3990x", 128, 2.90 },
- { "7960x", 48, 4.20 },
- { "7970x", 64, 4.00 },
- { "7980x", 128, 3.20 },
- { "3945wx", 24, 4.00 },
- { "3955wx", 32, 3.90 },
- { "3975wx", 64, 3.50 },
- { "3995wx", 128, 2.70 },
- { "5945wx", 24, 4.10 },
- { "5955wx", 32, 4.00 },
- { "5965wx", 48, 3.80 },
- { "5975wx", 64, 3.60 },
- { "5995wx", 128, 2.70 },
- { "7945wx", 24, 4.70 },
- { "7955wx", 32, 4.50 },
- { "7965wx", 48, 4.20 },
- { "7975wx", 64, 4.00 },
- { "7985wx", 128, 3.20 },
- { "7995wx", 192, 2.50 },
+ { "270", 16 },
+ { "3900", 24 },
+ { "3900x", 24 },
+ { "3900xt", 24 },
+ { "3950x", 32 },
+ { "4900h", 16 },
+ { "4900hs", 16 },
+ { "5900", 24 },
+ { "5900hs", 16 },
+ { "5900hx", 16 },
+ { "5900x", 24 },
+ { "5900xt", 32 },
+ { "5950x", 32 },
+ { "5980hs", 16 },
+ { "5980hx", 16 },
+ { "6900hs", 16 },
+ { "6900hx", 16 },
+ { "6980hs", 16 },
+ { "6980hx", 16 },
+ { "7845hx", 24 },
+ { "7900", 24 },
+ { "7900x", 24 },
+ { "7900x3d", 24 },
+ { "7940hs", 16 },
+ { "7940hx", 32 },
+ { "7945hx", 32 },
+ { "7945hx3d", 32 },
+ { "7950x", 32 },
+ { "7950x3d", 32 },
+ { "8945hs", 16 },
+ { "9850hx", 24 },
+ { "9900x", 24 },
+ { "9900x3d", 24 },
+ { "9950x", 32 },
+ { "9950x3d", 32 },
+ { "9955hx", 32 },
+ { "5945", 24 },
+ { "6950h", 16 },
+ { "6950hs", 16 },
+ { "7945", 24 },
+ { "1900x", 16 },
+ { "1920x", 24 },
+ { "1950x", 32 },
+ { "2920x", 24 },
+ { "2950x", 32 },
+ { "2970wx", 48 },
+ { "2990wx", 64 },
+ { "3960x", 48 },
+ { "3970x", 64 },
+ { "3990x", 128 },
+ { "7960x", 48 },
+ { "7970x", 64 },
+ { "7980x", 128 },
+ { "3945wx", 24 },
+ { "3955wx", 32 },
+ { "3975wx", 64 },
+ { "3995wx", 128 },
+ { "5945wx", 24 },
+ { "5955wx", 32 },
+ { "5965wx", 48 },
+ { "5975wx", 64 },
+ { "5995wx", 128 },
+ { "7945wx", 24 },
+ { "7955wx", 32 },
+ { "7965wx", 48 },
+ { "7975wx", 64 },
+ { "7985wx", 128 },
+ { "7995wx", 192 },
// Sempron
- { "2650", 2, 1.45 },
- { "3850", 4, 1.30 },
+ { "2650", 2 },
+ { "3850", 4 },
// Z-Series
- { "z1", 12, 3.20 }
+ { "z1", 12 }
};
out_ptr = db;
out_size = sizeof(db) / sizeof(cpu_entry);
@@ -3716,7 +3702,7 @@ struct VM {
}
const char* names[] = { "GetProcessInformation" };
- void* funcs[1] = { nullptr };
+ void* funcs[ARRAYSIZE(names)] = {};
util::get_function_address(ntdll, names, funcs, 1);
get_process_information get_proc_info = reinterpret_cast<get_process_information>(funcs[0]);
@@ -4896,12 +4882,19 @@ struct VM {
constexpr u32 HYPERVISOR_MASK = (1u << 31);
if (ecx & HYPERVISOR_MASK) {
+ // if hypervisor bit is enabled, but we're in a root partition, prevent it from flagging
if (util::hyper_x() == HYPERV_ARTIFACT_VM) {
return false;
}
return true;
}
+ else {
+ // if hypervisor bit is disabled, but vmaware detects hyper-v signals, we're in an impossible situation (patching)
+ if (util::hyper_x() == HYPERV_ARTIFACT_VM) {
+ return true;
+ }
+ }
return false;
#endif
@@ -5025,6 +5018,109 @@ struct VM {
#if (!x86)
return false;
#else
+ auto is_smt_enabled = []() noexcept -> bool {
+ auto popcount = [](uint64_t v) noexcept -> int {
+ #if (GCC || CLANG)
+ return __builtin_popcountll(v);
+ #elif (MSVC)
+ return static_cast<int>(__popcnt64(static_cast<unsigned __int64>(v)));
+ #else
+ int c = 0;
+ while (v) { c += static_cast<int>(v & 1ull); v >>= 1; }
+ return c;
+ #endif
+ };
+ #if (WINDOWS)
+ DWORD len = 0;
+ if (GetLogicalProcessorInformationEx(RelationProcessorCore, nullptr, &len) ||
+ GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
+ return false;
+ }
+ std::vector<BYTE> buf(static_cast<std::size_t>(len));
+ if (!GetLogicalProcessorInformationEx(RelationProcessorCore,
+ reinterpret_cast<PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX>(buf.data()), &len)) {
+ return false;
+ }
+ // first RelationProcessorCore record encountered; basically, if two logical processors map to the same core, SMT is enabled from the OS's point of view
+ size_t offset = 0;
+ while (offset + sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX) <= static_cast<std::size_t>(len)) {
+ auto rec = reinterpret_cast<const SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX*>(buf.data() + offset);
+ if (rec->Relationship == RelationProcessorCore) {
+ const PROCESSOR_RELATIONSHIP& pr = rec->Processor;
+ unsigned total = 0;
+ for (WORD i = 0; i < pr.GroupCount; ++i) {
+ total += popcount(static_cast<uint64_t>(pr.GroupMask[i].Mask));
+ }
+ return total > 1;
+ }
+ if (rec->Size == 0) break;
+ offset += rec->Size;
+ }
+ return false;
+ #elif (APPLE)
+ int logical = 0, physical = 0;
+ size_t sz = sizeof(logical);
+ if (sysctlbyname("hw.logicalcpu", &logical, &sz, nullptr, 0) != 0) logical = 0;
+ sz = sizeof(physical);
+ if (sysctlbyname("hw.physicalcpu", &physical, &sz, nullptr, 0) != 0) physical = 0;
+ if (logical > 0 && physical > 0) return logical > physical;
+ return false;
+ #else
+ // check cpu0 thread_siblings_list
+ {
+ std::ifstream f("/sys/devices/system/cpu/cpu0/topology/thread_siblings_list");
+ if (f) {
+ std::string s;
+ if (std::getline(f, s)) {
+ // trim
+ size_t a = 0; while (a < s.size() && std::isspace(static_cast<unsigned char>(s[a]))) ++a;
+ size_t b = s.size(); while (b > a && std::isspace(static_cast<unsigned char>(s[b - 1]))) --b;
+ if (b > a) {
+ for (size_t k = a; k < b; ++k) {
+ if (s[k] == ',' || s[k] == '-') return true;
+ }
+ return false;
+ }
+ }
+ }
+ }
+ // /proc/cpuinfo for unique (physical id, core id) pairs vs processors
+ std::ifstream cpuinfo("/proc/cpuinfo");
+ if (!cpuinfo) return false;
+ std::string line;
+ int processors = 0;
+ bool in_section = false;
+ int cur_phys = -1, cur_core = -1;
+ std::vector<std::pair<int, int>> cores;
+ while (std::getline(cpuinfo, line)) {
+ if (line.empty()) {
+ if (cur_phys != -1 && cur_core != -1) cores.emplace_back(cur_phys, cur_core);
+ cur_phys = cur_core = -1;
+ in_section = false;
+ continue;
+ }
+ auto pos = line.find(':');
+ if (pos == std::string::npos) continue;
+ std::string key = line.substr(0, pos);
+ std::string val = line.substr(pos + 1);
+ // trim
+ while (!key.empty() && std::isspace(static_cast<unsigned char>(key.back()))) key.pop_back();
+ while (!val.empty() && std::isspace(static_cast<unsigned char>(val.front()))) val.erase(val.begin());
+ if (key == "processor") ++processors;
+ else if (key == "physical id") { try { cur_phys = std::stoi(val); } catch (...) { cur_phys = -1; } }
+ else if (key == "core id") { try { cur_core = std::stoi(val); } catch (...) { cur_core = -1; } }
+ }
+ if (cur_phys != -1 && cur_core != -1) cores.emplace_back(cur_phys, cur_core);
+ if (!cores.empty() && processors > 0) {
+ std::sort(cores.begin(), cores.end());
+ cores.erase(std::unique(cores.begin(), cores.end()), cores.end());
+ int physical_cores = static_cast<int>(cores.size());
+ return processors > physical_cores;
+ }
+ return false;
+ #endif
+ };
+
const auto& info = cpu::analyze_cpu();
if (info.found) {
@@ -5033,8 +5129,15 @@ struct VM {
const u32 actual = memo::threadcount::fetch();
if (actual != info.expected_threads) {
debug(info.debug_tag, ": Current threads -> ", actual);
- debug(info.debug_tag, ": Expected threads -> ", info.expected_threads);
- return true;
+ const bool smt = is_smt_enabled();
+ if (smt) {
+ debug(info.debug_tag, ": Expected ", info.expected_threads, " threads");
+ return true;
+ }
+ else {
+ debug(info.debug_tag, ": Expected ", info.expected_threads, " threads, but found SMT disabled");
+ return false;
+ }
}
}
return false;
@@ -5043,9 +5146,7 @@ struct VM {
/**
- * @brief Check for signatures in leaf 0x40000001 in CPUID
- * @link https://www.geoffchappell.com/studies/windows/km/ntoskrnl/inc/shared/hvgdk_mini/hv_hypervisor_interface.htm
- * @link https://github.com/ionescu007/SimpleVisor/blob/master/shvvp.c
+ * @brief Check for CPUID signatures that reveal the presence of a hypervisor
* @category x86
* @implements VM::CPUID_SIGNATURE
*/
@@ -5053,16 +5154,112 @@ struct VM {
#if (!x86)
return false;
#else
- u32 eax, unused = 0;
- cpu::cpuid(eax, unused, unused, unused, 0x40000001);
- VMAWARE_UNUSED(unused);
+ u32 eax = 0, ebx = 0, ecx = 0, edx = 0;
+ cpu::cpuid(eax, ebx, ecx, edx, 0x40000001);
constexpr u32 simplevisor = 0x00766853; // " vhS"
debug("CPUID_SIGNATURE: eax = ", eax);
- if (eax == simplevisor)
+ if (eax == simplevisor) {
return core::add(brand_enum::SIMPLEVISOR);
+ }
+
+ if (cpu::is_intel()) {
+ const bool has_leaf_b = cpu::is_leaf_supported(0x0B);
+ const bool has_leaf_1f = cpu::is_leaf_supported(0x1F);
+
+ // If neither extended topology leaf is supported, we can't perform the check
+ if (!has_leaf_b && !has_leaf_1f) {
+ return false;
+ }
+
+ u32 l1_eax = 0, l1_ebx = 0, l1_ecx = 0, l1_edx = 0;
+ u32 vb_eax = 0, vb_ebx = 0, vb_ecx = 0, vb_edx = 0;
+ u32 v1f_eax = 0, v1f_ebx = 0, v1f_ecx = 0, v1f_edx = 0;
+
+ u32 aba_start = 0, aba_end = 0;
+ u32 unused = 0;
+ int retries = 0;
+
+ // triple-read ABA pattern to detect thread migration, bounded to 8 retries
+ // leaf 1's Initial APIC ID is the ABA guard
+ do {
+ cpu::cpuid(l1_eax, l1_ebx, l1_ecx, l1_edx, 1, 0);
+ aba_start = (l1_ebx >> 24) & 0xFF; // Initial APIC ID
+
+ if (has_leaf_b) cpu::cpuid(vb_eax, vb_ebx, vb_ecx, vb_edx, 0x0B, 0);
+ if (has_leaf_1f) cpu::cpuid(v1f_eax, v1f_ebx, v1f_ecx, v1f_edx, 0x1F, 0);
+
+ cpu::cpuid(unused, l1_ebx, unused, unused, 1, 0);
+ aba_end = (l1_ebx >> 24) & 0xFF;
+ } while (aba_start != aba_end && ++retries < 8);
+
+ // If we hit the retry limit and the thread is still migrating,
+ // abort the check to prevent false positives
+ if (aba_start != aba_end) {
+ return false;
+ }
+
+ const u32 initial_apic_id = aba_start;
+
+ // check Leaf 0x0B against Leaf 1
+ if (has_leaf_b) {
+ const u32 vb_level = (vb_ecx >> 8) & 0xFF;
+
+ if (vb_level != 0) {
+ // if x2APIC ID is < 255, Initial APIC ID must match exactly
+ if (vb_edx < 255 && (vb_edx & 0xFF) != initial_apic_id) {
+ return true;
+ }
+ }
+ }
+
+ // check Leaf 0x1F against Leaf 1, and cross-check with 0x0B
+ if (has_leaf_1f) {
+ const u32 v1f_level = (v1f_ecx >> 8) & 0xFF;
+
+ if (v1f_level != 0) {
+ // if x2APIC ID is < 255, Initial APIC ID must match exactly
+ if (v1f_edx < 255 && (v1f_edx & 0xFF) != initial_apic_id) {
+ return true;
+ }
+
+ // cross-check 0x0B vs 0x1F if both are supported and valid
+ if (has_leaf_b) {
+ const u32 vb_level = (vb_ecx >> 8) & 0xFF;
+ if (vb_level != 0 && vb_edx != v1f_edx) {
+ return true;
+ }
+ }
+ }
+ }
+ }
+ else if (cpu::is_amd()) {
+ const bool has_leaf_7 = cpu::is_leaf_supported(7);
+
+ if (!has_leaf_7) {
+ return false;
+ }
+
+ u32 l7_eax = 0, l7_ebx = 0, l7_ecx = 0, l7_edx = 0;
+ cpu::cpuid(l7_eax, l7_ebx, l7_ecx, l7_edx, 7, 0);
+
+ // Intel enumerates hardware mitigations in Leaf 7.0.EDX:
+ // Bit 26: IBRS and IBPB
+ // Bit 27: STIBP
+ // Bit 31: SSBD
+
+ // AMD processors strictly reserve these bits (force them to 0)
+ // and instead enumerate their mitigations in Leaf 0x80000008.EBX
+ const bool has_intel_ibrs = (l7_edx & (1 << 26)) != 0;
+ const bool has_intel_stibp = (l7_edx & (1 << 27)) != 0;
+ const bool has_intel_ssbd = (l7_edx & (1 << 31)) != 0;
+
+ if (has_intel_ibrs || has_intel_stibp || has_intel_ssbd) {
+ return true;
+ }
+ }
return false;
#endif
@@ -5096,617 +5293,349 @@ struct VM {
/**
* @brief Check for timing anomalies in the system
- * @category x86
+ * @category x86, Windows
* @implements VM::TIMER
*/
[[nodiscard]] static bool timer() {
- #if (x86)
- /**
- * This is an explanation for any person, even if you don't have any kind of knowledge into this topic, without going depth into technical stuff.
- *
- * ======== General Explanation ========
- * This function runs a CPU instruction that takes a lot of time to run inside a VM, and returns true if the latency (time) was high
- * It also checks for hypervisors trying to hide this latency, and it has been historically refactored over the last two years to take into account every false positive and false negative
- *
- * Techniques a hypervisor can use to hide latency:
- * 1. A hypervisor that, whenever the latency is measured by VMAware, returns a spoofed latency
- * 2. A hypervisor that makes the cpu instruction itself have a fast routine, so it doesn't take too much time to run in the VM
- *
- * Small introduction:
- * the instruction that always (unconditionally) takes a lot of time to run inside the VM is "cpuid"
- * the instruction used to read its latency is "rdtsc", which reads the TSC, measured in cycles
- * so, if we do: start = rdtsc, then cpuid, then end = rdtsc, and finally end - start, we know how much time cpuid took to run, being high if a VM is present
- * this measurement must always be done multiple times to be as accurate as possible
- * cpuid takes a lot to run in VMs because its a instruction that spends time in switching from usermode (where vmaware runs) to hypervisor mode (where cpuid will be handled)
- *
- * ======== Detection Explanation ========
- * - Local Ratio Check -
- * A hypervisor will of course try to hide this latency, they can intercept any of those two instructions, or both: cpuid and rdtsc
- * After being intercepted, the hypervisor can return a fake TSC value
- * They can intercept those 2 instructions in one cpu core, or in all cpu cores
- * Example: If a VM in normal conditions has 2000 cycles of real cpuid latency (because it spent 1600 cycles in the hypervisor), they do:
- * spoofed_tsc = real_cpuid_latency (2000) - time_spent_in_hypervisor (1600) and return spoofed_tsc (400) to VMAware in the core where vmaware is measuring latency
- * This is called TSC offsetting/downclocking, in this case VMAware would see a latency of 400 cycles (which is below our 800 cycle threshold), instead of 2000
- *
- * VMAware knows whether the time is spoofed or real by cross-referencing.
- * VMAware runs a thread, pinned to core 1 that runs an instruction that can't be intercepted by hypervisors: "xor"
- * VMAware runs another thread simultaneously, in CPU core 2, that runs the rdtsc-cpuid-rdtsc loop explained before, which is intercepted by the hypervisor
- * When both threads end, VMAware compares the time spent in both loops. In normal conditions, both loops reported having the same TSC, because they ran simultaneously
- * But, if a hypervisor intercepts cpuid or rdtsc, they would have to hide the latency of thread 2 so that VMAware doesnt see a high cpuid latency
- * This makes thread 2 (which ran cpuid) have a much smaller (spoofed) TSC than thread 1 (which didnt run any instruction that the hypervisor can intercept)
- *
- * But, what if when thread 2 is running cpuid, the hypervisor downclocks all cpu cores, instead of only core 2, affecting thread 1 as well?
- *
- * - Global Ratio Check -
- * If a hypervisor downclocks latency in all cpu cores, all instructions in thread 1 and 2, including XOR of thread 1, might appear to have ran extremely fast
- * Because of this, VMAware checks the IPC (Instruction per Cycle) that were run during the measurement:
- * Example: If thread 1 reports finishing 100000000 dependent iterations in only 10000000 TSC cycles, the CPU effectively ran at 10 IPS
- * which is basically impossible on x86 silicon and confirms the TSC was manipulated
- *
- * This makes impossible for hypervisors to hide the latency in either one or all cores, so they always attempt to restore the time debt.
- *
- * - Transient TSC Check -
- * Since the hypervisor cant hide reliably the latency against this code, they do the following:
- * 1. VMAware runs rdtsc
- * 2. VMAware runs cpuid
- * 3. VMAware runs rdtsc, hypervisor returns spoofed tsc, but saves the real latency (previous real_cpuid_latency)
- * 4. VMAware stores result and prepares next iteration in the loop
- * 5. While VMAware is doing step 4, hypervisor puts all cores to current_tsc + real_cpuid_latency, restoring the TSC to its original value
- * 6. Loop repeats
- *
- * The hypervisor must return the time debt between the first loop and the second loop: always after step 3 and always before step 6
- * To counter this, VMAware simply keeps track of latency between iterations, so no matter when the hypervisor restores the time debt, it sees the hidden latency
- *
- * - Statistical Check -
- * Now, they might try to downscale TSC sometimes, and pay the debt much later, so for example, when vmaware runs cpuid, it sees:
- * fast fast fast fast fast fast fast fast fast fast fast fast fast fast slow (time debt of all previous iterations is paid here in slow)
- * VMAware sees: 94% fast, 6% slow, thinks 6% slow are normal kernel noise, and discards them, so it gets bypassed...
- *
- * Thus, VMAware when detects a spike that seems to be a time debt payment, it redistributes them into previous samples, so it becomes:
- * before redistribution:
- * 100 100 100 100 100 100 100 100 100 100 100 100 100 100 1000
- *
- * after redistribution:
- * 166 166 166 166 166 166 166 166 166 166 166 166 166 166 166
- *
- * After the hypervisor have paid all the time debt, ALL the hidden latency will be redistributed into all samples, and will mathematically always cross the latency threshold
- * They might try to keep playing with the time debt redistribution, like not paying 20-30% of the time debt, but thresholds in VMAware's code (and the excesive number of iterations)
- * are calculated in purpose so that it's mathematically impossible (even with a patch that only downscales 1 cycle) to contaminate samples selectively so that it doesn't trigger any check
- * If a hypervisor doesn't pay sometimes the time debt (enough so that sample redistribution is not larger than the latency threshold), it trips the local or global ratio check
- * and if you do pay the time debt but at any interval (lets say at loop iteration 15, or 32, or 47...), you trip the cpuid latency check
- *
- * So, now what they can do?
- * - Low Latency Check -
- * If they can't do technique 1 (hiding the latency), they might attempt technique 2 (making the VM as fast as possible)
- * Remember that we use the cpuid instruction, which always haves high latency in a VM, which return results containing info about the CPU
- *
- * To do this, they might cache results and give them back instantly when cpuid is executed, or recode the whole kernel to just make it handle the cpuid quickly
- * But they can't avoid one thing: the latency of the CPU switching from vmaware to the hypervisor itself
- * No matter how fast the hypervisor is at handling cpuid, you cannot make the CPU faster than what it is at a hardware level.
- * VMAware puts a threshold specifically tailored to the minimum latency seen on the wild, across more than 10000000 machines
- * that the CPU takes to switch from user-mode (VMAware) to VMM (hypervisor), this latency is often called the "vmexit" latency
- *
- * To give the worst nightmare, VMAware runs all the aforementioned checks in parallel, trillions of times in a single loop, so the hypervisor finds a paradox:
- * Either the hypervisor downclock TSC (failing the local/global ratio checks), or either the hypervisor pays the time debt (exposing the vmexit latency)
- * Only solution? Maybe making the whole VM a 52% slower? Who knows...
- */
-
- #if (MSVC)
- #define COMPILER_BARRIER() _ReadWriteBarrier()
- #else
- #define COMPILER_BARRIER() asm volatile("" ::: "memory")
- #endif
-
+ #if (x86 && WINDOWS)
+ // Detect a hypervisor without giving it time to react (when the hypervisor sees the vmexit, it's already too late for it, as the counter already exceeded the threshold)
+ // Uses our own software-based clock, meaning a hypervisor can't hide time by offsetting TSC or controlling any hardware timer
+ double threshold = 4.0;
if (util::is_running_under_translator()) {
debug("TIMER: Running inside a binary translation layer");
return false;
}
- // will be used in cpuid measurements
- u16 cycle_threshold = 800; // average latency of a VMX/SVM VMEXIT alone, we should never include more than that
- if (util::hyper_x() == HYPERV_ARTIFACT_VM) {
- cycle_threshold = 3250; // if we're running under Hyper-V, make VMAware detect nested virtualization
- }
-
- // check for RDTSCP support, we will use it later
- int regs[4] = { 0 };
- cpu::cpuid(regs, 0x80000001);
- const bool have_rdtscp = (regs[3] & (1u << 27)) != 0;
- if (!have_rdtscp) {
- debug("TIMER: RDTSCP instruction not supported"); // __rdtscp should be supported nowadays
- return true;
+ if (util::hyper_x() != HYPERV_UNKNOWN) {
+ threshold = 15.0;
}
- constexpr u64 ITER_XOR = 150000000ULL;
- u64 actual_iters = ITER_XOR; // actual_iters is ITER_XOR + any extra XOR instructions vmaware could run before collecting enough cpuid samples
- constexpr size_t CPUID_ITER = 100; // per leaf
- // we try many leaves so that it's extremely heavy to recode every cpuid path to exit quickly enough so that it doesn't trigger the cycle threshold
- static constexpr unsigned int leaves[] = {
- 0x0u, 0x1u, 0x2u, 0x3u, 0x4u, 0x5u, 0x6u, 0x7u, 0x8u, 0x9u,
- 0xAu, 0xBu, 0xCu, 0xDu, 0xEu, 0xFu, 0x10u, 0x11u, 0x12u, 0x13u,
- 0x14u, 0x15u, 0x16u, 0x17u, 0x18u, 0x19u, 0x1Au, 0x1Bu, 0x1Cu, 0x1Du,
- 0x1Eu, 0x1Fu,
- 0x40000000u, 0x40000001u, 0x40000002u, 0x40000003u, 0x40000004u,
- 0x40000005u, 0x40000006u, 0x40000007u, 0x40000008u, 0x40000009u,
- 0x80000000u, 0x80000001u, 0x80000002u, 0x80000003u, 0x80000004u,
- 0x80000005u, 0x80000006u, 0x80000007u, 0x80000008u, 0x80000009u,
- 0x8000000Au, 0x8000000Bu, 0x8000000Cu, 0x8000000Du, 0x8000000Eu,
- 0x8000000Fu, 0x80000010u, 0x80000011u, 0x80000012u, 0x80000013u,
- 0x80000014u, 0x80000015u, 0x80000016u, 0x80000017u, 0x80000018u,
- 0x80000019u, 0x8000001Au
- };
- constexpr size_t n_leaves = sizeof(leaves) / sizeof(leaves[0]);
-
- unsigned hw = std::thread::hardware_concurrency();
- if (hw == 0) hw = 1;
-
- std::atomic ready_count(0);
- std::atomic state(0);
- std::atomic t1_start(0), t1_end(0);
- std::atomic t2_end(0);
- std::vector samples(200000, 0);
-
- struct affinity_cookie {
- bool valid{ false };
- #if (WINDOWS)
- HANDLE thread_handle{ nullptr };
- DWORD_PTR prev_mask{ 0 };
- #elif (LINUX)
- pthread_t thread{ 0 };
- cpu_set_t prev_mask{};
- #endif
- };
-
- auto set_affinity = [](std::thread& t, unsigned core) -> affinity_cookie {
- affinity_cookie cookie;
- #if (WINDOWS)
- const HANDLE h = static_cast(t.native_handle());
- const DWORD_PTR mask = static_cast(1ULL) << core;
- const DWORD_PTR prev = SetThreadAffinityMask(h, mask);
- if (prev != 0) {
- cookie.valid = true;
- cookie.thread_handle = h;
- cookie.prev_mask = prev;
- }
- #elif (LINUX)
- pthread_t ph = t.native_handle();
- cpu_set_t prev;
- if (pthread_getaffinity_np(ph, sizeof(prev), &prev) == 0) {
- cpu_set_t cp;
- CPU_ZERO(&cp);
- CPU_SET(core, &cp);
- (void)pthread_setaffinity_np(ph, sizeof(cp), &cp);
- cookie.valid = true;
- cookie.thread = ph;
- cookie.prev_mask = prev;
- }
- #else
- (void)t; (void)core;
- #endif
- return cookie;
+ // prevent false sharing when triggering hypervisor exits with the intentional data race condition
+ struct alignas(64) cache_state {
+ alignas(64) volatile u64 counter { 0 };
+ alignas(64) std::atomic start_test{ false };
+ alignas(64) std::atomic test_done{ false };
};
- auto restore_affinity = [](const affinity_cookie& cookie) {
- if (!cookie.valid) return;
- #if (WINDOWS)
- (void)SetThreadAffinityMask(cookie.thread_handle, cookie.prev_mask);
- #elif (LINUX)
- (void)pthread_setaffinity_np(cookie.thread, sizeof(cookie.prev_mask), &cookie.prev_mask);
+ // Shared state and results
+ cache_state state;
+ bool hypervisor_detected = false;
+ bool bypass_detected = false;
+
+ // we don't use cpu::cpuid on purpose
+ auto trigger_vmexit = [](i32* info, i32 leaf, i32 sub) {
+ #if (GCC || CLANG)
+ __asm__ volatile (
+ "cpuid"
+ : "=a"(info[0]), "=b"(info[1]), "=c"(info[2]), "=d"(info[3])
+ : "a"(leaf), "c"(sub)
+ : "cc", "memory"
+ );
#else
- (void)cookie;
+ __cpuidex(info, leaf, sub);
#endif
};
- // lambda that calculates how much cycles a single vmexit takes
- auto cpuid = [](unsigned int leaf) noexcept -> u64 {
- #if (MSVC)
- thread_local u32 aux = 0;
- // make regs volatile so writes cannot be optimized out, if this isn't added and the code is compiled in release mode, cycles would be around 40 even under Hyper-V
- volatile int regs[4] = { 0 };
-
- // ensure the CPU pipeline is drained of previous loads before we start the clock
- _mm_lfence();
+ auto counter_thread = [&state]() {
+ const HANDLE current_thread = reinterpret_cast(-2LL);
+ const HANDLE current_process = reinterpret_cast(-1LL);
- // read start time
- const u64 t1 = __rdtsc();
+ // search for the physical sibling of CPU 0, then pick a random CPU excluding it to avoid SMT locks
+ DWORD_PTR procMask = 0, sysMask = 0;
+ GetProcessAffinityMask(current_process, &procMask, &sysMask);
- // prevent the compiler from moving the __cpuid call before the t1 read
- COMPILER_BARRIER();
+ DWORD len = 0;
+ GetLogicalProcessorInformationEx(RelationProcessorCore, nullptr, &len);
- __cpuid((int*)regs, static_cast(leaf)); // not using cpu::cpuid to get a chance of inlining
+ BYTE stackBuf[1024]{};
+ BYTE* buf = stackBuf;
- COMPILER_BARRIER();
+ std::vector heapBuf;
+ if (len > sizeof(stackBuf)) {
+ heapBuf.resize(len);
+ buf = heapBuf.data();
+ }
- // the idea is to let rdtscp internally wait until cpuid is executed rather than using another memory barrier
- const u64 t2 = __rdtscp(&aux);
+ GetLogicalProcessorInformationEx(RelationProcessorCore,
+ reinterpret_cast(buf), &len);
- // ensure the read of t2 doesn't bleed into future instructions
- _mm_lfence();
-
- return t2 - t1;
- #else
- // same logic of above
- unsigned int lo1, hi1, lo2, hi2, lo3, hi3;
+ DWORD_PTR cpu0CoreMask = 0;
+ for (BYTE* p = buf; p < buf + len; ) {
+ const auto* r = reinterpret_cast(p);
+ if (r->Relationship == RelationProcessorCore) {
+ for (WORD i = 0; i < r->Processor.GroupCount; ++i) {
+ const auto& m = r->Processor.GroupMask[i];
+ if (m.Group == 0 && (m.Mask & 1)) cpu0CoreMask |= m.Mask;
+ }
+ }
+ p += r->Size;
+ }
- asm volatile("lfence" ::: "memory");
- asm volatile("rdtsc" : "=a"(lo1), "=d"(hi1) :: "memory");
- COMPILER_BARRIER();
+ const DWORD_PTR choices = procMask & ~cpu0CoreMask;
- volatile unsigned int a, b, c, d;
- asm volatile("cpuid"
- : "=a"(a), "=b"(b), "=c"(c), "=d"(d)
- : "a"(leaf)
- : "memory");
+ DWORD idxs[64]{}, n = 0;
+ for (DWORD i = 0; i < 64; ++i)
+ if (choices & (1ull << i)) idxs[n++] = i;
- COMPILER_BARRIER();
- asm volatile("rdtscp" : "=a"(lo2), "=d"(hi2) :: "rcx", "memory");
- asm volatile("lfence" ::: "memory");
+ if (n) {
+ // random so that the hypervisor doesn't know where the counter thread is
+ // this will affect latency if cache lines from trigger_thread and counter_thread are separated
+ // however, we do a ratio based detection, so this won't affect the detection accuracy because the cache latency affects both samples
+ std::mt19937 gen(std::random_device{}());
+ const u32 cpu = idxs[std::uniform_int_distribution(0, n - 1)(gen)];
+ SetThreadAffinityMask(current_thread, 1ull << cpu);
+ }
+ SetThreadPriority(current_thread, THREAD_PRIORITY_HIGHEST); // decrease chance of being rescheduled
- const u64 t1 = (u64(hi1) << 32) | lo1;
- const u64 t2 = (u64(hi2) << 32) | lo2;
- const u64 delta = t2 - t1;
+ while (!state.start_test.load(std::memory_order_acquire)) {}
- return delta;
- #endif
+ while (!state.test_done.load(std::memory_order_relaxed)) {
+ #if (GCC || CLANG)
+ #if (x86_64)
+ // A single incq is enough. Unrolling for example 8 times on a volatile memory address
+ // creates unpredictable store-buffer behavior and we want it stable, latency is approx. 1 cycle in all microarchs
+ __asm__ volatile ("incq %0\n\t" : "+m" (state.counter) : : "cc", "memory");
+ #else
+ __asm__ volatile (
+ "addl $1, %0; adcl $0, %1\n\t"
+ : "+m" (((u32*)&state.counter)[0]), "+m" (((u32*)&state.counter)[1])
+ : : "cc", "memory");
+ #endif
+ #else
+ state.counter++;
+ #endif
+ }
};
- // lambda that takes all vmexit samples and filters kernel noise statistically
- auto calculate_latency = [&](const std::vector& samples_in) -> u64 {
- if (samples_in.empty()) return 0;
- const size_t N = samples_in.size();
- if (N == 1) return samples_in[0];
-
- // local sorted copy
- std::vector s = samples_in;
- std::sort(s.begin(), s.end()); // ascending
-
- // tiny-sample short-circuits
- if (N <= 4) return s.front();
-
- // median (and works for sorted input)
- auto median_of_sorted = [](const std::vector& v, size_t lo, size_t hi) -> u64 {
- // this is the median of v[lo..hi-1], requires 0 <= lo < hi
- const size_t len = hi - lo;
- if (len == 0) return 0;
- const size_t mid = lo + (len / 2);
- if (len & 1) return v[mid];
- return (v[mid - 1] + v[mid]) / 2;
- };
+ auto trigger_thread = [&]() {
+ auto calculate_latency = [&](const std::vector& samples_in) -> u64 {
+ if (samples_in.empty()) return 0;
+ const size_t N = samples_in.size();
+ if (N == 1) return samples_in[0];
+
+ // local sorted copy
+ std::vector s = samples_in;
+ std::sort(s.begin(), s.end()); // ascending
+
+ // tiny-sample short-circuits
+ if (N <= 4) return s.front();
+
+ // median (and works for sorted input)
+ auto median_of_sorted = [](const std::vector& v, size_t lo, size_t hi) -> u64 {
+ // this is the median of v[lo..hi-1], requires 0 <= lo < hi
+ const size_t len = hi - lo;
+ if (len == 0) return 0;
+ const size_t mid = lo + (len / 2);
+ if (len & 1) return v[mid];
+ return (v[mid - 1] + v[mid]) / 2;
+ };
- // the robust center: median M and MAD -> approximate sigma
- const u64 M = median_of_sorted(s, 0, s.size());
- std::vector absdev;
- absdev.reserve(N);
- for (size_t i = 0; i < N; ++i) {
- const u64 d = (s[i] > M) ? (s[i] - M) : (M - s[i]);
- absdev.push_back(d);
- }
- std::sort(absdev.begin(), absdev.end());
- const u64 MAD = median_of_sorted(absdev, 0, absdev.size());
- // convert MAD to an approximate standard-deviation-like measure
- constexpr long double kmad_to_sigma = 1.4826L; // consistent for normal approx
- const long double sigma = (MAD == 0) ? 1.0L : (static_cast(MAD) * kmad_to_sigma);
-
- // find the densest small-valued cluster by sliding a fixed-count window
- // this locates the most concentrated group of samples (likely it would be the true VMEXIT cluster)
- // const size_t frac_win = (N * 8 + 99) / 100; // ceil(N * 0.08)
- // const size_t win = std::min(N, std::max(MIN_WIN, frac_win));
- const size_t MIN_WIN = 10;
- // manual min/max calculation for win size
- const size_t calc_frac = static_cast(std::ceil(static_cast(N) * 0.08));
- const size_t inner_max = (MIN_WIN > calc_frac) ? MIN_WIN : calc_frac;
- const size_t win = (N < inner_max) ? N : inner_max;
-
- size_t best_i = 0;
- u64 best_span = (s.back() - s.front()) + 1; // large initial
- for (size_t i = 0; i + win <= N; ++i) {
- const u64 span = s[i + win - 1] - s[i];
- if (span < best_span) {
- best_span = span;
- best_i = i;
- }
- }
-
- // expand the initial window greedily while staying "tight"
- // allow expansion while adding samples does not more than multiply the span by EXPAND_FACTOR
- constexpr long double EXPAND_FACTOR = 1.5L;
- size_t cluster_lo = best_i;
- size_t cluster_hi = best_i + win; // exclusive
- // expand left
- while (cluster_lo > 0) {
- const u64 new_span = s[cluster_hi - 1] - s[cluster_lo - 1];
- if (static_cast(new_span) <= EXPAND_FACTOR * static_cast(best_span) ||
- (s[cluster_hi - 1] <= (s[cluster_lo - 1] + static_cast(std::ceil(3.0L * sigma))))) {
- --cluster_lo;
- // manual min calculation
- best_span = (best_span < new_span) ? best_span : new_span;
- }
- else break;
- }
- // expand right
- while (cluster_hi < N) {
- const u64 new_span = s[cluster_hi] - s[cluster_lo];
- if (static_cast(new_span) <= EXPAND_FACTOR * static_cast(best_span) ||
- (s[cluster_hi] <= (s[cluster_lo] + static_cast(std::ceil(3.0L * sigma))))) {
- ++cluster_hi;
- best_span = (best_span < new_span) ? best_span : new_span;
- }
- else break;
- }
-
- const size_t cluster_size = (cluster_hi > cluster_lo) ? (cluster_hi - cluster_lo) : 0;
-
- // cluster must be reasonably dense and cover a non-negligible portion of samples, so this is pure sanity checks
- const double fraction_in_cluster = static_cast(cluster_size) / static_cast(N);
-
- // min/max calculation for MIN_CLUSTER
- const int val_n_50 = static_cast(N / 50);
- const size_t val_max = static_cast((5 > val_n_50) ? 5 : val_n_50);
- const size_t MIN_CLUSTER = (val_max < N) ? val_max : N; // at least 2% or 5 elements
-
- if (cluster_size < MIN_CLUSTER || fraction_in_cluster < 0.02) {
- // low-percentile (10th) trimmed median
- // Manual max calculation for fallback_count
- const size_t floor_val = static_cast(std::floor(static_cast(N) * 0.10));
- const size_t fallback_count = (1 > floor_val) ? 1 : floor_val;
-
- // median of lowest fallback_count elements (if fallback_count==1 that's smallest)
- if (fallback_count == 1) return s.front();
- const size_t mid = fallback_count / 2;
- if (fallback_count & 1) return s[mid];
- return (s[mid - 1] + s[mid]) / 2;
- }
-
- // now we try to get a robust estimate inside the cluster, trimmed mean (10% trim) centered on cluster
- const size_t trim_count = static_cast(std::floor(static_cast(cluster_size) * 0.10));
- const size_t lo = cluster_lo + trim_count;
- const size_t hi = cluster_hi - trim_count; // exclusive
- if (hi <= lo) {
- // degenerate -> median of cluster
- return median_of_sorted(s, cluster_lo, cluster_hi);
- }
-
- // sum with long double to avoid overflow and better rounding
- long double sum = 0.0L;
- for (size_t i = lo; i < hi; ++i) sum += static_cast(s[i]);
- const long double avg = sum / static_cast(hi - lo);
- u64 result = static_cast(std::llround(avg));
-
- // final sanity adjustments:
- // if the computed result is suspiciously far from the global median (e.g., > +6*sigma)
- // clamp toward the median to avoid choosing a high noisy cluster by mistake
- const long double diff_from_med = static_cast(result) - static_cast(M);
- if (diff_from_med > 0 && diff_from_med > (6.0L * sigma)) {
- // clamp to median + 4*sigma (conservative)
- result = static_cast(std::llround(static_cast(M) + 4.0L * sigma));
- }
-
- // also, if result is zero (shouldn't be) or extremely small, return a smallest observed sample
- if (result == 0) result = s.front();
+ // the robust center: median M and MAD -> approximate sigma
+ const u64 M = median_of_sorted(s, 0, s.size());
+ std::vector absdev;
+ absdev.reserve(N);
+ for (size_t i = 0; i < N; ++i) {
+ const u64 d = (s[i] > M) ? (s[i] - M) : (M - s[i]);
+ absdev.push_back(d);
+ }
+ std::sort(absdev.begin(), absdev.end());
+ const u64 MAD = median_of_sorted(absdev, 0, absdev.size());
+ // convert MAD to an approximate standard-deviation-like measure
+ constexpr long double kmad_to_sigma = 1.4826L; // consistent for normal approx
+ const long double sigma = (MAD == 0) ? 1.0L : (static_cast(MAD) * kmad_to_sigma);
+
+ // find the densest small-valued cluster by sliding a fixed-count window
+ // this locates the most concentrated group of samples (likely it would be the true VMEXIT cluster)
+ // const size_t frac_win = (N * 8 + 99) / 100; // ceil(N * 0.08)
+ // const size_t win = std::min(N, std::max(MIN_WIN, frac_win));
+ const size_t MIN_WIN = 10;
+ // manual min/max calculation for win size
+ const size_t calc_frac = static_cast(std::ceil(static_cast(N) * 0.08));
+ const size_t inner_max = (MIN_WIN > calc_frac) ? MIN_WIN : calc_frac;
+ const size_t win = (N < inner_max) ? N : inner_max;
+
+ size_t best_i = 0;
+ u64 best_span = (s.back() - s.front()) + 1; // large initial
+ for (size_t i = 0; i + win <= N; ++i) {
+ const u64 span = s[i + win - 1] - s[i];
+ if (span < best_span) {
+ best_span = span;
+ best_i = i;
+ }
+ }
- return result;
- };
+ // expand the initial window greedily while staying "tight"
+ // allow expansion while adding samples does not more than multiply the span by EXPAND_FACTOR
+ constexpr long double EXPAND_FACTOR = 1.5L;
+ size_t cluster_lo = best_i;
+ size_t cluster_hi = best_i + win; // exclusive
+ // expand left
+ while (cluster_lo > 0) {
+ const u64 new_span = s[cluster_hi - 1] - s[cluster_lo - 1];
+ if (static_cast(new_span) <= EXPAND_FACTOR * static_cast(best_span) ||
+ (s[cluster_hi - 1] <= (s[cluster_lo - 1] + static_cast(std::ceil(3.0L * sigma))))) {
+ --cluster_lo;
+ // manual min calculation
+ best_span = (best_span < new_span) ? best_span : new_span;
+ }
+ else break;
+ }
+ // expand right
+ while (cluster_hi < N) {
+ const u64 new_span = s[cluster_hi] - s[cluster_lo];
+ if (static_cast(new_span) <= EXPAND_FACTOR * static_cast(best_span) ||
+ (s[cluster_hi] <= (s[cluster_lo] + static_cast(std::ceil(3.0L * sigma))))) {
+ ++cluster_hi;
+ best_span = (best_span < new_span) ? best_span : new_span;
+ }
+ else break;
+ }
- // exercise the XOR loop and CPUID paths to wake up the CPU from low-power states, warm cache and train branch predictor
- volatile u64 warm_x = 0;
- for (int w = 0; w < 64; ++w) cpuid(leaves[w % n_leaves]);
- VMAWARE_UNUSED(warm_x);
+ const size_t cluster_size = (cluster_hi > cluster_lo) ? (cluster_hi - cluster_lo) : 0;
- // Thread 1: start near same cycle as thread 2, do work that cant be intercepted by hypervisors, and set end
- std::thread th1([&]() {
- ready_count.fetch_add(1, std::memory_order_acq_rel);
- while (ready_count.load(std::memory_order_acquire) < 2)
- _mm_pause();
+ // cluster must be reasonably dense and cover a non-negligible portion of samples, so this is pure sanity checks
+ const double fraction_in_cluster = static_cast(cluster_size) / static_cast(N);
- const u64 start = __rdtsc();
- t1_start.store(start, std::memory_order_release);
- state.store(1, std::memory_order_release);
+ // min/max calculation for MIN_CLUSTER
+ const int val_n_50 = static_cast(N / 50);
+ const size_t val_max = static_cast((5 > val_n_50) ? 5 : val_n_50);
+ const size_t MIN_CLUSTER = (val_max < N) ? val_max : N; // at least 2% or 5 elements
- volatile u64 x = 0xDEADBEEFCAFEBABEULL;
- u64 i = 0;
- for (; i < ITER_XOR; ++i) {
- x ^= i;
- x = (x << 1) ^ (x >> 3);
- }
+ if (cluster_size < MIN_CLUSTER || fraction_in_cluster < 0.02) {
+ // low-percentile (10th) trimmed median
+ // Manual max calculation for fallback_count
+ const size_t floor_val = static_cast(std::floor(static_cast(N) * 0.10));
+ const size_t fallback_count = (1 > floor_val) ? 1 : floor_val;
- // Loop extensively without bottlenecking standard execution to guarantee Thread 2 hits the desired cpuid threshold limit
- while (state.load(std::memory_order_acquire) == 1) {
- for (int j = 0; j < 1000; ++j) {
- x ^= i;
- x = (x << 1) ^ (x >> 3);
- ++i;
+ // median of lowest fallback_count elements (if fallback_count==1 that's smallest)
+ if (fallback_count == 1) return s.front();
+ const size_t mid = fallback_count / 2;
+ if (fallback_count & 1) return s[mid];
+ return (s[mid - 1] + s[mid]) / 2;
}
- }
- actual_iters = i;
- VMAWARE_UNUSED(x);
- const u64 end = __rdtsc();
- t1_end.store(end - start, std::memory_order_release);
- state.store(2, std::memory_order_release);
- });
+ // now we try to get a robust estimate inside the cluster, trimmed mean (10% trim) centered on cluster
+ const size_t trim_count = static_cast(std::floor(static_cast(cluster_size) * 0.10));
+ const size_t lo = cluster_lo + trim_count;
+ const size_t hi = cluster_hi - trim_count; // exclusive
+ if (hi <= lo) {
+ // degenerate -> median of cluster
+ return median_of_sorted(s, cluster_lo, cluster_hi);
+ }
- // Thread 2: rdtsc and cpuid spammer, forces hypervisor to downscale TSC if patch is present; if interception disabled, caught by cpuid latency
- std::thread th2([&]() {
- ready_count.fetch_add(1, std::memory_order_acq_rel);
- while (ready_count.load(std::memory_order_acquire) < 2)
- _mm_pause();
+ // sum with long double to avoid overflow and better rounding
+ long double sum = 0.0L;
+ for (size_t i = lo; i < hi; ++i) sum += static_cast(s[i]);
+ const long double avg = sum / static_cast(hi - lo);
+ u64 result = static_cast(std::llround(avg));
- u64 last = __rdtsc();
+ // final sanity adjustments:
+ // if the computed result is suspiciously far from the global median (e.g., > +6*sigma)
+ // clamp toward the median to avoid choosing a high noisy cluster by mistake
+ const long double diff_from_med = static_cast(result) - static_cast(M);
+ if (diff_from_med > 0 && diff_from_med > (6.0L * sigma)) {
+ // clamp to median + 4*sigma (conservative)
+ result = static_cast(std::llround(static_cast(M) + 4.0L * sigma));
+ }
- // track the end of the previous measurement so that the hypervisor cant return spoofed values mid-loop
- u64 prev_post = last;
+ // also, if result is zero (shouldn't be) or extremely small, return a smallest observed sample
+ if (result == 0) result = s.front();
- // local accumulator and local index into samples
- u64 acc = 0;
- size_t idx = 0;
+ return result;
+ };
- // accumulator for spikes that represent potential time debt
- u64 total_spike_debt = 0;
+ const HANDLE current_thread = reinterpret_cast(-2LL);
+ const HANDLE current_process = reinterpret_cast(-1LL);
+ SetThreadAffinityMask(current_thread, 1);
+ SetPriorityClass(current_process, ABOVE_NORMAL_PRIORITY_CLASS); // ABOVE_NORMAL_PRIORITY_CLASS + THREAD_PRIORITY_HIGHEST = 12 base priority
+ SetThreadPriority(current_thread, THREAD_PRIORITY_HIGHEST);
- // for each leaf do CPUID_ITER samples, then repeat
- while (state.load(std::memory_order_acquire) != 2) {
- for (size_t li = 0; li < n_leaves; ++li) {
- const unsigned int leaf = leaves[li];
+ // so that hypervisor can't predict how many samples we will collect
+ std::mt19937 gen(std::random_device{}());
+ std::uniform_int_distribution batch_dist(30000, 70000);
+ const size_t BATCH_SIZE = batch_dist(gen);
+ i32 dummy_res[4]{};
+ std::vector vm_samples(BATCH_SIZE), ref_samples(BATCH_SIZE); // pre-fault the pages via the MMU now; we intentionally won't warm up cpuid samples for the P-states
- for (unsigned i = 0; i < CPUID_ITER; ++i) {
- const u64 now = __rdtsc();
+ state.start_test.store(true, std::memory_order_release); // _mm_pause can be exited conditionally, spam hit L3.
+ while (state.counter == 0) {}
- // If now < last, the hypervisor rewound the TSC too much, we ignore it and let it decrement this thread's TSC to be caught by ratio checks later
- if (now >= last) {
- acc += (now - last);
- }
- last = now;
+ size_t valid = 0;
+ while (valid < BATCH_SIZE) {
+ // interpolated so that any turbo boost, thermal throttling, speculation (for the loop overhead itself, not for the serializing instructions), etc affects both samples
+ u64 v_pre, v_post, r_pre, r_post, sync;
- if (idx < samples.size()) {
- u64 lat = cpuid(leaf);
+ sync = state.counter; while (state.counter == sync); // infer if counter got enough quantum momentum (so it's currently scheduled)
+ sync = state.counter; while (state.counter == sync); // fastest busy-waiting strategy, PAUSE affects cache, calling APIs like SwitchToThread() would be even worse
- // the gap check catches debt injected between our measurements (external window)
- if (now > prev_post) {
- u64 gap = now - prev_post;
+ v_pre = state.counter;
+ std::atomic_signal_fence(std::memory_order_seq_cst); // _ReadWriteBarrier() aka dont emit runtime fences
+ // force cpuid collection here so that the hypervisor is either forced to disable interception and try to bypass latency, or intercept cpuid and try to bypass XSAVE states
+ trigger_vmexit(dummy_res, 0, 0);
+ std::atomic_signal_fence(std::memory_order_seq_cst);
+ v_post = state.counter;
- // If the gap is massive, the hypervisor dumped the debt here or is legitimate interrupt latency
- // we pull it back into our latency measurement
- if (gap > 400) { // 500 is generous for loop latency
- lat += gap;
- }
- }
+ sync = state.counter; while (state.counter == sync); // sync to our counter tick again
+ sync = state.counter; while (state.counter == sync);
- // capture post immediately to track the end of this measurement
- const u64 post = __rdtsc();
- prev_post = post;
+ r_pre = state.counter;
+ std::atomic_signal_fence(std::memory_order_seq_cst);
+ for (int i = 0; i < 8; ++i) _mm_lfence(); // 8 LFENCES is enough for the MESI RFO cache bounce in the data race (so that the counter thread sees an increment)
+ std::atomic_signal_fence(std::memory_order_seq_cst);
+ r_post = state.counter;
- const u64 total_overhead = post - now;
+ // we don't filter by cycles spent here (for example by querying thread cycle time) because the point of this function is to not let either the kernel or this app handle a TSC read
+ if (v_post > v_pre && r_post > r_pre) {
+ vm_samples[valid] = v_post - v_pre;
+ ref_samples[valid] = r_post - r_pre;
+ valid++;
+ }
+ }
- // total_overhead catches debt injected inside the function call (internal window)
- // together with the gap, its physically impossible for a hypervisor to hide the time debt
- if (total_overhead > (lat + 400)) {
- lat = total_overhead;
- }
+ const u64 cpuid_l = calculate_latency(vm_samples); // check for lowest dense cluster with no interrupt spikes, filter noise we can detect (SMIs, etc)
+ const u64 ref_l = calculate_latency(ref_samples);
+ const double ratio = ref_l ? (double)cpuid_l / (double)ref_l : 0;
- // Instead of local redistribution (which fails on single huge spikes, hypervisor might try to pay the time debt but contaminating only one sample),
- // we separate "normal" samples from "spike" samples
- if (lat > cycle_threshold) {
- // This is likely a debt payment (or a massive interrupt)
- // we accumulate the FULL amount into debt
- total_spike_debt += lat;
-
- // We store a "capped" value in the vector for now
- // if we stored the huge outlier, calculate_latency would discard it
- // by capping it, we ensure this sample is treated as "valid but slow" by our calculate_latency lambda
- // and the debt will be added back via the global lift later
- samples[idx] = cycle_threshold;
- }
- else {
- // normal sample
- samples[idx] = lat;
- }
- }
+ debug("TIMER: CPUID -> ", cpuid_l, " | Ref -> ", ref_l, " | Ratio -> ", ratio);
- ++idx;
+ if (ratio >= threshold) hypervisor_detected = true;
- // Force Thread 1 to finalize when minimum sample quota is met
- // this makes impossible for the hypervisor to pause thread 2 so that it only samples a few cpuid, thus only needed to downlock a few million TSC cycles
- // enough to bypass ratio checks
- if (idx == samples.size() && state.load(std::memory_order_relaxed) == 1) {
- state.store(3, std::memory_order_release);
- }
+ // Now detect bypassers letting the VM boot with cpuid interception, and then disabling interception with SVM by flipping bit 18 in the VMCB
+ // if hypervisor lies about the CPU vendor, it will create 100000 more detectable signals (querying intel-specific behavior)
+ if (cpu::is_amd()) {
+ i32 res_d0[4], res_d1[4], res_d12[4], res_ext[4];
+ trigger_vmexit(res_d0, 0xD, 0);
+ trigger_vmexit(res_d1, 0xD, 1);
+ trigger_vmexit(res_d12, 0xD, 12);
+ trigger_vmexit(res_ext, 0x80000008, 0);
- // if thread1 finishes
- if (state.load(std::memory_order_acquire) == 2) break;
- }
- if (state.load(std::memory_order_acquire) == 2) break;
- }
- }
-
- // final rdtsc after detecting finish
- const u64 final_now = __rdtsc();
- if (final_now >= last)
- acc += (final_now - last);
-
- // global lift
- // we now take the total accumulated debt and spread it evenly across
- // ALL samples collected
- //
- // if it was just noise/interrupts:
- // 20,000 cycles / 25,000 samples = +0.8 cycles per sample
- //
- // if it was hypervisor debt (must be paid almost totally, otherwise they would fail the global/local ratio check):
- // 40,000,000 cycles / 25,000 samples = +1600 cycles per sample, always mathematically crossing the threshold no matter when they pay the time debt
- //
- // this preserves the robust filtering of calculate_latency (it still handles small jitter),
- // but effectively raises the sea level of all samples based on the debt to mathematically always catch the hidden vmexit latency
- const size_t captured_count = (idx < samples.size()) ? idx : samples.size();
-
- if (captured_count > 0 && total_spike_debt > 0) {
- // we divide debt by the number of samples that actually contributed to the measurement period
- const u64 lift = total_spike_debt / captured_count;
+ const bool hardware_supports_cet = (res_d12[0] > 0);
+ const u32 active_xcr0_size = (u32)res_d0[1];
+ const u32 active_total_size = (u32)res_d1[1];
- for (size_t i = 0; i < captured_count; ++i) {
- samples[i] += lift;
+ if (hardware_supports_cet && (active_total_size == active_xcr0_size)) {
+ debug("TIMER: VMAware detected a SVM hypervisor with cpuid interception disabled, score was raised up due to a bypass attempt.");
+ bypass_detected = true;
}
}
- t2_end.store(acc, std::memory_order_release);
- });
-
- // logic should be in different cores to force the hypervisor to downscale TSC globally
- affinity_cookie cookie1{};
- affinity_cookie cookie2{};
- if (hw >= 2) {
- cookie1 = set_affinity(th1, 0);
- cookie2 = set_affinity(th2, 1);
- }
-
- th1.join();
- th2.join();
-
- restore_affinity(cookie1);
- restore_affinity(cookie2);
-
- // collect results
- const u64 t1_delta = t1_end.load();
- const u64 t2_delta = t2_end.load();
+ SetPriorityClass(current_process, NORMAL_PRIORITY_CLASS);
+ state.test_done.store(true, std::memory_order_release);
+ };
- std::vector used;
- for (u64 s : samples) if (s != 0) used.push_back(s);
- const u64 cpuid_latency = calculate_latency(used);
+ std::thread t1(counter_thread);
+ std::thread t2(trigger_thread);
- debug("TIMER: T1 delta: ", t1_delta);
- debug("TIMER: T2 delta: ", t2_delta);
- debug("TIMER: VMEXIT latency: ", cpuid_latency);
+ t1.join();
+ t2.join();
- if (cpuid_latency >= cycle_threshold) {
- debug("TIMER: Detected a VMEXIT on CPUID");
- return core::add(brand_enum::NULL_BRAND, 100); // to prevent false positives due to jitter, doesn't trigger a 150 score, so it never reaches 100%
- }
- else if (cpuid_latency <= 25) {
- debug("TIMER: Detected a hypervisor downscaling CPUID latency");
- // cpuid is fully serializing, no CPU have this low average cycles in real-world scenarios
- // however, in patches, zero or even negative deltas can be seen oftenly
- return true;
+ if (hypervisor_detected) {
+ return true; // 100 score
}
-
- // Within the same run, does Thread 2 see a smaller TSC delta than Thread 1?
- // If so, a hypervisor downscaled TSC in the core where exits were occurring to hide vmexit latency
- // while in the other core where no exits occurred, no TSC cycles were decreased, thus thread 2 ran faster than thread 1
- // this logic can be bypassed if the hypervisor downscales TSC in both cores, and that's precisely why we do now a Global Ratio
- const double local_ratio = double(t2_delta) / double(t1_delta);
-
- if (local_ratio < 0.95) {
- debug("TIMER: Detected a hypervisor intercepting TSC locally: ", local_ratio, "");
- return true;
+ else if (bypass_detected) {
+ return core::add(brand_enum::KVM, 150); // 150 score, KVM is a guess
}
- // To calculate the global ratio, we calculate the TSC cycles consumed per iteration of the Thread 1 workload
- // Thread 1 ran this dependency chain, x ^= i; x = (x << 1) ^ (x >> 3)
- // because each instruction depends on the result of the previous one, the CPU cannot execute these in parallel
- // on bare metal, a dependent ALU operation takes a minimum number of core cycles, for this is typically 2-4 per iteration
- const double cycles_per_iter = static_cast(t1_delta) / static_cast(ITER_XOR);
- debug("TIMER: Cycles per XOR iteration (less is faster): ", cycles_per_iter);
-
- if (cycles_per_iter <= 2 || cycles_per_iter >= 8.0) {
- debug("TIMER: Detected a hypervisor dowscaling TSC globally (IPC was impossible): ", cycles_per_iter);
- return true;
- }
+ return hypervisor_detected;
#endif
return false;
}
@@ -8100,7 +8029,7 @@ struct VM {
* @category Windows
* @implements VM::WINE_FUNC
*/
- [[nodiscard]] static bool wine_function() {
+ [[nodiscard]] static bool wine() {
#if (_WIN32_WINNT < _WIN32_WINNT_WIN8)
return false;
#else
@@ -9153,7 +9082,7 @@ struct VM {
if (!ntdll) return false;
const char* names[] = { "RtlInitUnicodeString", "NtOpenFile", "NtClose" };
- void* funcs[sizeof(names) / sizeof(names[0])] = {};
+ void* funcs[ARRAYSIZE(names)] = {};
util::get_function_address(ntdll, names, funcs, (ULONG)(sizeof(names) / sizeof(names[0])));
const auto rtl_init_unicode_string = reinterpret_cast(funcs[0]);
@@ -9747,22 +9676,22 @@ struct VM {
void* funcs[ARRAYSIZE(names)] = {};
util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names));
- using NtAllocateVirtualMemory_t = NTSTATUS(__stdcall*)(HANDLE, PVOID*, ULONG_PTR, PSIZE_T, ULONG, ULONG);
- using NtProtectVirtualMemory_t = NTSTATUS(__stdcall*)(HANDLE, PVOID*, PSIZE_T, ULONG, PULONG);
- using NtFreeVirtualMemory_t = NTSTATUS(__stdcall*)(HANDLE, PVOID*, PSIZE_T, ULONG);
- using NtFlushInstructionCache_t = NTSTATUS(__stdcall*)(HANDLE, PVOID, SIZE_T);
- using NtClose_t = NTSTATUS(__stdcall*)(HANDLE);
- using NtGetContextThread_t = NTSTATUS(__stdcall*)(HANDLE, PCONTEXT);
- using NtSetContextThread_t = NTSTATUS(__stdcall*)(HANDLE, PCONTEXT);
+ using nt_allocate_virtual_memory_t = NTSTATUS(__stdcall*)(HANDLE, PVOID*, ULONG_PTR, PSIZE_T, ULONG, ULONG);
+ using nt_protect_virtual_memory_t = NTSTATUS(__stdcall*)(HANDLE, PVOID*, PSIZE_T, ULONG, PULONG);
+ using nt_free_virtual_memory_t = NTSTATUS(__stdcall*)(HANDLE, PVOID*, PSIZE_T, ULONG);
+ using nt_flush_instruction_cache_t = NTSTATUS(__stdcall*)(HANDLE, PVOID, SIZE_T);
+ using nt_close_t = NTSTATUS(__stdcall*)(HANDLE);
+ using nt_get_context_thread_t = NTSTATUS(__stdcall*)(HANDLE, PCONTEXT);
+ using nt_set_context_thread_t = NTSTATUS(__stdcall*)(HANDLE, PCONTEXT);
 // volatile ensures these are loaded from stack after SEH unwind when compiled with aggressive optimizations
- NtAllocateVirtualMemory_t volatile nt_allocate_virtual_memory = reinterpret_cast(funcs[0]);
- NtProtectVirtualMemory_t volatile nt_protect_virtual_memory = reinterpret_cast(funcs[1]);
- NtFreeVirtualMemory_t volatile nt_free_virtual_memory = reinterpret_cast(funcs[2]);
- NtFlushInstructionCache_t volatile nt_flush_instruction_cache = reinterpret_cast(funcs[3]);
- NtClose_t volatile nt_close = reinterpret_cast(funcs[4]);
- NtGetContextThread_t volatile nt_get_context_thread = reinterpret_cast(funcs[5]);
- NtSetContextThread_t volatile nt_set_context_thread = reinterpret_cast(funcs[6]);
+ nt_allocate_virtual_memory_t volatile nt_allocate_virtual_memory = reinterpret_cast(funcs[0]);
+ nt_protect_virtual_memory_t volatile nt_protect_virtual_memory = reinterpret_cast(funcs[1]);
+ nt_free_virtual_memory_t volatile nt_free_virtual_memory = reinterpret_cast(funcs[2]);
+ nt_flush_instruction_cache_t volatile nt_flush_instruction_cache = reinterpret_cast(funcs[3]);
+ nt_close_t volatile nt_close = reinterpret_cast(funcs[4]);
+ nt_get_context_thread_t volatile nt_get_context_thread = reinterpret_cast(funcs[5]);
+ nt_set_context_thread_t volatile nt_set_context_thread = reinterpret_cast(funcs[6]);
if (!nt_allocate_virtual_memory || !nt_protect_virtual_memory || !nt_flush_instruction_cache ||
!nt_free_virtual_memory || !nt_get_context_thread || !nt_set_context_thread || !nt_close) {
@@ -11797,6 +11726,321 @@ struct VM {
return false;
}
+
+
+ /**
+ * @brief Check whether KVM attempts to patch a mismatched hypercall instruction
+ * @link https://lists.nongnu.org/archive/html/qemu-devel/2025-07/msg05044.html
+ * @category Windows
+ * @implements VM::KVM_INTERCEPTION
+ */
+ [[nodiscard]] static bool kvm_interception() {
+ #if (!x86)
+ return false;
+ #endif
+ using nt_allocate_virtual_memory_t = NTSTATUS(__stdcall*)(HANDLE, PVOID*, ULONG_PTR, PSIZE_T, ULONG, ULONG);
+ using nt_protect_virtual_memory_t = NTSTATUS(__stdcall*)(HANDLE, PVOID*, PSIZE_T, ULONG, PULONG);
+ using nt_free_virtual_memory_t = NTSTATUS(__stdcall*)(HANDLE, PVOID*, PSIZE_T, ULONG);
+ using nt_flush_instruction_cache_t = NTSTATUS(__stdcall*)(HANDLE, PVOID, SIZE_T);
+ using nt_close_t = NTSTATUS(__stdcall*)(HANDLE);
+
+ const HMODULE ntdll = util::get_ntdll();
+ if (!ntdll) return false;
+
+ const char* names[] = { "NtAllocateVirtualMemory", "NtProtectVirtualMemory", "NtFreeVirtualMemory", "NtFlushInstructionCache", "NtClose" };
+ void* funcs[ARRAYSIZE(names)] = {};
+ util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names));
+
+ const auto nt_allocate_virtual_memory = reinterpret_cast(funcs[0]);
+ const auto nt_protect_virtual_memory = reinterpret_cast(funcs[1]);
+ const auto nt_free_virtual_memory = reinterpret_cast(funcs[2]);
+ const auto nt_flush_instruction_cache = reinterpret_cast(funcs[3]);
+ const auto nt_close = reinterpret_cast(funcs[4]);
+
+ if (!nt_allocate_virtual_memory || !nt_protect_virtual_memory || !nt_free_virtual_memory || !nt_flush_instruction_cache || !nt_close)
+ return false;
+
+ // VMCALL (0F 01 C1) + RET (C3) and VMMCALL (0F 01 D9) + RET (C3)
+ constexpr BYTE opcodes[2][4] = {
+ { 0x0F, 0x01, 0xC1, 0xC3 },
+ { 0x0F, 0x01, 0xD9, 0xC3 }
+ };
+
+ const HANDLE current_process = reinterpret_cast(-1);
+ bool is_kvm_detected = false; // KVM-specific behavior, detector is 100% sure it is running under KVM
+ bool generic_hypervisor = false; // behavior present in KVM but other hypervisors might replicate it as well
+
+ for (int i = 0; i < 2; ++i) {
+ PVOID base_address = nullptr;
+ SIZE_T region_size = 0x1000;
+
+ // memory as RWX initially to write the opcode
+ NTSTATUS status = nt_allocate_virtual_memory(
+ current_process, &base_address, 0, ®ion_size,
+ MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
+
+ if (!NT_SUCCESS(status))
+ continue;
+
+ // copy stuff to page
+ memcpy(base_address, opcodes[i], sizeof(opcodes[i]));
+
+ ULONG old_protect = 0;
+ PVOID protect_address = base_address;
+ SIZE_T protect_size = region_size;
+
+ // change memory protection to RX because it is what breaks KVM's instruction patching attempt
+ status = nt_protect_virtual_memory(
+ current_process, &protect_address, &protect_size,
+ PAGE_EXECUTE_READ, &old_protect);
+
+ if (NT_SUCCESS(status)) {
+ DWORD exception_status = 0;
+ __try {
+ const auto execute_hypercall = reinterpret_cast(base_address);
+ execute_hypercall();
+ generic_hypervisor = true; // if no exception occurs then a hypervisor handled it, this is default KVM behavior
+ debug("KVM_INTERCEPTION: Detected a hypervisor intercepting hypercalls");
+ }
+ __except (exception_status = GetExceptionCode(), EXCEPTION_EXECUTE_HANDLER) {
+ // if it's #PF instead of #UD then old KVM quirk is present
+ if (exception_status == EXCEPTION_ACCESS_VIOLATION) {
+ debug("KVM_INTERCEPTION: Detected KVM attempting to patch instructions on the fly");
+ is_kvm_detected = true;
+ }
+ }
+ }
+
+ SIZE_T free_size = 0;
+ nt_free_virtual_memory(current_process, &base_address, &free_size, MEM_RELEASE);
+
+ if (is_kvm_detected) {
+ return core::add(brand_enum::KVM);
+ }
+ else if (generic_hypervisor) {
+ return true;
+ }
+ }
+
+ // the following checks are AMD-only
+ if (!cpu::is_amd()) {
+ return false;
+ }
+
+ struct state {
+ volatile int in_asm;
+ volatile int exception_seen;
+ };
+
+ static thread_local state* tls_state = nullptr;
+
+ state state{};
+ tls_state = &state;
+
+ // lambda to capture exceptions
+ PVECTORED_EXCEPTION_HANDLER handler =
+ +[](PEXCEPTION_POINTERS ep) -> LONG {
+ if (!tls_state || !tls_state->in_asm)
+ return EXCEPTION_CONTINUE_SEARCH;
+
+ const DWORD code = ep->ExceptionRecord->ExceptionCode;
+ if (code == EXCEPTION_ILLEGAL_INSTRUCTION) {
+ tls_state->exception_seen = 1;
+ #if (x86_64)
+ ep->ContextRecord->Rip += 3;
+ #else
+ ep->ContextRecord->Eip += 3;
+ #endif
+ return EXCEPTION_CONTINUE_EXECUTION;
+ }
+ return EXCEPTION_CONTINUE_SEARCH;
+ };
+
+ const PVOID vh = AddVectoredExceptionHandler(1, handler);
+ if (!vh) return false;
+
+ // xor rdpru ret
+ constexpr unsigned char code[] = {
+ 0x31, 0xC9,
+ 0x0F, 0x01, 0xFD,
+ 0xC3
+ };
+
+ PVOID mem = nullptr;
+ SIZE_T size = sizeof(code);
+
+ if (nt_allocate_virtual_memory(
+ current_process,
+ &mem,
+ 0,
+ &size,
+ MEM_COMMIT | MEM_RESERVE,
+ PAGE_EXECUTE_READWRITE) != 0 || !mem)
+ {
+ RemoveVectoredExceptionHandler(vh);
+ return false;
+ }
+
+ memcpy(mem, code, sizeof(code));
+
+ nt_flush_instruction_cache(current_process, mem, sizeof(code));
+
+ using fn_t = void(*)();
+ fn_t fn = reinterpret_cast(mem);
+
+ state.exception_seen = 0;
+ state.in_asm = 1;
+
+ __try {
+ fn();
+ }
+ __except (EXCEPTION_EXECUTE_HANDLER) {
+ state.exception_seen = 1;
+ }
+
+ state.in_asm = 0;
+
+ if (state.exception_seen) {
+ debug("KVM_INTERCEPTION: Detected a hypervisor intercepting performance counter reads");
+ generic_hypervisor = true;
+ }
+
+ SIZE_T free_size = 0;
+ nt_free_virtual_memory(
+ current_process,
+ &mem,
+ &free_size,
+ MEM_RELEASE
+ );
+
+ nt_close(current_process);
+
+ RemoveVectoredExceptionHandler(vh);
+
+ // KVM does this, but other hypervisors might do the same, which is why the result is treated as generic
+ // the kernel might also configure CR4 to inject an exception when CPL > 0, so when this case is detected, trigger a lower-probability score
+ if (generic_hypervisor) {
+ return core::add(brand_enum::NULL_BRAND, 50);
+ }
+
+ return false;
+ }
+
+
+ /**
+ * @brief Check whether a hypervisor uses EPT/NPT hooking to intercept hardware breakpoints
+ * @category Windows
+ * @implements VM::BREAKPOINT
+ */
+ [[nodiscard]] static bool breakpoint() {
+ const HMODULE ntdll = util::get_ntdll();
+ if (!ntdll) return false;
+
+ const char* names[] = {
+ "NtAllocateVirtualMemory",
+ "NtFreeVirtualMemory",
+ "NtGetContextThread",
+ "NtSetContextThread",
+ "RtlAddVectoredExceptionHandler",
+ "RtlRemoveVectoredExceptionHandler"
+ };
+ void* funcs[ARRAYSIZE(names)] = {};
+ util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names));
+
+ using nt_allocate_virtual_memory_t = NTSTATUS(__stdcall*)(HANDLE, PVOID*, ULONG_PTR, PSIZE_T, ULONG, ULONG);
+ using nt_free_virtual_memory_t = NTSTATUS(__stdcall*)(HANDLE, PVOID*, PSIZE_T, ULONG);
+ using net_get_context_thread_t = NTSTATUS(__stdcall*)(HANDLE, PCONTEXT);
+ using nt_set_context_thread_t = NTSTATUS(__stdcall*)(HANDLE, PCONTEXT);
+ using rtl_add_vectored_exception_handler_t = PVOID(__stdcall*)(ULONG, PVECTORED_EXCEPTION_HANDLER);
+ using rtl_remove_vectored_exception_handler_t = ULONG(__stdcall*)(PVOID);
+
+ // volatile ensures these are loaded from stack after SEH unwind when compiled with aggressive optimizations
+ nt_allocate_virtual_memory_t volatile nt_allocate_virtual_memory = reinterpret_cast(funcs[0]);
+ nt_free_virtual_memory_t volatile nt_free_virtual_memory = reinterpret_cast(funcs[1]);
+ net_get_context_thread_t volatile nt_get_context_thread = reinterpret_cast(funcs[2]);
+ nt_set_context_thread_t volatile nt_set_context_thread = reinterpret_cast(funcs[3]);
+ rtl_add_vectored_exception_handler_t volatile rtl_add_vectored_exception_handler = reinterpret_cast(funcs[4]);
+ rtl_remove_vectored_exception_handler_t volatile rtl_remove_vectored_exception_handler = reinterpret_cast(funcs[5]);
+
+ if (!nt_allocate_virtual_memory || !nt_free_virtual_memory || !nt_get_context_thread ||
+ !nt_set_context_thread || !rtl_add_vectored_exception_handler || !rtl_remove_vectored_exception_handler) {
+ return false;
+ }
+
+ HANDLE current_process = reinterpret_cast(-1);
+ HANDLE current_thread = reinterpret_cast(-2);
+
+ PVOID src_page = nullptr;
+ PVOID dst_page = nullptr;
+ SIZE_T region_size = 0x2000;
+
+ // allocate source and destination pages
+ const NTSTATUS status_src = nt_allocate_virtual_memory(current_process, &src_page, 0, ®ion_size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
+ const NTSTATUS status_dst = nt_allocate_virtual_memory(current_process, &dst_page, 0, ®ion_size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
+
+ if (status_src < 0 || status_dst < 0) {
+ if (src_page) { SIZE_T free_size = 0; nt_free_virtual_memory(current_process, &src_page, &free_size, MEM_RELEASE); }
+ if (dst_page) { SIZE_T free_size = 0; nt_free_virtual_memory(current_process, &dst_page, &free_size, MEM_RELEASE); }
+ return false;
+ }
+
+ // initialize src memory
+ __stosb(static_cast(src_page), 0xAB, 0x2000);
+
+ thread_local static volatile bool ermsb_trap_detected = false;
+ ermsb_trap_detected = false;
+
+ // capture-less local lambda decays to PVECTORED_EXCEPTION_HANDLER function pointer
+ auto veh_handler = [](PEXCEPTION_POINTERS ctx) -> LONG {
+ if (ctx->ExceptionRecord->ExceptionCode == EXCEPTION_SINGLE_STEP) {
+ ermsb_trap_detected = true;
+ return EXCEPTION_CONTINUE_EXECUTION;
+ }
+ return EXCEPTION_CONTINUE_SEARCH;
+ };
+
+ const PVOID veh_handle = rtl_add_vectored_exception_handler(1, static_cast(veh_handler));
+ if (!veh_handle) {
+ SIZE_T free_size = 0;
+ nt_free_virtual_memory(current_process, &src_page, &free_size, MEM_RELEASE);
+ nt_free_virtual_memory(current_process, &dst_page, &free_size, MEM_RELEASE);
+ return false;
+ }
+
+ CONTEXT ctx = { 0 };
+ ctx.ContextFlags = CONTEXT_DEBUG_REGISTERS;
+ nt_get_context_thread(current_thread, &ctx);
+
+ // set hw breakpoint inside the source page
+ ctx.Dr0 = reinterpret_cast(src_page) + 0x1000;
+
+ // Dr7 = 0x30001
+ // bit 0 = 1
+ // bits 17:16 = 11b
+ // bits 19:18 = 00b
+ ctx.Dr7 = 0x30001;
+ nt_set_context_thread(current_thread, &ctx);
+
+ __try {
+ __movsb(static_cast(dst_page), static_cast(src_page), 0x2000);
+ }
+ __except (EXCEPTION_EXECUTE_HANDLER) {
+ // veh will already detect if Dr0 fired successfully
+ }
+
+ // Cleanup
+ rtl_remove_vectored_exception_handler(veh_handle);
+
+ ctx.Dr0 = 0;
+ ctx.Dr7 = 0;
+ nt_set_context_thread(current_thread, &ctx);
+
+ SIZE_T free_size = 0;
+ nt_free_virtual_memory(current_process, &src_page, &free_size, MEM_RELEASE);
+ nt_free_virtual_memory(current_process, &dst_page, &free_size, MEM_RELEASE);
+
+ return !ermsb_trap_detected;
+ }
// ADD NEW TECHNIQUE FUNCTION HERE
#endif
@@ -12439,7 +12683,7 @@ struct VM {
case HWMON: return "HWMON";
case DLL: return "DLL";
case HWMODEL: return "HWMODEL";
- case WINE_FUNC: return "WINE_FUNC";
+ case WINE: return "WINE_FUNC";
case POWER_CAPABILITIES: return "POWER_CAPABILITIES";
case PROCESSES: return "PROCESSES";
case LINUX_USER_HOST: return "LINUX_USER_HOST";
@@ -12506,6 +12750,8 @@ struct VM {
case CPU_HEURISTIC: return "CPU_HEURISTIC";
case CLOCK: return "CLOCK";
case MSR: return "MSR";
+ case KVM_INTERCEPTION: return "KVM_INTERCEPTION";
+ case BREAKPOINT: return "BREAKPOINT";
// END OF TECHNIQUE LIST
case DEFAULT: return "DEFAULT";
case ALL: return "ALL";
@@ -13035,27 +13281,29 @@ std::array VM::core::technique_table = [
// START OF TECHNIQUE TABLE
#if (WINDOWS)
{VM::TRAP, {100, VM::trap}},
- {VM::ACPI_SIGNATURE, {100, VM::acpi_signature}},
{VM::NVRAM, {100, VM::nvram}},
+ {VM::HYPERVISOR_QUERY, {100, VM::hypervisor_query}},
+ {VM::ACPI_SIGNATURE, {100, VM::acpi_signature}},
+ {VM::CPU_HEURISTIC, {90, VM::cpu_heuristic}},
{VM::CLOCK, {45, VM::clock}},
{VM::POWER_CAPABILITIES, {45, VM::power_capabilities}},
- {VM::CPU_HEURISTIC, {90, VM::cpu_heuristic}},
- {VM::BOOT_LOGO, {100, VM::boot_logo}},
- {VM::MSR, {100, VM::msr}},
{VM::GPU_CAPABILITIES, {45, VM::gpu_capabilities}},
- {VM::DISK_SERIAL, {100, VM::disk_serial_number}},
+ {VM::KVM_INTERCEPTION, {100, VM::kvm_interception}},
+ {VM::MSR, {100, VM::msr}},
+ {VM::BOOT_LOGO, {100, VM::boot_logo}},
{VM::EDID, {100, VM::edid}},
+ {VM::BREAKPOINT, {100, VM::breakpoint}},
+ {VM::VIRTUAL_PROCESSORS, {100, VM::virtual_processors}},
+ {VM::WINE, {100, VM::wine}},
+ {VM::DBVM_HYPERCALL, {150, VM::dbvm_hypercall}},
{VM::IVSHMEM, {100, VM::ivshmem}},
+ {VM::DISK_SERIAL, {100, VM::disk_serial_number}},
{VM::DRIVERS, {100, VM::drivers}},
{VM::HANDLES, {100, VM::device_handles}},
- {VM::VIRTUAL_PROCESSORS, {100, VM::virtual_processors}},
{VM::KERNEL_OBJECTS, {100, VM::kernel_objects}},
- {VM::HYPERVISOR_QUERY, {100, VM::hypervisor_query}},
{VM::AUDIO, {25, VM::audio}},
{VM::DISPLAY, {25, VM::display}},
- {VM::WINE_FUNC, {100, VM::wine_function}},
{VM::DLL, {50, VM::dll}},
- {VM::DBVM_HYPERCALL, {150, VM::dbvm_hypercall}},
{VM::UD, {100, VM::ud}},
{VM::BLOCKSTEP, {100, VM::blockstep}},
{VM::VMWARE_BACKDOOR, {100, VM::vmware_backdoor}},
@@ -13123,7 +13371,7 @@ std::array VM::core::technique_table = [
{VM::MAC_SYS, {100, VM::mac_sys}},
#endif
- {VM::TIMER, {150, VM::timer}},
+ {VM::TIMER, {100, VM::timer}},
{VM::THREAD_MISMATCH, {50, VM::thread_mismatch}},
{VM::VMID, {100, VM::vmid}},
{VM::CPU_BRAND, {95, VM::cpu_brand}},