As of macOS 12 (Monterey), Apple's Virtualization framework has nice support for macOS guest virtual machines, but with severe limitations: for example, you can't install a macOS guest on Intel Macs, install guests running newer versions of macOS than the host, copy and paste between the host and the guest, or install third-party kernel extensions in the guest. As usual for Apple, the functionality they do support is nicely implemented, but they've left out so much that the result is only marginally useful -- at least compared to what it could be.
// export DYLD_SHARED_REGION=1
// ./library_injector /path/to/libentitlement_bypass.dylib /System/Library/Frameworks/Virtualization.framework/Versions/A/XPCServices/com.apple.Virtualization.VirtualMachine.xpc
#import <xpc/xpc.h>

// Declare the libxpc function we are going to interpose on.
xpc_object_t xpc_connection_copy_entitlement_value(xpc_connection_t, const char *);

// Claim that the connecting process holds whatever entitlement it is asked about.
xpc_object_t overridden_xpc_connection_copy_entitlement_value(xpc_connection_t connection, const char *entitlement) {
	return xpc_bool_create(true);
}
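On its own, the override above is never called; dyld has to be told to substitute it for the real function. A minimal sketch using dyld's __DATA,__interpose mechanism (the struct name is arbitrary):

// Entries in the __DATA,__interpose section are (replacement, replacee)
// pairs that dyld rewires at load time for DYLD_INSERT_LIBRARIES payloads.
__attribute__((used, section("__DATA,__interpose"))) static struct {
	void *replacement;
	void *replacee;
} interpose_xpc_connection_copy_entitlement_value = {
	(void *)overridden_xpc_connection_copy_entitlement_value,
	(void *)xpc_connection_copy_entitlement_value,
};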
// To compile: clang++ -arch x86_64 -arch arm64 -std=c++20 library_injector.cpp -lbsm -lEndpointSecurity -o library_injector,
// then codesign with com.apple.developer.endpoint-security.client and run the
// program as root.
#include <EndpointSecurity/EndpointSecurity.h>
#include <algorithm>
#include <array>
#include <bsm/libbsm.h>
#include <cstddef>
#include <cstdint>
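The listing above is just the preamble; the injector's core is an Endpoint Security client watching process execution. A minimal sketch of that setup, under the assumption that it subscribes to exec authorization events (the real injector's handler does considerably more than log):

#include <EndpointSecurity/EndpointSecurity.h>
#include <bsm/libbsm.h>
#include <dispatch/dispatch.h>
#include <cstdio>
#include <cstdlib>

int main() {
	es_client_t *client = nullptr;
	// The handler block runs for every subscribed event.
	es_new_client_result_t result = es_new_client(&client, ^(es_client_t *c, const es_message_t *message) {
		if (message->event_type == ES_EVENT_TYPE_AUTH_EXEC) {
			// audit_token_to_pid() (libbsm) recovers the pid behind the event.
			pid_t pid = audit_token_to_pid(message->process->audit_token);
			printf("exec from pid %d\n", pid);
			// Let the exec continue; an injector would modify it first.
			es_respond_auth_result(c, message, ES_AUTH_RESULT_ALLOW, false);
		}
	});
	if (result != ES_NEW_CLIENT_RESULT_SUCCESS) {
		fprintf(stderr, "es_new_client failed: %d\n", result);
		return EXIT_FAILURE;
	}
	es_event_type_t events[] = {ES_EVENT_TYPE_AUTH_EXEC};
	if (es_subscribe(client, events, 1) != ES_RETURN_SUCCESS) {
		fprintf(stderr, "es_subscribe failed\n");
		return EXIT_FAILURE;
	}
	dispatch_main(); // park the main thread; events arrive on an internal queue
}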
Modern fuzzers like AFL (American Fuzzy Lop) use coverage-guided mutation to explore program paths, but they can plateau on complex conditions or highly structured inputs because their mutations are blind and random. Neural networks offer a way past that plateau: by learning patterns in inputs and program behavior, they can steer mutations toward new code coverage. This document surveys the main neural network approaches (surrogate models, reinforcement learning, and generative models), how each improves code coverage in fuzzing, and the key research and workflow improvements behind them.
One successful approach is to use surrogate neural networks to approximate a program’s branching behavior, thereby enabling gradient-guided input generation. NEUZZ (2019) pioneered this by training a feed-forward neural network to learn a smooth approximation of the target program’s logic.
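Because the surrogate is differentiable (unlike the program itself), gradients of its predicted branch outputs with respect to the input bytes reveal which bytes most influence a target branch. A minimal C sketch of that idea, using a toy one-hidden-layer surrogate with a single output neuron standing in for one coverage bit (sizes and weights are placeholders; NEUZZ itself trains a larger network over the program's full edge bitmap):

#include <math.h>

#define IN  64   /* input bytes shown to the surrogate */
#define HID 32   /* hidden units */

/* Placeholder parameters; a real surrogate is trained offline on
 * (input, coverage-bitmap) pairs collected while fuzzing. */
static float W1[HID][IN], b1[HID], W2[HID], b2;

/* Gradient of sigmoid(W2 . relu(W1 x + b1) + b2) w.r.t. each input byte. */
static void input_gradient(const unsigned char *x, float *grad) {
	float h[HID], z2 = b2;
	for (int j = 0; j < HID; j++) {
		float z = b1[j];
		for (int i = 0; i < IN; i++) z += W1[j][i] * (x[i] / 255.0f);
		h[j] = z > 0 ? z : 0;  /* ReLU */
		z2 += W2[j] * h[j];
	}
	float y = 1.0f / (1.0f + expf(-z2));
	for (int i = 0; i < IN; i++) {
		grad[i] = 0;
		for (int j = 0; j < HID; j++)
			if (h[j] > 0)  /* only active ReLU units pass gradient */
				grad[i] += y * (1 - y) * W2[j] * W1[j][i] / 255.0f;
	}
}

/* One gradient-guided mutation: nudge the byte with the largest |gradient|
 * in the direction that raises the predicted branch probability. */
static void mutate(unsigned char *x) {
	float g[IN];
	input_gradient(x, g);
	int best = 0;
	for (int i = 1; i < IN; i++)
		if (fabsf(g[i]) > fabsf(g[best])) best = i;
	x[best] += g[best] > 0 ? 1 : -1;
}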
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

bool isKTX2Header(const uint8_t *buffer, size_t size) {
	const uint8_t ktx2Identifier[12] = {0xAB, 0x4B, 0x54, 0x58, 0x20, 0x32,
	                                    0x30, 0xBB, 0x0D, 0x0A, 0x1A, 0x0A};
	if (size < 12) {
		return false; // Buffer is too small to be a KTX2 header
	}
	// Compare the first 12 bytes of the buffer with the KTX2 identifier
	return memcmp(buffer, ktx2Identifier, 12) == 0;
}
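A 12-byte magic check like this is exactly the kind of branch where blind random mutation stalls and a learned gradient helps. A hypothetical fuzzing harness around it (LLVMFuzzerTestOneInput is the standard libFuzzer entry point; the parsing stub is an assumption):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

bool isKTX2Header(const uint8_t *buffer, size_t size);

// Build with: clang -fsanitize=fuzzer harness.c ktx2.c
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
	if (isKTX2Header(data, size)) {
		// Deeper KTX2 container parsing would go here; reaching this
		// block at all is the coverage milestone a guided mutator targets.
	}
	return 0;
}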
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <stdint.h>
#include <time.h>

#define RAH_VALID (1 << 31) /* AV */
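RAH_VALID matches the Address Valid (AV) bit of the Intel e1000 family's Receive Address High register, so presumably this code programs the NIC's MAC address filters. A hypothetical helper showing the bit in use (ral/rah stand in for pointers into the device's mapped register BAR):

/* Hypothetical: write a MAC address into a receive-address pair and set
 * the AV bit so the NIC starts matching frames against it. */
static void set_mac_filter(volatile uint32_t *ral, volatile uint32_t *rah,
                           const uint8_t mac[6])
{
	*ral = (uint32_t)mac[0] | (uint32_t)mac[1] << 8 |
	       (uint32_t)mac[2] << 16 | (uint32_t)mac[3] << 24;
	*rah = ((uint32_t)mac[4] | (uint32_t)mac[5] << 8) | RAH_VALID;
}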
1-
fleh_dispatch64 (arm64 exception vector, in assembly) -> sleh_synchronous ->
handle_abort -> handle_kernel_abort ->
panic_with_thread_kernel_state("Kernel data abort.", state);

2-
Entry(hndl_alltraps)
	mov	%esi, %eax	/* %esi holds the trapped frame's CS selector */
	testb	$3, %al		/* CPL bits: did the trap come from user mode? */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <dlfcn.h>
#include <stdint.h>

int main(int argc, char** argv)
{
	void *handle;
	int (*kas_info)(int, uint64_t *, size_t *);
	uint64_t kaslr_slide = 0;
	size_t kaslr_size = sizeof(kaslr_slide);
	/* kas_info(2) is private; resolve its libsystem wrapper at runtime */
	handle = dlopen("/usr/lib/libSystem.B.dylib", RTLD_NOW);
	kas_info = (int (*)(int, uint64_t *, size_t *))dlsym(handle, "kas_info");
	/* selector 0 = KAS_INFO_KERNEL_TEXT_SLIDE_SELECTOR; requires root */
	if (!kas_info || kas_info(0, &kaslr_slide, &kaslr_size) != 0)
		return EXIT_FAILURE;
	printf("kernel text slide: 0x%llx\n", kaslr_slide);
	return EXIT_SUCCESS;
}