Skip to content

Commit

Permalink
all: support extra coverage
Browse files Browse the repository at this point in the history
Right now syzkaller only supports coverage collected from the threads that
execute syscalls. However some useful things happen in background threads,
and it would be nice to collect coverage from those threads as well.

This change adds extra coverage support to syzkaller. This coverage is not
associated with a particular syscall, but rather with the whole program.
The executor passes extra coverage to syz-fuzzer over the same IPC mechanism,
with the syscall number set to -1. syz-fuzzer then forwards this coverage to
syz-manager under the call name "extra".

This change requires the following kcov patch:
xairy/linux#2
  • Loading branch information
xairy committed Jan 16, 2019
1 parent d5d60d1 commit 4f51740
Show file tree
Hide file tree
Showing 11 changed files with 265 additions and 106 deletions.
7 changes: 6 additions & 1 deletion executor/common_linux.h
Original file line number Diff line number Diff line change
Expand Up @@ -1791,7 +1791,12 @@ static void sandbox_common()
#endif

struct rlimit rlim;
rlim.rlim_cur = rlim.rlim_max = 200 << 20;
#if SYZ_EXECUTOR
rlim.rlim_cur = rlim.rlim_max = (200 << 20) +
(kMaxThreads * kCoverSize + kExtraCoverSize) * sizeof(void*);
#else
rlim.rlim_cur = rlim.rlim_max = (200 << 20);
#endif
setrlimit(RLIMIT_AS, &rlim);
rlim.rlim_cur = rlim.rlim_max = 32 << 20;
setrlimit(RLIMIT_MEMLOCK, &rlim);
Expand Down
71 changes: 55 additions & 16 deletions executor/executor.cc
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,7 @@ const int kOutPipeFd = kMaxFd - 2; // remapped from stdout
const int kCoverFd = kOutPipeFd - kMaxThreads;
const int kMaxArgs = 9;
const int kCoverSize = 256 << 10;
const int kExtraCoverSize = 256 << 10;
const int kFailStatus = 67;
const int kRetryStatus = 69;
const int kErrorStatus = 68;
Expand Down Expand Up @@ -208,6 +209,8 @@ struct thread_t {
static thread_t threads[kMaxThreads];
static thread_t* last_scheduled;

static cover_t extra_cov;

struct res_t {
bool executed;
uint64 val;
Expand Down Expand Up @@ -288,6 +291,7 @@ static thread_t* schedule_call(int call_index, int call_num, bool colliding, uin
static void handle_completion(thread_t* th);
static void copyout_call_results(thread_t* th);
static void write_call_output(thread_t* th, bool finished);
static void write_extra_output();
static void execute_call(thread_t* th);
static void thread_create(thread_t* th, int id);
static void* worker_thread(void* arg);
Expand Down Expand Up @@ -366,8 +370,11 @@ int main(int argc, char** argv)
if (flag_cover) {
for (int i = 0; i < kMaxThreads; i++) {
threads[i].cov.fd = kCoverFd + i;
cover_open(&threads[i].cov);
cover_open(&threads[i].cov, false);
}
cover_open(&extra_cov, true);
// Don't enable comps because we don't use them in the fuzzer yet.
cover_enable(&extra_cov, false, true);
}

int status = 0;
Expand Down Expand Up @@ -557,8 +564,11 @@ void execute_one()
retry:
uint64* input_pos = (uint64*)input_data;

if (flag_cover && !colliding && !flag_threaded)
cover_enable(&threads[0].cov, flag_collect_comps);
if (flag_cover && !colliding) {
if (!flag_threaded)
cover_enable(&threads[0].cov, flag_collect_comps, false);
cover_reset(&extra_cov);
}

int call_index = 0;
for (;;) {
Expand Down Expand Up @@ -719,6 +729,7 @@ void execute_one()
write_call_output(th, false);
}
}
write_extra_output();
}
}

Expand Down Expand Up @@ -766,21 +777,21 @@ thread_t* schedule_call(int call_index, int call_num, bool colliding, uint64 cop
}

#if SYZ_EXECUTOR_USES_SHMEM
template <typename cover_t>
void write_coverage_signal(thread_t* th, uint32* signal_count_pos, uint32* cover_count_pos)
template <typename cover_data_t>
void write_coverage_signal(cover_t* cov, uint32* signal_count_pos, uint32* cover_count_pos)
{
// Write out feedback signals.
// Currently it is code edges computed as xor of two subsequent basic block PCs.
cover_t* cover_data = ((cover_t*)th->cov.data) + 1;
cover_data_t* cover_data = ((cover_data_t*)cov->data) + 1;
uint32 nsig = 0;
cover_t prev = 0;
for (uint32 i = 0; i < th->cov.size; i++) {
cover_t pc = cover_data[i];
cover_data_t prev = 0;
for (uint32 i = 0; i < cov->size; i++) {
cover_data_t pc = cover_data[i];
if (!cover_check(pc)) {
debug("got bad pc: 0x%llx\n", (uint64)pc);
doexit(0);
}
cover_t sig = pc ^ prev;
cover_data_t sig = pc ^ prev;
prev = hash(pc);
if (dedup(sig))
continue;
Expand All @@ -793,9 +804,9 @@ void write_coverage_signal(thread_t* th, uint32* signal_count_pos, uint32* cover
if (!flag_collect_cover)
return;
// Write out real coverage (basic block PCs).
uint32 cover_size = th->cov.size;
uint32 cover_size = cov->size;
if (flag_dedup_cover) {
cover_t* end = cover_data + cover_size;
cover_data_t* end = cover_data + cover_size;
std::sort(cover_data, end);
cover_size = std::unique(cover_data, end) - cover_data;
}
Expand All @@ -814,8 +825,10 @@ void handle_completion(thread_t* th)
event_isset(&th->ready), event_isset(&th->done), th->executing);
if (th->res != (long)-1)
copyout_call_results(th);
if (!collide && !th->colliding)
if (!collide && !th->colliding) {
write_call_output(th, true);
write_extra_output();
}
th->executing = false;
running--;
if (running < 0)
Expand Down Expand Up @@ -894,9 +907,9 @@ void write_call_output(thread_t* th, bool finished)
*comps_count_pos = comps_size;
} else if (flag_cover) {
if (is_kernel_64_bit)
write_coverage_signal<uint64>(th, signal_count_pos, cover_count_pos);
write_coverage_signal<uint64>(&th->cov, signal_count_pos, cover_count_pos);
else
write_coverage_signal<uint32>(th, signal_count_pos, cover_count_pos);
write_coverage_signal<uint32>(&th->cov, signal_count_pos, cover_count_pos);
}
debug_verbose("out #%u: index=%u num=%u errno=%d finished=%d blocked=%d sig=%u cover=%u comps=%u\n",
completed, th->call_index, th->call_num, reserrno, finished, blocked,
Expand All @@ -922,6 +935,32 @@ void write_call_output(thread_t* th, bool finished)
#endif
}

// Writes coverage collected from background (remote) threads into the shared
// output region as one pseudo call record. Unlike write_call_output, this
// record is not tied to a particular syscall: call index and call num are set
// to -1 so syz-fuzzer attributes the coverage to the whole program (the
// "extra" call). Record layout must mirror write_call_output exactly.
void write_extra_output()
{
#if SYZ_EXECUTOR_USES_SHMEM
	// Extra coverage is only collected in PC-trace mode; comparison
	// collection (flag_collect_comps) is not supported for it.
	if (!flag_cover || flag_collect_comps)
		return;
	cover_collect(&extra_cov);
	// Nothing ran in the background since the last reset; skip the record.
	if (!extra_cov.size)
		return;
	write_output(-1); // call index: -1 marks a non-syscall ("extra") record
	write_output(-1); // call num: -1 for the same reason
	write_output(999); // errno: dummy value, meaningless for extra coverage
	write_output(0); // call flags
	uint32* signal_count_pos = write_output(0); // filled in later
	uint32* cover_count_pos = write_output(0); // filled in later
	write_output(0); // comps count: always 0, comps are not collected here
	// Emit PCs at the width matching the kernel bitness.
	if (is_kernel_64_bit)
		write_coverage_signal<uint64>(&extra_cov, signal_count_pos, cover_count_pos);
	else
		write_coverage_signal<uint32>(&extra_cov, signal_count_pos, cover_count_pos);
	// Reset so the next collection starts from an empty buffer.
	cover_reset(&extra_cov);
	debug_verbose("extra: sig=%u cover=%u\n", *signal_count_pos, *cover_count_pos);
	completed++;
	write_completed(completed);
#endif
}

void thread_create(thread_t* th, int id)
{
th->created = true;
Expand All @@ -939,7 +978,7 @@ void* worker_thread(void* arg)
thread_t* th = (thread_t*)arg;

if (flag_cover)
cover_enable(&th->cov, flag_collect_comps);
cover_enable(&th->cov, flag_collect_comps, false);
for (;;) {
event_wait(&th->ready);
event_reset(&th->ready);
Expand Down
4 changes: 2 additions & 2 deletions executor/executor_bsd.h
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ static long execute_syscall(const call_t* c, long a[kMaxArgs])

#if GOOS_freebsd || GOOS_openbsd

static void cover_open(cover_t* cov)
static void cover_open(cover_t* cov, bool extra)
{
int fd = open("/dev/kcov", O_RDWR);
if (fd == -1)
Expand Down Expand Up @@ -85,7 +85,7 @@ static void cover_open(cover_t* cov)
cov->data_end = cov->data + mmap_alloc_size;
}

static void cover_enable(cover_t* cov, bool collect_comps)
static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
{
int kcov_mode = collect_comps ? KCOV_MODE_TRACE_CMP : KCOV_MODE_TRACE_PC;
#if GOOS_freebsd
Expand Down
51 changes: 41 additions & 10 deletions executor/executor_linux.h
Original file line number Diff line number Diff line change
Expand Up @@ -11,13 +11,30 @@
#include <sys/syscall.h>
#include <unistd.h>

const unsigned long KCOV_TRACE_PC = 0;
const unsigned long KCOV_TRACE_CMP = 1;

// Argument for the KCOV_REMOTE_ENABLE ioctl (see the required kcov patch
// referenced in the commit message). This mirrors a kernel ABI struct, so
// the field order and types must match the kernel definition exactly.
// N is the number of remote handles that trail the fixed-size header.
template <int N>
struct kcov_remote_arg {
	unsigned trace_mode; // KCOV_TRACE_PC or KCOV_TRACE_CMP
	unsigned area_size; // coverage buffer size; set from kExtraCoverSize in cover_enable
	unsigned num_handles; // number of valid entries in handles[]
	__u64 common_handle; // set to procid + 1 in cover_enable — TODO confirm exact kernel semantics
	__u64 handles[N]; // per-subsystem remote handles (e.g. a USB bus handle)
};

#define KCOV_INIT_TRACE32 _IOR('c', 1, uint32)
#define KCOV_INIT_TRACE64 _IOR('c', 1, uint64)
#define KCOV_ENABLE _IO('c', 100)
#define KCOV_DISABLE _IO('c', 101)
#define KCOV_REMOTE_ENABLE _IOW('c', 102, struct kcov_remote_arg<0>)

const unsigned long KCOV_TRACE_PC = 0;
const unsigned long KCOV_TRACE_CMP = 1;
#define KCOV_REMOTE_HANDLE_USB 0x4242000000000000ull

// Builds the kcov remote-coverage handle identifying the USB subsystem
// instance for the given bus number.
static inline __u64 kcov_remote_handle_usb(int bus)
{
	__u64 handle = KCOV_REMOTE_HANDLE_USB;
	handle += (__u64)bus;
	return handle;
}

static bool detect_kernel_bitness();

Expand All @@ -38,7 +55,7 @@ static long execute_syscall(const call_t* c, long a[kMaxArgs])
return syscall(c->sys_nr, a[0], a[1], a[2], a[3], a[4], a[5]);
}

static void cover_open(cover_t* cov)
static void cover_open(cover_t* cov, bool extra)
{
int fd = open("/sys/kernel/debug/kcov", O_RDWR);
if (fd == -1)
Expand All @@ -47,25 +64,39 @@ static void cover_open(cover_t* cov)
fail("filed to dup2(%d, %d) cover fd", fd, cov->fd);
close(fd);
const int kcov_init_trace = is_kernel_64_bit ? KCOV_INIT_TRACE64 : KCOV_INIT_TRACE32;
if (ioctl(cov->fd, kcov_init_trace, kCoverSize))
const int cover_size = extra ? kExtraCoverSize : kCoverSize;
if (ioctl(cov->fd, kcov_init_trace, cover_size))
fail("cover init trace write failed");
size_t mmap_alloc_size = kCoverSize * (is_kernel_64_bit ? 8 : 4);
size_t mmap_alloc_size = cover_size * (is_kernel_64_bit ? 8 : 4);
cov->data = (char*)mmap(NULL, mmap_alloc_size,
PROT_READ | PROT_WRITE, MAP_SHARED, cov->fd, 0);
if (cov->data == MAP_FAILED)
fail("cover mmap failed");
cov->data_end = cov->data + mmap_alloc_size;
}

static void cover_enable(cover_t* cov, bool collect_comps)
static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
{
int kcov_mode = collect_comps ? KCOV_TRACE_CMP : KCOV_TRACE_PC;
// This should be fatal,
// The KCOV_ENABLE call should be fatal,
// but in practice ioctl fails with assorted errors (9, 14, 25),
// so we use exitf.
if (ioctl(cov->fd, KCOV_ENABLE, kcov_mode))
exitf("cover enable write trace failed, mode=%d", kcov_mode);
current_cover = cov;
if (!extra) {
if (ioctl(cov->fd, KCOV_ENABLE, kcov_mode))
exitf("cover enable write trace failed, mode=%d", kcov_mode);
current_cover = cov;
return;
}
struct kcov_remote_arg<1> arg;
memset(&arg, 0, sizeof(arg));
arg.trace_mode = kcov_mode;
// Coverage buffer size of remote threads.
arg.area_size = kExtraCoverSize * (is_kernel_64_bit ? 8 : 4);
arg.num_handles = 1;
arg.handles[0] = kcov_remote_handle_usb(procid);
arg.common_handle = procid + 1;
if (ioctl(cov->fd, KCOV_REMOTE_ENABLE, &arg))
exitf("cover enable write trace failed");
}

static void cover_reset(cover_t* cov)
Expand Down
4 changes: 2 additions & 2 deletions executor/nocover.h
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
// Copyright 2018 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.

static void cover_open(cover_t* cov)
// No-op stub: this target has no kcov support, so there is nothing to open.
// Kept signature-compatible with the real implementations in executor_linux.h
// and executor_bsd.h.
static void cover_open(cover_t* cov, bool extra)
{
}

static void cover_enable(cover_t* cov, bool collect_comps)
// No-op stub: coverage collection is unsupported on this target, so enabling
// (including extra/remote coverage) does nothing.
static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
{
}

Expand Down
7 changes: 6 additions & 1 deletion pkg/csource/generated.go
Original file line number Diff line number Diff line change
Expand Up @@ -3589,7 +3589,12 @@ static void sandbox_common()
#endif
struct rlimit rlim;
rlim.rlim_cur = rlim.rlim_max = 200 << 20;
#if SYZ_EXECUTOR
rlim.rlim_cur = rlim.rlim_max = (200 << 20) +
(kMaxThreads * kCoverSize + kExtraCoverSize) * sizeof(void*);
#else
rlim.rlim_cur = rlim.rlim_max = (200 << 20);
#endif
setrlimit(RLIMIT_AS, &rlim);
rlim.rlim_cur = rlim.rlim_max = 32 << 20;
setrlimit(RLIMIT_MEMLOCK, &rlim);
Expand Down
Loading

0 comments on commit 4f51740

Please sign in to comment.