Skip to content

Commit

Permalink
i#2350 rseq: Support cache pre-population
Browse files Browse the repository at this point in the history
Fixes the lazy rseq support to handle code cache pre-population.
Previously, rseq code blocks could be created without rseq handling
because the lazy checks were not triggered until after pre-population.

Issue: #2350
  • Loading branch information
derekbruening committed Jul 20, 2019
1 parent a78ee4c commit 3ea227a
Show file tree
Hide file tree
Showing 4 changed files with 71 additions and 56 deletions.
93 changes: 47 additions & 46 deletions core/unix/module.c
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@ extern vm_area_vector_t *loaded_module_areas;
vm_area_vector_t *d_r_rseq_areas;
DECLARE_CXTSWPROT_VAR(static mutex_t rseq_trigger_lock,
INIT_LOCK_FREE(rseq_trigger_lock));
static volatile bool rseq_enabled;
# endif

void
Expand All @@ -60,6 +61,8 @@ os_modules_init(void)
# ifdef LINUX
VMVECTOR_ALLOC_VECTOR(d_r_rseq_areas, GLOBAL_DCONTEXT,
VECTOR_SHARED | VECTOR_NEVER_MERGE, rseq_areas);
if (rseq_is_registered_for_current_thread())
rseq_enabled = true;
# endif
}

Expand All @@ -72,6 +75,46 @@ os_modules_exit(void)
# endif
}

# ifdef LINUX
/* Restartable sequence region identification. It lives here because it involves
* reading ELF section headers.
*
* To avoid extra overhead going to disk to read section headers, we delay looking
* for rseq data until the app invokes an rseq syscall (or on attach we see a thread
* that has rseq set up). We document that we do not handle the app using rseq
* regions for non-rseq purposes, so we do not need to flush the cache here.
*/
/* Performs the one-time scan of all loaded modules for rseq regions.
 * Uses double-checked locking on the rseq_enabled flag so the common
 * (already-enabled) case pays no lock cost.
 */
void
module_locate_rseq_regions(void)
{
    /* Fast path: a prior call already triggered the scan. */
    if (rseq_enabled)
        return;
    d_r_mutex_lock(&rseq_trigger_lock);
    if (!rseq_enabled) {
        /* The flag lives in rarely-written protected data. */
        SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
        rseq_enabled = true;
        SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);

        /* Walk every module already mapped and look for rseq data. */
        module_iterator_t *mi = module_iterator_start();
        while (module_iterator_hasnext(mi))
            module_init_rseq(module_iterator_next(mi), false /*!at_map*/);
        module_iterator_stop(mi);
    }
    d_r_mutex_unlock(&rseq_trigger_lock);
}

/* Scans a single module for rseq data, but only once rseq use has been
 * observed (rseq_enabled set); otherwise does nothing.
 */
static void
module_init_rseq_if_enabled(module_area_t *ma, bool at_map)
{
    /* Stay lazy: skip the (disk-touching) section scan until needed. */
    if (!rseq_enabled)
        return;
    module_init_rseq(ma, at_map);
}
# endif

/* view_size can be the size of the first mapping, to handle non-contiguous
* modules -- we'll update the module's size here
*/
Expand Down Expand Up @@ -207,6 +250,10 @@ os_module_area_init(module_area_t *ma, app_pc base, size_t view_size, bool at_ma
ma->os_data.checksum = d_r_crc32((const char *)ma->start, PAGE_SIZE);
}
/* Timestamp we just leave as 0 */

# ifdef LINUX
module_init_rseq_if_enabled(ma, at_map);
# endif
}

void
Expand Down Expand Up @@ -566,52 +613,6 @@ module_get_nth_segment(app_pc module_base, uint n, app_pc *start /*OPTIONAL OUT*
return res;
}

# ifdef LINUX
/* Restartable sequence region identification. It lives here because it involves
* reading ELF section headers.
*
* To avoid extra overhead going to disk to read section headers, we delay looking
* for rseq data until the app invokes an rseq syscall (or on attach we see a thread
* that has rseq set up). We document that we do not handle the app using rseq
* regions for non-rseq purposes, so we do not need to flush the cache here.
*/
/* Pre-commit variant (removed by this commit): one-shot scan of all loaded
 * modules for rseq regions, guarded by a local static flag rather than the
 * file-scope rseq_enabled introduced by the new version.
 */
void
module_locate_rseq_regions(void)
{
    /* One-shot latch: zero-initialized; set once the scan has run. */
    static volatile bool located_regions;
    if (located_regions)
        return;
    /* Double-checked locking: re-test under the lock to serialize racers. */
    d_r_mutex_lock(&rseq_trigger_lock);
    if (located_regions) {
        d_r_mutex_unlock(&rseq_trigger_lock);
        return;
    }
    /* The latch lives in rarely-written protected data; unprotect to set it. */
    SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
    located_regions = true;
    SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);

    /* Scan every currently-loaded module for an rseq table. */
    module_iterator_t *iter = module_iterator_start();
    while (module_iterator_hasnext(iter)) {
        module_area_t *ma = module_iterator_next(iter);
        if (!module_init_rseq(ma, false /*!at_map*/)) {
            /* Debug-build-only diagnostics when identification fails;
             * the vdso is excluded from the once-only warning since it
             * commonly lacks readable section headers.
             */
            DODEBUG({
                const char *name = GET_MODULE_NAME(&ma->names);
                if (name == NULL)
                    name = "(null)";
                LOG(GLOBAL, LOG_INTERP | LOG_VMAREAS, 2,
                    "%s: error looking for rseq table in %s\n", __FUNCTION__, name);
                if (strstr(name, "linux-vdso.so") == NULL) {
                    SYSLOG_INTERNAL_WARNING_ONCE(
                        "Failed to identify whether a module has an rseq table");
                }
            });
        }
    }
    module_iterator_stop(iter);
    d_r_mutex_unlock(&rseq_trigger_lock);
}
# endif

#endif /* !NOT_DYNAMORIO_CORE_PROPER */

#ifdef CLIENT_INTERFACE
Expand Down
13 changes: 13 additions & 0 deletions core/unix/module_elf.c
Original file line number Diff line number Diff line change
Expand Up @@ -1731,6 +1731,19 @@ module_init_rseq(module_area_t *ma, bool at_map)
os_unmap_file(sec_map, sec_size);
if (fd != INVALID_FILE)
os_close(fd);
DODEBUG({
if (!res) {
const char *name = GET_MODULE_NAME(&ma->names);
if (name == NULL)
name = "(null)";
LOG(GLOBAL, LOG_INTERP | LOG_VMAREAS, 2,
"%s: error looking for rseq table in %s\n", __FUNCTION__, name);
if (strstr(name, "linux-vdso.so") == NULL) {
SYSLOG_INTERNAL_WARNING_ONCE(
"Failed to identify whether a module has an rseq table");
}
}
});
return res;
}

Expand Down
13 changes: 6 additions & 7 deletions core/unix/os.c
Original file line number Diff line number Diff line change
Expand Up @@ -9901,6 +9901,12 @@ os_take_over_all_unknown_threads(dcontext_t *dcontext)
CLIENT_ASSERT(thread_takeover_records == NULL,
"Only one thread should attempt app take over!");

#ifdef LINUX
/* Check this thread for rseq in between setup and start. */
if (rseq_is_registered_for_current_thread())
module_locate_rseq_regions();
#endif

/* Find tids for which we have no thread record, meaning they are not under
* our control. Shift them to the beginning of the tids array.
*/
Expand Down Expand Up @@ -9998,13 +10004,6 @@ os_take_over_all_unknown_threads(dcontext_t *dcontext)
d_r_mutex_unlock(&thread_initexit_lock);
HEAP_ARRAY_FREE(dcontext, tids, thread_id_t, num_threads, ACCT_THREAD_MGT, PROTECTED);

#ifdef LINUX
/* Check this thread for rseq as well.
*/
if (rseq_is_registered_for_current_thread())
module_locate_rseq_regions();
#endif

return threads_to_signal > 0;
}

Expand Down
8 changes: 5 additions & 3 deletions core/utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -448,10 +448,12 @@ enum {
* < dynamo_areas < global_alloc_lock */
# ifdef LINUX
LOCK_RANK(rseq_trigger_lock), /* < rseq_areas, < module_data_lock */
LOCK_RANK(rseq_areas), /* < dynamo_areas < global_alloc_lock */
# endif
LOCK_RANK(module_data_lock), /* < loaded_module_areas, < special_heap_lock,
* > executable_areas */
LOCK_RANK(module_data_lock), /* < loaded_module_areas, < special_heap_lock,
* > executable_areas */
# ifdef LINUX
LOCK_RANK(rseq_areas), /* < dynamo_areas < global_alloc_lock, > module_data_lock */
# endif
LOCK_RANK(special_units_list_lock), /* < special_heap_lock */
LOCK_RANK(special_heap_lock), /* > bb_building_lock, > hotp_vul_table_lock
* < dynamo_areas, < heap_unit_lock */
Expand Down

0 comments on commit 3ea227a

Please sign in to comment.