From bc7af367cc4ecfd2a0fd8092ed15dbb778fee9cf Mon Sep 17 00:00:00 2001
From: feliam
Date: Thu, 17 Jan 2019 20:11:01 -0300
Subject: [PATCH] Dev state merging loadsave (#1357)

* load/save/replace as needed by state merging

* WIP Move merging to a plugin
---
 manticore/core/executor.py | 119 ++++++++++++++++++++++++++++---------
 1 file changed, 91 insertions(+), 28 deletions(-)

diff --git a/manticore/core/executor.py b/manticore/core/executor.py
index 1192ba300..adc8db83d 100644
--- a/manticore/core/executor.py
+++ b/manticore/core/executor.py
@@ -164,8 +164,75 @@ def choice(self, state_ids):
         else:
             return None
 
-
-
+class Merger(Plugin):
+    def load_state(self, state_id, delete=False):
+        return self.manticore._executor._load_state(state_id, delete)
+
+    def delete_state(self, state_id):
+        return self.manticore._executor._delete_state(state_id)
+
+    def replace_state(self, state_id, state):
+        return self.manticore._executor._replace_state(state_id, state)
+
+    def did_enqueue_state_callback(self, state_id, state):
+        # When a new state is added to the queue we record its id under its PC
+        # so we do not have to reload every state when trying to merge states
+        # stopped at the same PC.
+        with self.locked_context('cpu_stateid_dict', dict) as cpu_stateid_dict:
+            # As we may be running in a different process we need to access
+            # this under a lock and over shared memory, like this.
+            state_ids = cpu_stateid_dict.get(state.cpu.PC, list())
+            state_ids.append(state_id)
+            cpu_stateid_dict[state.cpu.PC] = state_ids
+
+    def will_load_state_callback(self, current_state_id):
+        # When a state is loaded for exploration, check whether we can find it
+        # a mate for merging.
+        with self.locked_context('cpu_stateid_dict', dict) as cpu_stateid_dict:
+            # We hold the lock and work on a copy of the shared context.
+            merged_state = self.load_state(current_state_id)
+            states_at_pc = cpu_stateid_dict.get(merged_state.cpu.PC, [])
+
+            # Remove ourselves from the list of waiting states.
+            assert current_state_id in states_at_pc
+            states_at_pc.remove(current_state_id)
+
+            # Iterate over all remaining states that are waiting for
+            # exploration at the same PC.
+            merged_ids = []
+            for new_state_id in states_at_pc:
+                new_state = self.load_state(new_state_id)
+                (exp_merged_state, exp_new_state, merged_constraint) = merge_constraints(merged_state.constraints, new_state.constraints)
+                is_mergeable, reason = is_merge_possible(merged_state, new_state, merged_constraint)
+
+                if is_mergeable:
+                    # OK, we'll merge it!
+                    merged_state = merge(merged_state, new_state, exp_merged_state, exp_new_state, merged_constraint)
+
+                    # Remove the vestigial links to the old state; delete_state()
+                    # also drops its id from the executor's queue.
+                    self.delete_state(new_state_id)
+
+                    merged_ids.append(new_state_id)
+                    result = "succeeded"
+                else:
+                    result = "failed because of " + reason
+                debug_string = "at PC = " + hex(merged_state.cpu.PC) + \
+                    ", merge " + result + " for state id = " + \
+                    str(current_state_id) + " and " + str(new_state_id)
+                logger.debug(debug_string)
+
+            for i in merged_ids:
+                states_at_pc.remove(i)
+
+            cpu_stateid_dict[merged_state.cpu.PC] = states_at_pc
+
+            # At this point we have merged current_state_id with {merged_ids}
+            # and removed all merged_ids from everywhere.
+
+            # UGLY: we are replacing a state_id; this may break caches in the
+            # future.
+            self.replace_state(current_state_id, merged_state)
 
 
 class Executor(Eventful):
@@ -223,7 +290,6 @@ def __init__(self, initial=None, store=None, policy='random', context=None, **kw
         else:
             if initial is not None:
                 self.add(initial)
-        self.cpu_stateid_dict = {}
 
     def __del__(self):
         self.manager.shutdown()
@@ -265,10 +331,6 @@ def enqueue(self, state):
        '''
        # save the state to secondary storage
        state_id = self._workspace.save_state(state)
-       if state.cpu.PC in self.cpu_stateid_dict:
-           self.cpu_stateid_dict[state.cpu.PC].append(state_id)
-       else:
-           self.cpu_stateid_dict[state.cpu.PC] = [state_id]
        self.put(state_id)
        self._publish('did_enqueue_state', state_id, state)
        return state_id
@@ -350,6 +412,28 @@ def get(self):
             del self._states[self._states.index(state_id)]
         return state_id
 
+    @sync
+    def _load_state(self, state_id, delete=False):
+        if state_id not in self._states:
+            raise Exception("State does not exist")
+        if delete:
+            del self._states[self._states.index(state_id)]
+        return self._workspace.load_state(state_id, delete=delete)
+
+    @sync
+    def _delete_state(self, state_id):
+        if state_id not in self._states:
+            raise Exception("State does not exist")
+        del self._states[self._states.index(state_id)]
+        return self._workspace.rm(state_id)
+
+    @sync
+    def _replace_state(self, state_id, state):
+        if state_id not in self._states:
+            raise Exception("State id does not exist")
+        self._workspace.rm(state_id)
+        self._workspace.save_state(state, state_id)
+
     def list(self):
         ''' Returns the list of states ids currently queued '''
         return list(self._states)
@@ -444,27 +528,6 @@ def run(self):
                 if current_state_id is not None:
                     self._publish('will_load_state', current_state_id)
                     current_state = self._workspace.load_state(current_state_id)
-                    merged_state = current_state
-                    if len(self.cpu_stateid_dict[current_state.cpu.PC]) > 1:
-                        for new_state_id in self.cpu_stateid_dict[current_state.cpu.PC]:
-                            if current_state_id != new_state_id:
-                                new_state = self._workspace.load_state(new_state_id, delete=False)
-                                (exp_merged_state, exp_new_state, merged_constraint) = merge_constraints(merged_state.constraints, new_state.constraints)
-                                is_mergeable, reason = is_merge_possible(merged_state, new_state, merged_constraint)
-                                if is_mergeable:
-                                    merged_state = merge(merged_state, new_state, exp_merged_state, exp_new_state, merged_constraint)
-                                    self._workspace.load_state(new_state_id, delete=True)
-                                    self._states.remove(new_state_id)
-                                    self.cpu_stateid_dict[current_state.cpu.PC].remove(new_state_id)
-                                    is_mergeable = "succeeded"
-                                else:
-                                    is_mergeable = "failed because of " + reason
-                                debug_string = "at PC = " + hex(current_state.cpu.PC) + \
-                                    ", merge " + is_mergeable + " for state id = " + \
-                                    str(current_state_id) + " and " + str(new_state_id)
-                                print(debug_string)
-                        self.cpu_stateid_dict[current_state.cpu.PC].remove(current_state_id)
-                    current_state = merged_state
                     self.forward_events_from(current_state, True)
                     self._publish('did_load_state', current_state, current_state_id)
                     logger.info("load state %r", current_state_id)
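
For anyone trying this branch locally, here is a minimal usage sketch (not part of the patch) of how the new Merger plugin could be attached to a Manticore instance so that its did_enqueue_state / will_load_state callbacks fire during exploration. The target binary path and worker count are placeholders, and the import location follows this WIP patch, where Merger still lives in manticore/core/executor.py.

    from manticore import Manticore
    from manticore.core.executor import Merger  # location per this WIP patch

    # Placeholder target binary; any program Manticore can load would do.
    m = Manticore('./a.out')

    # Register the plugin so states enqueued at the same PC are
    # opportunistically merged before they are loaded for exploration.
    m.register_plugin(Merger())

    m.run(procs=1)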