From d565c8cd4823d0636e768d3a76acced14b8f7993 Mon Sep 17 00:00:00 2001
From: Kristian Spangsege
Date: Thu, 15 Aug 2013 15:43:00 +0200
Subject: [PATCH 01/20] Another small round of cleanup

---
 src/tightdb/array.cpp | 560 ++++++++++++++++++++++--------------------
 src/tightdb/array.hpp |  14 +-
 2 files changed, 313 insertions(+), 261 deletions(-)

diff --git a/src/tightdb/array.cpp b/src/tightdb/array.cpp
index bab7d180664..e1c054918cd 100644
--- a/src/tightdb/array.cpp
+++ b/src/tightdb/array.cpp
@@ -2078,6 +2078,7 @@ void Array::find_all(Array& result, int64_t value, size_t colOffset, size_t star
     return;
 }
 
+
 bool Array::find(int cond, Action action, int64_t value, size_t start, size_t end, size_t baseindex, QueryState *state) const
 {
     if (cond == cond_Equal) {
@@ -2191,6 +2192,7 @@ size_t Array::find_first(int64_t value, size_t start, size_t end) const
     return find_first(value, start, end);
 }
 
+
 // Get containing array block direct through column b-tree without instantiating any Arrays. Calling with
 // use_retval = true will return itself if leaf and avoid unnecessary header initialization.
 const Array* Array::GetBlock(size_t ndx, Array& arr, size_t& off,
@@ -2210,50 +2212,51 @@ const Array* Array::GetBlock(size_t ndx, Array& arr, size_t& off,
     return &arr;
 }
 
+
 // Find value direct through column b-tree without instantiating any Arrays.
 size_t Array::ColumnFind(int64_t target, ref_type ref, Array& cache) const
 {
     char* header = m_alloc.translate(ref);
     bool is_leaf = get_isleaf_from_header(header);
 
-    if (!is_leaf) {
-        const char* data = get_data_from_header(header);
-        size_t width = get_width_from_header(header);
+    if (is_leaf) {
+        cache.CreateFromHeaderDirect(header);
+        return cache.find_first(target, 0, -1);
+    }
 
-        // Get subnode table
-        ref_type offsets_ref = to_ref(get_direct(data, width, 0));
-        ref_type refs_ref = to_ref(get_direct(data, width, 1));
+    const char* data = get_data_from_header(header);
+    size_t width = get_width_from_header(header);
 
-        const char* offsets_header = m_alloc.translate(offsets_ref);
-        const char* offsets_data = get_data_from_header(offsets_header);
-        size_t offsets_width = get_width_from_header(offsets_header);
-        size_t offsets_size = get_size_from_header(offsets_header);
+    // Get subnode table
+    ref_type offsets_ref = to_ref(get_direct(data, width, 0));
+    ref_type refs_ref = to_ref(get_direct(data, width, 1));
 
-        const char* refs_header = m_alloc.translate(refs_ref);
-        const char* refs_data = get_data_from_header(refs_header);
-        size_t refs_width = get_width_from_header(refs_header);
+    const char* offsets_header = m_alloc.translate(offsets_ref);
+    const char* offsets_data = get_data_from_header(offsets_header);
+    size_t offsets_width = get_width_from_header(offsets_header);
+    size_t offsets_size = get_size_from_header(offsets_header);
 
-        // Iterate over nodes until we find a match
-        size_t offset = 0;
-        for (size_t i = 0; i < offsets_size; ++i) {
-            ref_type ref = to_ref(get_direct(refs_data, refs_width, i));
-            size_t result = ColumnFind(target, ref, cache);
-            if (result != not_found)
-                return offset + result;
+    const char* refs_header = m_alloc.translate(refs_ref);
+    const char* refs_data = get_data_from_header(refs_header);
+    size_t refs_width = get_width_from_header(refs_header);
 
-            size_t off = to_size_t(get_direct(offsets_data, offsets_width, i));
-            offset = off;
-        }
+    // Iterate over nodes until we find a match
+    size_t offset = 0;
+    for (size_t i = 0; i < offsets_size; ++i) {
+        ref_type ref = to_ref(get_direct(refs_data, refs_width, i));
+        size_t result = 
ColumnFind(target, ref, cache); + if (result != not_found) + return offset + result; - // if we get to here there is no match - return not_found; - } - else { - cache.CreateFromHeaderDirect(header); - return cache.find_first(target, 0, -1); + size_t off = to_size_t(get_direct(offsets_data, offsets_width, i)); + offset = off; } + + // if we get to here there is no match + return not_found; } + size_t Array::IndexStringFindFirst(StringData value, void* column, StringGetter get_func) const { StringData value_2 = value; @@ -2299,63 +2302,74 @@ size_t Array::IndexStringFindFirst(StringData value, void* column, StringGetter key_type stored_key = key_type(get_direct<32>(offsets_data, pos)); - if (stored_key == key) { - // Literal row index - if (ref & 1) { - size_t row_ref = size_t(uint64_t(ref) >> 1); + if (stored_key != key) + return not_found; - // If the last byte in the stored key is zero, we know that we have - // compared against the entire (target) string - if (!(stored_key << 24)) return row_ref; + // Literal row index + if (ref & 1) { + size_t row_ref = size_t(uint64_t(ref) >> 1); - StringData str = (*get_func)(column, row_ref); - if (str == value) return row_ref; - else return not_found; - } + // If the last byte in the stored key is zero, we know that we have + // compared against the entire (target) string + if (!(stored_key << 24)) + return row_ref; - const char* sub_header = m_alloc.translate(to_ref(ref)); - const bool sub_isindex = get_indexflag_from_header(sub_header); + StringData str = (*get_func)(column, row_ref); + if (str == value) + return row_ref; + return not_found; + } - // List of matching row indexes - if (!sub_isindex) { - const char* sub_data = get_data_from_header(sub_header); - const size_t sub_width = get_width_from_header(sub_header); - const bool sub_isleaf = get_isleaf_from_header(sub_header); - - // In most cases the row list will just be an array but there - // might be so many matches that it has branched into a column - size_t row_ref; - if (sub_isleaf) - row_ref = to_size_t(get_direct(sub_data, sub_width, 0)); - else { - Array sub(to_ref(ref), 0, 0, m_alloc); - pair p = sub.find_btree_leaf(0); - const char* leaf_header = p.first.m_addr; - row_ref = to_size_t(get(leaf_header, 0)); - } + const char* sub_header = m_alloc.translate(to_ref(ref)); + const bool sub_isindex = get_indexflag_from_header(sub_header); - // If the last byte in the stored key is zero, we know that we have - // compared against the entire (target) string - if (!(stored_key << 24)) return row_ref; + // List of matching row indexes + if (!sub_isindex) { + const char* sub_data = get_data_from_header(sub_header); + const size_t sub_width = get_width_from_header(sub_header); + const bool sub_isleaf = get_isleaf_from_header(sub_header); - StringData str = (*get_func)(column, row_ref); - if (str == value) return row_ref; - else return not_found; + // In most cases the row list will just be an array but + // there might be so many matches that it has branched + // into a column + size_t row_ref; + if (sub_isleaf) + row_ref = to_size_t(get_direct(sub_data, sub_width, 0)); + else { + Array sub(to_ref(ref), 0, 0, m_alloc); + pair p = sub.find_btree_leaf(0); + const char* leaf_header = p.first.m_addr; + row_ref = to_size_t(get(leaf_header, 0)); } - // Recurse into sub-index; - header = m_alloc.translate(to_ref(ref)); // FIXME: This is wastefull since sub_header already contains this result - data = get_data_from_header(header); - width = get_width_from_header(header); - is_leaf = 
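// Editor's note -- a sketch of the key scheme this index search relies
// on (not part of the patch; create_key is a hypothetical helper named
// here only for illustration). The index packs up to four leading
// bytes of the string into a 32-bit big-endian key, which is why the
// code above consumes four characters per level (value_2.substr(4))
// and why a stored key whose last byte is zero -- the case tested by
// !(stored_key << 24) -- means the entire string has been compared:
//
//     typedef uint_fast32_t key_type;
//
//     key_type create_key(StringData str)
//     {
//         key_type key = 0;
//         if (str.size() >= 1) key |= key_type(str[0]) << 24;
//         if (str.size() >= 2) key |= key_type(str[1]) << 16;
//         if (str.size() >= 3) key |= key_type(str[2]) << 8;
//         if (str.size() >= 4) key |= key_type(str[3]);
//         return key;
//     }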
get_isleaf_from_header(header); - if (value_2.size() <= 4) value_2 = StringData(); - else value_2 = value_2.substr(4); - goto top; + // If the last byte in the stored key is zero, we know + // that we have compared against the entire (target) + // string + if (!(stored_key << 24)) + return row_ref; + + StringData str = (*get_func)(column, row_ref); + if (str == value) + return row_ref; + return not_found; + } + + // Recurse into sub-index; + header = m_alloc.translate(to_ref(ref)); // FIXME: This is wastefull since sub_header already contains this result + data = get_data_from_header(header); + width = get_width_from_header(header); + is_leaf = get_isleaf_from_header(header); + if (value_2.size() <= 4) { + value_2 = StringData(); } - else return not_found; + else { + value_2 = value_2.substr(4); + } + goto top; } } + void Array::IndexStringFindAll(Array& result, StringData value, void* column, StringGetter get_func) const { StringData value_2 = value; @@ -2382,7 +2396,8 @@ void Array::IndexStringFindAll(Array& result, StringData value, void* column, St size_t pos = ::lower_bound<32>(offsets_data, offsets_size, key); // keys are always 32 bits wide // If key is outside range, we know there can be no match - if (pos == offsets_size) return; // not_found + if (pos == offsets_size) + return; // not_found // Get entry under key const char* refs_header = m_alloc.translate(refs_ref); @@ -2401,92 +2416,95 @@ void Array::IndexStringFindAll(Array& result, StringData value, void* column, St key_type stored_key = key_type(get_direct<32>(offsets_data, pos)); - if (stored_key == key) { - // Literal row index - if (ref & 1) { - size_t row_ref = size_t(uint64_t(ref) >> 1); + if (stored_key != key) + return; // not_found - // If the last byte in the stored key is zero, we know that we have - // compared against the entire (target) string - if (!(stored_key << 24)) { - result.add(row_ref); - return; + // Literal row index + if (ref & 1) { + size_t row_ref = size_t(uint64_t(ref) >> 1); + + // If the last byte in the stored key is zero, we know that we have + // compared against the entire (target) string + if (!(stored_key << 24)) { + result.add(row_ref); + return; + } + + StringData str = (*get_func)(column, row_ref); + if (str == value) + result.add(row_ref); + return; // not_found + } + + const char* sub_header = m_alloc.translate(to_ref(ref)); + const bool sub_isindex = get_indexflag_from_header(sub_header); + + // List of matching row indexes + if (!sub_isindex) { + const bool sub_isleaf = get_isleaf_from_header(sub_header); + + // In most cases the row list will just be an array but there + // might be so many matches that it has branched into a column + if (sub_isleaf) { + const size_t sub_width = get_width_from_header(sub_header); + const char* sub_data = get_data_from_header(sub_header); + const size_t first_row_ref = to_size_t(get_direct(sub_data, sub_width, 0)); + + // If the last byte in the stored key is not zero, we have + // not yet compared against the entire (target) string + if ((stored_key << 24)) { + StringData str = (*get_func)(column, first_row_ref); + if (str != value) + return; // not_found } - StringData str = (*get_func)(column, row_ref); - if (str == value) { + // Copy all matches into result array + const size_t sub_size = get_size_from_header(sub_header); + + for (size_t i = 0; i < sub_size; ++i) { + size_t row_ref = to_size_t(get_direct(sub_data, sub_width, i)); result.add(row_ref); - return; } - else return; // not_found } - - const char* sub_header = 
m_alloc.translate(to_ref(ref)); - const bool sub_isindex = get_indexflag_from_header(sub_header); - - // List of matching row indexes - if (!sub_isindex) { - const bool sub_isleaf = get_isleaf_from_header(sub_header); - - // In most cases the row list will just be an array but there - // might be so many matches that it has branched into a column - if (sub_isleaf) { - const size_t sub_width = get_width_from_header(sub_header); - const char* sub_data = get_data_from_header(sub_header); - const size_t first_row_ref = to_size_t(get_direct(sub_data, sub_width, 0)); - - // If the last byte in the stored key is not zero, we have - // not yet compared against the entire (target) string - if ((stored_key << 24)) { - StringData str = (*get_func)(column, first_row_ref); - if (str != value) - return; // not_found - } - - // Copy all matches into result array - const size_t sub_size = get_size_from_header(sub_header); - - for (size_t i = 0; i < sub_size; ++i) { - size_t row_ref = to_size_t(get_direct(sub_data, sub_width, i)); - result.add(row_ref); - } + else { + const Column sub(to_ref(ref), 0, 0, m_alloc); + const size_t first_row_ref = to_size_t(sub.get(0)); + + // If the last byte in the stored key is not zero, we have + // not yet compared against the entire (target) string + if ((stored_key << 24)) { + StringData str = (*get_func)(column, first_row_ref); + if (str != value) + return; // not_found } - else { - const Column sub(to_ref(ref), 0, 0, m_alloc); - const size_t first_row_ref = to_size_t(sub.get(0)); - - // If the last byte in the stored key is not zero, we have - // not yet compared against the entire (target) string - if ((stored_key << 24)) { - StringData str = (*get_func)(column, first_row_ref); - if (str != value) - return; // not_found - } - - // Copy all matches into result array - const size_t sub_size = sub.size(); - - for (size_t i = 0; i < sub_size; ++i) { - size_t row_ref = to_size_t(sub.get(i)); - result.add(row_ref); - } + + // Copy all matches into result array + const size_t sub_size = sub.size(); + + for (size_t i = 0; i < sub_size; ++i) { + size_t row_ref = to_size_t(sub.get(i)); + result.add(row_ref); } - return; } + return; + } - // Recurse into sub-index; - header = m_alloc.translate(to_ref(ref)); // FIXME: This is wastefull since sub_header already contains this result - data = get_data_from_header(header); - width = get_width_from_header(header); - is_leaf = get_isleaf_from_header(header); - if (value_2.size() <= 4) value_2 = StringData(); - else value_2 = value_2.substr(4); - goto top; + // Recurse into sub-index; + header = m_alloc.translate(to_ref(ref)); // FIXME: This is wastefull since sub_header already contains this result + data = get_data_from_header(header); + width = get_width_from_header(header); + is_leaf = get_isleaf_from_header(header); + if (value_2.size() <= 4) { + value_2 = StringData(); } - else return; // not_found + else { + value_2 = value_2.substr(4); + } + goto top; } } + FindRes Array::IndexStringFindAllNoCopy(StringData value, size_t& res_ref, void* column, StringGetter get_func) const { StringData value_2 = value; @@ -2513,7 +2531,8 @@ FindRes Array::IndexStringFindAllNoCopy(StringData value, size_t& res_ref, void* size_t pos = ::lower_bound<32>(offsets_data, offsets_size, key); // keys are always 32 bits wide // If key is outside range, we know there can be no match - if (pos == offsets_size) return FindRes_not_found; + if (pos == offsets_size) + return FindRes_not_found; // Get entry under key const char* refs_header = 
m_alloc.translate(refs_ref); @@ -2532,79 +2551,84 @@ FindRes Array::IndexStringFindAllNoCopy(StringData value, size_t& res_ref, void* key_type stored_key = key_type(get_direct<32>(offsets_data, pos)); - if (stored_key == key) { - // Literal row index - if (ref & 1) { - size_t row_ref = size_t(uint64_t(ref) >> 1); + if (stored_key != key) + return FindRes_not_found; // not_found - // If the last byte in the stored key is zero, we know that we have - // compared against the entire (target) string - if (!(stored_key << 24)) { - res_ref = row_ref; - return FindRes_single; // found single - } + // Literal row index + if (ref & 1) { + size_t row_ref = size_t(uint64_t(ref) >> 1); - StringData str = (*get_func)(column, row_ref); - if (str == value) { - res_ref = row_ref; - return FindRes_single; // found single - } - return FindRes_not_found; // not_found + // If the last byte in the stored key is zero, we know that we have + // compared against the entire (target) string + if (!(stored_key << 24)) { + res_ref = row_ref; + return FindRes_single; // found single + } + + StringData str = (*get_func)(column, row_ref); + if (str == value) { + res_ref = row_ref; + return FindRes_single; // found single } + return FindRes_not_found; // not_found + } + + const char* sub_header = m_alloc.translate(to_ref(ref)); + const bool sub_isindex = get_indexflag_from_header(sub_header); - const char* sub_header = m_alloc.translate(to_ref(ref)); - const bool sub_isindex = get_indexflag_from_header(sub_header); - - // List of matching row indexes - if (!sub_isindex) { - const bool sub_isleaf = get_isleaf_from_header(sub_header); - - // In most cases the row list will just be an array but there - // might be so many matches that it has branched into a column - if (sub_isleaf) { - const size_t sub_width = get_width_from_header(sub_header); - const char* sub_data = get_data_from_header(sub_header); - const size_t first_row_ref = to_size_t(get_direct(sub_data, sub_width, 0)); - - // If the last byte in the stored key is not zero, we have - // not yet compared against the entire (target) string - if ((stored_key << 24)) { - StringData str = (*get_func)(column, first_row_ref); - if (str != value) - return FindRes_not_found; // not_found - } + // List of matching row indexes + if (!sub_isindex) { + const bool sub_isleaf = get_isleaf_from_header(sub_header); + + // In most cases the row list will just be an array but there + // might be so many matches that it has branched into a column + if (sub_isleaf) { + const size_t sub_width = get_width_from_header(sub_header); + const char* sub_data = get_data_from_header(sub_header); + const size_t first_row_ref = to_size_t(get_direct(sub_data, sub_width, 0)); + + // If the last byte in the stored key is not zero, we have + // not yet compared against the entire (target) string + if ((stored_key << 24)) { + StringData str = (*get_func)(column, first_row_ref); + if (str != value) + return FindRes_not_found; // not_found } - else { - const Column sub(to_ref(ref), 0, 0, m_alloc); - const size_t first_row_ref = to_size_t(sub.get(0)); - - // If the last byte in the stored key is not zero, we have - // not yet compared against the entire (target) string - if ((stored_key << 24)) { - StringData str = (*get_func)(column, first_row_ref); - if (str != value) - return FindRes_not_found; // not_found - } + } + else { + const Column sub(to_ref(ref), 0, 0, m_alloc); + const size_t first_row_ref = to_size_t(sub.get(0)); + + // If the last byte in the stored key is not zero, we have + // not yet 
compared against the entire (target) string + if ((stored_key << 24)) { + StringData str = (*get_func)(column, first_row_ref); + if (str != value) + return FindRes_not_found; // not_found } - - // Return a reference to the result column - res_ref = to_ref(ref); - return FindRes_column; // column of matches } - // Recurse into sub-index; - header = m_alloc.translate(to_ref(ref)); // FIXME: This is wastefull since sub_header already contains this result - data = get_data_from_header(header); - width = get_width_from_header(header); - is_leaf = get_isleaf_from_header(header); - if (value_2.size() <= 4) value_2 = StringData(); - else value_2 = value_2.substr(4); - goto top; + // Return a reference to the result column + res_ref = to_ref(ref); + return FindRes_column; // column of matches + } + + // Recurse into sub-index; + header = m_alloc.translate(to_ref(ref)); // FIXME: This is wastefull since sub_header already contains this result + data = get_data_from_header(header); + width = get_width_from_header(header); + is_leaf = get_isleaf_from_header(header); + if (value_2.size() <= 4) { + value_2 = StringData(); } - else return FindRes_not_found; // not_found + else { + value_2 = value_2.substr(4); + } + goto top; } } + size_t Array::IndexStringCount(StringData value, void* column, StringGetter get_func) const { @@ -2632,7 +2656,8 @@ size_t Array::IndexStringCount(StringData value, void* column, StringGetter get_ size_t pos = ::lower_bound<32>(offsets_data, offsets_size, key); // keys are always 32 bits wide // If key is outside range, we know there can be no match - if (pos == offsets_size) return 0; + if (pos == offsets_size) + return 0; // Get entry under key const char* refs_header = m_alloc.translate(refs_ref); @@ -2651,68 +2676,80 @@ size_t Array::IndexStringCount(StringData value, void* column, StringGetter get_ key_type stored_key = key_type(get_direct<32>(offsets_data, pos)); - if (stored_key == key) { - // Literal row index - if (ref & 1) { - const size_t row_ref = size_t((uint64_t(ref) >> 1)); + if (stored_key != key) + return 0; - // If the last byte in the stored key is zero, we know that we have - // compared against the entire (target) string - if (!(stored_key << 24)) return 1; + // Literal row index + if (ref & 1) { + const size_t row_ref = size_t((uint64_t(ref) >> 1)); - StringData str = (*get_func)(column, row_ref); - if (str == value) return 1; - else return 0; - } + // If the last byte in the stored key is zero, we know that we have + // compared against the entire (target) string + if (!(stored_key << 24)) + return 1; - const char* sub_header = m_alloc.translate(to_ref(ref)); - const bool sub_isindex = get_indexflag_from_header(sub_header); + StringData str = (*get_func)(column, row_ref); + if (str == value) + return 1; + return 0; + } - // List of matching row indexes - if (!sub_isindex) { - const bool sub_isleaf = get_isleaf_from_header(sub_header); - size_t sub_count; - size_t row_ref; + const char* sub_header = m_alloc.translate(to_ref(ref)); + const bool sub_isindex = get_indexflag_from_header(sub_header); - // In most cases the row list will just be an array but there - // might be so many matches that it has branched into a column - if (sub_isleaf) { - sub_count = get_size_from_header(sub_header); + // List of matching row indexes + if (!sub_isindex) { + const bool sub_isleaf = get_isleaf_from_header(sub_header); + size_t sub_count; + size_t row_ref; - // If the last byte in the stored key is zero, we know that we have - // compared against the entire (target) 
string
-                if (!(stored_key << 24)) return sub_count;
+            // If the last byte in the stored key is zero, we know
+            // that we have compared against the entire (target)
+            // string
+            if (!(stored_key << 24))
+                return sub_count;
 
-                const char* sub_data = get_data_from_header(sub_header);
-                const size_t sub_width = get_width_from_header(sub_header);
-                row_ref = to_size_t(get_direct(sub_data, sub_width, 0));
-            }
-            else {
-                const Column sub(to_ref(ref), 0, 0, m_alloc);
-                sub_count = sub.size();
+            const char* sub_data = get_data_from_header(sub_header);
+            const size_t sub_width = get_width_from_header(sub_header);
+            row_ref = to_size_t(get_direct(sub_data, sub_width, 0));
+        }
+        else {
+            const Column sub(to_ref(ref), 0, 0, m_alloc);
+            sub_count = sub.size();
 
-                // If the last byte in the stored key is zero, we know that we have
-                // compared against the entire (target) string
-                if (!(stored_key << 24)) return sub_count;
+            // If the last byte in the stored key is zero, we know
+            // that we have compared against the entire (target)
+            // string
+            if (!(stored_key << 24))
+                return sub_count;
 
-                row_ref = to_size_t(sub.get(0));
-            }
+            row_ref = to_size_t(sub.get(0));
         }
 
-            StringData str = (*get_func)(column, row_ref);
-            if (str == value) return sub_count;
-            else return 0;
-        }
-
-        // Recurse into sub-index;
-        header = m_alloc.translate(to_ref(ref)); // FIXME: This is wastefull since sub_header already contains this result
-        data = get_data_from_header(header);
-        width = get_width_from_header(header);
-        is_leaf = get_isleaf_from_header(header);
-        if (value_2.size() <= 4) value_2 = StringData();
-        else value_2 = value_2.substr(4);
-        goto top;
+        StringData str = (*get_func)(column, row_ref);
+        if (str == value)
+            return sub_count;
+        return 0;
     }
-    else return 0;
+
+    // Recurse into sub-index;
+    header = m_alloc.translate(to_ref(ref)); // FIXME: This is wasteful since sub_header already contains this result
+    data = get_data_from_header(header);
+    width = get_width_from_header(header);
+    is_leaf = get_isleaf_from_header(header);
+    if (value_2.size() <= 4) {
+        value_2 = StringData();
+    }
+    else {
+        value_2 = value_2.substr(4);
+    }
+    goto top;
 }
 }
 
@@ -2737,7 +2774,8 @@ pair Array::find_btree_leaf(size_t ndx) const TIGHTDB_NOEXCEPT
     header = m_alloc.translate(child_ref);
 
     bool child_is_leaf = get_isleaf_from_header(header);
-    if (child_is_leaf) return make_pair(MemRef(header, child_ref), ndx);
+    if (child_is_leaf)
+        return make_pair(MemRef(header, child_ref), ndx);
 
     width = get_width_from_header(header);
     TIGHTDB_TEMPEX(p = ::get_two_as_size, width, (header, 0));
@@ -2746,6 +2784,7 @@
     }
 }
 
+
 int64_t Array::get(const char* header, size_t ndx) TIGHTDB_NOEXCEPT
 {
     const char* data = get_data_from_header(header);
@@ -2753,6 +2792,7 @@ int64_t Array::get(const char* header, size_t ndx) TIGHTDB_NOEXCEPT
     return get_direct(data, width, ndx);
 }
 
+
 pair Array::get_size_pair(const char* header, size_t ndx) TIGHTDB_NOEXCEPT
 {
     pair p;
diff --git a/src/tightdb/array.hpp b/src/tightdb/array.hpp
index 36dbf44f48b..fa2e781a02e 100644
--- a/src/tightdb/array.hpp
+++ b/src/tightdb/array.hpp
@@ -254,7 +254,19 @@ class Array: public ArrayParent {
 
     enum Type {
         type_Normal,
-        type_InnerColumnNode, ///< Inner node of B+-tree
+
+        /// This array is the root of an inner node of a B+-tree as
+        /// 
used in table columns.
+        type_InnerColumnNode,
+
+        /// This array may contain refs to subarrays. An element value
+        /// whose least significant bit is zero, is a ref pointing to
+        /// a subarray. An element value whose least significant bit
+        /// is one, is just a value. It is the responsibility of the
+        /// application to ensure that non-ref values have their least
+        /// significant bit set. This will generally be done by
+        /// shifting the desired value to the left by one bit position,
+        /// and then setting the vacated bit to one.
         type_HasRefs
     };
 
From 6d2359080fcc0d3b3789b32b3d92922ce3ea3e69 Mon Sep 17 00:00:00 2001
From: Kristian Spangsege
Date: Thu, 15 Aug 2013 17:59:40 +0200
Subject: [PATCH 02/20] Use tightdb::Thread API in more places for improved
 portability

---
 test/test_transactions.cpp | 91 +++++++++++++++++++++++---------------
 test/testshared.cpp        | 25 +++++------
 2 files changed, 65 insertions(+), 51 deletions(-)

diff --git a/test/test_transactions.cpp b/test/test_transactions.cpp
index 5806b3e09be..15fe76cd608 100644
--- a/test/test_transactions.cpp
+++ b/test/test_transactions.cpp
@@ -5,12 +5,13 @@
 #include
 #include
 
-#include
-
 #include
-#include
 #include
 
+#include
+#include
+#include
+
 #include "testsettings.hpp"
 
 using namespace std;
@@ -387,48 +388,60 @@ void thread(int index, string database_path)
 }
 
 
-struct ThreadWrapper {
-    void run(int index, string database_path)
+class ThreadWrapper {
+public:
+    template<class F> void start(const F& func)
     {
-        m_index = index;
-        m_database_path = database_path;
-        m_error = false;
-        const int rc = pthread_create(&m_pthread, 0, &ThreadWrapper::run, this);
-        if (rc != 0) throw runtime_error("pthread_create() failed");
+        m_except = false;
+        m_thread.start(util::bind(&Runner<F>::run, func, this));
     }
 
-    // Returns 'true' on error
+    /// Returns 'true' if thread has thrown an exception. In that case
+    /// the exception message will also be written to std::cerr.
    bool join()
    {
-        const int rc = pthread_join(m_pthread, 0);
-        if (rc != 0) throw runtime_error("pthread_join() failed");
-        return m_error;
+        string except_msg;
+        if (join(except_msg)) {
+            cerr << "Exception thrown in thread: "<(arg);
-        try {
-            thread(e.m_index, e.m_database_path);
-        }
-        catch (exception& ex) {
-            e.m_error = true;
-            cerr << "Exception thrown in thread "< struct Runner {
+        static void run(F func, ThreadWrapper* tw)
+        {
+            try {
+                func();
+            }
+            catch (exception& e) {
+                tw->m_except = true;
+                tw->m_except_msg = e.what();
+            }
+            catch (...) 
{
+                tw->m_except = true;
+                tw->m_except_msg = "Unknown error";
+            }
+        }
+    };
 };
 
 } // anonymous namespace
 
@@ -446,12 +459,18 @@ TEST(Transactions)
 
     // Start threads
     for (int i=0; i
 
-#include
-#include
 #include
+#include
+#include
+#include
 
 using namespace std;
 using namespace tightdb;
 
@@ -428,10 +428,8 @@ TEST(Shared_Writes_SpecialOrder)
 
 namespace {
 
-void* IncrementEntry(void* arg)
+void increment_entry_thread(size_t row_ndx)
 {
-    const size_t row_ndx = (size_t)arg;
-
     // Open shared db
     SharedGroup sg("test_shared.tightdb");
 
@@ -456,12 +454,11 @@ void* IncrementEntry(void* arg)
             ReadTransaction rt(sg);
             TestTableShared::ConstRef t = rt.get_table("test");
 
-            const int64_t v = t[row_ndx].first;
-            const int64_t expected = i+1;
+            int64_t v = t[row_ndx].first;
+            int64_t expected = i+1;
             CHECK_EQUAL(expected, v);
         }
     }
-    return 0;
 }
 
 } // anonymous namespace
 
@@ -488,18 +485,16 @@ TEST(Shared_WriterThreads)
         wt.commit();
     }
 
-    pthread_t threads[thread_count];
+    Thread threads[thread_count];
 
     // Create all threads
     for (size_t i = 0; i < thread_count; ++i) {
-        const int rc = pthread_create(&threads[i], NULL, IncrementEntry, (void*)i);
-        CHECK_EQUAL(0, rc);
+        threads[i].start(util::bind(&increment_entry_thread, i));
     }
 
     // Wait for all threads to complete
     for (size_t i = 0; i < thread_count; ++i) {
-        const int rc = pthread_join(threads[i], NULL);
-        CHECK_EQUAL(0, rc);
+        threads[i].join();
    }
 
     // Verify that the changes were made
     {
         ReadTransaction rt(sg);
         TestTableShared::ConstRef t = rt.get_table("test");
 
         for (size_t i = 0; i < thread_count; ++i) {
-            const int64_t v = t[i].first;
+            int64_t v = t[i].first;
             CHECK_EQUAL(100, v);
         }
     }
 }

From b7089778d1f467e8e5c366f72a3bcb3249c66d6e Mon Sep 17 00:00:00 2001
From: Kristian Spangsege
Date: Fri, 16 Aug 2013 17:10:44 +0200
Subject: [PATCH 03/20] Fixed bug in TableViewBase::sort(). Add free space
 tracking to a database file that does not have it when calling
 Group::commit(). In Group::verify(), do not assert that free space
 versioning is present just because it is opened in shared mode.

---
 src/tightdb/alloc_slab.hpp   |   6 +-
 src/tightdb/array.hpp        |  16 ++++-
 src/tightdb/group.cpp        |  79 ++++++++++++++--------
 src/tightdb/group.hpp        |  24 ++++----
 src/tightdb/group_shared.cpp |   2 +-
 src/tightdb/group_shared.hpp |   5 ++
 src/tightdb/group_writer.cpp |   2 +-
 src/tightdb/table_view.cpp   |   3 +-
 test/testgroup.cpp           |  34 +++++-----
 test/testshared.cpp          | 113 +++++++++++++++++++++++++++++++++++
 10 files changed, 220 insertions(+), 64 deletions(-)

diff --git a/src/tightdb/alloc_slab.hpp b/src/tightdb/alloc_slab.hpp
index 2743fcfa37a..ee968bf3cb1 100644
--- a/src/tightdb/alloc_slab.hpp
+++ b/src/tightdb/alloc_slab.hpp
@@ -109,7 +109,7 @@ class SlabAlloc: public Allocator {
     /// This function may be called only for an attached
     /// allocator. The effect of calling it on an unattached allocator
     /// is undefined.
-    std::size_t get_attached_size() const TIGHTDB_NOEXCEPT;
+    std::size_t get_baseline() const TIGHTDB_NOEXCEPT;
 
     /// Get the total amount of managed memory (allocated and
     /// unallocated).
@@ -124,7 +124,7 @@ class SlabAlloc: public Allocator {
 
     /// Remap the attached file such that a prefix of the specified
     /// size becomes available in memory. If successful,
-    /// get_attached_size() will return the specified new file size.
+    /// get_baseline() will return the specified new file size. 
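    ///
    /// Editor's note -- a sketch of the intended call pattern, taken
    /// from the way Group::commit() uses this API later in this patch
    /// series (not a normative example):
    ///
    ///     size_t new_file_size = out.get_file_size();
    ///     if (new_file_size > m_alloc.get_baseline())
    ///         m_alloc.remap(new_file_size);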
void remap(std::size_t file_size); MemRef alloc(std::size_t size) TIGHTDB_OVERRIDE; @@ -204,7 +204,7 @@ inline bool SlabAlloc::is_attached() const TIGHTDB_NOEXCEPT return m_data != 0; } -inline std::size_t SlabAlloc::get_attached_size() const TIGHTDB_NOEXCEPT +inline std::size_t SlabAlloc::get_baseline() const TIGHTDB_NOEXCEPT { TIGHTDB_ASSERT(is_attached()); return m_baseline; diff --git a/src/tightdb/array.hpp b/src/tightdb/array.hpp index fa2e781a02e..940f2f8c6da 100644 --- a/src/tightdb/array.hpp +++ b/src/tightdb/array.hpp @@ -388,8 +388,8 @@ class Array: public ArrayParent { /// effect if the accessor is currently unattached (idempotency). void detach() TIGHTDB_NOEXCEPT { m_data = 0; } - std::size_t size() const TIGHTDB_NOEXCEPT { return m_size; } - bool is_empty() const TIGHTDB_NOEXCEPT { return m_size == 0; } + std::size_t size() const TIGHTDB_NOEXCEPT; + bool is_empty() const TIGHTDB_NOEXCEPT { return size() == 0; } void insert(std::size_t ndx, int64_t value); void add(int64_t value); @@ -1019,14 +1019,23 @@ inline void Array::create(Type type) } +inline std::size_t Array::size() const TIGHTDB_NOEXCEPT +{ + TIGHTDB_ASSERT(is_attached()); + return m_size; +} + + inline int64_t Array::back() const TIGHTDB_NOEXCEPT { - TIGHTDB_ASSERT(m_size); + TIGHTDB_ASSERT(is_attached()); + TIGHTDB_ASSERT(m_size > 0); return get(m_size - 1); } inline int64_t Array::get(std::size_t ndx) const TIGHTDB_NOEXCEPT { + TIGHTDB_ASSERT(is_attached()); TIGHTDB_ASSERT(ndx < m_size); return (this->*m_getter)(ndx); @@ -1049,6 +1058,7 @@ inline int64_t Array::get(std::size_t ndx) const TIGHTDB_NOEXCEPT inline ref_type Array::get_as_ref(std::size_t ndx) const TIGHTDB_NOEXCEPT { + TIGHTDB_ASSERT(is_attached()); TIGHTDB_ASSERT(m_hasRefs); int64_t v = get(ndx); return to_ref(v); diff --git a/src/tightdb/group.cpp b/src/tightdb/group.cpp index 89103a69110..806e20e92e7 100644 --- a/src/tightdb/group.cpp +++ b/src/tightdb/group.cpp @@ -169,7 +169,7 @@ void Group::create() // free space. In that case, we must add as free space, the size // of the file minus its header. if (m_alloc.is_attached()) { - size_t free = m_alloc.get_attached_size() - sizeof SlabAlloc::default_header; + size_t free = m_alloc.get_baseline() - sizeof SlabAlloc::default_header; if (free > 0) { m_free_positions.add(sizeof SlabAlloc::default_header); m_free_lengths.add(free); @@ -353,26 +353,45 @@ void Group::commit() TIGHTDB_ASSERT(is_attached()); TIGHTDB_ASSERT(m_top.is_attached()); + // GroupWriter::commit() needs free space tracking information, so + // if the attached database does not contain it, we must add it + // now. Empty (newly created) database files and database files + // created by Group::write() do not have free space tracking + // information. 
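// Editor's note (sketch, not part of the original patch): the
// free-space tracking information consists of two integer arrays
// reachable from the group's top array -- one holding the file
// offsets of the free chunks and one holding their lengths. The adds
// in the code below imply this top-array layout:
//
//     m_top[0..1] -> table names / tables
//     m_top[2]    -> m_free_positions
//     m_top[3]    -> m_free_lengths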
+    if (m_free_positions.is_attached()) {
+        TIGHTDB_ASSERT(m_top.size() >= 2);
+    }
+    else {
+        TIGHTDB_ASSERT(m_top.size() == 2);
+        m_free_positions.create(Array::type_Normal);
+        m_free_lengths.create(Array::type_Normal);
+        m_top.add(m_free_positions.get_ref());
+        m_top.add(m_free_lengths.get_ref());
+    }
+
     GroupWriter out(*this);
 
-    // Recursively write all changed arrays to end of file
+    // Recursively write all changed arrays to the database file
     bool do_sync = true;
     ref_type top_ref = out.commit(do_sync);
 
-    // Since the group is persisiting in single-thread (un-shared) mode
-    // we have to make sure that the group stays valid after commit
+    // Since the group is persisting in single-thread (un-shared)
+    // mode we have to make sure that the group stays valid after
+    // commit
 
-    // Clear old allocs
+    // Mark all managed space as free
     m_alloc.free_all();
 
+    size_t old_baseline = m_alloc.get_baseline();
+
     // Remap file if it has grown
     size_t new_file_size = out.get_file_size();
-    TIGHTDB_ASSERT(new_file_size >= m_alloc.get_attached_size());
-    if (new_file_size > m_alloc.get_attached_size())
+    TIGHTDB_ASSERT(new_file_size >= m_alloc.get_baseline());
+    if (new_file_size > m_alloc.get_baseline())
         m_alloc.remap(new_file_size);
 
     // Recursively update refs in all active tables (columns, arrays..)
-    update_refs(top_ref);
+    update_refs(top_ref, old_baseline);
 
 #ifdef TIGHTDB_DEBUG
     Verify();
@@ -380,14 +399,23 @@
 }
 
-void Group::update_refs(ref_type top_ref)
+void Group::update_refs(ref_type top_ref, size_t old_baseline)
 {
-    // Update top with the new (persistent) ref
+    // Array nodes that are part of the previous version of the
+    // database will not be overwritten by Group::commit(). This is
+    // necessary for robustness in the face of abrupt termination of
+    // the process. It also means that we can be sure that an array
+    // remains unchanged across a commit if the new ref is equal to
+    // the old ref and the ref is below the previous baseline.
+
+    if (top_ref < old_baseline && m_top.get_ref() == top_ref)
+        return;
+
     m_top.init_from_ref(top_ref);
     TIGHTDB_ASSERT(m_top.size() >= 2);
 
     // Now we can update its child arrays
     m_table_names.update_from_parent();
 
     // No free-info in serialized databases
     // and version info is only in shared,
@@ -433,8 +461,8 @@ void Group::update_from_shared(ref_type new_top_ref, size_t new_file_size)
     TIGHTDB_ASSERT(new_top_ref < new_file_size);
 
     // Update memory mapping if database file has grown
-    TIGHTDB_ASSERT(new_file_size >= m_alloc.get_attached_size());
-    if (new_file_size > m_alloc.get_attached_size()) {
+    TIGHTDB_ASSERT(new_file_size >= m_alloc.get_baseline());
+    if (new_file_size > m_alloc.get_baseline()) {
         m_alloc.remap(new_file_size);
     }
 
@@ -456,11 +484,13 @@ bool Group::operator==(const Group& g) const
 {
     size_t n = size();
-    if (n != g.size()) return false;
+    if (n != g.size())
+        return false;
 
     for (size_t i=0; i
static_cast(readlock_version); // FIXME: Why is this parameter not used? 
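// Editor's note (sketch, not part of the patch): in shared mode every
// free chunk carries the version at which it was freed, and a chunk
// may only be scrubbed or reused once no attached reader can still be
// referencing it. That is the invariant behind the version test in
// the loop below:
//
//     if (v >= m_readlock_version)
//         continue; // chunk may still be visible to a live read lock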
- if (!m_is_shared) return; + if (!m_is_shared) + return; File::Map map(m_alloc.m_file, File::access_ReadWrite, file_size); size_t count = m_free_positions.size(); for (size_t i = 0; i < count; ++i) { size_t v = to_size_t(m_free_versions.get(i)); // todo, remove assizet when 64 bit - if (v >= m_readlock_version) continue; + if (v >= m_readlock_version) + continue; size_t pos = to_size_t(m_free_positions.get(i)); size_t len = to_size_t(m_free_lengths.get(i)); diff --git a/src/tightdb/group.hpp b/src/tightdb/group.hpp index c65870fb5a6..ce86bc115f2 100644 --- a/src/tightdb/group.hpp +++ b/src/tightdb/group.hpp @@ -227,16 +227,14 @@ class Group: private Table::Parent { /// Commit changes to the attached file. This requires that the /// attached file is opened in read/write mode. /// - /// Do not call this function on a group instance that is managed - /// by a shared group. Doing so will result in undefined behavior. + /// Calling this function on an unattached group, a free-standing + /// group, a group whose attached file is opened in read-only + /// mode, a group that is attached to a memory buffer, or a group + /// that is managed by a shared group, is an error and will result + /// in undefined behavior. /// /// Table accesors will remain valid across the commit. Note that /// this is not the case when working with proper transactions. - /// - /// FIXME: Must throw an exception if the group is opened in - /// read-only mode. Currently this is impossible because the - /// information is not stored anywhere. A flag probably needs to be - /// added to SlabAlloc. void commit(); // Conversion @@ -273,7 +271,7 @@ class Group: private Table::Parent { Array m_free_lengths; Array m_free_versions; mutable Array m_cached_tables; - const bool m_is_shared; // FIXME: Currently used only by Verify() when compiling in debug mode + const bool m_is_shared; std::size_t m_readlock_version; struct shared_tag {}; @@ -286,8 +284,14 @@ class Group: private Table::Parent { void invalidate(); void init_shared(); - // Recursively update all internal refs after commit - void update_refs(ref_type top_ref); + /// Recursively update refs stored in all cached array + /// accessors. This includes cached array accessors in any + /// currently attached table accessors. This ensures that the + /// group instance itself, as well as any attached table accessor + /// that exists across Group::commit() will remain valid. This + /// function is not appropriate for use in conjunction with + /// commits via shared group. 
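    ///
    /// Editor's note -- a minimal usage sketch (hypothetical file and
    /// table names; based on the commit() documentation above, which
    /// guarantees that table accessors survive a direct,
    /// non-transactional commit):
    ///
    ///     Group g("test.tightdb", Group::mode_ReadWrite);
    ///     TableRef table = g.get_table("my_table");
    ///     table->add_empty_row();
    ///     g.commit(); // 'table' remains valid here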
+ void update_refs(ref_type top_ref, std::size_t old_baseline); void update_from_shared(ref_type new_top_ref, std::size_t new_file_size); diff --git a/src/tightdb/group_shared.cpp b/src/tightdb/group_shared.cpp index 65aeaf27a7c..13852dd2a78 100644 --- a/src/tightdb/group_shared.cpp +++ b/src/tightdb/group_shared.cpp @@ -177,7 +177,7 @@ void SharedGroup::open(const string& path, bool no_create_file, // Set initial values info->version = 0; info->flags = dlevel; // durability level is fixed from creation - info->filesize = alloc.get_attached_size(); + info->filesize = alloc.get_baseline(); info->infosize = uint32_t(len); info->current_top = alloc.get_top_ref(); info->current_version = 0; diff --git a/src/tightdb/group_shared.hpp b/src/tightdb/group_shared.hpp index 7fc0882b13d..2c0883424ac 100644 --- a/src/tightdb/group_shared.hpp +++ b/src/tightdb/group_shared.hpp @@ -206,6 +206,11 @@ class ReadTransaction { m_shared_group.end_read(); } + bool has_table(StringData name) const + { + return get_group().has_table(name); + } + ConstTableRef get_table(StringData name) const { return get_group().get_table(name); diff --git a/src/tightdb/group_writer.cpp b/src/tightdb/group_writer.cpp index 3d42bdb4dc5..bde08d12dae 100644 --- a/src/tightdb/group_writer.cpp +++ b/src/tightdb/group_writer.cpp @@ -12,7 +12,7 @@ using namespace tightdb; GroupWriter::GroupWriter(Group& group) : m_group(group), m_alloc(group.m_alloc), m_current_version(0) { - m_file_map.map(m_alloc.m_file, File::access_ReadWrite, m_alloc.get_attached_size()); + m_file_map.map(m_alloc.m_file, File::access_ReadWrite, m_alloc.get_baseline()); } diff --git a/src/tightdb/table_view.cpp b/src/tightdb/table_view.cpp index c0a818a5d32..7bbb8cc90e0 100644 --- a/src/tightdb/table_view.cpp +++ b/src/tightdb/table_view.cpp @@ -251,8 +251,6 @@ void TableViewBase::sort(size_t column, bool Ascending) result.add(rr); } - ref.destroy(); - // Copy result to m_refs (todo, there might be a shortcut) m_refs.clear(); if (Ascending) { @@ -268,6 +266,7 @@ void TableViewBase::sort(size_t column, bool Ascending) } } result.destroy(); + ref.destroy(); } void TableViewBase::to_json(ostream& out) const diff --git a/test/testgroup.cpp b/test/testgroup.cpp index 6a167ab7762..6b387505f9c 100644 --- a/test/testgroup.cpp +++ b/test/testgroup.cpp @@ -138,7 +138,7 @@ TEST(Group_Serialize1) #ifdef TIGHTDB_DEBUG to_disk.Verify(); -#endif // TIGHTDB_DEBUG +#endif // Delete old file if there File::try_remove("table_test.tightdb"); @@ -153,10 +153,8 @@ TEST(Group_Serialize1) CHECK_EQUAL(4, t->get_column_count()); CHECK_EQUAL(10, t->size()); -#ifdef TIGHTDB_DEBUG // Verify that original values are there CHECK(*table == *t); -#endif // Modify both tables table[0].first = "test"; @@ -166,12 +164,12 @@ TEST(Group_Serialize1) table->remove(1); t->remove(1); -#ifdef TIGHTDB_DEBUG // Verify that both changed correctly CHECK(*table == *t); +#ifdef TIGHTDB_DEBUG to_disk.Verify(); from_disk.Verify(); -#endif // TIGHTDB_DEBUG +#endif } TEST(Group_Read1) @@ -196,7 +194,7 @@ TEST(Group_Serialize2) #ifdef TIGHTDB_DEBUG to_disk.Verify(); -#endif // TIGHTDB_DEBUG +#endif // Delete old file if there File::try_remove("table_test.tightdb"); @@ -211,13 +209,14 @@ TEST(Group_Serialize2) static_cast(t2); static_cast(t1); -#ifdef TIGHTDB_DEBUG // Verify that original values are there CHECK(*table1 == *t1); CHECK(*table2 == *t2); + +#ifdef TIGHTDB_DEBUG to_disk.Verify(); from_disk.Verify(); -#endif // TIGHTDB_DEBUG +#endif } TEST(Group_Serialize3) @@ -230,7 +229,7 @@ TEST(Group_Serialize3) #ifdef 
TIGHTDB_DEBUG to_disk.Verify(); -#endif // TIGHTDB_DEBUG +#endif // Delete old file if there File::try_remove("table_test.tightdb"); @@ -243,13 +242,12 @@ TEST(Group_Serialize3) TestTableGroup::Ref t = from_disk.get_table("test"); static_cast(t); - -#ifdef TIGHTDB_DEBUG // Verify that original values are there CHECK(*table == *t); +#ifdef TIGHTDB_DEBUG to_disk.Verify(); from_disk.Verify(); -#endif // TIGHTDB_DEBUG} +#endif } TEST(Group_Serialize_Mem) @@ -270,7 +268,7 @@ TEST(Group_Serialize_Mem) #ifdef TIGHTDB_DEBUG to_mem.Verify(); -#endif // TIGHTDB_DEBUG +#endif // Serialize to memory (we now own the buffer) BinaryData buffer = to_mem.write_to_mem(); @@ -282,12 +280,12 @@ TEST(Group_Serialize_Mem) CHECK_EQUAL(4, t->get_column_count()); CHECK_EQUAL(10, t->size()); -#ifdef TIGHTDB_DEBUG // Verify that original values are there CHECK(*table == *t); +#ifdef TIGHTDB_DEBUG to_mem.Verify(); from_mem.Verify(); -#endif //_DEBUG +#endif } TEST(Group_Close) @@ -323,7 +321,7 @@ TEST(Group_Serialize_Optimized) #ifdef TIGHTDB_DEBUG to_mem.Verify(); -#endif // TIGHTDB_DEBUG +#endif // Serialize to memory (we now own the buffer) BinaryData buffer = to_mem.write_to_mem(); @@ -335,9 +333,7 @@ TEST(Group_Serialize_Optimized) CHECK_EQUAL(4, t->get_column_count()); // Verify that original values are there -#ifdef TIGHTDB_DEBUG CHECK(*table == *t); -#endif // Add a row with a known (but unique) value table->add("search_target", 9, true, Fri); @@ -348,7 +344,7 @@ TEST(Group_Serialize_Optimized) #ifdef TIGHTDB_DEBUG to_mem.Verify(); from_mem.Verify(); -#endif // TIGHTDB_DEBUG +#endif } TEST(Group_Serialize_All) diff --git a/test/testshared.cpp b/test/testshared.cpp index 5f607282f03..4e33aa60abe 100644 --- a/test/testshared.cpp +++ b/test/testshared.cpp @@ -899,3 +899,116 @@ TEST(StringIndex_Bug2) } } } + + +TEST(Shared_MixedWithNonShared) +{ + File::try_remove("/tmp/x.tightdb"); + { + // Create empty file without free-space tracking + Group g; + g.write("/tmp/x.tightdb"); + } + { + // See if we can modify with non-shared group + Group g("/tmp/x.tightdb", Group::mode_ReadWrite); + g.get_table("foo"); // Add table "foo" + g.commit(); + } + + File::try_remove("/tmp/x.tightdb"); + { + // Create non-empty file without free-space tracking + Group g; + g.get_table("x"); + g.write("/tmp/x.tightdb"); + } + { + // See if we can modify with non-shared group + Group g("/tmp/x.tightdb", Group::mode_ReadWrite); + g.get_table("foo"); // Add table "foo" + g.commit(); + } + + File::try_remove("/tmp/x.tightdb"); + { + // Create empty file without free-space tracking + Group g; + g.write("/tmp/x.tightdb"); + } + { + // See if we can read and modify with shared group + SharedGroup sg("/tmp/x.tightdb"); + { + ReadTransaction rt(sg); + CHECK(!rt.has_table("foo")); + } + { + WriteTransaction wt(sg); + wt.get_table("foo"); // Add table "foo" + wt.commit(); + } + } + + File::try_remove("/tmp/x.tightdb"); + { + // Create non-empty file without free-space tracking + Group g; + g.get_table("x"); + g.write("/tmp/x.tightdb"); + } + { + // See if we can read and modify with shared group + SharedGroup sg("/tmp/x.tightdb"); + { + ReadTransaction rt(sg); + CHECK(!rt.has_table("foo")); + } + { + WriteTransaction wt(sg); + wt.get_table("foo"); // Add table "foo" + wt.commit(); + } + } + { + SharedGroup sg("/tmp/x.tightdb"); + { + ReadTransaction rt(sg); + CHECK(rt.has_table("foo")); + } + } + { + // Access using non-shared group + Group g("/tmp/x.tightdb", Group::mode_ReadWrite); + g.commit(); + } + { + // Modify using non-shared group + Group 
g("/tmp/x.tightdb", Group::mode_ReadWrite); + g.get_table("bar"); // Add table "bar" + g.commit(); + } + { + // See if we can still acces using shared group + SharedGroup sg("/tmp/x.tightdb"); + { + ReadTransaction rt(sg); + CHECK(rt.has_table("foo")); + CHECK(rt.has_table("bar")); + CHECK(!rt.has_table("baz")); + } + { + WriteTransaction wt(sg); + wt.get_table("baz"); // Add table "baz" + wt.commit(); + } + } + { + SharedGroup sg("/tmp/x.tightdb"); + { + ReadTransaction rt(sg); + CHECK(rt.has_table("baz")); + } + } + File::remove("/tmp/x.tightdb"); +} From 75d5518999ded37db1482117a0bafece66496a08 Mon Sep 17 00:00:00 2001 From: Kristian Spangsege Date: Sat, 17 Aug 2013 17:00:46 +0200 Subject: [PATCH 04/20] Correction of detection of changed arrays when updating accessors after Group::commit() --- src/tightdb/alloc_slab.cpp | 10 +- src/tightdb/alloc_slab.hpp | 5 +- src/tightdb/array.cpp | 118 ++++++++++------------- src/tightdb/array.hpp | 19 ++-- src/tightdb/array_binary.cpp | 5 +- src/tightdb/array_blob.cpp | 4 +- src/tightdb/array_string.cpp | 16 +-- src/tightdb/array_string_long.cpp | 4 +- src/tightdb/column.cpp | 30 +++--- src/tightdb/column.hpp | 10 +- src/tightdb/column_binary.cpp | 4 - src/tightdb/column_mixed.cpp | 66 ++++++------- src/tightdb/column_mixed.hpp | 2 +- src/tightdb/column_string.cpp | 12 +-- src/tightdb/column_string_enum.cpp | 16 ++- src/tightdb/column_string_enum.hpp | 2 +- src/tightdb/column_table.cpp | 26 +++-- src/tightdb/column_table.hpp | 82 +++++++++------- src/tightdb/file.cpp | 6 +- src/tightdb/group.cpp | 68 ++++++------- src/tightdb/group_shared.cpp | 1 + src/tightdb/group_writer.cpp | 2 +- src/tightdb/lang_bind_helper.cpp | 6 +- src/tightdb/replication.cpp | 6 -- src/tightdb/spec.cpp | 28 +++--- src/tightdb/spec.hpp | 8 +- src/tightdb/table.cpp | 150 +++++++++++++++++++---------- src/tightdb/table.hpp | 12 ++- src/tightdb/table_view.cpp | 5 +- test/testgroup.cpp | 6 +- 30 files changed, 383 insertions(+), 346 deletions(-) diff --git a/src/tightdb/alloc_slab.cpp b/src/tightdb/alloc_slab.cpp index ed9571f9880..995fa6e12b9 100644 --- a/src/tightdb/alloc_slab.cpp +++ b/src/tightdb/alloc_slab.cpp @@ -10,8 +10,6 @@ using namespace std; using namespace tightdb; -namespace tightdb { - const char SlabAlloc::default_header[24] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -410,7 +408,7 @@ void SlabAlloc::free_all() } -void SlabAlloc::remap(size_t file_size) +bool SlabAlloc::remap(size_t file_size) { TIGHTDB_ASSERT(m_free_read_only.is_empty()); TIGHTDB_ASSERT(m_slabs.size() == m_free_space.size()); @@ -421,6 +419,8 @@ void SlabAlloc::remap(size_t file_size) void* addr = m_file.remap(m_data, m_baseline, File::access_ReadOnly, file_size); + bool addr_changed = addr != m_data; + m_data = static_cast(addr); m_baseline = file_size; @@ -434,6 +434,8 @@ void SlabAlloc::remap(size_t file_size) m_slabs[i].ref_end = new_offset; } + + return addr_changed; } @@ -545,5 +547,3 @@ void SlabAlloc::print() const } #endif // TIGHTDB_DEBUG - -} //namespace tightdb diff --git a/src/tightdb/alloc_slab.hpp b/src/tightdb/alloc_slab.hpp index ee968bf3cb1..9b561d1140f 100644 --- a/src/tightdb/alloc_slab.hpp +++ b/src/tightdb/alloc_slab.hpp @@ -125,7 +125,10 @@ class SlabAlloc: public Allocator { /// Remap the attached file such that a prefix of the specified /// size becomes available in memory. If sucessfull, /// get_baseline() will return the specified new file size. 
- void remap(std::size_t file_size); + /// + /// \return True if, and only if the memory address of the first + /// mapped byte has changed. + bool remap(std::size_t file_size); MemRef alloc(std::size_t size) TIGHTDB_OVERRIDE; MemRef realloc_(ref_type, const char*, std::size_t size) TIGHTDB_OVERRIDE; diff --git a/src/tightdb/array.cpp b/src/tightdb/array.cpp index e1c054918cd..b2f92382579 100644 --- a/src/tightdb/array.cpp +++ b/src/tightdb/array.cpp @@ -18,36 +18,6 @@ #include #include -using namespace std; - -namespace { - -/// Takes a 64-bit value and returns the minimum number of bits needed -/// to fit the value. For alignment this is rounded up to nearest -/// log2. Posssible results {0, 1, 2, 4, 8, 16, 32, 64} -size_t bit_width(int64_t v) -{ - // FIXME: Assuming there is a 64-bit CPU reverse bitscan - // instruction and it is fast, then this function could be - // implemented simply as (v<2 ? v : - // 2<> 4) == 0) { - static const int8_t bits[] = {0, 1, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4}; - return bits[int8_t(v)]; - } - - // First flip all bits if bit 63 is set (will now always be zero) - if (v < 0) v = ~v; - - // Then check if bits 15-31 used (32b), 7-31 used (16b), else (8b) - return uint64_t(v) >> 31 ? 64 : uint64_t(v) >> 15 ? 32 : uint64_t(v) >> 7 ? 16 : 8; -} - -} // anonymous namespace - - -namespace tightdb { // Header format (8 bytes): // @@ -80,6 +50,37 @@ namespace tightdb { // 'capacity' is the total number of bytes allocated for this array // including the header. + +using namespace std; +using namespace tightdb; + +namespace { + +/// Takes a 64-bit value and returns the minimum number of bits needed +/// to fit the value. For alignment this is rounded up to nearest +/// log2. Posssible results {0, 1, 2, 4, 8, 16, 32, 64} +size_t bit_width(int64_t v) +{ + // FIXME: Assuming there is a 64-bit CPU reverse bitscan + // instruction and it is fast, then this function could be + // implemented simply as (v<2 ? v : + // 2<> 4) == 0) { + static const int8_t bits[] = {0, 1, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4}; + return bits[int8_t(v)]; + } + + // First flip all bits if bit 63 is set (will now always be zero) + if (v < 0) v = ~v; + + // Then check if bits 15-31 used (32b), 7-31 used (16b), else (8b) + return uint64_t(v) >> 31 ? 64 : uint64_t(v) >> 15 ? 32 : uint64_t(v) >> 7 ? 16 : 8; +} + +} // anonymous namespace + + void Array::init_from_ref(ref_type ref) TIGHTDB_NOEXCEPT { TIGHTDB_ASSERT(ref); @@ -162,34 +163,24 @@ void Array::set_type(Type type) set_header_hasrefs(has_refs); } -bool Array::update_from_parent() TIGHTDB_NOEXCEPT +bool Array::update_from_parent(size_t old_baseline) TIGHTDB_NOEXCEPT { TIGHTDB_ASSERT(is_attached()); + TIGHTDB_ASSERT(m_parent); - if (!m_parent) return false; + // Array nodes that a part of the previous version of the database + // will not be overwritte by Group::commit(). This is necessary + // for robustness in the face of abrupt termination of the + // process. It also means that we can be sure that an array + // remains unchanged across a commit if the new ref is equal to + // the old ref and the ref is below the previous basline. - // After commit to disk, the array may have moved - // so get ref from parent and see if it has changed ref_type new_ref = m_parent->get_child_ref(m_ndx_in_parent); + if (new_ref == m_ref && new_ref < old_baseline) + return false; // Has not changed - if (new_ref != m_ref) { - init_from_ref(new_ref); - return true; - } - - // FIXME: This early-out option is wrong. 
Equal 'refs' does in no - // way guarantee that the array has not been modified. - - // If the file has been remapped it might have - // moved to a new location - char* header = m_alloc.translate(m_ref); - char* data = get_data_from_header(header); - if (m_data != data) { - m_data = data; - return true; - } - - return false; // not modified + init_from_ref(new_ref); + return true; // Has changed } // Allocates space for 'count' items being between min and min in size, both inclusive. Crashes! Why? Todo/fixme @@ -618,6 +609,8 @@ size_t Array::FirstSetBit64(int64_t v) const } +namespace { + template inline int64_t LowerBits() { if (width == 1) @@ -692,6 +685,9 @@ template size_t FindZero(uint64_t v) return start; } +} // anonymous namesapce + + template bool Array::minmax(int64_t& result, size_t start, size_t end) const { if (end == size_t(-1)) @@ -1900,8 +1896,6 @@ void Array::stats(MemStats& stats) const #endif // TIGHTDB_DEBUG -} // namespace tightdb - namespace { @@ -1976,8 +1970,6 @@ inline int64_t get_direct(const char* data, size_t width, size_t ndx) TIGHTDB_NO template inline size_t lower_bound(const char* data, size_t size, int64_t value) TIGHTDB_NOEXCEPT { - using namespace tightdb; - size_t i = 0; size_t size_2 = size; while (0 < size_2) { @@ -1999,8 +1991,6 @@ inline size_t lower_bound(const char* data, size_t size, int64_t value) TIGHTDB_ template inline size_t upper_bound(const char* data, size_t size, int64_t value) TIGHTDB_NOEXCEPT { - using namespace tightdb; - size_t i = 0; size_t size_2 = size; while (0 < size_2) { @@ -2028,7 +2018,6 @@ inline size_t upper_bound(const char* data, size_t size, int64_t value) TIGHTDB_ template inline pair find_child(const char* offsets_header, size_t elem_ndx) TIGHTDB_NOEXCEPT { - using namespace tightdb; const char* offsets_data = Array::get_data_from_header(offsets_header); size_t offsets_size = Array::get_size_from_header(offsets_header); size_t child_ndx = upper_bound(offsets_data, offsets_size, elem_ndx); @@ -2041,17 +2030,15 @@ find_child(const char* offsets_header, size_t elem_ndx) TIGHTDB_NOEXCEPT template inline pair get_two_as_size(const char* header, size_t ndx) TIGHTDB_NOEXCEPT { - const char* data = tightdb::Array::get_data_from_header(header); - return make_pair(tightdb::to_size_t(get_direct(data, ndx+0)), - tightdb::to_size_t(get_direct(data, ndx+1))); + const char* data = Array::get_data_from_header(header); + return make_pair(to_size_t(get_direct(data, ndx+0)), + to_size_t(get_direct(data, ndx+1))); } } // anonymous namespace -namespace tightdb { - size_t Array::lower_bound_int(int64_t value) const TIGHTDB_NOEXCEPT { @@ -2800,6 +2787,3 @@ pair Array::get_size_pair(const char* header, size_t ndx) TIGHTD TIGHTDB_TEMPEX(p = ::get_two_as_size, width, (header, ndx)); return p; } - - -} //namespace tightdb diff --git a/src/tightdb/array.hpp b/src/tightdb/array.hpp index 940f2f8c6da..16b1c87e706 100644 --- a/src/tightdb/array.hpp +++ b/src/tightdb/array.hpp @@ -338,17 +338,16 @@ class Array: public ArrayParent { /// does nothing. void update_parent(); - /// When there is a chance that this array node has been modified - /// and possibly relocated, this function will update the accessor - /// such that it becomes valid again. Of course, this is allowed - /// only when the parent continues to exist and it continues to - /// have some child at the same index as the child that this - /// accessosr was originally attached to. 
Even then, one must be
-    /// carefull, because the new child at that index, may be a
-    /// completely different one in a logical sense.
+    /// Called in the context of Group::commit() to ensure that
+    /// attached accessors stay valid across a commit. Please note
+    /// that this works only for non-transactional commits. Accessors
+    /// obtained during a transaction are always detached when the
+    /// transaction ends.
     ///
-    /// FIXME: What is the point of this one? When can it ever be used safely?
-    bool update_from_parent() TIGHTDB_NOEXCEPT;
+    /// Returns true if, and only if the array has changed. If the
+    /// array has not changed, then its children are guaranteed to
+    /// also not have changed.
+    bool update_from_parent(std::size_t old_baseline) TIGHTDB_NOEXCEPT;
 
     /// Change the type of an already attached array node.
     ///
diff --git a/src/tightdb/array_binary.cpp b/src/tightdb/array_binary.cpp
index 368a33d305c..aa67a1a9512 100644
--- a/src/tightdb/array_binary.cpp
+++ b/src/tightdb/array_binary.cpp
@@ -6,8 +6,7 @@
 #include
 
 using namespace std;
-
-namespace tightdb {
+using namespace tightdb;
 
 
 ArrayBinary::ArrayBinary(ArrayParent* parent, size_t pndx, Allocator& alloc):
@@ -180,5 +179,3 @@ void ArrayBinary::to_dot(ostream& out, const char* title) const
 }
 
 #endif // TIGHTDB_DEBUG
-
-}
diff --git a/src/tightdb/array_blob.cpp b/src/tightdb/array_blob.cpp
index 1ebaa3993ac..79ad8e9e2a3 100644
--- a/src/tightdb/array_blob.cpp
+++ b/src/tightdb/array_blob.cpp
@@ -3,8 +3,8 @@
 #include
 
 using namespace std;
+using namespace tightdb;
 
-namespace tightdb {
 
 void ArrayBlob::replace(size_t begin, size_t end, const char* data, size_t size, bool add_zero_term)
 {
@@ -81,5 +81,3 @@ void ArrayBlob::to_dot(ostream& out, const char* title) const
 }
 
 #endif // TIGHTDB_DEBUG
-
-}
diff --git a/src/tightdb/array_string.cpp b/src/tightdb/array_string.cpp
index ed753c4f7e5..93f80d08f95 100644
--- a/src/tightdb/array_string.cpp
+++ b/src/tightdb/array_string.cpp
@@ -8,6 +8,8 @@
 #include
 
 using namespace std;
+using namespace tightdb;
+
 
 namespace {
 
const int max_width = 64;
 
// Thus, 0 < size < 256 implies that size < round_up(size). 
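// Editor's note -- worked examples, derived by tracing the function
// below (it fills all bits below the highest set bit, then adds one,
// so for 0 < size < 256 the result is always strictly greater):
//
//     round_up(1)   -> 4    (size < 2 branch: 1 << 2)
//     round_up(3)   -> 4    (3 stays 3, then +1)
//     round_up(5)   -> 8    (5 -> 7, then +1)
//     round_up(8)   -> 16   (8 -> 15, then +1)
//     round_up(100) -> 128  (100 -> 127, then +1)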
size_t round_up(size_t size) { - if (size < 2) return size << 2; + if (size < 2) + return size << 2; size |= size >> 1; size |= size >> 2; size |= size >> 4; @@ -30,9 +33,6 @@ size_t round_up(size_t size) } // anonymous namespace -namespace tightdb { - - void ArrayString::set(size_t ndx, StringData value) { TIGHTDB_ASSERT(ndx < m_size); @@ -374,10 +374,12 @@ void ArrayString::find_all(Array& result, StringData value, size_t add_offset, bool ArrayString::compare_string(const ArrayString& c) const { - if (c.size() != size()) return false; + if (c.size() != size()) + return false; for (size_t i = 0; i < size(); ++i) { - if (get(i) != c.get(i)) return false; + if (get(i) != c.get(i)) + return false; } return true; @@ -484,5 +486,3 @@ void ArrayString::to_dot(ostream& out, StringData title) const } #endif // TIGHTDB_DEBUG - -} diff --git a/src/tightdb/array_string_long.cpp b/src/tightdb/array_string_long.cpp index be8fc6fb536..cd313cd6738 100644 --- a/src/tightdb/array_string_long.cpp +++ b/src/tightdb/array_string_long.cpp @@ -7,8 +7,8 @@ #include using namespace std; +using namespace tightdb; -namespace tightdb { ArrayStringLong::ArrayStringLong(ArrayParent* parent, size_t pndx, Allocator& alloc): Array(type_HasRefs, parent, pndx, alloc), @@ -232,5 +232,3 @@ void ArrayStringLong::to_dot(ostream& out, StringData title) const } #endif // TIGHTDB_DEBUG - -} diff --git a/src/tightdb/column.cpp b/src/tightdb/column.cpp index f45fea91503..ea6d0454440 100644 --- a/src/tightdb/column.cpp +++ b/src/tightdb/column.cpp @@ -10,10 +10,10 @@ #include using namespace std; +using namespace tightdb; -namespace { -using namespace tightdb; +namespace { Column get_column_from_ref(Array& parent, size_t ndx) { @@ -231,10 +231,18 @@ bool callme_arrays(Array* a, size_t start, size_t end, size_t caller_offset, voi return true; } -} +} // anonymous namespace -namespace tightdb { +void ColumnBase::adjust_ndx_in_parent(int diff) TIGHTDB_NOEXCEPT +{ + m_array->adjust_ndx_in_parent(diff); +} + +void ColumnBase::update_from_parent(size_t old_baseline) TIGHTDB_NOEXCEPT +{ + m_array->update_from_parent(old_baseline); +} size_t ColumnBase::get_size_from_ref(ref_type ref, Allocator& alloc) TIGHTDB_NOEXCEPT { @@ -483,7 +491,9 @@ void Column::Increment64(int64_t value, size_t start, size_t end) void Column::IncrementIf(int64_t limit, int64_t value) { - if (root_is_leaf()) m_array->IncrementIf(limit, value); + if (root_is_leaf()) { + m_array->IncrementIf(limit, value); + } else { Array refs = NodeGetRefs(); size_t count = refs.size(); @@ -504,9 +514,8 @@ size_t Column::find_first(int64_t value, size_t start, size_t end) const size_t ref = m_array->get_ref(); return m_array->ColumnFind(value, ref, cache); } - else { - return TreeFind(value, start, end); - } + + return TreeFind(value, start, end); } void Column::find_all(Array& result, int64_t value, size_t caller_offset, size_t start, size_t end) const @@ -624,7 +633,8 @@ void ColumnBase::to_dot(ostream& out, StringData title) const out << "subgraph cluster_column" << ref << " {" << endl; out << " label = \"Column"; - if (0 < title.size()) out << "\\n'" << title << "'"; + if (0 < title.size()) + out << "\\n'" << title << "'"; out << "\";" << endl; array_to_dot(out, *m_array); @@ -674,5 +684,3 @@ MemStats Column::stats() const } #endif // TIGHTDB_DEBUG - -} diff --git a/src/tightdb/column.hpp b/src/tightdb/column.hpp index 0d95740a4e7..23429506123 100644 --- a/src/tightdb/column.hpp +++ b/src/tightdb/column.hpp @@ -69,8 +69,14 @@ class ColumnBase { virtual bool has_index() const 
TIGHTDB_NOEXCEPT { return false; } virtual void set_index_ref(ref_type, ArrayParent*, std::size_t) {} - virtual void adjust_ndx_in_parent(int diff) TIGHTDB_NOEXCEPT { m_array->adjust_ndx_in_parent(diff); } - virtual void update_from_parent() TIGHTDB_NOEXCEPT { m_array->update_from_parent(); } + virtual void adjust_ndx_in_parent(int diff) TIGHTDB_NOEXCEPT; + + /// Called in the context of Group::commit() to ensure that + /// attached table accessors stay valid across a commit. Please + /// note that this works only for non-transactional commits. Table + /// accessors obtained during a transaction are always detached + /// when the transaction ends. + virtual void update_from_parent(std::size_t old_baseline) TIGHTDB_NOEXCEPT; virtual void invalidate_subtables_virtual() {} diff --git a/src/tightdb/column_binary.cpp b/src/tightdb/column_binary.cpp index 705962e6471..06b210188f4 100644 --- a/src/tightdb/column_binary.cpp +++ b/src/tightdb/column_binary.cpp @@ -6,8 +6,6 @@ using namespace std; using namespace tightdb; -namespace tightdb { - ColumnBinary::ColumnBinary(Allocator& alloc) { m_array = new ArrayBinary(NULL, 0, alloc); @@ -217,5 +215,3 @@ void ColumnBinary::leaf_to_dot(ostream& out, const Array& array) const } #endif // TIGHTDB_DEBUG - -} diff --git a/src/tightdb/column_mixed.cpp b/src/tightdb/column_mixed.cpp index 001ddee1f2f..5f5f5816da3 100644 --- a/src/tightdb/column_mixed.cpp +++ b/src/tightdb/column_mixed.cpp @@ -1,8 +1,8 @@ #include using namespace std; +using namespace tightdb; -namespace tightdb { ColumnMixed::~ColumnMixed() { @@ -18,15 +18,15 @@ void ColumnMixed::destroy() m_array->destroy(); } -void ColumnMixed::update_from_parent() TIGHTDB_NOEXCEPT +void ColumnMixed::update_from_parent(size_t old_baseline) TIGHTDB_NOEXCEPT { - if (!m_array->update_from_parent()) + if (!m_array->update_from_parent(old_baseline)) return; - m_types->update_from_parent(); - m_refs->update_from_parent(); + m_types->update_from_parent(old_baseline); + m_refs->update_from_parent(old_baseline); if (m_data) - m_data->update_from_parent(); + m_data->update_from_parent(old_baseline); } @@ -268,37 +268,37 @@ bool ColumnMixed::compare_mixed(const ColumnMixed& c) const if (c.get_type(i) != type) return false; switch (type) { - case type_Int: - if (get_int(i) != c.get_int(i)) return false; - break; - case type_Bool: - if (get_bool(i) != c.get_bool(i)) return false; - break; - case type_Date: - if (get_date(i) != c.get_date(i)) return false; - break; - case type_Float: - if (get_float(i) != c.get_float(i)) return false; - break; - case type_Double: - if (get_double(i) != c.get_double(i)) return false; - break; - case type_String: - if (get_string(i) != c.get_string(i)) return false; - break; - case type_Binary: - if (get_binary(i) != c.get_binary(i)) return false; - break; - case type_Table: { + case type_Int: + if (get_int(i) != c.get_int(i)) return false; + break; + case type_Bool: + if (get_bool(i) != c.get_bool(i)) return false; + break; + case type_Date: + if (get_date(i) != c.get_date(i)) return false; + break; + case type_Float: + if (get_float(i) != c.get_float(i)) return false; + break; + case type_Double: + if (get_double(i) != c.get_double(i)) return false; + break; + case type_String: + if (get_string(i) != c.get_string(i)) return false; + break; + case type_Binary: + if (get_binary(i) != c.get_binary(i)) return false; + break; + case type_Table: { ConstTableRef t1 = get_subtable_ptr(i)->get_table_ref(); ConstTableRef t2 = c.get_subtable_ptr(i)->get_table_ref(); if (*t1 != *t2) return false; + 
break;
            }
-            break;
-        case type_Mixed:
-            TIGHTDB_ASSERT(false);
-            break;
+        case type_Mixed:
+            TIGHTDB_ASSERT(false);
+            break;
         }
     }
     return true;
@@ -364,5 +364,3 @@ void ColumnMixed::to_dot(ostream& out, StringData title) const
 }

 #endif // TIGHTDB_DEBUG
-
-} // namespace tightdb
diff --git a/src/tightdb/column_mixed.hpp b/src/tightdb/column_mixed.hpp
index e8c99033ce8..c8e2d30d0d9 100644
--- a/src/tightdb/column_mixed.hpp
+++ b/src/tightdb/column_mixed.hpp
@@ -69,7 +69,7 @@ class ColumnMixed: public ColumnBase {
     ~ColumnMixed();
     void destroy() TIGHTDB_OVERRIDE;

-    void update_from_parent() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE;
+    void update_from_parent(std::size_t old_baseline) TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE;

     DataType get_type(std::size_t ndx) const TIGHTDB_NOEXCEPT;
     std::size_t size() const TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE { return m_types->size(); }
diff --git a/src/tightdb/column_string.cpp b/src/tightdb/column_string.cpp
index f9adac103e8..dd0add26235 100644
--- a/src/tightdb/column_string.cpp
+++ b/src/tightdb/column_string.cpp
@@ -10,26 +10,26 @@
 #include

 using namespace std;
+using namespace tightdb;


 namespace {

-tightdb::Array::Type get_type_from_ref(tightdb::ref_type ref, tightdb::Allocator& alloc)
+Array::Type get_type_from_ref(ref_type ref, Allocator& alloc)
 {
     const char* header = alloc.translate(ref);
-    return tightdb::Array::get_type_from_header(header);
+    return Array::get_type_from_header(header);
 }

 // Getter function for string index
-tightdb::StringData get_string(void* column, size_t ndx)
+StringData get_string(void* column, size_t ndx)
 {
-    return static_cast<tightdb::AdaptiveStringColumn*>(column)->get(ndx);
+    return static_cast<AdaptiveStringColumn*>(column)->get(ndx);
 }

 } // anonymous namespace

-namespace tightdb {

 AdaptiveStringColumn::AdaptiveStringColumn(Allocator& alloc): m_index(0)
 {
@@ -532,5 +532,3 @@ void AdaptiveStringColumn::leaf_to_dot(ostream& out, const Array& array) const
 }

 #endif // TIGHTDB_DEBUG
-
-} // namespace tightdb
diff --git a/src/tightdb/column_string_enum.cpp b/src/tightdb/column_string_enum.cpp
index b1e1a52e052..f4f08880096 100644
--- a/src/tightdb/column_string_enum.cpp
+++ b/src/tightdb/column_string_enum.cpp
@@ -2,20 +2,20 @@
 #include

 using namespace std;
+using namespace tightdb;
+

 namespace {

 // Getter function for string index
-tightdb::StringData get_string(void* column, size_t ndx)
+StringData get_string(void* column, size_t ndx)
 {
-    return static_cast<tightdb::ColumnStringEnum*>(column)->get(ndx);
+    return static_cast<ColumnStringEnum*>(column)->get(ndx);
 }

 } // anonymous namespace

-namespace tightdb {
-
 ColumnStringEnum::ColumnStringEnum(ref_type keys, ref_type values, ArrayParent* parent,
                                    size_t ndx_in_parent, Allocator& alloc):
     Column(values, parent, ndx_in_parent+1, alloc), // Throws
@@ -43,10 +43,10 @@ void ColumnStringEnum::adjust_ndx_in_parent(int diff) TIGHTDB_NOEXCEPT
     Column::adjust_ndx_in_parent(diff);
 }

-void ColumnStringEnum::update_from_parent() TIGHTDB_NOEXCEPT
+void ColumnStringEnum::update_from_parent(size_t old_baseline) TIGHTDB_NOEXCEPT
 {
-    m_array->update_from_parent();
-    m_keys.update_from_parent();
+    m_array->update_from_parent(old_baseline);
+    m_keys.update_from_parent(old_baseline);
 }

 void ColumnStringEnum::add(StringData value)
@@ -265,5 +265,3 @@ void ColumnStringEnum::to_dot(ostream& out, StringData title) const
 }

 #endif // TIGHTDB_DEBUG
-
-}
diff --git a/src/tightdb/column_string_enum.hpp b/src/tightdb/column_string_enum.hpp
index 980885e5991..27ca27f0586 100644
--- a/src/tightdb/column_string_enum.hpp
+++ b/src/tightdb/column_string_enum.hpp
@@ -66,7 +66,7 @@ class ColumnStringEnum: public Column {
     //@{
     void
adjust_ndx_in_parent(int diff) TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE;
-    void update_from_parent() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE;
+    void update_from_parent(std::size_t old_baseline) TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE;

     // Index
     bool has_index() const TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE { return m_index != 0; }
diff --git a/src/tightdb/column_table.cpp b/src/tightdb/column_table.cpp
index 5fa8861fc5b..a6ba3419eb0 100644
--- a/src/tightdb/column_table.cpp
+++ b/src/tightdb/column_table.cpp
@@ -1,15 +1,23 @@
 #include

 using namespace std;
+using namespace tightdb;

-namespace tightdb {
+
+void ColumnSubtableParent::update_from_parent(size_t old_baseline) TIGHTDB_NOEXCEPT
+{
+    if (!m_array->update_from_parent(old_baseline))
+        return;
+    m_subtable_map.update_from_parent(old_baseline);
+}

 void ColumnSubtableParent::child_destroyed(size_t subtable_ndx)
 {
     m_subtable_map.remove(subtable_ndx);
     // Note that this column instance may be destroyed upon return
     // from Table::unbind_ref().
-    if (m_table && m_subtable_map.empty()) m_table->unbind_ref();
+    if (m_table && m_subtable_map.empty())
+        m_table->unbind_ref();
 }

 size_t ColumnTable::get_subtable_size(size_t ndx) const TIGHTDB_NOEXCEPT
@@ -20,7 +28,8 @@ size_t ColumnTable::get_subtable_size(size_t ndx) const TIGHTDB_NOEXCEPT
     TIGHTDB_ASSERT(ndx < size());

     ref_type columns_ref = get_as_ref(ndx);
-    if (columns_ref == 0) return 0;
+    if (columns_ref == 0)
+        return 0;

     ref_type first_col_ref = Array(columns_ref, 0, 0, get_alloc()).get_as_ref(0);
     return get_size_from_ref(first_col_ref, get_alloc());
@@ -105,7 +114,8 @@ void ColumnTable::move_last_over(size_t ndx)
 void ColumnTable::destroy_subtable(size_t ndx)
 {
     ref_type ref_columns = get_as_ref(ndx);
-    if (ref_columns == 0) return; // It was never created
+    if (ref_columns == 0)
+        return; // It was never created

     // Delete sub-tree
     Allocator& alloc = get_alloc();
@@ -116,11 +126,13 @@ void ColumnTable::destroy_subtable(size_t ndx)
 bool ColumnTable::compare_table(const ColumnTable& c) const
 {
     size_t n = size();
-    if (c.size() != n) return false;
+    if (c.size() != n)
+        return false;

     for (size_t i=0; i<n; ++i) {
         ConstTableRef t1 = get_subtable_ptr(i)->get_table_ref();
         ConstTableRef t2 = c.get_subtable_ptr(i)->get_table_ref();
-        if (!compare_subtable_rows(*t1, *t2)) return false;
+        if (!compare_subtable_rows(*t1, *t2))
+            return false;
     }
     return true;
 }
@@ -156,5 +168,3 @@ void ColumnTable::leaf_to_dot(ostream& out, const Array& array) const
 }

 #endif // TIGHTDB_DEBUG
-
-} // namespace tightdb
diff --git a/src/tightdb/column_table.hpp b/src/tightdb/column_table.hpp
index 68564ce64c8..162e30c523e 100644
--- a/src/tightdb/column_table.hpp
+++ b/src/tightdb/column_table.hpp
@@ -29,7 +29,7 @@ namespace tightdb {

 /// Base class for any type of column that can contain subtables.
class ColumnSubtableParent: public Column, public Table::Parent { public: - void update_from_parent() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE; + void update_from_parent(std::size_t old_baseline) TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE; void invalidate_subtables(); @@ -103,17 +103,17 @@ class ColumnSubtableParent: public Column, public Table::Parent { private: struct SubtableMap { - SubtableMap(Allocator& alloc): m_indexes(alloc), m_wrappers(alloc) {} + SubtableMap(Allocator& alloc): m_indexes(alloc), m_tables(alloc) {} ~SubtableMap(); bool empty() const TIGHTDB_NOEXCEPT { return !m_indexes.is_attached() || m_indexes.is_empty(); } Table* find(std::size_t subtable_ndx) const; - void insert(std::size_t subtable_ndx, Table* wrapper); + void insert(std::size_t subtable_ndx, Table*); void remove(std::size_t subtable_ndx); - void update_from_parents() TIGHTDB_NOEXCEPT; + void update_from_parent(std::size_t old_baseline) TIGHTDB_NOEXCEPT; void invalidate_subtables(); private: Array m_indexes; - Array m_wrappers; + Array m_tables; }; mutable SubtableMap m_subtable_map; @@ -123,8 +123,8 @@ class ColumnSubtableParent: public Column, public Table::Parent { class ColumnTable: public ColumnSubtableParent { public: - /// Create a subtable column wrapper and have it instantiate a new - /// underlying structure of arrays. + /// Create a subtable column accessor and have it instantiate a + /// new underlying structure of arrays. /// /// \param table If this column is used as part of a table you must /// pass a pointer to that table. Otherwise you must pass null. @@ -134,7 +134,7 @@ class ColumnTable: public ColumnSubtableParent { /// table. Otherwise you should pass zero. ColumnTable(Allocator&, const Table* table, std::size_t column_ndx, ref_type spec_ref); - /// Create a subtable column wrapper and attach it to a + /// Create a subtable column accessor and attach it to a /// preexisting underlying structure of arrays. /// /// \param table If this column is used as part of a table you must @@ -202,12 +202,6 @@ class ColumnTable: public ColumnSubtableParent { // Implementation -inline void ColumnSubtableParent::update_from_parent() TIGHTDB_NOEXCEPT -{ - if (!m_array->update_from_parent()) return; - m_subtable_map.update_from_parents(); -} - inline Table* ColumnSubtableParent::get_subtable_ptr(std::size_t subtable_ndx) const { TIGHTDB_ASSERT(subtable_ndx < size()); @@ -259,69 +253,77 @@ inline ColumnSubtableParent::SubtableMap::~SubtableMap() if (m_indexes.is_attached()) { TIGHTDB_ASSERT(m_indexes.is_empty()); m_indexes.destroy(); - m_wrappers.destroy(); + m_tables.destroy(); } } inline Table* ColumnSubtableParent::SubtableMap::find(std::size_t subtable_ndx) const { - if (!m_indexes.is_attached()) return 0; + if (!m_indexes.is_attached()) + return 0; std::size_t pos = m_indexes.find_first(subtable_ndx); - return pos != std::size_t(-1) ? 
reinterpret_cast<Table*>(m_wrappers.get(pos)) : 0;
+    if (pos == not_found)
+        return 0;
+    return reinterpret_cast<Table*>(uintptr_t(m_tables.get(pos)));
 }

-inline void ColumnSubtableParent::SubtableMap::insert(std::size_t subtable_ndx, Table* wrapper)
+inline void ColumnSubtableParent::SubtableMap::insert(std::size_t subtable_ndx, Table* table)
 {
     if (!m_indexes.is_attached()) {
         m_indexes.create(Array::type_Normal);
-        m_wrappers.create(Array::type_Normal);
+        m_tables.create(Array::type_Normal);
     }
     m_indexes.add(subtable_ndx);
-    m_wrappers.add(reinterpret_cast<int64_t>(wrapper));
+    m_tables.add(int64_t(reinterpret_cast<uintptr_t>(table)));
 }

 inline void ColumnSubtableParent::SubtableMap::remove(std::size_t subtable_ndx)
 {
     TIGHTDB_ASSERT(m_indexes.is_attached());
     std::size_t pos = m_indexes.find_first(subtable_ndx);
-    TIGHTDB_ASSERT(pos != std::size_t(-1));
-    // FIXME: It is a problem that Array as our most low-level array
-    // construct has too many features to deliver a erase() method
+    TIGHTDB_ASSERT(pos != not_found);
+    // FIXME: It is a problem that Array, as our most low-level array
+    // construct, has too many features to deliver an erase() method
     // that cannot be guaranteed to never throw.
     m_indexes.erase(pos);
-    m_wrappers.erase(pos);
+    m_tables.erase(pos);
 }

-inline void ColumnSubtableParent::SubtableMap::update_from_parents() TIGHTDB_NOEXCEPT
+inline void ColumnSubtableParent::SubtableMap::
+update_from_parent(std::size_t old_baseline) TIGHTDB_NOEXCEPT
 {
-    if (!m_indexes.is_attached()) return;
+    if (!m_indexes.is_attached())
+        return;

-    std::size_t n = m_wrappers.size();
+    std::size_t n = m_tables.size();
     for (std::size_t i = 0; i < n; ++i) {
-        Table* t = reinterpret_cast<Table*>(m_wrappers.get(i));
-        t->update_from_parent();
+        Table* t = reinterpret_cast<Table*>(uintptr_t(m_tables.get(i)));
+        t->update_from_parent(old_baseline);
     }
 }

 inline void ColumnSubtableParent::SubtableMap::invalidate_subtables()
 {
-    if (!m_indexes.is_attached()) return;
+    if (!m_indexes.is_attached())
+        return;

-    std::size_t n = m_wrappers.size();
+    std::size_t n = m_tables.size();
     for (std::size_t i=0; i<n; ++i) {
-        Table* t = reinterpret_cast<Table*>(m_wrappers.get(i));
+        Table* t = reinterpret_cast<Table*>(uintptr_t(m_tables.get(i)));
         t->invalidate();
     }

     m_indexes.clear(); // FIXME: Can we rely on Array::clear() never failing????
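// [Editorial note] A minimal standalone sketch of the pointer round-trip that
// SubtableMap uses above: Table* values are packed into an int64_t-backed
// Array by way of uintptr_t and recovered the same way. This assumes pointers
// fit in 64 bits (true on TightDB's target platforms); the helper names below
// are illustrative, not part of the patch.
//
//     #include <cstdint>
//
//     struct Table; // opaque, stands in for tightdb::Table
//
//     inline int64_t pack_table_ptr(Table* table)
//     {
//         // pointer -> integer; the int64_t(...) cast matches the Array payload type
//         return int64_t(reinterpret_cast<std::uintptr_t>(table));
//     }
//
//     inline Table* unpack_table_ptr(int64_t value)
//     {
//         // reverse of pack_table_ptr()
//         return reinterpret_cast<Table*>(std::uintptr_t(value));
//     }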
- m_wrappers.clear(); + m_tables.clear(); } inline ColumnSubtableParent::ColumnSubtableParent(Allocator& alloc, const Table* table, std::size_t column_ndx): Column(Array::type_HasRefs, alloc), m_table(table), m_index(column_ndx), - m_subtable_map(Allocator::get_default()) {} + m_subtable_map(Allocator::get_default()) +{ +} inline ColumnSubtableParent::ColumnSubtableParent(Allocator& alloc, const Table* table, std::size_t column_ndx, @@ -329,7 +331,9 @@ inline ColumnSubtableParent::ColumnSubtableParent(Allocator& alloc, ref_type ref): Column(ref, parent, ndx_in_parent, alloc), m_table(table), m_index(column_ndx), - m_subtable_map(Allocator::get_default()) {} + m_subtable_map(Allocator::get_default()) +{ +} inline void ColumnSubtableParent::update_child_ref(std::size_t subtable_ndx, ref_type new_ref) { @@ -381,13 +385,17 @@ std::size_t* ColumnSubtableParent::record_subtable_path(std::size_t* begin, inline ColumnTable::ColumnTable(Allocator& alloc, const Table* table, std::size_t column_ndx, ref_type spec_ref): - ColumnSubtableParent(alloc, table, column_ndx), m_spec_ref(spec_ref) {} + ColumnSubtableParent(alloc, table, column_ndx), m_spec_ref(spec_ref) +{ +} inline ColumnTable::ColumnTable(Allocator& alloc, const Table* table, std::size_t column_ndx, ArrayParent* parent, std::size_t ndx_in_parent, ref_type spec_ref, ref_type column_ref): ColumnSubtableParent(alloc, table, column_ndx, parent, ndx_in_parent, column_ref), - m_spec_ref(spec_ref) {} + m_spec_ref(spec_ref) +{ +} inline void ColumnTable::add(const Table* subtable) { diff --git a/src/tightdb/file.cpp b/src/tightdb/file.cpp index bfb1c917cfc..939534f11a7 100644 --- a/src/tightdb/file.cpp +++ b/src/tightdb/file.cpp @@ -182,6 +182,9 @@ string make_temp_dir() } +} // namespace tightdb + + void File::open(const string& path, AccessMode a, CreateMode c, int flags) { TIGHTDB_ASSERT(!is_attached()); @@ -886,6 +889,3 @@ bool File::is_removed() const #endif } - - -} // namespace tightdb diff --git a/src/tightdb/group.cpp b/src/tightdb/group.cpp index 806e20e92e7..18c0b3a5311 100644 --- a/src/tightdb/group.cpp +++ b/src/tightdb/group.cpp @@ -12,6 +12,7 @@ using namespace std; using namespace tightdb; + namespace { class Initialization { @@ -110,7 +111,6 @@ class FileOStream { } // anonymous namespace -namespace tightdb { void Group::open(const string& file_path, OpenMode mode) { @@ -193,13 +193,13 @@ void Group::init_from_ref(ref_type top_ref) // at all, and files that are not shared does not need version // info for free space. if (top_size > 2) { - TIGHTDB_ASSERT(top_size >= 4); + TIGHTDB_ASSERT(top_size == 4 || top_size == 5); size_t fp_ref = m_top.get_as_ref(2); size_t fl_ref = m_top.get_as_ref(3); m_free_positions.init_from_ref(fp_ref); m_free_lengths.init_from_ref(fl_ref); - if (top_size > 4) { + if (m_is_shared && top_size > 4) { TIGHTDB_ASSERT(top_size == 5); m_free_versions.init_from_ref(m_top.get_as_ref(4)); } @@ -353,13 +353,13 @@ void Group::commit() TIGHTDB_ASSERT(is_attached()); TIGHTDB_ASSERT(m_top.is_attached()); - // GroupWriter::commit() needs free space tracking information, so + // GroupWriter::commit() needs free-space tracking information, so // if the attached database does not contain it, we must add it // now. Empty (newly created) database files and database files - // created by Group::write() do not have free space tracking + // created by Group::write() do not have free-space tracking // information. 
if (m_free_positions.is_attached()) {
-        TIGHTDB_ASSERT(m_top.size() >= 2);
+        TIGHTDB_ASSERT(m_top.size() == 4 || m_top.size() == 5);
     }
     else {
         TIGHTDB_ASSERT(m_top.size() == 2);
@@ -387,8 +387,13 @@ void Group::commit()
     // Remap file if it has grown
     size_t new_file_size = out.get_file_size();
     TIGHTDB_ASSERT(new_file_size >= m_alloc.get_baseline());
-    if (new_file_size > m_alloc.get_baseline())
-        m_alloc.remap(new_file_size);
+    if (new_file_size > m_alloc.get_baseline()) {
+        if (m_alloc.remap(new_file_size)) {
+            // The file was mapped to a new address, so all array
+            // accessors must be updated.
+            old_baseline = 0;
+        }
+    }

     // Recursively update refs in all active tables (columns, arrays..)
     update_refs(top_ref, old_baseline);
@@ -401,6 +406,12 @@ void Group::commit()

 void Group::update_refs(ref_type top_ref, size_t old_baseline)
 {
+    TIGHTDB_ASSERT(!m_free_versions.is_attached());
+
+    // After Group::commit() we will always have free space tracking
+    // info.
+    TIGHTDB_ASSERT(m_top.size() == 4 || m_top.size() == 5);
+
     // Array nodes that are part of the previous version of the database
     // will not be overwritten by Group::commit(). This is necessary
     // for robustness in the face of abrupt termination of the
@@ -412,45 +423,24 @@ void Group::update_refs(ref_type top_ref, size_t old_baseline)
         return;

     m_top.init_from_ref(top_ref);
-    TIGHTDB_ASSERT(m_top.size() >= 2);

     // Now we can update its child arrays
-    m_table_names.update_from_parent(/*old_baseline*/);
-
-    // No free-info in serialized databases
-    // and version info is only in shared,
-    if (m_top.size() >= 4) {
-        m_free_positions.update_from_parent();
-        m_free_lengths.update_from_parent();
-    }
-    else {
-        m_free_positions.detach();
-        m_free_lengths.detach();
-    }
-    if (m_top.size() == 5) {
-        m_free_versions.update_from_parent();
-    }
-    else {
-        m_free_versions.detach();
-    }
+    m_table_names.update_from_parent(old_baseline);
+    m_free_positions.update_from_parent(old_baseline);
+    m_free_lengths.update_from_parent(old_baseline);

-    // if the tables have not been modfied we don't
-    // need to update cached tables
-    //
-    // FIXME: This early-out option is wrong. Equal 'refs' does in no
-    // way guarantee that the table has not been modified.
-    if (!m_tables.update_from_parent())
+    // If m_tables has not been modified we don't
+    // need to update attached table accessors
+    if (!m_tables.update_from_parent(old_baseline))
         return;

-    // FIXME: Be sure that the updating of the table accessors works recursivly
-    // FIXME: Probably move this to a new function
-
-    // Also update cached tables
+    // Update all attached table accessors including those attached to
+    // subtables.
size_t n = m_cached_tables.size(); for (size_t i = 0; i < n; ++i) { Table* t = reinterpret_cast(m_cached_tables.get(i)); if (t) { - t->update_from_parent(); + t->update_from_parent(old_baseline); } } } @@ -696,5 +686,3 @@ void Group::zero_free_space(size_t file_size, size_t readlock_version) } #endif // TIGHTDB_DEBUG - -} //namespace tightdb diff --git a/src/tightdb/group_shared.cpp b/src/tightdb/group_shared.cpp index 13852dd2a78..f50b4a1f6fd 100644 --- a/src/tightdb/group_shared.cpp +++ b/src/tightdb/group_shared.cpp @@ -43,6 +43,7 @@ struct SharedGroup::SharedInfo { ReadCount readers[32]; // has to be power of two }; + namespace { class ScopedMutexLock { diff --git a/src/tightdb/group_writer.cpp b/src/tightdb/group_writer.cpp index bde08d12dae..6b3e2a3d12d 100644 --- a/src/tightdb/group_writer.cpp +++ b/src/tightdb/group_writer.cpp @@ -414,7 +414,7 @@ size_t GroupWriter::extend_free_space(size_t requested_size) size_t last_ndx = to_size_t(positions.size()-1); size_t last_size = to_size_t(lengths[last_ndx]); size_t end = to_size_t(positions[last_ndx] + last_size); - size_t ver = to_size_t(is_shared ? versions[last_ndx] : 0); + size_t ver = is_shared ? to_size_t(versions[last_ndx]) : 0; if (end == old_file_size && ver == 0) { lengths.set(last_ndx, last_size + ext_size); return last_ndx; diff --git a/src/tightdb/lang_bind_helper.cpp b/src/tightdb/lang_bind_helper.cpp index e06de9f0b0f..f0e32bca586 100644 --- a/src/tightdb/lang_bind_helper.cpp +++ b/src/tightdb/lang_bind_helper.cpp @@ -2,8 +2,7 @@ #include using namespace std; - -namespace tightdb { +using namespace tightdb; Table* LangBindHelper::get_subtable_ptr_during_insert(Table* t, size_t col_ndx, size_t row_ndx) @@ -33,6 +32,3 @@ const char* LangBindHelper::get_data_type_name(DataType type) TIGHTDB_NOEXCEPT TIGHTDB_ASSERT(false); return "int"; } - - -} // namespace tightdb diff --git a/src/tightdb/replication.cpp b/src/tightdb/replication.cpp index c4216b38a66..5daff74cd5a 100644 --- a/src/tightdb/replication.cpp +++ b/src/tightdb/replication.cpp @@ -21,9 +21,6 @@ const size_t init_subtab_path_buf_size = 2*init_subtab_path_buf_levels - 1; } // anonymous namespace -namespace tightdb { - - Replication::Replication(): m_selected_table(0), m_selected_spec(0) { m_subtab_path_buf.set_size(init_subtab_path_buf_size); // Throws @@ -847,6 +844,3 @@ void Replication::apply_transact_log(InputStream& transact_log, Group& group) applier.apply(); // Throws } #endif - - -} // namespace tightdb diff --git a/src/tightdb/spec.cpp b/src/tightdb/spec.cpp index b6530e8f1e3..924bce37026 100644 --- a/src/tightdb/spec.cpp +++ b/src/tightdb/spec.cpp @@ -5,8 +5,8 @@ #endif using namespace std; +using namespace tightdb; -namespace tightdb { Spec::~Spec() { @@ -44,22 +44,23 @@ ref_type Spec::get_ref() const TIGHTDB_NOEXCEPT return m_top.get_ref(); } -void Spec::set_parent(ArrayParent* parent, size_t pndx) TIGHTDB_NOEXCEPT +void Spec::set_parent(ArrayParent* parent, size_t ndx_in_parent) TIGHTDB_NOEXCEPT { - m_top.set_parent(parent, pndx); + m_top.set_parent(parent, ndx_in_parent); } -bool Spec::update_from_parent() TIGHTDB_NOEXCEPT +void Spec::update_from_parent(size_t old_baseline) TIGHTDB_NOEXCEPT { - if (m_top.update_from_parent()) { - m_spec.update_from_parent(); - m_names.update_from_parent(); - if (m_top.size() == 3) { - m_subspecs.update_from_parent(); - } - return true; + if (!m_top.update_from_parent(old_baseline)) + return; + + m_spec.update_from_parent(old_baseline); + m_names.update_from_parent(old_baseline); + + if (m_top.size() > 2) { + 
TIGHTDB_ASSERT(m_top.size() == 3); + m_subspecs.update_from_parent(old_baseline); } - return false; } size_t Spec::add_column(DataType type, StringData name, ColumnType attr) @@ -530,6 +531,3 @@ void Spec::to_dot(ostream& out, StringData) const } #endif // TIGHTDB_DEBUG - - -} //namespace tightdb diff --git a/src/tightdb/spec.hpp b/src/tightdb/spec.hpp index 1dae6742115..44ffe9dada1 100644 --- a/src/tightdb/spec.hpp +++ b/src/tightdb/spec.hpp @@ -98,7 +98,13 @@ class Spec { ref_type get_ref() const TIGHTDB_NOEXCEPT; - bool update_from_parent() TIGHTDB_NOEXCEPT; + /// Called in the context of Group::commit() to ensure that + /// attached table accessors stay valid across a commit. Please + /// note that this works only for non-transactional commits. Table + /// accessors obtained during a transaction are always detached + /// when the transaction ends. + void update_from_parent(std::size_t old_baseline) TIGHTDB_NOEXCEPT; + void set_parent(ArrayParent*, std::size_t ndx_in_parent) TIGHTDB_NOEXCEPT; void set_column_type(std::size_t column_ndx, ColumnType type); diff --git a/src/tightdb/table.cpp b/src/tightdb/table.cpp index b3ee5c5c82f..238beb913be 100644 --- a/src/tightdb/table.cpp +++ b/src/tightdb/table.cpp @@ -19,15 +19,7 @@ #include using namespace std; - - -namespace tightdb { - -struct FakeParent: Table::Parent { - void update_child_ref(size_t, ref_type) TIGHTDB_OVERRIDE {} // Ignore - void child_destroyed(size_t) TIGHTDB_OVERRIDE {} // Ignore - ref_type get_child_ref(size_t) const TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE { return 0; } -}; +using namespace tightdb; // -- Table --------------------------------------------------------------------------------- @@ -798,30 +790,85 @@ ref_type Table::clone(Allocator& alloc) const // TODO: get rid of the Column* template parameter -Column& Table::get_column(size_t ndx) { return get_column(ndx); } -const Column& Table::get_column(size_t ndx) const TIGHTDB_NOEXCEPT { return get_column(ndx); } +Column& Table::get_column(size_t ndx) +{ + return get_column(ndx); +} -AdaptiveStringColumn& Table::get_column_string(size_t ndx) { return get_column(ndx); } -const AdaptiveStringColumn& Table::get_column_string(size_t ndx) const TIGHTDB_NOEXCEPT { return get_column(ndx); } +const Column& Table::get_column(size_t ndx) const TIGHTDB_NOEXCEPT +{ + return get_column(ndx); +} -ColumnStringEnum& Table::get_column_string_enum(size_t ndx) { return get_column(ndx); } -const ColumnStringEnum& Table::get_column_string_enum(size_t ndx) const TIGHTDB_NOEXCEPT { return get_column(ndx); } +AdaptiveStringColumn& Table::get_column_string(size_t ndx) +{ + return get_column(ndx); +} + +const AdaptiveStringColumn& Table::get_column_string(size_t ndx) const TIGHTDB_NOEXCEPT +{ + return get_column(ndx); +} + +ColumnStringEnum& Table::get_column_string_enum(size_t ndx) +{ + return get_column(ndx); +} + +const ColumnStringEnum& Table::get_column_string_enum(size_t ndx) const TIGHTDB_NOEXCEPT +{ + return get_column(ndx); +} -ColumnFloat& Table::get_column_float(size_t ndx) { return get_column(ndx); } -const ColumnFloat& Table::get_column_float(size_t ndx) const TIGHTDB_NOEXCEPT { return get_column(ndx); } +ColumnFloat& Table::get_column_float(size_t ndx) +{ + return get_column(ndx); +} -ColumnDouble& Table::get_column_double(size_t ndx) { return get_column(ndx); } -const ColumnDouble& Table::get_column_double(size_t ndx) const TIGHTDB_NOEXCEPT { return get_column(ndx); } +const ColumnFloat& Table::get_column_float(size_t ndx) const TIGHTDB_NOEXCEPT +{ + return get_column(ndx); +} 
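// [Editorial note] The angle-bracket contents of the accessors above and below
// were lost when this patch was converted to text ("return get_column(ndx);"
// does not compile as written). Based on the col_type_* names used elsewhere
// in this patch, the intended shape is presumably a typed wrapper like the
// following sketch; the template parameter names and the get_column_base()
// helper are assumptions, not confirmed by the patch itself.
//
//     template<class C, ColumnType type>
//     C& Table::get_column(std::size_t ndx)
//     {
//         // Fetch the cached ColumnBase and downcast it to the concrete
//         // column class; a debug-mode check of 'type' would go here.
//         ColumnBase& column = get_column_base(ndx);
//         return static_cast<C&>(column);
//     }
//
//     ColumnFloat& Table::get_column_float(std::size_t ndx)
//     {
//         return get_column<ColumnFloat, col_type_Float>(ndx);
//     }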
-ColumnBinary& Table::get_column_binary(size_t ndx) { return get_column(ndx); } -const ColumnBinary& Table::get_column_binary(size_t ndx) const TIGHTDB_NOEXCEPT { return get_column(ndx); } +ColumnDouble& Table::get_column_double(size_t ndx) +{ + return get_column(ndx); +} -ColumnTable &Table::get_column_table(size_t ndx) { return get_column(ndx); } -const ColumnTable &Table::get_column_table(size_t ndx) const TIGHTDB_NOEXCEPT { return get_column(ndx); } +const ColumnDouble& Table::get_column_double(size_t ndx) const TIGHTDB_NOEXCEPT +{ + return get_column(ndx); +} -ColumnMixed& Table::get_column_mixed(size_t ndx) { return get_column(ndx); } -const ColumnMixed& Table::get_column_mixed(size_t ndx) const TIGHTDB_NOEXCEPT { return get_column(ndx); } +ColumnBinary& Table::get_column_binary(size_t ndx) +{ + return get_column(ndx); +} +const ColumnBinary& Table::get_column_binary(size_t ndx) const TIGHTDB_NOEXCEPT +{ + return get_column(ndx); +} + +ColumnTable &Table::get_column_table(size_t ndx) +{ + return get_column(ndx); +} + +const ColumnTable &Table::get_column_table(size_t ndx) const TIGHTDB_NOEXCEPT +{ + return get_column(ndx); +} + +ColumnMixed& Table::get_column_mixed(size_t ndx) +{ + return get_column(ndx); +} + +const ColumnMixed& Table::get_column_mixed(size_t ndx) const TIGHTDB_NOEXCEPT +{ + return get_column(ndx); +} size_t Table::add_empty_row(size_t num_rows) @@ -2010,7 +2057,8 @@ void Table::optimize() // enumeration column. Since this involves changing the spec of // the table, it is not something we can do for a subtable with // shared spec. - if (has_shared_spec()) return; + if (has_shared_spec()) + return; Allocator& alloc = m_columns.get_alloc(); @@ -2022,7 +2070,8 @@ void Table::optimize() ref_type keys_ref, values_ref; bool res = column->auto_enumerate(keys_ref, values_ref); - if (!res) continue; + if (!res) + continue; // Add to spec and column refs m_spec_set.set_column_type(i, col_type_StringEnum); @@ -2038,7 +2087,8 @@ void Table::optimize() adjust_column_ndx_in_parent(i+1, 1); // Replace cached column - ColumnStringEnum* e = new ColumnStringEnum(keys_ref, values_ref, &m_columns, column_ref_ndx, alloc); + ColumnStringEnum* e = + new ColumnStringEnum(keys_ref, values_ref, &m_columns, column_ref_ndx, alloc); m_cols.set(i, intptr_t(e)); // Inherit any existing index @@ -2066,30 +2116,26 @@ void Table::adjust_column_ndx_in_parent(size_t column_ndx_begin, int diff) TIGHT } } -void Table::update_from_parent() TIGHTDB_NOEXCEPT +void Table::update_from_parent(size_t old_baseline) TIGHTDB_NOEXCEPT { + TIGHTDB_ASSERT(is_valid()); + // There is no top for sub-tables sharing spec if (m_top.is_attached()) { - if (!m_top.update_from_parent()) return; + if (!m_top.update_from_parent(old_baseline)) + return; } - m_spec_set.update_from_parent(); - if (!m_columns.update_from_parent()) return; + m_spec_set.update_from_parent(old_baseline); - // Update cached columns - size_t column_count = get_column_count(); - for (size_t i = 0; i < column_count; ++i) { - ColumnBase* column = reinterpret_cast(m_cols.get(i)); - column->update_from_parent(); - } + if (!m_columns.update_from_parent(old_baseline)) + return; - // Size may have changed - if (column_count == 0) { - m_size = 0; - } - else { - const ColumnBase* column = reinterpret_cast(m_cols.get(0)); - m_size = column->size(); + // Update column accessors + size_t n = m_cols.size(); + for (size_t i = 0; i < n; ++i) { + ColumnBase* column = reinterpret_cast(uintptr_t(m_cols.get(i))); + column->update_from_parent(old_baseline); } } @@ -2122,6 
+2168,7 @@ void Table::to_json(ostream& out) const out << "]"; } + namespace { inline void out_date(ostream& out, Date value) @@ -2155,6 +2202,7 @@ template void out_floats(ostream& out, T value) } // anonymous namespace + void Table::to_json_row(size_t row_ndx, ostream& out) const { out << "{"; @@ -2240,6 +2288,7 @@ void Table::to_json_row(size_t row_ndx, ostream& out) const namespace { + size_t chars_in_int(int64_t v) { size_t count = 0; @@ -2247,7 +2296,9 @@ size_t chars_in_int(int64_t v) ++count; return count+1; } -} + +} // anonymous namespace + void Table::to_string(ostream& out, size_t limit) const { @@ -2398,6 +2449,7 @@ void Table::to_string_header(ostream& out, vector& widths) const out << "\n"; } + namespace { inline void out_string(ostream& out, const string text, const size_t max_len) @@ -2417,7 +2469,8 @@ inline void out_table(ostream& out, const size_t len) out << "[" << len << "]"; } -} +} // anonymous namespace + void Table::to_string_row(size_t row_ndx, ostream& out, const vector& widths) const { @@ -2504,7 +2557,6 @@ void Table::to_string_row(size_t row_ndx, ostream& out, const vector& wi } - bool Table::compare_rows(const Table& t) const { // A wrapper for an empty subtable with shared spec may be created @@ -2836,5 +2888,3 @@ MemStats Table::stats() const #endif // TIGHTDB_DEBUG - -} // namespace tightdb diff --git a/src/tightdb/table.hpp b/src/tightdb/table.hpp index bda803698af..f1d4a8cc937 100644 --- a/src/tightdb/table.hpp +++ b/src/tightdb/table.hpp @@ -433,9 +433,15 @@ class Table { void cache_columns(); void clear_cached_columns(); + /// Called in the context of Group::commit() to ensure that + /// attached table accessors stay valid across a commit. Please + /// note that this works only for non-transactional commits. Table + /// accessors obtained during a transaction are always detached + /// when the transaction ends. + void update_from_parent(std::size_t old_baseline) TIGHTDB_NOEXCEPT; + // Specification void adjust_column_ndx_in_parent(std::size_t column_ndx_begin, int diff) TIGHTDB_NOEXCEPT; - void update_from_parent() TIGHTDB_NOEXCEPT; std::size_t do_add_column(DataType); void do_add_subcolumn(const std::vector& column_path, std::size_t pos, DataType); static void do_remove_column(Array& column_refs, const Spec::ColumnInfo&); @@ -466,10 +472,10 @@ class Table { /// recursively for subtables. When this function returns, /// is_valid() will return false. /// - /// This function may be called for a table wrapper that is + /// This function may be called for a table accessor that is /// already in the invalid state (idempotency). /// - /// It is also valid to call this function for a table wrapper + /// It is also valid to call this function for a table accessor /// that has not yet been marked as invalid, but whose underlying /// structure of arrays have changed in an unpredictable/unknown /// way. 
This generally happens when a modifying table operation
diff --git a/src/tightdb/table_view.cpp b/src/tightdb/table_view.cpp
index 7bbb8cc90e0..523ccade28f 100644
--- a/src/tightdb/table_view.cpp
+++ b/src/tightdb/table_view.cpp
@@ -3,8 +3,8 @@
 #include

 using namespace std;
+using namespace tightdb;

-namespace tightdb {

 // Searching
@@ -338,6 +338,3 @@ void TableView::clear()

     m_refs.clear();
 }
-
-
-} // namespace tightdb
diff --git a/test/testgroup.cpp b/test/testgroup.cpp
index 6b387505f9c..bb633fab68d 100644
--- a/test/testgroup.cpp
+++ b/test/testgroup.cpp
@@ -416,7 +416,7 @@ TEST(Group_Persist)

 #ifdef TIGHTDB_DEBUG
     db.Verify();
-#endif // TIGHTDB_DEBUG
+#endif

     CHECK_EQUAL(6, table->get_column_count());
     CHECK_EQUAL(1, table->size());
@@ -437,7 +437,7 @@ TEST(Group_Persist)

 #ifdef TIGHTDB_DEBUG
     db.Verify();
-#endif // TIGHTDB_DEBUG
+#endif

     CHECK_EQUAL(6, table->get_column_count());
     CHECK_EQUAL(1, table->size());
@@ -1054,5 +1054,5 @@ TEST(Group_ToDot)
     fs.close();
 }

-#endif //TIGHTDB_TO_DOT
+#endif // TIGHTDB_TO_DOT
 #endif // TIGHTDB_DEBUG

From a1d02ba7763af2695c339237a2229e1b07a9fa2a Mon Sep 17 00:00:00 2001
From: Kristian Spangsege
Date: Mon, 19 Aug 2013 06:19:21 +0200
Subject: [PATCH 05/20] Avoid clobbering the previous database version in
 GroupWriter::commit() in the non-transactional case. Eliminate the risk of
 extending the byte size of Group::m_free_positions in GroupWriter::commit()
 after using its exact size in a calculation. Eliminate unnecessary free-space
 fragmentation by stopping GroupWriter::reserve_free_space() from adjusting
 the size of the identified chunk when it is different from the requested
 size. GroupWriter::get_free_space() and GroupWriter::reserve_free_space() now
 share searching code, and therefore, both skip the first part of the list
 when searching for a chunk that is larger than 1024 bytes. Fixed an
 undetected free-space leak when deleting free-space version tracking in
 GroupWriter::commit(). More documentation. Many other improvements.

---
 src/tightdb/array.cpp        |  18 +-
 src/tightdb/array.hpp        | 149 +++++++++----
 src/tightdb/group.cpp        |  87 ++++----
 src/tightdb/group_writer.cpp | 417 +++++++++++++++++------------------
 src/tightdb/group_writer.hpp |  38 +++-
 src/tightdb/table.hpp        |  30 ++-
 src/tightdb/utilities.cpp    |   8 +
 7 files changed, 426 insertions(+), 321 deletions(-)

diff --git a/src/tightdb/array.cpp b/src/tightdb/array.cpp
index b2f92382579..4c746684765 100644
--- a/src/tightdb/array.cpp
+++ b/src/tightdb/array.cpp
@@ -1,6 +1,4 @@
 #include
-#include
-#include
 #include
 #include

@@ -1107,17 +1105,6 @@ size_t Array::count(int64_t value) const
     return count;
 }

-size_t Array::GetByteSize(bool align) const
-{
-    size_t size = CalcByteLen(m_size, m_width);
-    if (align) {
-        size_t rest = (~size & 0x7) + 1;
-        if (rest < 8)
-            size += rest; // 64-bit blocks
-    }
-    return size;
-}
-
 size_t Array::CalcByteLen(size_t count, size_t width) const
 {
     // FIXME: This arithmetic could overflow.
Consider using
@@ -1210,7 +1197,7 @@ void Array::copy_on_write()

     // Calculate size in bytes (plus a bit of matchcount room for expansion)
     size_t size = CalcByteLen(m_size, m_width);
-    size_t rest = (~size & 0x7)+1;
+    size_t rest = (~size & 0x7) + 1;
     if (rest < 8)
         size += rest; // 64bit blocks
     size_t new_size = size + 64;
@@ -1274,7 +1261,8 @@ void Array::alloc(size_t size, size_t width)
         if (capacity_bytes < needed_bytes) {
             size_t rest = (~needed_bytes & 0x7) + 1;
             capacity_bytes = needed_bytes;
-            if (rest < 8) capacity_bytes += rest; // 64bit align
+            if (rest < 8)
+                capacity_bytes += rest; // 64bit align
         }

         // Allocate and initialize header
diff --git a/src/tightdb/array.hpp b/src/tightdb/array.hpp
index 16b1c87e706..6e7a4b70282 100644
--- a/src/tightdb/array.hpp
+++ b/src/tightdb/array.hpp
@@ -41,6 +41,7 @@ Searching: The main finding function is:
 #include
 #include // std::size_t
 #include // memmove
+#include
 #include
 #include
 #include
@@ -402,7 +403,17 @@ class Array: public ArrayParent {
     int64_t operator[](std::size_t ndx) const TIGHTDB_NOEXCEPT { return get(ndx); }
     int64_t back() const TIGHTDB_NOEXCEPT;

+
+    /// Erase the element at the specified index, and move elements at
+    /// succeeding indexes to the next lower index.
+    ///
+    /// FIXME: Careful with this one. It does not destroy/deallocate
+    /// subarrays as clear() does. This difference is surprising and
+    /// highly counterintuitive.
     void erase(std::size_t ndx);
+
+    /// Erase every element in this array. Subarrays will be destroyed
+    /// recursively, and space allocated for subarrays will be freed.
     void clear();

     /// If necessary, expand the representation so that it can store
@@ -501,10 +512,12 @@ class Array: public ArrayParent {
     /// Returns the position in the target where the first byte of
     /// this array was written.
+    ///
+    /// The number of bytes that will be written by a non-recursive
+    /// invocation of this function is exactly the number returned by
+    /// get_byte_size().
     template std::size_t write(S& target, bool recurse = true, bool persist = false) const;

-    template void write_at(std::size_t pos, S& out) const;
-    std::size_t GetByteSize(bool align = false) const;
     std::vector ToVector() const;

     /// Compare two arrays for equality.
@@ -679,12 +692,6 @@ class Array: public ArrayParent {

     static Type get_type_from_header(const char*) TIGHTDB_NOEXCEPT;

-    /// Get the number of bytes currently in use by the specified
-    /// array. This includes the array header, but it does not include
-    /// allocated bytes corresponding to excess capacity. The result
-    /// is guaranteed to be a multiple of 8 (i.e., 64-bit aligned).
-    static std::size_t get_byte_size_from_header(const char*) TIGHTDB_NOEXCEPT;
-
 #ifdef TIGHTDB_DEBUG
     void print() const;
     void Verify() const;
@@ -785,6 +792,27 @@ class Array: public ArrayParent {
     static ref_type create_empty_array(Type, WidthType, Allocator&);
     static ref_type clone(const char* header, Allocator& alloc, Allocator& clone_alloc);

+    /// Get the address of the header of this array.
+    char* get_header() TIGHTDB_NOEXCEPT;
+
+    /// Get the number of bytes currently in use by this array. This
+    /// includes the array header, but it does not include allocated
+    /// bytes corresponding to excess capacity. The result is
+    /// guaranteed to be a multiple of 8 (i.e., 64-bit aligned).
+    ///
+    /// This number is exactly the number of bytes that will be
+    /// written by a non-recursive invocation of write().
+    std::size_t get_byte_size() const TIGHTDB_NOEXCEPT;
+
+    /// Same as get_byte_size().
+    static std::size_t get_byte_size_from_header(const char*) TIGHTDB_NOEXCEPT;
+
+    /// Get the maximum number of bytes that can be written by a
+    /// non-recursive invocation of write() on an array with the
+    /// specified number of elements, that is, the maximum value that
+    /// can be returned by get_byte_size().
+    static std::size_t get_max_byte_size(std::size_t num_elems) TIGHTDB_NOEXCEPT;
+
     void update_child_ref(std::size_t child_ndx, ref_type new_ref) TIGHTDB_OVERRIDE;
     ref_type get_child_ref(std::size_t child_ndx) const TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE;
@@ -803,6 +831,7 @@ class Array: public ArrayParent {
     int64_t m_lbound; // min number that can be stored with current m_width
     int64_t m_ubound; // max number that can be stored with current m_width

+    friend class SlabAlloc;
     friend class GroupWriter;
     friend class AdaptiveStringColumn;
 };
@@ -1273,38 +1302,83 @@ inline Array::Type Array::get_type_from_header(const char* header) TIGHTDB_NOEXC
 }

+inline char* Array::get_header() TIGHTDB_NOEXCEPT
+{
+    return get_header_from_data(m_data);
+}
+
+
+inline std::size_t Array::get_byte_size() const TIGHTDB_NOEXCEPT
+{
+    std::size_t num_bytes = 0;
+    const char* header = get_header_from_data(m_data);
+    switch (get_wtype_from_header(header)) {
+        case wtype_Bits: {
+            std::size_t num_bits = (m_size * m_width); // FIXME: Prone to overflow
+            num_bytes = num_bits / 8;
+            if (num_bits & 0x7)
+                ++num_bytes;
+            goto found;
+        }
+        case wtype_Multiply: {
+            num_bytes = m_size * m_width;
+            goto found;
+        }
+        case wtype_Ignore:
+            num_bytes = m_size;
+            goto found;
+    }
+    TIGHTDB_ASSERT(false);
+
+  found:
+    // Ensure 8-byte alignment
+    std::size_t rest = (~num_bytes & 0x7) + 1;
+    if (rest < 8)
+        num_bytes += rest;
+
+    num_bytes += header_size;
+
+    TIGHTDB_ASSERT(num_bytes <= get_capacity_from_header(header));
+
+    return num_bytes;
+}
+
+
 inline std::size_t Array::get_byte_size_from_header(const char* header) TIGHTDB_NOEXCEPT
 {
-    // Calculate full size of array in bytes, including padding
-    // for 64bit alignment (that may be composed of random bits)
+    std::size_t num_bytes = 0;
     std::size_t size = get_size_from_header(header);
-
-    // Adjust size to number of bytes
     switch (get_wtype_from_header(header)) {
         case wtype_Bits: {
             int width = get_width_from_header(header);
-            std::size_t bits = (size * width);
-            size = bits / 8;
-            if (bits & 0x7) ++size;
-            break;
+            std::size_t num_bits = (size * width); // FIXME: Prone to overflow
+            num_bytes = num_bits / 8;
+            if (num_bits & 0x7)
+                ++num_bytes;
+            goto found;
         }
         case wtype_Multiply: {
             int width = get_width_from_header(header);
-            size *= width;
-            break;
+            num_bytes = size * width;
+            goto found;
         }
         case wtype_Ignore:
-            break;
+            num_bytes = size;
+            goto found;
     }
+    TIGHTDB_ASSERT(false);
+
+  found:
+    // Ensure 8-byte alignment
+    std::size_t rest = (~num_bytes & 0x7) + 1;
+    if (rest < 8)
+        num_bytes += rest;

-    // Add bytes used for padding
-    const std::size_t rest = (~size & 0x7) + 1;
-    if (rest < 8) size += rest; // 64bit blocks
-    size += get_data_from_header(header) - header; // include header in total
+    num_bytes += header_size;

-    TIGHTDB_ASSERT(size <= get_capacity_from_header(header));
+    TIGHTDB_ASSERT(num_bytes <= get_capacity_from_header(header));

-    return size;
+    return num_bytes;
 }

@@ -1313,9 +1387,8 @@ inline void Array::init_header(char* header, bool is_leaf, bool has_refs, WidthT
 {
     // Note: Since the header layout contains unallocated bits and/or
     // bytes, it is important that we put the entire header into a
-    // well defined state initially.
Note also: The C++11 standard - // does not guarantee that int64_t is available on all platforms. - *reinterpret_cast(header) = 0; + // well defined state initially. + std::fill(header, header + header_size, 0); set_header_isleaf(is_leaf, header); set_header_hasrefs(has_refs, header); set_header_wtype(width_type, header); @@ -1378,25 +1451,13 @@ template std::size_t Array::write(S& out, bool recurse, bool persist) c // Write array const char* header = get_header_from_data(m_data); - std::size_t size = get_byte_size_from_header(header); + std::size_t size = get_byte_size(); std::size_t array_pos = out.write(header, size); TIGHTDB_ASSERT((array_pos & 0x7) == 0); /// 64-bit alignment return array_pos; } -template void Array::write_at(std::size_t pos, S& out) const -{ - TIGHTDB_ASSERT(is_attached()); - - // TODO: replace capacity with checksum - - // Write array - const char* header = get_header_from_data(m_data); - std::size_t size = get_byte_size_from_header(header); - out.write_at(pos, header, size); -} - inline ref_type Array::clone(Allocator& clone_alloc) const { const char* header = get_header_from_data(m_data); @@ -1422,6 +1483,12 @@ inline ref_type Array::create_empty_array(Type type, Allocator& alloc) return create_empty_array(type, wtype_Bits, alloc); // Throws } +inline std::size_t Array::get_max_byte_size(std::size_t num_elems) TIGHTDB_NOEXCEPT +{ + int max_bytes_per_elem = 8; + return header_size + num_elems * max_bytes_per_elem; // FIXME: Prone to overflow +} + inline void Array::update_parent() { if (m_parent) diff --git a/src/tightdb/group.cpp b/src/tightdb/group.cpp index 18c0b3a5311..176ed0a6a38 100644 --- a/src/tightdb/group.cpp +++ b/src/tightdb/group.cpp @@ -165,7 +165,7 @@ void Group::create() m_top.add(m_free_lengths.get_ref()); // We may have been attached to a newly created file, that is, to - // a file consisting only of a default header and possible some + // a file consisting only of a default header and possibly some // free space. In that case, we must add as free space, the size // of the file minus its header. if (m_alloc.is_attached()) { @@ -215,8 +215,6 @@ void Group::init_from_ref(ref_type top_ref) void Group::init_shared() { - // FIXME: Can this be done as part of update_from_shared()? - if (m_free_versions.is_attached()) { // If free space tracking is enabled // we just have to reset it @@ -360,6 +358,11 @@ void Group::commit() // information. 
if (m_free_positions.is_attached()) {
         TIGHTDB_ASSERT(m_top.size() == 4 || m_top.size() == 5);
+        if (m_top.size() > 4) {
+            // Delete free-list version information
+            Array::destroy(m_top.get_as_ref(4), m_top.get_alloc());
+            m_top.erase(4);
+        }
     }
     else {
         TIGHTDB_ASSERT(m_top.size() == 2);
@@ -379,7 +382,7 @@ void Group::commit()
     // mode we have to make sure that the group stays valid after
     // commit

-    // Mark all managed space as free
+    // Mark all managed space (beyond the attached file) as free
     m_alloc.free_all();

     size_t old_baseline = m_alloc.get_baseline();
@@ -539,50 +542,46 @@ void Group::Verify() const
     if (m_free_positions.is_attached()) {
         TIGHTDB_ASSERT(m_free_lengths.is_attached());

-        size_t count_p = m_free_positions.size();
-        size_t count_l = m_free_lengths.size();
-        TIGHTDB_ASSERT(count_p == count_l);
-
-        if (m_free_versions.is_attached()) {
-            TIGHTDB_ASSERT(count_p == m_free_versions.size());
-        }
-
-        if (count_p) {
-            // Check for alignment
-            for (size_t i = 0; i < count_p; ++i) {
-                size_t p = to_size_t(m_free_positions.get(i));
-                size_t l = to_size_t(m_free_lengths.get(i));
-                TIGHTDB_ASSERT((p & 0x7) == 0); // 64bit alignment
-                TIGHTDB_ASSERT((l & 0x7) == 0); // 64bit alignment
-            }
-
-            size_t filelen = m_alloc.get_baseline();
-
-            // Segments should be ordered and without overlap
-            for (size_t i = 0; i < count_p-1; ++i) {
-                size_t pos1 = to_size_t(m_free_positions.get(i));
-                size_t pos2 = to_size_t(m_free_positions.get(i+1));
-                TIGHTDB_ASSERT(pos1 < pos2);
-
-                size_t len1 = to_size_t(m_free_lengths.get(i));
-                TIGHTDB_ASSERT(len1 != 0);
-                TIGHTDB_ASSERT(len1 < filelen);
-
-                size_t end = pos1 + len1;
-                TIGHTDB_ASSERT(end <= pos2);
-            }
-
-            size_t lastlen = to_size_t(m_free_lengths.back());
-            TIGHTDB_ASSERT(lastlen != 0 && lastlen <= filelen);
-
-            size_t end = to_size_t(m_free_positions.back() + lastlen);
-            TIGHTDB_ASSERT(end <= filelen);
+        size_t n = m_free_positions.size();
+        TIGHTDB_ASSERT(n == m_free_lengths.size());
+
+        if (m_free_versions.is_attached())
+            TIGHTDB_ASSERT(n == m_free_versions.size());
+
+        // FIXME: What we really need here is the "logical" size of
+        // the file and not the real size. The real size may have
+        // changed without the free space information having been
+        // adjusted accordingly. This can happen, for example, if
+        // commit() fails before writing the new top-ref, but after
+        // having extended the file size. We currently do not have a
+        // concept of a logical file size, but if provided, it would
+        // have to be stored as part of a database version such that
+        // it is updated atomically together with the rest of the
+        // contents of the version.
+        size_t file_size = m_alloc.is_attached() ?
m_alloc.get_baseline() : 0;
+
+        size_t prev_end = 0;
+        for (size_t i = 0; i < n; ++i) {
+            size_t pos = to_size_t(m_free_positions.get(i));
+            size_t size = to_size_t(m_free_lengths.get(i));
+
+            TIGHTDB_ASSERT(pos < file_size);
+            TIGHTDB_ASSERT(size > 0);
+            TIGHTDB_ASSERT(pos + size <= file_size);
+            TIGHTDB_ASSERT(prev_end <= pos);
+
+            TIGHTDB_ASSERT(pos % 8 == 0); // 8-byte alignment
+            TIGHTDB_ASSERT(size % 8 == 0); // 8-byte alignment
+
+            prev_end = pos + size;
         }
     }

     // Verify tables
-    for (size_t i = 0; i < m_tables.size(); ++i) {
-        get_table_ptr(i)->Verify();
+    {
+        size_t n = m_tables.size();
+        for (size_t i = 0; i < n; ++i)
+            get_table_ptr(i)->Verify();
     }
 }
diff --git a/src/tightdb/group_writer.cpp b/src/tightdb/group_writer.cpp
index 6b3e2a3d12d..29f9215833a 100644
--- a/src/tightdb/group_writer.cpp
+++ b/src/tightdb/group_writer.cpp
@@ -36,92 +36,138 @@ size_t GroupWriter::commit(bool do_sync)
     TIGHTDB_ASSERT(fpositions.size() == flengths.size());
     TIGHTDB_ASSERT(!is_shared || fversions.size() == flengths.size());

-    // Ensure that the freelist arrays are are themselves added to
-    // (the allocator) free list
+    // Recursively write all changed arrays (but not 'top' and
+    // free-lists yet, as they are going to change along the way.) If
+    // free space is available in the attached database file, we use
+    // it, but this does not include space that has been released
+    // during the current transaction (or since the last commit), as
+    // that would lead to clobbering of the previous database version.
+    bool recurse = true, persist = true;
+    size_t names_pos = m_group.m_table_names.write(*this, recurse, persist);
+    size_t tables_pos = m_group.m_tables.write(*this, recurse, persist);
+
+    // We now have a bit of a chicken-and-egg problem. We need to
+    // write the free-lists to the file, but the act of writing them
+    // will consume free space, and thereby change the free-lists. To
+    // solve this problem, we calculate an upper bound on the amount
+    // of space required for all of the remaining arrays and allocate
+    // the space as one big chunk. This way we can finalize the
+    // free-lists before writing them to the file.
+    size_t max_free_list_size = fpositions.size();
+
+    // We need to add to the free-list any space that was freed during
+    // the current transaction, but to avoid clobbering the previous
+    // version, we cannot add it yet. Instead we simply account for
+    // the space required. Since we will modify the free-lists
+    // themselves, we must ensure that the original arrays used by the
+    // free-lists are counted as part of the space that was freed
+    // during the current transaction.
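// [Editorial note] A worked example of the upper bound described above,
// assuming the 8-byte array header implied by Array::get_max_byte_size()
// (header_size + 8 bytes per element); the concrete numbers are illustrative,
// not from the patch. For a shared group with 100 existing free-list entries
// and 20 chunks freed in this transaction:
//
//     max_free_list_size = 100 + 20 + 1 = 121  // +1 for the final reservation
//     num_free_lists     = 3                   // positions, lengths, versions
//     max_top_size       = 2 + 3 = 5
//     max_free_space_needed = (8 + 5*8) + 3 * (8 + 121*8)
//                           = 48 + 3 * 976 = 2976 bytes
//
// so commit() would reserve 2977 bytes (one extra byte, as explained below)
// as a single chunk before finalizing the free-lists.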
-    bool recurse = true, persist = true;
-    const size_t n_pos = m_group.m_table_names.write(*this, recurse, persist);
-    const size_t t_pos = m_group.m_tables.write(*this, recurse, persist);
-
-    // Add free space created during this transaction (or since last
-    // commit) to free lists
-    //
-    // FIXME: This is bad, because it opens the posibility of
-    // clobering the previous database version when we later write the
-    // remaining arrays into the file
-    const SlabAlloc::FreeSpace& free_space = m_group.m_alloc.get_free_read_only();
-    const size_t fcount = free_space.size();
-
-    for (size_t i = 0; i < fcount; ++i) {
-        SlabAlloc::FreeSpace::ConstCursor r = free_space[i];
-        add_free_space(to_size_t(r.ref), to_size_t(r.size), to_size_t(m_current_version));
+    fpositions.copy_on_write();
+    flengths.copy_on_write();
+    if (is_shared)
+        fversions.copy_on_write();
+    const SlabAlloc::FreeSpace& new_free_space = m_group.m_alloc.get_free_read_only();
+    max_free_list_size += new_free_space.size();
+
+    // The final allocation of free space (i.e., the call to
+    // reserve_free_space() below) may add an extra entry to the
+    // free-lists.
+    ++max_free_list_size;
+
+    int num_free_lists = is_shared ? 3 : 2;
+    int max_top_size = 2 + num_free_lists;
+    size_t max_free_space_needed = Array::get_max_byte_size(max_top_size) +
+        num_free_lists * Array::get_max_byte_size(max_free_list_size);
+
+    // Reserve space for remaining arrays. We ask for one extra byte
+    // beyond the maximum number that is required. This ensures that
+    // even if we end up using the maximum size possible, we still do
+    // not end up with a zero size free-space chunk as we deduct the
+    // actually used size from it.
+    pair<size_t, size_t> reserve = reserve_free_space(max_free_space_needed + 1);
+    size_t reserve_ndx = reserve.first;
+    size_t reserve_size = reserve.second;
+
+    // At this point we have allocated all the space we need, so we
+    // can add to the free-lists any free space created during the
+    // current transaction (or since last commit). Had we added it
+    // earlier, we would have risked clobbering the previous database
+    // version. Note, however, that this risk would only have been
+    // present in the non-transactional case where there is no version
+    // tracking on the free-space chunks.
+    {
+        const size_t n = new_free_space.size();
+        if (n > 0) {
+            for (size_t i = 0; i < n; ++i) {
+                SlabAlloc::FreeSpace::ConstCursor r = new_free_space[i];
+                size_t pos = to_size_t(r.ref);
+                size_t size = to_size_t(r.size);
+                // We always want to keep the list of free space in sorted order
+                // (by ascending position) to facilitate merge of adjacent
+                // segments. We can find the correct insert position by binary
+                // search
+                size_t ndx = fpositions.lower_bound_int(pos);
+                fpositions.insert(ndx, pos);
+                flengths.insert(ndx, size);
+                if (is_shared)
+                    fversions.insert(ndx, m_current_version);
+                if (ndx <= reserve_ndx)
+                    ++reserve_ndx;
+            }
+        }
     }

-    // We now have a bit of an chicken-and-egg problem. We need to write our free
-    // lists to the file, but the act of writing them will affect the amount
-    // of free space, changing them.
-
-    // To make sure we have room for top and free list we calculate the absolute
-    // largest size they can get:
-    // (64bit width + one possible ekstra entry per alloc and header)
-    const size_t free_count = fpositions.size() + 5;
-    const size_t top_max_size = (5 + 1) * 8;
-    const size_t flist_max_size = free_count * 8;
-    const size_t total_reserve = top_max_size + (flist_max_size * (is_shared ? 3 : 2));
-
-    // Reserve space for each block.
-    // Reserve space for each block. We explicitly ask for a bigger space than
-    // the blocks can occupy, so that later when we know the real size, we can
-    // adjust the segment size, without changing the width.
-    const size_t res_ndx = reserve_free_space(total_reserve);
-    const size_t res_pos = to_size_t(fpositions.get(res_ndx)); // top of reserved segments
-
-    // Get final sizes of free lists
-    const size_t fp_size = fpositions.GetByteSize(true);
-    const size_t fl_size = flengths.GetByteSize(true);
-    const size_t fv_size = is_shared ? fversions.GetByteSize(true) : 0;
-
-    // Calc write positions
-    const size_t fl_pos = res_pos + fp_size;
-    const size_t fv_pos = fl_pos + fl_size;
-    const size_t top_pos = fv_pos + fv_size;
-
-    // Update top to point to the reserved locations
-    top.set(0, n_pos);
-    top.set(1, t_pos);
-    top.set(2, res_pos);
-    top.set(3, fl_pos);
+    // Before we calculate the actual sizes of the free-list arrays,
+    // we must make sure that the final adjustments of the free lists
+    // (i.e., the deduction of the actually used space from the
+    // reserved chunk) will not change the byte-size of those arrays.
+    size_t reserve_pos = to_size_t(fpositions.get(reserve_ndx));
+    TIGHTDB_ASSERT(reserve_size > max_free_space_needed);
+    fpositions.ensure_minimum_width(reserve_pos + max_free_space_needed);
+
+    // Get final sizes of free-list arrays
+    size_t free_positions_size = fpositions.get_byte_size();
+    size_t free_sizes_size     = flengths.get_byte_size();
+    size_t free_versions_size  = is_shared ? fversions.get_byte_size() : 0;
+
+    // Calculate write positions
+    size_t free_positions_pos = reserve_pos;
+    size_t free_sizes_pos     = free_positions_pos + free_positions_size;
+    size_t free_versions_pos  = free_sizes_pos + free_sizes_size;
+    size_t top_pos            = free_versions_pos + free_versions_size;
+
+    // Update top to point to the calculated positions
+    top.set(0, names_pos);
+    top.set(1, tables_pos);
+    top.set(2, free_positions_pos);
+    top.set(3, free_sizes_pos);
     if (is_shared)
-        top.set(4, fv_pos);
-    else if (top.size() == 5)
-        top.erase(4); // versions
+        top.set(4, free_versions_pos);
 
     // Get final sizes
-    size_t top_size = top.GetByteSize(true);
+    size_t top_size = top.get_byte_size();
     size_t end_pos = top_pos + top_size;
-    size_t rest = total_reserve - (end_pos - res_pos);
-
-    // Set the correct values for rest space
-    fpositions.set(res_ndx, end_pos);
-    flengths.set(res_ndx, rest);
-
-    // Write free lists
-    fpositions.write_at(res_pos, *this);
-    flengths.write_at(fl_pos, *this);
+    TIGHTDB_ASSERT(end_pos <= reserve_pos + max_free_space_needed);
+
+    // Deduct the used space from the reserved chunk. Note that we
+    // have made sure that the remaining size is never zero. Also, by
+    // the call to fpositions.ensure_minimum_width() above, we have
+    // made sure that fpositions has the capacity to store the new
+    // larger value without reallocation.
+    size_t rest = reserve_pos + reserve_size - end_pos;
+    TIGHTDB_ASSERT(rest > 0);
+    fpositions.set(reserve_ndx, end_pos);
+    flengths.set(reserve_ndx, rest);
+
+    // The free-lists now have their final form, so we can write them
+    // to the file
+    write_at(free_positions_pos, fpositions.get_header(), free_positions_size);
+    write_at(free_sizes_pos, flengths.get_header(), free_sizes_size);
     if (is_shared)
-        fversions.write_at(fv_pos, *this);
+        write_at(free_versions_pos, fversions.get_header(), free_versions_size);
 
     // Write top
-    top.write_at(top_pos, *this);
+    write_at(top_pos, top.get_header(), top_size);
 
     // In swap-only mode, we just use the file as backing for the shared
     // memory. So we never actually flush the data to disk (the OS may do
@@ -202,7 +248,7 @@ void GroupWriter::merge_free_space()
     size_t n = lengths.size() - 1;
     for (size_t i = 0; i < n; ++i) {
-        size_t i2 = i+1;
+        size_t i2 = i + 1;
         size_t pos1  = to_size_t(positions.get(i));
         size_t size1 = to_size_t(lengths.get(i));
         size_t pos2  = to_size_t(positions.get(i2));
@@ -233,167 +279,121 @@ void GroupWriter::merge_free_space()
 }
 
-void GroupWriter::add_free_space(size_t pos, size_t size, size_t version)
+size_t GroupWriter::get_free_space(size_t size)
 {
-    Array& positions = m_group.m_free_positions;
-    Array& lengths   = m_group.m_free_lengths;
-    Array& versions  = m_group.m_free_versions;
-    bool is_shared = m_group.m_is_shared;
+    TIGHTDB_ASSERT(size % 8 == 0); // 8-byte alignment
+    TIGHTDB_ASSERT(m_file_map.get_size() % 8 == 0); // 8-byte alignment
 
-    // We always want to keep the list of free space in
-    // sorted order (by position) to facilitate merge of
-    // adjecendant segments. We can find the correct
-    // insert postion by binary search
-    size_t p = positions.lower_bound_int(pos);
+    pair<size_t, size_t> p = reserve_free_space(size);
 
-    if (p == positions.size()) {
-        positions.add(pos);
-        lengths.add(size);
-        if (is_shared)
-            versions.add(version);
-    }
-    else {
-        positions.insert(p, pos);
-        lengths.insert(p, size);
-        if (is_shared)
-            versions.insert(p, version);
-    }
-}
-
-
-size_t GroupWriter::reserve_free_space(size_t size)
-{
     Array& positions = m_group.m_free_positions;
     Array& lengths   = m_group.m_free_lengths;
     Array& versions  = m_group.m_free_versions;
     bool is_shared = m_group.m_is_shared;
 
-    // Do we have a free space we can reuse?
-    size_t ndx = not_found;
-    size_t n = lengths.size();
-    for (size_t i = 0; i < n; ++i) {
-        size_t free_size = to_size_t(lengths.get(i));
-        if (size <= free_size) {
-            // Only blocks that are not occupied by current
-            // readers are allowed to be used.
-            if (is_shared) {
-                size_t v = to_size_t(versions.get(i));
-                if (v >= m_readlock_version)
-                    continue;
-            }
-
-            // Match found!
-            ndx = i;
-            break;
-        }
-    }
+    // Claim space from identified chunk
+    size_t chunk_ndx  = p.first;
+    size_t chunk_pos  = to_size_t(positions.get(chunk_ndx));
+    size_t chunk_size = p.second;
+    TIGHTDB_ASSERT(chunk_size >= size);
 
-    if (ndx == not_found) {
-        // No free space, so we have to extend the file.
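// A minimal sketch (not part of the patch) of the version-guarded first-fit
// scan that reserve_free_space() performs, reduced to a free-standing
// function. The names and the vector-based free list are simplifications;
// the real code reads sizes and versions from Array-backed lists.

#include <cstddef>
#include <vector>

struct FreeChunk { std::size_t pos, size, version; };

std::size_t find_first_fit(const std::vector<FreeChunk>& free_list, std::size_t size,
                           bool is_shared, std::size_t readlock_version)
{
    for (std::size_t i = 0; i < free_list.size(); ++i) {
        if (free_list[i].size < size)
            continue; // too small
        // Chunks that may still be read by an older transaction are off-limits
        if (is_shared && free_list[i].version >= readlock_version)
            continue;
        return i; // match found
    }
    return std::size_t(-1); // no fit; the caller must extend the file
}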
-        ndx = extend_free_space(size);
+    size_t rest = chunk_size - size;
+    if (rest > 0) {
+        positions.set(chunk_ndx, chunk_pos + size); // FIXME: Undefined conversion to signed
+        lengths.set(chunk_ndx, rest); // FIXME: Undefined conversion to signed
     }
-
-    // Split segment so we get exactly what was asked for
-    size_t free_size = to_size_t(lengths.get(ndx));
-    if (size != free_size) {
-        lengths.set(ndx, size);
-
-        size_t pos = to_size_t(positions.get(ndx)) + size;
-        size_t rest = free_size - size;
-        positions.insert(ndx+1, pos);
-        lengths.insert(ndx+1, rest);
+    else {
+        positions.erase(chunk_ndx);
+        lengths.erase(chunk_ndx);
         if (is_shared)
-            versions.insert(ndx+1, 0);
+            versions.erase(chunk_ndx);
     }
 
-    return ndx;
+    return chunk_pos;
 }
 
 
-size_t GroupWriter::get_free_space(size_t size)
+pair<size_t, size_t> GroupWriter::reserve_free_space(size_t size)
 {
-    TIGHTDB_ASSERT((size & 0x7) == 0); // 64-bit alignment
-    TIGHTDB_ASSERT((m_file_map.get_size() & 0x7) == 0); // 64-bit alignment
-
-    Array& positions = m_group.m_free_positions;
     Array& lengths   = m_group.m_free_lengths;
     Array& versions  = m_group.m_free_versions;
     bool is_shared = m_group.m_is_shared;
 
-    size_t count = lengths.size();
+    size_t end = lengths.size();
 
     // Since we do a 'first fit' search, the top pieces are likely
     // to get smaller and smaller. So if we are looking for a bigger piece
     // we may find it faster by looking further down in the list.
-    size_t start = size < 1024 ? 0 : count / 2;
+    size_t begin = size < 1024 ? 0 : end / 2;
 
     // Do we have a free space we can reuse?
-    for (size_t i = start; i < count; ++i) {
-        size_t free_size = to_size_t(lengths.get(i));
-        if (size <= free_size) {
-            // Only blocks that are not occupied by current
-            // readers are allowed to be used.
+    for (size_t i = begin; i != end; ++i) {
+        size_t chunk_size = to_size_t(lengths.get(i));
+        if (chunk_size >= size) {
+            // Only blocks that are not occupied by current readers
+            // are allowed to be used.
             if (is_shared) {
-                size_t v = to_size_t(versions.get(i));
-                if (v >= m_readlock_version)
+                size_t ver = to_size_t(versions.get(i));
+                if (ver >= m_readlock_version)
                     continue;
             }
 
-            size_t pos = to_size_t(positions.get(i));
-
-            // Update free list
-            size_t rest = free_size - size;
-            if (rest == 0) {
-                positions.erase(i);
-                lengths.erase(i);
-                if (is_shared)
-                    versions.erase(i);
-            }
-            else {
-                lengths.set(i, rest);
-                positions.set(i, pos + size);
-            }
-
-            return pos;
+            // Match found!
+            return make_pair(i, chunk_size);
         }
     }
 
-    // No free space, so we have to expand the file.
-    size_t old_file_size = m_file_map.get_size();
-    size_t ext_pos = extend_free_space(size);
-
-    // Claim space from new extension
-    size_t end = old_file_size + size;
-    size_t rest = m_file_map.get_size() - end;
-    if (rest) {
-        positions.set(ext_pos, end);
-        lengths.set(ext_pos, rest);
-    }
-    else {
-        positions.erase(ext_pos);
-        lengths.erase(ext_pos);
-        if (is_shared)
-            versions.erase(ext_pos);
-    }
-
-    return old_file_size;
+    // No free space, so we have to extend the file.
+    return extend_free_space(size);
 }
 
 
-size_t GroupWriter::extend_free_space(size_t requested_size)
+pair<size_t, size_t> GroupWriter::extend_free_space(size_t requested_size)
 {
     Array& positions = m_group.m_free_positions;
     Array& lengths   = m_group.m_free_lengths;
     Array& versions  = m_group.m_free_versions;
-    const bool is_shared = m_group.m_is_shared;
+    bool is_shared = m_group.m_is_shared;
+
+    // FIXME: What we really need here is the "logical" size of the
+    // file and not the real size. The real size may have changed
+    // without the free space information having been adjusted
+    // accordingly. This can happen, for example, if commit() fails
+    // before writing the new top-ref, but after having extended the
+    // file size. We currently do not have a concept of a logical file
+    // size, but if provided, it would have to be stored as part of a
+    // database version such that it is updated atomically together
+    // with the rest of the contents of the version.
+    size_t file_size = m_file_map.get_size();
+
+    bool extend_last_chunk = false;
+    size_t last_chunk_size;
+    if (!positions.is_empty()) {
+        bool in_use = false;
+        if (is_shared) {
+            size_t ver = to_size_t(versions.back());
+            if (ver >= m_readlock_version)
+                in_use = true;
+        }
+        if (!in_use) {
+            size_t last_pos  = to_size_t(positions.back());
+            size_t last_size = to_size_t(lengths.back());
+            TIGHTDB_ASSERT(last_size < requested_size);
+            TIGHTDB_ASSERT(last_pos + last_size <= file_size);
+            if (last_pos + last_size == file_size) {
+                extend_last_chunk = true;
+                last_chunk_size = last_size;
+                requested_size -= last_size;
+            }
+        }
+    }
 
     // we always expand megabytes at a time, both for
     // performance and to avoid excess fragmentation
     const size_t megabyte = 1024 * 1024;
-    const size_t old_file_size = m_file_map.get_size();
-    const size_t needed_size = old_file_size + requested_size;
+    const size_t needed_size = file_size + requested_size;
     const size_t rest = needed_size % megabyte;
-    const size_t new_file_size = rest ? (needed_size + (megabyte - rest)) : needed_size;
+    const size_t new_file_size = rest > 0 ? (needed_size + (megabyte - rest)) : needed_size;
 
     // Extend the file
     m_alloc.m_file.alloc(0, new_file_size); // Throws
@@ -402,35 +402,34 @@ size_t GroupWriter::extend_free_space(size_t requested_size)
     // fact, is seems like it acheives nothing at all, because only if
     // the new top 'ref' is successfully instated will we need to see
     // a bigger file on disk. On the other hand, if it does acheive
-    // something, what exactly is that?
+    // something, what exactly is that? On the other hand, if it really
+    // must stay, it should at least be skipped when
+    // SharedGroup::durability_MemOnly is selected.
     m_alloc.m_file.sync();
 
     m_file_map.remap(m_alloc.m_file, File::access_ReadWrite, new_file_size);
 
-    size_t ext_size = new_file_size - old_file_size;
-
-    // See if we can merge in new space
-    if (!positions.is_empty()) {
-        size_t last_ndx = to_size_t(positions.size()-1);
-        size_t last_size = to_size_t(lengths[last_ndx]);
-        size_t end = to_size_t(positions[last_ndx] + last_size);
-        size_t ver = is_shared ? to_size_t(versions[last_ndx]) : 0;
-        if (end == old_file_size && ver == 0) {
-            lengths.set(last_ndx, last_size + ext_size);
-            return last_ndx;
-        }
+    size_t chunk_ndx  = positions.size();
+    size_t chunk_size = new_file_size - file_size;
+    if (extend_last_chunk) {
+        --chunk_ndx;
+        chunk_size += last_chunk_size;
+        TIGHTDB_ASSERT(chunk_size % 8 == 0); // 8-byte alignment
+        lengths.set(chunk_ndx, chunk_size);
+    }
+    else { // Else add new free space
+        TIGHTDB_ASSERT(chunk_size % 8 == 0); // 8-byte alignment
+        positions.add(file_size);
+        lengths.add(chunk_size);
+        if (is_shared)
+            versions.add(0); // new space is always free for writing
     }
 
-    // Else add new free space
-    positions.add(old_file_size);
-    lengths.add(ext_size);
-    if (is_shared)
-        versions.add(0); // new space is always free for writing
-
-    return positions.size() - 1;
+    return make_pair(chunk_ndx, chunk_size);
 }
 
+
 #ifdef TIGHTDB_DEBUG
 
 void GroupWriter::dump()
diff --git a/src/tightdb/group_writer.hpp b/src/tightdb/group_writer.hpp
index 683fadf3834..5caca961cb1 100644
--- a/src/tightdb/group_writer.hpp
+++ b/src/tightdb/group_writer.hpp
@@ -21,11 +21,12 @@
 #define TIGHTDB_GROUP_WRITER_HPP
 
 #include // unint8_t etc
-#include // size_t
+#include
 
 #include
 #include
 
+
 namespace tightdb {
 
 // Pre-declarations
@@ -65,11 +66,38 @@ class GroupWriter {
     // Controlled update of physical medium
     void sync(uint64_t top_pos);
 
+    /// Allocate a chunk of free space of the specified size. The
+    /// specified size must be 8-byte aligned. Extend the file if
+    /// required. The returned chunk is removed from the amount of
+    /// remaining free space.
+    ///
+    /// \return The position within the database file of the allocated
+    /// chunk.
     std::size_t get_free_space(std::size_t size);
-    std::size_t reserve_free_space(std::size_t size);
-    void add_free_space(std::size_t pos, std::size_t size, std::size_t version = 0);
-    void merge_free_space();
-    std::size_t extend_free_space(std::size_t requested_size);
+
+    /// Find a block of free space that is at least as big as the
+    /// specified size. The specified size does not need to be 8-byte
+    /// aligned. Extend the file if required. The returned chunk is
+    /// not removed from the amount of remaining free space. This
+    /// function guarantees that it will add at most one entry to the
+    /// free-lists.
+    ///
+    /// \return A pair (`chunk_ndx`, `chunk_size`) where `chunk_ndx`
+    /// is the index of a chunk whose size is at least the requested
+    /// size, and `chunk_size` is the size of that chunk.
+    std::pair<std::size_t, std::size_t> reserve_free_space(std::size_t size);
+
+    /// Extend the file to ensure that a chunk of free space of the
+    /// specified size is available. The specified size does not need
+    /// to be 8-byte aligned. This function guarantees that it will
+    /// add at most one entry to the free-lists.
+    ///
+    /// \return A pair (`chunk_ndx`, `chunk_size`) where `chunk_ndx`
+    /// is the index of a chunk whose size is at least the requested
+    /// size, and `chunk_size` is the size of that chunk.
+    std::pair<std::size_t, std::size_t> extend_free_space(std::size_t requested_size);
+
+    void merge_free_space();
 };
diff --git a/src/tightdb/table.hpp b/src/tightdb/table.hpp
index f1d4a8cc937..bcbc0427780 100644
--- a/src/tightdb/table.hpp
+++ b/src/tightdb/table.hpp
@@ -114,13 +114,13 @@ class Table {
     /// any attempt at accessing such a failed group.
     ///
     /// FIXME: The C++ documentation must state that if any modifying
-    /// operation on a group (incl. tables, subtables, and specs), or
-    /// on a free standing table (incl. subtables and specs), then any
-    /// further access to that group (except ~Group()) or freestanding
-    /// table (except ~Table()) has undefined behaviour and is
-    /// considered an error on behalf of the application. Note that
-    /// even Table::is_valid() is disallowed in this case.
-    bool is_valid() const TIGHTDB_NOEXCEPT { return m_columns.has_parent(); }
+    /// operation on a group (incl. tables, subtables, and specs) or
+    /// on a free standing table (incl. subtables and specs) fails,
+    /// then any further access to that group (except ~Group()) or
+    /// freestanding table (except ~Table()) has undefined behaviour
+    /// and is considered an error on behalf of the application. Note
+    /// that even Table::is_valid() is disallowed in this case.
+    bool is_valid() const TIGHTDB_NOEXCEPT;
 
     /// A shared spec is a column specification that in general
     /// applies to many tables. A table is not allowed to directly
@@ -591,6 +591,22 @@ class Table::Parent: public ArrayParent {
 
 // Implementation:
 
+inline bool Table::is_valid() const TIGHTDB_NOEXCEPT
+{
+    // Note that it is not possible to link the state of attachment of
+    // a table to the state of attachment of m_top, because tables
+    // with shared spec do not have a 'top' array. Neither is it
+    // possible to link it to the state of attachment of m_columns,
+    // because subtables with shared spec start out in a degenerate
+    // form where they do not have a 'columns' array. For these
+    // reasons, it is necessary to define the state of attachment of
+    // a table as follows: A table is attached if, and only if,
+    // m_columns stores a non-null parent pointer. This works because
+    // even for degenerate subtables, m_columns is initialized with
+    // the correct parent pointer.
+    return m_columns.has_parent();
+}
+
 inline std::size_t Table::get_column_count() const TIGHTDB_NOEXCEPT
 {
     return m_spec_set.get_column_count();
diff --git a/src/tightdb/utilities.cpp b/src/tightdb/utilities.cpp
index d706f4129a8..cab36712265 100644
--- a/src/tightdb/utilities.cpp
+++ b/src/tightdb/utilities.cpp
@@ -44,14 +44,22 @@ void cpuid_init()
 }
 
 
+// FIXME: Move all these rounding functions to the header file to
+// allow inlining.
 void* round_up(void* p, size_t align)
 {
+    // FIXME: The C++ standard does not guarantee that a pointer can
+    // be stored in size_t. Use uintptr_t instead. The problem with
+    // uintptr_t, is that it is not part of C++03.
     size_t r = size_t(p) % align == 0 ? 0 : align - size_t(p) % align;
     return static_cast<char*>(p) + r;
 }
 
 void* round_down(void* p, size_t align)
 {
+    // FIXME: The C++ standard does not guarantee that a pointer can
+    // be stored in size_t. Use uintptr_t instead. The problem with
+    // uintptr_t, is that it is not part of C++03.
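// A minimal sketch (not part of the patch) of what the FIXME above asks
// for: rounding pointers through uintptr_t rather than size_t. uintptr_t
// comes from C99/C++11 (<stdint.h>), which is exactly the C++03
// availability concern the comment raises. As in the original code,
// round_down assumes 'align' is a power of two.

#include <stdint.h>

void* round_up_portable(void* p, uintptr_t align)
{
    uintptr_t v = reinterpret_cast<uintptr_t>(p);
    uintptr_t r = v % align == 0 ? 0 : align - v % align;
    return static_cast<char*>(p) + r;
}

void* round_down_portable(void* p, uintptr_t align)
{
    uintptr_t v = reinterpret_cast<uintptr_t>(p);
    return reinterpret_cast<void*>(v & ~(align - 1));
}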
     size_t r = size_t(p);
     return reinterpret_cast<void*>(r & ~(align - 1));
 }
 
From 78ada0bd68bf855c37f2fe1b66f077cabff26dfd Mon Sep 17 00:00:00 2001
From: Kristian Spangsege
Date: Mon, 19 Aug 2013 20:29:51 +0200
Subject: [PATCH 06/20] More cleanup

---
 src/tightdb/array.cpp         | 66 ++++++++---------------------------
 src/tightdb/array.hpp         | 44 ++++++++++++++++++++++-
 src/tightdb/group.cpp         | 28 ++++++++-------
 src/tightdb/group_shared.cpp  |  3 ++
 src/tightdb/query_engine.hpp  | 37 ++++++++++++--------
 src/tightdb/table.hpp         |  6 +---
 test/experiments/testcase.cpp |  1 +
 test/testcolumnmixed.cpp      | 25 +++++++------
 8 files changed, 115 insertions(+), 95 deletions(-)

diff --git a/src/tightdb/array.cpp b/src/tightdb/array.cpp
index 4c746684765..46d2809d2b3 100644
--- a/src/tightdb/array.cpp
+++ b/src/tightdb/array.cpp
@@ -204,64 +204,28 @@ void Array::set_parent(ArrayParent* parent, size_t ndx_in_parent) TIGHTDB_NOEXCE
     m_ndx_in_parent = ndx_in_parent;
 }
 
-void Array::destroy()
-{
-    if (!m_data) return;
-
-    if (m_hasRefs) {
-        for (size_t i = 0; i < m_size; ++i) {
-            int64_t v = get(i);
-
-            // null-refs signify empty sub-trees
-            if (v == 0) continue;
-
-            // all refs are 64bit aligned, so the lowest bits
-            // cannot be set. If they are it means that it should
-            // not be interpreted as a ref
-            if (v & 0x1) continue;
-
-            Array sub(to_ref(v), this, i, m_alloc);
-            sub.destroy();
-        }
-    }
-
-    char* header = get_header_from_data(m_data);
-    m_alloc.free_(m_ref, header);
-    m_data = 0;
-}
-
-void Array::clear()
+void Array::destroy_children()
 {
-    copy_on_write(); // Throws
-
-    // Make sure we don't have any dangling references
-    if (m_hasRefs) {
-        for (size_t i = 0; i < size(); ++i) {
-            int64_t v = get(i);
+    for (size_t i = 0; i < m_size; ++i) {
+        int64_t v = get(i);
 
-            // null-refs signify empty sub-trees
-            if (v == 0) continue;
+        // Null-refs indicate empty sub-trees
+        if (v == 0)
+            continue;
 
-            // all refs are 64bit aligned, so the lowest bits
-            // cannot be set. If they are it means that it should
-            // not be interpreted as a ref
-            if (v & 0x1) continue;
+        // A ref is always 8-byte aligned, so the lowest bit
+        // cannot be set. If it is, it means that it should not be
+        // interpreted as a ref.
+        if (v % 2 != 0)
+            continue;
 
-            Array sub(to_ref(v), this, i, m_alloc);
-            sub.destroy();
-        }
+        Array sub(to_ref(v), this, i, m_alloc);
+        sub.destroy();
     }
-
-    // Truncate size to zero (but keep capacity)
-    m_size = 0;
-    m_capacity = CalcItemCount(get_capacity_from_header(), 0);
-    set_width(0);
-
-    // Update header
-    set_header_size(0);
-    set_header_width(0);
 }
 
+
 void Array::erase(size_t ndx)
 {
     TIGHTDB_ASSERT(ndx < m_size);
@@ -440,7 +404,7 @@ void Array::ensure_minimum_width(int64_t value)
     }
 }
 
-void Array::SetAllToZero()
+void Array::set_all_to_zero()
 {
     copy_on_write(); // Throws
 
diff --git a/src/tightdb/array.hpp b/src/tightdb/array.hpp
index 6e7a4b70282..10a2255c05c 100644
--- a/src/tightdb/array.hpp
+++ b/src/tightdb/array.hpp
@@ -431,7 +431,10 @@ class Array: public ArrayParent {
     size_t IndexStringCount(StringData value, void* column, StringGetter get_func) const;
     FindRes IndexStringFindAllNoCopy(StringData value, size_t& res_ref, void* column, StringGetter get_func) const;
 
-    void SetAllToZero();
+    /// This one may change the representation of the array, so be
+    /// careful if you call it after ensure_minimum_width().
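// A note on the warning above, spelled out (hypothetical usage, not from
// the patch): ensure_minimum_width(v) widens the element representation so
// that a later set() of any value up to v cannot trigger reallocation, but
// set_all_to_zero() may shrink the representation again, silently voiding
// that no-reallocation guarantee. When both are needed, call
// ensure_minimum_width() after set_all_to_zero(), not before.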
+ void set_all_to_zero(); + void Increment(int64_t value, std::size_t start=0, std::size_t end=std::size_t(-1)); void IncrementIf(int64_t limit, int64_t value); void adjust(std::size_t start, int64_t diff); @@ -503,7 +506,13 @@ class Array: public ArrayParent { Array GetSubArray(std::size_t ndx) const TIGHTDB_NOEXCEPT; ref_type get_ref() const TIGHTDB_NOEXCEPT { return m_ref; } + + /// Recursively destroy children (as if calling clear()), then + /// transition to the detached state (as if calling detach()), + /// then free the allocated memory. For an unattached accessor, + /// this function has no effect (idempotency). void destroy(); + static void destroy(ref_type, Allocator&); Allocator& get_alloc() const TIGHTDB_NOEXCEPT { return m_alloc; } @@ -816,6 +825,8 @@ class Array: public ArrayParent { void update_child_ref(std::size_t child_ndx, ref_type new_ref) TIGHTDB_OVERRIDE; ref_type get_child_ref(std::size_t child_ndx) const TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE; + void destroy_children(); + // FIXME: below should be moved to a specific IntegerArray class protected: // Getters and Setters for adaptive-packed arrays @@ -1110,6 +1121,37 @@ inline bool Array::is_index_node(ref_type ref, const Allocator& alloc) return get_indexflag_from_header(alloc.translate(ref)); } +inline void Array::destroy() +{ + if (!is_attached()) + return; + + if (m_hasRefs) + destroy_children(); // Throws + + char* header = get_header_from_data(m_data); + m_alloc.free_(m_ref, header); + m_data = 0; +} + +inline void Array::clear() +{ + TIGHTDB_ASSERT(is_attached()); + + copy_on_write(); // Throws + + if (m_hasRefs) + destroy_children(); // Throws + + // Truncate size to zero (but keep capacity) + m_size = 0; + m_capacity = CalcItemCount(get_capacity_from_header(), 0); + set_width(0); + + // Update header + set_header_size(0); + set_header_width(0); +} inline void Array::destroy(ref_type ref, Allocator& alloc) { diff --git a/src/tightdb/group.cpp b/src/tightdb/group.cpp index 176ed0a6a38..73d27594eb6 100644 --- a/src/tightdb/group.cpp +++ b/src/tightdb/group.cpp @@ -193,16 +193,14 @@ void Group::init_from_ref(ref_type top_ref) // at all, and files that are not shared does not need version // info for free space. if (top_size > 2) { - TIGHTDB_ASSERT(top_size == 4 || top_size == 5); + TIGHTDB_ASSERT(top_size >= 4); size_t fp_ref = m_top.get_as_ref(2); size_t fl_ref = m_top.get_as_ref(3); m_free_positions.init_from_ref(fp_ref); m_free_lengths.init_from_ref(fl_ref); - if (m_is_shared && top_size > 4) { - TIGHTDB_ASSERT(top_size == 5); + if (m_is_shared && top_size > 4) m_free_versions.init_from_ref(m_top.get_as_ref(4)); - } } // Make room for pointers to cached tables @@ -218,14 +216,17 @@ void Group::init_shared() if (m_free_versions.is_attached()) { // If free space tracking is enabled // we just have to reset it - m_free_versions.SetAllToZero(); + m_free_versions.set_all_to_zero(); } else { // Serialized files have no free space tracking // at all so we have to add the basic free lists if (m_top.size() == 2) { - // FIXME: Is there a risk that these are already - // allocated? That would cause a leak. + // FIXME: There is a risk that these are already + // allocated, and that would cause a leak. This could + // happen if an earlier commit attempt failed. 
+            TIGHTDB_ASSERT(!m_free_positions.is_attached());
+            TIGHTDB_ASSERT(!m_free_lengths.is_attached());
             m_free_positions.create(Array::type_Normal);
             m_free_lengths.create(Array::type_Normal);
             m_top.add(m_free_positions.get_ref());
@@ -235,13 +236,14 @@ void Group::init_shared()
         // Files that have only been used in single thread
         // mode do not have version tracking for the free lists
         if (m_top.size() == 4) {
-            // FIXME: Is there a risk that this one is already
-            // allocated? That would cause a leak.
+            // FIXME: There is a risk that this one is already
+            // allocated, and that would cause a leak. This could
+            // happen if an earlier commit attempt failed.
+            TIGHTDB_ASSERT(!m_free_versions.is_attached());
             m_free_versions.create(Array::type_Normal);
             size_t n = m_free_positions.size();
-            for (size_t i = 0; i < n; ++i) {
+            for (size_t i = 0; i < n; ++i)
                 m_free_versions.add(0);
-            }
             m_top.add(m_free_versions.get_ref());
         }
     }
@@ -452,12 +454,12 @@ void Group::update_refs(ref_type top_ref, size_t old_baseline)
 void Group::update_from_shared(ref_type new_top_ref, size_t new_file_size)
 {
     TIGHTDB_ASSERT(new_top_ref < new_file_size);
+    TIGHTDB_ASSERT(!m_top.is_attached());
 
     // Update memory mapping if database file has grown
     TIGHTDB_ASSERT(new_file_size >= m_alloc.get_baseline());
-    if (new_file_size > m_alloc.get_baseline()) {
+    if (new_file_size > m_alloc.get_baseline())
         m_alloc.remap(new_file_size);
-    }
 
     // If our last look at the file was when it
     // was empty, we may have to re-create the group
diff --git a/src/tightdb/group_shared.cpp b/src/tightdb/group_shared.cpp
index f50b4a1f6fd..6e8452f931a 100644
--- a/src/tightdb/group_shared.cpp
+++ b/src/tightdb/group_shared.cpp
@@ -464,6 +464,9 @@ void SharedGroup::commit()
 // failed call to commit(). A failed call to commit() is any that
 // returns to the caller by throwing an exception. As it is right now,
 // rollback() does not handle all cases.
+//
+// FIXME: This function must be modified in such a way that it can be
+// guaranteed that it never throws. There are two problems to be dealt with.
Group::invalidate() calls Group::clear_cache() void SharedGroup::rollback() { TIGHTDB_ASSERT(m_transact_stage == transact_Writing); diff --git a/src/tightdb/query_engine.hpp b/src/tightdb/query_engine.hpp index 56aae1cb75f..fb1a85888d4 100644 --- a/src/tightdb/query_engine.hpp +++ b/src/tightdb/query_engine.hpp @@ -235,7 +235,7 @@ templateclass SequentialGetter : public SequentialGetterBase { class ParentNode { public: - ParentNode() : m_is_integer_node(false), m_table(NULL) {} + ParentNode(): m_is_integer_node(false), m_table(0) {} void gather_children(std::vector& v) { @@ -245,7 +245,7 @@ class ParentNode { v.push_back(this); p = p->child_criteria(); - if (p != NULL) + if (p) p->gather_children(v); m_children = v; @@ -256,7 +256,7 @@ class ParentNode { } struct score_compare { - bool operator ()(ParentNode const* a, ParentNode const* b) const { return (a->cost() < b->cost()); } + bool operator ()(const ParentNode* a, const ParentNode* b) const { return a->cost() < b->cost(); } }; double cost() const @@ -465,6 +465,15 @@ class ParentNode { const Table* m_table; std::string error_code; + const ColumnBase& get_column_base(const Table& table, std::size_t ndx) + { + return table.get_column_base(ndx); + } + + ColumnType get_real_column_type(const Table& table, std::size_t ndx) + { + return table.get_real_column_type(ndx); + } }; @@ -585,7 +594,7 @@ template class IntegerNode: pu void init(const Table& table) { m_dD = 100.0; - m_condition_column = static_cast(&table.get_column_base(m_condition_column_idx)); + m_condition_column = static_cast(&get_column_base(table, m_condition_column_idx)); m_table = &table; m_leaf_end = 0; if (m_child) @@ -848,8 +857,8 @@ template class StringNode: public ParentNode { m_matches = 0; m_end_s = 0; m_table = &table; - m_condition_column = &table.get_column_base(m_condition_column_idx); - m_column_type = table.get_real_column_type(m_condition_column_idx); + m_condition_column = &get_column_base(table, m_condition_column_idx); + m_column_type = get_real_column_type(table, m_condition_column_idx); if (m_child) m_child->init(table); @@ -867,7 +876,7 @@ template class StringNode: public ParentNode { t = static_cast(m_condition_column)->get(s); } else { - // short or long + // short or long const AdaptiveStringColumn* asc = static_cast(m_condition_column); if (s >= m_end_s) { // we exceeded current leaf's range @@ -931,7 +940,7 @@ template class BasicNode: publ { m_dD = 100.0; m_table = &table; - m_condition_column.m_column = static_cast(&table.get_column_base(m_condition_column_idx)); + m_condition_column.m_column = static_cast(&get_column_base(table, m_condition_column_idx)); m_condition_column.m_leaf_end = 0; if (m_child) @@ -981,8 +990,8 @@ template class BinaryNode: public ParentNode { { m_dD = 100.0; m_table = &table; - m_condition_column = static_cast(&table.get_column_base(m_condition_column_idx)); - m_column_type = table.get_real_column_type(m_condition_column_idx); + m_condition_column = static_cast(&get_column_base(table, m_condition_column_idx)); + m_column_type = get_real_column_type(table, m_condition_column_idx); if (m_child) m_child->init(table); @@ -1064,8 +1073,8 @@ template<> class StringNode: public ParentNode { m_dD = 10.0; m_leaf_end = 0; m_table = &table; - m_condition_column = &table.get_column_base(m_condition_column_idx); - m_column_type = table.get_real_column_type(m_condition_column_idx); + m_condition_column = &get_column_base(table, m_condition_column_idx); + m_column_type = get_real_column_type(table, m_condition_column_idx); if 
(m_column_type == col_type_StringEnum) {
             m_dT = 1.0;
@@ -1330,10 +1339,10 @@ template class TwoColumnsNode:
         m_dD = 100.0;
         m_table = &table;
 
-        const ColType* c = static_cast<const ColType*>(&table.get_column_base(m_condition_column_idx1));
+        const ColType* c = static_cast<const ColType*>(&get_column_base(table, m_condition_column_idx1));
         m_getter1.init(c);
 
-        c = static_cast<const ColType*>(&table.get_column_base(m_condition_column_idx2));
+        c = static_cast<const ColType*>(&get_column_base(table, m_condition_column_idx2));
         m_getter2.init(c);
 
         if (m_child)
diff --git a/src/tightdb/table.hpp b/src/tightdb/table.hpp
index bcbc0427780..a7cba2a778e 100644
--- a/src/tightdb/table.hpp
+++ b/src/tightdb/table.hpp
@@ -563,11 +563,7 @@ class Table {
     friend class ColumnSubtableParent;
     friend class LangBindHelper;
     friend class TableViewBase;
-    template friend class StringNode;
-    template friend class BinaryNode;
-    template friend class IntegerNode;
-    template friend class BasicNode;
-    template friend class TwoColumnsNode;
+    friend class ParentNode;
     template friend class SequentialGetter;
 };
diff --git a/test/experiments/testcase.cpp b/test/experiments/testcase.cpp
index 3237f35a012..564877b3a23 100644
--- a/test/experiments/testcase.cpp
+++ b/test/experiments/testcase.cpp
@@ -7,6 +7,7 @@
 #include
 #include
 #include
+#include
 
 #include
 #include
diff --git a/test/testcolumnmixed.cpp b/test/testcolumnmixed.cpp
index b37941bcd95..c0698c8d264 100644
--- a/test/testcolumnmixed.cpp
+++ b/test/testcolumnmixed.cpp
@@ -1,16 +1,19 @@
+#include
+
 #include
-#include
-#include
+#include
 
+using namespace std;
 using namespace tightdb;
 
+
 TEST(ColumnMixed_Int)
 {
     ColumnMixed c;
 
-    const int64_t maxval = std::numeric_limits<int64_t>::max();
-    const int64_t minval = std::numeric_limits<int64_t>::min();
-    const int64_t allbit = 0xFFFFFFFFFFFFFFFF;
+    int64_t maxval = numeric_limits<int64_t>::max();
+    int64_t minval = numeric_limits<int64_t>::min();
+    int64_t allbit = 0xFFFFFFFFFFFFFFFF;
 
     c.insert_int(0, 2);
     c.insert_int(1, minval);
@@ -50,10 +53,10 @@ TEST(ColumnMixed_Float)
 {
     ColumnMixed c;
 
-    const uint32_t v = 0xFFFFFFFF;
-    const float f = static_cast<float>(v);
+    uint32_t v = 0xFFFFFFFF;
+    float f = float(v);
     float fval1[] = {0.0f, 100.123f, -111.222f, f};
-    float fval2[] = {-0.0f, -100.123f, std::numeric_limits<float>::max(), std::numeric_limits<float>::min()};
+    float fval2[] = {-0.0f, -100.123f, numeric_limits<float>::max(), numeric_limits<float>::min()};
 
     // Test insert
     for (size_t i=0; i<4; ++i)
@@ -83,10 +86,10 @@ TEST(ColumnMixed_Double)
 {
     ColumnMixed c;
 
-    const uint64_t v = 0xFFFFFFFFFFFFFFFF;
-    const double d = static_cast<double>(v);
+    uint64_t v = 0xFFFFFFFFFFFFFFFF;
+    double d = double(v);
     double fval1[] = {1.0, 200.123, -111.222, d};
-    double fval2[] = {-1.0, -100.123, std::numeric_limits<double>::max(), std::numeric_limits<double>::min()};
+    double fval2[] = {-1.0, -100.123, numeric_limits<double>::max(), numeric_limits<double>::min()};
 
     // Test insert
     for (size_t i=0; i<4; ++i) {

From 6aed1ac9e1a5735e3985ffbe6ce1a90eee57d27e Mon Sep 17 00:00:00 2001
From: Kristian Spangsege
Date: Mon, 19 Aug 2013 21:05:11 +0200
Subject: [PATCH 07/20] In GroupWriter::reserve_free_space() after looking in
 the last half of the list of free space chunks, look in the first half

---
 src/tightdb/group_writer.cpp | 174 ++++++++++++++++++-----------------
 src/tightdb/group_writer.hpp |  10 +-
 2 files changed, 94 insertions(+), 90 deletions(-)

diff --git a/src/tightdb/group_writer.cpp b/src/tightdb/group_writer.cpp
index 29f9215833a..58f0437d92f 100644
--- a/src/tightdb/group_writer.cpp
+++ b/src/tightdb/group_writer.cpp
@@ -8,7 +8,6 @@
 using namespace std;
 using namespace tightdb;
 
-// todo, test (int) cast
GroupWriter::GroupWriter(Group& group) :
     m_group(group), m_alloc(group.m_alloc), m_current_version(0)
 {
@@ -28,11 +27,11 @@ size_t GroupWriter::commit(bool do_sync)
 {
     merge_free_space();
 
-    Array& top        = m_group.m_top;
-    Array& fpositions = m_group.m_free_positions;
-    Array& flengths   = m_group.m_free_lengths;
-    Array& fversions  = m_group.m_free_versions;
-    const bool is_shared = m_group.m_is_shared;
+    Array& top        = m_group.m_top;
+    Array& fpositions = m_group.m_free_positions;
+    Array& flengths   = m_group.m_free_lengths;
+    Array& fversions  = m_group.m_free_versions;
+    bool is_shared = m_group.m_is_shared;
 
     TIGHTDB_ASSERT(fpositions.size() == flengths.size());
     TIGHTDB_ASSERT(!is_shared || fversions.size() == flengths.size());
@@ -96,24 +95,22 @@ size_t GroupWriter::commit(bool do_sync)
     // present in the non-transactional case where there is no version
     // tracking on the free-space chunks.
     {
-        const size_t n = new_free_space.size();
-        if (n > 0) {
-            for (size_t i = 0; i < n; ++i) {
-                SlabAlloc::FreeSpace::ConstCursor r = new_free_space[i];
-                size_t pos  = to_size_t(r.ref);
-                size_t size = to_size_t(r.size);
-                // We always want to keep the list of free space in sorted order
-                // (by ascending position) to facilitate merge of adjacent
-                // segments. We can find the correct insert position by binary
-                // search
-                size_t ndx = fpositions.lower_bound_int(pos);
-                fpositions.insert(ndx, pos);
-                flengths.insert(ndx, size);
-                if (is_shared)
-                    fversions.insert(ndx, m_current_version);
-                if (ndx <= reserve_ndx)
-                    ++reserve_ndx;
-            }
+        size_t n = new_free_space.size();
+        for (size_t i = 0; i < n; ++i) {
+            SlabAlloc::FreeSpace::ConstCursor r = new_free_space[i];
+            size_t pos  = to_size_t(r.ref);
+            size_t size = to_size_t(r.size);
+            // We always want to keep the list of free space in sorted
+            // order (by ascending position) to facilitate merge of
+            // adjacent segments. We can find the correct insert
+            // position by binary search
+            size_t ndx = fpositions.lower_bound_int(pos);
+            fpositions.insert(ndx, pos);
+            flengths.insert(ndx, size);
+            if (is_shared)
+                fversions.insert(ndx, m_current_version);
+            if (ndx <= reserve_ndx)
+                ++reserve_ndx;
         }
     }
 
@@ -182,60 +179,6 @@ size_t GroupWriter::commit(bool do_sync)
 }
 
 
-size_t GroupWriter::write(const char* data, size_t size)
-{
-    // Get position of free space to write in (expanding file if needed)
-    size_t pos = get_free_space(size);
-    TIGHTDB_ASSERT((pos & 0x7) == 0); // Write position should always be 64bit aligned
-
-    // Write the block
-    char* dest = m_file_map.get_addr() + pos;
-    copy(data, data+size, dest);
-
-    // return the position it was written
-    return pos;
-}
-
-
-void GroupWriter::write_at(size_t pos, const char* data, size_t size)
-{
-    char* dest = m_file_map.get_addr() + pos;
-
-    char* mmap_end = m_file_map.get_addr() + m_file_map.get_size();
-    char* copy_end = dest + size;
-    TIGHTDB_ASSERT(copy_end <= mmap_end);
-    static_cast<void>(mmap_end);
-    static_cast<void>(copy_end);
-
-    copy(data, data+size, dest);
-}
-
-
-void GroupWriter::sync(uint64_t top_pos)
-{
-    // Write data
-    m_file_map.sync();
-
-    // File header is 24 bytes, composed of three 64bit
-    // blocks. The two first being top_refs (only one valid
-    // at a time) and the last being the info block.
-    char* file_header = m_file_map.get_addr();
-
-    // Least significant bit in last byte of info block indicates
-    // which top_ref block is valid
-    int current_valid_ref = file_header[16+7] & 0x1;
-    int new_valid_ref = current_valid_ref ^ 0x1;
-
-    // Update top ref pointer
-    uint64_t* top_refs = reinterpret_cast<uint64_t*>(file_header);
-    top_refs[new_valid_ref] = top_pos;
-    file_header[16+7] = char(new_valid_ref); // swap
-
-    // Write new header to disk
-    m_file_map.sync();
-}
-
-
 void GroupWriter::merge_free_space()
 {
     Array& positions = m_group.m_free_positions;
@@ -315,18 +258,19 @@ size_t GroupWriter::get_free_space(size_t size)
 
 pair<size_t, size_t> GroupWriter::reserve_free_space(size_t size)
 {
-    Array& lengths   = m_group.m_free_lengths;
-    Array& versions  = m_group.m_free_versions;
+    Array& lengths  = m_group.m_free_lengths;
+    Array& versions = m_group.m_free_versions;
     bool is_shared = m_group.m_is_shared;
 
-    size_t end = lengths.size();
-
-    // Since we do a 'first fit' search, the top pieces are likely
-    // to get smaller and smaller. So if we are looking for a bigger piece
-    // we may find it faster by looking further down in the list.
+    // Since we do a first-fit search for small chunks, the top pieces
+    // are likely to get smaller and smaller. So when we are looking
+    // for bigger chunks we are likely to find them faster by skipping
+    // the first half of the list.
+    size_t end = lengths.size();
     size_t begin = size < 1024 ? 0 : end / 2;
 
     // Do we have a free space we can reuse?
+  again:
     for (size_t i = begin; i != end; ++i) {
         size_t chunk_size = to_size_t(lengths.get(i));
         if (chunk_size >= size) {
             // Only blocks that are not occupied by current readers
             // are allowed to be used.
             if (is_shared) {
                 size_t ver = to_size_t(versions.get(i));
                 if (ver >= m_readlock_version)
                     continue;
             }
 
             // Match found!
             return make_pair(i, chunk_size);
         }
     }
 
+    if (begin > 0) {
+        end = begin;
+        begin = 0;
+        goto again;
+    }
+
     // No free space, so we have to extend the file.
     return extend_free_space(size);
 }
 
@@ -429,6 +379,60 @@ pair<size_t, size_t> GroupWriter::extend_free_space(size_t requested_size)
 }
 
 
+size_t GroupWriter::write(const char* data, size_t size)
+{
+    // Get position of free space to write in (expanding file if needed)
+    size_t pos = get_free_space(size);
+    TIGHTDB_ASSERT((pos & 0x7) == 0); // Write position should always be 64bit aligned
+
+    // Write the block
+    char* dest = m_file_map.get_addr() + pos;
+    copy(data, data+size, dest);
+
+    // return the position it was written
+    return pos;
+}
+
+
+void GroupWriter::write_at(size_t pos, const char* data, size_t size)
+{
+    char* dest = m_file_map.get_addr() + pos;
+
+    char* mmap_end = m_file_map.get_addr() + m_file_map.get_size();
+    char* copy_end = dest + size;
+    TIGHTDB_ASSERT(copy_end <= mmap_end);
+    static_cast<void>(mmap_end);
+    static_cast<void>(copy_end);
+
+    copy(data, data+size, dest);
+}
+
+
+void GroupWriter::sync(uint64_t top_pos)
+{
+    // Write data
+    m_file_map.sync();
+
+    // File header is 24 bytes, composed of three 64bit
+    // blocks. The two first being top_refs (only one valid
+    // at a time) and the last being the info block.
+    char* file_header = m_file_map.get_addr();
+
+    // Least significant bit in last byte of info block indicates
+    // which top_ref block is valid
+    int current_valid_ref = file_header[16+7] & 0x1;
+    int new_valid_ref = current_valid_ref ^ 0x1;
+
+    // Update top ref pointer
+    uint64_t* top_refs = reinterpret_cast<uint64_t*>(file_header);
+    top_refs[new_valid_ref] = top_pos;
+    file_header[16+7] = char(new_valid_ref); // swap
+
+    // Write new header to disk
+    m_file_map.sync();
+}
+
+
 #ifdef TIGHTDB_DEBUG
 
diff --git a/src/tightdb/group_writer.hpp b/src/tightdb/group_writer.hpp
index 5caca961cb1..2c470bd03c3 100644
--- a/src/tightdb/group_writer.hpp
+++ b/src/tightdb/group_writer.hpp
@@ -50,8 +50,6 @@ class GroupWriter {
     /// written.
     std::size_t write(const char* data, std::size_t size);
 
-    void write_at(std::size_t pos, const char* data, std::size_t size);
-
 #ifdef TIGHTDB_DEBUG
     void dump();
 #endif
@@ -63,8 +61,7 @@ class GroupWriter {
     std::size_t m_readlock_version;
     File::Map<char> m_file_map;
 
-    // Controlled update of physical medium
-    void sync(uint64_t top_pos);
+    void merge_free_space();
 
     /// Allocate a chunk of free space of the specified size. The
     /// specified size must be 8-byte aligned. Extend the file if
@@ -97,7 +94,10 @@ class GroupWriter {
     /// size, and `chunk_size` is the size of that chunk.
     std::pair<std::size_t, std::size_t> extend_free_space(std::size_t requested_size);
 
-    void merge_free_space();
+    void write_at(std::size_t pos, const char* data, std::size_t size);
+
+    // Controlled update of physical medium
+    void sync(uint64_t top_pos);
 };

From 6d56d96bad6cf6d2a4d780da21973de35310ed62 Mon Sep 17 00:00:00 2001
From: Kristian Spangsege
Date: Wed, 21 Aug 2013 22:56:01 +0200
Subject: [PATCH 08/20] Bad line removed from testcase 'Table1'

---
 test/testtable.cpp | 1 -
 1 file changed, 1 deletion(-)

diff --git a/test/testtable.cpp b/test/testtable.cpp
index f62a02d63d2..1e895c9dff2 100644
--- a/test/testtable.cpp
+++ b/test/testtable.cpp
@@ -42,7 +42,6 @@ TEST(Table1)
     CHECK_EQUAL(type_Int, table.get_column_type(1));
     CHECK_EQUAL("first", table.get_column_name(0));
     CHECK_EQUAL("second", table.get_column_name(1));
-    CHECK_EQUAL("", table.get_column_name(2));
 
     // Test adding a single empty row
     // and filling it with values

From 7b7a37db73ceaf643538c39b7f6728ef77506081 Mon Sep 17 00:00:00 2001
From: Kristian Spangsege
Date: Fri, 23 Aug 2013 13:27:40 +0200
Subject: [PATCH 09/20] Stop throwing from destructors, and from
 SharedGroup::rollback() and SharedGroup::end_read(). Avoid function local
 statics as they are not necessarily thread-safe.
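A note on the "function local statics" issue this commit refers to: before
C++11, the one-time initialization of a function-local static was not
required to be thread-safe, so two threads racing into the accessor could
both run the constructor. A minimal sketch of the racy pattern and of the
fix applied in alloc.cpp below (the class reduced to a stub for
illustration):

    struct DefaultAllocator { /* stateless */ };

    // Racy under C++03: the first call initializes the static, and two
    // threads can reach that initialization concurrently.
    DefaultAllocator& get_default_racy()
    {
        static DefaultAllocator alloc;
        return alloc;
    }

    // The fix: a namespace-scope instance, constructed during static
    // initialization, before any threads can have been spawned.
    namespace { DefaultAllocator default_alloc; }

    DefaultAllocator& get_default()
    {
        return default_alloc;
    }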
--- src/tightdb/Makefile | 10 +- src/tightdb/alloc.cpp | 34 ++++- src/tightdb/alloc.hpp | 24 +--- src/tightdb/alloc_slab.cpp | 185 ++++++++++++++++++--------- src/tightdb/alloc_slab.hpp | 37 ++++-- src/tightdb/array.cpp | 2 +- src/tightdb/array.hpp | 66 ++++++++-- src/tightdb/array_basic.hpp | 1 + src/tightdb/array_binary.hpp | 1 + src/tightdb/array_blob.hpp | 1 + src/tightdb/array_string.hpp | 1 + src/tightdb/array_string_long.hpp | 1 + src/tightdb/binary_data.hpp | 1 + src/tightdb/bind_ptr.hpp | 42 +++--- src/tightdb/buffer.hpp | 70 ++++++++++ src/tightdb/column.hpp | 24 ++-- src/tightdb/column_basic.hpp | 4 +- src/tightdb/column_basic_tpl.hpp | 23 ++-- src/tightdb/column_binary.cpp | 18 +-- src/tightdb/column_binary.hpp | 4 +- src/tightdb/column_mixed.cpp | 7 +- src/tightdb/column_mixed.hpp | 9 +- src/tightdb/column_mixed_tpl.hpp | 4 +- src/tightdb/column_string.cpp | 21 +-- src/tightdb/column_string.hpp | 4 +- src/tightdb/column_string_enum.cpp | 12 +- src/tightdb/column_string_enum.hpp | 4 +- src/tightdb/column_table.cpp | 5 +- src/tightdb/column_table.hpp | 142 ++++++++++---------- src/tightdb/config.h | 5 + src/tightdb/date.hpp | 2 + src/tightdb/group.cpp | 156 +++++++++++++--------- src/tightdb/group.hpp | 160 +++++++++-------------- src/tightdb/group_shared.cpp | 40 +++--- src/tightdb/group_shared.hpp | 21 +-- src/tightdb/group_writer.cpp | 48 +++---- src/tightdb/index_string.hpp | 1 + src/tightdb/lang_bind_helper.hpp | 3 +- src/tightdb/mixed.hpp | 2 + src/tightdb/query.cpp | 2 +- src/tightdb/query.hpp | 12 +- src/tightdb/query_engine.hpp | 35 +++-- src/tightdb/replication.cpp | 4 +- src/tightdb/replication.hpp | 27 +--- src/tightdb/spec.cpp | 9 +- src/tightdb/spec.hpp | 9 +- src/tightdb/string_data.hpp | 1 + src/tightdb/table.cpp | 28 ++-- src/tightdb/table.hpp | 91 +++++++++---- src/tightdb/table_accessors.hpp | 28 ++-- src/tightdb/table_basic.hpp | 96 ++++++++------ src/tightdb/table_macros.hpp | 180 +++++++++++++++++++------- src/tightdb/table_macros.hpp.cheetah | 10 +- src/tightdb/table_ref.hpp | 34 ++--- src/tightdb/table_view.hpp | 79 +++++++----- src/tightdb/thread.hpp | 4 +- test/testarray.cpp | 20 +-- 57 files changed, 1097 insertions(+), 767 deletions(-) create mode 100644 src/tightdb/buffer.hpp diff --git a/src/tightdb/Makefile b/src/tightdb/Makefile index 817da11260b..233257fa146 100644 --- a/src/tightdb/Makefile +++ b/src/tightdb/Makefile @@ -1,11 +1,11 @@ GENERATED_SOURCES = table_macros.hpp INST_HEADERS = config.h meta.hpp assert.hpp exceptions.hpp terminate.hpp type_list.hpp tuple.hpp \ -bind.hpp safe_int_ops.hpp unique_ptr.hpp bind_ptr.hpp string_buffer.hpp file.hpp thread.hpp \ -utf8.hpp utilities.hpp alloc.hpp alloc_slab.hpp array.hpp array_string.hpp data_type.hpp \ -column_type.hpp column_fwd.hpp spec.hpp date.hpp string_data.hpp binary_data.hpp mixed.hpp \ -table.hpp table_ref.hpp table_basic_fwd.hpp table_accessors.hpp table_basic.hpp table_view.hpp \ -table_view_basic.hpp table_macros.hpp group.hpp group_shared.hpp query.hpp \ +bind.hpp safe_int_ops.hpp unique_ptr.hpp bind_ptr.hpp buffer.hpp string_buffer.hpp file.hpp \ +thread.hpp utf8.hpp utilities.hpp alloc.hpp alloc_slab.hpp array.hpp array_string.hpp \ +data_type.hpp column_type.hpp column_fwd.hpp spec.hpp date.hpp string_data.hpp binary_data.hpp \ +mixed.hpp table.hpp table_ref.hpp table_basic_fwd.hpp table_accessors.hpp table_basic.hpp \ +table_view.hpp table_view_basic.hpp table_macros.hpp group.hpp group_shared.hpp query.hpp \ query_conditions.hpp lang_bind_helper.hpp tightdb_nmmintrin.h 
replication.hpp
 
 lib_LIBRARIES = libtightdb.a
 
diff --git a/src/tightdb/alloc.cpp b/src/tightdb/alloc.cpp
index 63d220278ae..f40c35c781e 100644
--- a/src/tightdb/alloc.cpp
+++ b/src/tightdb/alloc.cpp
@@ -8,10 +8,33 @@
 using namespace std;
 using namespace tightdb;
 
+// FIXME: Casting a pointer to std::size_t is inherently
+// nonportable. For example, systems exist where pointers are 64 bits
+// and std::size_t is 32. One idea would be to use a different type
+// for refs such as std::uintptr_t, the problem with this one is that
+// while it is described by the C++11 standard it is not required to
+// be present. C++03 does not even mention it. A real working solution
+// will be to introduce a new name for the type of refs. The typedef
+// can then be made as complex as required to pick out an appropriate
+// type on any supported platform.
+//
+// A better solution may be to use an instance of SlabAlloc. The main
+// problem is that SlabAlloc is not thread-safe. Another problem is
+// that its free-list management is currently exceedingly slow due to
+// linear searches. Another problem is that it is prone to general
+// memory corruption due to lack of exception safety when updating
+// free-lists. But these problems must be fixed anyway.
+
+
 namespace {
 
-// For use with free-standing objects (objects that are not part of a
-// TightDB group)
+/// For use with free-standing objects (objects that are not part of a
+/// TightDB group)
+///
+/// Note that it is essential that this class is stateless as it may
+/// be used by multiple threads. Although it has m_replication, this
+/// is not a problem, as there is no way to modify it, so it will
+/// remain zero.
 class DefaultAllocator: public tightdb::Allocator {
 public:
     MemRef alloc(size_t size) TIGHTDB_OVERRIDE
@@ -32,7 +55,7 @@ class DefaultAllocator: public tightdb::Allocator {
         throw bad_alloc();
     }
 
-    void free_(ref_type, const char* addr) TIGHTDB_OVERRIDE
+    void free_(ref_type, const char* addr) TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE
     {
         free(const_cast<char*>(addr));
     }
@@ -52,12 +75,13 @@ class DefaultAllocator: public tightdb::Allocator {
 #endif
 };
 
+DefaultAllocator default_alloc;
+
 } // anonymous namespace
 
 
 Allocator& Allocator::get_default() TIGHTDB_NOEXCEPT
 {
-    static DefaultAllocator alloc;
-    return alloc;
+    return default_alloc;
 }
 
diff --git a/src/tightdb/alloc.hpp b/src/tightdb/alloc.hpp
index 538513b4a31..fafdd57f388 100644
--- a/src/tightdb/alloc.hpp
+++ b/src/tightdb/alloc.hpp
@@ -50,17 +50,6 @@ struct MemRef {
     ref_type m_ref;
 };
 
-// FIXME: Casting a pointer to std::size_t is inherently nonportable
-// (see the default definition of Allocator::alloc()). For example,
-// systems exist where pointers are 64 bits and std::size_t is 32. One
-// idea would be to use a different type for refs such as
-// std::uintptr_t, the problem with this one is that while it is
-// described by the C++11 standard it is not required to be
-// present. C++03 does not even mention it. A real working solution
-// will be to introduce a new name for the type of refs. The typedef
-// can then be made as complex as required to pick out an appropriate
-// type on any supported platform.
 
 /// The common interface for TightDB allocators.
 ///
@@ -87,16 +76,15 @@ class Allocator {
     ///
     /// \throw std::bad_alloc If insufficient memory was available.
     ///
-    /// Note: The underscore was added because the name \c realloc
+    /// Note: The underscore has been added because the name `realloc`
     /// would conflict with a macro on the Windows platform.
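// A note (not from the patch) on how the (ref, addr) parameter pairs below
// are meant to be used by callers, as a hypothetical sequence:
//
//     MemRef mem = alloc.alloc(64);               // size must be 8-byte aligned
//     char* addr = mem.m_addr;                    // same as alloc.translate(mem.m_ref)
//     mem = alloc.realloc_(mem.m_ref, addr, 128); // may move; the old block is freed
//     alloc.free_(mem.m_ref, mem.m_addr);         // never throws after this patch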
     virtual MemRef realloc_(ref_type ref, const char* addr, std::size_t size) = 0;
 
-    // FIXME: SlabAlloc::free_() should be modified such than this
-    // method never throws.
+    /// Release the specified chunk of memory.
     ///
-    /// Note: The underscore was added because the name \c free would
-    /// conflict with a macro on the Windows platform.
-    virtual void free_(ref_type, const char* addr) = 0;
+    /// Note: The underscore has been added because the name `free`
+    /// would conflict with a macro on the Windows platform.
+    virtual void free_(ref_type, const char* addr) TIGHTDB_NOEXCEPT = 0;
 
     /// Map the specified \a ref to the corresponding memory
     /// address. Note that if is_read_only(ref) returns true, then the
@@ -118,7 +106,7 @@ class Allocator {
     /// therefore, is not part of an actual database.
     static Allocator& get_default() TIGHTDB_NOEXCEPT;
 
-    virtual ~Allocator() {}
+    virtual ~Allocator() TIGHTDB_NOEXCEPT {}
 
 #ifdef TIGHTDB_DEBUG
     virtual void Verify() const = 0;
diff --git a/src/tightdb/alloc_slab.cpp b/src/tightdb/alloc_slab.cpp
index 995fa6e12b9..b03f3780b94 100644
--- a/src/tightdb/alloc_slab.cpp
+++ b/src/tightdb/alloc_slab.cpp
@@ -1,3 +1,4 @@
+#include
 #include
 #include
 
@@ -10,6 +11,18 @@
 using namespace std;
 using namespace tightdb;
 
+namespace {
+
+class InvalidFreeSpace: std::exception {
+public:
+    const char* what() const TIGHTDB_NOEXCEPT_OR_NOTHROW
+    {
+        return "Free space tracking was lost due to out-of-memory";
+    }
+};
+
+} // anonymous namespace
+
 const char SlabAlloc::default_header[24] = {
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
 };
 
 
-SlabAlloc::~SlabAlloc()
+SlabAlloc::~SlabAlloc() TIGHTDB_NOEXCEPT
 {
 #ifdef TIGHTDB_DEBUG
-    if (!is_all_free()) {
+    if (!m_free_space_invalid && !is_all_free()) {
         m_slabs.print();
         m_free_space.print();
         TIGHTDB_TERMINATE("SlabAlloc detected a leak");
     }
-#endif // TIGHTDB_DEBUG
+#endif
 
     // Release all allocated memory
-    for (size_t i = 0; i < m_slabs.size(); ++i) {
+    for (size_t i = 0; i < m_slabs.size(); ++i)
         delete[] reinterpret_cast<char*>(m_slabs[i].addr.get());
-    }
 
     // Release memory
     if (m_data) {
         switch (m_free_mode) {
-            case free_Noop: break;
-            case free_Unalloc: ::free(m_data); break;
-            case free_Unmap: File::unmap(m_data, m_baseline); break;
+            case free_Noop:
+                break;
+            case free_Unalloc:
+                ::free(m_data);
+                break;
+            case free_Unmap:
+                File::unmap(m_data, m_baseline);
+                break;
         }
     }
 }
 
 
 MemRef SlabAlloc::alloc(size_t size)
 {
+    // FIXME: The table operations that modify the free lists below
+    // are not yet exception safe, that is, if one of them fails
+    // (presumably due to std::bad_alloc being thrown) they may leave
+    // the underlying node structure of the tables in a state that is
+    // so corrupt that it can lead to memory leaks and even general
+    // memory corruption. This must be fixed. Note that corruption may
+    // be acceptable, but we must be able to guarantee that corruption
+    // never gets so bad that destruction of the tables fails.
+
     TIGHTDB_ASSERT(0 < size);
     TIGHTDB_ASSERT((size & 0x7) == 0); // only allow sizes that are multiples of 8
 
+    // FIXME: Ideally, instead of just marking the free space as
+    // invalid in free_(), we should at least make a best effort to
+    // keep a record of what was freed and then attempt to rebuild the
+    // free lists here when they have become invalid.
+    if (m_free_space_invalid)
+        throw InvalidFreeSpace();
+
     // Do we have a free space we can reuse?
     {
         size_t n = m_free_space.size();
@@ -67,9 +100,8 @@ MemRef SlabAlloc::alloc(size_t size)
             }
 
 #ifdef TIGHTDB_DEBUG
-            if (m_debug_out) {
+            if (m_debug_out)
                 cerr << "Alloc ref: " << ref << " size: " << size << "\n";
-            }
 #endif
 
             char* addr = translate(ref);
@@ -117,21 +149,18 @@ MemRef SlabAlloc::alloc(size_t size)
     size_t ref = curr_ref_end;
 
 #ifdef TIGHTDB_DEBUG
-    if (m_debug_out) {
+    if (m_debug_out)
         cerr << "Alloc ref: " << ref << " size: " << size << "\n";
-    }
 #endif
 
     return MemRef(addr_2, ref);
 }
 
 
-// FIXME: We need to come up with a way to make free() a method that
-// never throws. This is essential for exception safety in large parts
-// of the TightDB API.
-void SlabAlloc::free_(ref_type ref, const char* addr)
+void SlabAlloc::free_(ref_type ref, const char* addr) TIGHTDB_NOEXCEPT
 {
     TIGHTDB_ASSERT(translate(ref) == addr);
 
+    // Free space in read only segment is tracked separately
     bool read_only = is_read_only(ref);
     FreeSpace& free_space = read_only ? m_free_read_only : m_free_space;
 
@@ -140,50 +169,67 @@ void SlabAlloc::free_(ref_type ref, const char* addr)
     size_t size = read_only ? Array::get_byte_size_from_header(addr) :
         Array::get_capacity_from_header(addr);
     size_t ref_end = ref + size;
-    size_t merged_with = npos;
 
 #ifdef TIGHTDB_DEBUG
-    if (m_debug_out) {
+    if (m_debug_out)
         cerr << "Free ref: " << ref << " size: " << size << "\n";
-    }
-#endif // TIGHTDB_DEBUG
+#endif
 
-    // Check if we can merge with start of free block
-    {
-        size_t n = free_space.column().ref.find_first(ref_end);
-        if (n != not_found) {
-            // No consolidation over slab borders
-            if (m_slabs.column().ref_end.find_first(ref_end) == not_found) {
-                free_space[n].ref = ref;
-                free_space[n].size += size;
-                merged_with = n;
+    if (m_free_space_invalid)
+        return;
+
+    try {
+        // FIXME: The table operations that modify the free lists
+        // below are not yet exception safe, that is, if one of them
+        // fails (presumably due to std::bad_alloc being thrown) they
+        // may leave the underlying node structure of the tables in a
+        // state that is so corrupt that it can lead to memory leaks
+        // and even general memory corruption. This must be
+        // fixed. Note that corruption may be acceptable, but we must
+        // be able to guarantee that corruption never gets so bad that
+        // destruction of the tables fails.
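// A minimal sketch (not part of the patch) of the error-handling pattern
// free_() uses here, in isolation: a nothrow operation wraps its fallible
// bookkeeping update and, on failure, degrades to a sticky "tracking lost"
// state that later fallible operations (alloc, realloc) check and report.

struct FreeSpaceTracker {
    bool m_free_space_invalid;

    void free_nothrow() // must not propagate exceptions
    {
        if (m_free_space_invalid)
            return; // tracking already lost; nothing useful to record
        try {
            update_free_lists(); // may throw std::bad_alloc
        }
        catch (...) {
            m_free_space_invalid = true; // leak the space, stay consistent
        }
    }

    void update_free_lists(); // fallible bookkeeping, defined elsewhere
};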
+
+        // Check if we can merge with start of free block
+        size_t merged_with = npos;
+        {
+            size_t n = free_space.column().ref.find_first(ref_end);
+            if (n != not_found) {
+                // No consolidation over slab borders
+                if (m_slabs.column().ref_end.find_first(ref_end) == not_found) {
+                    free_space[n].ref = ref;    // Throws
+                    free_space[n].size += size; // Throws
+                    merged_with = n;
+                }
             }
         }
-    }
 
-    // Check if we can merge with end of free block
-    if (m_slabs.column().ref_end.find_first(ref) == not_found) { // avoid slab borders
-        size_t n = free_space.size();
-        for (size_t i = 0; i < n; ++i) {
-            FreeSpace::Cursor c = free_space[i];
-
-            ref_type end = to_ref(c.ref + c.size);
-            if (ref == end) {
-                if (merged_with != npos) {
-                    c.size += free_space[merged_with].size;
-                    free_space.remove(merged_with);
-                }
-                else {
-                    c.size += size;
+        // Check if we can merge with end of free block
+        if (m_slabs.column().ref_end.find_first(ref) == not_found) { // avoid slab borders
+            size_t n = free_space.size();
+            for (size_t i = 0; i < n; ++i) {
+                FreeSpace::Cursor c = free_space[i];
+                ref_type end = to_ref(c.ref + c.size);
+                if (ref == end) {
+                    if (merged_with != npos) {
+                        size_t s = to_size_t(free_space[merged_with].size);
+                        c.size += s;                     // Throws
+                        free_space.remove(merged_with);  // Throws
+                    }
+                    else {
+                        c.size += size; // Throws
+                    }
+                    return;
                 }
-                return;
             }
         }
-    }
 
-    // Else just add to freelist
-    if (merged_with == npos)
-        free_space.add(ref, size); // Throws
+        // Else just add to freelist
+        if (merged_with == npos)
+            free_space.add(ref, size); // Throws
+    }
+    catch (...) {
+        m_free_space_invalid = true;
+    }
 }
 
 
@@ -193,7 +239,8 @@ MemRef SlabAlloc::realloc_(size_t ref, const char* addr, size_t size)
     TIGHTDB_ASSERT(0 < size);
     TIGHTDB_ASSERT((size & 0x7) == 0); // only allow sizes that are multiples of 8
 
-    //TODO: Check if we can extend current space
+    // FIXME: Check if we can extend current space. In that case,
+    // remember to check m_free_space_invalid.
 
     // Allocate new space
     MemRef new_mem = alloc(size); // Throws
@@ -222,15 +269,15 @@ MemRef SlabAlloc::realloc_(size_t ref, const char* addr, size_t size)
 
 char* SlabAlloc::translate(ref_type ref) const TIGHTDB_NOEXCEPT
 {
-    if (ref < m_baseline) return m_data + ref;
-    else {
-        size_t ndx = m_slabs.column().ref_end.upper_bound(ref);
-        TIGHTDB_ASSERT(ndx != m_slabs.size());
+    if (ref < m_baseline)
+        return m_data + ref;
 
-        size_t offset = ndx == 0 ? m_baseline : to_size_t(m_slabs[ndx-1].ref_end);
-        intptr_t addr = intptr_t(m_slabs[ndx].addr.get());
-        return reinterpret_cast<char*>(addr) + (ref - offset);
-    }
+    size_t ndx = m_slabs.column().ref_end.upper_bound(ref);
+    TIGHTDB_ASSERT(ndx != m_slabs.size());
+
+    size_t offset = ndx == 0 ? m_baseline : to_size_t(m_slabs[ndx-1].ref_end);
+    intptr_t addr = intptr_t(m_slabs[ndx].addr.get());
+    return reinterpret_cast<char*>(addr) + (ref - offset);
 }
 
 
@@ -313,7 +360,8 @@ void SlabAlloc::attach_file(const string& path, bool is_shared, bool read_only,
 void SlabAlloc::attach_buffer(char* data, size_t size, bool take_ownership)
 {
     // Verify the data structures
-    if (!validate_buffer(data, size)) throw InvalidDatabase();
+    if (!validate_buffer(data, size))
+        throw InvalidDatabase();
 
     m_data = data;
     m_baseline = size;
@@ -387,6 +435,13 @@ size_t SlabAlloc::get_total_size() const
 
 void SlabAlloc::free_all()
 {
+    // FIXME: Currently, it is not safe to call
+    // m_free_read_only.clear() and m_free_space.clear() after a
+    // failure to update them in free_() (m_free_space_invalid =
+    // true). It is expected that this problem will be fixed by
+    // changes that will make the public API completely exception
+    // safe.
+
     // Free all scratch space (done after all data has
     // been commited to persistent space)
     m_free_read_only.clear();
     m_free_space.clear();
@@ -405,6 +460,8 @@ void SlabAlloc::free_all()
     }
 
     TIGHTDB_ASSERT(is_all_free());
+
+    m_free_space_invalid = false;
 }
 
 
@@ -438,6 +495,13 @@ bool SlabAlloc::remap(size_t file_size)
     return addr_changed;
 }
 
+const SlabAlloc::FreeSpace& SlabAlloc::get_free_read_only() const
+{
+    if (m_free_space_invalid)
+        throw InvalidFreeSpace();
+    return m_free_read_only;
+}
+
 
 #ifdef TIGHTDB_DEBUG
 
@@ -489,9 +553,8 @@ void SlabAlloc::print() const
         size_t(m_slabs[m_slabs.size()-1].ref_end - m_baseline);
 
     size_t free = 0;
-    for (size_t i = 0; i < m_free_space.size(); ++i) {
+    for (size_t i = 0; i < m_free_space.size(); ++i)
         free += to_ref(m_free_space[i].size);
-    }
 
     size_t allocated = allocated_for_slabs - free;
     cout << "Attached: " << (m_data ? m_baseline : 0) << " Allocated: " << allocated << "\n";
diff --git a/src/tightdb/alloc_slab.hpp b/src/tightdb/alloc_slab.hpp
index 9b561d1140f..a65e051e4a9 100644
--- a/src/tightdb/alloc_slab.hpp
+++ b/src/tightdb/alloc_slab.hpp
@@ -59,7 +59,7 @@ class SlabAlloc: public Allocator {
     /// Construct a slab allocator in the unattached state.
     SlabAlloc();
 
-    ~SlabAlloc();
+    ~SlabAlloc() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE;
 
     /// Attach this allocator to the specified file.
     ///
@@ -132,8 +132,9 @@ class SlabAlloc: public Allocator {
 
     MemRef alloc(std::size_t size) TIGHTDB_OVERRIDE;
     MemRef realloc_(ref_type, const char*, std::size_t size) TIGHTDB_OVERRIDE;
-    void free_(ref_type, const char*) TIGHTDB_OVERRIDE; // FIXME: It would be very nice if we could detect an invalid free operation in debug mode
-    char* translate(ref_type) const TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE;
+    // FIXME: It would be very nice if we could detect an invalid free operation in debug mode
+    void free_(ref_type, const char*) TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE;
+    char* translate(ref_type) const TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE;
     bool is_read_only(ref_type) const TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE;
 
 #ifdef TIGHTDB_DEBUG
@@ -156,19 +157,31 @@ class SlabAlloc: public Allocator {
 
     static const char default_header[24];
 
-    File      m_file;
-    char*     m_data;
-    FreeMode  m_free_mode;
+    File m_file;
+    char* m_data;
+    FreeMode m_free_mode;
+
+    /// When set to true, the free lists are no longer
+    /// up-to-date. This happens if free_() fails, presumably due to
+    /// std::bad_alloc being thrown during updating of the free space
+    /// list. In this case, alloc(), realloc(), and
+    /// get_free_read_only() must throw. This member is deliberately
+    /// placed after m_free_mode in the hope that it leads to less
+    /// padding between members due to alignment requirements.
+    bool m_free_space_invalid;
+
     std::size_t m_baseline; // Also size of memory mapped portion of database file
-    Slabs     m_slabs;
-    FreeSpace m_free_space;
-    FreeSpace m_free_read_only;
+    Slabs m_slabs;
+    FreeSpace m_free_space;
+    FreeSpace m_free_read_only;
 
 #ifdef TIGHTDB_DEBUG
-    bool      m_debug_out;
+    bool m_debug_out;
 #endif
 
-    const FreeSpace& get_free_read_only() const { return m_free_read_only; }
+    /// Throws if free-lists are no longer valid.
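This is the heart of the change: free_() can no longer report failure, so a failed free-list update poisons the allocator, and readers of the lists, such as the accessor declared just below, must check the poison flag and throw. A minimal, self-contained sketch of the pattern, with a std::vector standing in for the table-backed free list, plain C++11 noexcept standing in for TIGHTDB_NOEXCEPT, and MiniSlabAlloc/Chunk as illustrative names only:

    #include <cstddef>
    #include <iostream>
    #include <stdexcept>
    #include <vector>

    struct InvalidFreeSpace: std::runtime_error {
        InvalidFreeSpace(): std::runtime_error("free space tracking is invalid") {}
    };

    class MiniSlabAlloc {
    public:
        struct Chunk { std::size_t ref, size; };
        typedef std::vector<Chunk> FreeSpace;

        // Never throws: a failure to record the chunk poisons the free
        // lists instead of letting the exception escape.
        void free_(std::size_t ref, std::size_t size) noexcept
        {
            if (m_free_space_invalid)
                return; // lists already unreliable, do not touch them
            try {
                Chunk c = { ref, size };
                m_free_space.push_back(c); // may throw std::bad_alloc
            }
            catch (...) {
                m_free_space_invalid = true;
            }
        }

        // Readers must observe the poison flag.
        const FreeSpace& get_free_read_only() const
        {
            if (m_free_space_invalid)
                throw InvalidFreeSpace();
            return m_free_space;
        }

        // Rebuilding the lists from scratch is the only thing that may
        // clear the flag (free_all() in the patch).
        void free_all() noexcept
        {
            m_free_space.clear();
            m_free_space_invalid = false;
        }

    private:
        FreeSpace m_free_space;
        bool m_free_space_invalid = false;
    };

    int main()
    {
        MiniSlabAlloc alloc;
        alloc.free_(8, 16);
        std::cout << alloc.get_free_read_only().size() << " free chunk(s)\n";
    }

The flag is deliberately one-way within normal operation: only a full rebuild of the lists may reset it, which is what makes the catch-all in free_() safe.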
+ const FreeSpace& get_free_read_only() const; + bool validate_buffer(const char* data, std::size_t len) const; #ifdef TIGHTDB_ENABLE_REPLICATION @@ -190,6 +203,8 @@ inline SlabAlloc::SlabAlloc() // Mark as unattached m_data = 0; + m_free_space_invalid = false; + // We cannot initialize m_baseline to zero, because that would // cause the first invocation of alloc() to return a 'ref' equal // to zero, and zero is by definition not a valid ref. Since all diff --git a/src/tightdb/array.cpp b/src/tightdb/array.cpp index 46d2809d2b3..fd238c6d23d 100644 --- a/src/tightdb/array.cpp +++ b/src/tightdb/array.cpp @@ -205,7 +205,7 @@ void Array::set_parent(ArrayParent* parent, size_t ndx_in_parent) TIGHTDB_NOEXCE } -void Array::destroy_children() +void Array::destroy_children() TIGHTDB_NOEXCEPT { for (size_t i = 0; i < m_size; ++i) { int64_t v = get(i); diff --git a/src/tightdb/array.hpp b/src/tightdb/array.hpp index 10a2255c05c..3b0d8033021 100644 --- a/src/tightdb/array.hpp +++ b/src/tightdb/array.hpp @@ -182,7 +182,7 @@ class MemStats { class ArrayParent { public: - virtual ~ArrayParent() {} + virtual ~ArrayParent() TIGHTDB_NOEXCEPT {} // FIXME: Must be protected. Solve problem by having the Array constructor, that creates a new array, call it. virtual void update_child_ref(std::size_t child_ndx, ref_type new_ref) = 0; @@ -315,7 +315,7 @@ class Array: public ArrayParent { struct no_prealloc_tag {}; explicit Array(no_prealloc_tag) TIGHTDB_NOEXCEPT; - virtual ~Array() TIGHTDB_NOEXCEPT {} + ~Array() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE {} /// Create a new empty array of the specified type and attach to /// it. This does not modify the parent reference information. @@ -361,7 +361,7 @@ class Array: public ArrayParent { /// ref to the new array. ref_type clone(Allocator&) const; - void move_assign(Array&); // Move semantics for assignment + void move_assign(Array&) TIGHTDB_NOEXCEPT; // Move semantics for assignment /// Construct an empty array of the specified type and return just /// the reference to the underlying memory. @@ -407,6 +407,9 @@ class Array: public ArrayParent { /// Erase the element at the specified index, and move elements at /// succeeding indexes to the next lower index. /// + /// Is guaranteed not to throw if + /// get_alloc().is_read_only(get_ref()) returns false. + /// /// FIXME: Carefull with this one. It does not destroy/deallocate /// subarrays as clear() does. This difference is surprising and /// highly counterintuitive. @@ -414,6 +417,9 @@ class Array: public ArrayParent { /// Erase every element in this array. Subarrays will be destroyed /// recursively, and space allocated for subarrays will be freed. + /// + /// Is guaranteed not to throw if + /// get_alloc().is_read_only(get_ref()) returns false. void clear(); /// If neccessary, expand the representation so that it can store @@ -511,9 +517,11 @@ class Array: public ArrayParent { /// transition to the detached state (as if calling detach()), /// then free the allocated memory. For an unattached accessor, /// this function has no effect (idempotency). 
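The destructor-related hunks above all lean on a single fact: once the deallocation primitive is nothrow, recursive destruction of a node tree can be declared nothrow as well and may then run safely from destructors and catch blocks. An illustrative sketch of that reasoning (this is not TightDB's actual Array; Node and destroy_tree are invented for the example):

    #include <cstddef>
    #include <vector>

    struct Node {
        std::vector<Node*> children;
    };

    // Safe to mark noexcept because both the recursion and operator
    // delete are nothrow; nothing on this path can throw.
    void destroy_tree(Node* node) noexcept
    {
        for (std::size_t i = 0; i < node->children.size(); ++i)
            destroy_tree(node->children[i]);
        delete node;
    }

    int main()
    {
        Node* root = new Node;
        root->children.push_back(new Node);
        destroy_tree(root);
    }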
- void destroy(); + void destroy() TIGHTDB_NOEXCEPT; - static void destroy(ref_type, Allocator&); + static void destroy(ref_type, Allocator&) TIGHTDB_NOEXCEPT; + + class DestroyGuard; Allocator& get_alloc() const TIGHTDB_NOEXCEPT { return m_alloc; } @@ -697,7 +705,6 @@ class Array: public ArrayParent { static WidthType get_wtype_from_header(const char*) TIGHTDB_NOEXCEPT; static int get_width_from_header(const char*) TIGHTDB_NOEXCEPT; static std::size_t get_size_from_header(const char*) TIGHTDB_NOEXCEPT; - static std::size_t get_capacity_from_header(const char*) TIGHTDB_NOEXCEPT; static Type get_type_from_header(const char*) TIGHTDB_NOEXCEPT; @@ -816,6 +823,8 @@ class Array: public ArrayParent { /// Same as get_byte_size(). static std::size_t get_byte_size_from_header(const char*) TIGHTDB_NOEXCEPT; + static std::size_t get_capacity_from_header(const char*) TIGHTDB_NOEXCEPT; + /// Get the maximum number of bytes that can be written by a /// non-recursive invocation of write() on an array with the /// specified number of elements, that is, the maxumum value that @@ -825,7 +834,7 @@ class Array: public ArrayParent { void update_child_ref(std::size_t child_ndx, ref_type new_ref) TIGHTDB_OVERRIDE; ref_type get_child_ref(std::size_t child_ndx) const TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE; - void destroy_children(); + void destroy_children() TIGHTDB_NOEXCEPT; // FIXME: below should be moved to a specific IntegerArray class protected: @@ -1121,13 +1130,13 @@ inline bool Array::is_index_node(ref_type ref, const Allocator& alloc) return get_indexflag_from_header(alloc.translate(ref)); } -inline void Array::destroy() +inline void Array::destroy() TIGHTDB_NOEXCEPT { if (!is_attached()) return; if (m_hasRefs) - destroy_children(); // Throws + destroy_children(); char* header = get_header_from_data(m_data); m_alloc.free_(m_ref, header); @@ -1141,7 +1150,7 @@ inline void Array::clear() copy_on_write(); // Throws if (m_hasRefs) - destroy_children(); // Throws + destroy_children(); // Truncate size to zero (but keep capacity) m_size = 0; @@ -1153,7 +1162,7 @@ inline void Array::clear() set_header_width(0); } -inline void Array::destroy(ref_type ref, Allocator& alloc) +inline void Array::destroy(ref_type ref, Allocator& alloc) TIGHTDB_NOEXCEPT { Array array(alloc); array.init_from_ref(ref); @@ -1161,6 +1170,36 @@ inline void Array::destroy(ref_type ref, Allocator& alloc) } +class Array::DestroyGuard { +public: + DestroyGuard(ref_type ref, Allocator& alloc) TIGHTDB_NOEXCEPT: m_ref(ref), m_alloc(alloc) + { + } + + ~DestroyGuard() TIGHTDB_NOEXCEPT + { + if (m_ref) + destroy(m_ref, m_alloc); + } + + ref_type get() const TIGHTDB_NOEXCEPT + { + return m_ref; + } + + ref_type release() TIGHTDB_NOEXCEPT + { + ref_type ref = m_ref; + m_ref = 0; + return ref; + } + +private: + ref_type m_ref; + Allocator& m_alloc; +}; + + //------------------------------------------------- @@ -1506,9 +1545,10 @@ inline ref_type Array::clone(Allocator& clone_alloc) const return clone(header, m_alloc, clone_alloc); // Throws } -inline void Array::move_assign(Array& a) +inline void Array::move_assign(Array& a) TIGHTDB_NOEXCEPT { - // FIXME: Be carefull with the old parent info here. Should it be copied? + // FIXME: Be carefull with the old parent info here. Should it be + // copied? 
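The DestroyGuard introduced above is a scoped guard: it owns a ref until release() is called and destroys it if an exception unwinds the stack first. A self-contained model, with a free destroy() function standing in for Array::destroy(ref, alloc):

    #include <cstdio>

    typedef unsigned long ref_type;

    // Stand-in for Array::destroy(ref, alloc): frees the tree at ref.
    void destroy(ref_type ref) { std::printf("destroying ref %lu\n", ref); }

    class DestroyGuard {
    public:
        explicit DestroyGuard(ref_type ref) noexcept: m_ref(ref) {}

        // If release() was never called, the guarded ref is destroyed.
        ~DestroyGuard() { if (m_ref) destroy(m_ref); }

        ref_type get() const noexcept { return m_ref; }

        // Commit: ownership passes to the caller, destructor does nothing.
        ref_type release() noexcept { ref_type r = m_ref; m_ref = 0; return r; }

    private:
        ref_type m_ref;
    };

    int main()
    {
        DestroyGuard guard(42);
        // ... steps that may throw would go here ...
        ref_type ref = guard.release(); // success path: keep the ref alive
        (void)ref;
    }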
// FIXME: It will likely be a lot better for the optimizer if we // did a member-wise copy, rather than recreating the state from diff --git a/src/tightdb/array_basic.hpp b/src/tightdb/array_basic.hpp index 736a5e21aac..d84bfdb4091 100644 --- a/src/tightdb/array_basic.hpp +++ b/src/tightdb/array_basic.hpp @@ -35,6 +35,7 @@ template class BasicArray: public Array { BasicArray(ref_type, ArrayParent*, std::size_t ndx_in_parent, Allocator& = Allocator::get_default()) TIGHTDB_NOEXCEPT; explicit BasicArray(no_prealloc_tag) TIGHTDB_NOEXCEPT; + ~BasicArray() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE {} T get(std::size_t ndx) const TIGHTDB_NOEXCEPT; void add(T value); diff --git a/src/tightdb/array_binary.hpp b/src/tightdb/array_binary.hpp index 8b957917fa1..5788463bff5 100644 --- a/src/tightdb/array_binary.hpp +++ b/src/tightdb/array_binary.hpp @@ -34,6 +34,7 @@ class ArrayBinary: public Array { Allocator&) TIGHTDB_NOEXCEPT; ArrayBinary(ref_type, ArrayParent*, std::size_t ndx_in_parent, Allocator& = Allocator::get_default()) TIGHTDB_NOEXCEPT; + ~ArrayBinary() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE {} bool is_empty() const TIGHTDB_NOEXCEPT; std::size_t size() const TIGHTDB_NOEXCEPT; diff --git a/src/tightdb/array_blob.hpp b/src/tightdb/array_blob.hpp index c1aa826efec..4f31712f35d 100644 --- a/src/tightdb/array_blob.hpp +++ b/src/tightdb/array_blob.hpp @@ -32,6 +32,7 @@ class ArrayBlob: public Array { ArrayBlob(ref_type, ArrayParent*, std::size_t ndx_in_parent, Allocator& = Allocator::get_default()) TIGHTDB_NOEXCEPT; explicit ArrayBlob(Allocator&) TIGHTDB_NOEXCEPT; + ~ArrayBlob() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE {} const char* get(std::size_t pos) const TIGHTDB_NOEXCEPT; diff --git a/src/tightdb/array_string.hpp b/src/tightdb/array_string.hpp index 72896121863..a6940a56072 100644 --- a/src/tightdb/array_string.hpp +++ b/src/tightdb/array_string.hpp @@ -37,6 +37,7 @@ class ArrayString: public Array { ArrayString(ref_type, ArrayParent*, std::size_t ndx_in_parent, Allocator& = Allocator::get_default()) TIGHTDB_NOEXCEPT; explicit ArrayString(Allocator&) TIGHTDB_NOEXCEPT; + ~ArrayString() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE {} StringData get(std::size_t ndx) const TIGHTDB_NOEXCEPT; void add(); diff --git a/src/tightdb/array_string_long.hpp b/src/tightdb/array_string_long.hpp index 02a90da944f..9000dc3c509 100644 --- a/src/tightdb/array_string_long.hpp +++ b/src/tightdb/array_string_long.hpp @@ -34,6 +34,7 @@ class ArrayStringLong: public Array { ArrayStringLong(MemRef, ArrayParent*, std::size_t ndx_in_parent, Allocator&) TIGHTDB_NOEXCEPT; ArrayStringLong(ref_type, ArrayParent*, std::size_t ndx_in_parent, Allocator& = Allocator::get_default()) TIGHTDB_NOEXCEPT; + ~ArrayStringLong() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE {} bool is_empty() const TIGHTDB_NOEXCEPT; std::size_t size() const TIGHTDB_NOEXCEPT; diff --git a/src/tightdb/binary_data.hpp b/src/tightdb/binary_data.hpp index 701507f89a8..53b333575be 100644 --- a/src/tightdb/binary_data.hpp +++ b/src/tightdb/binary_data.hpp @@ -39,6 +39,7 @@ class BinaryData { BinaryData() TIGHTDB_NOEXCEPT: m_data(0), m_size(0) {} BinaryData(const char* data, std::size_t size) TIGHTDB_NOEXCEPT: m_data(data), m_size(size) {} template explicit BinaryData(const char (&data)[N]): m_data(data), m_size(N) {} + ~BinaryData() TIGHTDB_NOEXCEPT {} char operator[](std::size_t i) const TIGHTDB_NOEXCEPT { return m_data[i]; } diff --git a/src/tightdb/bind_ptr.hpp b/src/tightdb/bind_ptr.hpp index cae42ce3b92..1ca63b9ae54 100644 --- a/src/tightdb/bind_ptr.hpp +++ b/src/tightdb/bind_ptr.hpp @@ -44,10 
+44,13 @@ namespace tightdb { /// for an example of that. /// /// A restricted notion of move semantics (as defined by C++11) is -/// provided. Instead of calling std::move() one must call -/// move() without the std qualifier. The -/// effectiveness of this 'emulation' relies on 'return value -/// optimization' being enabled in the compiler. +/// provided. Instead of calling `std::move()` one must call `move()` +/// without the `std::` qualifier. The effectiveness of this +/// 'emulation' relies on 'return value optimization' being enabled in +/// the compiler. +/// +/// This smart pointer implementation assumes that the target object +/// destructor never throws. template class bind_ptr { public: #ifdef TIGHTDB_HAVE_CXX11_CONSTEXPR @@ -57,7 +60,7 @@ template class bind_ptr { #endif explicit bind_ptr(T* p) TIGHTDB_NOEXCEPT { bind(p); } template explicit bind_ptr(U* p) TIGHTDB_NOEXCEPT { bind(p); } - ~bind_ptr() { unbind(); } + ~bind_ptr() TIGHTDB_NOEXCEPT { unbind(); } #ifdef TIGHTDB_HAVE_CXX11_RVALUE_REFERENCE @@ -66,16 +69,16 @@ template class bind_ptr { template bind_ptr(const bind_ptr& p) TIGHTDB_NOEXCEPT { bind(p.m_ptr); } // Copy assign - bind_ptr& operator=(const bind_ptr& p) { bind_ptr(p).swap(*this); return *this; } - template bind_ptr& operator=(const bind_ptr& p) { bind_ptr(p).swap(*this); return *this; } + bind_ptr& operator=(const bind_ptr& p) TIGHTDB_NOEXCEPT { bind_ptr(p).swap(*this); return *this; } + template bind_ptr& operator=(const bind_ptr& p) TIGHTDB_NOEXCEPT { bind_ptr(p).swap(*this); return *this; } // Move construct bind_ptr(bind_ptr&& p) TIGHTDB_NOEXCEPT: m_ptr(p.release()) {} template bind_ptr(bind_ptr&& p) TIGHTDB_NOEXCEPT: m_ptr(p.release()) {} // Move assign - bind_ptr& operator=(bind_ptr&& p) { bind_ptr(std::move(p)).swap(*this); return *this; } - template bind_ptr& operator=(bind_ptr&& p) { bind_ptr(std::move(p)).swap(*this); return *this; } + bind_ptr& operator=(bind_ptr&& p) TIGHTDB_NOEXCEPT { bind_ptr(std::move(p)).swap(*this); return *this; } + template bind_ptr& operator=(bind_ptr&& p) TIGHTDB_NOEXCEPT { bind_ptr(std::move(p)).swap(*this); return *this; } #else // !TIGHTDB_HAVE_CXX11_RVALUE_REFERENCE @@ -84,8 +87,8 @@ template class bind_ptr { template bind_ptr(bind_ptr p) TIGHTDB_NOEXCEPT: m_ptr(p.release()) {} // Copy assign - bind_ptr& operator=(bind_ptr p) { p.swap(*this); return *this; } - template bind_ptr& operator=(bind_ptr p) { bind_ptr(move(p)).swap(*this); return *this; } + bind_ptr& operator=(bind_ptr p) TIGHTDB_NOEXCEPT { p.swap(*this); return *this; } + template bind_ptr& operator=(bind_ptr p) TIGHTDB_NOEXCEPT { bind_ptr(move(p)).swap(*this); return *this; } #endif // !TIGHTDB_HAVE_CXX11_RVALUE_REFERENCE @@ -109,9 +112,9 @@ template class bind_ptr { #endif T* get() const TIGHTDB_NOEXCEPT { return m_ptr; } - void reset() { bind_ptr().swap(*this); } - void reset(T* p) { bind_ptr(p).swap(*this); } - template void reset(U* p) { bind_ptr(p).swap(*this); } + void reset() TIGHTDB_NOEXCEPT { bind_ptr().swap(*this); } + void reset(T* p) TIGHTDB_NOEXCEPT { bind_ptr(p).swap(*this); } + template void reset(U* p) TIGHTDB_NOEXCEPT { bind_ptr(p).swap(*this); } void swap(bind_ptr& p) TIGHTDB_NOEXCEPT { std::swap(m_ptr, p.m_ptr); } friend void swap(bind_ptr& a, bind_ptr& b) TIGHTDB_NOEXCEPT { a.swap(b); } @@ -128,7 +131,7 @@ template class bind_ptr { T* m_ptr; void bind(T* p) TIGHTDB_NOEXCEPT { if (p) p->bind_ref(); m_ptr = p; } - void unbind() { if (m_ptr) m_ptr->unbind_ref(); } + void unbind() TIGHTDB_NOEXCEPT { if (m_ptr) m_ptr->unbind_ref(); } T* 
release() TIGHTDB_NOEXCEPT { T* const p = m_ptr; m_ptr = 0; return p; } @@ -156,16 +159,17 @@ inline std::basic_ostream& operator<<(std::basic_ostream& out, const b class RefCountBase { public: RefCountBase() TIGHTDB_NOEXCEPT: m_ref_count(0) {} - virtual ~RefCountBase() {} + virtual ~RefCountBase() TIGHTDB_NOEXCEPT {} private: mutable std::atomic m_ref_count; // FIXME: Operators ++ and -- as used below use - // std::memory_order_seq_cst. I'm not sure whether it is the - // most effecient choice, that also guarantees safety. + // std::memory_order_seq_cst. I'm not sure whether this is the + // choice that leads to maximum efficiency, but at least it is + // safe. void bind_ref() const TIGHTDB_NOEXCEPT { ++m_ref_count; } - void unbind_ref() const { if (--m_ref_count == 0) delete this; } + void unbind_ref() const TIGHTDB_NOEXCEPT { if (--m_ref_count == 0) delete this; } template friend class bind_ptr; }; diff --git a/src/tightdb/buffer.hpp b/src/tightdb/buffer.hpp new file mode 100644 index 00000000000..7f538ad7dd0 --- /dev/null +++ b/src/tightdb/buffer.hpp @@ -0,0 +1,70 @@ +/************************************************************************* + * + * TIGHTDB CONFIDENTIAL + * __________________ + * + * [2011] - [2012] TightDB Inc + * All Rights Reserved. + * + * NOTICE: All information contained herein is, and remains + * the property of TightDB Incorporated and its suppliers, + * if any. The intellectual and technical concepts contained + * herein are proprietary to TightDB Incorporated + * and its suppliers and may be covered by U.S. and Foreign Patents, + * patents in process, and are protected by trade secret or copyright law. + * Dissemination of this information or reproduction of this material + * is strictly forbidden unless prior written permission is obtained + * from TightDB Incorporated. + * + **************************************************************************/ +#ifndef TIGHTDB_UTIL_BUFFER_HPP +#define TIGHTDB_UTIL_BUFFER_HPP + +#include +#include +#include + +#include +#include + +namespace tightdb { +namespace util { + +template class Buffer { +public: + T& operator[](std::size_t i) TIGHTDB_NOEXCEPT { return m_data[i]; } + + const T& operator[](std::size_t i) const TIGHTDB_NOEXCEPT { return m_data[i]; } + + Buffer() TIGHTDB_NOEXCEPT: m_data(0), m_size(0) {} + + void set_size(std::size_t); + + friend void swap(Buffer&a, Buffer&b) + { + using std::swap; + swap(a.m_data, b.m_data); + swap(a.m_size, b.m_size); + } + +private: + UniquePtr m_data; + std::size_t m_size; +}; + + + + +// Implementation: + +template void Buffer::set_size(std::size_t size) +{ + m_data.reset(new T[size]); + m_size = size; +} + + +} // namespace util +} // namespace tightdb + +#endif // TIGHTDB_UTIL_BUFFER_HPP diff --git a/src/tightdb/column.hpp b/src/tightdb/column.hpp index 23429506123..66fc67abcd7 100644 --- a/src/tightdb/column.hpp +++ b/src/tightdb/column.hpp @@ -61,9 +61,9 @@ class ColumnBase { virtual bool IsIntColumn() const TIGHTDB_NOEXCEPT { return false; } - virtual void destroy() = 0; + virtual void destroy() TIGHTDB_NOEXCEPT; - virtual ~ColumnBase() {}; + virtual ~ColumnBase() TIGHTDB_NOEXCEPT {}; // Indexing virtual bool has_index() const TIGHTDB_NOEXCEPT { return false; } @@ -78,7 +78,7 @@ class ColumnBase { /// when the transaction ends. 
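The new buffer.hpp listing above reached this copy of the patch with its angle brackets stripped, so the include names and template parameter lists are missing. A best-effort reconstruction of the class, assuming the obvious template parameter and with std::unique_ptr standing in for tightdb's UniquePtr (the real member type is not recoverable from the damaged text):

    #include <cstddef>
    #include <memory>
    #include <utility>

    namespace sketch {

    template<class T> class Buffer {
    public:
        Buffer() noexcept: m_size(0) {}

        T& operator[](std::size_t i) noexcept { return m_data[i]; }
        const T& operator[](std::size_t i) const noexcept { return m_data[i]; }

        // Allocates a fresh buffer; any previous contents are discarded.
        void set_size(std::size_t size)
        {
            m_data.reset(new T[size]); // may throw std::bad_alloc
            m_size = size;
        }

        friend void swap(Buffer& a, Buffer& b) noexcept
        {
            using std::swap;
            swap(a.m_data, b.m_data);
            swap(a.m_size, b.m_size);
        }

    private:
        std::unique_ptr<T[]> m_data; // stand-in for tightdb's UniquePtr
        std::size_t m_size;
    };

    } // namespace sketch

    int main()
    {
        sketch::Buffer<int> buf;
        buf.set_size(4);
        buf[0] = 7;
    }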
virtual void update_from_parent(std::size_t old_baseline) TIGHTDB_NOEXCEPT; - virtual void invalidate_subtables_virtual() {} + virtual void invalidate_subtables_virtual() TIGHTDB_NOEXCEPT {} Allocator& get_alloc() const TIGHTDB_NOEXCEPT { return m_array->get_alloc(); } @@ -172,9 +172,7 @@ class Column: public ColumnBase { explicit Column(ref_type, ArrayParent* = 0, std::size_t ndx_in_parent = 0, Allocator& = Allocator::get_default()); // Throws Column(const Column&); // FIXME: Constness violation - ~Column(); - - void destroy() TIGHTDB_OVERRIDE; + ~Column() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE; bool IsIntColumn() const TIGHTDB_NOEXCEPT { return true; } @@ -269,6 +267,12 @@ class Column: public ColumnBase { // Implementation: +inline void ColumnBase::destroy() TIGHTDB_NOEXCEPT +{ + if (m_array) + m_array->destroy(); +} + template std::size_t ColumnBase::lower_bound(const L& list, T value) const TIGHTDB_NOEXCEPT { @@ -341,17 +345,11 @@ inline Column::Column(const Column& column): ColumnBase(column.m_array) inline Column::Column(Array* root): ColumnBase(root) {} -inline Column::~Column() +inline Column::~Column() TIGHTDB_NOEXCEPT { delete m_array; } -inline void Column::destroy() -{ - if (m_array) - m_array->destroy(); -} - inline int64_t Column::get(std::size_t ndx) const TIGHTDB_NOEXCEPT { TIGHTDB_ASSERT(ndx < size()); diff --git a/src/tightdb/column_basic.hpp b/src/tightdb/column_basic.hpp index 227323a744e..227987a1230 100644 --- a/src/tightdb/column_basic.hpp +++ b/src/tightdb/column_basic.hpp @@ -45,9 +45,7 @@ class BasicColumn: public ColumnBase { explicit BasicColumn(Allocator& = Allocator::get_default()); explicit BasicColumn(ref_type, ArrayParent* = 0, std::size_t ndx_in_parent = 0, Allocator& = Allocator::get_default()); - ~BasicColumn(); - - void destroy() TIGHTDB_OVERRIDE; + ~BasicColumn() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE; size_t size() const TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE; bool is_empty() const TIGHTDB_NOEXCEPT; diff --git a/src/tightdb/column_basic_tpl.hpp b/src/tightdb/column_basic_tpl.hpp index a7ca6823eeb..1ba9887d480 100644 --- a/src/tightdb/column_basic_tpl.hpp +++ b/src/tightdb/column_basic_tpl.hpp @@ -41,28 +41,23 @@ template BasicColumn::BasicColumn(ref_type ref, ArrayParent* parent, std::size_t pndx, Allocator& alloc) { bool root_is_leaf = root_is_leaf_from_ref(ref, alloc); - if (root_is_leaf) + if (root_is_leaf) { m_array = new BasicArray(ref, parent, pndx, alloc); - else + } + else { m_array = new Array(ref, parent, pndx, alloc); + } } template -BasicColumn::~BasicColumn() +BasicColumn::~BasicColumn() TIGHTDB_NOEXCEPT { - if (root_is_leaf()) + if (root_is_leaf()) { delete static_cast*>(m_array); - else + } + else { delete m_array; -} - -template -void BasicColumn::destroy() -{ - if (root_is_leaf()) - static_cast*>(m_array)->destroy(); - else - m_array->destroy(); + } } template diff --git a/src/tightdb/column_binary.cpp b/src/tightdb/column_binary.cpp index 06b210188f4..5a12aa77899 100644 --- a/src/tightdb/column_binary.cpp +++ b/src/tightdb/column_binary.cpp @@ -8,7 +8,7 @@ using namespace tightdb; ColumnBinary::ColumnBinary(Allocator& alloc) { - m_array = new ArrayBinary(NULL, 0, alloc); + m_array = new ArrayBinary(0, 0, alloc); } ColumnBinary::ColumnBinary(ref_type ref, ArrayParent* parent, size_t pndx, Allocator& alloc) @@ -22,20 +22,14 @@ ColumnBinary::ColumnBinary(ref_type ref, ArrayParent* parent, size_t pndx, Alloc } } -ColumnBinary::~ColumnBinary() +ColumnBinary::~ColumnBinary() TIGHTDB_NOEXCEPT { - if (root_is_leaf()) + if (root_is_leaf()) { delete 
static_cast(m_array); - else + } + else { delete m_array; -} - -void ColumnBinary::destroy() -{ - if (root_is_leaf()) - static_cast(m_array)->destroy(); - else - m_array->destroy(); + } } bool ColumnBinary::is_empty() const TIGHTDB_NOEXCEPT diff --git a/src/tightdb/column_binary.hpp b/src/tightdb/column_binary.hpp index 2c5c4daa0d0..a91eedbb6c7 100644 --- a/src/tightdb/column_binary.hpp +++ b/src/tightdb/column_binary.hpp @@ -33,9 +33,7 @@ class ColumnBinary: public ColumnBase { explicit ColumnBinary(Allocator& = Allocator::get_default()); explicit ColumnBinary(ref_type, ArrayParent* = 0, std::size_t ndx_in_parent = 0, Allocator& = Allocator::get_default()); - ~ColumnBinary(); - - void destroy() TIGHTDB_OVERRIDE; + ~ColumnBinary() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE; std::size_t size() const TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE; bool is_empty() const TIGHTDB_NOEXCEPT; diff --git a/src/tightdb/column_mixed.cpp b/src/tightdb/column_mixed.cpp index 5f5f5816da3..7e3edbe4e98 100644 --- a/src/tightdb/column_mixed.cpp +++ b/src/tightdb/column_mixed.cpp @@ -4,7 +4,7 @@ using namespace std; using namespace tightdb; -ColumnMixed::~ColumnMixed() +ColumnMixed::~ColumnMixed() TIGHTDB_NOEXCEPT { delete m_types; delete m_refs; @@ -12,11 +12,6 @@ ColumnMixed::~ColumnMixed() delete m_array; } -void ColumnMixed::destroy() -{ - if (m_array != 0) - m_array->destroy(); -} void ColumnMixed::update_from_parent(size_t old_baseline) TIGHTDB_NOEXCEPT { diff --git a/src/tightdb/column_mixed.hpp b/src/tightdb/column_mixed.hpp index c8e2d30d0d9..544440fac6b 100644 --- a/src/tightdb/column_mixed.hpp +++ b/src/tightdb/column_mixed.hpp @@ -66,8 +66,7 @@ class ColumnMixed: public ColumnBase { ColumnMixed(Allocator&, const Table* table, std::size_t column_ndx, ArrayParent*, std::size_t ndx_in_parent, ref_type); - ~ColumnMixed(); - void destroy() TIGHTDB_OVERRIDE; + ~ColumnMixed() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE; void update_from_parent(std::size_t old_baseline) TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE; @@ -124,10 +123,9 @@ class ColumnMixed: public ColumnBase { /// Compare two mixed columns for equality. bool compare_mixed(const ColumnMixed&) const; - void invalidate_subtables(); + void invalidate_subtables() TIGHTDB_NOEXCEPT; - // Overriding virtual method. 
- void invalidate_subtables_virtual(); + void invalidate_subtables_virtual() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE; static ref_type create(std::size_t num_default_values, Allocator&); @@ -193,6 +191,7 @@ class ColumnMixed::RefsColumn: public ColumnSubtableParent { RefsColumn(Allocator& alloc, const Table* table, std::size_t column_ndx, ArrayParent* parent, std::size_t ndx_in_parent, ref_type ref): ColumnSubtableParent(alloc, table, column_ndx, parent, ndx_in_parent, ref) {} + ~RefsColumn() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE {} using ColumnSubtableParent::get_subtable_ptr; using ColumnSubtableParent::get_subtable; }; diff --git a/src/tightdb/column_mixed_tpl.hpp b/src/tightdb/column_mixed_tpl.hpp index 2d1c26894a7..64203e856a8 100644 --- a/src/tightdb/column_mixed_tpl.hpp +++ b/src/tightdb/column_mixed_tpl.hpp @@ -67,12 +67,12 @@ inline Table* ColumnMixed::get_subtable_ptr(std::size_t row_idx) const return m_refs->get_subtable_ptr(row_idx); } -inline void ColumnMixed::invalidate_subtables() +inline void ColumnMixed::invalidate_subtables() TIGHTDB_NOEXCEPT { m_refs->invalidate_subtables(); } -inline void ColumnMixed::invalidate_subtables_virtual() +inline void ColumnMixed::invalidate_subtables_virtual() TIGHTDB_NOEXCEPT { invalidate_subtables(); } diff --git a/src/tightdb/column_string.cpp b/src/tightdb/column_string.cpp index dd0add26235..33c446f8bb2 100644 --- a/src/tightdb/column_string.cpp +++ b/src/tightdb/column_string.cpp @@ -53,28 +53,15 @@ AdaptiveStringColumn::AdaptiveStringColumn(ref_type ref, ArrayParent* parent, si } } -AdaptiveStringColumn::~AdaptiveStringColumn() +AdaptiveStringColumn::~AdaptiveStringColumn() TIGHTDB_NOEXCEPT { delete m_array; - if (m_index) - delete m_index; + delete m_index; } -void AdaptiveStringColumn::destroy() +void AdaptiveStringColumn::destroy() TIGHTDB_NOEXCEPT { - if (root_is_leaf()) { - bool long_strings = m_array->has_refs(); - if (long_strings) { - static_cast(m_array)->destroy(); - } - else { - static_cast(m_array)->destroy(); - } - } - else { - m_array->destroy(); - } - + ColumnBase::destroy(); if (m_index) m_index->destroy(); } diff --git a/src/tightdb/column_string.hpp b/src/tightdb/column_string.hpp index 736ab57ab51..7019048b63c 100644 --- a/src/tightdb/column_string.hpp +++ b/src/tightdb/column_string.hpp @@ -37,9 +37,9 @@ class AdaptiveStringColumn: public ColumnBase { explicit AdaptiveStringColumn(Allocator& = Allocator::get_default()); explicit AdaptiveStringColumn(ref_type, ArrayParent* = 0, std::size_t ndx_in_parent = 0, Allocator& = Allocator::get_default()); - ~AdaptiveStringColumn(); + ~AdaptiveStringColumn() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE; - void destroy() TIGHTDB_OVERRIDE; + void destroy() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE; std::size_t size() const TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE; bool is_empty() const TIGHTDB_NOEXCEPT; diff --git a/src/tightdb/column_string_enum.cpp b/src/tightdb/column_string_enum.cpp index f4f08880096..850adc13e9e 100644 --- a/src/tightdb/column_string_enum.cpp +++ b/src/tightdb/column_string_enum.cpp @@ -20,19 +20,19 @@ ColumnStringEnum::ColumnStringEnum(ref_type keys, ref_type values, ArrayParent* size_t ndx_in_parent, Allocator& alloc): Column(values, parent, ndx_in_parent+1, alloc), // Throws m_keys(keys, parent, ndx_in_parent, alloc), // Throws - m_index(0) {} + m_index(0) +{ +} -ColumnStringEnum::~ColumnStringEnum() +ColumnStringEnum::~ColumnStringEnum() TIGHTDB_NOEXCEPT { - if (m_index) - delete m_index; + delete m_index; } -void ColumnStringEnum::destroy() +void ColumnStringEnum::destroy() 
TIGHTDB_NOEXCEPT { m_keys.destroy(); Column::destroy(); - if (m_index) m_index->destroy(); } diff --git a/src/tightdb/column_string_enum.hpp b/src/tightdb/column_string_enum.hpp index 27ca27f0586..a0aa479d6d0 100644 --- a/src/tightdb/column_string_enum.hpp +++ b/src/tightdb/column_string_enum.hpp @@ -33,8 +33,8 @@ class ColumnStringEnum: public Column { ColumnStringEnum(ref_type keys, ref_type values, ArrayParent* = 0, std::size_t ndx_in_parent = 0, Allocator& = Allocator::get_default()); - ~ColumnStringEnum(); - void destroy() TIGHTDB_OVERRIDE; + ~ColumnStringEnum() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE; + void destroy() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE; StringData get(std::size_t ndx) const TIGHTDB_NOEXCEPT; void add(StringData value); diff --git a/src/tightdb/column_table.cpp b/src/tightdb/column_table.cpp index a6ba3419eb0..27b2b8393ac 100644 --- a/src/tightdb/column_table.cpp +++ b/src/tightdb/column_table.cpp @@ -11,11 +11,12 @@ void ColumnSubtableParent::update_from_parent(size_t old_baseline) TIGHTDB_NOEXC m_subtable_map.update_from_parent(old_baseline); } -void ColumnSubtableParent::child_destroyed(size_t subtable_ndx) +void ColumnSubtableParent::child_accessor_destroyed(size_t subtable_ndx) TIGHTDB_NOEXCEPT { m_subtable_map.remove(subtable_ndx); // Note that this column instance may be destroyed upon return - // from Table::unbind_ref(). + // from Table::unbind_ref(), i.e., a so-called suicide is + // possible. if (m_table && m_subtable_map.empty()) m_table->unbind_ref(); } diff --git a/src/tightdb/column_table.hpp b/src/tightdb/column_table.hpp index 162e30c523e..11496b4c293 100644 --- a/src/tightdb/column_table.hpp +++ b/src/tightdb/column_table.hpp @@ -20,6 +20,8 @@ #ifndef TIGHTDB_COLUMN_TABLE_HPP #define TIGHTDB_COLUMN_TABLE_HPP +#include + #include #include @@ -31,7 +33,9 @@ class ColumnSubtableParent: public Column, public Table::Parent { public: void update_from_parent(std::size_t old_baseline) TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE; - void invalidate_subtables(); + void invalidate_subtables() TIGHTDB_NOEXCEPT; + + ~ColumnSubtableParent() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE {} protected: /// A pointer to the table that this column is part of. For a @@ -82,7 +86,7 @@ class ColumnSubtableParent: public Column, public Table::Parent { void update_child_ref(std::size_t subtable_ndx, ref_type new_ref) TIGHTDB_OVERRIDE; ref_type get_child_ref(std::size_t subtable_ndx) const TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE; - void child_destroyed(std::size_t subtable_ndx) TIGHTDB_OVERRIDE; + void child_accessor_destroyed(std::size_t subtable_ndx) TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE; /// Assumes that the two tables have the same spec. 
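The renamed child_accessor_destroyed() above carries a warning about "suicide": the final unbind_ref() may destroy the very object whose member function is still executing. A self-contained model of why nothing may be touched after that call (Owner and Child are stand-in types for the table/column relationship):

    #include <iostream>

    struct Owner;

    struct Child {
        Owner* m_owner;
        void on_last_accessor_gone() noexcept; // defined after Owner
    };

    struct Owner {
        Child m_child;
        int m_ref_count;
        Owner(): m_ref_count(1) { m_child.m_owner = this; }
        void unbind_ref() noexcept
        {
            if (--m_ref_count == 0)
                delete this; // destroys m_child as well
        }
    };

    void Child::on_last_accessor_gone() noexcept
    {
        m_owner->unbind_ref();
        // `this` may be dangling here (the "suicide" case). Touching any
        // member now would be undefined behavior, so simply return.
    }

    int main()
    {
        Owner* o = new Owner;
        o->m_child.on_last_accessor_gone(); // deletes o
        std::cout << "returned safely without touching freed state\n";
    }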
static bool compare_subtable_rows(const Table&, const Table&); @@ -103,17 +107,20 @@ class ColumnSubtableParent: public Column, public Table::Parent { private: struct SubtableMap { - SubtableMap(Allocator& alloc): m_indexes(alloc), m_tables(alloc) {} - ~SubtableMap(); - bool empty() const TIGHTDB_NOEXCEPT { return !m_indexes.is_attached() || m_indexes.is_empty(); } - Table* find(std::size_t subtable_ndx) const; - void insert(std::size_t subtable_ndx, Table*); - void remove(std::size_t subtable_ndx); - void update_from_parent(std::size_t old_baseline) TIGHTDB_NOEXCEPT; - void invalidate_subtables(); + ~SubtableMap() TIGHTDB_NOEXCEPT {} + bool empty() const TIGHTDB_NOEXCEPT { return m_entries.empty(); } + Table* find(std::size_t subtable_ndx) const TIGHTDB_NOEXCEPT; + void add(std::size_t subtable_ndx, Table*); + void remove(std::size_t subtable_ndx) TIGHTDB_NOEXCEPT; + void update_from_parent(std::size_t old_baseline) const TIGHTDB_NOEXCEPT; + void invalidate_subtables() TIGHTDB_NOEXCEPT; private: - Array m_indexes; - Array m_tables; + struct entry { + std::size_t m_subtable_ndx; + Table* m_table; + }; + typedef std::vector entries; + entries m_entries; }; mutable SubtableMap m_subtable_map; @@ -147,6 +154,8 @@ class ColumnTable: public ColumnSubtableParent { ArrayParent*, std::size_t ndx_in_parent, ref_type spec_ref, ref_type column_ref); + ~ColumnTable() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE {} + std::size_t get_subtable_size(std::size_t ndx) const TIGHTDB_NOEXCEPT; /// The returned table pointer must always end up being wrapped in @@ -178,7 +187,7 @@ class ColumnTable: public ColumnSubtableParent { /// Compare two subtable columns for equality. bool compare_table(const ColumnTable&) const; - void invalidate_subtables_virtual() TIGHTDB_OVERRIDE; + void invalidate_subtables_virtual() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE; static ref_type create(std::size_t size, Allocator&); @@ -213,8 +222,9 @@ inline Table* ColumnSubtableParent::get_subtable_ptr(std::size_t subtable_ndx) c subtable = new Table(Table::RefCountTag(), alloc, top_ref, const_cast(this), subtable_ndx); bool was_empty = m_subtable_map.empty(); - m_subtable_map.insert(subtable_ndx, subtable); - if (was_empty && m_table) m_table->bind_ref(); + m_subtable_map.add(subtable_ndx, subtable); + if (was_empty && m_table) + m_table->bind_ref(); } return subtable; } @@ -231,8 +241,9 @@ inline Table* ColumnSubtableParent::get_subtable_ptr(std::size_t subtable_ndx, subtable = new Table(Table::RefCountTag(), alloc, spec_ref, columns_ref, const_cast(this), subtable_ndx); bool was_empty = m_subtable_map.empty(); - m_subtable_map.insert(subtable_ndx, subtable); - if (was_empty && m_table) m_table->bind_ref(); + m_subtable_map.add(subtable_ndx, subtable); + if (was_empty && m_table) + m_table->bind_ref(); } return subtable; } @@ -248,80 +259,59 @@ inline TableRef ColumnSubtableParent::get_subtable(std::size_t subtable_ndx) con return TableRef(get_subtable_ptr(subtable_ndx)); } -inline ColumnSubtableParent::SubtableMap::~SubtableMap() +inline Table* ColumnSubtableParent::SubtableMap::find(std::size_t subtable_ndx) const TIGHTDB_NOEXCEPT { - if (m_indexes.is_attached()) { - TIGHTDB_ASSERT(m_indexes.is_empty()); - m_indexes.destroy(); - m_tables.destroy(); - } + typedef entries::const_iterator iter; + iter end = m_entries.end(); + for (iter i = m_entries.begin(); i != end; ++i) + if (i->m_subtable_ndx == subtable_ndx) + return i->m_table; + return 0; } -inline Table* ColumnSubtableParent::SubtableMap::find(std::size_t subtable_ndx) const +inline void 
ColumnSubtableParent::SubtableMap::add(std::size_t subtable_ndx, Table* table) { - if (!m_indexes.is_attached()) - return 0; - std::size_t pos = m_indexes.find_first(subtable_ndx); - if (pos == not_found) - return 0; - return reinterpret_cast(uintptr_t(m_tables.get(pos))); + entry e; + e.m_subtable_ndx = subtable_ndx; + e.m_table = table; + m_entries.push_back(e); } -inline void ColumnSubtableParent::SubtableMap::insert(std::size_t subtable_ndx, Table* table) +inline void ColumnSubtableParent::SubtableMap::remove(std::size_t subtable_ndx) TIGHTDB_NOEXCEPT { - if (!m_indexes.is_attached()) { - m_indexes.create(Array::type_Normal); - m_tables.create(Array::type_Normal); + typedef entries::iterator iter; + iter end = m_entries.end(); + for (iter i = m_entries.begin(); i != end; ++i) { + if (i->m_subtable_ndx == subtable_ndx) { + m_entries.erase(i); + return; + } } - m_indexes.add(subtable_ndx); - m_tables.add(int64_t(reinterpret_cast(table))); -} - -inline void ColumnSubtableParent::SubtableMap::remove(std::size_t subtable_ndx) -{ - TIGHTDB_ASSERT(m_indexes.is_attached()); - std::size_t pos = m_indexes.find_first(subtable_ndx); - TIGHTDB_ASSERT(pos != not_found); - // FIXME: It is a problem that Array, as our most low-level array - // construct, has too many features to deliver an erase() method - // that cannot be guaranteed to never throw. - m_indexes.erase(pos); - m_tables.erase(pos); + TIGHTDB_ASSERT(false); } inline void ColumnSubtableParent::SubtableMap:: -update_from_parent(std::size_t old_baseline) TIGHTDB_NOEXCEPT +update_from_parent(std::size_t old_baseline) const TIGHTDB_NOEXCEPT { - if (!m_indexes.is_attached()) - return; - - std::size_t n = m_tables.size(); - for (std::size_t i = 0; i < n; ++i) { - Table* t = reinterpret_cast(uintptr_t(m_tables.get(i))); - t->update_from_parent(old_baseline); - } + typedef entries::const_iterator iter; + iter end = m_entries.end(); + for (iter i = m_entries.begin(); i != end; ++i) + i->m_table->update_from_parent(old_baseline); } -inline void ColumnSubtableParent::SubtableMap::invalidate_subtables() +inline void ColumnSubtableParent::SubtableMap::invalidate_subtables() TIGHTDB_NOEXCEPT { - if (!m_indexes.is_attached()) - return; - - std::size_t n = m_tables.size(); - for (std::size_t i=0; i(uintptr_t(m_tables.get(i))); - t->invalidate(); - } - - m_indexes.clear(); // FIXME: Can we rely on Array::clear() never failing???? 
- m_tables.clear(); + typedef entries::const_iterator iter; + iter end = m_entries.end(); + for (iter i = m_entries.begin(); i != end; ++i) + i->m_table->invalidate(); + m_entries.clear(); } inline ColumnSubtableParent::ColumnSubtableParent(Allocator& alloc, const Table* table, std::size_t column_ndx): Column(Array::type_HasRefs, alloc), - m_table(table), m_index(column_ndx), - m_subtable_map(Allocator::get_default()) + m_table(table), m_index(column_ndx) { } @@ -330,8 +320,7 @@ inline ColumnSubtableParent::ColumnSubtableParent(Allocator& alloc, ArrayParent* parent, std::size_t ndx_in_parent, ref_type ref): Column(ref, parent, ndx_in_parent, alloc), - m_table(table), m_index(column_ndx), - m_subtable_map(Allocator::get_default()) + m_table(table), m_index(column_ndx) { } @@ -345,11 +334,12 @@ inline ref_type ColumnSubtableParent::get_child_ref(std::size_t subtable_ndx) co return get_as_ref(subtable_ndx); } -inline void ColumnSubtableParent::invalidate_subtables() +inline void ColumnSubtableParent::invalidate_subtables() TIGHTDB_NOEXCEPT { bool was_empty = m_subtable_map.empty(); m_subtable_map.invalidate_subtables(); - if (!was_empty && m_table) m_table->unbind_ref(); + if (!was_empty && m_table) + m_table->unbind_ref(); } inline bool ColumnSubtableParent::compare_subtable_rows(const Table& a, const Table& b) @@ -402,7 +392,7 @@ inline void ColumnTable::add(const Table* subtable) insert(size(), subtable); } -inline void ColumnTable::invalidate_subtables_virtual() +inline void ColumnTable::invalidate_subtables_virtual() TIGHTDB_NOEXCEPT { invalidate_subtables(); } diff --git a/src/tightdb/config.h b/src/tightdb/config.h index a53e87080c9..e7175f186d2 100644 --- a/src/tightdb/config.h +++ b/src/tightdb/config.h @@ -120,6 +120,11 @@ #else # define TIGHTDB_NOEXCEPT #endif +#if TIGHTDB_HAVE_CXX11 && TIGHTDB_HAVE_GCC_GE_4_6 +# define TIGHTDB_NOEXCEPT_OR_NOTHROW noexcept +#else +# define TIGHTDB_NOEXCEPT_OR_NOTHROW throw() +#endif /* Support for C++11 explicit virtual overrides */ diff --git a/src/tightdb/date.hpp b/src/tightdb/date.hpp index 67163188536..62d2246b4f9 100644 --- a/src/tightdb/date.hpp +++ b/src/tightdb/date.hpp @@ -34,6 +34,8 @@ class Date { /// 1970. Date(std::time_t d) TIGHTDB_NOEXCEPT: m_time(d) {} + ~Date() TIGHTDB_NOEXCEPT {} + /// Return the time as seconds since Jan 1 00:00:00 UTC 1970. 
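The SubtableMap rework in the preceding hunks trades the two low-level Array members for a std::vector of (index, pointer) entries, which makes find(), remove(), and clearing trivially nothrow. A self-contained model under those simplifications (Table is an empty stand-in; erase() cannot throw for this trivially copyable entry type):

    #include <cstddef>
    #include <vector>

    struct Table {};

    class SubtableMap {
    public:
        bool empty() const noexcept { return m_entries.empty(); }

        Table* find(std::size_t ndx) const noexcept
        {
            for (std::size_t i = 0; i < m_entries.size(); ++i)
                if (m_entries[i].m_subtable_ndx == ndx)
                    return m_entries[i].m_table;
            return 0;
        }

        void add(std::size_t ndx, Table* t) // may throw std::bad_alloc
        {
            entry e = { ndx, t };
            m_entries.push_back(e);
        }

        void remove(std::size_t ndx) noexcept
        {
            for (std::size_t i = 0; i < m_entries.size(); ++i) {
                if (m_entries[i].m_subtable_ndx == ndx) {
                    m_entries.erase(m_entries.begin() + i);
                    return;
                }
            }
        }

    private:
        struct entry { std::size_t m_subtable_ndx; Table* m_table; };
        std::vector<entry> m_entries;
    };

    int main()
    {
        SubtableMap map;
        Table t;
        map.add(3, &t);
        map.remove(3);
        return map.empty() ? 0 : 1;
    }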
std::time_t get_date() const TIGHTDB_NOEXCEPT { return m_time; } diff --git a/src/tightdb/group.cpp b/src/tightdb/group.cpp index 3ed18ff7be7..3e529a4ca1f 100644 --- a/src/tightdb/group.cpp +++ b/src/tightdb/group.cpp @@ -203,12 +203,6 @@ void Group::init_from_ref(ref_type top_ref) if (m_is_shared && top_size > 4) m_free_versions.init_from_ref(m_top.get_as_ref(4)); } - - // Make room for pointers to cached tables - size_t n = m_tables.size(); - for (size_t i = 0; i < n; ++i) { - m_cached_tables.add(0); // Throws - } } @@ -251,24 +245,35 @@ void Group::init_shared() } -Group::~Group() +Group::~Group() TIGHTDB_NOEXCEPT { - if (m_top.is_attached()) { - clear_cache(); + if (!m_top.is_attached()) + return; - // Recursively deletes entire tree - m_top.destroy(); - } + destroy_table_accessors(); + + // Recursively deletes entire tree + m_top.destroy(); +} - m_cached_tables.destroy(); + +void Group::destroy_table_accessors() TIGHTDB_NOEXCEPT +{ + typedef table_accessors::const_iterator iter; + iter end = m_table_accessors.end(); + for (iter i = m_table_accessors.begin(); i != end; ++i) { + if (Table* t = *i) { + t->invalidate(); + t->unbind_ref(); + } + } } -void Group::invalidate() +void Group::invalidate() TIGHTDB_NOEXCEPT { - // TODO: Should only invalidate object wrappers and never touch - // the underlying data (that may no longer be valid) - clear_cache(); + destroy_table_accessors(); + m_table_accessors.clear(); m_top.detach(); m_tables.detach(); @@ -276,52 +281,77 @@ void Group::invalidate() m_free_positions.detach(); m_free_lengths.detach(); m_free_versions.detach(); - - // Reads may allocate some temproary state that we have - // to clean up - m_alloc.free_all(); } -Table* Group::get_table_ptr(size_t ndx) +Table* Group::get_table_by_ndx(size_t ndx) { TIGHTDB_ASSERT(m_top.is_attached()); TIGHTDB_ASSERT(ndx < m_tables.size()); + if (m_table_accessors.empty()) + m_table_accessors.resize(m_tables.size()); // Throws + + TIGHTDB_ASSERT(m_table_accessors.size() == m_tables.size()); + // Get table from cache if exists, else create - Table* table = reinterpret_cast(m_cached_tables.get(ndx)); + Table* table = m_table_accessors[ndx]; if (!table) { - const size_t ref = m_tables.get_as_ref(ndx); - Table::UnbindGuard t(new Table(Table::RefCountTag(), m_alloc, ref, this, ndx)); // Throws - t->bind_ref(); // Increase reference count to 1 - m_cached_tables.set(ndx, intptr_t(t.get())); // FIXME: intptr_t is not guaranteed to exists, even in C++11 - // This group shares ownership of the table, so leave - // reference count at 1. - table = t.release(); + ref_type ref = m_tables.get_as_ref(ndx); + table = new Table(Table::RefCountTag(), m_alloc, ref, this, ndx); // Throws + m_table_accessors[ndx] = table; + table->bind_ref(); // Increase reference count from 0 to 1 } return table; } -Table* Group::create_new_table(StringData name) +Table* Group::create_new_table(StringData name, SpecSetter spec_setter) { - ref_type ref = Table::create_empty_table(m_alloc); // Throws - m_tables.add(ref); - m_table_names.add(name); - Table::UnbindGuard table(new Table(Table::RefCountTag(), m_alloc, - ref, this, m_tables.size()-1)); // Throws - table->bind_ref(); // Increase reference count to 1 - m_cached_tables.add(intptr_t(table.get())); // FIXME: intptr_t is not guaranteed to exists, even in C++11 - + // FIXME: This function is exception safe under the assumption + // that m_tables.insert() and m_table_names.insert() are exception + // safe. 
Currently, Array::insert() is not exception safe, but it + // is expected that it will be in the future. Note that a function + // is considered exception safe if it produces no visible + // side-effects when it throws, at least not in any way that + // matters. + + Array::DestroyGuard ref_dg(Table::create_empty_table(m_alloc), m_alloc); // Throws + Table::UnbindGuard table_ug(new Table(Table::RefCountTag(), m_alloc, + ref_dg.get(), 0, 0)); // Throws + table_ug->bind_ref(); // Increase reference count from 0 to 1 + if (spec_setter) + (*spec_setter)(*table_ug); // Throws + + size_t ndx = m_tables.size(); + m_table_accessors.resize(ndx+1); // Throws + + TIGHTDB_ASSERT(ndx == m_table_names.size()); + m_tables.insert(ndx, ref_dg.get()); // Throws + try { + m_table_names.insert(ndx, name); // Throws + try { #ifdef TIGHTDB_ENABLE_REPLICATION - Replication* repl = m_alloc.get_replication(); - if (repl) - repl->new_top_level_table(name); // Throws + if (Replication* repl = m_alloc.get_replication()) + repl->new_top_level_table(name); // Throws #endif - // This group shares ownership of the table, so leave reference - // count at 1. - return table.release(); + // The rest is guaranteed not to throw + Table* table = table_ug.release(); + ref_dg.release(); + table->m_top.set_parent(this, ndx); + m_table_accessors[ndx] = table; + return table; + } + catch (...) { + m_table_names.erase(ndx); // Guaranteed not to throw + throw; + } + } + catch (...) { + m_tables.erase(ndx); // Guaranteed not to throw + throw; + } } @@ -360,7 +390,7 @@ void Group::commit() // created by Group::write() do not have free-space tracking // information. if (m_free_positions.is_attached()) { - TIGHTDB_ASSERT(m_top.size() == 4 || m_top.size() == 5); + TIGHTDB_ASSERT(m_top.size() >= 4); if (m_top.size() > 4) { // Delete free-list version information Array::destroy(m_top.get_as_ref(4), m_top.get_alloc()); @@ -416,7 +446,7 @@ void Group::update_refs(ref_type top_ref, size_t old_baseline) // After Group::commit() we will always have free space tracking // info. - TIGHTDB_ASSERT(m_top.size() == 4 || m_top.size() == 5); + TIGHTDB_ASSERT(m_top.size() >= 4); // Array nodes that a part of the previous version of the database // will not be overwritte by Group::commit(). This is necessary @@ -442,12 +472,11 @@ void Group::update_refs(ref_type top_ref, size_t old_baseline) // Update all attached table accessors including those attached to // subtables. - size_t n = m_cached_tables.size(); - for (size_t i = 0; i < n; ++i) { - Table* t = reinterpret_cast(m_cached_tables.get(i)); - if (t) { - t->update_from_parent(old_baseline); - } + typedef table_accessors::const_iterator iter; + iter end = m_table_accessors.end(); + for (iter i = m_table_accessors.begin(); i != end; ++i) { + if (Table* table = *i) + table->update_from_parent(old_baseline); } } @@ -457,6 +486,10 @@ void Group::update_from_shared(ref_type new_top_ref, size_t new_file_size) TIGHTDB_ASSERT(new_top_ref < new_file_size); TIGHTDB_ASSERT(!m_top.is_attached()); + // Make all managed memory beyond the attached file available + // again. 
+ m_alloc.free_all(); + // Update memory mapping if database file has grown TIGHTDB_ASSERT(new_file_size >= m_alloc.get_baseline()); if (new_file_size > m_alloc.get_baseline()) @@ -470,10 +503,10 @@ void Group::update_from_shared(ref_type new_top_ref, size_t new_file_size) m_free_versions.create(Array::type_Normal); m_top.add(m_free_versions.get_ref()); m_free_versions.add(0); - return; } - - init_from_ref(new_top_ref); // Throws + else { + init_from_ref(new_top_ref); // Throws + } } @@ -483,8 +516,8 @@ bool Group::operator==(const Group& g) const if (n != g.size()) return false; for (size_t i=0; isize(); @@ -583,7 +615,7 @@ void Group::Verify() const { size_t n = m_tables.size(); for (size_t i = 0; i < n; ++i) - get_table_ptr(i)->Verify(); + get_table_by_ndx(i)->Verify(); } } @@ -640,7 +672,7 @@ void Group::to_dot(ostream& out) const // Tables for (size_t i = 0; i < m_tables.size(); ++i) { - const Table* table = get_table_ptr(i); + const Table* table = get_table_by_ndx(i); StringData name = get_table_name(i); table->to_dot(out, name); } diff --git a/src/tightdb/group.hpp b/src/tightdb/group.hpp index 2c47bdf18eb..dc0ca9e7dbb 100644 --- a/src/tightdb/group.hpp +++ b/src/tightdb/group.hpp @@ -22,6 +22,7 @@ #define TIGHTDB_GROUP_HPP #include +#include #include #include @@ -70,7 +71,7 @@ class Group: private Table::Parent { /// behavior. Group(unattached_tag) TIGHTDB_NOEXCEPT; - ~Group(); + ~Group() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE; /// Attach this Group instance to the specified database file. /// @@ -265,7 +266,8 @@ class Group: private Table::Parent { Array m_free_positions; Array m_free_lengths; Array m_free_versions; - mutable Array m_cached_tables; + typedef std::vector table_accessors; + mutable table_accessors m_table_accessors; const bool m_is_shared; std::size_t m_readlock_version; @@ -276,7 +278,7 @@ class Group: private Table::Parent { Group(const Group&); // Disable copying void init_array_parents(); - void invalidate(); + void invalidate() TIGHTDB_NOEXCEPT; void init_shared(); /// Recursively update refs stored in all cached array @@ -295,7 +297,7 @@ class Group: private Table::Parent { m_tables.set(subtable_ndx, new_ref); } - void child_destroyed(std::size_t) TIGHTDB_OVERRIDE {} // Ignore + void child_accessor_destroyed(std::size_t) TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE {} // Ignore ref_type get_child_ref(std::size_t subtable_ndx) const TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE { @@ -314,17 +316,19 @@ class Group: private Table::Parent { /// cache. 
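The create_new_table() rewrite above is a textbook manual-rollback sequence: two parallel containers must stay in sync, so a failure of the second insert erases the first before rethrowing. A generic, self-contained model of the same dance (the names and element types here are illustrative, not the real ref/name tables):

    #include <string>
    #include <vector>

    // Keeps `refs` and `names` in lockstep: either both gain an entry
    // or neither does. erase() of the just-appended element cannot
    // throw for these element types.
    void add_pair(std::vector<unsigned long>& refs,
                  std::vector<std::string>& names,
                  unsigned long ref, const std::string& name)
    {
        std::size_t ndx = refs.size();
        refs.push_back(ref); // may throw
        try {
            names.push_back(name); // may throw
        }
        catch (...) {
            refs.erase(refs.begin() + ndx); // roll back, then rethrow
            throw;
        }
    }

    int main()
    {
        std::vector<unsigned long> refs;
        std::vector<std::string> names;
        add_pair(refs, names, 128, "events");
    }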
void init_from_ref(ref_type top_ref); + typedef void (*SpecSetter)(Table&); + Table* get_table_ptr(StringData name, SpecSetter, bool& was_created); + Table* get_table_ptr(StringData name); - Table* get_table_ptr(StringData name, bool& was_created); const Table* get_table_ptr(StringData name) const; template T* get_table_ptr(StringData name); template const T* get_table_ptr(StringData name) const; - Table* get_table_ptr(std::size_t ndx); - const Table* get_table_ptr(std::size_t ndx) const; - Table* create_new_table(StringData name); + Table* get_table_by_ndx(std::size_t ndx); + const Table* get_table_by_ndx(std::size_t ndx) const; + Table* create_new_table(StringData name, SpecSetter); - void clear_cache(); + void destroy_table_accessors() TIGHTDB_NOEXCEPT; friend class GroupWriter; friend class SharedGroup; @@ -348,18 +352,7 @@ inline Group::Group(): m_free_lengths(m_alloc), m_free_versions(m_alloc), m_is_shared(false) { init_array_parents(); - - // FIXME: The try-catch is required because of the unfortunate - // fact that Array violates the RAII idiom by allocating memory in - // the constructor and not freeing it in the destructor. We must - // find a way to improve Array. - try { - create(); // Throws - } - catch (...) { - m_cached_tables.destroy(); - throw; - } + create(); // Throws } inline Group::Group(const std::string& file, OpenMode mode): @@ -367,18 +360,7 @@ inline Group::Group(const std::string& file, OpenMode mode): m_free_lengths(m_alloc), m_free_versions(m_alloc), m_is_shared(false) { init_array_parents(); - - // FIXME: The try-catch is required because of the unfortunate - // fact that Array violates the RAII idiom by allocating memory in - // the constructor and not freeing it in the destructor. We must - // find a way to improve Array. - try { - open(file, mode); // Throws - } - catch (...) { - m_cached_tables.destroy(); - throw; - } + open(file, mode); // Throws } inline Group::Group(BinaryData buffer, bool take_ownership): @@ -386,18 +368,7 @@ inline Group::Group(BinaryData buffer, bool take_ownership): m_free_lengths(m_alloc), m_free_versions(m_alloc), m_is_shared(false) { init_array_parents(); - - // FIXME: The try-catch is required because of the unfortunate - // fact that Array violates the RAII idiom by allocating memory in - // the constructor and not freeing it in the destructor. We must - // find a way to improve Array. - try { - open(buffer, take_ownership); // Throws - } - catch (...) 
{ - m_cached_tables.destroy(); - throw; - } + open(buffer, take_ownership); // Throws } inline Group::Group(unattached_tag) TIGHTDB_NOEXCEPT: @@ -447,80 +418,73 @@ inline StringData Group::get_table_name(std::size_t table_ndx) const return m_table_names.get(table_ndx); } -inline const Table* Group::get_table_ptr(std::size_t ndx) const -{ - return const_cast(this)->get_table_ptr(ndx); -} - inline bool Group::has_table(StringData name) const { - if (!m_top.is_attached()) return false; + if (!m_top.is_attached()) + return false; std::size_t i = m_table_names.find_first(name); - return i != std::size_t(-1); + return i != not_found; } template inline bool Group::has_table(StringData name) const { - if (!m_top.is_attached()) return false; - std::size_t i = m_table_names.find_first(name); - if (i == std::size_t(-1)) return false; - const Table* table = get_table_ptr(i); + if (!m_top.is_attached()) + return false; + std::size_t ndx = m_table_names.find_first(name); + if (ndx == not_found) + return false; + const Table* table = get_table_by_ndx(ndx); return T::matches_dynamic_spec(&table->get_spec()); } -inline Table* Group::get_table_ptr(StringData name) +inline Table* Group::get_table_ptr(StringData name, SpecSetter spec_setter, bool& was_created) { TIGHTDB_ASSERT(m_top.is_attached()); std::size_t ndx = m_table_names.find_first(name); - if (ndx != std::size_t(-1)) { - // Get table from cache - return get_table_ptr(ndx); - } - return create_new_table(name); -} - -inline Table* Group::get_table_ptr(StringData name, bool& was_created) -{ - TIGHTDB_ASSERT(m_top.is_attached()); - std::size_t ndx = m_table_names.find_first(name); - if (ndx != std::size_t(-1)) { + if (ndx != not_found) { + Table* table = get_table_by_ndx(ndx); // Throws was_created = false; - // Get table from cache - return get_table_ptr(ndx); + return table; } + Table* table = create_new_table(name, spec_setter); // Throws was_created = true; - return create_new_table(name); + return table; } -inline const Table* Group::get_table_ptr(StringData name) const +inline Table* Group::get_table_ptr(StringData name) { - TIGHTDB_ASSERT(has_table(name)); - return const_cast(this)->get_table_ptr(name); + SpecSetter spec_setter = 0; // Do not add any columns + bool was_created; // Dummy + return get_table_ptr(name, spec_setter, was_created); } -template inline T* Group::get_table_ptr(StringData name) +inline const Table* Group::get_table_ptr(StringData name) const { - TIGHTDB_STATIC_ASSERT(IsBasicTable::value, "Invalid table type"); - TIGHTDB_ASSERT(!has_table(name) || has_table(name)); - TIGHTDB_ASSERT(m_top.is_attached()); std::size_t ndx = m_table_names.find_first(name); - if (ndx != std::size_t(-1)) { - // Get table from cache - return static_cast(get_table_ptr(ndx)); - } + if (ndx == not_found) + return 0; + return get_table_by_ndx(ndx); // Throws +} - T* table = static_cast(create_new_table(name)); - table->set_dynamic_spec(); // FIXME: May fail - return table; +template inline T* Group::get_table_ptr(StringData name) +{ + TIGHTDB_STATIC_ASSERT(IsBasicTable::value, "Invalid table type"); + SpecSetter spec_setter = &T::set_dynamic_spec; + bool was_created; // Dummy + Table* table = get_table_ptr(name, spec_setter, was_created); + TIGHTDB_ASSERT(T::matches_dynamic_spec(&table->get_spec())); + return static_cast(table); } template inline const T* Group::get_table_ptr(StringData name) const { - TIGHTDB_ASSERT(has_table(name)); - return const_cast(this)->get_table_ptr(name); + TIGHTDB_STATIC_ASSERT(IsBasicTable::value, "Invalid table type"); + 
const Table* table = get_table_ptr(name); // Throws + TIGHTDB_ASSERT(table || T::matches_dynamic_spec(&table->get_spec())); + return static_cast(table); } inline TableRef Group::get_table(StringData name) @@ -543,6 +507,11 @@ template inline typename T::ConstRef Group::get_table(StringData name) return get_table_ptr(name)->get_table_ref(); } +inline const Table* Group::get_table_by_ndx(size_t ndx) const +{ + return const_cast(this)->get_table_by_ndx(ndx); +} + template std::size_t Group::write_to_stream(S& out) const { // Space for file header @@ -596,7 +565,7 @@ void Group::to_json(S& out) const for (std::size_t i = 0; i < m_tables.size(); ++i) { StringData name = m_table_names.get(i); - const Table* table = get_table_ptr(i); + const Table* table = get_table_by_ndx(i); if (i) out << ","; out << "\"" << name << "\""; @@ -608,19 +577,6 @@ void Group::to_json(S& out) const } -inline void Group::clear_cache() -{ - std::size_t n = m_cached_tables.size(); - for (std::size_t i = 0; i < n; ++i) { - if (Table* t = reinterpret_cast(m_cached_tables.get(i))) { - t->invalidate(); - t->unbind_ref(); - } - } - m_cached_tables.clear(); -} - - } // namespace tightdb #endif // TIGHTDB_GROUP_HPP diff --git a/src/tightdb/group_shared.cpp b/src/tightdb/group_shared.cpp index 6e8452f931a..ccb962e07ae 100644 --- a/src/tightdb/group_shared.cpp +++ b/src/tightdb/group_shared.cpp @@ -48,7 +48,7 @@ namespace { class ScopedMutexLock { public: - ScopedMutexLock(pthread_mutex_t* mutex) TIGHTDB_NOEXCEPT : m_mutex(mutex) + ScopedMutexLock(pthread_mutex_t* mutex) TIGHTDB_NOEXCEPT: m_mutex(mutex) { int r = pthread_mutex_lock(m_mutex); TIGHTDB_ASSERT(r == 0); @@ -224,13 +224,17 @@ void SharedGroup::open(const string& path, bool no_create_file, } -SharedGroup::~SharedGroup() +SharedGroup::~SharedGroup() TIGHTDB_NOEXCEPT { if (!is_attached()) return; TIGHTDB_ASSERT(m_transact_stage == transact_Ready); + // FIXME: Throws. Exception must not escape, as that would + // terminate the program. + m_group.m_alloc.free_all(); + #ifdef TIGHTDB_ENABLE_REPLICATION if (Replication* repl = m_group.get_replication()) delete repl; @@ -244,7 +248,11 @@ SharedGroup::~SharedGroup() // FIXME: This upgrading of the lock is not guaranteed to be atomic m_file.unlock(); - if (!m_file.try_lock_exclusive()) + + // FIXME: File::try_lock_exclusive() can throw. We cannot allow + // the exception to escape, because that would terminate the + // program (due to 'noexcept' on the destructor). + if (!m_file.try_lock_exclusive()) // Throws return; SharedInfo* info = m_file_map.get_addr(); @@ -253,7 +261,10 @@ SharedGroup::~SharedGroup() // we can delete it when done. if (info->flags == durability_MemOnly) { size_t path_len = m_file_path.size()-5; // remove ".lock" - string db_path = m_file_path.substr(0, path_len); + // FIXME: Find a way to avoid the possible exception from + // m_file_path.substr(). Currently, if it throws, the program + // will be terminated due to 'noexcept' on ~SharedGroup(). 
+ string db_path = m_file_path.substr(0, path_len); // Throws remove(db_path.c_str()); } @@ -301,7 +312,6 @@ bool SharedGroup::has_changed() const TIGHTDB_NOEXCEPT const Group& SharedGroup::begin_read() { TIGHTDB_ASSERT(m_transact_stage == transact_Ready); - TIGHTDB_ASSERT(m_group.m_alloc.is_all_free()); ref_type new_top_ref = 0; size_t new_file_size = 0; @@ -321,7 +331,7 @@ const Group& SharedGroup::begin_read() // Update reader list if (ringbuf_is_empty()) { - ReadCount r2 = {info->current_version, 1}; + ReadCount r2 = { info->current_version, 1 }; ringbuf_put(r2); } else { @@ -330,7 +340,7 @@ const Group& SharedGroup::begin_read() ++r.count; } else { - ReadCount r2 = {info->current_version, 1}; + ReadCount r2 = { info->current_version, 1 }; ringbuf_put(r2); } } @@ -349,7 +359,7 @@ const Group& SharedGroup::begin_read() } -void SharedGroup::end_read() +void SharedGroup::end_read() TIGHTDB_NOEXCEPT { TIGHTDB_ASSERT(m_transact_stage == transact_Reading); TIGHTDB_ASSERT(m_version != numeric_limits::max()); @@ -366,15 +376,14 @@ void SharedGroup::end_read() // Find entry for current version size_t ndx = ringbuf_find(uint32_t(m_version)); - TIGHTDB_ASSERT(ndx != size_t(-1)); + TIGHTDB_ASSERT(ndx != not_found); ReadCount& r = ringbuf_get(ndx); // Decrement count and remove as many entries as possible if (r.count == 1 && ringbuf_is_first(ndx)) { ringbuf_remove_first(); - while (!ringbuf_is_empty() && ringbuf_get_first().count == 0) { + while (!ringbuf_is_empty() && ringbuf_get_first().count == 0) ringbuf_remove_first(); - } } else { TIGHTDB_ASSERT(r.count > 0); @@ -394,7 +403,6 @@ void SharedGroup::end_read() Group& SharedGroup::begin_write() { TIGHTDB_ASSERT(m_transact_stage == transact_Ready); - TIGHTDB_ASSERT(m_group.m_alloc.is_all_free()); SharedInfo* info = m_file_map.get_addr(); @@ -465,9 +473,11 @@ void SharedGroup::commit() // returns to the caller by throwing an exception. As it is right now, // rollback() does not handle all cases. // -// FIXME: This function must be modified is such a way that it can be -// guaranteed that it never throws. There are two problems to be delat with. Group::invalidate() calls Group::clear_cache() -void SharedGroup::rollback() +// FIXME: This function must be modified in such a way that it can be +// guaranteed that it never throws. There are two problems to be dealt +// with. Group::invalidate() calls Group::clear_cache() and +// SlabAlloc::free_all(). +void SharedGroup::rollback() TIGHTDB_NOEXCEPT { TIGHTDB_ASSERT(m_transact_stage == transact_Writing); diff --git a/src/tightdb/group_shared.hpp b/src/tightdb/group_shared.hpp index 2c0883424ac..8e7f39e9b95 100644 --- a/src/tightdb/group_shared.hpp +++ b/src/tightdb/group_shared.hpp @@ -53,7 +53,7 @@ class SharedGroup { /// state has undefined behavior. SharedGroup(unattached_tag) TIGHTDB_NOEXCEPT; - ~SharedGroup(); + ~SharedGroup() TIGHTDB_NOEXCEPT; /// Attach this SharedGroup instance to the specified database /// file. 
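The begin_read()/end_read() pair above maintains a ring buffer of (version, count) entries so that every reader pins the snapshot version it is looking at, and space belonging to old versions can be reclaimed once the oldest entry drains to zero. A simplified single-process sketch of that bookkeeping; the real structure is a fixed-size ring buffer in shared memory, and ReadRegistry with std::deque is only a stand-in:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <deque>

struct ReadCount { uint32_t version; uint32_t count; };

class ReadRegistry {
public:
    void begin_read(uint32_t current_version)
    {
        if (!m_entries.empty() && m_entries.back().version == current_version) {
            ++m_entries.back().count; // another reader of the same snapshot
            return;
        }
        ReadCount r = { current_version, 1 };
        m_entries.push_back(r);
    }

    void end_read(uint32_t version)
    {
        for (std::size_t i = 0; i < m_entries.size(); ++i) {
            if (m_entries[i].version == version) {
                assert(m_entries[i].count > 0);
                --m_entries[i].count;
                break;
            }
        }
        // Retire fully released entries from the front, oldest first
        while (!m_entries.empty() && m_entries.front().count == 0)
            m_entries.pop_front();
    }

    // Oldest version still pinned; space older than it can be reused
    bool oldest_pinned(uint32_t& version) const
    {
        if (m_entries.empty())
            return false;
        version = m_entries.front().version;
        return true;
    }

private:
    std::deque<ReadCount> m_entries;
};

int main()
{
    ReadRegistry reg;
    reg.begin_read(7);
    reg.begin_read(7); // same version: count bumped, no new entry
    reg.begin_read(8);
    reg.end_read(7);
    reg.end_read(7); // version 7 drains; entry retired from the front
    uint32_t v;
    assert(reg.oldest_pinned(v) && v == 8);
}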
@@ -110,12 +110,12 @@ class SharedGroup { // Read transactions const Group& begin_read(); - void end_read(); + void end_read() TIGHTDB_NOEXCEPT; // Write transactions Group& begin_write(); void commit(); - void rollback(); + void rollback() TIGHTDB_NOEXCEPT; #ifdef TIGHTDB_DEBUG void test_ringbuf(); @@ -201,7 +201,7 @@ class ReadTransaction { m_shared_group.begin_read(); } - ~ReadTransaction() + ~ReadTransaction() TIGHTDB_NOEXCEPT { m_shared_group.end_read(); } @@ -221,7 +221,7 @@ class ReadTransaction { return get_group().get_table(name); } - const Group& get_group() const + const Group& get_group() const TIGHTDB_NOEXCEPT { return m_shared_group.m_group; } @@ -238,9 +238,10 @@ class WriteTransaction { m_shared_group->begin_write(); } - ~WriteTransaction() + ~WriteTransaction() TIGHTDB_NOEXCEPT { - if (m_shared_group) m_shared_group->rollback(); + if (m_shared_group) + m_shared_group->rollback(); } TableRef get_table(StringData name) const @@ -253,7 +254,7 @@ class WriteTransaction { return get_group().get_table(name); } - Group& get_group() const + Group& get_group() const TIGHTDB_NOEXCEPT { TIGHTDB_ASSERT(m_shared_group); return m_shared_group->m_group; @@ -284,7 +285,9 @@ inline SharedGroup::SharedGroup(const std::string& file, bool no_create, Durabil inline SharedGroup::SharedGroup(unattached_tag) TIGHTDB_NOEXCEPT: - m_group(Group::shared_tag()), m_version(std::numeric_limits::max()) {} + m_group(Group::shared_tag()), m_version(std::numeric_limits::max()) +{ +} #ifdef TIGHTDB_ENABLE_REPLICATION diff --git a/src/tightdb/group_writer.cpp b/src/tightdb/group_writer.cpp index 58f0437d92f..682007d5ecb 100644 --- a/src/tightdb/group_writer.cpp +++ b/src/tightdb/group_writer.cpp @@ -25,7 +25,7 @@ void GroupWriter::set_versions(size_t current, size_t read_lock) size_t GroupWriter::commit(bool do_sync) { - merge_free_space(); + merge_free_space(); // Throws Array& top = m_group.m_top; Array& fpositions = m_group.m_free_positions; @@ -42,8 +42,8 @@ size_t GroupWriter::commit(bool do_sync) // during the current transaction (or since the last commit), as // that would lead to clobbering of the previous database version. bool recurse = true, persist = true; - size_t names_pos = m_group.m_table_names.write(*this, recurse, persist); - size_t tables_pos = m_group.m_tables.write(*this, recurse, persist); + size_t names_pos = m_group.m_table_names.write(*this, recurse, persist); // Throws + size_t tables_pos = m_group.m_tables.write(*this, recurse, persist); // Throws // We now have a bit of a chicken-and-egg problem. We need to // write the free-lists to the file, but the act of writing them @@ -61,11 +61,11 @@ size_t GroupWriter::commit(bool do_sync) // themselves, we must ensure that the original arrays used by the // free-lists are counted as part of the space that was freed // during the current transaction. 
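ReadTransaction and WriteTransaction above are scope guards: the read guard's destructor just releases the read lock, while the write guard's destructor rolls the transaction back unless it was committed first, which is exactly why end_read() and rollback() must carry the no-throw guarantee. A sketch of the write-side idiom with stand-in Database and WriteTransaction types; the disarming-pointer trick is inferred from the destructor shown above, not copied from the patch:

#include <iostream>

class Database {
public:
    void begin_write() { std::cout << "begin\n"; }
    void commit()      { std::cout << "commit\n"; }
    void rollback()    { std::cout << "rollback\n"; } // must not throw
};

class WriteTransaction {
public:
    WriteTransaction(Database& db): m_db(&db) { m_db->begin_write(); }

    ~WriteTransaction() // noexcept in spirit: rollback() cannot throw
    {
        if (m_db)
            m_db->rollback();
    }

    void commit()
    {
        m_db->commit();
        m_db = 0; // disarm: a committed transaction must not be rolled back
    }

private:
    Database* m_db;
};

int main()
{
    Database db;
    {
        WriteTransaction wt(db);
        // ... mutate ...
        wt.commit(); // prints "commit"; destructor is now a no-op
    }
    {
        WriteTransaction wt(db);
        // no commit: leaving scope (e.g. via an exception) prints "rollback"
    }
}

Because the destructor runs on every exit path, an exception thrown between begin_write() and commit() rolls back automatically; that is the whole point of making rollback() noexcept.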
- fpositions.copy_on_write(); - flengths.copy_on_write(); + fpositions.copy_on_write(); // Throws + flengths.copy_on_write(); // Throws if (is_shared) - fversions.copy_on_write(); - const SlabAlloc::FreeSpace& new_free_space = m_group.m_alloc.get_free_read_only(); + fversions.copy_on_write(); // Throws + const SlabAlloc::FreeSpace& new_free_space = m_group.m_alloc.get_free_read_only(); // Throws max_free_list_size += new_free_space.size(); // The final allocation of free space (i.e., the call to @@ -83,7 +83,7 @@ size_t GroupWriter::commit(bool do_sync) // even if we end up using the maximum size possible, we still do // not end up with a zero size free-space chunk as we deduct the // actually used size from it. - pair reserve = reserve_free_space(max_free_space_needed + 1); + pair reserve = reserve_free_space(max_free_space_needed + 1); // Throws size_t reserve_ndx = reserve.first; size_t reserve_size = reserve.second; @@ -105,10 +105,10 @@ size_t GroupWriter::commit(bool do_sync) // adjacent segments. We can find the correct insert // postion by binary search size_t ndx = fpositions.lower_bound_int(pos); - fpositions.insert(ndx, pos); - flengths.insert(ndx, size); + fpositions.insert(ndx, pos); // Throws + flengths.insert(ndx, size); // Throws if (is_shared) - fversions.insert(ndx, m_current_version); + fversions.insert(ndx, m_current_version); // Throws if (ndx <= reserve_ndx) ++reserve_ndx; } @@ -120,7 +120,7 @@ size_t GroupWriter::commit(bool do_sync) // reserved chunk,) will not change the byte-size of those arrays. size_t reserve_pos = to_size_t(fpositions.get(reserve_ndx)); TIGHTDB_ASSERT(reserve_size > max_free_space_needed); - fpositions.ensure_minimum_width(reserve_pos + max_free_space_needed); + fpositions.ensure_minimum_width(reserve_pos + max_free_space_needed); // Throws // Get final sizes of free-list arrays size_t free_positions_size = fpositions.get_byte_size(); @@ -134,12 +134,12 @@ size_t GroupWriter::commit(bool do_sync) size_t top_pos = free_versions_pos + free_versions_size; // Update top to point to the calculated positions - top.set(0, names_pos); - top.set(1, tables_pos); - top.set(2, free_positions_pos); - top.set(3, free_sizes_pos); + top.set(0, names_pos); // Throws + top.set(1, tables_pos); // Throws + top.set(2, free_positions_pos); // Throws + top.set(3, free_sizes_pos); // Throws if (is_shared) - top.set(4, free_versions_pos); + top.set(4, free_versions_pos); // Throws // Get final sizes size_t top_size = top.get_byte_size(); @@ -153,25 +153,25 @@ size_t GroupWriter::commit(bool do_sync) // larger value without reallocation. 
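The reserve_free_space(max_free_space_needed + 1) call above deliberately over-reserves by one byte so that, even in the worst case, carving the actually used prefix off the reserved chunk leaves a strictly non-empty remainder, and the free-list arrays therefore never change byte-size at the last moment. A toy version of that arithmetic, which the code just below performs with fpositions/flengths; Chunk and consume_front are illustrative, not GroupWriter's types:

#include <cassert>
#include <cstddef>

struct Chunk { std::size_t pos; std::size_t size; };

// Carve 'used' bytes off the front of a reserved chunk, leaving the
// remainder as a guaranteed non-empty free-space entry.
Chunk consume_front(Chunk reserved, std::size_t used)
{
    assert(used < reserved.size); // the '+ 1' over-reservation makes this strict
    Chunk rest;
    rest.pos = reserved.pos + used;
    rest.size = reserved.size - used;
    return rest;
}

int main()
{
    std::size_t max_needed = 512;
    Chunk reserved = { 4096, max_needed + 1 };       // reserve one byte extra
    Chunk rest = consume_front(reserved, max_needed); // worst case leaves 1 byte
    assert(rest.pos == 4096 + 512 && rest.size == 1);
}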
size_t rest = reserve_pos + reserve_size - end_pos; TIGHTDB_ASSERT(rest > 0); - fpositions.set(reserve_ndx, end_pos); - flengths.set(reserve_ndx, rest); + fpositions.set(reserve_ndx, end_pos); // Throws + flengths.set(reserve_ndx, rest); // Throws // The free-list now have their final form, so we can write them // to the file - write_at(free_positions_pos, fpositions.get_header(), free_positions_size); - write_at(free_sizes_pos, flengths.get_header(), free_sizes_size); + write_at(free_positions_pos, fpositions.get_header(), free_positions_size); // Throws + write_at(free_sizes_pos, flengths.get_header(), free_sizes_size); // Throws if (is_shared) - write_at(free_versions_pos, fversions.get_header(), free_versions_size); + write_at(free_versions_pos, fversions.get_header(), free_versions_size); // Throws // Write top - write_at(top_pos, top.get_header(), top_size); + write_at(top_pos, top.get_header(), top_size); // Throws // In swap-only mode, we just use the file as backing for the shared // memory. So we never actually flush the data to disk (the OS may do // so for swapping though). Note that this means that the file on disk // may very likely be in an invalid state. if (do_sync) - sync(top_pos); + sync(top_pos); // Throws // Return top_pos so that it can be saved in lock file used // for coordination diff --git a/src/tightdb/index_string.hpp b/src/tightdb/index_string.hpp index d7bd42a34f9..e27f9389943 100644 --- a/src/tightdb/index_string.hpp +++ b/src/tightdb/index_string.hpp @@ -34,6 +34,7 @@ class StringIndex: public Column { StringIndex(void* target_column, StringGetter get_func, Allocator&); StringIndex(ref_type, ArrayParent*, std::size_t ndx_in_parent, void* target_column, StringGetter get_func, Allocator&); + ~StringIndex() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE {} void set_target(void* target_column, StringGetter get_func) TIGHTDB_NOEXCEPT; bool is_empty() const; diff --git a/src/tightdb/lang_bind_helper.hpp b/src/tightdb/lang_bind_helper.hpp index 69bab2d005a..a3a1a27d706 100644 --- a/src/tightdb/lang_bind_helper.hpp +++ b/src/tightdb/lang_bind_helper.hpp @@ -176,7 +176,8 @@ inline Table* LangBindHelper::get_table_ptr(Group* grp, StringData name) inline Table* LangBindHelper::get_table_ptr(Group* grp, StringData name, bool& was_created) { - Table* subtab = grp->get_table_ptr(name, was_created); + Group::SpecSetter spec_setter = 0; // Do not add any columns + Table* subtab = grp->get_table_ptr(name, spec_setter, was_created); subtab->bind_ref(); return subtab; } diff --git a/src/tightdb/mixed.hpp b/src/tightdb/mixed.hpp index 5daf517cf29..575bc11de99 100644 --- a/src/tightdb/mixed.hpp +++ b/src/tightdb/mixed.hpp @@ -118,6 +118,8 @@ class Mixed { struct subtable_tag {}; Mixed(subtable_tag) TIGHTDB_NOEXCEPT: m_type(type_Table) {} + ~Mixed() TIGHTDB_NOEXCEPT {} + DataType get_type() const TIGHTDB_NOEXCEPT { return m_type; } int64_t get_int() const TIGHTDB_NOEXCEPT; diff --git a/src/tightdb/query.cpp b/src/tightdb/query.cpp index e9f15eca576..c6102fd8e41 100644 --- a/src/tightdb/query.cpp +++ b/src/tightdb/query.cpp @@ -52,7 +52,7 @@ Query::Query(const Query& copy) do_delete = true; } -Query::~Query() +Query::~Query() TIGHTDB_NOEXCEPT { #if TIGHTDB_MULTITHREAD_QUERY for (size_t i = 0; i < m_threadcount; i++) diff --git a/src/tightdb/query.hpp b/src/tightdb/query.hpp index f48b65818d0..c361bb4205d 100644 --- a/src/tightdb/query.hpp +++ b/src/tightdb/query.hpp @@ -51,7 +51,7 @@ class Array; class Query { public: Query(const Query& copy); // FIXME: Try to remove this - ~Query(); + 
~Query() TIGHTDB_NOEXCEPT; // Conditions: Query only rows contained in tv Query& tableview(const TableView& tv); @@ -202,11 +202,6 @@ class Query { #endif protected: - friend class Table; - template friend class BasicTable; - friend class XQueryAccessorInt; - friend class XQueryAccessorString; - Query(Table& table); Query(const Table& table); // FIXME: This constructor should not exist. We need a ConstQuery class. void Create(); @@ -266,6 +261,11 @@ class Query { template R aggregate(R (ColClass::*method)(size_t, size_t) const, size_t column_ndx, size_t* resultcount, size_t start, size_t end, size_t limit) const; + + friend class Table; + template friend class BasicTable; + friend class XQueryAccessorInt; + friend class XQueryAccessorString; }; diff --git a/src/tightdb/query_engine.hpp b/src/tightdb/query_engine.hpp index fb1a85888d4..56dcf819f8d 100644 --- a/src/tightdb/query_engine.hpp +++ b/src/tightdb/query_engine.hpp @@ -164,30 +164,32 @@ template<> struct ColumnTypeTraitsSum { }; // Lets you access elements of an integer column in increasing order in a fast way where leafs are cached -struct SequentialGetterBase { virtual ~SequentialGetterBase() {} }; +struct SequentialGetterBase { + virtual ~SequentialGetterBase() TIGHTDB_NOEXCEPT {} +}; templateclass SequentialGetter : public SequentialGetterBase { public: typedef typename ColumnTypeTraits::column_type ColType; typedef typename ColumnTypeTraits::array_type ArrayType; - SequentialGetter() : m_array((Array::no_prealloc_tag())) - { - } + SequentialGetter(): m_array((Array::no_prealloc_tag())) {} - SequentialGetter(const Table& table, size_t column_ndx) : m_array((Array::no_prealloc_tag())) + SequentialGetter(const Table& table, size_t column_ndx): m_array((Array::no_prealloc_tag())) { if (column_ndx != not_found) m_column = static_cast(&table.get_column_base(column_ndx)); m_leaf_end = 0; } - SequentialGetter(const ColType* column) : m_array((Array::no_prealloc_tag())) + SequentialGetter(const ColType* column): m_array((Array::no_prealloc_tag())) { init(column); } - void init (const ColType* column) + ~SequentialGetter() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE {} + + void init(const ColType* column) { m_column = column; m_leaf_end = 0; @@ -290,7 +292,7 @@ class ParentNode { } - virtual ~ParentNode() {} + virtual ~ParentNode() TIGHTDB_NOEXCEPT {} virtual void init(const Table& table) { @@ -480,6 +482,7 @@ class ParentNode { class ArrayNode: public ParentNode { public: ArrayNode(const Array& arr) : m_arr(arr), m_max(0), m_next(0), m_size(arr.size()) {m_child = 0; m_dT = 0.0;} + ~ArrayNode() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE {} void init(const Table& table) { @@ -517,6 +520,7 @@ class SubtableNode: public ParentNode { public: SubtableNode(size_t column): m_column(column) {m_child = 0; m_child2 = 0; m_dT = 100.0;} SubtableNode() {}; + ~SubtableNode() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE {} void init(const Table& table) { m_dD = 10.0; @@ -579,6 +583,7 @@ template class IntegerNode: pu m_probes = 0; m_matches = 0; } + ~IntegerNode() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE {} // Only purpose of this function is to let you quickly create a IntegerNode object and call aggregate_local() on it to aggregate // on a single stand-alone column, with 1 or 0 search criterias, without involving any tables, etc. 
Todo, could @@ -840,7 +845,7 @@ template class StringNode: public ParentNode { m_lcase = lower; } - ~StringNode() + ~StringNode() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE { delete[] m_value.data(); delete[] m_ucase; @@ -924,6 +929,7 @@ template class BasicNode: publ m_child = 0; m_dT = 1.0; } + ~BasicNode() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE {} // Only purpose of this function is to let you quickly create a IntegerNode object and call aggregate_local() on it to aggregate // on a single stand-alone column, with 1 or 0 search criterias, without involving any tables, etc. Todo, could @@ -981,7 +987,7 @@ template class BinaryNode: public ParentNode { m_value = BinaryData(data, v.size()); } - ~BinaryNode() + ~BinaryNode() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE { delete[] m_value.data(); } @@ -1040,7 +1046,7 @@ template<> class StringNode: public ParentNode { m_index_matches = 0; m_index_matches_destroy = false; } - ~StringNode() + ~StringNode() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE { deallocate(); delete[] m_value.data(); @@ -1048,7 +1054,7 @@ template<> class StringNode: public ParentNode { m_index.destroy(); } - void deallocate() + void deallocate() TIGHTDB_NOEXCEPT { // Must be called after each query execution too free temporary resources used by the execution. Run in // destructor, but also in Init because a user could define a query once and execute it multiple times. @@ -1237,7 +1243,8 @@ class OrNode: public ParentNode { return 0; } - OrNode(ParentNode* p1) {m_child = NULL; m_cond[0] = p1; m_cond[1] = NULL; m_dT = 50.0;}; + OrNode(ParentNode* p1) {m_child = NULL; m_cond[0] = p1; m_cond[1] = NULL; m_dT = 50.0;} + ~OrNode() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE {} void init(const Table& table) { @@ -1328,7 +1335,7 @@ template class TwoColumnsNode: m_child = 0; } - ~TwoColumnsNode() + ~TwoColumnsNode() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE { delete[] m_value.data(); } diff --git a/src/tightdb/replication.cpp b/src/tightdb/replication.cpp index 5daff74cd5a..eb4e52073b2 100644 --- a/src/tightdb/replication.cpp +++ b/src/tightdb/replication.cpp @@ -147,7 +147,7 @@ struct Replication::TransactLogApplier { const char* m_input_begin; const char* m_input_end; TableRef m_table; - Buffer m_subspecs; + util::Buffer m_subspecs; size_t m_num_subspecs; bool m_dirty_spec; StringBuffer m_string_buffer; @@ -314,7 +314,7 @@ void Replication::TransactLogApplier::read_string(StringBuffer& buf) void Replication::TransactLogApplier::add_subspec(Spec* spec) { if (m_num_subspecs == m_subspecs.m_size) { - Buffer new_subspecs; + util::Buffer new_subspecs; size_t new_size = m_subspecs.m_size; if (new_size == 0) { new_size = 16; // FIXME: Use a small value (1) when compiling in debug mode diff --git a/src/tightdb/replication.hpp b/src/tightdb/replication.hpp index c9c98ccd98c..182202c00ce 100644 --- a/src/tightdb/replication.hpp +++ b/src/tightdb/replication.hpp @@ -31,7 +31,7 @@ #include #include -#include +#include #include #include @@ -211,7 +211,7 @@ class Replication { static void apply_transact_log(InputStream& transact_log, Group& target); #endif - virtual ~Replication() {} + virtual ~Replication() TIGHTDB_NOEXCEPT {} protected: // These two delimit a contiguous region of free space in a @@ -271,21 +271,7 @@ class Replication { private: struct TransactLogApplier; - template struct Buffer { - UniquePtr m_data; - std::size_t m_size; - T& operator[](std::size_t i) TIGHTDB_NOEXCEPT { return m_data[i]; } - const T& operator[](std::size_t i) const TIGHTDB_NOEXCEPT { return m_data[i]; } - Buffer() TIGHTDB_NOEXCEPT: m_data(0), m_size(0) {} - 
void set_size(std::size_t); - friend void swap(Buffer&a, Buffer&b) - { - using std::swap; - swap(a.m_data, b.m_data); - swap(a.m_size, b.m_size); - } - }; - Buffer m_subtab_path_buf; + util::Buffer m_subtab_path_buf; const Table* m_selected_table; const Spec* m_selected_spec; @@ -747,13 +733,6 @@ inline void Replication::on_spec_destroyed(const Spec* s) TIGHTDB_NOEXCEPT } -template void Replication::Buffer::set_size(std::size_t size) -{ - m_data.reset(new T[size]); - m_size = size; -} - - } // namespace tightdb #endif // TIGHTDB_REPLICATION_HPP diff --git a/src/tightdb/spec.cpp b/src/tightdb/spec.cpp index 924bce37026..812623de4d3 100644 --- a/src/tightdb/spec.cpp +++ b/src/tightdb/spec.cpp @@ -8,15 +8,16 @@ using namespace std; using namespace tightdb; -Spec::~Spec() +Spec::~Spec() TIGHTDB_NOEXCEPT { #ifdef TIGHTDB_ENABLE_REPLICATION Replication* repl = m_top.get_alloc().get_replication(); - if (repl) repl->on_spec_destroyed(this); + if (repl) + repl->on_spec_destroyed(this); #endif } -void Spec::init_from_ref(ref_type ref, ArrayParent* parent, size_t ndx_in_parent) +void Spec::init_from_ref(ref_type ref, ArrayParent* parent, size_t ndx_in_parent) TIGHTDB_NOEXCEPT { m_top.init_from_ref(ref); m_top.set_parent(parent, ndx_in_parent); @@ -34,7 +35,7 @@ void Spec::init_from_ref(ref_type ref, ArrayParent* parent, size_t ndx_in_parent } } -void Spec::destroy() +void Spec::destroy() TIGHTDB_NOEXCEPT { m_top.destroy(); } diff --git a/src/tightdb/spec.hpp b/src/tightdb/spec.hpp index 44ffe9dada1..34a7852fec2 100644 --- a/src/tightdb/spec.hpp +++ b/src/tightdb/spec.hpp @@ -32,7 +32,7 @@ class Table; class Spec { public: Spec(const Spec&); - ~Spec(); + ~Spec() TIGHTDB_NOEXCEPT; std::size_t add_column(DataType type, StringData name, ColumnType attr = col_attr_None); std::size_t add_subcolumn(const std::vector& column_path, DataType type, @@ -93,8 +93,8 @@ class Spec { Spec(const Table*, Allocator&, ArrayParent*, std::size_t ndx_in_parent); Spec(const Table*, Allocator&, ref_type, ArrayParent*, std::size_t ndx_in_parent); - void init_from_ref(ref_type, ArrayParent*, std::size_t ndx_in_parent); - void destroy(); + void init_from_ref(ref_type, ArrayParent*, std::size_t ndx_in_parent) TIGHTDB_NOEXCEPT; + void destroy() TIGHTDB_NOEXCEPT; ref_type get_ref() const TIGHTDB_NOEXCEPT; @@ -110,9 +110,6 @@ class Spec { void set_column_type(std::size_t column_ndx, ColumnType type); void set_column_attr(std::size_t column_ndx, ColumnType attr); - // Serialization - template std::size_t write(S& out, std::size_t& pos) const; - std::size_t get_column_type_pos(std::size_t column_ndx) const TIGHTDB_NOEXCEPT; std::size_t get_subspec_ndx(std::size_t column_ndx) const; std::size_t get_subspec_ref(std::size_t subspec_ndx) const; diff --git a/src/tightdb/string_data.hpp b/src/tightdb/string_data.hpp index 3469c20149e..37a21649007 100644 --- a/src/tightdb/string_data.hpp +++ b/src/tightdb/string_data.hpp @@ -65,6 +65,7 @@ class StringData { /// Initialize from a zero terminated C style string. 
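A recurring change throughout this patch is spelling out the no-throw guarantee on destructors, both on value types (Mixed, StringData) and on polymorphic bases and their overrides (Replication above, SequentialGetterBase and the query nodes earlier). A sketch of the polymorphic case, using C++11 noexcept and override directly where the patch uses the TIGHTDB_NOEXCEPT and TIGHTDB_OVERRIDE portability macros:

struct NodeBase {
    virtual ~NodeBase() noexcept {} // deleting through the base is safe
};

struct StringNode: NodeBase {
    StringNode(): m_value(new char[4]) {}

    ~StringNode() noexcept override
    {
        delete[] m_value; // plain deallocation: cannot throw
    }

private:
    char* m_value;
};

int main()
{
    NodeBase* n = new StringNode;
    delete n; // virtual dispatch runs ~StringNode(), then ~NodeBase()
}

Restating the guarantee on every override is redundant once the base destructor is no-throw, but it documents the contract at each definition site, which is presumably why the patch adds it everywhere.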
StringData(const char* c_str) TIGHTDB_NOEXCEPT; + ~StringData() TIGHTDB_NOEXCEPT {} char operator[](std::size_t i) const TIGHTDB_NOEXCEPT { return m_data[i]; } diff --git a/src/tightdb/table.cpp b/src/tightdb/table.cpp index 238beb913be..4ff379fae5e 100644 --- a/src/tightdb/table.cpp +++ b/src/tightdb/table.cpp @@ -155,7 +155,7 @@ void Table::create_columns() } -void Table::invalidate() +void Table::invalidate() TIGHTDB_NOEXCEPT { // This prevents the destructor from deallocating the underlying // memory structure, and from attempting to notify the parent. It @@ -165,15 +165,15 @@ void Table::invalidate() // Invalidate all subtables invalidate_subtables(); - clear_cached_columns(); + destroy_column_accessors(); } -void Table::invalidate_subtables() +void Table::invalidate_subtables() TIGHTDB_NOEXCEPT { size_t n = m_cols.size(); for (size_t i=0; i(m_cols.get(i)); + ColumnBase* c = reinterpret_cast(uintptr_t(m_cols.get(i))); c->invalidate_subtables_virtual(); } } @@ -310,7 +310,7 @@ void Table::cache_columns() if (num_rows != size_t(-1)) m_size = num_rows; } -void Table::clear_cached_columns() +void Table::destroy_column_accessors() TIGHTDB_NOEXCEPT { TIGHTDB_ASSERT(m_cols.is_attached()); @@ -322,7 +322,7 @@ void Table::clear_cached_columns() m_cols.destroy(); } -Table::~Table() +Table::~Table() TIGHTDB_NOEXCEPT { #ifdef TIGHTDB_ENABLE_REPLICATION transact_log().on_table_destroyed(); @@ -342,8 +342,8 @@ Table::~Table() TIGHTDB_ASSERT(parent); TIGHTDB_ASSERT(m_ref_count == 0); TIGHTDB_ASSERT(dynamic_cast(parent)); - static_cast(parent)->child_destroyed(m_columns.get_ndx_in_parent()); - clear_cached_columns(); + static_cast(parent)->child_accessor_destroyed(m_columns.get_ndx_in_parent()); + destroy_column_accessors(); return; } @@ -353,8 +353,8 @@ Table::~Table() // counting, so we must let our parent know about our demise. TIGHTDB_ASSERT(m_ref_count == 0); TIGHTDB_ASSERT(dynamic_cast(parent)); - static_cast(parent)->child_destroyed(m_top.get_ndx_in_parent()); - clear_cached_columns(); + static_cast(parent)->child_accessor_destroyed(m_top.get_ndx_in_parent()); + destroy_column_accessors(); return; } @@ -367,8 +367,12 @@ Table::~Table() // be zero, because that is what has caused the destructor to be // called. In the latter case, there can be no subtables to // invalidate, because they would have kept their parent alive. - if (0 < m_ref_count) invalidate(); - else clear_cached_columns(); + if (0 < m_ref_count) { + invalidate(); + } + else { + destroy_column_accessors(); + } m_top.destroy(); } diff --git a/src/tightdb/table.hpp b/src/tightdb/table.hpp index 4c3f9513cdd..07f7fb84a29 100644 --- a/src/tightdb/table.hpp +++ b/src/tightdb/table.hpp @@ -85,7 +85,7 @@ class Table { /// dynamic lifetime, use Table::copy() instead. Table(const Table&, Allocator& = Allocator::get_default()); - ~Table(); + ~Table() TIGHTDB_NOEXCEPT; /// Construct a new freestanding top-level table with dynamic /// lifetime. @@ -151,8 +151,8 @@ class Table { /// specifies a column, C2, of the spec of C1, and so forth. 
/// /// \sa Spec - Spec& get_spec(); - const Spec& get_spec() const; + Spec& get_spec() TIGHTDB_NOEXCEPT; + const Spec& get_spec() const TIGHTDB_NOEXCEPT; void update_from_spec(); // Must not be called for a table with shared spec std::size_t add_column(DataType type, StringData name); // Add a column dynamically std::size_t add_subcolumn(const std::vector& column_path, DataType type, @@ -403,7 +403,23 @@ class Table { mutable std::size_t m_ref_count; mutable const StringIndex* m_lookup_index; - Table& operator=(const Table&); // Disable copying assignment + /// Disable copying assignment. + /// + /// It could easily be implemented by calling assign(), but the + /// non-checking nature of the low-level dynamically typed API + /// makes it too risky to offer this feature as an + /// operator. + /// + /// FIXME: assign() has not yet been implemented, but the + /// intention is that it will copy the rows of the argument table + /// into this table after clearing the original contents, and for + /// target tables without a shared spec, it would also copy the + /// spec. For target tables with shared spec, it would be an error + /// to pass an argument table with an incompatible spec, but + /// assign() would not check for spec compatibility. This would + /// make it ideal as a basis for implementing operator=() for + /// typed tables. + Table& operator=(const Table&); /// Used when the lifetime of a table is managed by reference /// counting. The lifetime of free-standing tables allocated on @@ -431,7 +447,7 @@ class Table { void create_columns(); void cache_columns(); - void clear_cached_columns(); + void destroy_column_accessors() TIGHTDB_NOEXCEPT; /// Called in the context of Group::commit() to ensure that /// attached table accessors stay valid across a commit. Please @@ -481,15 +497,15 @@ class Table { /// way. This generally happens when a modifying table operation /// fails, and also when one transaction is ended and a new one is /// started. - void invalidate(); + void invalidate() TIGHTDB_NOEXCEPT; - /// Detach all cached subtable accessors. - void invalidate_subtables(); + /// Detach all attached subtable accessors. + void invalidate_subtables() TIGHTDB_NOEXCEPT; void bind_ref() const TIGHTDB_NOEXCEPT { ++m_ref_count; } - void unbind_ref() const { if (--m_ref_count == 0) delete this; } // FIXME: Cannot be noexcept since ~Table() may throw + void unbind_ref() const TIGHTDB_NOEXCEPT { if (--m_ref_count == 0) delete this; } - struct UnbindGuard; + class UnbindGuard; ColumnType get_real_column_type(std::size_t column_ndx) const TIGHTDB_NOEXCEPT; @@ -570,9 +586,12 @@ class Table { class Table::Parent: public ArrayParent { +public: + ~Parent() TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE {} + protected: - /// Must be called whenever a child Table is destroyed. - virtual void child_destroyed(std::size_t child_ndx) = 0; + /// Must be called whenever a child table accessor is destroyed. 
+ virtual void child_accessor_destroyed(std::size_t child_ndx) TIGHTDB_NOEXCEPT = 0; #ifdef TIGHTDB_ENABLE_REPLICATION virtual std::size_t* record_subtable_path(std::size_t* begin, std::size_t* end) TIGHTDB_NOEXCEPT; @@ -657,23 +676,51 @@ inline bool Table::has_shared_spec() const return !m_top.is_attached(); } -inline Spec& Table::get_spec() +inline Spec& Table::get_spec() TIGHTDB_NOEXCEPT { TIGHTDB_ASSERT(!has_shared_spec()); // you can only change specs on top-level tables return m_spec_set; } -inline const Spec& Table::get_spec() const +inline const Spec& Table::get_spec() const TIGHTDB_NOEXCEPT { return m_spec_set; } -struct Table::UnbindGuard { - UnbindGuard(Table* t) TIGHTDB_NOEXCEPT: m_table(t) {} - ~UnbindGuard() { if (m_table) m_table->unbind_ref(); } // FIXME: Cannot be noexcept since ~Table() may throw - Table* operator->() const { return m_table; } - Table* get() const { return m_table; } - Table* release() TIGHTDB_NOEXCEPT { Table* t = m_table; m_table = 0; return t; } +class Table::UnbindGuard { +public: + UnbindGuard(Table* table) TIGHTDB_NOEXCEPT: m_table(table) + { + } + + ~UnbindGuard() TIGHTDB_NOEXCEPT + { + if (m_table) + m_table->unbind_ref(); + } + + Table& operator*() const TIGHTDB_NOEXCEPT + { + return *m_table; + } + + Table* operator->() const TIGHTDB_NOEXCEPT + { + return m_table; + } + + Table* get() const TIGHTDB_NOEXCEPT + { + return m_table; + } + + Table* release() TIGHTDB_NOEXCEPT + { + Table* table = m_table; + m_table = 0; + return table; + } + private: Table* m_table; }; @@ -738,14 +785,14 @@ inline void Table::set_index(std::size_t column_ndx) inline TableRef Table::create(Allocator& alloc) { ref_type ref = create_empty_table(alloc); // Throws - Table* table = new Table(Table::RefCountTag(), alloc, ref, 0, 0); // Throws + Table* table = new Table(RefCountTag(), alloc, ref, 0, 0); // Throws return table->get_table_ref(); } inline TableRef Table::copy(Allocator& alloc) const { ref_type ref = clone(alloc); // Throws - Table* table = new Table(Table::RefCountTag(), alloc, ref, 0, 0); // Throws + Table* table = new Table(RefCountTag(), alloc, ref, 0, 0); // Throws return table->get_table_ref(); } diff --git a/src/tightdb/table_accessors.hpp b/src/tightdb/table_accessors.hpp index 166352c2480..91145ab2a9f 100644 --- a/src/tightdb/table_accessors.hpp +++ b/src/tightdb/table_accessors.hpp @@ -50,7 +50,7 @@ struct SpecBase { template class Enum { public: typedef E enum_type; - Enum(E v) : m_value(v) {}; + Enum(E v): m_value(v) {} operator E() const { return m_value; } private: E m_value; @@ -59,7 +59,7 @@ struct SpecBase { template class Subtable { public: typedef T table_type; - Subtable(T* t) : m_table(t) {}; + Subtable(T* t): m_table(t) {} operator T*() const { return m_table; } private: T* m_table; @@ -93,7 +93,7 @@ struct SpecBase { /// FIXME: Currently we do not support absence of dynamic column /// names. - static const StringData* dyn_col_names() { return 0; } + static void dyn_col_names(StringData*) TIGHTDB_NOEXCEPT {} /// This is the fallback class that is used when no convenience /// methods are specified in the users Spec class. @@ -118,16 +118,16 @@ struct SpecBase { /// /// \endcode /// - /// FIXME: Note: Users ConvenienceMethods may not contain any - /// virtual methods, nor may it contain any data memebers. 
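The UnbindGuard rewrite above turns an ad-hoc struct into a full smart-pointer-style guard: it holds one reference-count unit across a construction sequence that may throw, and release() hands the count to the caller on success. A compact sketch of the idiom with a stand-in Counted type; bind_ref()/unbind_ref() mirror Table's counting, which this patch requires to be no-throw precisely so the guard's destructor can be noexcept:

#include <cstddef>

class Counted {
public:
    Counted(): m_ref_count(0) {}
    void bind_ref() const { ++m_ref_count; }
    void unbind_ref() const { if (--m_ref_count == 0) delete this; } // must not throw

private:
    mutable std::size_t m_ref_count;
};

class UnbindGuard {
public:
    UnbindGuard(Counted* c): m_obj(c) {}
    ~UnbindGuard() { if (m_obj) m_obj->unbind_ref(); }
    Counted* operator->() const { return m_obj; }
    Counted* get() const { return m_obj; }
    Counted* release() { Counted* c = m_obj; m_obj = 0; return c; } // caller now owns the count

private:
    Counted* m_obj;
};

// Usage: bind, do throwing work under the guard, release on success.
Counted* make_checked()
{
    Counted* c = new Counted;
    c->bind_ref();
    UnbindGuard g(c);
    // ... operations that may throw; on throw, g unbinds and deletes c ...
    return g.release();
}

int main()
{
    Counted* c = make_checked();
    c->unbind_ref(); // last reference: object deletes itself
}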
We - /// might want to check this by - /// TIGHTDB_STATIC_ASSERT(sizeof(Derivative of ConvenienceMethods) - /// == 1)), however, this would not be guaranteed by the standard, - /// since even an empty class may add to the size of the derived - /// class. Fortunately, as long as ConvenienceMethods is derived - /// from, by BasicTable, after deriving from Table, this cannot - /// become a problem, nor would it lead to a violation of the - /// strict aliasing rule of C++03 or C++11. + /// FIXME: ConvenienceMethods may not contain any virtual methods, + /// nor may it contain any data memebers. We might want to check + /// this by TIGHTDB_STATIC_ASSERT(sizeof(Derivative of + /// ConvenienceMethods) == 1)), however, this would not be + /// guaranteed by the standard, since even an empty class may add + /// to the size of the derived class. Fortunately, as long as + /// ConvenienceMethods is derived from, by BasicTable, after + /// deriving from Table, this cannot become a problem, nor would + /// it lead to a violation of the strict aliasing rule of C++03 or + /// C++11. struct ConvenienceMethods {}; }; @@ -729,7 +729,7 @@ class FieldAccessor: template BasicTableRef set_subtable() const { BasicTableRef t = unchecked_cast(set_subtable()); - t->set_dynamic_spec(); + T::set_dynamic_spec(*t); return move(t); } diff --git a/src/tightdb/table_basic.hpp b/src/tightdb/table_basic.hpp index a3336346fbc..a69e69c2978 100644 --- a/src/tightdb/table_basic.hpp +++ b/src/tightdb/table_basic.hpp @@ -76,15 +76,20 @@ template class BasicTable: private Table, public Spec::ConvenienceMe using Table::add_empty_row; using Table::insert_empty_row; - BasicTable(Allocator& alloc = Allocator::get_default()): Table(alloc) { set_dynamic_spec(); } + BasicTable(Allocator& alloc = Allocator::get_default()): Table(alloc) + { + set_dynamic_spec(*this); + } BasicTable(const BasicTable& t, Allocator& alloc = Allocator::get_default()): Table(t, alloc) {} + ~BasicTable() TIGHTDB_NOEXCEPT {} + static Ref create(Allocator& = Allocator::get_default()); Ref copy(Allocator& = Allocator::get_default()) const; - static int get_column_count() { return TypeCount::value; } + static int get_column_count() TIGHTDB_NOEXCEPT { return TypeCount::value; } Ref get_table_ref() { return Ref(this); } @@ -225,13 +230,13 @@ template class BasicTable: private Table, public Spec::ConvenienceMe /// where it is desirable to be able to cast to a table type with /// different column names. Similar changes are needed in the Java /// and Objective-C language bindings. - template friend bool is_a(const Table&); + template friend bool is_a(const Table&) TIGHTDB_NOEXCEPT; //@{ /// These functions return null if the specified table is not /// compatible with the specified table type. 
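The is_a()/checked_cast() pair referenced above implements a runtime-checked downcast: the target type's expected spec is compared against the table's actual dynamic spec, and a null ref is returned on mismatch, whereas unchecked_cast() simply trusts the caller. A boiled-down sketch in which the dynamic spec is reduced to an integer tag; Table, PeopleTable and CityTable are illustrative stand-ins:

#include <iostream>

// A table knows its dynamic spec (here boiled down to an integer tag).
struct Table { int spec_tag; };

struct PeopleTable: Table { static const int spec_id = 1; };
struct CityTable:   Table { static const int spec_id = 2; };

template<class T> bool is_a(const Table& t) // noexcept in the patch
{
    return t.spec_tag == T::spec_id;
}

// Null on mismatch, like the BasicTableRef overloads above;
// unchecked_cast would skip the test entirely.
template<class T> T* checked_cast(Table* t)
{
    if (!is_a<T>(*t))
        return 0;
    return static_cast<T*>(t);
}

int main()
{
    PeopleTable p;
    p.spec_tag = PeopleTable::spec_id;
    Table* t = &p;
    std::cout << (checked_cast<PeopleTable>(t) != 0) << '\n'; // 1
    std::cout << (checked_cast<CityTable>(t) != 0) << '\n';   // 0
}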
- template friend BasicTableRef checked_cast(TableRef); - template friend BasicTableRef checked_cast(ConstTableRef); + template friend BasicTableRef checked_cast(TableRef) TIGHTDB_NOEXCEPT; + template friend BasicTableRef checked_cast(ConstTableRef) TIGHTDB_NOEXCEPT; //@} #ifdef TIGHTDB_DEBUG @@ -259,17 +264,23 @@ template class BasicTable: private Table, public Spec::ConvenienceMe return static_cast(Table::get_subtable_ptr(col_idx, row_idx)); } - void set_dynamic_spec() + static void set_dynamic_spec(Table& table) { - tightdb::Spec& spec = get_spec(); - ForEachType::exec(&spec, Spec::dyn_col_names()); - update_from_spec(); + tightdb::Spec& spec = table.get_spec(); + const int num_cols = TypeCount::value; + StringData dyn_col_names[num_cols]; + Spec::dyn_col_names(dyn_col_names); + ForEachType::exec(&spec, dyn_col_names); + table.update_from_spec(); } - static bool matches_dynamic_spec(const tightdb::Spec* spec) + static bool matches_dynamic_spec(const tightdb::Spec* spec) TIGHTDB_NOEXCEPT { + const int num_cols = TypeCount::value; + StringData dyn_col_names[num_cols]; + Spec::dyn_col_names(dyn_col_names); return !HasType::exec(spec, Spec::dyn_col_names()); + _impl::DiffColType>::exec(spec, dyn_col_names); } // This one allows a BasicTable to know that BasicTables with @@ -307,19 +318,18 @@ template class BasicTable: private Table, public Spec::ConvenienceMe #endif template class BasicTable::Query: - public Spec::template ColNames { + public Spec::template ColNames { public: - template friend class _impl::QueryColumnBase; - template friend class _impl::QueryColumn; - Query(const Query&q): Spec::template ColNames(this), m_impl(q.m_impl) {} + ~Query() TIGHTDB_NOEXCEPT {} Query& tableview(const Array& arr) { m_impl.tableview(arr); return *this; } // Query& Query::tableview(const TableView& tv) // Query& Query::tableview(const Array &arr) - Query& tableview(const typename BasicTable::View& v) { + Query& tableview(const typename BasicTable::View& v) + { m_impl.tableview(*v.get_impl()); return *this; } @@ -333,33 +343,34 @@ template class BasicTable::Query: Query& Or() { m_impl.Or(); return *this; } - std::size_t find_next(std::size_t lastmatch=std::size_t(-1)) + std::size_t find_next(std::size_t lastmatch = std::size_t(-1)) { return m_impl.find_next(lastmatch); } - typename BasicTable::View find_all(std::size_t start=0, - std::size_t end=std::size_t(-1), - std::size_t limit=std::size_t(-1)) + typename BasicTable::View find_all(std::size_t start = 0, + std::size_t end = std::size_t(-1), + std::size_t limit = std::size_t(-1)) { return m_impl.find_all(start, end, limit); } - typename BasicTable::ConstView find_all(std::size_t start=0, - std::size_t end=std::size_t(-1), - std::size_t limit=std::size_t(-1)) const + typename BasicTable::ConstView find_all(std::size_t start = 0, + std::size_t end = std::size_t(-1), + std::size_t limit = std::size_t(-1)) const { return m_impl.find_all(start, end, limit); } - std::size_t count(std::size_t start=0, - std::size_t end=std::size_t(-1), std::size_t limit=std::size_t(-1)) const + std::size_t count(std::size_t start = 0, + std::size_t end = std::size_t(-1), + std::size_t limit = std::size_t(-1)) const { return m_impl.count(start, end, limit); } std::size_t remove(std::size_t start = 0, - std::size_t end = std::size_t(-1), + std::size_t end = std::size_t(-1), std::size_t limit = std::size_t(-1)) { return m_impl.remove(start, end, limit); @@ -370,12 +381,15 @@ template class BasicTable::Query: #endif protected: - friend class BasicTable; - - Query(const 
BasicTable& table): Spec::template ColNames(this), m_impl(table) {} + Query(const BasicTable& table): + Spec::template ColNames(this), m_impl(table) {} private: tightdb::Query m_impl; + + friend class BasicTable; + template friend class _impl::QueryColumnBase; + template friend class _impl::QueryColumn; }; #ifdef _MSC_VER @@ -435,8 +449,10 @@ namespace _impl TIGHTDB_ASSERT(col_idx == spec->get_column_count()); typedef typename Subtab::Columns Subcolumns; Spec subspec = spec->add_subtable_column(col_names[col_idx]); - const StringData* const subcol_names = Subtab::spec_type::dyn_col_names(); - ForEachType::exec(&subspec, subcol_names); + const int num_cols = TypeCount::value; + StringData dyn_col_names[num_cols]; + Subtab::spec_type::dyn_col_names(dyn_col_names); + ForEachType::exec(&subspec, dyn_col_names); } }; @@ -632,9 +648,9 @@ namespace _impl template inline typename BasicTable::Ref BasicTable::create(Allocator& alloc) { - TableRef ref = Table::create(alloc); - static_cast(*ref)->set_dynamic_spec(); - return unchecked_cast >(move(ref)); + TableRef table = Table::create(alloc); + set_dynamic_spec(*table); + return unchecked_cast >(move(table)); } @@ -645,22 +661,24 @@ inline typename BasicTable::Ref BasicTable::copy(Allocator& alloc) c } -template inline bool is_a(const Table& t) +template inline bool is_a(const Table& t) TIGHTDB_NOEXCEPT { return T::matches_dynamic_spec(&t.get_spec()); } -template inline BasicTableRef checked_cast(TableRef t) +template inline BasicTableRef checked_cast(TableRef t) TIGHTDB_NOEXCEPT { - if (!is_a(*t)) return BasicTableRef(); // Null + if (!is_a(*t)) + return BasicTableRef(); // Null return unchecked_cast(t); } -template inline BasicTableRef checked_cast(ConstTableRef t) +template inline BasicTableRef checked_cast(ConstTableRef t) TIGHTDB_NOEXCEPT { - if (!is_a(*t)) return BasicTableRef(); // Null + if (!is_a(*t)) + return BasicTableRef(); // Null return unchecked_cast(t); } diff --git a/src/tightdb/table_macros.hpp b/src/tightdb/table_macros.hpp index b169f434379..034861a5830 100644 --- a/src/tightdb/table_macros.hpp +++ b/src/tightdb/table_macros.hpp @@ -43,10 +43,9 @@ struct Table##Spec: ::tightdb::SpecBase { \ ColNames(Init i) TIGHTDB_NOEXCEPT: name1(i) {} \ }; \ \ - static const tightdb::StringData* dyn_col_names() \ + static void dyn_col_names(tightdb::StringData* names) TIGHTDB_NOEXCEPT \ { \ - static tightdb::StringData names[] = { tightdb::StringData(#name1, sizeof(#name1)-1) }; \ - return names; \ + names[0] = tightdb::StringData(#name1, sizeof #name1 - 1); \ } \ \ struct ConvenienceMethods { \ @@ -84,10 +83,10 @@ struct Table##Spec: ::tightdb::SpecBase { \ ColNames(Init i) TIGHTDB_NOEXCEPT: name1(i), name2(i) {} \ }; \ \ - static const tightdb::StringData* dyn_col_names() \ + static void dyn_col_names(tightdb::StringData* names) TIGHTDB_NOEXCEPT \ { \ - static tightdb::StringData names[] = { tightdb::StringData(#name1, sizeof(#name1)-1), tightdb::StringData(#name2, sizeof(#name2)-1) }; \ - return names; \ + names[0] = tightdb::StringData(#name1, sizeof #name1 - 1); \ + names[1] = tightdb::StringData(#name2, sizeof #name2 - 1); \ } \ \ struct ConvenienceMethods { \ @@ -127,10 +126,11 @@ struct Table##Spec: ::tightdb::SpecBase { \ ColNames(Init i) TIGHTDB_NOEXCEPT: name1(i), name2(i), name3(i) {} \ }; \ \ - static const tightdb::StringData* dyn_col_names() \ + static void dyn_col_names(tightdb::StringData* names) TIGHTDB_NOEXCEPT \ { \ - static tightdb::StringData names[] = { tightdb::StringData(#name1, sizeof(#name1)-1), 
tightdb::StringData(#name2, sizeof(#name2)-1), tightdb::StringData(#name3, sizeof(#name3)-1) }; \ - return names; \ + names[0] = tightdb::StringData(#name1, sizeof #name1 - 1); \ + names[1] = tightdb::StringData(#name2, sizeof #name2 - 1); \ + names[2] = tightdb::StringData(#name3, sizeof #name3 - 1); \ } \ \ struct ConvenienceMethods { \ @@ -172,10 +172,12 @@ struct Table##Spec: ::tightdb::SpecBase { \ ColNames(Init i) TIGHTDB_NOEXCEPT: name1(i), name2(i), name3(i), name4(i) {} \ }; \ \ - static const tightdb::StringData* dyn_col_names() \ + static void dyn_col_names(tightdb::StringData* names) TIGHTDB_NOEXCEPT \ { \ - static tightdb::StringData names[] = { tightdb::StringData(#name1, sizeof(#name1)-1), tightdb::StringData(#name2, sizeof(#name2)-1), tightdb::StringData(#name3, sizeof(#name3)-1), tightdb::StringData(#name4, sizeof(#name4)-1) }; \ - return names; \ + names[0] = tightdb::StringData(#name1, sizeof #name1 - 1); \ + names[1] = tightdb::StringData(#name2, sizeof #name2 - 1); \ + names[2] = tightdb::StringData(#name3, sizeof #name3 - 1); \ + names[3] = tightdb::StringData(#name4, sizeof #name4 - 1); \ } \ \ struct ConvenienceMethods { \ @@ -219,10 +221,13 @@ struct Table##Spec: ::tightdb::SpecBase { \ ColNames(Init i) TIGHTDB_NOEXCEPT: name1(i), name2(i), name3(i), name4(i), name5(i) {} \ }; \ \ - static const tightdb::StringData* dyn_col_names() \ + static void dyn_col_names(tightdb::StringData* names) TIGHTDB_NOEXCEPT \ { \ - static tightdb::StringData names[] = { tightdb::StringData(#name1, sizeof(#name1)-1), tightdb::StringData(#name2, sizeof(#name2)-1), tightdb::StringData(#name3, sizeof(#name3)-1), tightdb::StringData(#name4, sizeof(#name4)-1), tightdb::StringData(#name5, sizeof(#name5)-1) }; \ - return names; \ + names[0] = tightdb::StringData(#name1, sizeof #name1 - 1); \ + names[1] = tightdb::StringData(#name2, sizeof #name2 - 1); \ + names[2] = tightdb::StringData(#name3, sizeof #name3 - 1); \ + names[3] = tightdb::StringData(#name4, sizeof #name4 - 1); \ + names[4] = tightdb::StringData(#name5, sizeof #name5 - 1); \ } \ \ struct ConvenienceMethods { \ @@ -268,10 +273,14 @@ struct Table##Spec: ::tightdb::SpecBase { \ ColNames(Init i) TIGHTDB_NOEXCEPT: name1(i), name2(i), name3(i), name4(i), name5(i), name6(i) {} \ }; \ \ - static const tightdb::StringData* dyn_col_names() \ + static void dyn_col_names(tightdb::StringData* names) TIGHTDB_NOEXCEPT \ { \ - static tightdb::StringData names[] = { tightdb::StringData(#name1, sizeof(#name1)-1), tightdb::StringData(#name2, sizeof(#name2)-1), tightdb::StringData(#name3, sizeof(#name3)-1), tightdb::StringData(#name4, sizeof(#name4)-1), tightdb::StringData(#name5, sizeof(#name5)-1), tightdb::StringData(#name6, sizeof(#name6)-1) }; \ - return names; \ + names[0] = tightdb::StringData(#name1, sizeof #name1 - 1); \ + names[1] = tightdb::StringData(#name2, sizeof #name2 - 1); \ + names[2] = tightdb::StringData(#name3, sizeof #name3 - 1); \ + names[3] = tightdb::StringData(#name4, sizeof #name4 - 1); \ + names[4] = tightdb::StringData(#name5, sizeof #name5 - 1); \ + names[5] = tightdb::StringData(#name6, sizeof #name6 - 1); \ } \ \ struct ConvenienceMethods { \ @@ -319,10 +328,15 @@ struct Table##Spec: ::tightdb::SpecBase { \ ColNames(Init i) TIGHTDB_NOEXCEPT: name1(i), name2(i), name3(i), name4(i), name5(i), name6(i), name7(i) {} \ }; \ \ - static const tightdb::StringData* dyn_col_names() \ + static void dyn_col_names(tightdb::StringData* names) TIGHTDB_NOEXCEPT \ { \ - static tightdb::StringData names[] = { 
tightdb::StringData(#name1, sizeof(#name1)-1), tightdb::StringData(#name2, sizeof(#name2)-1), tightdb::StringData(#name3, sizeof(#name3)-1), tightdb::StringData(#name4, sizeof(#name4)-1), tightdb::StringData(#name5, sizeof(#name5)-1), tightdb::StringData(#name6, sizeof(#name6)-1), tightdb::StringData(#name7, sizeof(#name7)-1) }; \ - return names; \ + names[0] = tightdb::StringData(#name1, sizeof #name1 - 1); \ + names[1] = tightdb::StringData(#name2, sizeof #name2 - 1); \ + names[2] = tightdb::StringData(#name3, sizeof #name3 - 1); \ + names[3] = tightdb::StringData(#name4, sizeof #name4 - 1); \ + names[4] = tightdb::StringData(#name5, sizeof #name5 - 1); \ + names[5] = tightdb::StringData(#name6, sizeof #name6 - 1); \ + names[6] = tightdb::StringData(#name7, sizeof #name7 - 1); \ } \ \ struct ConvenienceMethods { \ @@ -372,10 +386,16 @@ struct Table##Spec: ::tightdb::SpecBase { \ ColNames(Init i) TIGHTDB_NOEXCEPT: name1(i), name2(i), name3(i), name4(i), name5(i), name6(i), name7(i), name8(i) {} \ }; \ \ - static const tightdb::StringData* dyn_col_names() \ + static void dyn_col_names(tightdb::StringData* names) TIGHTDB_NOEXCEPT \ { \ - static tightdb::StringData names[] = { tightdb::StringData(#name1, sizeof(#name1)-1), tightdb::StringData(#name2, sizeof(#name2)-1), tightdb::StringData(#name3, sizeof(#name3)-1), tightdb::StringData(#name4, sizeof(#name4)-1), tightdb::StringData(#name5, sizeof(#name5)-1), tightdb::StringData(#name6, sizeof(#name6)-1), tightdb::StringData(#name7, sizeof(#name7)-1), tightdb::StringData(#name8, sizeof(#name8)-1) }; \ - return names; \ + names[0] = tightdb::StringData(#name1, sizeof #name1 - 1); \ + names[1] = tightdb::StringData(#name2, sizeof #name2 - 1); \ + names[2] = tightdb::StringData(#name3, sizeof #name3 - 1); \ + names[3] = tightdb::StringData(#name4, sizeof #name4 - 1); \ + names[4] = tightdb::StringData(#name5, sizeof #name5 - 1); \ + names[5] = tightdb::StringData(#name6, sizeof #name6 - 1); \ + names[6] = tightdb::StringData(#name7, sizeof #name7 - 1); \ + names[7] = tightdb::StringData(#name8, sizeof #name8 - 1); \ } \ \ struct ConvenienceMethods { \ @@ -427,10 +447,17 @@ struct Table##Spec: ::tightdb::SpecBase { \ ColNames(Init i) TIGHTDB_NOEXCEPT: name1(i), name2(i), name3(i), name4(i), name5(i), name6(i), name7(i), name8(i), name9(i) {} \ }; \ \ - static const tightdb::StringData* dyn_col_names() \ + static void dyn_col_names(tightdb::StringData* names) TIGHTDB_NOEXCEPT \ { \ - static tightdb::StringData names[] = { tightdb::StringData(#name1, sizeof(#name1)-1), tightdb::StringData(#name2, sizeof(#name2)-1), tightdb::StringData(#name3, sizeof(#name3)-1), tightdb::StringData(#name4, sizeof(#name4)-1), tightdb::StringData(#name5, sizeof(#name5)-1), tightdb::StringData(#name6, sizeof(#name6)-1), tightdb::StringData(#name7, sizeof(#name7)-1), tightdb::StringData(#name8, sizeof(#name8)-1), tightdb::StringData(#name9, sizeof(#name9)-1) }; \ - return names; \ + names[0] = tightdb::StringData(#name1, sizeof #name1 - 1); \ + names[1] = tightdb::StringData(#name2, sizeof #name2 - 1); \ + names[2] = tightdb::StringData(#name3, sizeof #name3 - 1); \ + names[3] = tightdb::StringData(#name4, sizeof #name4 - 1); \ + names[4] = tightdb::StringData(#name5, sizeof #name5 - 1); \ + names[5] = tightdb::StringData(#name6, sizeof #name6 - 1); \ + names[6] = tightdb::StringData(#name7, sizeof #name7 - 1); \ + names[7] = tightdb::StringData(#name8, sizeof #name8 - 1); \ + names[8] = tightdb::StringData(#name9, sizeof #name9 - 1); \ } \ \ struct ConvenienceMethods { 
\ @@ -484,10 +511,18 @@ struct Table##Spec: ::tightdb::SpecBase { \ ColNames(Init i) TIGHTDB_NOEXCEPT: name1(i), name2(i), name3(i), name4(i), name5(i), name6(i), name7(i), name8(i), name9(i), name10(i) {} \ }; \ \ - static const tightdb::StringData* dyn_col_names() \ + static void dyn_col_names(tightdb::StringData* names) TIGHTDB_NOEXCEPT \ { \ - static tightdb::StringData names[] = { tightdb::StringData(#name1, sizeof(#name1)-1), tightdb::StringData(#name2, sizeof(#name2)-1), tightdb::StringData(#name3, sizeof(#name3)-1), tightdb::StringData(#name4, sizeof(#name4)-1), tightdb::StringData(#name5, sizeof(#name5)-1), tightdb::StringData(#name6, sizeof(#name6)-1), tightdb::StringData(#name7, sizeof(#name7)-1), tightdb::StringData(#name8, sizeof(#name8)-1), tightdb::StringData(#name9, sizeof(#name9)-1), tightdb::StringData(#name10, sizeof(#name10)-1) }; \ - return names; \ + names[0] = tightdb::StringData(#name1, sizeof #name1 - 1); \ + names[1] = tightdb::StringData(#name2, sizeof #name2 - 1); \ + names[2] = tightdb::StringData(#name3, sizeof #name3 - 1); \ + names[3] = tightdb::StringData(#name4, sizeof #name4 - 1); \ + names[4] = tightdb::StringData(#name5, sizeof #name5 - 1); \ + names[5] = tightdb::StringData(#name6, sizeof #name6 - 1); \ + names[6] = tightdb::StringData(#name7, sizeof #name7 - 1); \ + names[7] = tightdb::StringData(#name8, sizeof #name8 - 1); \ + names[8] = tightdb::StringData(#name9, sizeof #name9 - 1); \ + names[9] = tightdb::StringData(#name10, sizeof #name10 - 1); \ } \ \ struct ConvenienceMethods { \ @@ -543,10 +578,19 @@ struct Table##Spec: ::tightdb::SpecBase { \ ColNames(Init i) TIGHTDB_NOEXCEPT: name1(i), name2(i), name3(i), name4(i), name5(i), name6(i), name7(i), name8(i), name9(i), name10(i), name11(i) {} \ }; \ \ - static const tightdb::StringData* dyn_col_names() \ + static void dyn_col_names(tightdb::StringData* names) TIGHTDB_NOEXCEPT \ { \ - static tightdb::StringData names[] = { tightdb::StringData(#name1, sizeof(#name1)-1), tightdb::StringData(#name2, sizeof(#name2)-1), tightdb::StringData(#name3, sizeof(#name3)-1), tightdb::StringData(#name4, sizeof(#name4)-1), tightdb::StringData(#name5, sizeof(#name5)-1), tightdb::StringData(#name6, sizeof(#name6)-1), tightdb::StringData(#name7, sizeof(#name7)-1), tightdb::StringData(#name8, sizeof(#name8)-1), tightdb::StringData(#name9, sizeof(#name9)-1), tightdb::StringData(#name10, sizeof(#name10)-1), tightdb::StringData(#name11, sizeof(#name11)-1) }; \ - return names; \ + names[0] = tightdb::StringData(#name1, sizeof #name1 - 1); \ + names[1] = tightdb::StringData(#name2, sizeof #name2 - 1); \ + names[2] = tightdb::StringData(#name3, sizeof #name3 - 1); \ + names[3] = tightdb::StringData(#name4, sizeof #name4 - 1); \ + names[4] = tightdb::StringData(#name5, sizeof #name5 - 1); \ + names[5] = tightdb::StringData(#name6, sizeof #name6 - 1); \ + names[6] = tightdb::StringData(#name7, sizeof #name7 - 1); \ + names[7] = tightdb::StringData(#name8, sizeof #name8 - 1); \ + names[8] = tightdb::StringData(#name9, sizeof #name9 - 1); \ + names[9] = tightdb::StringData(#name10, sizeof #name10 - 1); \ + names[10] = tightdb::StringData(#name11, sizeof #name11 - 1); \ } \ \ struct ConvenienceMethods { \ @@ -604,10 +648,20 @@ struct Table##Spec: ::tightdb::SpecBase { \ ColNames(Init i) TIGHTDB_NOEXCEPT: name1(i), name2(i), name3(i), name4(i), name5(i), name6(i), name7(i), name8(i), name9(i), name10(i), name11(i), name12(i) {} \ }; \ \ - static const tightdb::StringData* dyn_col_names() \ + static void 
dyn_col_names(tightdb::StringData* names) TIGHTDB_NOEXCEPT \ { \ - static tightdb::StringData names[] = { tightdb::StringData(#name1, sizeof(#name1)-1), tightdb::StringData(#name2, sizeof(#name2)-1), tightdb::StringData(#name3, sizeof(#name3)-1), tightdb::StringData(#name4, sizeof(#name4)-1), tightdb::StringData(#name5, sizeof(#name5)-1), tightdb::StringData(#name6, sizeof(#name6)-1), tightdb::StringData(#name7, sizeof(#name7)-1), tightdb::StringData(#name8, sizeof(#name8)-1), tightdb::StringData(#name9, sizeof(#name9)-1), tightdb::StringData(#name10, sizeof(#name10)-1), tightdb::StringData(#name11, sizeof(#name11)-1), tightdb::StringData(#name12, sizeof(#name12)-1) }; \ - return names; \ + names[0] = tightdb::StringData(#name1, sizeof #name1 - 1); \ + names[1] = tightdb::StringData(#name2, sizeof #name2 - 1); \ + names[2] = tightdb::StringData(#name3, sizeof #name3 - 1); \ + names[3] = tightdb::StringData(#name4, sizeof #name4 - 1); \ + names[4] = tightdb::StringData(#name5, sizeof #name5 - 1); \ + names[5] = tightdb::StringData(#name6, sizeof #name6 - 1); \ + names[6] = tightdb::StringData(#name7, sizeof #name7 - 1); \ + names[7] = tightdb::StringData(#name8, sizeof #name8 - 1); \ + names[8] = tightdb::StringData(#name9, sizeof #name9 - 1); \ + names[9] = tightdb::StringData(#name10, sizeof #name10 - 1); \ + names[10] = tightdb::StringData(#name11, sizeof #name11 - 1); \ + names[11] = tightdb::StringData(#name12, sizeof #name12 - 1); \ } \ \ struct ConvenienceMethods { \ @@ -667,10 +721,21 @@ struct Table##Spec: ::tightdb::SpecBase { \ ColNames(Init i) TIGHTDB_NOEXCEPT: name1(i), name2(i), name3(i), name4(i), name5(i), name6(i), name7(i), name8(i), name9(i), name10(i), name11(i), name12(i), name13(i) {} \ }; \ \ - static const tightdb::StringData* dyn_col_names() \ + static void dyn_col_names(tightdb::StringData* names) TIGHTDB_NOEXCEPT \ { \ - static tightdb::StringData names[] = { tightdb::StringData(#name1, sizeof(#name1)-1), tightdb::StringData(#name2, sizeof(#name2)-1), tightdb::StringData(#name3, sizeof(#name3)-1), tightdb::StringData(#name4, sizeof(#name4)-1), tightdb::StringData(#name5, sizeof(#name5)-1), tightdb::StringData(#name6, sizeof(#name6)-1), tightdb::StringData(#name7, sizeof(#name7)-1), tightdb::StringData(#name8, sizeof(#name8)-1), tightdb::StringData(#name9, sizeof(#name9)-1), tightdb::StringData(#name10, sizeof(#name10)-1), tightdb::StringData(#name11, sizeof(#name11)-1), tightdb::StringData(#name12, sizeof(#name12)-1), tightdb::StringData(#name13, sizeof(#name13)-1) }; \ - return names; \ + names[0] = tightdb::StringData(#name1, sizeof #name1 - 1); \ + names[1] = tightdb::StringData(#name2, sizeof #name2 - 1); \ + names[2] = tightdb::StringData(#name3, sizeof #name3 - 1); \ + names[3] = tightdb::StringData(#name4, sizeof #name4 - 1); \ + names[4] = tightdb::StringData(#name5, sizeof #name5 - 1); \ + names[5] = tightdb::StringData(#name6, sizeof #name6 - 1); \ + names[6] = tightdb::StringData(#name7, sizeof #name7 - 1); \ + names[7] = tightdb::StringData(#name8, sizeof #name8 - 1); \ + names[8] = tightdb::StringData(#name9, sizeof #name9 - 1); \ + names[9] = tightdb::StringData(#name10, sizeof #name10 - 1); \ + names[10] = tightdb::StringData(#name11, sizeof #name11 - 1); \ + names[11] = tightdb::StringData(#name12, sizeof #name12 - 1); \ + names[12] = tightdb::StringData(#name13, sizeof #name13 - 1); \ } \ \ struct ConvenienceMethods { \ @@ -732,10 +797,22 @@ struct Table##Spec: ::tightdb::SpecBase { \ ColNames(Init i) TIGHTDB_NOEXCEPT: name1(i), name2(i), 
name3(i), name4(i), name5(i), name6(i), name7(i), name8(i), name9(i), name10(i), name11(i), name12(i), name13(i), name14(i) {} \ }; \ \ - static const tightdb::StringData* dyn_col_names() \ + static void dyn_col_names(tightdb::StringData* names) TIGHTDB_NOEXCEPT \ { \ - static tightdb::StringData names[] = { tightdb::StringData(#name1, sizeof(#name1)-1), tightdb::StringData(#name2, sizeof(#name2)-1), tightdb::StringData(#name3, sizeof(#name3)-1), tightdb::StringData(#name4, sizeof(#name4)-1), tightdb::StringData(#name5, sizeof(#name5)-1), tightdb::StringData(#name6, sizeof(#name6)-1), tightdb::StringData(#name7, sizeof(#name7)-1), tightdb::StringData(#name8, sizeof(#name8)-1), tightdb::StringData(#name9, sizeof(#name9)-1), tightdb::StringData(#name10, sizeof(#name10)-1), tightdb::StringData(#name11, sizeof(#name11)-1), tightdb::StringData(#name12, sizeof(#name12)-1), tightdb::StringData(#name13, sizeof(#name13)-1), tightdb::StringData(#name14, sizeof(#name14)-1) }; \ - return names; \ + names[0] = tightdb::StringData(#name1, sizeof #name1 - 1); \ + names[1] = tightdb::StringData(#name2, sizeof #name2 - 1); \ + names[2] = tightdb::StringData(#name3, sizeof #name3 - 1); \ + names[3] = tightdb::StringData(#name4, sizeof #name4 - 1); \ + names[4] = tightdb::StringData(#name5, sizeof #name5 - 1); \ + names[5] = tightdb::StringData(#name6, sizeof #name6 - 1); \ + names[6] = tightdb::StringData(#name7, sizeof #name7 - 1); \ + names[7] = tightdb::StringData(#name8, sizeof #name8 - 1); \ + names[8] = tightdb::StringData(#name9, sizeof #name9 - 1); \ + names[9] = tightdb::StringData(#name10, sizeof #name10 - 1); \ + names[10] = tightdb::StringData(#name11, sizeof #name11 - 1); \ + names[11] = tightdb::StringData(#name12, sizeof #name12 - 1); \ + names[12] = tightdb::StringData(#name13, sizeof #name13 - 1); \ + names[13] = tightdb::StringData(#name14, sizeof #name14 - 1); \ } \ \ struct ConvenienceMethods { \ @@ -799,10 +876,23 @@ struct Table##Spec: ::tightdb::SpecBase { \ ColNames(Init i) TIGHTDB_NOEXCEPT: name1(i), name2(i), name3(i), name4(i), name5(i), name6(i), name7(i), name8(i), name9(i), name10(i), name11(i), name12(i), name13(i), name14(i), name15(i) {} \ }; \ \ - static const tightdb::StringData* dyn_col_names() \ + static void dyn_col_names(tightdb::StringData* names) TIGHTDB_NOEXCEPT \ { \ - static tightdb::StringData names[] = { tightdb::StringData(#name1, sizeof(#name1)-1), tightdb::StringData(#name2, sizeof(#name2)-1), tightdb::StringData(#name3, sizeof(#name3)-1), tightdb::StringData(#name4, sizeof(#name4)-1), tightdb::StringData(#name5, sizeof(#name5)-1), tightdb::StringData(#name6, sizeof(#name6)-1), tightdb::StringData(#name7, sizeof(#name7)-1), tightdb::StringData(#name8, sizeof(#name8)-1), tightdb::StringData(#name9, sizeof(#name9)-1), tightdb::StringData(#name10, sizeof(#name10)-1), tightdb::StringData(#name11, sizeof(#name11)-1), tightdb::StringData(#name12, sizeof(#name12)-1), tightdb::StringData(#name13, sizeof(#name13)-1), tightdb::StringData(#name14, sizeof(#name14)-1), tightdb::StringData(#name15, sizeof(#name15)-1) }; \ - return names; \ + names[0] = tightdb::StringData(#name1, sizeof #name1 - 1); \ + names[1] = tightdb::StringData(#name2, sizeof #name2 - 1); \ + names[2] = tightdb::StringData(#name3, sizeof #name3 - 1); \ + names[3] = tightdb::StringData(#name4, sizeof #name4 - 1); \ + names[4] = tightdb::StringData(#name5, sizeof #name5 - 1); \ + names[5] = tightdb::StringData(#name6, sizeof #name6 - 1); \ + names[6] = tightdb::StringData(#name7, sizeof #name7 - 1); \ 
+ names[7] = tightdb::StringData(#name8, sizeof #name8 - 1); \ + names[8] = tightdb::StringData(#name9, sizeof #name9 - 1); \ + names[9] = tightdb::StringData(#name10, sizeof #name10 - 1); \ + names[10] = tightdb::StringData(#name11, sizeof #name11 - 1); \ + names[11] = tightdb::StringData(#name12, sizeof #name12 - 1); \ + names[12] = tightdb::StringData(#name13, sizeof #name13 - 1); \ + names[13] = tightdb::StringData(#name14, sizeof #name14 - 1); \ + names[14] = tightdb::StringData(#name15, sizeof #name15 - 1); \ } \ \ struct ConvenienceMethods { \ diff --git a/src/tightdb/table_macros.hpp.cheetah b/src/tightdb/table_macros.hpp.cheetah index f9182fa6f90..9ff86d84e5f 100644 --- a/src/tightdb/table_macros.hpp.cheetah +++ b/src/tightdb/table_macros.hpp.cheetah @@ -81,17 +81,11 @@ name${j+1}%slurp {} \ }; \ \ - static const tightdb::StringData* dyn_col_names() \ + static void dyn_col_names(tightdb::StringData* names) TIGHTDB_NOEXCEPT \ { \ - static tightdb::StringData names[] = { %slurp %for $j in range($num_cols) -%if 0 < $j -, %slurp -%end if -tightdb::StringData(#name${j+1}, sizeof(#name${j+1})-1)%slurp + names[$j] = tightdb::StringData(#name${j+1}, sizeof #name${j+1} - 1); \ %end for - }; \ - return names; \ } \ \ struct ConvenienceMethods { \ diff --git a/src/tightdb/table_ref.hpp b/src/tightdb/table_ref.hpp index fc7325a1a1b..04323b8262b 100644 --- a/src/tightdb/table_ref.hpp +++ b/src/tightdb/table_ref.hpp @@ -41,7 +41,7 @@ template<class Spec> class BasicTable; /// /// void func(Table& table) /// { -/// Table& sub1 = *table.get_subtable(0,0); // INVALID! (sub1 becomes 'dangling') +/// Table& sub1 = *(table.get_subtable(0,0)); // INVALID! (sub1 becomes 'dangling') /// TableRef sub2 = table.get_subtable(0,0); // Safe! /// } /// @@ -63,6 +63,7 @@ template<class T> class BasicTableRef: bind_ptr<T> { #else BasicTableRef() TIGHTDB_NOEXCEPT {} #endif + ~BasicTableRef() TIGHTDB_NOEXCEPT {} #ifdef TIGHTDB_HAVE_CXX11_RVALUE_REFERENCE @@ -71,16 +72,16 @@ template<class T> class BasicTableRef: bind_ptr<T> { template<class U> BasicTableRef(const BasicTableRef<U>& r) TIGHTDB_NOEXCEPT: bind_ptr<T>(r) {} // Copy assign - BasicTableRef& operator=(const BasicTableRef&); - template<class U> BasicTableRef& operator=(const BasicTableRef<U>&); + BasicTableRef& operator=(const BasicTableRef&) TIGHTDB_NOEXCEPT; + template<class U> BasicTableRef& operator=(const BasicTableRef<U>&) TIGHTDB_NOEXCEPT; // Move construct BasicTableRef(BasicTableRef&& r) TIGHTDB_NOEXCEPT: bind_ptr<T>(std::move(r)) {} template<class U> BasicTableRef(BasicTableRef<U>&& r) TIGHTDB_NOEXCEPT: bind_ptr<T>(std::move(r)) {} // Move assign - BasicTableRef& operator=(BasicTableRef&&); - template<class U> BasicTableRef& operator=(BasicTableRef<U>&&); + BasicTableRef& operator=(BasicTableRef&&) TIGHTDB_NOEXCEPT; + template<class U> BasicTableRef& operator=(BasicTableRef<U>&&) TIGHTDB_NOEXCEPT; #else // !TIGHTDB_HAVE_CXX11_RVALUE_REFERENCE @@ -89,8 +90,8 @@ template<class T> class BasicTableRef: bind_ptr<T> { template<class U> BasicTableRef(BasicTableRef<U> r) TIGHTDB_NOEXCEPT: bind_ptr<T>(move(r)) {} // Copy assign - BasicTableRef& operator=(BasicTableRef); - template<class U> BasicTableRef& operator=(BasicTableRef<U>); + BasicTableRef& operator=(BasicTableRef) TIGHTDB_NOEXCEPT; + template<class U> BasicTableRef& operator=(BasicTableRef<U>) TIGHTDB_NOEXCEPT; #endif // !TIGHTDB_HAVE_CXX11_RVALUE_REFERENCE @@ -105,7 +106,7 @@ template<class T> class BasicTableRef: bind_ptr<T> { // Dereference #ifdef __clang__ // Clang has a bug that causes it to effectively ignore the 'using' declaration.
- T& operator*() const { return bind_ptr::operator*(); } + T& operator*() const TIGHTDB_NOEXCEPT { return bind_ptr::operator*(); } #else using bind_ptr::operator*; #endif @@ -117,7 +118,7 @@ template class BasicTableRef: bind_ptr { using bind_ptr::operator typename bind_ptr::unspecified_bool_type; #endif - void reset() { bind_ptr::reset(); } + void reset() TIGHTDB_NOEXCEPT { bind_ptr::reset(); } void swap(BasicTableRef& r) TIGHTDB_NOEXCEPT { this->bind_ptr::swap(r); } friend void swap(BasicTableRef& a, BasicTableRef& b) TIGHTDB_NOEXCEPT { a.swap(b); } @@ -185,27 +186,29 @@ template inline BasicTableRef unchecked_cast(ConstTableRef t) #ifdef TIGHTDB_HAVE_CXX11_RVALUE_REFERENCE -template inline BasicTableRef& BasicTableRef::operator=(const BasicTableRef& r) +template +inline BasicTableRef& BasicTableRef::operator=(const BasicTableRef& r) TIGHTDB_NOEXCEPT { this->bind_ptr::operator=(r); return *this; } template template -inline BasicTableRef& BasicTableRef::operator=(const BasicTableRef& r) +inline BasicTableRef& BasicTableRef::operator=(const BasicTableRef& r) TIGHTDB_NOEXCEPT { this->bind_ptr::operator=(r); return *this; } -template inline BasicTableRef& BasicTableRef::operator=(BasicTableRef&& r) +template +inline BasicTableRef& BasicTableRef::operator=(BasicTableRef&& r) TIGHTDB_NOEXCEPT { this->bind_ptr::operator=(std::move(r)); return *this; } template template -inline BasicTableRef& BasicTableRef::operator=(BasicTableRef&& r) +inline BasicTableRef& BasicTableRef::operator=(BasicTableRef&& r) TIGHTDB_NOEXCEPT { this->bind_ptr::operator=(std::move(r)); return *this; @@ -213,14 +216,15 @@ inline BasicTableRef& BasicTableRef::operator=(BasicTableRef&& r) #else // !TIGHTDB_HAVE_CXX11_RVALUE_REFERENCE -template inline BasicTableRef& BasicTableRef::operator=(BasicTableRef r) +template +inline BasicTableRef& BasicTableRef::operator=(BasicTableRef r) TIGHTDB_NOEXCEPT { this->bind_ptr::operator=(move(static_cast&>(r))); return *this; } template template -inline BasicTableRef& BasicTableRef::operator=(BasicTableRef r) +inline BasicTableRef& BasicTableRef::operator=(BasicTableRef r) TIGHTDB_NOEXCEPT { this->bind_ptr::operator=(move(static_cast&>(r))); return *this; diff --git a/src/tightdb/table_view.hpp b/src/tightdb/table_view.hpp index ca42a6b09b7..e7181632b34 100644 --- a/src/tightdb/table_view.hpp +++ b/src/tightdb/table_view.hpp @@ -34,7 +34,7 @@ using std::size_t; class TableViewBase { public: bool is_empty() const TIGHTDB_NOEXCEPT { return m_refs.is_empty(); } - size_t size() const TIGHTDB_NOEXCEPT { return m_refs.size(); } + std::size_t size() const TIGHTDB_NOEXCEPT { return m_refs.size(); } // Column information size_t get_column_count() const TIGHTDB_NOEXCEPT; @@ -46,11 +46,11 @@ class TableViewBase { int64_t get_int(size_t column_ndx, size_t row_ndx) const TIGHTDB_NOEXCEPT; bool get_bool(size_t column_ndx, size_t row_ndx) const TIGHTDB_NOEXCEPT; Date get_date(size_t column_ndx, size_t row_ndx) const TIGHTDB_NOEXCEPT; - float get_float(size_t column_ndx, size_t row_ndx) const; // FIXME: Should be modified so it never throws - double get_double(size_t column_ndx, size_t row_ndx) const; // FIXME: Should be modified so it never throws + float get_float(size_t column_ndx, size_t row_ndx) const TIGHTDB_NOEXCEPT; + double get_double(size_t column_ndx, size_t row_ndx) const TIGHTDB_NOEXCEPT; StringData get_string(size_t column_ndx, size_t row_ndx) const TIGHTDB_NOEXCEPT; - BinaryData get_binary(size_t column_ndx, size_t row_ndx) const; // FIXME: Should be modified so it never throws - Mixed 
get_mixed(size_t column_ndx, size_t row_ndx) const; // FIXME: Should be modified so it never throws + BinaryData get_binary(size_t column_ndx, size_t row_ndx) const TIGHTDB_NOEXCEPT; + Mixed get_mixed(size_t column_ndx, size_t row_ndx) const TIGHTDB_NOEXCEPT; DataType get_mixed_type(size_t column_ndx, size_t row_ndx) const TIGHTDB_NOEXCEPT; // Subtables @@ -108,14 +108,6 @@ class TableViewBase { void to_string(std::ostream& out, size_t limit=500) const; protected: - friend class Table; - friend class Query; - - template static R find_all_integer(V*, size_t, int64_t); - template static R find_all_float(V*, size_t, float); - template static R find_all_double(V*, size_t, double); - template static R find_all_string(V*, size_t, StringData); - Table* m_table; Array m_refs; @@ -130,17 +122,25 @@ class TableViewBase { m_table(tv.m_table), m_refs(tv.m_refs, Allocator::get_default()) {} /// Moving constructor. - TableViewBase(TableViewBase*); + TableViewBase(TableViewBase*) TIGHTDB_NOEXCEPT; + + ~TableViewBase() TIGHTDB_NOEXCEPT { m_refs.destroy(); } - ~TableViewBase() { m_refs.destroy(); } + void move_assign(TableViewBase*) TIGHTDB_NOEXCEPT; - void move_assign(TableViewBase*); + Array& get_ref_column() TIGHTDB_NOEXCEPT { return m_refs; } + const Array& get_ref_column() const TIGHTDB_NOEXCEPT { return m_refs; } - Array& get_ref_column() { return m_refs; } - const Array& get_ref_column() const { return m_refs; } + template static R find_all_integer(V*, std::size_t, int64_t); + template static R find_all_float(V*, std::size_t, float); + template static R find_all_double(V*, std::size_t, double); + template static R find_all_string(V*, std::size_t, StringData); private: - size_t find_first_integer(size_t column_ndx, int64_t value) const; + std::size_t find_first_integer(std::size_t column_ndx, int64_t value) const; + + friend class Table; + friend class Query; }; @@ -180,6 +180,7 @@ class ConstTableView; class TableView: public TableViewBase { public: TableView() {} + ~TableView() TIGHTDB_NOEXCEPT {} TableView& operator=(TableView tv) { move_assign(&tv); return *this; } friend TableView move(TableView& tv) { return TableView(&tv); } @@ -226,16 +227,16 @@ class TableView: public TableViewBase { const Table& get_parent() const TIGHTDB_NOEXCEPT { return *m_table; } private: - friend class ConstTableView; - friend class Table; - friend class Query; - friend class TableViewBase; - TableView(Table& parent): TableViewBase(&parent) {} - TableView(TableView* tv): TableViewBase(tv) {} + TableView(TableView* tv) TIGHTDB_NOEXCEPT: TableViewBase(tv) {} TableView find_all_integer(size_t column_ndx, int64_t value); ConstTableView find_all_integer(size_t column_ndx, int64_t value) const; + + friend class ConstTableView; + friend class Table; + friend class Query; + friend class TableViewBase; }; @@ -254,6 +255,7 @@ class TableView: public TableViewBase { class ConstTableView: public TableViewBase { public: ConstTableView() {} + ~ConstTableView() TIGHTDB_NOEXCEPT {} ConstTableView& operator=(ConstTableView tv) { move_assign(&tv); return *this; } friend ConstTableView move(ConstTableView& tv) { return ConstTableView(&tv); } @@ -274,16 +276,18 @@ class ConstTableView: public TableViewBase { const Table& get_parent() const TIGHTDB_NOEXCEPT { return *m_table; } private: + ConstTableView(const Table& parent): TableViewBase(const_cast(&parent)) {} + ConstTableView(ConstTableView* tv) TIGHTDB_NOEXCEPT: TableViewBase(tv) {} + + ConstTableView find_all_integer(size_t column_ndx, int64_t value) const; + friend class TableView; 
friend class Table; friend class Query; friend class TableViewBase; +}; - ConstTableView(const Table& parent): TableViewBase(const_cast(&parent)) {} - ConstTableView(ConstTableView* tv): TableViewBase(tv) {} - ConstTableView find_all_integer(size_t column_ndx, int64_t value) const; -}; // ================================================================================================ @@ -307,14 +311,14 @@ class ConstTableView: public TableViewBase { TIGHTDB_ASSERT(row_ndx < m_refs.size()); -inline TableViewBase::TableViewBase(TableViewBase* tv): +inline TableViewBase::TableViewBase(TableViewBase* tv) TIGHTDB_NOEXCEPT: m_table(tv->m_table), m_refs(tv->m_refs) // Note: This is a moving copy { tv->m_table = 0; } -inline void TableViewBase::move_assign(TableViewBase* tv) +inline void TableViewBase::move_assign(TableViewBase* tv) TIGHTDB_NOEXCEPT { m_table = tv->m_table; tv->m_table = 0; @@ -353,7 +357,8 @@ inline DataType TableViewBase::get_column_type(size_t column_ndx) const TIGHTDB_ // Getters -inline int64_t TableViewBase::get_int(size_t column_ndx, size_t row_ndx) const TIGHTDB_NOEXCEPT +inline int64_t TableViewBase::get_int(size_t column_ndx, size_t row_ndx) const + TIGHTDB_NOEXCEPT { TIGHTDB_ASSERT_INDEX(column_ndx, row_ndx); @@ -361,7 +366,8 @@ inline int64_t TableViewBase::get_int(size_t column_ndx, size_t row_ndx) const T return m_table->get_int(column_ndx, real_ndx); } -inline bool TableViewBase::get_bool(size_t column_ndx, size_t row_ndx) const TIGHTDB_NOEXCEPT +inline bool TableViewBase::get_bool(size_t column_ndx, size_t row_ndx) const + TIGHTDB_NOEXCEPT { TIGHTDB_ASSERT_INDEX_AND_TYPE(column_ndx, row_ndx, type_Bool); @@ -369,7 +375,8 @@ inline bool TableViewBase::get_bool(size_t column_ndx, size_t row_ndx) const TIG return m_table->get_bool(column_ndx, real_ndx); } -inline Date TableViewBase::get_date(size_t column_ndx, size_t row_ndx) const TIGHTDB_NOEXCEPT +inline Date TableViewBase::get_date(size_t column_ndx, size_t row_ndx) const + TIGHTDB_NOEXCEPT { TIGHTDB_ASSERT_INDEX_AND_TYPE(column_ndx, row_ndx, type_Date); @@ -378,6 +385,7 @@ inline Date TableViewBase::get_date(size_t column_ndx, size_t row_ndx) const TIG } inline float TableViewBase::get_float(size_t column_ndx, size_t row_ndx) const + TIGHTDB_NOEXCEPT { TIGHTDB_ASSERT_INDEX_AND_TYPE(column_ndx, row_ndx, type_Float); @@ -386,6 +394,7 @@ inline float TableViewBase::get_float(size_t column_ndx, size_t row_ndx) const } inline double TableViewBase::get_double(size_t column_ndx, size_t row_ndx) const + TIGHTDB_NOEXCEPT { TIGHTDB_ASSERT_INDEX_AND_TYPE(column_ndx, row_ndx, type_Double); @@ -403,6 +412,7 @@ inline StringData TableViewBase::get_string(size_t column_ndx, size_t row_ndx) c } inline BinaryData TableViewBase::get_binary(size_t column_ndx, size_t row_ndx) const + TIGHTDB_NOEXCEPT { TIGHTDB_ASSERT_INDEX_AND_TYPE(column_ndx, row_ndx, type_Binary); @@ -411,6 +421,7 @@ inline BinaryData TableViewBase::get_binary(size_t column_ndx, size_t row_ndx) c } inline Mixed TableViewBase::get_mixed(size_t column_ndx, size_t row_ndx) const + TIGHTDB_NOEXCEPT { TIGHTDB_ASSERT_INDEX_AND_TYPE(column_ndx, row_ndx, type_Mixed); diff --git a/src/tightdb/thread.hpp b/src/tightdb/thread.hpp index f8d83c2d41b..1ad5747dcfc 100644 --- a/src/tightdb/thread.hpp +++ b/src/tightdb/thread.hpp @@ -39,7 +39,7 @@ namespace tightdb { class Thread { public: Thread(); - ~Thread(); + ~Thread() TIGHTDB_NOEXCEPT; template explicit Thread(F func); @@ -169,7 +169,7 @@ template inline void Thread::start(F func) m_joinable = true; } -inline Thread::~Thread() 
+inline Thread::~Thread() TIGHTDB_NOEXCEPT { if (m_joinable) { std::terminate(); } diff --git a/test/testarray.cpp b/test/testarray.cpp index 33d8c76c90e..537e6614e44 100644 --- a/test/testarray.cpp +++ b/test/testarray.cpp @@ -21,7 +21,7 @@ struct db_setup_array { Array db_setup_array::c; -void hasZeroByte(int64_t value, size_t reps) +void has_zero_byte(int64_t value, size_t reps) { Array a; Array r; @@ -795,18 +795,18 @@ TEST(findallint7) r.destroy(); } -// Tests the case where a value does *not* exist in one entire 64-bit chunk (triggers the 'if (hasZeroByte) break;' condition) -TEST(FindhasZeroByte) +// Tests the case where a value does *not* exist in one entire 64-bit chunk (triggers the 'if (has_zero_byte()) break;' condition) +TEST(FindHasZeroByte) { // we want at least 1 entire 64-bit chunk-test, and we also want a remainder-test, so we chose n to be a prime > 64 size_t n = 73; - hasZeroByte(1, n); // width = 1 - hasZeroByte(3, n); // width = 2 - hasZeroByte(13, n); // width = 4 - hasZeroByte(100, n); // 8 - hasZeroByte(10000, n); // 16 - hasZeroByte(100000, n); // 32 - hasZeroByte(8000000000LL, n); // 64 + has_zero_byte(1, n); // width = 1 + has_zero_byte(3, n); // width = 2 + has_zero_byte(13, n); // width = 4 + has_zero_byte(100, n); // 8 + has_zero_byte(10000, n); // 16 + has_zero_byte(100000, n); // 32 + has_zero_byte(8000000000LL, n); // 64 } // New find test for SSE search, to trigger partial finds (see FindSSE()) before and after the aligned data area From e46d017f2ac4223dcd1c6c0857f619dd8bd2167f Mon Sep 17 00:00:00 2001 From: Kristian Spangsege Date: Fri, 23 Aug 2013 13:32:43 +0200 Subject: [PATCH 10/20] changelog.txt updated --- changelog.txt | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/changelog.txt b/changelog.txt index 10d7522fb06..9f9ab3a8d50 100644 --- a/changelog.txt +++ b/changelog.txt @@ -12,9 +12,20 @@ Format: ================== + +2013-08-23 (Kristian Spangsege) ++ Stop throwing from destructors (all), and from SharedGroup::rollback() and SharedGroup::end_read(). ++ General stability and error checking improvements. +! Fixed many bugs relating to Group::commit(). +! Fixed some bugs relating to SharedGroup::commit(). +! Fixed bug in TableViewBase::sort(). + + + 2013-08-18 (Kenneth Geisshirt) ! Group::to_string formatting was incorrect. See https://app.asana.com/0/1441391972580/5659532773181. + 2013-08-03 (Kristian Spangsege) + Table::find_sorted_int() replaced by Table::lower_bound_int() and Table::upper_bound_int() as these are standardized and provide more flexibility. + Addition of Table::lower_bound_bool() and Table::upper_bound_bool().
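
The destructor changes summarized in the changelog entry above all follow one pattern: no exception may escape a destructor, so cleanup either succeeds, swallows its error, or terminates deliberately. A minimal sketch of the pattern for reference (illustration only, not taken from any of the patches; the Guard and release() names are invented, and TIGHTDB_NOEXCEPT expands to an exception specification where the compiler supports one):

    class Guard {
    public:
        ~Guard() TIGHTDB_NOEXCEPT
        {
            try {
                release(); // cleanup that could throw
            }
            catch (...) {
                // Swallow the error. Letting an exception propagate out of a
                // destructor during stack unwinding would end in
                // std::terminate() anyway, which Thread::~Thread() above
                // invokes explicitly for a still-joinable thread.
            }
        }
    private:
        void release();
    };
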
From 7fc2e4ed9797a5a70b7047745bbf25f75e463072 Mon Sep 17 00:00:00 2001 From: Kristian Spangsege Date: Sat, 24 Aug 2013 16:27:58 +0200 Subject: [PATCH 11/20] No longer attempting to place database files in /tmp/ --- test/testshared.cpp | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/test/testshared.cpp b/test/testshared.cpp index 4e33aa60abe..c4e4bb4a6ee 100644 --- a/test/testshared.cpp +++ b/test/testshared.cpp @@ -820,9 +820,9 @@ TEST(Shared_FromSerialized) TEST(StringIndex_Bug1) { - File::try_remove("x.tightdb"); - File::try_remove("x.tightdb.lock"); - SharedGroup sg("x.tightdb"); + File::try_remove("test.tightdb"); + File::try_remove("test.tightdb.lock"); + SharedGroup sg("test.tightdb"); { WriteTransaction wt(sg); @@ -903,42 +903,42 @@ TEST(StringIndex_Bug2) TEST(Shared_MixedWithNonShared) { - File::try_remove("/tmp/x.tightdb"); + File::try_remove("test.tightdb"); { // Create empty file without free-space tracking Group g; - g.write("/tmp/x.tightdb"); + g.write("test.tightdb"); } { // See if we can modify with non-shared group - Group g("/tmp/x.tightdb", Group::mode_ReadWrite); + Group g("test.tightdb", Group::mode_ReadWrite); g.get_table("foo"); // Add table "foo" g.commit(); } - File::try_remove("/tmp/x.tightdb"); + File::try_remove("test.tightdb"); { // Create non-empty file without free-space tracking Group g; g.get_table("x"); - g.write("/tmp/x.tightdb"); + g.write("test.tightdb"); } { // See if we can modify with non-shared group - Group g("/tmp/x.tightdb", Group::mode_ReadWrite); + Group g("test.tightdb", Group::mode_ReadWrite); g.get_table("foo"); // Add table "foo" g.commit(); } - File::try_remove("/tmp/x.tightdb"); + File::try_remove("test.tightdb"); { // Create empty file without free-space tracking Group g; - g.write("/tmp/x.tightdb"); + g.write("test.tightdb"); } { // See if we can read and modify with shared group - SharedGroup sg("/tmp/x.tightdb"); + SharedGroup sg("test.tightdb"); { ReadTransaction rt(sg); CHECK(!rt.has_table("foo")); @@ -950,16 +950,16 @@ TEST(Shared_MixedWithNonShared) } } - File::try_remove("/tmp/x.tightdb"); + File::try_remove("test.tightdb"); { // Create non-empty file without free-space tracking Group g; g.get_table("x"); - g.write("/tmp/x.tightdb"); + g.write("test.tightdb"); } { // See if we can read and modify with shared group - SharedGroup sg("/tmp/x.tightdb"); + SharedGroup sg("test.tightdb"); { ReadTransaction rt(sg); CHECK(!rt.has_table("foo")); @@ -971,7 +971,7 @@ TEST(Shared_MixedWithNonShared) } } { - SharedGroup sg("/tmp/x.tightdb"); + SharedGroup sg("test.tightdb"); { ReadTransaction rt(sg); CHECK(rt.has_table("foo")); } } { // Access using non-shared group - Group g("/tmp/x.tightdb", Group::mode_ReadWrite); + Group g("test.tightdb", Group::mode_ReadWrite); g.commit(); } { // Modify using non-shared group - Group g("/tmp/x.tightdb", Group::mode_ReadWrite); + Group g("test.tightdb", Group::mode_ReadWrite); g.get_table("bar"); // Add table "bar" g.commit(); } { // See if we can still access using shared group - SharedGroup sg("/tmp/x.tightdb"); + SharedGroup sg("test.tightdb"); { ReadTransaction rt(sg); CHECK(rt.has_table("foo")); @@ -1004,11 +1004,11 @@ TEST(Shared_MixedWithNonShared) } } { - SharedGroup sg("/tmp/x.tightdb"); + SharedGroup sg("test.tightdb"); { ReadTransaction rt(sg); CHECK(rt.has_table("baz")); } } - File::remove("/tmp/x.tightdb"); + File::remove("test.tightdb"); } From
1bb2b73b8880a1f715c8476da2dbc27dc7f411ac Mon Sep 17 00:00:00 2001 From: Kristian Spangsege Date: Sun, 25 Aug 2013 15:15:39 +0200 Subject: [PATCH 12/20] Avoid superfluous use of 'const' and use UniquePtr<> instead of reinventing the wheel --- src/tightdb/buffer.hpp | 7 +++++- test/test_transactions.cpp | 47 +++++++++++++++----------------- 2 files changed, 24 insertions(+), 30 deletions(-) diff --git a/src/tightdb/buffer.hpp b/src/tightdb/buffer.hpp index 7f538ad7dd0..6a9fdb124c4 100644 --- a/src/tightdb/buffer.hpp +++ b/src/tightdb/buffer.hpp @@ -37,6 +37,7 @@ template<class T> class Buffer { const T& operator[](std::size_t i) const TIGHTDB_NOEXCEPT { return m_data[i]; } Buffer() TIGHTDB_NOEXCEPT: m_data(0), m_size(0) {} + Buffer(std::size_t size); void set_size(std::size_t); @@ -57,7 +58,11 @@ template<class T> class Buffer { // Implementation: -template<class T> void Buffer<T>::set_size(std::size_t size) +template<class T> inline Buffer<T>::Buffer(std::size_t size): m_data(new T[size]), m_size(size) +{ +} + +template<class T> inline void Buffer<T>::set_size(std::size_t size) { m_data.reset(new T[size]); m_size = size; diff --git a/test/test_transactions.cpp b/test/test_transactions.cpp index 15fe76cd608..dd844a995db 100644 --- a/test/test_transactions.cpp +++ b/test/test_transactions.cpp @@ -19,17 +19,6 @@ using namespace tightdb; namespace { -template<class T> struct mem_buf { - mem_buf(std::size_t size): m_ptr(new T[size]) {} - ~mem_buf() { delete[] m_ptr; } - T* get() { return m_ptr; } - const T* get() const { return m_ptr; } - T& operator[](std::size_t i) { return m_ptr[i]; } - const T& operator[](std::size_t i) const { return m_ptr[i]; } -private: - T* m_ptr; -}; - enum MyEnum { moja, mbili, tatu, nne, tano, sita, saba, nane, tisa, kumi, kumi_na_moja, kumi_na_mbili, kumi_na_tatu }; @@ -183,8 +172,8 @@ void round(SharedGroup& db, int index) MyTable::Ref table = wt.get_table<MyTable>("my_table"); MySubtable::Ref subtable = table[0].eta; MySubsubtable::Ref subsubtable = subtable[0].bar; - const size_t size = ((512 + index%1024) * 1024) % max_blob_size; - mem_buf<char> data(size); + size_t size = ((512 + index%1024) * 1024) % max_blob_size; + UniquePtr<char[]> data(new char[size]); for (size_t i=0; i<size; ++i) data[i] = static_cast<char>((i+index) * 677 % 256); subsubtable[index].binary = BinaryData(data.get(), size); @@ -202,8 +191,8 @@ void round(SharedGroup& db, int index) { WriteTransaction wt(db); // Write transaction #11 MyTable::Ref table = wt.get_table<MyTable>("my_table"); - const size_t size = ((512 + (333 + 677*index) % 1024) * 1024) % max_blob_size; - mem_buf<char> data(size); + size_t size = ((512 + (333 + 677*index) % 1024) * 1024) % max_blob_size; + UniquePtr<char[]> data(new char[size]); for (size_t i=0; i<size; ++i) data[i] = static_cast<char>((i+index+73) * 677 % 256); table[index%2].zeta = BinaryData(data.get(), size); @@ -226,8 +215,8 @@ void round(SharedGroup& db, int index) { WriteTransaction wt(db); // Write transaction #13 MyTable::Ref table = wt.get_table<MyTable>("my_table"); - const size_t size = (512 + (333 + 677*index) % 1024) * 327; - mem_buf<char> data(size); + size_t size = (512 + (333 + 677*index) % 1024) * 327; + UniquePtr<char[]> data(new char[size]); for (size_t i=0; i<size; ++i) data[i] = static_cast<char>((i+index+73) * 677 % 256); table[(index+1)%2].zeta = BinaryData(data.get(), size); @@ -247,10 +236,10 @@ void round(SharedGroup& db, int index) subtable->add(); subtable->add(); } - const int n = 1 + 13 / (1+index); + int n = 1 + 13 / (1+index); for (int i=0; iadd(0, false, moja, time_t(), "alpha", bin, 0, mix); subtable->add(1, false, mbili, time_t(), "beta", bin, 0, mix); subtable->add(2, false, tatu, time_t(), "gamma", bin, 0, mix); @@ -285,11 +274,11 @@ void
round(SharedGroup& db, int index) else { subsubtable = subtable[0].theta.set_subtable(); } - const size_t size = (17 + 233*index) % 523; - mem_buf data(size); + size_t size = (17 + 233*index) % 523; + UniquePtr data(new char[size]); for (size_t i=0; i((i+index+79) * 677 % 256); - const BinaryData bin(data.get(), size); + BinaryData bin(data.get(), size); subsubtable->add(0, false, nne, 0, "", bin, 0, Mixed(int64_t(index*13))); subsubtable->add(1, false, tano, 0, "", bin, 0, Mixed(index%2==0?false:true)); subsubtable->add(2, false, sita, 0, "", bin, 0, Mixed(Date(index*13))); @@ -310,7 +299,7 @@ void round(SharedGroup& db, int index) else { subsubtable = subtable[1].theta.set_subtable(); } - const int num = 8; + int num = 8; for (int i=0; iadd(i, 0); } @@ -320,7 +309,7 @@ void round(SharedGroup& db, int index) } for (int i=0; i<3; ++i) { for (int j=0; jadd((i-j)*index-19, bin); } } @@ -348,7 +337,7 @@ void round(SharedGroup& db, int index) else { subsubtable = subtable[2].theta.set_subtable(); } - const int num = 9; + int num = 9; for (int i=0; iadd(i, BinaryData(0,0)); } @@ -370,7 +359,7 @@ void round(SharedGroup& db, int index) // FIXME: Reenable this when it works!!! // subsubtable->column().value.set_index(); } - const int num = 9; + int num = 9; for (int i=0; iadd(i, BinaryData(0,0)); } @@ -513,8 +502,8 @@ TEST(Transactions) MySubsubtable::ConstRef subsubtable = subtable[0].bar; for (int i=0; i data(size); + size_t size = ((512 + i%1024) * 1024) % max_blob_size; + UniquePtr data(new char[size]); for (size_t j=0; j((j+i) * 677 % 256); CHECK_EQUAL(BinaryData(data.get(), size), subsubtable[i].binary); From 2cd8f48092d7304ed6e1a3e6e80c7a5b3b500ec5 Mon Sep 17 00:00:00 2001 From: Kristian Spangsege Date: Sun, 25 Aug 2013 18:14:06 +0200 Subject: [PATCH 13/20] Typos fixed --- src/tightdb/file.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tightdb/file.hpp b/src/tightdb/file.hpp index 709f65b152d..9fcd84a1781 100644 --- a/src/tightdb/file.hpp +++ b/src/tightdb/file.hpp @@ -251,8 +251,8 @@ class File { enum { /// If possible, disable opportunistic flushing of dirted /// pages of a memory mapped file to physical medium. On some - /// systems this cannot be disable. On other systems it is the - /// default behavior. En explicit call to sync_map() will + /// systems this cannot be disabled. On other systems it is + /// the default behavior. An explicit call to sync_map() will /// flush the buffers regardless of whether this flag is /// specified or not. 
map_NoSync = 1 From 298188d91e1f8a4c978cbbd509f8299888263ce5 Mon Sep 17 00:00:00 2001 From: Kristian Spangsege Date: Sun, 25 Aug 2013 20:41:21 +0200 Subject: [PATCH 14/20] Use ScopedMutexLock when possible, and keep track of exception sources --- src/tightdb/group_shared.cpp | 28 ++++++++++++++-------------- src/tightdb/group_writer.cpp | 2 +- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/tightdb/group_shared.cpp b/src/tightdb/group_shared.cpp index ccb962e07ae..5a0e3a0d03f 100644 --- a/src/tightdb/group_shared.cpp +++ b/src/tightdb/group_shared.cpp @@ -321,7 +321,7 @@ const Group& SharedGroup::begin_read() ScopedMutexLock lock(&info->readmutex); if (TIGHTDB_UNLIKELY(info->infosize > m_reader_map.get_size())) { - m_reader_map.remap(m_file, File::access_ReadWrite, info->infosize); + m_reader_map.remap(m_file, File::access_ReadWrite, info->infosize); // Throws } // Get the current top ref @@ -332,7 +332,7 @@ const Group& SharedGroup::begin_read() // Update reader list if (ringbuf_is_empty()) { ReadCount r2 = { info->current_version, 1 }; - ringbuf_put(r2); + ringbuf_put(r2); // Throws } else { ReadCount& r = ringbuf_get_last(); @@ -341,14 +341,14 @@ const Group& SharedGroup::begin_read() } else { ReadCount r2 = { info->current_version, 1 }; - ringbuf_put(r2); + ringbuf_put(r2); // Throws } } } // Make sure the group is up-to-date. // A zero ref means that the file has just been created. - m_group.update_from_shared(new_top_ref, new_file_size); + m_group.update_from_shared(new_top_ref, new_file_size); // Throws #ifdef TIGHTDB_DEBUG m_group.Verify(); @@ -563,10 +563,10 @@ void SharedGroup::ringbuf_put(const ReadCount& v) // Check if the ringbuf is full // (there should always be one empty entry) size_t size = ringbuf_size(); - bool is_full = (size == (info->capacity)); + bool is_full = size == info->capacity; if (TIGHTDB_UNLIKELY(is_full)) { - ringbuf_expand(); + ringbuf_expand(); // Throws info = m_reader_map.get_addr(); } @@ -587,8 +587,8 @@ void SharedGroup::ringbuf_expand() size_t new_file_size = base_file_size + (sizeof (ReadCount) * new_entry_count); // Extend file - m_file.alloc(0, new_file_size); - m_reader_map.remap(m_file, File::access_ReadWrite, new_file_size); + m_file.alloc(0, new_file_size); // Throws + m_reader_map.remap(m_file, File::access_ReadWrite, new_file_size); // Throws info = m_reader_map.get_addr(); // Move existing entries (if there is a split) @@ -718,8 +718,9 @@ void SharedGroup::zero_free_space() size_t current_version; size_t readlock_version; size_t file_size; - pthread_mutex_lock(&info->readmutex); + { + ScopedMutexLock lock(&info->readmutex); current_version = info->current_version + 1; file_size = to_size_t(info->filesize); @@ -731,7 +732,6 @@ void SharedGroup::zero_free_space() readlock_version = r.version; } } - pthread_mutex_unlock(&info->readmutex); m_group.zero_free_space(file_size, readlock_version); } @@ -762,7 +762,7 @@ void SharedGroup::low_level_commit(size_t new_version) ScopedMutexLock lock(&info->readmutex); if (TIGHTDB_UNLIKELY(info->infosize > m_reader_map.get_size())) { - m_reader_map.remap(m_file, File::access_ReadWrite, info->infosize); + m_reader_map.remap(m_file, File::access_ReadWrite, info->infosize); // Throws } if (ringbuf_is_empty()) { @@ -783,18 +783,18 @@ void SharedGroup::low_level_commit(size_t new_version) // info->writemutex. This is true (not a data race) becasue // info->current_version is modified only while // info->writemutex is locked. 
- m_group.init_shared(); + m_group.init_shared(); // Throws } // Do the actual commit TIGHTDB_ASSERT(m_group.m_top.is_attached()); TIGHTDB_ASSERT(readlock_version <= new_version); - GroupWriter out(m_group); + GroupWriter out(m_group); // Throws m_group.m_readlock_version = readlock_version; out.set_versions(new_version, readlock_version); // Recursively write all changed arrays to end of file bool do_sync = info->flags == durability_Full; - ref_type new_top_ref = out.commit(do_sync); + ref_type new_top_ref = out.commit(do_sync); // Throws size_t new_file_size = out.get_file_size(); // Update reader info diff --git a/src/tightdb/group_writer.cpp b/src/tightdb/group_writer.cpp index 682007d5ecb..907f9a09a04 100644 --- a/src/tightdb/group_writer.cpp +++ b/src/tightdb/group_writer.cpp @@ -11,7 +11,7 @@ using namespace tightdb; GroupWriter::GroupWriter(Group& group) : m_group(group), m_alloc(group.m_alloc), m_current_version(0) { - m_file_map.map(m_alloc.m_file, File::access_ReadWrite, m_alloc.get_baseline()); + m_file_map.map(m_alloc.m_file, File::access_ReadWrite, m_alloc.get_baseline()); // Throws } From dfc370031869cabc85b52e0e988cc59e5e686e74 Mon Sep 17 00:00:00 2001 From: Kristian Spangsege Date: Mon, 26 Aug 2013 02:02:36 +0200 Subject: [PATCH 15/20] New conditional compilation flag TIGHTDB_ALLOC_SET_ZERO to help in hunting down memory and thread synchronization bugs --- src/tightdb/alloc.cpp | 31 +++++++++++++++-------- src/tightdb/alloc.hpp | 3 ++- src/tightdb/alloc_slab.cpp | 38 ++++++++++++++++------------- src/tightdb/alloc_slab.hpp | 3 ++- src/tightdb/array.cpp | 50 ++++++++++++++++++-------------------- src/tightdb/array.hpp | 15 ++++++++++-- 6 files changed, 83 insertions(+), 57 deletions(-) diff --git a/src/tightdb/alloc.cpp b/src/tightdb/alloc.cpp index f40c35c781e..1739590b502 100644 --- a/src/tightdb/alloc.cpp +++ b/src/tightdb/alloc.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include @@ -40,19 +41,29 @@ class DefaultAllocator: public tightdb::Allocator { MemRef alloc(size_t size) TIGHTDB_OVERRIDE { char* addr = static_cast(malloc(size)); - if (TIGHTDB_LIKELY(addr)) - return MemRef(addr, reinterpret_cast(addr)); - TIGHTDB_ASSERT(errno == ENOMEM); - throw bad_alloc(); + if (TIGHTDB_UNLIKELY(!addr)) { + TIGHTDB_ASSERT(errno == ENOMEM); + throw bad_alloc(); + } +#ifdef TIGHTDB_ALLOC_SET_ZERO + fill(addr, addr+size, 0); +#endif + return MemRef(addr, reinterpret_cast(addr)); } - MemRef realloc_(ref_type, const char* addr, size_t size) TIGHTDB_OVERRIDE + MemRef realloc_(ref_type, const char* addr, size_t old_size, size_t new_size) TIGHTDB_OVERRIDE { - char* new_addr = static_cast(realloc(const_cast(addr), size)); - if (TIGHTDB_LIKELY(new_addr)) - return MemRef(new_addr, reinterpret_cast(new_addr)); - TIGHTDB_ASSERT(errno == ENOMEM); - throw bad_alloc(); + char* new_addr = static_cast(realloc(const_cast(addr), new_size)); + if (TIGHTDB_UNLIKELY(!new_addr)) { + TIGHTDB_ASSERT(errno == ENOMEM); + throw bad_alloc(); + } +#ifdef TIGHTDB_ALLOC_SET_ZERO + fill(new_addr+old_size, new_addr+new_size, 0); +#else + static_cast(old_size); +#endif + return MemRef(new_addr, reinterpret_cast(new_addr)); } void free_(ref_type, const char* addr) TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE diff --git a/src/tightdb/alloc.hpp b/src/tightdb/alloc.hpp index fafdd57f388..179b233283c 100644 --- a/src/tightdb/alloc.hpp +++ b/src/tightdb/alloc.hpp @@ -78,7 +78,8 @@ class Allocator { /// /// Note: The underscore has been added because the name `realloc` /// would conflict with a macro on the Windows 
platform. - virtual MemRef realloc_(ref_type ref, const char* addr, std::size_t size) = 0; + virtual MemRef realloc_(ref_type ref, const char* addr, std::size_t old_size, + std::size_t new_size) = 0; /// Release the specified chunk of memory. /// diff --git a/src/tightdb/alloc_slab.cpp b/src/tightdb/alloc_slab.cpp index b03f3780b94..a1bb18c3582 100644 --- a/src/tightdb/alloc_slab.cpp +++ b/src/tightdb/alloc_slab.cpp @@ -105,6 +105,9 @@ MemRef SlabAlloc::alloc(size_t size) #endif char* addr = translate(ref); +#ifdef TIGHTDB_ALLOC_SET_ZERO + fill(addr, addr+size, 0); +#endif return MemRef(addr, ref); } } @@ -133,9 +136,9 @@ MemRef SlabAlloc::alloc(size_t size) // Add to slab table size_t new_ref_end = curr_ref_end + new_size; // FIXME: intptr_t is not guaranteed to exists, not even in C++11 - uintptr_t addr = reinterpret_cast(slab); + uintptr_t slab_2 = reinterpret_cast(slab); // FIXME: Dangerous conversions to int64_t here (undefined behavior according to C++11) - m_slabs.add(int64_t(new_ref_end), int64_t(addr)); + m_slabs.add(int64_t(new_ref_end), int64_t(slab_2)); // Update free list size_t unused = new_size - size; @@ -145,7 +148,7 @@ MemRef SlabAlloc::alloc(size_t size) m_free_space.add(int64_t(ref), int64_t(unused)); } - char* addr_2 = slab; + char* addr = slab; size_t ref = curr_ref_end; #ifdef TIGHTDB_DEBUG @@ -153,7 +156,11 @@ MemRef SlabAlloc::alloc(size_t size) cerr << "Alloc ref: " << ref << " size: " << size << "\n"; #endif - return MemRef(addr_2, ref); +#ifdef TIGHTDB_ALLOC_SET_ZERO + fill(addr, addr+size, 0); +#endif + + return MemRef(addr, ref); } @@ -233,33 +240,30 @@ void SlabAlloc::free_(ref_type ref, const char* addr) TIGHTDB_NOEXCEPT } -MemRef SlabAlloc::realloc_(size_t ref, const char* addr, size_t size) +MemRef SlabAlloc::realloc_(size_t ref, const char* addr, size_t old_size, size_t new_size) { TIGHTDB_ASSERT(translate(ref) == addr); - TIGHTDB_ASSERT(0 < size); - TIGHTDB_ASSERT((size & 0x7) == 0); // only allow sizes that are multiples of 8 + TIGHTDB_ASSERT(0 < new_size); + TIGHTDB_ASSERT((new_size & 0x7) == 0); // only allow sizes that are multiples of 8 // FIXME: Check if we can extend current space. In that case, - // remember to check m_free_space_invalid. + // remember to check m_free_space_invalid. Also remember to fill + // with zero if TIGHTDB_ALLOC_SET_ZERO is defined. 
// Allocate new space - MemRef new_mem = alloc(size); // Throws - - /*if (doCopy) {*/ //TODO: allow realloc without copying - // Get size of old segment - size_t old_size = Array::get_capacity_from_header(addr); + MemRef new_mem = alloc(new_size); // Throws // Copy existing segment - copy(addr, addr+old_size, new_mem.m_addr); + char* new_addr = new_mem.m_addr; + copy(addr, addr+old_size, new_addr); // Add old segment to freelist - free_(ref, addr); // FIXME: Unfortunately, this one can throw - //} + free_(ref, addr); #ifdef TIGHTDB_DEBUG if (m_debug_out) { cerr << "Realloc orig_ref: " << ref << " old_size: " << old_size << " " - "new_ref: " << new_mem.m_ref << " new_size: " << size << "\n"; + "new_ref: " << new_mem.m_ref << " new_size: " << new_size << "\n"; } #endif // TIGHTDB_DEBUG diff --git a/src/tightdb/alloc_slab.hpp b/src/tightdb/alloc_slab.hpp index a65e051e4a9..09c668c0b80 100644 --- a/src/tightdb/alloc_slab.hpp +++ b/src/tightdb/alloc_slab.hpp @@ -131,7 +131,8 @@ class SlabAlloc: public Allocator { bool remap(std::size_t file_size); MemRef alloc(std::size_t size) TIGHTDB_OVERRIDE; - MemRef realloc_(ref_type, const char*, std::size_t size) TIGHTDB_OVERRIDE; + MemRef realloc_(ref_type, const char*, std::size_t old_size, + std::size_t new_size) TIGHTDB_OVERRIDE; // FIXME: It would be very nice if we could detect an invalid free operation in debug mode void free_(ref_type, const char*) TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE; char* translate(ref_type) const TIGHTDB_NOEXCEPT TIGHTDB_OVERRIDE; diff --git a/src/tightdb/array.cpp b/src/tightdb/array.cpp index fd238c6d23d..62518ae9ea8 100644 --- a/src/tightdb/array.cpp +++ b/src/tightdb/array.cpp @@ -312,7 +312,7 @@ void Array::insert(size_t ndx, int64_t value) // Check if we need to copy before modifying copy_on_write(); // Throws - Getter old_getter = m_getter; // Save old getter before potential width expansion + Getter old_getter = m_getter; // Save old getter before potential width expansion bool do_expand = value < m_lbound || value > m_ubound; if (do_expand) { @@ -382,7 +382,8 @@ void Array::resize(size_t count) void Array::ensure_minimum_width(int64_t value) { - if (value >= m_lbound && value <= m_ubound) return; + if (value >= m_lbound && value <= m_ubound) + return; // Check if we need to copy before modifying copy_on_write(); // Throws @@ -391,7 +392,7 @@ void Array::ensure_minimum_width(int64_t value) size_t width = bit_width(value); TIGHTDB_ASSERT(width > m_width); - Getter old_getter = m_getter; // Save old getter before width expansion + Getter old_getter = m_getter; // Save old getter before width expansion alloc(m_size, width); // Throws set_width(width); @@ -1061,7 +1062,7 @@ size_t Array::count(int64_t value) const return count; } - // Sum remainding elements + // Check remaining elements for (; i < end; ++i) if (value == get(i)) ++count; @@ -1080,7 +1081,7 @@ size_t Array::CalcByteLen(size_t count, size_t width) const size_t Array::CalcItemCount(size_t bytes, size_t width) const TIGHTDB_NOEXCEPT { if (width == 0) - return numeric_limits::max(); // zero width gives infinite space + return numeric_limits::max(); // Zero width gives "infinite" space size_t bytes_data = bytes - header_size; // ignore 8 byte header size_t total_bits = bytes_data * 8; @@ -1179,6 +1180,7 @@ void Array::copy_on_write() m_ref = mref.m_ref; m_data = get_data_from_header(new_begin); m_capacity = CalcItemCount(new_size, m_width); + TIGHTDB_ASSERT(m_capacity > 0); // Update capacity in header set_header_capacity(new_size); // uses m_data to find header, 
so m_data must be initialized correctly first @@ -1211,15 +1213,20 @@ ref_type Array::create_empty_array(Type type, WidthType width_type, Allocator& a } +// FIXME: It may be worth trying to combine this with copy_on_write() +// to avoid two copies. void Array::alloc(size_t size, size_t width) { + TIGHTDB_ASSERT(is_attached()); + TIGHTDB_ASSERT(m_capacity > 0); if (m_capacity < size || width != m_width) { size_t needed_bytes = CalcByteLen(size, width); - size_t capacity_bytes = m_capacity ? get_capacity_from_header() : 0; // space currently available in bytes + size_t orig_capacity_bytes = get_capacity_from_header(); + size_t capacity_bytes = orig_capacity_bytes; if (capacity_bytes < needed_bytes) { // Double to avoid too many reallocs (or initialize to initial size) - capacity_bytes = capacity_bytes ? capacity_bytes * 2 : initial_capacity; + capacity_bytes = capacity_bytes * 2; // FIXME: Highly prone to overflow on 32-bit systems // If doubling is not enough, expand enough to fit if (capacity_bytes < needed_bytes) { @@ -1229,31 +1236,22 @@ void Array::alloc(size_t size, size_t width) capacity_bytes += rest; // 64bit align } - // Allocate and initialize header - MemRef mem_ref; - char* header; - if (!m_data) { - mem_ref = m_alloc.alloc(capacity_bytes); // Throws - header = mem_ref.m_addr; - init_header(header, !m_isNode, m_hasRefs, GetWidthType(), int(width), size, - capacity_bytes); - } - else { - header = get_header_from_data(m_data); - mem_ref = m_alloc.realloc_(m_ref, header, capacity_bytes); // Throws - header = mem_ref.m_addr; - set_header_width(int(width), header); - set_header_size(size, header); - set_header_capacity(capacity_bytes, header); - } + // Allocate and update header + char* header = get_header_from_data(m_data); + MemRef mem_ref = m_alloc.realloc_(m_ref, header, orig_capacity_bytes, + capacity_bytes); // Throws + header = mem_ref.m_addr; + set_header_width(int(width), header); + set_header_size(size, header); + set_header_capacity(capacity_bytes, header); - // Update wrapper objects + // Update this accessor and its ancestors m_ref = mem_ref.m_ref; m_data = get_data_from_header(header); m_capacity = CalcItemCount(capacity_bytes, width); // FIXME: Trouble when this one throws. We will then leave // this array instance in a corrupt state - update_parent(); + update_parent(); // Throws return; } diff --git a/src/tightdb/array.hpp b/src/tightdb/array.hpp index 3b0d8033021..9dc4628ade6 100644 --- a/src/tightdb/array.hpp +++ b/src/tightdb/array.hpp @@ -745,7 +745,7 @@ class Array: public ArrayParent { void CreateFromHeaderDirect(char* header, ref_type = 0) TIGHTDB_NOEXCEPT; - virtual std::size_t CalcByteLen(std::size_t count, std::size_t width) const; + virtual std::size_t CalcByteLen(std::size_t count, std::size_t width) const; // Not 8-byte aligned virtual std::size_t CalcItemCount(std::size_t bytes, std::size_t width) const TIGHTDB_NOEXCEPT; virtual WidthType GetWidthType() const { return wtype_Bits; } @@ -804,7 +804,10 @@ class Array: public ArrayParent { Allocator& m_alloc; protected: + /// The total size in bytes (including the header) of a new empty + /// array. Must be a multiple of 8 (i.e., 64-bit aligned). 
static const std::size_t initial_capacity = 128; + static ref_type create_empty_array(Type, WidthType, Allocator&); static ref_type clone(const char* header, Allocator& alloc, Allocator& clone_alloc); @@ -1395,7 +1398,15 @@ inline std::size_t Array::get_byte_size() const TIGHTDB_NOEXCEPT const char* header = get_header_from_data(m_data); switch (get_wtype_from_header(header)) { case wtype_Bits: { - std::size_t num_bits = (m_size * m_width); // FIXME: Prone to overflow + // FIXME: The following arithmetic could overflow, that + // is, even though both the total number of elements and + // the total number of bytes can be represented in + // uint_fast64_t, the total number of bits may not + // fit. Note that "num_bytes = width < 8 ? size / (8 / + // width) : size * (width / 8)" would be guaranteed to + // never overflow, but it potentially involves two slow + // divisions. + uint_fast64_t num_bits = uint_fast64_t(m_size) * m_width; num_bytes = num_bits / 8; if (num_bits & 0x7) ++num_bytes; From 4ffc6b6e1c0d98ad7423b4b39f23ce2b15a6d950 Mon Sep 17 00:00:00 2001 From: Finn Schiermer Andersen Date: Mon, 26 Aug 2013 14:29:04 +0200 Subject: [PATCH 16/20] bugfix: remap lock file during end_read if necessary --- src/tightdb/group_shared.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/tightdb/group_shared.cpp b/src/tightdb/group_shared.cpp index 65aeaf27a7c..dd3094e8a9f 100644 --- a/src/tightdb/group_shared.cpp +++ b/src/tightdb/group_shared.cpp @@ -357,6 +357,10 @@ void SharedGroup::end_read() SharedInfo* info = m_file_map.get_addr(); ScopedMutexLock lock(&info->readmutex); + if (TIGHTDB_UNLIKELY(info->infosize > m_reader_map.get_size())) { + m_reader_map.remap(m_file, File::access_ReadWrite, info->infosize); + } + // FIXME: m_version may well be a 64-bit integer so this cast // to uint32_t seems quite dangerous. Should the type of // m_version be changed to uint32_t? The problem with uint32_t From 9ce762e240407e2fe3aa44aac9c5d97eedb65282 Mon Sep 17 00:00:00 2001 From: Kristian Spangsege Date: Tue, 27 Aug 2013 15:40:16 +0200 Subject: [PATCH 17/20] A tiny bit of cleanup --- src/tightdb/array.hpp | 2 +- src/tightdb/column.hpp | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/tightdb/array.hpp b/src/tightdb/array.hpp index 9dc4628ade6..9a68ca41e1b 100644 --- a/src/tightdb/array.hpp +++ b/src/tightdb/array.hpp @@ -1628,7 +1628,7 @@ ref_type Array::btree_insert(std::size_t elem_ndx, TreeInsert& state // the first subtree, or it can be prepended to the second // one. We currently always append to the first subtree. It is // essentially a matter of using the lower vs. the upper bound - // when searching in in the offsets array. + // when searching through the offsets array. child_ndx = offsets.lower_bound_int(elem_ndx); TIGHTDB_ASSERT(child_ndx < refs.size()); std::size_t elem_ndx_offset = child_ndx == 0 ? 
0 : to_size_t(offsets.get(child_ndx-1)); diff --git a/src/tightdb/column.hpp b/src/tightdb/column.hpp index 66fc67abcd7..285f806cee4 100644 --- a/src/tightdb/column.hpp +++ b/src/tightdb/column.hpp @@ -375,7 +375,8 @@ inline void Column::add(int64_t value) inline void Column::insert(std::size_t ndx, int64_t value) { TIGHTDB_ASSERT(ndx <= size()); - if (size() <= ndx) ndx = npos; + if (size() <= ndx) + ndx = npos; do_insert(ndx, value); } From 71405a0dbb39d644409369f72aa47ae21546bc32 Mon Sep 17 00:00:00 2001 From: Kristian Spangsege Date: Tue, 27 Aug 2013 21:28:25 +0200 Subject: [PATCH 18/20] Fixing wrong assertion in Group::get_table_ptr() and adding a few more + some documentation --- src/tightdb/array.hpp | 2 +- src/tightdb/group.hpp | 37 +++++++++++++++++++++++++++++++++++-- 2 files changed, 36 insertions(+), 3 deletions(-) diff --git a/src/tightdb/array.hpp b/src/tightdb/array.hpp index 9a68ca41e1b..40f51078b16 100644 --- a/src/tightdb/array.hpp +++ b/src/tightdb/array.hpp @@ -212,7 +212,7 @@ class ArrayParent /// referenced array node. This 'reverse' reference is not explicitly /// present in the underlying node hierarchy, but it is needed when /// modifying an array. A modification may lead to relocation of the -/// undeerlying array node, and the parent must be updated +/// underlying array node, and the parent must be updated /// accordingly. Since this applies recursively all the way to the root /// node, it is essential that the entire chain of parent accessors is /// constructed and properly maintained when a particular array is diff --git a/src/tightdb/group.hpp b/src/tightdb/group.hpp index dc0ca9e7dbb..505f38ff51e 100644 --- a/src/tightdb/group.hpp +++ b/src/tightdb/group.hpp @@ -186,16 +186,47 @@ class Group: private Table::Parent { std::size_t size() const; StringData get_table_name(std::size_t table_ndx) const; + + /// Check whether this group has a table with the specified name. bool has_table(StringData name) const; /// Check whether this group has a table with the specified name - /// and type. + /// and a dynamic type that matches the specified static type. + /// + /// \tparam T An instance of the BasicTable<> class template. template<class T> bool has_table(StringData name) const; + //@{ + /// Get the table with the specified name from this group. + /// + /// The non-const versions of this function will create a table + /// with the specified name if one does not already exist. The + /// const versions will not. + /// + /// It is an error to call one of the const-qualified versions for + /// a table that does not already exist. Doing so will result in + /// undefined behavior. + /// + /// The non-template versions will return dynamically typed table + /// accessors, while the template versions will return statically + /// typed accessors. + /// + /// It is an error to call one of the templated versions for a + /// table whose dynamic type does not match the specified static + /// type. Doing so will result in undefined behavior. + /// + /// New tables created by the non-const non-template version will + /// have no columns initially. New tables created by the non-const + /// template version will have a dynamic type (set of columns) + /// that matches the specified static type. + /// + /// \tparam T An instance of the BasicTable<> class template.
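+ ///
+ /// A usage sketch (illustration only; "MyTable" stands for some
+ /// statically typed table defined elsewhere with the table macros,
+ /// and is not taken from the sources):
+ ///
+ ///   Group group;
+ ///   TableRef t = group.get_table("events");            // created if absent
+ ///   MyTable::Ref t2 = group.get_table<MyTable>("log"); // created with MyTable's columns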
TableRef get_table(StringData name); ConstTableRef get_table(StringData name) const; template typename T::Ref get_table(StringData name); template typename T::ConstRef get_table(StringData name) const; + //@} + // Serialization @@ -483,7 +514,7 @@ template inline const T* Group::get_table_ptr(StringData name) const { TIGHTDB_STATIC_ASSERT(IsBasicTable::value, "Invalid table type"); const Table* table = get_table_ptr(name); // Throws - TIGHTDB_ASSERT(table || T::matches_dynamic_spec(&table->get_spec())); + TIGHTDB_ASSERT(!table || T::matches_dynamic_spec(&table->get_spec())); return static_cast(table); } @@ -494,6 +525,7 @@ inline TableRef Group::get_table(StringData name) inline ConstTableRef Group::get_table(StringData name) const { + TIGHTDB_ASSERT(has_table(name)); return get_table_ptr(name)->get_table_ref(); } @@ -504,6 +536,7 @@ template inline typename T::Ref Group::get_table(StringData name) template inline typename T::ConstRef Group::get_table(StringData name) const { + TIGHTDB_ASSERT(has_table(name)); return get_table_ptr(name)->get_table_ref(); } From 6e705da6e4d1ec547b84c2631f34c365ef7d92d4 Mon Sep 17 00:00:00 2001 From: Kristian Spangsege Date: Wed, 28 Aug 2013 03:33:01 +0200 Subject: [PATCH 19/20] Fixing Xcode project --- tightdb.xcodeproj/project.pbxproj | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tightdb.xcodeproj/project.pbxproj b/tightdb.xcodeproj/project.pbxproj index da0ebfd3951..dccb384113c 100644 --- a/tightdb.xcodeproj/project.pbxproj +++ b/tightdb.xcodeproj/project.pbxproj @@ -185,12 +185,10 @@ 4142C98A1623478700B3B902 /* file.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 3640797E15AD6687009A3A82 /* file.hpp */; }; 520588CA16C1DA9D009DA6D8 /* data_type.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 520588C916C1DA9D009DA6D8 /* data_type.hpp */; }; 52113CDE16C27EF800C301FB /* lang_bind_helper.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 52113CDD16C27EF800C301FB /* lang_bind_helper.cpp */; }; + 525AAC8517CD6C9B00267037 /* thread.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 52C3965616BC6704003619FF /* thread.cpp */; }; 52C3965716BC6704003619FF /* thread.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 52C3965616BC6704003619FF /* thread.cpp */; }; - 52D6E064175BDBDD00B423E5 /* Makefile in Sources */ = {isa = PBXBuildFile; fileRef = 52D6E058175BDBDD00B423E5 /* Makefile */; }; - 52D6E065175BDBDD00B423E5 /* mem.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 52D6E059175BDBDD00B423E5 /* mem.cpp */; }; 52D6E067175BDBDD00B423E5 /* mem.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 52D6E05B175BDBDD00B423E5 /* mem.hpp */; }; 52D6E069175BDBDD00B423E5 /* number_names.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 52D6E05D175BDBDD00B423E5 /* number_names.hpp */; }; - 52D6E06B175BDBDD00B423E5 /* timer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 52D6E060175BDBDD00B423E5 /* timer.cpp */; }; 52D6E06D175BDBDD00B423E5 /* timer.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 52D6E062175BDBDD00B423E5 /* timer.hpp */; }; 52D6E06F175BDC5D00B423E5 /* mem.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 52D6E059175BDBDD00B423E5 /* mem.cpp */; }; 52D6E070175BDC6500B423E5 /* timer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 52D6E060175BDBDD00B423E5 /* timer.cpp */; }; @@ -1043,6 +1041,7 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + 525AAC8517CD6C9B00267037 /* thread.cpp in Sources */, 365CCE34157CC37D00172BF8 /* alloc_slab.cpp in Sources */, 365CCE37157CC37D00172BF8 /* 
array_binary.cpp in Sources */, 365CCE39157CC37D00172BF8 /* array_blob.cpp in Sources */, @@ -1070,9 +1069,6 @@ 36A1DC9316C3F34B0086A836 /* lang_bind_helper.cpp in Sources */, 36A1DC9716C3F3C50086A836 /* terminate.cpp in Sources */, 36A1DCA216C3F43D0086A836 /* string_buffer.cpp in Sources */, - 52D6E064175BDBDD00B423E5 /* Makefile in Sources */, - 52D6E065175BDBDD00B423E5 /* mem.cpp in Sources */, - 52D6E06B175BDBDD00B423E5 /* timer.cpp in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -1311,6 +1307,7 @@ GCC_C_LANGUAGE_STANDARD = gnu99; GCC_DYNAMIC_NO_PIC = NO; GCC_ENABLE_OBJC_EXCEPTIONS = YES; + GCC_INLINES_ARE_PRIVATE_EXTERN = NO; GCC_OPTIMIZATION_LEVEL = 0; GCC_PREPROCESSOR_DEFINITIONS = ( "DEBUG=1", @@ -1348,6 +1345,8 @@ DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; GCC_C_LANGUAGE_STANDARD = gnu99; GCC_ENABLE_OBJC_EXCEPTIONS = YES; + GCC_INLINES_ARE_PRIVATE_EXTERN = NO; + GCC_SYMBOLS_PRIVATE_EXTERN = NO; GCC_VERSION = com.apple.compilers.llvm.clang.1_0; GCC_WARN_64_TO_32_BIT_CONVERSION = YES; GCC_WARN_ABOUT_MISSING_PROTOTYPES = YES; @@ -1415,6 +1414,7 @@ DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; GCC_C_LANGUAGE_STANDARD = gnu99; GCC_ENABLE_OBJC_EXCEPTIONS = YES; + GCC_SYMBOLS_PRIVATE_EXTERN = NO; GCC_VERSION = com.apple.compilers.llvm.clang.1_0; GCC_WARN_64_TO_32_BIT_CONVERSION = YES; GCC_WARN_ABOUT_MISSING_PROTOTYPES = YES; From 62b712fa7353511fb8da0d9ee62393bed6010454 Mon Sep 17 00:00:00 2001 From: Kristian Spangsege Date: Wed, 28 Aug 2013 03:37:59 +0200 Subject: [PATCH 20/20] Reintroducing use of function local static for default allocator. It was recently eliminated due to problem reports from Valgrind. The problem with not using function local statics is that the order of initialization of regular globals is undefined by the C++ standard. --- src/tightdb/alloc.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/tightdb/alloc.cpp b/src/tightdb/alloc.cpp index 1739590b502..80090ceaca7 100644 --- a/src/tightdb/alloc.cpp +++ b/src/tightdb/alloc.cpp @@ -86,13 +86,12 @@ class DefaultAllocator: public tightdb::Allocator { #endif }; -DefaultAllocator default_alloc; - } // anonymous namespace Allocator& Allocator::get_default() TIGHTDB_NOEXCEPT { + static DefaultAllocator default_alloc; return default_alloc; }
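
The function-local static reinstated by this final patch is the usual remedy for the problem named in the commit message: C++ leaves the initialization order of namespace-scope globals across translation units unspecified, whereas a local static is constructed the first time control reaches its declaration. A self-contained sketch of the idiom (hypothetical Registry type, independent of the TightDB sources; note that before C++11 the first call is not thread-safe without external locking):

    #include <cstdio>

    struct Registry {
        int count;
        Registry(): count(0) {}
    };

    // Construct-on-first-use: the local static is ready even when this
    // function is called from the constructor of a global object in
    // another translation unit.
    Registry& global_registry()
    {
        static Registry registry;
        return registry;
    }

    struct AutoRegister {
        AutoRegister() { ++global_registry().count; }
    } auto_register; // runs during static initialization

    int main()
    {
        std::printf("%d\n", global_registry().count); // prints 1
    }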