From 85e37798cb8783729f097ed7328994f1c170849d Mon Sep 17 00:00:00 2001
From: Stefan Weil
Date: Wed, 23 Nov 2016 07:38:14 +0100
Subject: [PATCH] Simplify delete operations

It is not necessary to check for null pointers.

Signed-off-by: Stefan Weil
---
 api/baseapi.cpp                | 3 +--
 api/baseapi.h                  | 3 +--
 ccmain/pageiterator.cpp        | 8 +++-----
 ccmain/pgedit.cpp              | 2 +-
 ccstruct/pdblock.h             | 2 +-
 ccutil/hashfn.h                | 2 +-
 classify/trainingsample.cpp    | 4 ++--
 classify/trainingsampleset.cpp | 6 ++----
 cube/char_samp_set.cpp         | 4 +---
 cube/search_column.cpp         | 4 +---
 dict/dict.cpp                  | 8 +++-----
 textord/fpchop.cpp             | 4 ++--
 textord/makerow.cpp            | 3 +--
 wordrec/chopper.cpp            | 8 ++------
 wordrec/language_model.cpp     | 2 +-
 15 files changed, 23 insertions(+), 40 deletions(-)

diff --git a/api/baseapi.cpp b/api/baseapi.cpp
index e43b54e49c..b24742435f 100644
--- a/api/baseapi.cpp
+++ b/api/baseapi.cpp
@@ -830,8 +830,7 @@ int TessBaseAPI::Recognize(ETEXT_DESC* monitor) {
     return -1;
   if (FindLines() != 0)
     return -1;
-  if (page_res_ != NULL)
-    delete page_res_;
+  delete page_res_;
   if (block_list_->empty()) {
     page_res_ = new PAGE_RES(false, block_list_,
                              &tesseract_->prev_word_best_choice_);
diff --git a/api/baseapi.h b/api/baseapi.h
index d6e532ba81..64f8ba0475 100644
--- a/api/baseapi.h
+++ b/api/baseapi.h
@@ -373,8 +373,7 @@ class TESS_API TessBaseAPI {
    * delete it when it it is replaced or the API is destructed.
    */
   void SetThresholder(ImageThresholder* thresholder) {
-    if (thresholder_ != NULL)
-      delete thresholder_;
+    delete thresholder_;
     thresholder_ = thresholder;
     ClearResults();
   }
diff --git a/ccmain/pageiterator.cpp b/ccmain/pageiterator.cpp
index fc15840c44..7d7865ae30 100644
--- a/ccmain/pageiterator.cpp
+++ b/ccmain/pageiterator.cpp
@@ -87,7 +87,7 @@ const PageIterator& PageIterator::operator=(const PageIterator& src) {
   rect_top_ = src.rect_top_;
   rect_width_ = src.rect_width_;
   rect_height_ = src.rect_height_;
-  if (it_ != NULL) delete it_;
+  delete it_;
   it_ = new PAGE_RES_IT(*src.it_);
   BeginWord(src.blob_index_);
   return *this;
@@ -597,10 +597,8 @@ void PageIterator::BeginWord(int offset) {
     }
     word_ = NULL;
     // We will be iterating the box_word.
-    if (cblob_it_ != NULL) {
-      delete cblob_it_;
-      cblob_it_ = NULL;
-    }
+    delete cblob_it_;
+    cblob_it_ = NULL;
   } else {
     // No recognition yet, so a "symbol" is a cblob.
     word_ = word_res->word;
diff --git a/ccmain/pgedit.cpp b/ccmain/pgedit.cpp
index d78c0dacc0..5e23595422 100644
--- a/ccmain/pgedit.cpp
+++ b/ccmain/pgedit.cpp
@@ -191,7 +191,7 @@ ScrollView* bln_word_window_handle() {  // return handle
  */
 
 void build_image_window(int width, int height) {
-  if (image_win != NULL) { delete image_win; }
+  delete image_win;
   image_win = new ScrollView(editor_image_win_name.string(),
                              editor_image_xpos, editor_image_ypos,
                              width + 1,
diff --git a/ccstruct/pdblock.h b/ccstruct/pdblock.h
index e9139f2ac5..1edd9aff50 100644
--- a/ccstruct/pdblock.h
+++ b/ccstruct/pdblock.h
@@ -51,7 +51,7 @@ class PDBLK {
 
   /// destructor
   ~PDBLK() {
-    if (hand_poly) delete hand_poly;
+    delete hand_poly;
   }
 
   POLY_BLOCK *poly_block() const { return hand_poly; }
diff --git a/ccutil/hashfn.h b/ccutil/hashfn.h
index ec96932107..73e15be9a8 100644
--- a/ccutil/hashfn.h
+++ b/ccutil/hashfn.h
@@ -63,7 +63,7 @@ template <typename T> class SmartPtr {
     return ptr_;
   }
   void reset(T* ptr) {
-    if (ptr_ != NULL) delete ptr_;
+    delete ptr_;
     ptr_ = ptr;
   }
   bool operator==(const T* ptr) const {
diff --git a/classify/trainingsample.cpp b/classify/trainingsample.cpp
index 7fe83b7718..a88eb98bf0 100644
--- a/classify/trainingsample.cpp
+++ b/classify/trainingsample.cpp
@@ -209,7 +209,7 @@ void TrainingSample::ExtractCharDesc(int int_feature_type,
                                      int geo_type,
                                      CHAR_DESC_STRUCT* char_desc) {
   // Extract the INT features.
-  if (features_ != NULL) delete [] features_;
+  delete [] features_;
   FEATURE_SET_STRUCT* char_features = char_desc->FeatureSets[int_feature_type];
   if (char_features == NULL) {
     tprintf("Error: no features to train on of type %s\n",
@@ -230,7 +230,7 @@ void TrainingSample::ExtractCharDesc(int int_feature_type,
     }
   }
   // Extract the Micro features.
-  if (micro_features_ != NULL) delete [] micro_features_;
+  delete [] micro_features_;
   char_features = char_desc->FeatureSets[micro_type];
   if (char_features == NULL) {
     tprintf("Error: no features to train on of type %s\n",
diff --git a/classify/trainingsampleset.cpp b/classify/trainingsampleset.cpp
index afbf3f420e..93936fcae6 100644
--- a/classify/trainingsampleset.cpp
+++ b/classify/trainingsampleset.cpp
@@ -96,10 +96,8 @@ bool TrainingSampleSet::DeSerialize(bool swap, FILE* fp) {
   num_raw_samples_ = samples_.size();
   if (!unicharset_.load_from_file(fp)) return false;
   if (!font_id_map_.DeSerialize(swap, fp)) return false;
-  if (font_class_array_ != NULL) {
-    delete font_class_array_;
-    font_class_array_ = NULL;
-  }
+  delete font_class_array_;
+  font_class_array_ = NULL;
   inT8 not_null;
   if (fread(&not_null, sizeof(not_null), 1, fp) != 1) return false;
   if (not_null) {
diff --git a/cube/char_samp_set.cpp b/cube/char_samp_set.cpp
index 2a495095ef..1e212b1957 100644
--- a/cube/char_samp_set.cpp
+++ b/cube/char_samp_set.cpp
@@ -40,9 +40,7 @@ void CharSampSet::Cleanup() {
   // only free samples if owned by class
   if (own_samples_ == true) {
     for (int samp_idx = 0; samp_idx < cnt_; samp_idx++) {
-      if (samp_buff_[samp_idx] != NULL) {
-        delete samp_buff_[samp_idx];
-      }
+      delete samp_buff_[samp_idx];
     }
   }
   delete []samp_buff_;
diff --git a/cube/search_column.cpp b/cube/search_column.cpp
index 9a042d016a..e13149d9f5 100644
--- a/cube/search_column.cpp
+++ b/cube/search_column.cpp
@@ -195,9 +195,7 @@ SearchNode *SearchColumn::AddNode(LangModEdge *edge, int reco_cost,
     }
 
     // free the edge
-    if (edge != NULL) {
-      delete edge;
-    }
+    delete edge;
   }
 
   // update Min and Max Costs
diff --git a/dict/dict.cpp b/dict/dict.cpp
index fec9fcce19..c5cc7acf08 100644
--- a/dict/dict.cpp
+++ b/dict/dict.cpp
@@ -191,7 +191,7 @@ Dict::Dict(CCUtil *ccutil)
 
 Dict::~Dict() {
   End();
-  if (hyphen_word_ != NULL) delete hyphen_word_;
+  delete hyphen_word_;
   if (output_ambig_words_file_ != NULL) fclose(output_ambig_words_file_);
 }
 
@@ -360,10 +360,8 @@ void Dict::End() {
   dawgs_.clear();
   successors_.clear();
   document_words_ = NULL;
-  if (pending_words_ != NULL) {
-    delete pending_words_;
-    pending_words_ = NULL;
-  }
+  delete pending_words_;
+  pending_words_ = NULL;
 }
 
 // Returns true if in light of the current state unichar_id is allowed
diff --git a/textord/fpchop.cpp b/textord/fpchop.cpp
index be2768cce9..699d419620 100644
--- a/textord/fpchop.cpp
+++ b/textord/fpchop.cpp
@@ -259,8 +259,8 @@ void split_to_blob(                 //split the blob
                    pitch_error,
                    left_coutlines,
                    right_coutlines);
-  if (blob != NULL)
-    delete blob;                   //free it
+
+  delete blob;
 }
 
 /**********************************************************************
diff --git a/textord/makerow.cpp b/textord/makerow.cpp
index c8170e5d16..a5749ad680 100644
--- a/textord/makerow.cpp
+++ b/textord/makerow.cpp
@@ -507,8 +507,7 @@ void vigorous_noise_removal(TO_BLOCK* block) {
         continue;  // Looks OK.
     }
     // It might be noise so get rid of it.
-    if (blob->cblob() != NULL)
-      delete blob->cblob();
+    delete blob->cblob();
     delete b_it.extract();
   } else {
     prev = blob;
diff --git a/wordrec/chopper.cpp b/wordrec/chopper.cpp
index 850cfcabda..dfda3e9183 100644
--- a/wordrec/chopper.cpp
+++ b/wordrec/chopper.cpp
@@ -568,9 +568,7 @@ int Wordrec::select_blob_to_split(
 
   for (x = 0; x < blob_choices.size(); ++x) {
     if (blob_choices[x] == NULL) {
-      if (fragments != NULL) {
-        delete[] fragments;
-      }
+      delete[] fragments;
       return x;
     } else {
       blob_choice = blob_choices[x];
@@ -614,9 +612,7 @@ int Wordrec::select_blob_to_split(
       }
     }
   }
-  if (fragments != NULL) {
-    delete[] fragments;
-  }
+  delete[] fragments;
   // TODO(daria): maybe a threshold of badness for
   // worst_near_fragment would be useful.
   return worst_index_near_fragment != -1 ?
diff --git a/wordrec/language_model.cpp b/wordrec/language_model.cpp
index 361fb5c585..99710478ed 100644
--- a/wordrec/language_model.cpp
+++ b/wordrec/language_model.cpp
@@ -988,7 +988,7 @@ float LanguageModel::ComputeNgramCost(const char *unichar,
             unichar, context_ptr, CertaintyScore(certainty)/denom,
             prob, ngram_and_classifier_cost);
   }
-  if (modified_context != NULL) delete[] modified_context;
+  delete[] modified_context;
   return ngram_and_classifier_cost;
 }
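
All of the hunks above rely on the same C++ guarantee: applying delete or delete[] to a null pointer is defined to do nothing, so a preceding "if (ptr != NULL)" guard adds no safety. Below is a minimal standalone sketch of that rule, not part of the patch; the Widget type and the main() driver are hypothetical and exist only to illustrate the before/after patterns.

// Sketch: deleting a null pointer is a no-op, so guards like
// "if (p != NULL) delete p;" can be reduced to "delete p;".
#include <cstdio>

struct Widget {
  ~Widget() { std::puts("Widget destroyed"); }
};

int main() {
  Widget* owned = new Widget;

  // Verbose pattern removed by the patch: the null check adds nothing.
  if (owned != NULL) {
    delete owned;                    // prints "Widget destroyed"
  }
  owned = NULL;

  // Simplified pattern introduced by the patch: safe even when null.
  delete owned;                      // owned is NULL here, so this is a no-op
  delete[] static_cast<int*>(NULL);  // delete[] on a null pointer is also a no-op

  return 0;
}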