fix missing err check in the commit path #1543
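All of the changes below apply the same Go idiom: an error return value that was previously dropped is now captured and propagated to the caller. A minimal, self-contained sketch of the pattern (the `doWork` and `process` names are illustrative, not from the Fabric codebase):

```go
package main

import (
	"errors"
	"fmt"
)

// doWork stands in for any internal call whose error return was
// previously ignored (hypothetical helper, for illustration only).
func doWork(key string) error {
	if key == "" {
		return errors.New("empty key")
	}
	return nil
}

// process shows the fix applied throughout this PR: capture the error
// in the if-statement header and return it immediately, instead of
// silently continuing the loop.
func process(keys []string) error {
	for _, k := range keys {
		if err := doWork(k); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	fmt.Println(process([]string{"a", ""})) // prints "empty key"
}
```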
```diff
@@ -78,7 +78,9 @@ func (p *PurgeMgr) UpdateExpiryInfoOfPvtDataOfOldBlocks(pvtUpdates *privacyenabl
 	builder := newExpiryScheduleBuilder(p.btlPolicy)
 	pvtUpdateCompositeKeyMap := pvtUpdates.ToCompositeKeyMap()
 	for k, vv := range pvtUpdateCompositeKeyMap {
-		builder.add(k.Namespace, k.CollectionName, k.Key, util.ComputeStringHash(k.Key), vv)
+		if err := builder.add(k.Namespace, k.CollectionName, k.Key, util.ComputeStringHash(k.Key), vv); err != nil {
+			return err
+		}
 	}

 	var expiryInfoUpdates []*expiryInfo
```

> Review comment: I guess that you mistakenly linked this function to the regular commit path. This function is for old missing pvt data, which is in the reconciliation path, and does not cause a panic but rather retries.

> Reply: Ah. Correct. My mistake.
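For context, a reconciliation-path caller can absorb such an error by retrying rather than panicking. A hypothetical sketch of that style of caller (not Fabric's actual reconciler; the name, signature, and backoff policy are all illustrative):

```go
package reconcile

import (
	"fmt"
	"time"
)

// retryUpdate retries a failing update with a fixed backoff instead of
// panicking, roughly how a reconciliation loop can tolerate transient
// storage errors.
func retryUpdate(update func() error, attempts int, backoff time.Duration) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = update(); err == nil {
			return nil
		}
		time.Sleep(backoff)
	}
	return fmt.Errorf("reconciliation: giving up after %d attempts: %w", attempts, err)
}
```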
```diff
@@ -212,7 +214,10 @@ func (p *PurgeMgr) prepareWorkingsetFor(expiringAtBlk uint64) *workingset {
 	// Transform the keys into the form such that for each hashed key that is eligible for purge appears in 'toPurge'
 	toPurge := transformToExpiryInfoMap(expiryInfo)
 	// Load the latest versions of the hashed keys
-	p.preloadCommittedVersionsInCache(toPurge)
+	if err = p.preloadCommittedVersionsInCache(toPurge); err != nil {
+		workingset.err = err
+		return workingset
+	}
 	var expiryInfoKeysToClear []*expiryInfoKey

 	if len(toPurge) == 0 {
```
```diff
@@ -266,15 +271,15 @@ func (p *PurgeMgr) prepareWorkingsetFor(expiringAtBlk uint64) *workingset {
 	return workingset
 }

-func (p *PurgeMgr) preloadCommittedVersionsInCache(expInfoMap expiryInfoMap) {
+func (p *PurgeMgr) preloadCommittedVersionsInCache(expInfoMap expiryInfoMap) error {
 	if !p.db.IsBulkOptimizable() {
-		return
+		return nil
 	}
 	var hashedKeys []*privacyenabledstate.HashedCompositeKey
 	for k := range expInfoMap {
 		hashedKeys = append(hashedKeys, &k)
 	}
-	p.db.LoadCommittedVersionsOfPubAndHashedKeys(nil, hashedKeys)
+	return p.db.LoadCommittedVersionsOfPubAndHashedKeys(nil, hashedKeys)
 }

 func transformToExpiryInfoMap(expiryInfo []*expiryInfo) expiryInfoMap {
```
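The `prepareWorkingsetFor` hunk above uses a slightly different propagation style: the function returns only a `*workingset`, so the error is stashed in the struct for the caller to inspect. A minimal sketch of that error-carrying-result pattern (the types here are illustrative, not Fabric's):

```go
package main

import (
	"errors"
	"fmt"
)

// result carries its own error so that a function returning a single
// struct can still surface failures (illustrative type).
type result struct {
	items []string
	err   error
}

func load(r *result) error {
	return errors.New("simulated load failure")
}

func prepare() *result {
	r := &result{}
	if err := load(r); err != nil {
		r.err = err // stash the error instead of adding a second return value
		return r
	}
	return r
}

func main() {
	r := prepare()
	if r.err != nil {
		fmt.Println("prepare failed:", r.err)
	}
}
```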
```diff
@@ -67,7 +67,9 @@ func (combiner *itrCombiner) Next() (commonledger.QueryResult, error) {
 		}
 	}
 	kv := combiner.kvAt(smallestHolderIndex)
-	combiner.moveItrAndRemoveIfExhausted(smallestHolderIndex)
+	if _, err := combiner.moveItrAndRemoveIfExhausted(smallestHolderIndex); err != nil {
+		return nil, err
+	}
 	if kv.IsDelete() {
 		return combiner.Next()
 	}
```
```diff
@@ -106,7 +106,9 @@ func (helper *RangeQueryResultsHelper) Done() ([]*kvrwset.KVRead, *kvrwset.Query
 			return helper.pendingResults, nil, err
 		}
 	}
-	helper.mt.done()
+	if err := helper.mt.done(); err != nil {
+		return nil, nil, err
+	}
 	return helper.pendingResults, helper.mt.getSummery(), nil
 }
```
```diff
@@ -132,8 +134,7 @@ func (helper *RangeQueryResultsHelper) processPendingResults() error {
 	if err != nil {
 		return err
 	}
-	helper.mt.update(hash)
-	return nil
+	return helper.mt.update(hash)
 }

 func serializeKVReads(kvReads []*kvrwset.KVRead) ([]byte, error) {
```

> Review comment: Same as the above.
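This hunk uses the companion simplification: when the error-returning call is the final statement, `f(hash); return nil` collapses into `return f(hash)`, which forwards the callee's error (or nil) unchanged. A tiny sketch under assumed types:

```go
package rangequery

// updateFn stands in for helper.mt.update (illustrative signature).
type updateFn func(hash []byte) error

// finish forwards update's error (possibly nil) directly instead of
// discarding it and always returning nil.
func finish(update updateFn, hash []byte) error {
	return update(hash)
}
```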
```diff
@@ -106,7 +106,9 @@ func NewLockBasedTxMgr(initializer *Initializer) (*LockBasedTxMgr, error) {
 		return nil, errors.New("create new lock based TxMgr failed: passed in nil ledger hasher")
 	}

-	initializer.DB.Open()
+	if err := initializer.DB.Open(); err != nil {
+		return nil, err
+	}
 	txmgr := &LockBasedTxMgr{
 		ledgerid: initializer.LedgerID,
 		db:       initializer.DB,
```
```diff
@@ -119,7 +119,9 @@ func (v *rangeQueryHashValidator) validate() (bool, error) {
 		return equals, nil
 	}
 	versionedKV := result.(*statedb.VersionedKV)
-	v.resultsHelper.AddResult(rwsetutil.NewKVRead(versionedKV.Key, versionedKV.Version))
+	if err := v.resultsHelper.AddResult(rwsetutil.NewKVRead(versionedKV.Key, versionedKV.Version)); err != nil {
+		return false, err
+	}
 	merkle := v.resultsHelper.GetMerkleSummary()

 	if merkle.MaxLevel < inMerkle.MaxLevel {
```

> Review comment: I'll summarize it a bit differently. Though it is practically a remote chance, theoretically this missed error could already cause a fork under the existing code: the peer that misses this error will continue evaluating the results, eventually find the query results different, and hence mark the transaction invalid. This change, instead of marking the transaction invalid, will cause a panic upstream, so it should be an acceptable change without an additional capability check. On a side note, I see the possibility of this error being generated only if someone is using an HSM for hash computation, because Go's crypto hash library does not return an error. As you mentioned, the other possibility is the proto marshal downstream, which can fail only on a programming error in our code, such as passing nil (which is not happening here).
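The reviewer's point about error sources can be made concrete: Go's standard crypto hashes cannot fail, so hashing can only error when it is delegated to something fallible such as an HSM. A hedged sketch of such an abstraction (the `Hasher` interface and both implementations are illustrative; Fabric's actual hashing interface may differ):

```go
package main

import (
	"crypto/sha256"
	"errors"
	"fmt"
)

// Hasher abstracts hash computation so that an HSM-backed
// implementation can report failures (illustrative interface).
type Hasher interface {
	Hash(msg []byte) ([]byte, error)
}

// swHasher wraps crypto/sha256, which never fails.
type swHasher struct{}

func (swHasher) Hash(msg []byte) ([]byte, error) {
	h := sha256.Sum256(msg)
	return h[:], nil
}

// hsmHasher simulates a device call that can fail, e.g. when the
// HSM session is lost (hypothetical behavior).
type hsmHasher struct{ sessionOK bool }

func (h hsmHasher) Hash(msg []byte) ([]byte, error) {
	if !h.sessionOK {
		return nil, errors.New("hsm: session closed")
	}
	return swHasher{}.Hash(msg)
}

func main() {
	_, err := hsmHasher{sessionOK: false}.Hash([]byte("kvread"))
	fmt.Println(err) // hsm: session closed
}
```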
```diff
@@ -18,8 +18,9 @@ import (
 func prepareTxOps(rwset *rwsetutil.TxRwSet, txht *version.Height,
 	precedingUpdates *publicAndHashUpdates, db *privacyenabledstate.DB) (txOps, error) {
 	txops := txOps{}
-	txops.applyTxRwset(rwset)
-	//logger.Debugf("prepareTxOps() txops after applying raw rwset=%#v", spew.Sdump(txops))
+	if err := txops.applyTxRwset(rwset); err != nil {
+		return nil, err
+	}
 	for ck, keyop := range txops {
 		// check if the final state of the key, value and metadata, is already present in the transaction, then skip
 		// otherwise we need to retrieve latest state and merge in the current value or metadata update
```

> Review comment: Rather, in the current code, the missed error could have caused a state fork (because the metadata would not be applied). Now this would instead cause a panic in the upstream code rather than committing the transaction without metadata, so it is a safe and desired change. However, this error should never happen, as it is mainly a programming error (e.g., passing nil to proto.Marshal).
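The programming error the reviewer mentions, passing nil to proto.Marshal, is easy to demonstrate with the legacy github.com/golang/protobuf module that Fabric depended on at the time (assumed here; the newer google.golang.org/protobuf API treats a nil message differently):

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Marshaling a nil message is a programming error that the legacy
	// library reports as an error value rather than a panic.
	_, err := proto.Marshal(nil)
	fmt.Println(err) // proto: Marshal called with nil
}
```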
```diff
@@ -62,7 +63,9 @@ func (txops txOps) applyTxRwset(rwset *rwsetutil.TxRwSet) error {
 			txops.applyKVWrite(ns, "", kvWrite)
 		}
 		for _, kvMetadataWrite := range nsRWSet.KvRwSet.MetadataWrites {
-			txops.applyMetadata(ns, "", kvMetadataWrite)
+			if err := txops.applyMetadata(ns, "", kvMetadataWrite); err != nil {
+				return err
+			}
 		}

 		// apply collection level kvwrite and kvMetadataWrite
```
```diff
@@ -79,12 +82,14 @@ func (txops txOps) applyTxRwset(rwset *rwsetutil.TxRwSet) error {
 			}

 			for _, metadataWrite := range collHashRWset.HashedRwSet.MetadataWrites {
-				txops.applyMetadata(ns, coll,
+				if err := txops.applyMetadata(ns, coll,
 					&kvrwset.KVMetadataWrite{
 						Key:     string(metadataWrite.KeyHash),
 						Entries: metadataWrite.Entries,
 					},
-				)
+				); err != nil {
+					return err
+				}
 			}
 		}
 	}
```
```diff
@@ -102,7 +102,9 @@ func (v *validator) validateAndPrepareBatch(blk *block, doMVCCValidation bool) (
 		if validationCode == peer.TxValidationCode_VALID {
 			logger.Debugf("Block [%d] Transaction index [%d] TxId [%s] marked as valid by state validator. ContainsPostOrderWrites [%t]", blk.num, tx.indexInBlock, tx.id, tx.containsPostOrderWrites)
 			committingTxHeight := version.NewHeight(blk.num, uint64(tx.indexInBlock))
-			updates.applyWriteSet(tx.rwset, committingTxHeight, v.db, tx.containsPostOrderWrites)
+			if err := updates.applyWriteSet(tx.rwset, committingTxHeight, v.db, tx.containsPostOrderWrites); err != nil {
+				return nil, err
+			}
 		} else {
 			logger.Warningf("Block [%d] Transaction index [%d] TxId [%s] marked as invalid by state validator. Reason code [%s]",
 				blk.num, tx.indexInBlock, tx.id, validationCode.String())
```
```diff
@@ -228,7 +230,9 @@ func (v *validator) validateRangeQuery(ns string, rangeQueryInfo *kvrwset.RangeQ
 		logger.Debug(`Hashing results are not present in the range query info hence, initiating raw KVReads based validation`)
 		qv = &rangeQueryResultsValidator{}
 	}
-	qv.init(rangeQueryInfo, combinedItr)
+	if err := qv.init(rangeQueryInfo, combinedItr); err != nil {
+		return false, err
+	}
 	return qv.validate()
 }
```

> Review comment: To summarize it in simple terms, this does not change the current behavior but rather returns the error early on.
```diff
@@ -78,12 +78,16 @@ func prepareExpiryEntries(committingBlk uint64, dataEntries []*dataEntry, missin

 	// 1. prepare expiryData for non-missing data
 	for _, dataEntry := range dataEntries {
-		prepareExpiryEntriesForPresentData(mapByExpiringBlk, dataEntry.key, btlPolicy)
+		if err := prepareExpiryEntriesForPresentData(mapByExpiringBlk, dataEntry.key, btlPolicy); err != nil {
+			return nil, err
+		}
 	}

 	// 2. prepare expiryData for missing data
 	for missingDataKey := range missingDataEntries {
-		prepareExpiryEntriesForMissingData(mapByExpiringBlk, &missingDataKey, btlPolicy)
+		if err := prepareExpiryEntriesForMissingData(mapByExpiringBlk, &missingDataKey, btlPolicy); err != nil {
+			return nil, err
+		}
 	}

 	for expiryBlk, expiryData := range mapByExpiringBlk {
```

> Review comment: Same as above.
```diff
@@ -776,7 +776,9 @@ func (s *Store) purgeExpiredData(minBlkNum, maxBlkNum uint64) error {
 		for _, missingDataKey := range missingDataKeys {
 			batch.Delete(encodeMissingDataKey(missingDataKey))
 		}
-		s.db.WriteBatch(batch, false)
+		if err := s.db.WriteBatch(batch, false); err != nil {
+			return err
+		}
 	}
 	logger.Infof("[%s] - [%d] Entries purged from private data storage till block number [%d]", s.ledgerid, len(expiryEntries), maxBlkNum)
 	return nil
```

> Review comment: Only results in a warning message.

> Review comment: An `err` can occur due to a leveldb internal error. Further, this is part of the peer recovery code and executes during peer startup. Hence, it is safe to process this error; it would not create any fork or make it difficult to join a new peer (which would fetch and process all blocks). A leveldb error during peer startup results in a peer panic, which is the expected behavior.
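The expectation the reviewer describes, a storage error surfacing as a peer panic during startup, corresponds to a caller pattern like the following hedged sketch (illustrative, not Fabric's actual recovery code):

```go
package main

import (
	"errors"
	"log"
)

// purgeExpiredData stands in for the recovery step; here it simulates
// a leveldb write failure (hypothetical error value).
func purgeExpiredData() error {
	return errors.New("leveldb: i/o error")
}

func main() {
	// During peer startup there is no safe way to continue past a
	// storage error, so the error is escalated to a panic; a freshly
	// joined peer would instead re-fetch and re-process all blocks.
	if err := purgeExpiredData(); err != nil {
		log.Panicf("ledger recovery failed: %s", err)
	}
}
```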