diff --git a/core/ledger/kvledger/hashcheck_pvtdata_test.go b/core/ledger/kvledger/hashcheck_pvtdata_test.go
index a2e1240aefe..6cbfd560ec1 100644
--- a/core/ledger/kvledger/hashcheck_pvtdata_test.go
+++ b/core/ledger/kvledger/hashcheck_pvtdata_test.go
@@ -16,7 +16,6 @@ import (
 	"github.com/hyperledger/fabric/common/ledger/testutil"
 	"github.com/hyperledger/fabric/core/ledger"
 	"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/rwsetutil"
-	"github.com/hyperledger/fabric/core/ledger/mock"
 	"github.com/hyperledger/fabric/protoutil"
 	"github.com/stretchr/testify/require"
 )
@@ -24,7 +23,38 @@ import (
 func TestConstructValidInvalidBlocksPvtData(t *testing.T) {
 	conf, cleanup := testConfig(t)
 	defer cleanup()
-	provider := testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
+	nsCollBtlConfs := []*nsCollBtlConfig{
+		{
+			namespace: "ns-1",
+			btlConfig: map[string]uint64{
+				"coll-1": 0,
+				"coll-2": 0,
+			},
+		},
+		{
+			namespace: "ns-2",
+			btlConfig: map[string]uint64{
+				"coll-2": 0,
+			},
+		},
+		{
+			namespace: "ns-4",
+			btlConfig: map[string]uint64{
+				"coll-2": 0,
+			},
+		},
+		{
+			namespace: "ns-6",
+			btlConfig: map[string]uint64{
+				"coll-2": 0,
+			},
+		},
+	}
+	provider := testutilNewProviderWithCollectionConfig(
+		t,
+		nsCollBtlConfs,
+		conf,
+	)
 	defer provider.Close()
 
 	_, gb := testutil.NewBlockGenerator(t, "testLedger", false)
diff --git a/core/ledger/kvledger/kv_ledger.go b/core/ledger/kvledger/kv_ledger.go
index b581d9b7481..f4da40625ff 100644
--- a/core/ledger/kvledger/kv_ledger.go
+++ b/core/ledger/kvledger/kv_ledger.go
@@ -314,9 +314,7 @@ func (l *kvLedger) syncStateDBWithOldBlkPvtdata() error {
 		return err
 	}
 
-	l.pvtdataStore.ResetLastUpdatedOldBlocksList()
-
-	return nil
+	return l.pvtdataStore.ResetLastUpdatedOldBlocksList()
 }
 
 func (l *kvLedger) filterYetToCommitBlocks(blocksPvtData map[uint64][]*ledger.TxPvtData) error {
diff --git a/core/ledger/kvledger/kv_ledger_provider_test.go b/core/ledger/kvledger/kv_ledger_provider_test.go
index b47a72fd781..f254a413122 100644
--- a/core/ledger/kvledger/kv_ledger_provider_test.go
+++ b/core/ledger/kvledger/kv_ledger_provider_test.go
@@ -623,43 +623,60 @@ func testutilNewProvider(conf *lgr.Config, t *testing.T, ccInfoProvider *mock.De
 	return provider
 }
 
+type nsCollBtlConfig struct {
+	namespace string
+	btlConfig map[string]uint64
+}
+
 func testutilNewProviderWithCollectionConfig(
 	t *testing.T,
-	namespace string,
-	btlConfigs map[string]uint64,
+	nsCollBtlConfigs []*nsCollBtlConfig,
 	conf *lgr.Config,
 ) *Provider {
 	provider := testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
 	mockCCInfoProvider := provider.initializer.DeployedChaincodeInfoProvider.(*mock.DeployedChaincodeInfoProvider)
-	collMap := map[string]*peer.StaticCollectionConfig{}
-	var collConf []*peer.CollectionConfig
-	for collName, btl := range btlConfigs {
-		staticConf := &peer.StaticCollectionConfig{Name: collName, BlockToLive: btl}
-		collMap[collName] = staticConf
-		collectionConf := &peer.CollectionConfig{}
-		collectionConf.Payload = &peer.CollectionConfig_StaticCollectionConfig{StaticCollectionConfig: staticConf}
-		collConf = append(collConf, collectionConf)
+	collectionConfPkgs := []*peer.CollectionConfigPackage{}
+
+	nsCollMap := map[string]map[string]*peer.StaticCollectionConfig{}
+	for _, nsCollBtlConf := range nsCollBtlConfigs {
+		collMap := map[string]*peer.StaticCollectionConfig{}
+		var collConf []*peer.CollectionConfig
+		for collName, btl := range nsCollBtlConf.btlConfig {
+			staticConf := &peer.StaticCollectionConfig{Name: collName, BlockToLive: btl}
+			collMap[collName] = staticConf
+			collectionConf := &peer.CollectionConfig{}
+			collectionConf.Payload = &peer.CollectionConfig_StaticCollectionConfig{StaticCollectionConfig: staticConf}
+			collConf = append(collConf, collectionConf)
+		}
+		collectionConfPkgs = append(collectionConfPkgs, &peer.CollectionConfigPackage{Config: collConf})
+		nsCollMap[nsCollBtlConf.namespace] = collMap
 	}
-	collectionConfPkg := &peer.CollectionConfigPackage{Config: collConf}
 
 	mockCCInfoProvider.ChaincodeInfoStub = func(channelName, ccName string, qe lgr.SimpleQueryExecutor) (*lgr.DeployedChaincodeInfo, error) {
-		if ccName == namespace {
-			return &lgr.DeployedChaincodeInfo{
-				Name: namespace, ExplicitCollectionConfigPkg: collectionConfPkg}, nil
+		for i, nsCollBtlConf := range nsCollBtlConfigs {
+			if ccName == nsCollBtlConf.namespace {
+				return &lgr.DeployedChaincodeInfo{
+					Name: nsCollBtlConf.namespace, ExplicitCollectionConfigPkg: collectionConfPkgs[i]}, nil
+			}
 		}
 		return nil, nil
 	}
 
 	mockCCInfoProvider.AllCollectionsConfigPkgStub = func(channelName, ccName string, qe lgr.SimpleQueryExecutor) (*peer.CollectionConfigPackage, error) {
-		if ccName == namespace {
-			return collectionConfPkg, nil
+		for i, nsCollBtlConf := range nsCollBtlConfigs {
+			if ccName == nsCollBtlConf.namespace {
+				return collectionConfPkgs[i], nil
+			}
 		}
 		return nil, nil
+	}
 
 	mockCCInfoProvider.CollectionInfoStub = func(channelName, ccName, collName string, qe lgr.SimpleQueryExecutor) (*peer.StaticCollectionConfig, error) {
-		if ccName == namespace {
-			return collMap[collName], nil
+		for _, nsCollBtlConf := range nsCollBtlConfigs {
+			if ccName == nsCollBtlConf.namespace {
+				return nsCollMap[nsCollBtlConf.namespace][collName], nil
+			}
 		}
 		return nil, nil
 	}
diff --git a/core/ledger/kvledger/kv_ledger_test.go b/core/ledger/kvledger/kv_ledger_test.go
index be9682f81a8..7ae846bf2d6 100644
--- a/core/ledger/kvledger/kv_ledger_test.go
+++ b/core/ledger/kvledger/kv_ledger_test.go
@@ -258,10 +258,15 @@ func TestKVLedgerBlockStorageWithPvtdata(t *testing.T) {
 func TestKVLedgerDBRecovery(t *testing.T) {
 	conf, cleanup := testConfig(t)
 	defer cleanup()
+	nsCollBtlConfs := []*nsCollBtlConfig{
+		{
+			namespace: "ns",
+			btlConfig: map[string]uint64{"coll": 0},
+		},
+	}
 	provider1 := testutilNewProviderWithCollectionConfig(
 		t,
-		"ns",
-		map[string]uint64{"coll": 0},
+		nsCollBtlConfs,
 		conf,
 	)
 	defer provider1.Close()
@@ -328,8 +333,7 @@ func TestKVLedgerDBRecovery(t *testing.T) {
 	// StateDB and HistoryDB should be recovered before returning from NewKVLedger call
 	provider2 := testutilNewProviderWithCollectionConfig(
 		t,
-		"ns",
-		map[string]uint64{"coll": 0},
+		nsCollBtlConfs,
 		conf,
 	)
 	defer provider2.Close()
@@ -385,8 +389,7 @@ func TestKVLedgerDBRecovery(t *testing.T) {
 	// history DB should be recovered before returning from NewKVLedger call
 	provider3 := testutilNewProviderWithCollectionConfig(
 		t,
-		"ns",
-		map[string]uint64{"coll": 0},
+		nsCollBtlConfs,
 		conf,
 	)
 	defer provider3.Close()
@@ -443,8 +446,7 @@ func TestKVLedgerDBRecovery(t *testing.T) {
 	// state DB should be recovered before returning from NewKVLedger call
 	provider4 := testutilNewProviderWithCollectionConfig(
 		t,
-		"ns",
-		map[string]uint64{"coll": 0},
+		nsCollBtlConfs,
 		conf,
 	)
 	defer provider4.Close()
diff --git a/core/ledger/kvledger/snapshot_test.go b/core/ledger/kvledger/snapshot_test.go
index e8df11d93b5..9c5f9438997 100644
--- a/core/ledger/kvledger/snapshot_test.go
+++ b/core/ledger/kvledger/snapshot_test.go
@@ -31,10 +31,15 @@ func TestGenerateSnapshot(t *testing.T) {
 	conf, cleanup := testConfig(t)
 	defer cleanup()
 	snapshotRootDir := conf.SnapshotsConfig.RootDir
+	nsCollBtlConfs := []*nsCollBtlConfig{
+		{
+			namespace: "ns",
+			btlConfig: map[string]uint64{"coll": 0},
+		},
+	}
 	provider := testutilNewProviderWithCollectionConfig(
 		t,
-		"ns",
-		map[string]uint64{"coll": 0},
+		nsCollBtlConfs,
 		conf,
 	)
 	defer provider.Close()
diff --git a/core/ledger/kvledger/txmgmt/pvtstatepurgemgmt/purge_mgr.go b/core/ledger/kvledger/txmgmt/pvtstatepurgemgmt/purge_mgr.go
index a9bf41b4f97..4c6122c04ec 100644
--- a/core/ledger/kvledger/txmgmt/pvtstatepurgemgmt/purge_mgr.go
+++ b/core/ledger/kvledger/txmgmt/pvtstatepurgemgmt/purge_mgr.go
@@ -78,7 +78,9 @@ func (p *PurgeMgr) UpdateExpiryInfoOfPvtDataOfOldBlocks(pvtUpdates *privacyenabl
 	builder := newExpiryScheduleBuilder(p.btlPolicy)
 	pvtUpdateCompositeKeyMap := pvtUpdates.ToCompositeKeyMap()
 	for k, vv := range pvtUpdateCompositeKeyMap {
-		builder.add(k.Namespace, k.CollectionName, k.Key, util.ComputeStringHash(k.Key), vv)
+		if err := builder.add(k.Namespace, k.CollectionName, k.Key, util.ComputeStringHash(k.Key), vv); err != nil {
+			return err
+		}
 	}
 
 	var expiryInfoUpdates []*expiryInfo
@@ -212,7 +214,10 @@ func (p *PurgeMgr) prepareWorkingsetFor(expiringAtBlk uint64) *workingset {
 	// Transform the keys into the form such that for each hashed key that is eligible for purge appears in 'toPurge'
 	toPurge := transformToExpiryInfoMap(expiryInfo)
 	// Load the latest versions of the hashed keys
-	p.preloadCommittedVersionsInCache(toPurge)
+	if err = p.preloadCommittedVersionsInCache(toPurge); err != nil {
+		workingset.err = err
+		return workingset
+	}
 
 	var expiryInfoKeysToClear []*expiryInfoKey
 	if len(toPurge) == 0 {
@@ -266,15 +271,15 @@ func (p *PurgeMgr) prepareWorkingsetFor(expiringAtBlk uint64) *workingset {
 	return workingset
 }
 
-func (p *PurgeMgr) preloadCommittedVersionsInCache(expInfoMap expiryInfoMap) {
+func (p *PurgeMgr) preloadCommittedVersionsInCache(expInfoMap expiryInfoMap) error {
 	if !p.db.IsBulkOptimizable() {
-		return
+		return nil
 	}
 	var hashedKeys []*privacyenabledstate.HashedCompositeKey
 	for k := range expInfoMap {
 		hashedKeys = append(hashedKeys, &k)
 	}
-	p.db.LoadCommittedVersionsOfPubAndHashedKeys(nil, hashedKeys)
+	return p.db.LoadCommittedVersionsOfPubAndHashedKeys(nil, hashedKeys)
 }
 
 func transformToExpiryInfoMap(expiryInfo []*expiryInfo) expiryInfoMap {
diff --git a/core/ledger/kvledger/txmgmt/queryutil/iterator_combiner.go b/core/ledger/kvledger/txmgmt/queryutil/iterator_combiner.go
index 56fbf8e35e0..7a8914ddbb0 100644
--- a/core/ledger/kvledger/txmgmt/queryutil/iterator_combiner.go
+++ b/core/ledger/kvledger/txmgmt/queryutil/iterator_combiner.go
@@ -67,7 +67,9 @@ func (combiner *itrCombiner) Next() (commonledger.QueryResult, error) {
 		}
 	}
 	kv := combiner.kvAt(smallestHolderIndex)
-	combiner.moveItrAndRemoveIfExhausted(smallestHolderIndex)
+	if _, err := combiner.moveItrAndRemoveIfExhausted(smallestHolderIndex); err != nil {
+		return nil, err
+	}
 	if kv.IsDelete() {
 		return combiner.Next()
 	}
diff --git a/core/ledger/kvledger/txmgmt/rwsetutil/query_results_helper.go b/core/ledger/kvledger/txmgmt/rwsetutil/query_results_helper.go
index 37709c22dd4..2f5fd5bf001 100644
--- a/core/ledger/kvledger/txmgmt/rwsetutil/query_results_helper.go
+++ b/core/ledger/kvledger/txmgmt/rwsetutil/query_results_helper.go
@@ -106,7 +106,9 @@ func (helper *RangeQueryResultsHelper) Done() ([]*kvrwset.KVRead, *kvrwset.Query
 			return helper.pendingResults, nil, err
 		}
 	}
-	helper.mt.done()
+	if err := helper.mt.done(); err != nil {
+		return nil, nil, err
+	}
 	return helper.pendingResults, helper.mt.getSummery(), nil
 }
 
@@ -132,8 +134,7 @@ func (helper *RangeQueryResultsHelper) processPendingResults() error {
 	if err != nil {
 		return err
 	}
-	helper.mt.update(hash)
-	return nil
+	return helper.mt.update(hash)
 }
 
 func serializeKVReads(kvReads []*kvrwset.KVRead) ([]byte, error) {
diff --git a/core/ledger/kvledger/txmgmt/txmgr/lockbased_txmgr.go b/core/ledger/kvledger/txmgmt/txmgr/lockbased_txmgr.go
index 9980c95617c..ef88374a706 100644
--- a/core/ledger/kvledger/txmgmt/txmgr/lockbased_txmgr.go
+++ b/core/ledger/kvledger/txmgmt/txmgr/lockbased_txmgr.go
@@ -106,7 +106,9 @@ func NewLockBasedTxMgr(initializer *Initializer) (*LockBasedTxMgr, error) {
 		return nil, errors.New("create new lock based TxMgr failed: passed in nil ledger hasher")
 	}
 
-	initializer.DB.Open()
+	if err := initializer.DB.Open(); err != nil {
+		return nil, err
+	}
 	txmgr := &LockBasedTxMgr{
 		ledgerid: initializer.LedgerID,
 		db: initializer.DB,
diff --git a/core/ledger/kvledger/txmgmt/validation/rangequery_validator.go b/core/ledger/kvledger/txmgmt/validation/rangequery_validator.go
index 1b706ca9988..4339268d7e0 100644
--- a/core/ledger/kvledger/txmgmt/validation/rangequery_validator.go
+++ b/core/ledger/kvledger/txmgmt/validation/rangequery_validator.go
@@ -119,7 +119,9 @@ func (v *rangeQueryHashValidator) validate() (bool, error) {
 			return equals, nil
 		}
 		versionedKV := result.(*statedb.VersionedKV)
-		v.resultsHelper.AddResult(rwsetutil.NewKVRead(versionedKV.Key, versionedKV.Version))
+		if err := v.resultsHelper.AddResult(rwsetutil.NewKVRead(versionedKV.Key, versionedKV.Version)); err != nil {
+			return false, err
+		}
 		merkle := v.resultsHelper.GetMerkleSummary()
 
 		if merkle.MaxLevel < inMerkle.MaxLevel {
diff --git a/core/ledger/kvledger/txmgmt/validation/tx_ops.go b/core/ledger/kvledger/txmgmt/validation/tx_ops.go
index 74903b5754f..f4ccda53319 100644
--- a/core/ledger/kvledger/txmgmt/validation/tx_ops.go
+++ b/core/ledger/kvledger/txmgmt/validation/tx_ops.go
@@ -18,8 +18,9 @@ import (
 
 func prepareTxOps(rwset *rwsetutil.TxRwSet, txht *version.Height, precedingUpdates *publicAndHashUpdates, db *privacyenabledstate.DB) (txOps, error) {
 	txops := txOps{}
-	txops.applyTxRwset(rwset)
-	//logger.Debugf("prepareTxOps() txops after applying raw rwset=%#v", spew.Sdump(txops))
+	if err := txops.applyTxRwset(rwset); err != nil {
+		return nil, err
+	}
 	for ck, keyop := range txops {
 		// check if the final state of the key, value and metadata, is already present in the transaction, then skip
 		// otherwise we need to retrieve latest state and merge in the current value or metadata update
@@ -62,7 +63,9 @@ func (txops txOps) applyTxRwset(rwset *rwsetutil.TxRwSet) error {
 			txops.applyKVWrite(ns, "", kvWrite)
 		}
 		for _, kvMetadataWrite := range nsRWSet.KvRwSet.MetadataWrites {
-			txops.applyMetadata(ns, "", kvMetadataWrite)
+			if err := txops.applyMetadata(ns, "", kvMetadataWrite); err != nil {
+				return err
+			}
 		}
 
 		// apply collection level kvwrite and kvMetadataWrite
@@ -79,12 +82,14 @@ func (txops txOps) applyTxRwset(rwset *rwsetutil.TxRwSet) error {
 			}
 
 			for _, metadataWrite := range collHashRWset.HashedRwSet.MetadataWrites {
-				txops.applyMetadata(ns, coll,
+				if err := txops.applyMetadata(ns, coll,
 					&kvrwset.KVMetadataWrite{
 						Key:     string(metadataWrite.KeyHash),
 						Entries: metadataWrite.Entries,
 					},
-				)
+				); err != nil {
+					return err
+				}
 			}
 		}
 	}
diff --git a/core/ledger/kvledger/txmgmt/validation/validator.go b/core/ledger/kvledger/txmgmt/validation/validator.go
index cdca57a3d6d..acb239a51c2 100644
--- a/core/ledger/kvledger/txmgmt/validation/validator.go
+++ b/core/ledger/kvledger/txmgmt/validation/validator.go
@@ -102,7 +102,9 @@ func (v *validator) validateAndPrepareBatch(blk *block, doMVCCValidation bool) (
 		if validationCode == peer.TxValidationCode_VALID {
 			logger.Debugf("Block [%d] Transaction index [%d] TxId [%s] marked as valid by state validator. ContainsPostOrderWrites [%t]", blk.num, tx.indexInBlock, tx.id, tx.containsPostOrderWrites)
 			committingTxHeight := version.NewHeight(blk.num, uint64(tx.indexInBlock))
-			updates.applyWriteSet(tx.rwset, committingTxHeight, v.db, tx.containsPostOrderWrites)
+			if err := updates.applyWriteSet(tx.rwset, committingTxHeight, v.db, tx.containsPostOrderWrites); err != nil {
+				return nil, err
+			}
 		} else {
 			logger.Warningf("Block [%d] Transaction index [%d] TxId [%s] marked as invalid by state validator. Reason code [%s]",
 				blk.num, tx.indexInBlock, tx.id, validationCode.String())
@@ -228,7 +230,9 @@ func (v *validator) validateRangeQuery(ns string, rangeQueryInfo *kvrwset.RangeQ
 		logger.Debug(`Hashing results are not present in the range query info hence, initiating raw KVReads based validation`)
 		qv = &rangeQueryResultsValidator{}
 	}
-	qv.init(rangeQueryInfo, combinedItr)
+	if err := qv.init(rangeQueryInfo, combinedItr); err != nil {
+		return false, err
+	}
 	return qv.validate()
 }
 
diff --git a/core/ledger/pvtdatastorage/helper.go b/core/ledger/pvtdatastorage/helper.go
index 21324025d8a..c9e5c0cb536 100644
--- a/core/ledger/pvtdatastorage/helper.go
+++ b/core/ledger/pvtdatastorage/helper.go
@@ -78,12 +78,16 @@ func prepareExpiryEntries(committingBlk uint64, dataEntries []*dataEntry, missin
 
 	// 1. prepare expiryData for non-missing data
 	for _, dataEntry := range dataEntries {
-		prepareExpiryEntriesForPresentData(mapByExpiringBlk, dataEntry.key, btlPolicy)
+		if err := prepareExpiryEntriesForPresentData(mapByExpiringBlk, dataEntry.key, btlPolicy); err != nil {
+			return nil, err
+		}
 	}
 
 	// 2. prepare expiryData for missing data
 	for missingDataKey := range missingDataEntries {
-		prepareExpiryEntriesForMissingData(mapByExpiringBlk, &missingDataKey, btlPolicy)
+		if err := prepareExpiryEntriesForMissingData(mapByExpiringBlk, &missingDataKey, btlPolicy); err != nil {
+			return nil, err
+		}
 	}
 
 	for expiryBlk, expiryData := range mapByExpiringBlk {
diff --git a/core/ledger/pvtdatastorage/store.go b/core/ledger/pvtdatastorage/store.go
index 9182b8c6ada..180c01d9ecf 100644
--- a/core/ledger/pvtdatastorage/store.go
+++ b/core/ledger/pvtdatastorage/store.go
@@ -776,7 +776,9 @@ func (s *Store) purgeExpiredData(minBlkNum, maxBlkNum uint64) error {
 		for _, missingDataKey := range missingDataKeys {
 			batch.Delete(encodeMissingDataKey(missingDataKey))
 		}
-		s.db.WriteBatch(batch, false)
+		if err := s.db.WriteBatch(batch, false); err != nil {
+			return err
+		}
 	}
 	logger.Infof("[%s] - [%d] Entries purged from private data storage till block number [%d]", s.ledgerid, len(expiryEntries), maxBlkNum)
 	return nil