diff --git a/modAionImpl/src/org/aion/zero/impl/AionHub.java b/modAionImpl/src/org/aion/zero/impl/AionHub.java
index 6e3ddd9ba3..0328d86f9f 100644
--- a/modAionImpl/src/org/aion/zero/impl/AionHub.java
+++ b/modAionImpl/src/org/aion/zero/impl/AionHub.java
@@ -3,18 +3,18 @@
*
* This file is part of the aion network project.
*
- * The aion network project is free software: you can redistribute it
- * and/or modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, either version 3 of
+ * The aion network project is free software: you can redistribute it
+ * and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 3 of
* the License, or any later version.
*
- * The aion network project is distributed in the hope that it will
- * be useful, but WITHOUT ANY WARRANTY; without even the implied
- * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * The aion network project is distributed in the hope that it will
+ * be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with the aion network project source files.
+ * along with the aion network project source files.
* If not, see .
*
* Contributors:
@@ -271,9 +271,23 @@ private void loadBlockchain() {
bestBlock != null && // recover only for non-null blocks
!this.repository.isValidRoot(bestBlock.getStateRoot())) {
+ LOG.info("Recovery initiated due to corrupt world state at block " + bestBlock.getNumber() + ".");
+
long bestBlockNumber = bestBlock.getNumber();
byte[] bestBlockRoot = bestBlock.getStateRoot();
+ // ensure that the genesis state exists before attempting recovery
+ AionGenesis genesis = cfg.getGenesis();
+ if (!this.repository.isValidRoot(genesis.getStateRoot())) {
+ LOG.info(
+ "Corrupt world state for genesis block hash: " + genesis.getShortHash() + ", number: " + genesis
+ .getNumber() + ".");
+
+ buildGenesis(genesis);
+
+ LOG.info("Rebuilding genesis block SUCCEEDED.");
+ }
+
recovered = this.blockchain.recoverWorldState(this.repository, bestBlock);
if (!this.repository.isValidRoot(bestBlock.getStateRoot())) {
@@ -324,24 +338,8 @@ private void loadBlockchain() {
AionGenesis genesis = cfg.getGenesis();
- // initialization section for network balance contract
- IRepositoryCache track = repository.startTracking();
-
- Address networkBalanceAddress = PrecompiledContracts.totalCurrencyAddress;
- track.createAccount(networkBalanceAddress);
+ buildGenesis(genesis);
- for (Map.Entry addr : genesis.getNetworkBalances().entrySet()) {
- track.addStorageRow(networkBalanceAddress, new DataWord(addr.getKey()), new DataWord(addr.getValue()));
- }
-
- for (Address addr : genesis.getPremine().keySet()) {
- track.createAccount(addr);
- track.addBalance(addr, genesis.getPremine().get(addr).getBalance());
- }
- track.flush();
-
- repository.commitBlock(genesis.getHeader());
- this.repository.getBlockStore().saveBlock(genesis, genesis.getDifficultyBI(), true);
blockchain.setBestBlock(genesis);
blockchain.setTotalDifficulty(genesis.getDifficultyBI());
@@ -396,6 +394,30 @@ private void loadBlockchain() {
// this.repository.getBlockStore().load();
}
+ private void buildGenesis(AionGenesis genesis) {
+ // initialization section for network balance contract
+ IRepositoryCache track = repository.startTracking();
+
+ Address networkBalanceAddress = PrecompiledContracts.totalCurrencyAddress;
+ track.createAccount(networkBalanceAddress);
+
+ for (Map.Entry addr : genesis.getNetworkBalances().entrySet()) {
+ track.addStorageRow(
+ networkBalanceAddress,
+ new DataWord(addr.getKey()),
+ new DataWord(addr.getValue()));
+ }
+
+ for (Address addr : genesis.getPremine().keySet()) {
+ track.createAccount(addr);
+ track.addBalance(addr, genesis.getPremine().get(addr).getBalance());
+ }
+ track.flush();
+
+ this.repository.commitBlock(genesis.getHeader());
+ this.repository.getBlockStore().saveBlock(genesis, genesis.getCumulativeDifficulty(), true);
+ }
+
public void close() {
LOG.info("");
diff --git a/modAionImpl/src/org/aion/zero/impl/cli/Cli.java b/modAionImpl/src/org/aion/zero/impl/cli/Cli.java
index c21424dfa9..c7689ecad4 100644
--- a/modAionImpl/src/org/aion/zero/impl/cli/Cli.java
+++ b/modAionImpl/src/org/aion/zero/impl/cli/Cli.java
@@ -132,11 +132,52 @@ public int call(final String[] args, final Cfg cfg) {
}
}
break;
+ case "--dump-state-size":
+ long block_count = 2L;
+
+ if (args.length < 2) {
+ System.out.println("Retrieving state size for top " + block_count + " blocks.");
+ RecoveryUtils.printStateTrieSize(block_count);
+ } else {
+ try {
+ block_count = Long.parseLong(args[1]);
+ } catch (NumberFormatException e) {
+ System.out.println("The given argument <" + args[1] + "> cannot be converted to a number.");
+ }
+ if (block_count < 1) {
+ System.out.println("The given argument <" + args[1] + "> is not valid.");
+ block_count = 2L;
+ }
+
+ System.out.println("Retrieving state size for top " + block_count + " blocks.");
+ RecoveryUtils.printStateTrieSize(block_count);
+ }
+ break;
+ case "--dump-state":
+ long level = -1L;
+
+ if (args.length < 2) {
+ System.out.println("Retrieving state for top main chain block...");
+ RecoveryUtils.printStateTrieDump(level);
+ } else {
+ try {
+ level = Long.parseLong(args[1]);
+ } catch (NumberFormatException e) {
+ System.out.println("The given argument <" + args[1] + "> cannot be converted to a number.");
+ }
+ if (level == -1L) {
+ System.out.println("Retrieving state for top main chain block...");
+ } else {
+ System.out.println("Retrieving state for main chain block at level " + level + "...");
+ }
+ RecoveryUtils.printStateTrieDump(level);
+ }
+ break;
case "--db-compact":
RecoveryUtils.dbCompact();
break;
case "--dump-blocks":
- long count = 100L;
+ long count = 10L;
if (args.length < 2) {
System.out.println("Printing top " + count + " blocks from database.");
@@ -147,10 +188,14 @@ public int call(final String[] args, final Cfg cfg) {
} catch (NumberFormatException e) {
System.out.println("The given argument <" + args[1] + "> cannot be converted to a number.");
}
+ if (count < 1) {
+ System.out.println("The given argument <" + args[1] + "> is not valid.");
+ count = 10L;
+ }
+
System.out.println("Printing top " + count + " blocks from database.");
RecoveryUtils.dumpBlocks(count);
}
- System.out.println("Finished printing blocks.");
break;
case "-v":
System.out.println("\nVersion");
diff --git a/modAionImpl/src/org/aion/zero/impl/db/AionBlockStore.java b/modAionImpl/src/org/aion/zero/impl/db/AionBlockStore.java
index ada78a1c12..9ee514135b 100644
--- a/modAionImpl/src/org/aion/zero/impl/db/AionBlockStore.java
+++ b/modAionImpl/src/org/aion/zero/impl/db/AionBlockStore.java
@@ -750,11 +750,14 @@ public BigInteger correctIndexEntry(AionBlock block, BigInteger parentTotalDiffi
}
}
- public void dumpPastBlocks(long numberOfBlocks, String reportsFolder) throws IOException {
+ public String dumpPastBlocks(long numberOfBlocks, String reportsFolder) throws IOException {
lock.readLock().lock();
try {
long firstBlock = getMaxNumber();
+ if (firstBlock < 0) {
+ return null;
+ }
long lastBlock = firstBlock - numberOfBlocks;
File file = new File(reportsFolder, System.currentTimeMillis() + "-blocks-report.out");
@@ -787,6 +790,7 @@ public void dumpPastBlocks(long numberOfBlocks, String reportsFolder) throws IOE
}
writer.close();
+ return file.getName();
} finally {
lock.readLock().unlock();
}
diff --git a/modAionImpl/src/org/aion/zero/impl/db/AionRepositoryImpl.java b/modAionImpl/src/org/aion/zero/impl/db/AionRepositoryImpl.java
index 58824e11f2..057312da49 100644
--- a/modAionImpl/src/org/aion/zero/impl/db/AionRepositoryImpl.java
+++ b/modAionImpl/src/org/aion/zero/impl/db/AionRepositoryImpl.java
@@ -76,7 +76,12 @@ private static class AionRepositoryImplHolder {
// repository singleton instance
private final static AionRepositoryImpl inst = new AionRepositoryImpl(
new RepositoryConfig(new File(config.getBasePath(), config.getDb().getPath()).getAbsolutePath(),
- -1,
+ config.getDb().getPrune() > 0 ?
+ // if the value is smaller than backward step
+ // there is the risk of importing state-less blocks after reboot
+ (128 > config.getDb().getPrune() ? 128 : config.getDb().getPrune()) :
+ // negative value => pruning disabled
+ config.getDb().getPrune(),
ContractDetailsAion.getInstance(),
config.getDb()));
}
@@ -115,7 +120,7 @@ public TransactionStore getTransacti
}
private Trie createStateTrie() {
- return new SecureTrie(stateDatabase).withPruningEnabled(pruneBlockCount >= 0);
+ return new SecureTrie(stateDSPrune).withPruningEnabled(pruneBlockCount > 0);
}
@Override
@@ -501,34 +506,31 @@ public void commitBlock(A0BlockHeader blockHeader) {
worldState.sync();
detailsDS.syncLargeStorage();
- // temporarily removed since never used
- /* if (pruneBlockCount >= 0) {
- stateDSPrune.storeBlockChanges(blockHeader);
- detailsDS.getStorageDSPrune().storeBlockChanges(blockHeader);
- pruneBlocks(blockHeader);
- } */
+ if (pruneBlockCount > 0) {
+ stateDSPrune.storeBlockChanges(blockHeader);
+ detailsDS.getStorageDSPrune().storeBlockChanges(blockHeader);
+ pruneBlocks(blockHeader);
+ }
} finally {
rwLock.writeLock().unlock();
}
}
- // TODO-AR: reenable state pruning
- // temporarily removed since never used
- /* private void pruneBlocks(A0BlockHeader curBlock) {
- if (curBlock.getNumber() > bestBlockNumber) { // pruning only on
- // increasing blocks
+ private void pruneBlocks(A0BlockHeader curBlock) {
+ if (curBlock.getNumber() > bestBlockNumber) {
+ // pruning only on increasing blocks
long pruneBlockNumber = curBlock.getNumber() - pruneBlockCount;
if (pruneBlockNumber >= 0) {
byte[] pruneBlockHash = blockStore.getBlockHashByNumber(pruneBlockNumber);
if (pruneBlockHash != null) {
A0BlockHeader header = blockStore.getBlockByHash(pruneBlockHash).getHeader();
- // stateDSPrune.prune(header);
- // detailsDS.getStorageDSPrune().prune(header);
+ stateDSPrune.prune(header);
+ detailsDS.getStorageDSPrune().prune(header);
}
}
}
bestBlockNumber = curBlock.getNumber();
- } */
+ }
public Trie getWorldState() {
return worldState;
@@ -543,7 +545,7 @@ public IRepository getSnapshotTo(byte[] root) {
repo.blockStore = blockStore;
repo.cfg = cfg;
repo.stateDatabase = this.stateDatabase;
- // repo.stateDSPrune = this.stateDSPrune;
+ repo.stateDSPrune = this.stateDSPrune;
repo.pruneBlockCount = this.pruneBlockCount;
repo.detailsDS = this.detailsDS;
repo.isSnapshot = true;
diff --git a/modAionImpl/src/org/aion/zero/impl/db/RecoveryUtils.java b/modAionImpl/src/org/aion/zero/impl/db/RecoveryUtils.java
index f0f109a1f9..fa5590f630 100644
--- a/modAionImpl/src/org/aion/zero/impl/db/RecoveryUtils.java
+++ b/modAionImpl/src/org/aion/zero/impl/db/RecoveryUtils.java
@@ -28,8 +28,8 @@
import org.aion.zero.impl.AionBlockchainImpl;
import org.aion.zero.impl.config.CfgAion;
import org.aion.zero.impl.core.IAionBlockchain;
+import org.aion.zero.impl.types.AionBlock;
-import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
@@ -138,8 +138,8 @@ public static void dumpBlocks(long count) {
cfg.getDb().setHeapCacheEnabled(false);
Map cfgLog = new HashMap<>();
- cfgLog.put("DB", "INFO");
- cfgLog.put("GEN", "INFO");
+ cfgLog.put("DB", "ERROR");
+ cfgLog.put("GEN", "ERROR");
AionLoggerFactory.init(cfgLog);
@@ -148,7 +148,12 @@ public static void dumpBlocks(long count) {
AionBlockStore store = repository.getBlockStore();
try {
- store.dumpPastBlocks(count, cfg.getBasePath());
+ String file = store.dumpPastBlocks(count, cfg.getBasePath());
+ if (file == null) {
+ System.out.println("The database is empty. Cannot print block information.");
+ } else {
+ System.out.println("Block information stored in " + file);
+ }
} catch (IOException e) {
e.printStackTrace();
}
@@ -203,4 +208,97 @@ public static Status revertTo(IAionBlockchain blockchain, long nbBlock) {
// ok if we managed to get down to the expected block
return (nbBestBlock == nbBlock) ? Status.SUCCESS : Status.FAILURE;
}
+
+ public static void printStateTrieSize(long blockNumber) {
+ // ensure mining is disabled
+ CfgAion cfg = CfgAion.inst();
+ cfg.dbFromXML();
+ cfg.getConsensus().setMining(false);
+
+ Map cfgLog = new HashMap<>();
+ cfgLog.put("DB", "ERROR");
+
+ AionLoggerFactory.init(cfgLog);
+
+ // get the current blockchain
+ AionRepositoryImpl repository = AionRepositoryImpl.inst();
+ AionBlockStore store = repository.getBlockStore();
+
+ long topBlock = store.getMaxNumber();
+ if (topBlock < 0) {
+ System.out.println("The database is empty. Cannot print block information.");
+ return;
+ }
+
+ long targetBlock = topBlock - blockNumber + 1;
+ if (targetBlock < 0) {
+ targetBlock = 0;
+ }
+
+ AionBlock block;
+ byte[] stateRoot;
+
+ while (targetBlock <= topBlock) {
+ block = store.getChainBlockByNumber(targetBlock);
+ if (block != null) {
+ stateRoot = block.getStateRoot();
+ try {
+ System.out.println(
+ "Block hash: " + block.getShortHash() + ", number: " + block.getNumber() + ", tx count: "
+ + block.getTransactionsList().size() + ", state trie kv count = " + repository
+ .getWorldState().getTrieSize(stateRoot));
+ } catch (RuntimeException e) {
+ System.out.println(
+ "Block hash: " + block.getShortHash() + ", number: " + block.getNumber() + ", tx count: "
+ + block.getTransactionsList().size() + ", state trie kv count threw exception: " + e
+ .getMessage());
+ }
+ } else {
+ System.out.println("Null block found at level " + targetBlock + ".");
+ }
+ targetBlock++;
+ }
+
+ repository.close();
+ }
+
+ public static void printStateTrieDump(long blockNumber) {
+ // ensure mining is disabled
+ CfgAion cfg = CfgAion.inst();
+ cfg.dbFromXML();
+ cfg.getConsensus().setMining(false);
+
+ Map cfgLog = new HashMap<>();
+ cfgLog.put("DB", "ERROR");
+
+ AionLoggerFactory.init(cfgLog);
+
+ // get the current blockchain
+ AionRepositoryImpl repository = AionRepositoryImpl.inst();
+
+ AionBlockStore store = repository.getBlockStore();
+
+ AionBlock block;
+
+ if (blockNumber == -1L) {
+ block = store.getBestBlock();
+ if (block == null) {
+ System.out.println("The requested block does not exist in the database.");
+ return;
+ }
+ blockNumber = block.getNumber();
+ } else {
+ block = store.getChainBlockByNumber(blockNumber);
+ if (block == null) {
+ System.out.println("The requested block does not exist in the database.");
+ return;
+ }
+ }
+
+ byte[] stateRoot = block.getStateRoot();
+ System.out.println("\nBlock hash: " + block.getShortHash() + ", number: " + blockNumber + ", tx count: " + block
+ .getTransactionsList().size() + "\n\n" + repository.getWorldState().getTrieDump(stateRoot));
+
+ repository.close();
+ }
}
diff --git a/modAionImpl/test/org/aion/zero/impl/BlockchainDataRecoveryTest.java b/modAionImpl/test/org/aion/zero/impl/BlockchainDataRecoveryTest.java
index 993ba41f6a..2743ffa95f 100644
--- a/modAionImpl/test/org/aion/zero/impl/BlockchainDataRecoveryTest.java
+++ b/modAionImpl/test/org/aion/zero/impl/BlockchainDataRecoveryTest.java
@@ -98,11 +98,12 @@ public void testRecoverWorldStateWithPartialWorldState() {
AionBlock bestBlock = chain.getBestBlock();
assertThat(bestBlock.getNumber()).isEqualTo(NUMBER_OF_BLOCKS);
- chain.getRepository().flush();
+ AionRepositoryImpl repo = (AionRepositoryImpl) chain.getRepository();
+ repo.flush();
// delete some world state root entries from the database
- TrieImpl trie = (TrieImpl) ((AionRepositoryImpl) chain.getRepository()).getWorldState();
- IByteArrayKeyValueDatabase database = (IByteArrayKeyValueDatabase) trie.getCache().getDb();
+ TrieImpl trie = (TrieImpl) repo.getWorldState();
+ IByteArrayKeyValueDatabase database = repo.getStateDatabase();
for (byte[] key : statesToDelete) {
database.delete(key);
@@ -113,7 +114,7 @@ public void testRecoverWorldStateWithPartialWorldState() {
assertThat(trie.isValidRoot(chain.getBestBlock().getStateRoot())).isFalse();
// call the recovery functionality
- boolean worked = chain.recoverWorldState(chain.getRepository(), bestBlock);
+ boolean worked = chain.recoverWorldState(repo, bestBlock);
// ensure that the blockchain is ok
assertThat(chain.getBestBlockHash()).isEqualTo(bestBlock.getHash());
@@ -148,12 +149,13 @@ public void testRecoverWorldStateWithStartFromGenesis() {
AionBlock bestBlock = chain.getBestBlock();
assertThat(bestBlock.getNumber()).isEqualTo(NUMBER_OF_BLOCKS);
- chain.getRepository().flush();
+ AionRepositoryImpl repo = (AionRepositoryImpl) chain.getRepository();
+ repo.flush();
// System.out.println(Hex.toHexString(chain.getRepository().getRoot()));
// delete some world state root entries from the database
- TrieImpl trie = (TrieImpl) ((AionRepositoryImpl) chain.getRepository()).getWorldState();
- IByteArrayKeyValueDatabase database = (IByteArrayKeyValueDatabase) trie.getCache().getDb();
+ TrieImpl trie = (TrieImpl) repo.getWorldState();
+ IByteArrayKeyValueDatabase database = repo.getStateDatabase();
for (byte[] key : statesToDelete) {
database.delete(key);
@@ -165,7 +167,7 @@ public void testRecoverWorldStateWithStartFromGenesis() {
assertThat(trie.isValidRoot(chain.getBestBlock().getStateRoot())).isFalse();
// call the recovery functionality
- boolean worked = chain.recoverWorldState(chain.getRepository(), bestBlock);
+ boolean worked = chain.recoverWorldState(repo, bestBlock);
// ensure that the blockchain is ok
assertThat(chain.getBestBlockHash()).isEqualTo(bestBlock.getHash());
@@ -200,11 +202,12 @@ public void testRecoverWorldStateWithoutGenesis() {
AionBlock bestBlock = chain.getBestBlock();
assertThat(bestBlock.getNumber()).isEqualTo(NUMBER_OF_BLOCKS);
- chain.getRepository().flush();
+ AionRepositoryImpl repo = (AionRepositoryImpl) chain.getRepository();
+ repo.flush();
// delete some world state root entries from the database
- TrieImpl trie = (TrieImpl) ((AionRepositoryImpl) chain.getRepository()).getWorldState();
- IByteArrayKeyValueDatabase database = (IByteArrayKeyValueDatabase) trie.getCache().getDb();
+ TrieImpl trie = (TrieImpl) repo.getWorldState();
+ IByteArrayKeyValueDatabase database = repo.getStateDatabase();
List statesToDelete = new ArrayList<>();
statesToDelete.addAll(database.keys());
@@ -218,7 +221,7 @@ public void testRecoverWorldStateWithoutGenesis() {
assertThat(trie.isValidRoot(chain.getBestBlock().getStateRoot())).isFalse();
// call the recovery functionality
- boolean worked = chain.recoverWorldState(chain.getRepository(), bestBlock);
+ boolean worked = chain.recoverWorldState(repo, bestBlock);
// ensure that the blockchain is ok
assertThat(chain.getBestBlockHash()).isEqualTo(bestBlock.getHash());
@@ -259,10 +262,10 @@ public void testRecoverIndexWithPartialIndex_MainChain() {
AionBlock bestBlock = chain.getBestBlock();
assertThat(bestBlock.getNumber()).isEqualTo(NUMBER_OF_BLOCKS);
- chain.getRepository().flush();
+ AionRepositoryImpl repo = (AionRepositoryImpl) chain.getRepository();
+ repo.flush();
// delete index entries from the database
- AionRepositoryImpl repo = (AionRepositoryImpl) chain.getRepository();
IByteArrayKeyValueDatabase indexDatabase = repo.getIndexDatabase();
Map deletedInfo = new HashMap<>();
@@ -281,7 +284,7 @@ public void testRecoverIndexWithPartialIndex_MainChain() {
assertThat(repo.isIndexed(bestBlock.getHash(), bestBlock.getNumber())).isFalse();
// call the recovery functionality
- boolean worked = chain.recoverIndexEntry(chain.getRepository(), bestBlock);
+ boolean worked = chain.recoverIndexEntry(repo, bestBlock);
// ensure that the blockchain is ok
assertThat(chain.getBestBlockHash()).isEqualTo(bestBlock.getHash());
@@ -369,10 +372,10 @@ public void testRecoverIndexWithPartialIndex_ShorterSideChain() {
assertThat(bestBlock.getNumber()).isEqualTo(NUMBER_OF_BLOCKS);
assertThat(bestBlock.getHash()).isEqualTo(mainChainBlock.getHash());
- chain.getRepository().flush();
+ AionRepositoryImpl repo = (AionRepositoryImpl) chain.getRepository();
+ repo.flush();
// delete index entries from the database
- AionRepositoryImpl repo = (AionRepositoryImpl) chain.getRepository();
IByteArrayKeyValueDatabase indexDatabase = repo.getIndexDatabase();
Map deletedInfo = new HashMap<>();
@@ -388,8 +391,7 @@ public void testRecoverIndexWithPartialIndex_ShorterSideChain() {
}
// call the recovery functionality for the main chain subsection
- boolean worked = chain
- .recoverIndexEntry(chain.getRepository(), chain.getBlockByHash(mainChainBlock.getParentHash()));
+ boolean worked = chain.recoverIndexEntry(repo, chain.getBlockByHash(mainChainBlock.getParentHash()));
// ensure that the index was corrupted only for the side chain
assertThat(repo.isIndexed(sideChainBlock.getHash(), sideChainBlock.getNumber())).isFalse();
@@ -397,7 +399,7 @@ public void testRecoverIndexWithPartialIndex_ShorterSideChain() {
assertThat(worked).isTrue();
// call the recovery functionality
- worked = chain.recoverIndexEntry(chain.getRepository(), sideChainBlock);
+ worked = chain.recoverIndexEntry(repo, sideChainBlock);
// ensure that the blockchain is ok
assertThat(chain.getBestBlockHash()).isEqualTo(bestBlock.getHash());
@@ -448,10 +450,10 @@ public void testRecoverIndexWithStartFromGenesis() {
AionBlock bestBlock = chain.getBestBlock();
assertThat(bestBlock.getNumber()).isEqualTo(NUMBER_OF_BLOCKS);
- chain.getRepository().flush();
+ AionRepositoryImpl repo = (AionRepositoryImpl) chain.getRepository();
+ repo.flush();
// delete index entries from the database
- AionRepositoryImpl repo = (AionRepositoryImpl) chain.getRepository();
IByteArrayKeyValueDatabase indexDatabase = repo.getIndexDatabase();
Map deletedInfo = new HashMap<>();
@@ -470,7 +472,7 @@ public void testRecoverIndexWithStartFromGenesis() {
assertThat(repo.isIndexed(bestBlock.getHash(), bestBlock.getNumber())).isFalse();
// call the recovery functionality
- boolean worked = chain.recoverIndexEntry(chain.getRepository(), bestBlock);
+ boolean worked = chain.recoverIndexEntry(repo, bestBlock);
// ensure that the blockchain is ok
assertThat(chain.getBestBlockHash()).isEqualTo(bestBlock.getHash());
@@ -519,9 +521,9 @@ public void testRecoverIndexWithoutGenesis() {
AionBlock bestBlock = chain.getBestBlock();
assertThat(bestBlock.getNumber()).isEqualTo(NUMBER_OF_BLOCKS);
- chain.getRepository().flush();
-
AionRepositoryImpl repo = (AionRepositoryImpl) chain.getRepository();
+ repo.flush();
+
IByteArrayKeyValueDatabase indexDatabase = repo.getIndexDatabase();
// deleting the entire index database
@@ -531,7 +533,7 @@ public void testRecoverIndexWithoutGenesis() {
assertThat(repo.isIndexed(bestBlock.getHash(), bestBlock.getNumber())).isFalse();
// call the recovery functionality
- boolean worked = chain.recoverIndexEntry(chain.getRepository(), bestBlock);
+ boolean worked = chain.recoverIndexEntry(repo, bestBlock);
// ensure that the best block is unchanged
assertThat(chain.getBestBlockHash()).isEqualTo(bestBlock.getHash());
@@ -566,10 +568,10 @@ public void testRecoverIndexWithStartFromGenesisWithoutSize() {
AionBlock bestBlock = chain.getBestBlock();
assertThat(bestBlock.getNumber()).isEqualTo(NUMBER_OF_BLOCKS);
- chain.getRepository().flush();
+ AionRepositoryImpl repo = (AionRepositoryImpl) chain.getRepository();
+ repo.flush();
// delete index entries from the database
- AionRepositoryImpl repo = (AionRepositoryImpl) chain.getRepository();
IByteArrayKeyValueDatabase indexDatabase = repo.getIndexDatabase();
Map deletedInfo = new HashMap<>();
@@ -592,7 +594,7 @@ public void testRecoverIndexWithStartFromGenesisWithoutSize() {
assertThat(repo.isIndexed(bestBlock.getHash(), bestBlock.getNumber())).isFalse();
// call the recovery functionality
- boolean worked = chain.recoverIndexEntry(chain.getRepository(), bestBlock);
+ boolean worked = chain.recoverIndexEntry(repo, bestBlock);
// ensure that the blockchain is ok
assertThat(chain.getBestBlockHash()).isEqualTo(bestBlock.getHash());
diff --git a/modAionImpl/test/org/aion/zero/impl/db/AionRepositoryImplTest.java b/modAionImpl/test/org/aion/zero/impl/db/AionRepositoryImplTest.java
index 44f6aa4445..412e60b5b6 100644
--- a/modAionImpl/test/org/aion/zero/impl/db/AionRepositoryImplTest.java
+++ b/modAionImpl/test/org/aion/zero/impl/db/AionRepositoryImplTest.java
@@ -1,4 +1,4 @@
-/*******************************************************************************
+/* ******************************************************************************
* Copyright (c) 2017-2018 Aion foundation.
*
* This file is part of the aion network project.
@@ -34,67 +34,70 @@
******************************************************************************/
package org.aion.zero.impl.db;
+import static com.google.common.truth.Truth.assertThat;
+
+import java.math.BigInteger;
+import java.util.Optional;
+import java.util.Properties;
import org.aion.base.db.IByteArrayKeyValueDatabase;
import org.aion.base.db.IContractDetails;
import org.aion.base.db.IRepositoryCache;
import org.aion.base.db.IRepositoryConfig;
import org.aion.base.type.Address;
import org.aion.base.util.ByteUtil;
-import org.aion.db.impl.DatabaseFactory;
-import org.aion.db.impl.leveldb.LevelDBConstants;
-import org.aion.mcf.core.AccountState;
import org.aion.crypto.HashUtil;
import org.aion.db.impl.DBVendor;
+import org.aion.db.impl.DatabaseFactory;
+import org.aion.mcf.core.AccountState;
import org.aion.mcf.db.IBlockStoreBase;
import org.aion.mcf.vm.types.DataWord;
+import org.aion.zero.db.AionContractDetailsImpl;
import org.junit.FixMethodOrder;
import org.junit.Test;
import org.junit.runners.MethodSorters;
-import org.aion.zero.db.AionContractDetailsImpl;
-import org.aion.zero.impl.db.AionRepositoryImpl;
-import org.aion.zero.impl.db.ContractDetailsAion;
-
-import java.math.BigInteger;
-import java.util.Optional;
-import java.util.Properties;
-
-import static com.google.common.truth.Truth.assertThat;
-
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class AionRepositoryImplTest {
- protected IRepositoryConfig repoConfig = new IRepositoryConfig() {
- @Override
- public String getDbPath() {
- return "";
- }
-
- @Override
- public int getPrune() {
- return 0;
- }
-
- @Override
- public IContractDetails contractDetailsImpl() {
- return ContractDetailsAion.createForTesting(0, 1000000).getDetails();
- }
-
- @Override
- public Properties getDatabaseConfig(String db_name) {
- Properties props = new Properties();
- props.setProperty(DatabaseFactory.Props.DB_TYPE, DBVendor.MOCKDB.toValue());
- props.setProperty(DatabaseFactory.Props.ENABLE_HEAP_CACHE, "false");
- return props;
- }
- };
+ protected IRepositoryConfig repoConfig =
+ new IRepositoryConfig() {
+ @Override
+ public String getDbPath() {
+ return "";
+ }
+
+ @Override
+ public int getPrune() {
+ return 0;
+ }
+
+ @Override
+ public IContractDetails contractDetailsImpl() {
+ return ContractDetailsAion.createForTesting(0, 1000000).getDetails();
+ }
+
+ @Override
+ public Properties getDatabaseConfig(String db_name) {
+ Properties props = new Properties();
+ props.setProperty(DatabaseFactory.Props.DB_TYPE, DBVendor.MOCKDB.toValue());
+ props.setProperty(DatabaseFactory.Props.ENABLE_HEAP_CACHE, "false");
+ return props;
+ }
+ };
+
+ private static String value1 =
+ "CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3";
+ private static String value2 =
+ "CAFECAFECAFECAFECAFECAFECAFECAFECAFECAFECAFECAFECAFECAFECAFECAFE";
+ private static String value3 =
+ "BEEFBEEFBEEFBEEFBEEFBEEFBEEFBEEFBEEFBEEFBEEFBEEFBEEFBEEFBEEFBEEF";
@Test
public void testAccountStateUpdate() {
AionRepositoryImpl repository = AionRepositoryImpl.createForTesting(repoConfig);
byte[] originalRoot = repository.getRoot();
- Address defaultAccount = Address.wrap(ByteUtil.hexStringToBytes("CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3"));
+ Address defaultAccount = Address.wrap(ByteUtil.hexStringToBytes(value1));
IRepositoryCache track = repository.startTracking();
track.addBalance(defaultAccount, BigInteger.valueOf(1));
@@ -112,7 +115,7 @@ public void testAccountAddCodeStorage() {
AionRepositoryImpl repository = AionRepositoryImpl.createForTesting(repoConfig);
IRepositoryCache track = repository.startTracking();
- Address defaultAccount = Address.wrap(ByteUtil.hexStringToBytes("CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3"));
+ Address defaultAccount = Address.wrap(ByteUtil.hexStringToBytes(value1));
track.addBalance(defaultAccount, BigInteger.valueOf(1));
byte[] originalRoot = repository.getRoot();
@@ -133,7 +136,7 @@ public void testAccountStateUpdateStorageRow() {
AionRepositoryImpl repository = AionRepositoryImpl.createForTesting(repoConfig);
IRepositoryCache track = repository.startTracking();
- Address defaultAccount = Address.wrap(ByteUtil.hexStringToBytes("CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3"));
+ Address defaultAccount = Address.wrap(ByteUtil.hexStringToBytes(value1));
track.addBalance(defaultAccount, BigInteger.valueOf(1));
// Consider the original root the one after an account has been added
@@ -144,7 +147,8 @@ public void testAccountStateUpdateStorageRow() {
track.flush();
- byte[] retrievedValue = repository.getStorageValue(defaultAccount, new DataWord(key)).getNoLeadZeroesData();
+ byte[] retrievedValue =
+ repository.getStorageValue(defaultAccount, new DataWord(key)).getNoLeadZeroesData();
assertThat(retrievedValue).isEqualTo(value);
byte[] newRoot = repository.getRoot();
@@ -158,7 +162,7 @@ public void testAccountStateUpdateStorageRowFlush() {
AionRepositoryImpl repository = AionRepositoryImpl.createForTesting(repoConfig);
IRepositoryCache track = repository.startTracking();
- Address defaultAccount = Address.wrap(ByteUtil.hexStringToBytes("CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3"));
+ Address defaultAccount = Address.wrap(ByteUtil.hexStringToBytes(value1));
track.addBalance(defaultAccount, BigInteger.valueOf(1));
// Consider the original root the one after an account has been added
@@ -172,9 +176,7 @@ public void testAccountStateUpdateStorageRowFlush() {
repository.flush();
- /**
- * Verify that the account has been flushed
- */
+ /** Verify that the account has been flushed */
IByteArrayKeyValueDatabase detailsDB = repository.getDetailsDatabase();
Optional serializedDetails = detailsDB.get(defaultAccount.toBytes());
@@ -185,20 +187,18 @@ public void testAccountStateUpdateStorageRowFlush() {
assertThat(details.get(new DataWord(key))).isEqualTo(new DataWord(value));
}
- /**
- * Repo track test suite
- */
-
+ /** Repo track test suite */
/**
- * This test confirms that updates done on the repo track are successfully translated
- * into the root repository.
+ * This test confirms that updates done on the repo track are successfully translated into the
+ * root repository.
*/
@Test
public void testRepoTrackUpdateStorageRow() {
final AionRepositoryImpl repository = AionRepositoryImpl.createForTesting(repoConfig);
- final IRepositoryCache> repoTrack = repository.startTracking();
- final Address defaultAccount = Address.wrap(ByteUtil.hexStringToBytes("CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3CAF3"));
+ final IRepositoryCache> repoTrack =
+ repository.startTracking();
+ final Address defaultAccount = Address.wrap(ByteUtil.hexStringToBytes(value1));
final byte[] key = HashUtil.blake128("hello".getBytes());
final byte[] value = HashUtil.blake128("world".getBytes());
@@ -208,29 +208,24 @@ public void testRepoTrackUpdateStorageRow() {
repoTrack.addStorageRow(defaultAccount, new DataWord(key), new DataWord(value));
- DataWord retrievedStorageValue = repoTrack.getStorageValue(defaultAccount, new DataWord(key));
+ DataWord retrievedStorageValue =
+ repoTrack.getStorageValue(defaultAccount, new DataWord(key));
assertThat(retrievedStorageValue).isEqualTo(new DataWord(value));
// commit changes, then check that the root has updated
repoTrack.flush();
- assertThat(repository.getStorageValue(defaultAccount, new DataWord(key))).isEqualTo(retrievedStorageValue);
+ assertThat(repository.getStorageValue(defaultAccount, new DataWord(key)))
+ .isEqualTo(retrievedStorageValue);
final byte[] newRoot = repository.getRoot();
assertThat(newRoot).isNotEqualTo(originalRoot);
}
- /**
- * Tests behaviour for trie when trying to revert to a previous root without
- * first flushing. Note the behaviour here. Interestingly enough, it seems like
- * the trie must first be flushed, so that the root node is in the caching/db layer.
- *
- * Otherwise the retrieval methods will not be able to find the temporal root value.
- */
@Test
public void testSyncToPreviousRootNoFlush() {
- final Address FIRST_ACC = Address.wrap("CAFECAFECAFECAFECAFECAFECAFECAFECAFECAFECAFECAFECAFECAFECAFECAFE");
- final Address SECOND_ACC = Address.wrap("BEEFBEEFBEEFBEEFBEEFBEEFBEEFBEEFBEEFBEEFBEEFBEEFBEEFBEEFBEEFBEEF");
+ final Address FIRST_ACC = Address.wrap(value2);
+ final Address SECOND_ACC = Address.wrap(value3);
final AionRepositoryImpl repository = AionRepositoryImpl.createForTesting(repoConfig);
byte[] originalRoot = repository.getRoot();
@@ -243,6 +238,10 @@ public void testSyncToPreviousRootNoFlush() {
System.out.println("after first account added");
System.out.println(repository.getWorldState().getTrieDump());
+ // check the update on the repo
+ BigInteger balance = repository.getBalance(FIRST_ACC);
+ assertThat(balance).isEqualTo(BigInteger.ONE);
+
byte[] firstRoot = repository.getRoot();
track = repository.startTracking();
@@ -257,18 +256,21 @@ public void testSyncToPreviousRootNoFlush() {
assertThat(firstRoot).isNotEqualTo(originalRoot);
assertThat(secondRoot).isNotEqualTo(firstRoot);
+ System.out.println("after sync to after first account added");
repository.syncToRoot(firstRoot);
+ assertThat(repository.isValidRoot(firstRoot)).isTrue();
+ System.out.println(repository.getWorldState().getTrieDump());
assertThat(repository.getRoot()).isEqualTo(firstRoot);
- BigInteger balance = repository.getBalance(FIRST_ACC);
+ balance = repository.getBalance(FIRST_ACC);
// notice that the first blocks balance is also zero
- assertThat(balance).isEqualTo(BigInteger.ZERO);
+ assertThat(balance).isEqualTo(BigInteger.ONE);
}
@Test
public void testSyncToPreviousRootWithFlush() {
- final Address FIRST_ACC = Address.wrap("CAFECAFECAFECAFECAFECAFECAFECAFECAFECAFECAFECAFECAFECAFECAFECAFE");
+ final Address FIRST_ACC = Address.wrap(value2);
AionRepositoryImpl repository = AionRepositoryImpl.createForTesting(repoConfig);
byte[] originalRoot = repository.getRoot();
diff --git a/modBoot/resource/config.xml b/modBoot/resource/config.xml
index 06a3f2de99..9b3a51e4e9 100644
--- a/modBoot/resource/config.xml
+++ b/modBoot/resource/config.xml
@@ -64,6 +64,8 @@
database
true
+
+ 1000
leveldb
diff --git a/modMcf/src/org/aion/mcf/config/CfgDb.java b/modMcf/src/org/aion/mcf/config/CfgDb.java
index 3917623956..9f39813acd 100644
--- a/modMcf/src/org/aion/mcf/config/CfgDb.java
+++ b/modMcf/src/org/aion/mcf/config/CfgDb.java
@@ -63,6 +63,7 @@ public static class Names {
private String vendor;
private boolean compression;
private boolean check_integrity;
+ private int prune;
/**
* Enabling expert mode allows more detailed database configurations.
@@ -79,6 +80,7 @@ public CfgDb() {
this.vendor = DBVendor.LEVELDB.toValue();
this.compression = false;
this.check_integrity = true;
+ this.prune = -1;
if (expert) {
this.specificConfig = new HashMap<>();
@@ -100,6 +102,9 @@ public void fromXML(final XMLStreamReader sr) throws XMLStreamException {
case "check_integrity":
this.check_integrity = Boolean.parseBoolean(Cfg.readValue(sr));
break;
+ case "prune":
+ this.prune = Integer.parseInt(Cfg.readValue(sr));
+ break;
// parameter considered only when expert==false
case "vendor":
this.vendor = Cfg.readValue(sr);
@@ -206,6 +211,13 @@ public String toXML() {
xmlWriter.writeCharacters(String.valueOf(this.check_integrity));
xmlWriter.writeEndElement();
+ xmlWriter.writeCharacters("\r\n\t\t");
+ xmlWriter.writeComment("Integer value. Number of blocks after which to prune. Pruning disabled when non-positive.");
+ xmlWriter.writeCharacters("\r\n\t\t");
+ xmlWriter.writeStartElement("prune");
+ xmlWriter.writeCharacters(String.valueOf(this.prune));
+ xmlWriter.writeEndElement();
+
if (!expert) {
xmlWriter.writeCharacters("\r\n\t\t");
xmlWriter.writeComment(
@@ -248,6 +260,10 @@ public String getPath() {
return this.path;
}
+ public int getPrune() {
+ return this.prune;
+ }
+
public Map asProperties() {
Map propSet = new HashMap<>();
diff --git a/modMcf/src/org/aion/mcf/db/AbstractRepository.java b/modMcf/src/org/aion/mcf/db/AbstractRepository.java
index 9125791d61..f894acd753 100644
--- a/modMcf/src/org/aion/mcf/db/AbstractRepository.java
+++ b/modMcf/src/org/aion/mcf/db/AbstractRepository.java
@@ -32,6 +32,7 @@
import org.aion.mcf.config.CfgDb;
import org.aion.mcf.core.AccountState;
import org.aion.mcf.db.exception.InvalidFilePathException;
+import org.aion.mcf.trie.JournalPruneDataSource;
import org.aion.mcf.trie.Trie;
import org.aion.mcf.types.AbstractBlock;
import org.aion.mcf.vm.types.DataWord;
@@ -48,7 +49,6 @@
import static org.aion.db.impl.DatabaseFactory.Props;
//import org.aion.dbmgr.exception.DriverManagerNoSuitableDriverRegisteredException;
-// import org.aion.mcf.trie.JournalPruneDataSource;
/**
* Abstract Repository class.
@@ -94,7 +94,7 @@ public abstract class AbstractRepository databaseGroup;
- // protected JournalPruneDataSource stateDSPrune;
+ protected JournalPruneDataSource stateDSPrune;
protected DetailsDataStore detailsDS;
// Read Write Lock
@@ -241,10 +241,13 @@ protected void initializeDatabasesAndCaches() throws Exception {
// Setup the cache for transaction data source.
this.detailsDS = new DetailsDataStore<>(detailsDatabase, storageDatabase, this.cfg);
- // disabling use of JournalPruneDataSource until functionality properly tested
- // TODO-AR: enable pruning with the JournalPruneDataSource
- // stateDSPrune = new JournalPruneDataSource<>(stateDatabase);
+ stateDSPrune = new JournalPruneDataSource<>(stateDatabase);
pruneBlockCount = pruneEnabled ? this.cfg.getPrune() : -1;
+ if (pruneEnabled && pruneBlockCount > 0) {
+ LOGGEN.info("Pruning block count set to {}.", pruneBlockCount);
+ } else {
+ stateDSPrune.setPruneEnabled(false);
+ }
} catch (Exception e) { // Setting up databases and caches went wrong.
throw e;
}
diff --git a/modMcf/src/org/aion/mcf/db/DetailsDataStore.java b/modMcf/src/org/aion/mcf/db/DetailsDataStore.java
index dd01eee23c..c97997f95f 100644
--- a/modMcf/src/org/aion/mcf/db/DetailsDataStore.java
+++ b/modMcf/src/org/aion/mcf/db/DetailsDataStore.java
@@ -27,6 +27,7 @@
import org.aion.base.type.IBlockHeader;
import org.aion.base.type.ITransaction;
import org.aion.base.util.ByteArrayWrapper;
+import org.aion.mcf.trie.JournalPruneDataSource;
import org.aion.mcf.types.AbstractBlock;
import org.aion.mcf.vm.types.DataWord;
@@ -34,14 +35,12 @@
import static org.aion.base.util.ByteArrayWrapper.wrap;
-// import org.aion.mcf.trie.JournalPruneDataSource;
-
/**
* Detail data storage ,
*/
public class DetailsDataStore, BH extends IBlockHeader> {
- // private JournalPruneDataSource storageDSPrune;
+ private JournalPruneDataSource storageDSPrune;
private IRepositoryConfig repoConfig;
private IByteArrayKeyValueDatabase detailsSrc;
@@ -62,7 +61,7 @@ public DetailsDataStore withDb(IByteArrayKeyValueDatabase detailsSrc,
IByteArrayKeyValueDatabase storageSrc) {
this.detailsSrc = detailsSrc;
this.storageSrc = storageSrc;
- // this.storageDSPrune = new JournalPruneDataSource<>(storageSrc);
+ this.storageDSPrune = new JournalPruneDataSource<>(storageSrc);
return this;
}
@@ -91,7 +90,7 @@ public synchronized IContractDetails get(byte[] key) {
// Found something from cache or database, return it by decoding it.
IContractDetails detailsImpl = repoConfig.contractDetailsImpl();
- detailsImpl.setDataSource(storageSrc);
+ detailsImpl.setDataSource(storageDSPrune);
detailsImpl.decode(rawDetails.get()); // We can safely get as we checked
// if it is present.
@@ -162,7 +161,7 @@ public void syncLargeStorage() {
// Decode the details.
IContractDetails detailsImpl = repoConfig.contractDetailsImpl();
- detailsImpl.setDataSource(storageSrc);
+ detailsImpl.setDataSource(storageDSPrune);
detailsImpl.decode(rawDetails.get()); // We can safely get as we
// checked if it is present.
@@ -171,9 +170,9 @@ public void syncLargeStorage() {
}
}
- /* public JournalPruneDataSource getStorageDSPrune() {
+ public JournalPruneDataSource getStorageDSPrune() {
return storageDSPrune;
- } */
+ }
public synchronized Set keys() {
// TODO - @yao do we wanted a sorted set?
diff --git a/modMcf/src/org/aion/mcf/trie/Cache.java b/modMcf/src/org/aion/mcf/trie/Cache.java
index e0235d5df5..cc52f5f5fb 100644
--- a/modMcf/src/org/aion/mcf/trie/Cache.java
+++ b/modMcf/src/org/aion/mcf/trie/Cache.java
@@ -164,9 +164,9 @@ public synchronized void commit(boolean flushCache) {
// batchMemorySize += length(key, value);
}
}
- /* for (ByteArrayWrapper removedNode : removedNodes) {
+ for (ByteArrayWrapper removedNode : removedNodes) {
batch.put(removedNode.getData(), null);
- } */
+ }
this.dataSource.putBatch(batch);
this.isDirty = false;
diff --git a/modMcf/src/org/aion/mcf/trie/JournalPruneDataSource.java b/modMcf/src/org/aion/mcf/trie/JournalPruneDataSource.java
index 753ba7614f..f547901dfd 100644
--- a/modMcf/src/org/aion/mcf/trie/JournalPruneDataSource.java
+++ b/modMcf/src/org/aion/mcf/trie/JournalPruneDataSource.java
@@ -1,41 +1,52 @@
-/*******************************************************************************
+/* ******************************************************************************
+ * Copyright (c) 2017-2018 Aion foundation.
*
- * Copyright (c) 2017, 2018 Aion foundation.
+ * This file is part of the aion network project.
*
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
+ * The aion network project is free software: you can redistribute it
+ * and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 3 of
+ * the License, or any later version.
*
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * The aion network project is distributed in the hope that it will
+ * be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program. If not, see
+ * along with the aion network project source files.
+ * If not, see .
*
- * Contributors:
+ * The aion network project leverages useful source code from other
+ * open source projects. We greatly appreciate the effort that was
+ * invested in these projects and we thank the individual contributors
+ * for their work. For provenance information and contributors
+ * please see .
+ *
+ * Contributors to the aion source files in decreasing order of code volume:
* Aion foundation.
- *******************************************************************************/
+ * team through the ethereumJ library.
+ * Ether.Camp Inc. (US) team through Ethereum Harmony.
+ * John Tromp through the Equihash solver.
+ * Samuel Neves through the BLAKE2 implementation.
+ * Zcash project team.
+ * Bitcoinj team.
+ ******************************************************************************/
package org.aion.mcf.trie;
+import java.util.*;
import org.aion.base.db.IByteArrayKeyValueDatabase;
import org.aion.base.db.IByteArrayKeyValueStore;
import org.aion.base.type.IBlock;
import org.aion.base.type.IBlockHeader;
import org.aion.base.util.ByteArrayWrapper;
-import java.util.*;
-
/**
- * The DataSource which doesn't immediately forward delete updates (unlike
- * inserts) but collects them tied to the block where these changes were made
- * (the changes are mapped to a block upon [storeBlockChanges] call). When the
- * [prune] is called for a block the deletes for this block are submitted to the
- * underlying DataSource with respect to following inserts. E.g. if the key was
- * deleted at block N and then inserted at block N + 10 this delete is not
- * passed.
+ * The DataSource which doesn't immediately forward delete updates (unlike inserts) but collects
+ * them tied to the block where these changes were made (the changes are mapped to a block upon
+ * [storeBlockChanges] call). When the [prune] is called for a block the deletes for this block are
+ * submitted to the underlying DataSource with respect to following inserts. E.g. if the key was
+ * deleted at block N and then inserted at block N + 10 this delete is not passed.
*/
public class JournalPruneDataSource, BH extends IBlockHeader>
implements IByteArrayKeyValueStore {
@@ -77,9 +88,6 @@ public void setPruneEnabled(boolean e) {
enabled = e;
}
- /**
- * ***** updates ******
- */
public synchronized void put(byte[] key, byte[] value) {
ByteArrayWrapper keyW = new ByteArrayWrapper(key);
@@ -92,7 +100,7 @@ public synchronized void put(byte[] key, byte[] value) {
incRef(keyW);
}
- // Insert into the database.
+ // put to source database.
src.put(key, value);
} else {
@@ -100,12 +108,14 @@ public synchronized void put(byte[] key, byte[] value) {
if (enabled) {
currentUpdates.deletedKeys.add(keyW);
}
- // TODO: Do we delete the key?
+ // delete is not sent to source db
}
}
public synchronized void delete(byte[] key) {
- if (!enabled) { return; }
+ if (!enabled) {
+ return;
+ }
currentUpdates.deletedKeys.add(new ByteArrayWrapper(key));
// delete is delayed
}
@@ -129,10 +139,6 @@ public synchronized void updateBatch(Map rows) {
src.putBatch(insertsOnly);
}
- public synchronized void updateBatch(Map rows, boolean erasure) {
- throw new UnsupportedOperationException();
- }
-
private void incRef(ByteArrayWrapper keyW) {
Ref cnt = refCount.get(keyW);
if (cnt == null) {
@@ -152,14 +158,18 @@ private Ref decRef(ByteArrayWrapper keyW) {
}
public synchronized void storeBlockChanges(BH header) {
- if (!enabled) { return; }
+ if (!enabled) {
+ return;
+ }
currentUpdates.blockHeader = header;
blockUpdates.put(new ByteArrayWrapper(header.getHash()), currentUpdates);
currentUpdates = new Updates();
}
public synchronized void prune(BH header) {
- if (!enabled) { return; }
+ if (!enabled) {
+ return;
+ }
ByteArrayWrapper blockHashW = new ByteArrayWrapper(header.getHash());
Updates updates = blockUpdates.remove(blockHashW);
if (updates != null) {
@@ -167,16 +177,16 @@ public synchronized void prune(BH header) {
decRef(insertedKey).dbRef = true;
}
- Map batchRemove = new HashMap<>();
+ List batchRemove = new ArrayList<>();
for (ByteArrayWrapper key : updates.deletedKeys) {
Ref ref = refCount.get(key);
if (ref == null || ref.journalRefs == 0) {
- batchRemove.put(key.getData(), null);
+ batchRemove.add(key.getData());
} else if (ref != null) {
ref.dbRef = false;
}
}
- src.putBatch(batchRemove);
+ src.deleteBatch(batchRemove);
rollbackForkBlocks(header.getNumber());
}
@@ -211,9 +221,14 @@ public LinkedHashMap getBlockUpdates() {
return blockUpdates;
}
- /**
- * *** other ****
- */
+ public int getDeletedKeysCount() {
+ return currentUpdates.deletedKeys.size();
+ }
+
+ public int getInsertedKeysCount() {
+ return currentUpdates.insertedKeys.size();
+ }
+
public Optional get(byte[] key) {
return src.get(key);
}
@@ -244,12 +259,21 @@ public void commitBatch() {
@Override
public void deleteBatch(Collection keys) {
- throw new UnsupportedOperationException();
+ if (!enabled) {
+ return;
+ }
+ // deletes are delayed
+ keys.forEach(key -> currentUpdates.deletedKeys.add(new ByteArrayWrapper(key)));
}
@Override
public boolean isEmpty() {
- throw new UnsupportedOperationException();
+ // the delayed deletes are not considered by this check until applied to the db
+ if (!currentUpdates.insertedKeys.isEmpty()) {
+ return false;
+ } else {
+ return src.isEmpty();
+ }
}
public IByteArrayKeyValueDatabase getSrc() {
diff --git a/modMcf/src/org/aion/mcf/trie/Trie.java b/modMcf/src/org/aion/mcf/trie/Trie.java
index ca8a01742f..8a0f865c30 100644
--- a/modMcf/src/org/aion/mcf/trie/Trie.java
+++ b/modMcf/src/org/aion/mcf/trie/Trie.java
@@ -91,6 +91,8 @@ public interface Trie {
void undo();
String getTrieDump();
+ String getTrieDump(byte[] stateRoot);
+ int getTrieSize(byte[] stateRoot);
boolean validate();
diff --git a/modMcf/src/org/aion/mcf/trie/TrieImpl.java b/modMcf/src/org/aion/mcf/trie/TrieImpl.java
index 51c4c75577..7f459824f5 100644
--- a/modMcf/src/org/aion/mcf/trie/TrieImpl.java
+++ b/modMcf/src/org/aion/mcf/trie/TrieImpl.java
@@ -758,6 +758,13 @@ public String getTrieDump() {
}
}
+ @Override
+ public String getTrieDump(byte[] stateRoot) {
+ TraceAllNodes traceAction = new TraceAllNodes();
+ traceTrie(stateRoot, traceAction);
+ return "root: " + Hex.toHexString(stateRoot) + "\n" + traceAction.getOutput();
+ }
+
public Set getTrieKeys(byte[] stateRoot) {
CollectFullSetOfNodes traceAction = new CollectFullSetOfNodes();
traceTrie(stateRoot, traceAction);
diff --git a/modMcf/test/org/aion/trie/JournalPruneDataSourceTest.java b/modMcf/test/org/aion/trie/JournalPruneDataSourceTest.java
new file mode 100644
index 0000000000..83106efb95
--- /dev/null
+++ b/modMcf/test/org/aion/trie/JournalPruneDataSourceTest.java
@@ -0,0 +1,553 @@
+/* ******************************************************************************
+ * Copyright (c) 2017-2018 Aion foundation.
+ *
+ * This file is part of the aion network project.
+ *
+ * The aion network project is free software: you can redistribute it
+ * and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 3 of
+ * the License, or any later version.
+ *
+ * The aion network project is distributed in the hope that it will
+ * be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the aion network project source files.
+ * If not, see .
+ *
+ * The aion network project leverages useful source code from other
+ * open source projects. We greatly appreciate the effort that was
+ * invested in these projects and we thank the individual contributors
+ * for their work. For provenance information and contributors
+ * please see .
+ *
+ * Contributors to the aion source files in decreasing order of code volume:
+ * Aion foundation.
+ ******************************************************************************/
+package org.aion.trie;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import org.aion.base.db.IByteArrayKeyValueDatabase;
+import org.aion.db.impl.DatabaseFactory;
+import org.aion.log.AionLoggerFactory;
+import org.aion.mcf.trie.JournalPruneDataSource;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/** @author Alexandra Roatis */
+public class JournalPruneDataSourceTest {
+
+ private static final String dbName = "TestDB";
+ private static IByteArrayKeyValueDatabase source_db = DatabaseFactory.connect(dbName);
+ private static JournalPruneDataSource db;
+
+ private static final byte[] k1 = "key1".getBytes();
+ private static final byte[] v1 = "value1".getBytes();
+
+ private static final byte[] k2 = "key2".getBytes();
+ private static final byte[] v2 = "value2".getBytes();
+
+ private static final byte[] k3 = "key3".getBytes();
+ private static final byte[] v3 = "value3".getBytes();
+
+ @BeforeClass
+ public static void setup() {
+ // logging to see errors
+ Map cfg = new HashMap<>();
+ cfg.put("DB", "INFO");
+
+ AionLoggerFactory.init(cfg);
+ }
+
+ @Before
+ public void open() {
+ assertThat(source_db.open()).isTrue();
+ db = new JournalPruneDataSource(source_db);
+ }
+
+ @After
+ public void close() {
+ source_db.close();
+ assertThat(source_db.isClosed()).isTrue();
+ }
+
+ @Test
+ public void testPut_woPrune() {
+ db.setPruneEnabled(false);
+
+ assertThat(db.get(k1).isPresent()).isFalse();
+ db.put(k1, v1);
+ assertThat(db.get(k1).get()).isEqualTo(v1);
+
+ // ensure no cached values
+ assertThat(db.getInsertedKeysCount()).isEqualTo(0);
+ assertThat(db.getDeletedKeysCount()).isEqualTo(0);
+
+ // ensure the insert was propagated
+ assertThat(source_db.get(k1).get()).isEqualTo(v1);
+ }
+
+ @Test
+ public void testPutBatch_woPrune() {
+ db.setPruneEnabled(false);
+
+ assertThat(db.get(k1).isPresent()).isFalse();
+ assertThat(db.get(k2).isPresent()).isFalse();
+
+ Map map = new HashMap<>();
+ map.put(k1, v1);
+ map.put(k2, v2);
+ db.putBatch(map);
+
+ assertThat(v1).isEqualTo(db.get(k1).get());
+ assertThat(v2).isEqualTo(db.get(k2).get());
+
+ // ensure no cached values
+ assertThat(db.getInsertedKeysCount()).isEqualTo(0);
+ assertThat(db.getDeletedKeysCount()).isEqualTo(0);
+
+ // ensure the inserts were propagated
+ assertThat(source_db.get(k1).get()).isEqualTo(v1);
+ assertThat(source_db.get(k2).get()).isEqualTo(v2);
+ }
+
+ @Test
+ public void testUpdate_woPrune() {
+ db.setPruneEnabled(false);
+
+ // insert
+ assertThat(db.get(k1).isPresent()).isFalse();
+ db.put(k1, v1);
+ assertThat(db.get(k1).get()).isEqualTo(v1);
+ assertThat(source_db.get(k1).get()).isEqualTo(v1);
+
+ // update
+ db.put(k1, v2);
+ assertThat(db.get(k1).get()).isEqualTo(v2);
+ assertThat(source_db.get(k1).get()).isEqualTo(v2);
+
+ // indirect delete
+ db.put(k1, null);
+ assertThat(db.get(k1).isPresent()).isTrue();
+ assertThat(source_db.get(k1).isPresent()).isTrue();
+
+ // direct delete
+ db.put(k2, v2);
+ assertThat(db.get(k2).get()).isEqualTo(v2);
+ assertThat(source_db.get(k2).get()).isEqualTo(v2);
+ db.delete(k2);
+ assertThat(db.get(k2).isPresent()).isTrue();
+ assertThat(source_db.get(k2).isPresent()).isTrue();
+
+ // ensure no cached values
+ assertThat(db.getDeletedKeysCount()).isEqualTo(0);
+ assertThat(db.getInsertedKeysCount()).isEqualTo(0);
+ }
+
+ @Test
+ public void testUpdateBatch_woPrune() {
+ db.setPruneEnabled(false);
+
+ // ensure existence
+ assertThat(db.get(k1).isPresent()).isFalse();
+ assertThat(db.get(k2).isPresent()).isFalse();
+ assertThat(db.get(k3).isPresent()).isFalse();
+ db.put(k1, v1);
+ db.put(k2, v2);
+
+ assertThat(v1).isEqualTo(db.get(k1).get());
+ assertThat(v2).isEqualTo(db.get(k2).get());
+
+ // check after update
+ Map ops = new HashMap<>();
+ ops.put(k1, null);
+ ops.put(k2, v1);
+ ops.put(k3, v3);
+ db.putBatch(ops);
+
+ assertThat(db.get(k1).isPresent()).isTrue();
+ assertThat(db.get(k2).get()).isEqualTo(v1);
+ assertThat(db.get(k3).get()).isEqualTo(v3);
+
+ assertThat(source_db.get(k1).isPresent()).isTrue();
+ assertThat(source_db.get(k2).get()).isEqualTo(v1);
+ assertThat(source_db.get(k3).get()).isEqualTo(v3);
+
+ // ensure no cached values
+ assertThat(db.getDeletedKeysCount()).isEqualTo(0);
+ assertThat(db.getInsertedKeysCount()).isEqualTo(0);
+ }
+
+ @Test
+ public void testDelete_woPrune() {
+ db.setPruneEnabled(false);
+
+ // ensure existence
+ db.put(k1, v1);
+ assertThat(db.get(k1).isPresent()).isTrue();
+
+ // delete not propagated
+ db.delete(k1);
+ assertThat(db.get(k1).isPresent()).isTrue();
+ assertThat(source_db.get(k1).get()).isEqualTo(v1);
+
+ // ensure no cached values
+ assertThat(db.getInsertedKeysCount()).isEqualTo(0);
+ assertThat(db.getDeletedKeysCount()).isEqualTo(0);
+ }
+
+ @Test
+ public void testDeleteBatch_woPrune() {
+ db.setPruneEnabled(false);
+
+ // ensure existence
+ Map map = new HashMap<>();
+ map.put(k1, v1);
+ map.put(k2, v2);
+ map.put(k3, null);
+ db.putBatch(map);
+
+ assertThat(db.get(k1).isPresent()).isTrue();
+ assertThat(db.get(k2).isPresent()).isTrue();
+ assertThat(db.get(k3).isPresent()).isFalse();
+
+ // deletes not propagated
+ db.deleteBatch(map.keySet());
+ assertThat(db.get(k1).isPresent()).isTrue();
+ assertThat(db.get(k2).isPresent()).isTrue();
+ assertThat(db.get(k3).isPresent()).isFalse();
+
+ // ensure no cached values
+ assertThat(db.getInsertedKeysCount()).isEqualTo(0);
+ assertThat(db.getDeletedKeysCount()).isEqualTo(0);
+ }
+
+ @Test
+ public void testKeys_woPrune() {
+ db.setPruneEnabled(false);
+
+ // keys shouldn't be null even when empty
+ Set keys = db.keys();
+ assertThat(db.isEmpty()).isTrue();
+ assertThat(keys).isNotNull();
+ assertThat(keys.size()).isEqualTo(0);
+
+ // checking after put
+ db.put(k1, v1);
+ db.put(k2, v2);
+ assertThat(db.get(k1).get()).isEqualTo(v1);
+ assertThat(db.get(k2).get()).isEqualTo(v2);
+
+ keys = db.keys();
+ assertThat(keys.size()).isEqualTo(2);
+
+ // checking after delete
+ db.delete(k2);
+ assertThat(db.get(k2).isPresent()).isTrue();
+
+ keys = db.keys();
+ assertThat(keys.size()).isEqualTo(2);
+
+ // checking after putBatch
+ Map ops = new HashMap<>();
+ ops.put(k1, null);
+ ops.put(k2, v2);
+ ops.put(k3, v3);
+ db.putBatch(ops);
+
+ keys = db.keys();
+ assertThat(keys.size()).isEqualTo(3);
+
+ // checking after deleteBatch
+ db.deleteBatch(ops.keySet());
+
+ keys = db.keys();
+ assertThat(keys.size()).isEqualTo(3);
+
+ // ensure no cached values
+ assertThat(db.getInsertedKeysCount()).isEqualTo(0);
+ assertThat(db.getDeletedKeysCount()).isEqualTo(0);
+ }
+
+ @Test
+ public void testIsEmpty_woPrune() {
+ db.setPruneEnabled(false);
+
+ assertThat(db.isEmpty()).isTrue();
+
+ // checking after put
+ db.put(k1, v1);
+ db.put(k2, v2);
+ assertThat(db.get(k1).get()).isEqualTo(v1);
+ assertThat(db.get(k2).get()).isEqualTo(v2);
+
+ assertThat(db.isEmpty()).isFalse();
+
+ // checking after delete
+ db.delete(k2);
+ assertThat(db.get(k2).isPresent()).isTrue();
+ assertThat(db.isEmpty()).isFalse();
+ db.delete(k1);
+ assertThat(db.isEmpty()).isFalse();
+
+ // checking after putBatch
+ Map ops = new HashMap<>();
+ ops.put(k1, null);
+ ops.put(k2, v2);
+ ops.put(k3, v3);
+ db.putBatch(ops);
+ assertThat(db.isEmpty()).isFalse();
+
+ // checking after deleteBatch
+ db.deleteBatch(ops.keySet());
+ assertThat(db.isEmpty()).isFalse();
+
+ // ensure no cached values
+ assertThat(db.getInsertedKeysCount()).isEqualTo(0);
+ assertThat(db.getDeletedKeysCount()).isEqualTo(0);
+ }
+
+ @Test
+ public void testPut_wPrune() {
+ db.setPruneEnabled(true);
+
+ assertThat(db.get(k1).isPresent()).isFalse();
+ db.put(k1, v1);
+ assertThat(db.get(k1).get()).isEqualTo(v1);
+
+ // ensure cached values
+ assertThat(db.getInsertedKeysCount()).isEqualTo(1);
+ assertThat(db.getDeletedKeysCount()).isEqualTo(0);
+
+ // ensure the insert was propagated
+ assertThat(source_db.get(k1).get()).isEqualTo(v1);
+ }
+
+ @Test
+ public void testPutBatch_wPrune() {
+ db.setPruneEnabled(true);
+
+ assertThat(db.get(k1).isPresent()).isFalse();
+ assertThat(db.get(k2).isPresent()).isFalse();
+
+ Map map = new HashMap<>();
+ map.put(k1, v1);
+ map.put(k2, v2);
+ db.putBatch(map);
+
+ assertThat(v1).isEqualTo(db.get(k1).get());
+ assertThat(v2).isEqualTo(db.get(k2).get());
+
+ // ensure cached values
+ assertThat(db.getInsertedKeysCount()).isEqualTo(2);
+ assertThat(db.getDeletedKeysCount()).isEqualTo(0);
+
+ // ensure the inserts were propagated
+ assertThat(source_db.get(k1).get()).isEqualTo(v1);
+ assertThat(source_db.get(k2).get()).isEqualTo(v2);
+ }
+
+ @Test
+ public void testUpdate_wPrune() {
+ db.setPruneEnabled(true);
+
+ // insert
+ assertThat(db.get(k1).isPresent()).isFalse();
+ db.put(k1, v1);
+ assertThat(db.get(k1).get()).isEqualTo(v1);
+ assertThat(source_db.get(k1).get()).isEqualTo(v1);
+
+ // update
+ db.put(k1, v2);
+ assertThat(db.get(k1).get()).isEqualTo(v2);
+ assertThat(source_db.get(k1).get()).isEqualTo(v2);
+
+ // indirect delete
+ db.put(k1, null);
+ assertThat(db.get(k1).isPresent()).isTrue();
+ assertThat(source_db.get(k1).isPresent()).isTrue();
+
+ // direct delete
+ db.put(k2, v2);
+ assertThat(db.get(k2).get()).isEqualTo(v2);
+ assertThat(source_db.get(k2).get()).isEqualTo(v2);
+ db.delete(k2);
+ assertThat(db.get(k2).isPresent()).isTrue();
+ assertThat(source_db.get(k2).isPresent()).isTrue();
+
+ // ensure cached values
+ assertThat(db.getDeletedKeysCount()).isEqualTo(2);
+ assertThat(db.getInsertedKeysCount()).isEqualTo(2);
+ }
+
+ @Test
+ public void testUpdateBatch_wPrune() {
+ db.setPruneEnabled(true);
+
+ // ensure existence
+ assertThat(db.get(k1).isPresent()).isFalse();
+ assertThat(db.get(k2).isPresent()).isFalse();
+ assertThat(db.get(k3).isPresent()).isFalse();
+ db.put(k1, v1);
+ db.put(k2, v2);
+
+ assertThat(v1).isEqualTo(db.get(k1).get());
+ assertThat(v2).isEqualTo(db.get(k2).get());
+
+ // check after update
+ Map ops = new HashMap<>();
+ ops.put(k1, null);
+ ops.put(k2, v1);
+ ops.put(k3, v3);
+ db.putBatch(ops);
+
+ assertThat(db.get(k1).isPresent()).isTrue();
+ assertThat(db.get(k2).get()).isEqualTo(v1);
+ assertThat(db.get(k3).get()).isEqualTo(v3);
+
+ assertThat(source_db.get(k1).isPresent()).isTrue();
+ assertThat(source_db.get(k2).get()).isEqualTo(v1);
+ assertThat(source_db.get(k3).get()).isEqualTo(v3);
+
+ // ensure cached values
+ assertThat(db.getDeletedKeysCount()).isEqualTo(1);
+ assertThat(db.getInsertedKeysCount()).isEqualTo(3);
+ }
+
+ @Test
+ public void testDelete_wPrune() {
+ db.setPruneEnabled(true);
+
+ // ensure existence
+ db.put(k1, v1);
+ assertThat(db.get(k1).isPresent()).isTrue();
+
+ // delete not propagated
+ db.delete(k1);
+ assertThat(db.get(k1).isPresent()).isTrue();
+ assertThat(source_db.get(k1).get()).isEqualTo(v1);
+
+ // ensure cached values
+ assertThat(db.getDeletedKeysCount()).isEqualTo(1);
+ assertThat(db.getInsertedKeysCount()).isEqualTo(1);
+ }
+
+ @Test
+ public void testDeleteBatchWithPrune() {
+ // ensure existence
+ Map map = new HashMap<>();
+ map.put(k1, v1);
+ map.put(k2, v2);
+ map.put(k3, null);
+ db.putBatch(map);
+
+ assertThat(db.get(k1).isPresent()).isTrue();
+ assertThat(db.get(k2).isPresent()).isTrue();
+ assertThat(db.get(k3).isPresent()).isFalse();
+
+ // check presence after delete
+ db.deleteBatch(map.keySet());
+
+ // delete operations are delayed until pruning is called
+ assertThat(db.get(k1).isPresent()).isTrue();
+ assertThat(db.get(k2).isPresent()).isTrue();
+ assertThat(db.get(k3).isPresent()).isFalse();
+
+ // ensure cached values
+ assertThat(db.getDeletedKeysCount()).isEqualTo(3);
+ assertThat(db.getInsertedKeysCount()).isEqualTo(2);
+ }
+
+ @Test
+ public void testKeys() {
+ db.setPruneEnabled(true);
+
+ // keys shouldn't be null even when empty
+ Set keys = db.keys();
+ assertThat(db.isEmpty()).isTrue();
+ assertThat(keys).isNotNull();
+ assertThat(keys.size()).isEqualTo(0);
+
+ // checking after put
+ db.put(k1, v1);
+ db.put(k2, v2);
+ assertThat(db.get(k1).get()).isEqualTo(v1);
+ assertThat(db.get(k2).get()).isEqualTo(v2);
+
+ keys = db.keys();
+ assertThat(keys.size()).isEqualTo(2);
+
+ // checking after delete
+ db.delete(k2);
+ assertThat(db.get(k2).isPresent()).isTrue();
+
+ keys = db.keys();
+ assertThat(keys.size()).isEqualTo(2);
+
+ // checking after putBatch
+ Map ops = new HashMap<>();
+ ops.put(k1, null);
+ ops.put(k2, v2);
+ ops.put(k3, v3);
+ db.putBatch(ops);
+
+ keys = db.keys();
+ assertThat(keys.size()).isEqualTo(3);
+
+ // checking after deleteBatch
+ db.deleteBatch(ops.keySet());
+
+ keys = db.keys();
+ assertThat(keys.size()).isEqualTo(3);
+
+ // ensure cached values (deletes and inserts are journaled while pruning is enabled)
+ assertThat(db.getInsertedKeysCount()).isEqualTo(3);
+ assertThat(db.getDeletedKeysCount()).isEqualTo(3);
+ }
+
+ @Test
+ public void testIsEmpty_wPrune() {
+ db.setPruneEnabled(true);
+
+ assertThat(db.isEmpty()).isTrue();
+
+ // checking after put
+ db.put(k1, v1);
+ db.put(k2, v2);
+ assertThat(db.get(k1).get()).isEqualTo(v1);
+ assertThat(db.get(k2).get()).isEqualTo(v2);
+
+ assertThat(db.isEmpty()).isFalse();
+
+ // checking after delete
+ db.delete(k2);
+ assertThat(db.get(k2).isPresent()).isTrue();
+ assertThat(db.isEmpty()).isFalse();
+ db.delete(k1);
+ assertThat(db.isEmpty()).isFalse();
+
+ // checking after putBatch
+ Map ops = new HashMap<>();
+ ops.put(k1, null);
+ ops.put(k2, v2);
+ ops.put(k3, v3);
+ db.putBatch(ops);
+ assertThat(db.isEmpty()).isFalse();
+
+ // checking after deleteBatch
+ db.deleteBatch(ops.keySet());
+ assertThat(db.isEmpty()).isFalse();
+
+ // ensure cached values (deletes and inserts are journaled while pruning is enabled)
+ assertThat(db.getInsertedKeysCount()).isEqualTo(3);
+ assertThat(db.getDeletedKeysCount()).isEqualTo(3);
+ }
+}