Skip to content

Commit

Permalink
Merge pull request #186 from iquidus/dev/ecip-1099
Browse files Browse the repository at this point in the history
ecip 1099
  • Loading branch information
iquidus authored Sep 30, 2020
2 parents 1954185 + 4878a14 commit 3bd2233
Show file tree
Hide file tree
Showing 19 changed files with 251 additions and 108 deletions.
20 changes: 16 additions & 4 deletions cmd/geth/misccmd.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,10 @@ var (
Name: "makecache",
Usage: "Generate ethash verification cache (for testing)",
ArgsUsage: "<blockNum> <outputDir>",
Category: "MISCELLANEOUS COMMANDS",
Flags: []cli.Flag{
utils.EthashEpochLengthFlag,
},
Category: "MISCELLANEOUS COMMANDS",
Description: `
The makecache command generates an ethash cache in <outputDir>.
Expand All @@ -49,7 +52,10 @@ Regular users do not need to execute it.
Name: "makedag",
Usage: "Generate ethash mining DAG (for testing)",
ArgsUsage: "<blockNum> <outputDir>",
Category: "MISCELLANEOUS COMMANDS",
Flags: []cli.Flag{
utils.EthashEpochLengthFlag,
},
Category: "MISCELLANEOUS COMMANDS",
Description: `
The makedag command generates an ethash DAG in <outputDir>.
Expand Down Expand Up @@ -86,7 +92,10 @@ func makecache(ctx *cli.Context) error {
if err != nil {
utils.Fatalf("Invalid block number: %v", err)
}
ethash.MakeCache(block, args[1])

epochLength := ctx.Uint64(utils.EthashEpochLengthFlag.Name)

ethash.MakeCache(block, epochLength, args[1])

return nil
}
Expand All @@ -101,7 +110,10 @@ func makedag(ctx *cli.Context) error {
if err != nil {
utils.Fatalf("Invalid block number: %v", err)
}
ethash.MakeDataset(block, args[1])

epochLength := ctx.Uint64(utils.EthashEpochLengthFlag.Name)

ethash.MakeDataset(block, epochLength, args[1])

return nil
}
Expand Down
1 change: 1 addition & 0 deletions cmd/geth/retesteth.go
Original file line number Diff line number Diff line change
Expand Up @@ -398,6 +398,7 @@ func (api *RetestethAPI) SetChainParams(ctx context.Context, chainParams ChainPa
DatasetsInMem: 1,
DatasetsOnDisk: 2,
DatasetsLockMmap: false,
ECIP1099Block: api.chainConfig.GetEthashECIP1099Transition(),
}, nil, false)
default:
return false, fmt.Errorf("unrecognised seal engine: %s", chainParams.SealEngine)
Expand Down
42 changes: 39 additions & 3 deletions cmd/utils/flags.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,9 @@ import (
"io/ioutil"
"math/big"
"os"
"os/user"
"path/filepath"
"runtime"
"strconv"
"strings"
"text/tabwriter"
Expand Down Expand Up @@ -321,6 +323,11 @@ var (
Name: "ethash.dagslockmmap",
Usage: "Lock memory maps for recent ethash mining DAGs",
}
EthashEpochLengthFlag = cli.Int64Flag{
Name: "epoch.length",
Usage: "Sets epoch length for makecache & makedag commands",
Value: 30000,
}
// Transaction pool settings
TxPoolLocalsFlag = cli.StringFlag{
Name: "txpool.locals",
Expand Down Expand Up @@ -1414,13 +1421,41 @@ func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) {
}
}

// setEthashDatasetDir resolves the on-disk directory used to store ethash
// mining DAGs and writes it into cfg.Ethash.DatasetDir.
//
// Resolution order:
//  1. An explicit --ethash.dagdir flag always wins.
//  2. Otherwise, when the Classic or Mordor network flag is set AND the
//     dataset dir is still the stock ethash default, switch to a dedicated
//     "Etchash" directory — presumably so ECIP-1099 (etchash) DAGs do not
//     collide with standard ethash DAGs on disk; confirm against the chain
//     configs that both networks actually activate ECIP-1099.
//  3. In every other case the config value is left untouched.
func setEthashDatasetDir(ctx *cli.Context, cfg *eth.Config) {
	switch {
	case ctx.GlobalIsSet(EthashDatasetDirFlag.Name):
		// User supplied a directory explicitly; honor it verbatim.
		cfg.Ethash.DatasetDir = ctx.GlobalString(EthashDatasetDirFlag.Name)

	case (ctx.GlobalBool(ClassicFlag.Name) || ctx.GlobalBool(MordorFlag.Name)) && cfg.Ethash.DatasetDir == eth.DefaultConfig.Ethash.DatasetDir:
		// ECIP-1099 networks on the default dir: use an etchash-specific
		// DAG directory instead, mirroring per-OS data-dir conventions.
		home := os.Getenv("HOME")
		if home == "" {
			// $HOME unset (e.g. Windows or a stripped environment);
			// fall back to the OS user database.
			// NOTE(review): the local `user` shadows the `os/user` package
			// name inside this if-body — legal, but worth renaming.
			if user, err := user.Current(); err == nil {
				home = user.HomeDir
			}
		}
		if runtime.GOOS == "darwin" {
			cfg.Ethash.DatasetDir = filepath.Join(home, "Library", "Etchash")
		} else if runtime.GOOS == "windows" {
			localappdata := os.Getenv("LOCALAPPDATA")
			if localappdata != "" {
				cfg.Ethash.DatasetDir = filepath.Join(localappdata, "Etchash")
			} else {
				// %LOCALAPPDATA% missing; approximate its usual location.
				cfg.Ethash.DatasetDir = filepath.Join(home, "AppData", "Local", "Etchash")
			}
		} else {
			// All other Unix-likes: hidden dir in $HOME.
			cfg.Ethash.DatasetDir = filepath.Join(home, ".etchash")
		}
	}
}

func setEthash(ctx *cli.Context, cfg *eth.Config) {
if ctx.GlobalIsSet(EthashCacheDirFlag.Name) {
cfg.Ethash.CacheDir = ctx.GlobalString(EthashCacheDirFlag.Name)
}
if ctx.GlobalIsSet(EthashDatasetDirFlag.Name) {
cfg.Ethash.DatasetDir = ctx.GlobalString(EthashDatasetDirFlag.Name)
}
// ECIP-1099
setEthashDatasetDir(ctx, cfg)

if ctx.GlobalIsSet(EthashCachesInMemoryFlag.Name) {
cfg.Ethash.CachesInMem = ctx.GlobalInt(EthashCachesInMemoryFlag.Name)
}
Expand Down Expand Up @@ -1960,6 +1995,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readOnly bool) (chain *core.B
DatasetsInMem: eth.DefaultConfig.Ethash.DatasetsInMem,
DatasetsOnDisk: eth.DefaultConfig.Ethash.DatasetsOnDisk,
DatasetsLockMmap: eth.DefaultConfig.Ethash.DatasetsLockMmap,
ECIP1099Block: config.GetEthashECIP1099Transition(),
}, nil, false)
}
}
Expand Down
76 changes: 45 additions & 31 deletions consensus/ethash/algorithm.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,34 +35,51 @@ import (
)

const (
datasetInitBytes = 1 << 30 // Bytes in dataset at genesis
datasetGrowthBytes = 1 << 23 // Dataset growth per epoch
cacheInitBytes = 1 << 24 // Bytes in cache at genesis
cacheGrowthBytes = 1 << 17 // Cache growth per epoch
epochLength = 30000 // Blocks per epoch
mixBytes = 128 // Width of mix
hashBytes = 64 // Hash length in bytes
hashWords = 16 // Number of 32 bit ints in a hash
datasetParents = 256 // Number of parents of each dataset element
cacheRounds = 3 // Number of rounds in cache production
loopAccesses = 64 // Number of accesses in hashimoto loop
datasetInitBytes = 1 << 30 // Bytes in dataset at genesis
datasetGrowthBytes = 1 << 23 // Dataset growth per epoch
cacheInitBytes = 1 << 24 // Bytes in cache at genesis
cacheGrowthBytes = 1 << 17 // Cache growth per epoch
epochLengthDefault = 30000 // Default epoch length (blocks per epoch)
epochLengthECIP1099 = 60000 // Blocks per epoch if ECIP-1099 is activated
mixBytes = 128 // Width of mix
hashBytes = 64 // Hash length in bytes
hashWords = 16 // Number of 32 bit ints in a hash
datasetParents = 256 // Number of parents of each dataset element
cacheRounds = 3 // Number of rounds in cache production
loopAccesses = 64 // Number of accesses in hashimoto loop
maxEpoch = 2048 // Max Epoch for included tables
)

// calcEpochLength returns the epoch length in blocks for the given block
// number. Before the ECIP-1099 activation block the default epoch length
// applies; from the activation block onward the doubled ECIP-1099 epoch
// length is used. A nil ecip1099FBlock means ECIP-1099 never activates.
func calcEpochLength(block uint64, ecip1099FBlock *uint64) uint64 {
	// Single guard instead of two nested ifs: the doubled epoch length
	// applies only once the fork block (if configured) has been reached.
	if ecip1099FBlock != nil && block >= *ecip1099FBlock {
		return epochLengthECIP1099
	}
	return epochLengthDefault
}

// calcEpoch returns the epoch number that the given block belongs to under
// the supplied epoch length (which varies with ECIP-1099 activation).
// epochLength must be non-zero; a zero value would panic on division.
func calcEpoch(block uint64, epochLength uint64) uint64 {
	return block / epochLength
}

// cacheSize returns the size of the ethash verification cache that belongs to a certain
// block number.
func cacheSize(block uint64) uint64 {
epoch := int(block / epochLength)
func cacheSize(block uint64, epoch uint64) uint64 {
if epoch < maxEpoch {
return cacheSizes[epoch]
return cacheSizes[int(epoch)]
}
return calcCacheSize(epoch)
}

// calcCacheSize calculates the cache size for epoch. The cache size grows linearly,
// however, we always take the highest prime below the linearly growing threshold in order
// to reduce the risk of accidental regularities leading to cyclic behavior.
func calcCacheSize(epoch int) uint64 {
size := cacheInitBytes + cacheGrowthBytes*uint64(epoch) - hashBytes
func calcCacheSize(epoch uint64) uint64 {
size := cacheInitBytes + cacheGrowthBytes*epoch - hashBytes
for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
size -= 2 * hashBytes
}
Expand All @@ -71,19 +88,18 @@ func calcCacheSize(epoch int) uint64 {

// datasetSize returns the size of the ethash mining dataset that belongs to a certain
// block number.
func datasetSize(block uint64) uint64 {
epoch := int(block / epochLength)
func datasetSize(block uint64, epoch uint64) uint64 {
if epoch < maxEpoch {
return datasetSizes[epoch]
return datasetSizes[int(epoch)]
}
return calcDatasetSize(epoch)
}

// calcDatasetSize calculates the dataset size for epoch. The dataset size grows linearly,
// however, we always take the highest prime below the linearly growing threshold in order
// to reduce the risk of accidental regularities leading to cyclic behavior.
func calcDatasetSize(epoch int) uint64 {
size := datasetInitBytes + datasetGrowthBytes*uint64(epoch) - mixBytes
func calcDatasetSize(epoch uint64) uint64 {
size := datasetInitBytes + datasetGrowthBytes*epoch - mixBytes
for !new(big.Int).SetUint64(size / mixBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
size -= 2 * mixBytes
}
Expand Down Expand Up @@ -120,11 +136,11 @@ func makeHasher(h hash.Hash) hasher {
// dataset.
func seedHash(block uint64) []byte {
seed := make([]byte, 32)
if block < epochLength {
if block < epochLengthDefault {
return seed
}
keccak256 := makeHasher(sha3.NewLegacyKeccak256())
for i := 0; i < int(block/epochLength); i++ {
for i := 0; i < int(block/epochLengthDefault); i++ {
keccak256(seed, seed)
}
return seed
Expand All @@ -136,7 +152,7 @@ func seedHash(block uint64) []byte {
// algorithm from Strict Memory Hard Hashing Functions (2014). The output is a
// set of 524288 64-byte values.
// This method places the result into dest in machine byte order.
func generateCache(dest []uint32, epoch uint64, seed []byte) {
func generateCache(dest []uint32, epoch uint64, epochLength uint64, seed []byte) {
// Print some debug logs to allow analysis on low end devices
logger := log.New("epoch", epoch)

Expand All @@ -148,7 +164,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
if elapsed > 3*time.Second {
logFn = logger.Info
}
logFn("Generated ethash verification cache", "elapsed", common.PrettyDuration(elapsed))
logFn("Generated ethash verification cache", "epochLength", epochLength, "elapsed", common.PrettyDuration(elapsed))
}()
// Convert our destination slice to a byte buffer
header := *(*reflect.SliceHeader)(unsafe.Pointer(&dest))
Expand All @@ -172,7 +188,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
case <-done:
return
case <-time.After(3 * time.Second):
logger.Info("Generating ethash verification cache", "percentage", atomic.LoadUint32(&progress)*100/uint32(rows)/4, "elapsed", common.PrettyDuration(time.Since(start)))
logger.Info("Generating ethash verification cache", "epochLength", epochLength, "percentage", atomic.LoadUint32(&progress)*100/uint32(rows)/4, "elapsed", common.PrettyDuration(time.Since(start)))
}
}
}()
Expand Down Expand Up @@ -264,7 +280,7 @@ func generateDatasetItem(cache []uint32, index uint32, keccak512 hasher) []byte

// generateDataset generates the entire ethash dataset for mining.
// This method places the result into dest in machine byte order.
func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
func generateDataset(dest []uint32, epoch uint64, epochLength uint64, cache []uint32) {
// Print some debug logs to allow analysis on low end devices
logger := log.New("epoch", epoch)

Expand All @@ -276,7 +292,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
if elapsed > 3*time.Second {
logFn = logger.Info
}
logFn("Generated ethash verification cache", "elapsed", common.PrettyDuration(elapsed))
logFn("Generated ethash verification cache", "epochLength", epochLength, "elapsed", common.PrettyDuration(elapsed))
}()

// Figure out whether the bytes need to be swapped for the machine
Expand Down Expand Up @@ -320,7 +336,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
copy(dataset[index*hashBytes:], item)

if status := atomic.AddUint32(&progress, 1); status%percent == 0 {
logger.Info("Generating DAG in progress", "percentage", uint64(status*100)/(size/hashBytes), "elapsed", common.PrettyDuration(time.Since(start)))
logger.Info("Generating DAG in progress", "epochLength", epochLength, "percentage", uint64(status*100)/(size/hashBytes), "elapsed", common.PrettyDuration(time.Since(start)))
}
}
}(i)
Expand Down Expand Up @@ -400,8 +416,6 @@ func hashimotoFull(dataset []uint32, hash []byte, nonce uint64) ([]byte, []byte)
return hashimoto(hash, nonce, uint64(len(dataset))*4, lookup)
}

const maxEpoch = 2048

// datasetSizes is a lookup table for the ethash dataset size for the first 2048
// epochs (i.e. 61440000 blocks).
var datasetSizes = [maxEpoch]uint64{
Expand Down
Loading

0 comments on commit 3bd2233

Please sign in to comment.