Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

ecip 1099 #186

Merged
merged 26 commits into from
Sep 30, 2020
Merged
Show file tree
Hide file tree
Changes from 15 commits
Commits
Show all changes
26 commits
Select commit Hold shift + click to select a range
1af78e0
ecip-1099: minimal implementation
iquidus Sep 14, 2020
3c052e7
ecip-1099: add fix example for TestConcurrentDiskCacheGeneration failure
iquidus Sep 14, 2020
67c28d1
ecip-1099: update tests
iquidus Sep 14, 2020
88da6c6
ecip-1099: fix TestConcurrentDiskCacheGeneration
iquidus Sep 14, 2020
518b983
ecip-1099: add epochLength to DAG & cache generation logs
iquidus Sep 14, 2020
a4d9f2c
ecip-1099: optimize, calcEpoch() return epochLength with epoch
iquidus Sep 14, 2020
e35f1ed
ecip-1099: update algorithm_test
iquidus Sep 14, 2020
27708e9
ecip-1099: move to chain config; add epoch length to makecache & make…
iquidus Sep 17, 2020
34603f5
ecip-1099: change default Ethash.CacheDir & Ethash.DatasetDir to etchash
iquidus Sep 18, 2020
a735e7e
ecip-1099: rename (old/new)EpochLength
iquidus Sep 22, 2020
e8e2edc
ecip-1099: rename EtchashOnBlock
iquidus Sep 22, 2020
f79d473
ecip-1099: refactor makecache & makedag commands, add optional --epoc…
iquidus Sep 24, 2020
a4deb5b
ecip-1099: return to ethash dir for defaults
iquidus Sep 24, 2020
6396cd2
ecip-1099: update retesteth
iquidus Sep 24, 2020
3e09bcb
ecip-1099: set DatasetDir to etchash if ECIP1099 block is defined
iquidus Sep 24, 2020
3da6ae6
ecip-1099: fix flags
iquidus Sep 24, 2020
d1c338c
ecip-1099: use stack.ResolvePath on datasetDir
iquidus Sep 24, 2020
cbc0eb0
ecip-1099: add setEthashDatasetDir()
iquidus Sep 25, 2020
7fab030
ecip-1099: define fork blocks for mordor and classic mainnet
iquidus Sep 25, 2020
c9c5000
Merge branch 'master' into dev/ecip-1099
iquidus Sep 25, 2020
6f88679
eth,coregeth: goimports -w
meowsbits Sep 28, 2020
107dfff
forkid: add mordor and classic ECIP1099 fork block numbers to forkid …
meowsbits Sep 29, 2020
3b258ca
move EthashEpochLengthFlag from node to misc
iquidus Sep 29, 2020
95db31b
remove --epoch.length from geth help
iquidus Sep 29, 2020
96fc7ee
remove classic 1099 mainnet block for now
iquidus Sep 30, 2020
4878a14
forkid: remove ecip1099 classic activation numbers from forkid tests
meowsbits Sep 30, 2020
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions cmd/geth/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,7 @@ var (
utils.EthashDatasetsInMemoryFlag,
utils.EthashDatasetsOnDiskFlag,
utils.EthashDatasetsLockMmapFlag,
utils.EthashEpochLengthFlag,
iquidus marked this conversation as resolved.
Show resolved Hide resolved
utils.TxPoolLocalsFlag,
utils.TxPoolNoLocalsFlag,
utils.TxPoolJournalFlag,
Expand Down
20 changes: 16 additions & 4 deletions cmd/geth/misccmd.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,10 @@ var (
Name: "makecache",
Usage: "Generate ethash verification cache (for testing)",
ArgsUsage: "<blockNum> <outputDir>",
Category: "MISCELLANEOUS COMMANDS",
Flags: []cli.Flag{
utils.EthashEpochLengthFlag,
},
Category: "MISCELLANEOUS COMMANDS",
Description: `
The makecache command generates an ethash cache in <outputDir>.

Expand All @@ -49,7 +52,10 @@ Regular users do not need to execute it.
Name: "makedag",
Usage: "Generate ethash mining DAG (for testing)",
ArgsUsage: "<blockNum> <outputDir>",
Category: "MISCELLANEOUS COMMANDS",
Flags: []cli.Flag{
utils.EthashEpochLengthFlag,
},
Category: "MISCELLANEOUS COMMANDS",
Description: `
The makedag command generates an ethash DAG in <outputDir>.

Expand Down Expand Up @@ -86,7 +92,10 @@ func makecache(ctx *cli.Context) error {
if err != nil {
utils.Fatalf("Invalid block number: %v", err)
}
ethash.MakeCache(block, args[1])

epochLength := ctx.Uint64(utils.EthashEpochLengthFlag.Name)

ethash.MakeCache(block, epochLength, args[1])

return nil
}
Expand All @@ -101,7 +110,10 @@ func makedag(ctx *cli.Context) error {
if err != nil {
utils.Fatalf("Invalid block number: %v", err)
}
ethash.MakeDataset(block, args[1])

epochLength := ctx.Uint64(utils.EthashEpochLengthFlag.Name)

ethash.MakeDataset(block, epochLength, args[1])

return nil
}
Expand Down
1 change: 1 addition & 0 deletions cmd/geth/retesteth.go
Original file line number Diff line number Diff line change
Expand Up @@ -398,6 +398,7 @@ func (api *RetestethAPI) SetChainParams(ctx context.Context, chainParams ChainPa
DatasetsInMem: 1,
DatasetsOnDisk: 2,
DatasetsLockMmap: false,
ECIP1099Block: api.chainConfig.GetEthashECIP1099Transition(),
}, nil, false)
default:
return false, fmt.Errorf("unrecognised seal engine: %s", chainParams.SealEngine)
Expand Down
34 changes: 33 additions & 1 deletion cmd/utils/flags.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,9 @@ import (
"io/ioutil"
"math/big"
"os"
"os/user"
"path/filepath"
"runtime"
"strconv"
"strings"
"text/tabwriter"
Expand Down Expand Up @@ -321,6 +323,11 @@ var (
Name: "ethash.dagslockmmap",
Usage: "Lock memory maps for recent ethash mining DAGs",
}
EthashEpochLengthFlag = cli.Int64Flag{
Name: "epoch.length",
Usage: "Sets epoch length for makecache & makedag commands",
Value: 30000, // TODO - iquidus
iquidus marked this conversation as resolved.
Show resolved Hide resolved
}
// Transaction pool settings
TxPoolLocalsFlag = cli.StringFlag{
Name: "txpool.locals",
Expand Down Expand Up @@ -1940,15 +1947,40 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readOnly bool) (chain *core.B
} else {
engine = ethash.NewFaker()
if !ctx.GlobalBool(FakePoWFlag.Name) {
datasetDir := stack.ResolvePath(eth.DefaultConfig.Ethash.DatasetDir)
ecip1099Block = config.GetEthashECIP1099Transition()
// check if ECIP-1099 is configured for this chain
if ecip1099Block != nil {
// ECIP-1099 is set, use etchash dir for DAGs instead
home := os.Getenv("HOME")
if home == "" {
if user, err := user.Current(); err == nil {
home = user.HomeDir
}
}
if runtime.GOOS == "darwin" {
datasetDir = filepath.Join(home, "Library", "Etchash")
} else if runtime.GOOS == "windows" {
localappdata := os.Getenv("LOCALAPPDATA")
if localappdata != "" {
datasetDir = filepath.Join(localappdata, "Etchash")
} else {
datasetDir = filepath.Join(home, "AppData", "Local", "Etchash")
}
} else {
datasetDir = filepath.Join(home, ".etchash")
}
}
iquidus marked this conversation as resolved.
Show resolved Hide resolved
engine = ethash.New(ethash.Config{
CacheDir: stack.ResolvePath(eth.DefaultConfig.Ethash.CacheDir),
CachesInMem: eth.DefaultConfig.Ethash.CachesInMem,
CachesOnDisk: eth.DefaultConfig.Ethash.CachesOnDisk,
CachesLockMmap: eth.DefaultConfig.Ethash.CachesLockMmap,
DatasetDir: stack.ResolvePath(eth.DefaultConfig.Ethash.DatasetDir),
DatasetDir: datasetDir,
DatasetsInMem: eth.DefaultConfig.Ethash.DatasetsInMem,
DatasetsOnDisk: eth.DefaultConfig.Ethash.DatasetsOnDisk,
DatasetsLockMmap: eth.DefaultConfig.Ethash.DatasetsLockMmap,
ECIP1099Block: ecip1099Block,
}, nil, false)
}
}
Expand Down
76 changes: 45 additions & 31 deletions consensus/ethash/algorithm.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,34 +35,51 @@ import (
)

const (
datasetInitBytes = 1 << 30 // Bytes in dataset at genesis
datasetGrowthBytes = 1 << 23 // Dataset growth per epoch
cacheInitBytes = 1 << 24 // Bytes in cache at genesis
cacheGrowthBytes = 1 << 17 // Cache growth per epoch
epochLength = 30000 // Blocks per epoch
mixBytes = 128 // Width of mix
hashBytes = 64 // Hash length in bytes
hashWords = 16 // Number of 32 bit ints in a hash
datasetParents = 256 // Number of parents of each dataset element
cacheRounds = 3 // Number of rounds in cache production
loopAccesses = 64 // Number of accesses in hashimoto loop
datasetInitBytes = 1 << 30 // Bytes in dataset at genesis
datasetGrowthBytes = 1 << 23 // Dataset growth per epoch
cacheInitBytes = 1 << 24 // Bytes in cache at genesis
cacheGrowthBytes = 1 << 17 // Cache growth per epoch
epochLengthDefault = 30000 // Default epoch length (blocks per epoch)
epochLengthECIP1099 = 60000 // Blocks per epoch if ECIP-1099 is activated
mixBytes = 128 // Width of mix
hashBytes = 64 // Hash length in bytes
hashWords = 16 // Number of 32 bit ints in a hash
datasetParents = 256 // Number of parents of each dataset element
cacheRounds = 3 // Number of rounds in cache production
loopAccesses = 64 // Number of accesses in hashimoto loop
maxEpoch = 2048 // Max Epoch for included tables
)

// calcEpochLength returns the epoch length in blocks for the given block
// number. If the ECIP-1099 fork block is configured and has been reached,
// the doubled epoch length applies; otherwise the historical default is used.
func calcEpochLength(block uint64, ecip1099FBlock *uint64) uint64 {
	// Guard clause: only a configured and activated fork switches lengths.
	if ecip1099FBlock != nil && block >= *ecip1099FBlock {
		return epochLengthECIP1099
	}
	return epochLengthDefault
}

// calcEpoch returns the epoch index that the given block number falls into,
// for the supplied epoch length (which varies under ECIP-1099).
func calcEpoch(block uint64, epochLength uint64) uint64 {
	return block / epochLength
}

// cacheSize returns the size of the ethash verification cache that belongs to a certain
// block number.
func cacheSize(block uint64) uint64 {
epoch := int(block / epochLength)
func cacheSize(block uint64, epoch uint64) uint64 {
if epoch < maxEpoch {
return cacheSizes[epoch]
return cacheSizes[int(epoch)]
}
return calcCacheSize(epoch)
}

// calcCacheSize calculates the cache size for epoch. The cache size grows linearly,
// however, we always take the highest prime below the linearly growing threshold in order
// to reduce the risk of accidental regularities leading to cyclic behavior.
func calcCacheSize(epoch int) uint64 {
size := cacheInitBytes + cacheGrowthBytes*uint64(epoch) - hashBytes
func calcCacheSize(epoch uint64) uint64 {
size := cacheInitBytes + cacheGrowthBytes*epoch - hashBytes
for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
size -= 2 * hashBytes
}
Expand All @@ -71,19 +88,18 @@ func calcCacheSize(epoch int) uint64 {

// datasetSize returns the size of the ethash mining dataset that belongs to a certain
// block number.
func datasetSize(block uint64) uint64 {
epoch := int(block / epochLength)
func datasetSize(block uint64, epoch uint64) uint64 {
if epoch < maxEpoch {
return datasetSizes[epoch]
return datasetSizes[int(epoch)]
}
return calcDatasetSize(epoch)
}

// calcDatasetSize calculates the dataset size for epoch. The dataset size grows linearly,
// however, we always take the highest prime below the linearly growing threshold in order
// to reduce the risk of accidental regularities leading to cyclic behavior.
func calcDatasetSize(epoch int) uint64 {
size := datasetInitBytes + datasetGrowthBytes*uint64(epoch) - mixBytes
func calcDatasetSize(epoch uint64) uint64 {
size := datasetInitBytes + datasetGrowthBytes*epoch - mixBytes
for !new(big.Int).SetUint64(size / mixBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
size -= 2 * mixBytes
}
Expand Down Expand Up @@ -120,11 +136,11 @@ func makeHasher(h hash.Hash) hasher {
// dataset.
func seedHash(block uint64) []byte {
seed := make([]byte, 32)
if block < epochLength {
if block < epochLengthDefault {
return seed
}
keccak256 := makeHasher(sha3.NewLegacyKeccak256())
for i := 0; i < int(block/epochLength); i++ {
for i := 0; i < int(block/epochLengthDefault); i++ {
keccak256(seed, seed)
}
return seed
Expand All @@ -136,7 +152,7 @@ func seedHash(block uint64) []byte {
// algorithm from Strict Memory Hard Hashing Functions (2014). The output is a
// set of 524288 64-byte values.
// This method places the result into dest in machine byte order.
func generateCache(dest []uint32, epoch uint64, seed []byte) {
func generateCache(dest []uint32, epoch uint64, epochLength uint64, seed []byte) {
// Print some debug logs to allow analysis on low end devices
logger := log.New("epoch", epoch)

Expand All @@ -148,7 +164,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
if elapsed > 3*time.Second {
logFn = logger.Info
}
logFn("Generated ethash verification cache", "elapsed", common.PrettyDuration(elapsed))
logFn("Generated ethash verification cache", "epochLength", epochLength, "elapsed", common.PrettyDuration(elapsed))
}()
// Convert our destination slice to a byte buffer
header := *(*reflect.SliceHeader)(unsafe.Pointer(&dest))
Expand All @@ -172,7 +188,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
case <-done:
return
case <-time.After(3 * time.Second):
logger.Info("Generating ethash verification cache", "percentage", atomic.LoadUint32(&progress)*100/uint32(rows)/4, "elapsed", common.PrettyDuration(time.Since(start)))
logger.Info("Generating ethash verification cache", "epochLength", epochLength, "percentage", atomic.LoadUint32(&progress)*100/uint32(rows)/4, "elapsed", common.PrettyDuration(time.Since(start)))
}
}
}()
Expand Down Expand Up @@ -264,7 +280,7 @@ func generateDatasetItem(cache []uint32, index uint32, keccak512 hasher) []byte

// generateDataset generates the entire ethash dataset for mining.
// This method places the result into dest in machine byte order.
func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
func generateDataset(dest []uint32, epoch uint64, epochLength uint64, cache []uint32) {
// Print some debug logs to allow analysis on low end devices
logger := log.New("epoch", epoch)

Expand All @@ -276,7 +292,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
if elapsed > 3*time.Second {
logFn = logger.Info
}
logFn("Generated ethash verification cache", "elapsed", common.PrettyDuration(elapsed))
logFn("Generated ethash verification cache", "epochLength", epochLength, "elapsed", common.PrettyDuration(elapsed))
}()

// Figure out whether the bytes need to be swapped for the machine
Expand Down Expand Up @@ -320,7 +336,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
copy(dataset[index*hashBytes:], item)

if status := atomic.AddUint32(&progress, 1); status%percent == 0 {
logger.Info("Generating DAG in progress", "percentage", uint64(status*100)/(size/hashBytes), "elapsed", common.PrettyDuration(time.Since(start)))
logger.Info("Generating DAG in progress", "epochLength", epochLength, "percentage", uint64(status*100)/(size/hashBytes), "elapsed", common.PrettyDuration(time.Since(start)))
}
}
}(i)
Expand Down Expand Up @@ -400,8 +416,6 @@ func hashimotoFull(dataset []uint32, hash []byte, nonce uint64) ([]byte, []byte)
return hashimoto(hash, nonce, uint64(len(dataset))*4, lookup)
}

const maxEpoch = 2048

// datasetSizes is a lookup table for the ethash dataset size for the first 2048
// epochs (i.e. 61440000 blocks).
var datasetSizes = [maxEpoch]uint64{
Expand Down
Loading