cmd, consensus: add option to disable mmap for DAG caches/datasets (#20484)

* cmd, consensus: add option to disable mmap for DAG caches/datasets

* consensus: add benchmarks for mmap with/without lock
de1acr0ix authored Mar 31, 2020
1 parent 76eed9e commit 8f05cfa
Showing 8 changed files with 108 additions and 48 deletions.
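
In short: the commit threads two new booleans, CachesLockMmap and DatasetsLockMmap, from a pair of new CLI flags (--ethash.cacheslockmmap and --ethash.dagslockmmap) through eth.Config into ethash.Config, and from there into the cache/dataset generation paths, where they decide whether freshly memory-mapped files are locked into RAM. As an illustrative sketch (not part of the diff; the import path is assumed to be the usual go-ethereum one), an embedder could set the new options directly on the engine:

package main

import (
	"github.com/ethereum/go-ethereum/consensus/ethash"
)

func main() {
	// Illustrative values only; the two LockMmap fields are the ones added in this commit.
	engine := ethash.New(ethash.Config{
		CacheDir:         "ethash",
		CachesInMem:      2,
		CachesOnDisk:     3,
		CachesLockMmap:   true, // lock memory maps of recent verification caches
		DatasetDir:       "ethash-dags",
		DatasetsInMem:    1,
		DatasetsOnDisk:   2,
		DatasetsLockMmap: true, // lock memory maps of recent mining DAGs
		PowMode:          ethash.ModeNormal,
	}, nil, false)
	defer engine.Close()
}
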
2 changes: 2 additions & 0 deletions cmd/geth/main.go
@@ -74,9 +74,11 @@ var (
utils.EthashCacheDirFlag,
utils.EthashCachesInMemoryFlag,
utils.EthashCachesOnDiskFlag,
utils.EthashCachesLockMmapFlag,
utils.EthashDatasetDirFlag,
utils.EthashDatasetsInMemoryFlag,
utils.EthashDatasetsOnDiskFlag,
utils.EthashDatasetsLockMmapFlag,
utils.TxPoolLocalsFlag,
utils.TxPoolNoLocalsFlag,
utils.TxPoolJournalFlag,
12 changes: 7 additions & 5 deletions cmd/geth/retesteth.go
@@ -387,11 +387,13 @@ func (api *RetestethAPI) SetChainParams(ctx context.Context, chainParams ChainPa
inner = ethash.NewFaker()
case "Ethash":
inner = ethash.New(ethash.Config{
CacheDir: "ethash",
CachesInMem: 2,
CachesOnDisk: 3,
DatasetsInMem: 1,
DatasetsOnDisk: 2,
CacheDir: "ethash",
CachesInMem: 2,
CachesOnDisk: 3,
CachesLockMmap: false,
DatasetsInMem: 1,
DatasetsOnDisk: 2,
DatasetsLockMmap: false,
}, nil, false)
default:
return false, fmt.Errorf("unrecognised seal engine: %s", chainParams.SealEngine)
2 changes: 2 additions & 0 deletions cmd/geth/usage.go
@@ -109,9 +109,11 @@ var AppHelpFlagGroups = []flagGroup{
utils.EthashCacheDirFlag,
utils.EthashCachesInMemoryFlag,
utils.EthashCachesOnDiskFlag,
utils.EthashCachesLockMmapFlag,
utils.EthashDatasetDirFlag,
utils.EthashDatasetsInMemoryFlag,
utils.EthashDatasetsOnDiskFlag,
utils.EthashDatasetsLockMmapFlag,
},
},
{
28 changes: 22 additions & 6 deletions cmd/utils/flags.go
@@ -305,6 +305,10 @@ var (
Usage: "Number of recent ethash caches to keep on disk (16MB each)",
Value: eth.DefaultConfig.Ethash.CachesOnDisk,
}
EthashCachesLockMmapFlag = cli.BoolFlag{
Name: "ethash.cacheslockmmap",
Usage: "Lock memory maps of recent ethash caches",
}
EthashDatasetDirFlag = DirectoryFlag{
Name: "ethash.dagdir",
Usage: "Directory to store the ethash mining DAGs",
@@ -320,6 +324,10 @@ var (
Usage: "Number of recent ethash mining DAGs to keep on disk (1+GB each)",
Value: eth.DefaultConfig.Ethash.DatasetsOnDisk,
}
EthashDatasetsLockMmapFlag = cli.BoolFlag{
Name: "ethash.dagslockmmap",
Usage: "Lock memory maps for recent ethash mining DAGs",
}
// Transaction pool settings
TxPoolLocalsFlag = cli.StringFlag{
Name: "txpool.locals",
@@ -1306,12 +1314,18 @@ func setEthash(ctx *cli.Context, cfg *eth.Config) {
if ctx.GlobalIsSet(EthashCachesOnDiskFlag.Name) {
cfg.Ethash.CachesOnDisk = ctx.GlobalInt(EthashCachesOnDiskFlag.Name)
}
if ctx.GlobalIsSet(EthashCachesLockMmapFlag.Name) {
cfg.Ethash.CachesLockMmap = ctx.GlobalBool(EthashCachesLockMmapFlag.Name)
}
if ctx.GlobalIsSet(EthashDatasetsInMemoryFlag.Name) {
cfg.Ethash.DatasetsInMem = ctx.GlobalInt(EthashDatasetsInMemoryFlag.Name)
}
if ctx.GlobalIsSet(EthashDatasetsOnDiskFlag.Name) {
cfg.Ethash.DatasetsOnDisk = ctx.GlobalInt(EthashDatasetsOnDiskFlag.Name)
}
if ctx.GlobalIsSet(EthashDatasetsLockMmapFlag.Name) {
cfg.Ethash.DatasetsLockMmap = ctx.GlobalBool(EthashDatasetsLockMmapFlag.Name)
}
}

func setMiner(ctx *cli.Context, cfg *miner.Config) {
@@ -1721,12 +1735,14 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
engine = ethash.NewFaker()
if !ctx.GlobalBool(FakePoWFlag.Name) {
engine = ethash.New(ethash.Config{
CacheDir: stack.ResolvePath(eth.DefaultConfig.Ethash.CacheDir),
CachesInMem: eth.DefaultConfig.Ethash.CachesInMem,
CachesOnDisk: eth.DefaultConfig.Ethash.CachesOnDisk,
DatasetDir: stack.ResolvePath(eth.DefaultConfig.Ethash.DatasetDir),
DatasetsInMem: eth.DefaultConfig.Ethash.DatasetsInMem,
DatasetsOnDisk: eth.DefaultConfig.Ethash.DatasetsOnDisk,
CacheDir: stack.ResolvePath(eth.DefaultConfig.Ethash.CacheDir),
CachesInMem: eth.DefaultConfig.Ethash.CachesInMem,
CachesOnDisk: eth.DefaultConfig.Ethash.CachesOnDisk,
CachesLockMmap: eth.DefaultConfig.Ethash.CachesLockMmap,
DatasetDir: stack.ResolvePath(eth.DefaultConfig.Ethash.DatasetDir),
DatasetsInMem: eth.DefaultConfig.Ethash.DatasetsInMem,
DatasetsOnDisk: eth.DefaultConfig.Ethash.DatasetsOnDisk,
DatasetsLockMmap: eth.DefaultConfig.Ethash.DatasetsLockMmap,
}, nil, false)
}
}
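
The flags.go wiring above follows the usual geth pattern: declare a cli.BoolFlag, list it in the flag groups, and copy it into the config only when ctx.GlobalIsSet reports the user actually passed it, so the compiled-in defaults stay in force otherwise. A self-contained sketch of that pattern, assuming urfave/cli v1 (the flag name mirrors the diff; the config type and everything else here are illustrative, not go-ethereum code):

package main

import (
	"fmt"
	"os"

	"gopkg.in/urfave/cli.v1"
)

// hypothetical stand-in for the relevant slice of eth.Config
type ethashConfig struct {
	CachesLockMmap bool
}

var ethashCachesLockMmapFlag = cli.BoolFlag{
	Name:  "ethash.cacheslockmmap",
	Usage: "Lock memory maps of recent ethash caches",
}

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{ethashCachesLockMmapFlag}
	app.Action = func(ctx *cli.Context) error {
		cfg := ethashConfig{} // compiled-in default: false
		if ctx.GlobalIsSet(ethashCachesLockMmapFlag.Name) {
			cfg.CachesLockMmap = ctx.GlobalBool(ethashCachesLockMmapFlag.Name)
		}
		fmt.Println("CachesLockMmap:", cfg.CachesLockMmap)
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
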
27 changes: 26 additions & 1 deletion consensus/ethash/algorithm_test.go
@@ -729,7 +729,7 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) {

go func(idx int) {
defer pend.Done()
ethash := New(Config{cachedir, 0, 1, "", 0, 0, ModeNormal, nil}, nil, false)
ethash := New(Config{cachedir, 0, 1, false, "", 0, 0, false, ModeNormal, nil}, nil, false)
defer ethash.Close()
if err := ethash.VerifySeal(nil, block.Header()); err != nil {
t.Errorf("proc %d: block verification failed: %v", idx, err)
@@ -787,3 +787,28 @@ func BenchmarkHashimotoFullSmall(b *testing.B) {
hashimotoFull(dataset, hash, 0)
}
}

func benchmarkHashimotoFullMmap(b *testing.B, name string, lock bool) {
b.Run(name, func(b *testing.B) {
tmpdir, err := ioutil.TempDir("", "ethash-test")
if err != nil {
b.Fatal(err)
}
defer os.RemoveAll(tmpdir)

d := &dataset{epoch: 0}
d.generate(tmpdir, 1, lock, false)
var hash [common.HashLength]byte
b.ResetTimer()
for i := 0; i < b.N; i++ {
binary.PutVarint(hash[:], int64(i))
hashimotoFull(d.dataset, hash[:], 0)
}
})
}

// Benchmarks the full verification performance for mmap
func BenchmarkHashimotoFullMmap(b *testing.B) {
benchmarkHashimotoFullMmap(b, "WithLock", true)
benchmarkHashimotoFullMmap(b, "WithoutLock", false)
}
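
A quick note on the new benchmarks: benchmarkHashimotoFullMmap generates the epoch-0 dataset into a temporary directory, once with the mapping locked via mem.Lock() and once without, then times hashimotoFull over the mapped data. To compare the two cases one would run something like go test -run NONE -bench HashimotoFullMmap ./consensus/ethash (invocation assumed, not part of the commit); the point of interest is whether locking changes steady-state hashing time once the DAG pages are resident.
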
59 changes: 34 additions & 25 deletions consensus/ethash/ethash.go
@@ -48,7 +48,7 @@ var (
two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))

// sharedEthash is a full instance that can be shared between multiple users.
sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal, nil}, nil, false)
sharedEthash = New(Config{"", 3, 0, false, "", 1, 0, false, ModeNormal, nil}, nil, false)

// algorithmRevision is the data structure version used for file naming.
algorithmRevision = 23
@@ -65,7 +65,7 @@ func isLittleEndian() bool {
}

// memoryMap tries to memory map a file of uint32s for read only access.
func memoryMap(path string) (*os.File, mmap.MMap, []uint32, error) {
func memoryMap(path string, lock bool) (*os.File, mmap.MMap, []uint32, error) {
file, err := os.OpenFile(path, os.O_RDONLY, 0644)
if err != nil {
return nil, nil, nil, err
@@ -82,6 +82,13 @@ func memoryMap(path string) (*os.File, mmap.MMap, []uint32, error) {
return nil, nil, nil, ErrInvalidDumpMagic
}
}
if lock {
if err := mem.Lock(); err != nil {
mem.Unmap()
file.Close()
return nil, nil, nil, err
}
}
return file, mem, buffer[len(dumpMagic):], err
}

@@ -107,7 +114,7 @@ func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) {
// memoryMapAndGenerate tries to memory map a temporary file of uint32s for write
// access, fill it with the data from a generator and then move it into the final
// path requested.
func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) {
func memoryMapAndGenerate(path string, size uint64, lock bool, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) {
// Ensure the data folder exists
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
return nil, nil, nil, err
@@ -142,7 +149,7 @@ func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint
if err := os.Rename(temp, path); err != nil {
return nil, nil, nil, err
}
return memoryMap(path)
return memoryMap(path, lock)
}

// lru tracks caches or datasets by their last use time, keeping at most N of them.
@@ -213,7 +220,7 @@ func newCache(epoch uint64) interface{} {
}

// generate ensures that the cache content is generated before use.
func (c *cache) generate(dir string, limit int, test bool) {
func (c *cache) generate(dir string, limit int, lock bool, test bool) {
c.once.Do(func() {
size := cacheSize(c.epoch*epochLength + 1)
seed := seedHash(c.epoch*epochLength + 1)
@@ -240,15 +247,15 @@ func (c *cache) generate(dir string, limit int, test bool) {

// Try to load the file from disk and memory map it
var err error
c.dump, c.mmap, c.cache, err = memoryMap(path)
c.dump, c.mmap, c.cache, err = memoryMap(path, lock)
if err == nil {
logger.Debug("Loaded old ethash cache from disk")
return
}
logger.Debug("Failed to load old ethash cache", "err", err)

// No previous cache available, create a new cache file to fill
c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) })
c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, lock, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) })
if err != nil {
logger.Error("Failed to generate mapped ethash cache", "err", err)

@@ -290,7 +297,7 @@ func newDataset(epoch uint64) interface{} {
}

// generate ensures that the dataset content is generated before use.
func (d *dataset) generate(dir string, limit int, test bool) {
func (d *dataset) generate(dir string, limit int, lock bool, test bool) {
d.once.Do(func() {
// Mark the dataset generated after we're done. This is needed for remote
defer atomic.StoreUint32(&d.done, 1)
@@ -326,7 +333,7 @@ func (d *dataset) generate(dir string, limit int, test bool) {

// Try to load the file from disk and memory map it
var err error
d.dump, d.mmap, d.dataset, err = memoryMap(path)
d.dump, d.mmap, d.dataset, err = memoryMap(path, lock)
if err == nil {
logger.Debug("Loaded old ethash dataset from disk")
return
@@ -337,7 +344,7 @@ func (d *dataset) generate(dir string, limit int, test bool) {
cache := make([]uint32, csize/4)
generateCache(cache, d.epoch, seed)

d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) })
d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, lock, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) })
if err != nil {
logger.Error("Failed to generate mapped ethash dataset", "err", err)

@@ -372,13 +379,13 @@ func (d *dataset) finalizer() {
// MakeCache generates a new ethash cache and optionally stores it to disk.
func MakeCache(block uint64, dir string) {
c := cache{epoch: block / epochLength}
c.generate(dir, math.MaxInt32, false)
c.generate(dir, math.MaxInt32, false, false)
}

// MakeDataset generates a new ethash dataset and optionally stores it to disk.
func MakeDataset(block uint64, dir string) {
d := dataset{epoch: block / epochLength}
d.generate(dir, math.MaxInt32, false)
d.generate(dir, math.MaxInt32, false, false)
}

// Mode defines the type and amount of PoW verification an ethash engine makes.
@@ -394,13 +401,15 @@ const (

// Config are the configuration parameters of the ethash.
type Config struct {
CacheDir string
CachesInMem int
CachesOnDisk int
DatasetDir string
DatasetsInMem int
DatasetsOnDisk int
PowMode Mode
CacheDir string
CachesInMem int
CachesOnDisk int
CachesLockMmap bool
DatasetDir string
DatasetsInMem int
DatasetsOnDisk int
DatasetsLockMmap bool
PowMode Mode

Log log.Logger `toml:"-"`
}
@@ -549,12 +558,12 @@ func (ethash *Ethash) cache(block uint64) *cache {
current := currentI.(*cache)

// Wait for generation finish.
current.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest)
current.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.CachesLockMmap, ethash.config.PowMode == ModeTest)

// If we need a new future cache, now's a good time to regenerate it.
if futureI != nil {
future := futureI.(*cache)
go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest)
go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.CachesLockMmap, ethash.config.PowMode == ModeTest)
}
return current
}
@@ -574,20 +583,20 @@ func (ethash *Ethash) dataset(block uint64, async bool) *dataset {
// If async is specified, generate everything in a background thread
if async && !current.generated() {
go func() {
current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)

if futureI != nil {
future := futureI.(*dataset)
future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)
}
}()
} else {
// Either blocking generation was requested, or already done
current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)

if futureI != nil {
future := futureI.(*dataset)
go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)
}
}
return current
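
The heart of the change sits in memoryMap, memoryMapAndGenerate and the two generate methods above: each now takes a lock flag, and when it is set the freshly mapped cache or DAG is pinned in RAM via mem.Lock() (mlock underneath), trading resident memory for the absence of page faults during verification and mining. A minimal sketch of the map-then-lock pattern in isolation, assuming the edsrzf/mmap-go package that ethash.go already imports as mmap (the helper below is illustrative, not go-ethereum code):

package main

import (
	"fmt"
	"os"

	mmap "github.com/edsrzf/mmap-go"
)

// mapReadOnly maps path read-only and optionally locks the mapping in RAM.
func mapReadOnly(path string, lock bool) (*os.File, mmap.MMap, error) {
	file, err := os.OpenFile(path, os.O_RDONLY, 0644)
	if err != nil {
		return nil, nil, err
	}
	mem, err := mmap.Map(file, mmap.RDONLY, 0)
	if err != nil {
		file.Close()
		return nil, nil, err
	}
	if lock {
		// Pin the pages so the kernel cannot swap them out (mlock).
		if err := mem.Lock(); err != nil {
			mem.Unmap()
			file.Close()
			return nil, nil, err
		}
	}
	return file, mem, nil
}

func main() {
	file, mem, err := mapReadOnly(os.Args[1], true)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer file.Close()
	defer mem.Unmap() // unmapping also releases the lock
	fmt.Println("mapped", len(mem), "bytes")
}

Locking a 1+GB DAG can fail when the process's RLIMIT_MEMLOCK is too small, which is presumably why the error path in the diff unmaps and returns the error rather than silently continuing.
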
14 changes: 8 additions & 6 deletions eth/backend.go
@@ -266,12 +266,14 @@ func CreateConsensusEngine(ctx *node.ServiceContext, chainConfig *params.ChainCo
return ethash.NewShared()
default:
engine := ethash.New(ethash.Config{
CacheDir: ctx.ResolvePath(config.CacheDir),
CachesInMem: config.CachesInMem,
CachesOnDisk: config.CachesOnDisk,
DatasetDir: config.DatasetDir,
DatasetsInMem: config.DatasetsInMem,
DatasetsOnDisk: config.DatasetsOnDisk,
CacheDir: ctx.ResolvePath(config.CacheDir),
CachesInMem: config.CachesInMem,
CachesOnDisk: config.CachesOnDisk,
CachesLockMmap: config.CachesLockMmap,
DatasetDir: config.DatasetDir,
DatasetsInMem: config.DatasetsInMem,
DatasetsOnDisk: config.DatasetsOnDisk,
DatasetsLockMmap: config.DatasetsLockMmap,
}, notify, noverify)
engine.SetThreads(-1) // Disable CPU mining
return engine
12 changes: 7 additions & 5 deletions eth/config.go
@@ -37,11 +37,13 @@ import (
var DefaultConfig = Config{
SyncMode: downloader.FastSync,
Ethash: ethash.Config{
CacheDir: "ethash",
CachesInMem: 2,
CachesOnDisk: 3,
DatasetsInMem: 1,
DatasetsOnDisk: 2,
CacheDir: "ethash",
CachesInMem: 2,
CachesOnDisk: 3,
CachesLockMmap: false,
DatasetsInMem: 1,
DatasetsOnDisk: 2,
DatasetsLockMmap: false,
},
NetworkId: 1,
LightPeers: 100,
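
Both new options default to false in eth.DefaultConfig, so nodes keep today's unlocked-mmap behaviour unless they opt in via the flags or a config file. Programmatically, overriding the defaults might look like the following sketch (assumes the eth and ethash packages from this tree; not part of the diff):

package main

import (
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/eth"
)

func main() {
	cfg := eth.DefaultConfig // copy the package-level defaults shown above
	cfg.Ethash.CachesLockMmap = true
	cfg.Ethash.DatasetsLockMmap = true

	// Hand the tweaked ethash settings straight to the engine constructor.
	engine := ethash.New(cfg.Ethash, nil, false)
	defer engine.Close()
}
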
