From 2a75fe3308faf4d77054e00b55566c9f18591572 Mon Sep 17 00:00:00 2001 From: Andrey Petrov Date: Sun, 25 Feb 2018 15:39:29 -0500 Subject: [PATCH 001/166] rpc: Add admin_addTrustedPeer and admin_removeTrustedPeer. These RPC calls are analogous to Parity's parity_addReservedPeer and parity_removeReservedPeer. They are useful for adjusting the trusted peer set during runtime, without requiring restarting the server. --- internal/web3ext/web3ext.go | 10 ++++++++ node/api.go | 33 +++++++++++++++++++++++++- p2p/server.go | 46 ++++++++++++++++++++++++++++++++++--- 3 files changed, 85 insertions(+), 4 deletions(-) diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go index 89ebceec7c00..99b982178a86 100644 --- a/internal/web3ext/web3ext.go +++ b/internal/web3ext/web3ext.go @@ -123,6 +123,16 @@ web3._extend({ call: 'admin_removePeer', params: 1 }), + new web3._extend.Method({ + name: 'addTrustedPeer', + call: 'admin_addTrustedPeer', + params: 1 + }), + new web3._extend.Method({ + name: 'removeTrustedPeer', + call: 'admin_removeTrustedPeer', + params: 1 + }), new web3._extend.Method({ name: 'exportChain', call: 'admin_exportChain', diff --git a/node/api.go b/node/api.go index 989d3884acfd..e5f38846311f 100644 --- a/node/api.go +++ b/node/api.go @@ -59,7 +59,7 @@ func (api *PrivateAdminAPI) AddPeer(url string) (bool, error) { return true, nil } -// RemovePeer disconnects from a a remote node if the connection exists +// RemovePeer disconnects from a remote node if the connection exists func (api *PrivateAdminAPI) RemovePeer(url string) (bool, error) { // Make sure the server is running, fail otherwise server := api.node.Server() @@ -75,6 +75,37 @@ func (api *PrivateAdminAPI) RemovePeer(url string) (bool, error) { return true, nil } +// AddTrustedPeer allows a remote node to always connect, even if slots are full +func (api *PrivateAdminAPI) AddTrustedPeer(url string) (bool, error) { + // Make sure the server is running, fail otherwise + server := api.node.Server() + if server == nil { + return false, ErrNodeStopped + } + node, err := discover.ParseNode(url) + if err != nil { + return false, fmt.Errorf("invalid enode: %v", err) + } + server.AddTrustedPeer(node) + return true, nil +} + +// RemoveTrustedPeer removes a remote node from the trusted peer set, but it +// does not disconnect it automatically. +func (api *PrivateAdminAPI) RemoveTrustedPeer(url string) (bool, error) { + // Make sure the server is running, fail otherwise + server := api.node.Server() + if server == nil { + return false, ErrNodeStopped + } + node, err := discover.ParseNode(url) + if err != nil { + return false, fmt.Errorf("invalid enode: %v", err) + } + server.RemoveTrustedPeer(node) + return true, nil +} + // PeerEvents creates an RPC subscription which receives peer events from the // node's p2p.Server func (api *PrivateAdminAPI) PeerEvents(ctx context.Context) (*rpc.Subscription, error) { diff --git a/p2p/server.go b/p2p/server.go index cdb5b1926e59..39ff2f51e862 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -169,6 +169,8 @@ type Server struct { quit chan struct{} addstatic chan *discover.Node removestatic chan *discover.Node + addtrusted chan *discover.Node + removetrusted chan *discover.Node posthandshake chan *conn addpeer chan *conn delpeer chan peerDrop @@ -300,6 +302,23 @@ func (srv *Server) RemovePeer(node *discover.Node) { } } +// AddTrustedPeer adds the given node to a reserved whitelist which allows the +// node to always connect, even if the slot are full. 
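+//
+// Together with RemoveTrustedPeer below, this is what the new
+// admin_addTrustedPeer RPC calls into. From the geth console the call might
+// look like the following, where the enode URL is a placeholder:
+//
+//   admin.addTrustedPeer("enode://<pubkey>@<host>:<port>")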
+func (srv *Server) AddTrustedPeer(node *discover.Node) { + select { + case srv.addtrusted <- node: + case <-srv.quit: + } +} + +// RemoveTrustedPeer removes the given node from the trusted peer set. +func (srv *Server) RemoveTrustedPeer(node *discover.Node) { + select { + case srv.removetrusted <- node: + case <-srv.quit: + } +} + // SubscribePeers subscribes the given channel to peer events func (srv *Server) SubscribeEvents(ch chan *PeerEvent) event.Subscription { return srv.peerFeed.Subscribe(ch) @@ -410,6 +429,8 @@ func (srv *Server) Start() (err error) { srv.posthandshake = make(chan *conn) srv.addstatic = make(chan *discover.Node) srv.removestatic = make(chan *discover.Node) + srv.addtrusted = make(chan *discover.Node) + srv.removetrusted = make(chan *discover.Node) srv.peerOp = make(chan peerOpFunc) srv.peerOpDone = make(chan struct{}) @@ -546,8 +567,7 @@ func (srv *Server) run(dialstate dialer) { queuedTasks []task // tasks that can't run yet ) // Put trusted nodes into a map to speed up checks. - // Trusted peers are loaded on startup and cannot be - // modified while the server is running. + // Trusted peers are loaded on startup or added via AddTrustedPeer RPC. for _, n := range srv.TrustedNodes { trusted[n.ID] = true } @@ -599,12 +619,32 @@ running: case n := <-srv.removestatic: // This channel is used by RemovePeer to send a // disconnect request to a peer and begin the - // stop keeping the node connected + // stop keeping the node connected. srv.log.Trace("Removing static node", "node", n) dialstate.removeStatic(n) if p, ok := peers[n.ID]; ok { p.Disconnect(DiscRequested) } + case n := <-srv.addtrusted: + // This channel is used by AddTrustedPeer to add an enode + // to the trusted node set. + srv.log.Trace("Adding trusted node", "node", n) + trusted[n.ID] = true + // Mark any already-connected peer as trusted + if p, ok := peers[n.ID]; ok { + p.rw.flags |= trustedConn + } + case n := <-srv.removetrusted: + // This channel is used by RemoveTrustedPeer to remove an enode + // from the trusted node set. + srv.log.Trace("Removing trusted node", "node", n) + if _, ok := trusted[n.ID]; ok { + delete(trusted, n.ID) + } + // Unmark any already-connected peer as trusted + if p, ok := peers[n.ID]; ok { + p.rw.flags &= ^trustedConn + } case op := <-srv.peerOp: // This channel is used by Peers and PeerCount. 
op(peers) From 773857a5242a3fe7458a9c9b60a4ea6333582e56 Mon Sep 17 00:00:00 2001 From: Andrey Petrov Date: Sun, 18 Mar 2018 12:25:35 -0400 Subject: [PATCH 002/166] p2p: Test for MaxPeers=0 and TrustedPeer override --- p2p/server_test.go | 54 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/p2p/server_test.go b/p2p/server_test.go index 10c36528ebe9..efab9bb5e51f 100644 --- a/p2p/server_test.go +++ b/p2p/server_test.go @@ -363,7 +363,61 @@ func TestServerAtCap(t *testing.T) { if !c.is(trustedConn) { t.Error("Server did not set trusted flag") } +} + +func TestServerPeerLimits(t *testing.T) { + srvkey := newkey() + srvid := discover.PubkeyID(&srvkey.PublicKey) + + var tp *setupTransport = &setupTransport{id: srvid, phs: &protoHandshake{ID: srvid}} + var flags connFlag = dynDialedConn + var dialDest *discover.Node = &discover.Node{ID: srvid} + srv := &Server{ + Config: Config{ + PrivateKey: srvkey, + MaxPeers: 0, + NoDial: true, + Protocols: []Protocol{discard}, + }, + newTransport: func(fd net.Conn) transport { return tp }, + log: log.New(), + } + if err := srv.Start(); err != nil { + t.Fatalf("couldn't start server: %v", err) + } + + // Check that server is full (MaxPeers=0) + conn, _ := net.Pipe() + srv.SetupConn(conn, flags, dialDest) + if tp.closeErr != DiscTooManyPeers { + t.Errorf("unexpected close error: %q", tp.closeErr) + } + conn.Close() + + srv.AddTrustedPeer(dialDest) + + // Check that server allows a trusted peer despite being full. + conn, _ = net.Pipe() + srv.SetupConn(conn, flags, dialDest) + if tp.closeErr == DiscTooManyPeers { + t.Errorf("failed to bypass MaxPeers with trusted node: %q", tp.closeErr) + } + + if tp.closeErr != DiscSelf { + t.Errorf("unexpected close error: %q", tp.closeErr) + } + conn.Close() + + srv.RemoveTrustedPeer(dialDest) + + // Check that server is full again. + conn, _ = net.Pipe() + srv.SetupConn(conn, flags, dialDest) + if tp.closeErr != DiscTooManyPeers { + t.Errorf("unexpected close error: %q", tp.closeErr) + } + conn.Close() } func TestServerSetupConn(t *testing.T) { From 699794d88d86c30fa8ac74c0bbe3e0ac9cde88a2 Mon Sep 17 00:00:00 2001 From: Andrey Petrov Date: Tue, 5 Jun 2018 15:45:43 -0400 Subject: [PATCH 003/166] p2p: More tests for AddTrustedPeer/RemoveTrustedPeer --- p2p/server_test.go | 60 +++++++++++++++++++++++++++++++++++++++------- 1 file changed, 52 insertions(+), 8 deletions(-) diff --git a/p2p/server_test.go b/p2p/server_test.go index efab9bb5e51f..5fad1d0a7b90 100644 --- a/p2p/server_test.go +++ b/p2p/server_test.go @@ -148,7 +148,8 @@ func TestServerDial(t *testing.T) { // tell the server to connect tcpAddr := listener.Addr().(*net.TCPAddr) - srv.AddPeer(&discover.Node{ID: remid, IP: tcpAddr.IP, TCP: uint16(tcpAddr.Port)}) + node := &discover.Node{ID: remid, IP: tcpAddr.IP, TCP: uint16(tcpAddr.Port)} + srv.AddPeer(node) select { case conn := <-accepted: @@ -170,6 +171,21 @@ func TestServerDial(t *testing.T) { if !reflect.DeepEqual(peers, []*Peer{peer}) { t.Errorf("Peers mismatch: got %v, want %v", peers, []*Peer{peer}) } + + // Test AddTrustedPeer/RemoveTrustedPeer and changing Trusted flags + // Particularly for race conditions on changing the flag state. 
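+	// Peers()[0] is safe here because exactly one peer (the one dialed
+	// above) is connected at this point in the test.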
+ if peer := srv.Peers()[0]; peer.Info().Network.Trusted { + t.Errorf("peer is trusted prematurely: %v", peer) + } + srv.AddTrustedPeer(node) + if peer := srv.Peers()[0]; !peer.Info().Network.Trusted { + t.Errorf("peer is not trusted after AddTrustedPeer: %v", peer) + } + srv.RemoveTrustedPeer(node) + if peer := srv.Peers()[0]; peer.Info().Network.Trusted { + t.Errorf("peer is trusted after RemoveTrustedPeer: %v", peer) + } + case <-time.After(1 * time.Second): t.Error("server did not launch peer within one second") } @@ -351,7 +367,8 @@ func TestServerAtCap(t *testing.T) { } } // Try inserting a non-trusted connection. - c := newconn(randomID()) + anotherID := randomID() + c := newconn(anotherID) if err := srv.checkpoint(c, srv.posthandshake); err != DiscTooManyPeers { t.Error("wrong error for insert:", err) } @@ -363,15 +380,41 @@ func TestServerAtCap(t *testing.T) { if !c.is(trustedConn) { t.Error("Server did not set trusted flag") } + + // Remove from trusted set and try again + srv.RemoveTrustedPeer(&discover.Node{ID: trustedID}) + c = newconn(trustedID) + if err := srv.checkpoint(c, srv.posthandshake); err != DiscTooManyPeers { + t.Error("wrong error for insert:", err) + } + + // Add anotherID to trusted set and try again + srv.AddTrustedPeer(&discover.Node{ID: anotherID}) + c = newconn(anotherID) + if err := srv.checkpoint(c, srv.posthandshake); err != nil { + t.Error("unexpected error for trusted conn @posthandshake:", err) + } + if !c.is(trustedConn) { + t.Error("Server did not set trusted flag") + } } func TestServerPeerLimits(t *testing.T) { srvkey := newkey() - srvid := discover.PubkeyID(&srvkey.PublicKey) - var tp *setupTransport = &setupTransport{id: srvid, phs: &protoHandshake{ID: srvid}} + clientid := randomID() + clientnode := &discover.Node{ID: clientid} + + var tp *setupTransport = &setupTransport{ + id: clientid, + phs: &protoHandshake{ + ID: clientid, + // Force "DiscUselessPeer" due to unmatching caps + // Caps: []Cap{discard.cap()}, + }, + } var flags connFlag = dynDialedConn - var dialDest *discover.Node = &discover.Node{ID: srvid} + var dialDest *discover.Node = &discover.Node{ID: clientid} srv := &Server{ Config: Config{ @@ -386,6 +429,7 @@ func TestServerPeerLimits(t *testing.T) { if err := srv.Start(); err != nil { t.Fatalf("couldn't start server: %v", err) } + defer srv.Stop() // Check that server is full (MaxPeers=0) conn, _ := net.Pipe() @@ -395,7 +439,7 @@ func TestServerPeerLimits(t *testing.T) { } conn.Close() - srv.AddTrustedPeer(dialDest) + srv.AddTrustedPeer(clientnode) // Check that server allows a trusted peer despite being full. conn, _ = net.Pipe() @@ -404,12 +448,12 @@ func TestServerPeerLimits(t *testing.T) { t.Errorf("failed to bypass MaxPeers with trusted node: %q", tp.closeErr) } - if tp.closeErr != DiscSelf { + if tp.closeErr != DiscUselessPeer { t.Errorf("unexpected close error: %q", tp.closeErr) } conn.Close() - srv.RemoveTrustedPeer(dialDest) + srv.RemoveTrustedPeer(clientnode) // Check that server is full again. 
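 	// (With the trusted override removed, a new connection should once
 	// again be rejected with DiscTooManyPeers.)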
conn, _ = net.Pipe() From 399aa710d514561be571dc180aa4afe9fcc2138d Mon Sep 17 00:00:00 2001 From: Andrey Petrov Date: Thu, 7 Jun 2018 10:31:09 -0400 Subject: [PATCH 004/166] p2p: Attempt to race check peer.Inbound() in TestServerDial --- p2p/server_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/p2p/server_test.go b/p2p/server_test.go index 5fad1d0a7b90..7eca46938c09 100644 --- a/p2p/server_test.go +++ b/p2p/server_test.go @@ -181,6 +181,9 @@ func TestServerDial(t *testing.T) { if peer := srv.Peers()[0]; !peer.Info().Network.Trusted { t.Errorf("peer is not trusted after AddTrustedPeer: %v", peer) } + if peer := srv.Peers()[0]; peer.Inbound() { + t.Errorf("peer is marked inbound") + } srv.RemoveTrustedPeer(node) if peer := srv.Peers()[0]; peer.Info().Network.Trusted { t.Errorf("peer is trusted after RemoveTrustedPeer: %v", peer) From dcca66bce8ec79bcf0e06c32f57d0011f8d9fa93 Mon Sep 17 00:00:00 2001 From: Andrey Petrov Date: Thu, 7 Jun 2018 10:42:40 -0400 Subject: [PATCH 005/166] p2p: Cache inbound flag on Peer.isInbound to avoid a race --- p2p/peer.go | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/p2p/peer.go b/p2p/peer.go index c3907349fc5a..ff8602602845 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -95,10 +95,11 @@ type PeerEvent struct { // Peer represents a connected remote node. type Peer struct { - rw *conn - running map[string]*protoRW - log log.Logger - created mclock.AbsTime + rw *conn + isInbound bool // Cached from rw.flags to avoid a race condition + running map[string]*protoRW + log log.Logger + created mclock.AbsTime wg sync.WaitGroup protoErr chan error @@ -160,19 +161,20 @@ func (p *Peer) String() string { // Inbound returns true if the peer is an inbound connection func (p *Peer) Inbound() bool { - return p.rw.flags&inboundConn != 0 + return p.isInbound } func newPeer(conn *conn, protocols []Protocol) *Peer { protomap := matchProtocols(protocols, conn.caps, conn) p := &Peer{ - rw: conn, - running: protomap, - created: mclock.Now(), - disc: make(chan DiscReason), - protoErr: make(chan error, len(protomap)+1), // protocols + pingLoop - closed: make(chan struct{}), - log: log.New("id", conn.id, "conn", conn.flags), + rw: conn, + isInbound: conn.is(inboundConn), + running: protomap, + created: mclock.Now(), + disc: make(chan DiscReason), + protoErr: make(chan error, len(protomap)+1), // protocols + pingLoop + closed: make(chan struct{}), + log: log.New("id", conn.id, "conn", conn.flags), } return p } From 193a402cc08e69f8c6b92106e8e81104d260d26c Mon Sep 17 00:00:00 2001 From: Andrey Petrov Date: Thu, 7 Jun 2018 12:49:07 -0400 Subject: [PATCH 006/166] p2p: Test for peer.rw.flags race conditions --- p2p/server_test.go | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/p2p/server_test.go b/p2p/server_test.go index 7eca46938c09..65897e018581 100644 --- a/p2p/server_test.go +++ b/p2p/server_test.go @@ -177,18 +177,25 @@ func TestServerDial(t *testing.T) { if peer := srv.Peers()[0]; peer.Info().Network.Trusted { t.Errorf("peer is trusted prematurely: %v", peer) } - srv.AddTrustedPeer(node) - if peer := srv.Peers()[0]; !peer.Info().Network.Trusted { - t.Errorf("peer is not trusted after AddTrustedPeer: %v", peer) - } - if peer := srv.Peers()[0]; peer.Inbound() { - t.Errorf("peer is marked inbound") - } - srv.RemoveTrustedPeer(node) - if peer := srv.Peers()[0]; peer.Info().Network.Trusted { - t.Errorf("peer is trusted after RemoveTrustedPeer: %v", peer) - } + done := make(chan 
bool) + go func() { + srv.AddTrustedPeer(node) + if peer := srv.Peers()[0]; !peer.Info().Network.Trusted { + t.Errorf("peer is not trusted after AddTrustedPeer: %v", peer) + } + srv.RemoveTrustedPeer(node) + if peer := srv.Peers()[0]; peer.Info().Network.Trusted { + t.Errorf("peer is trusted after RemoveTrustedPeer: %v", peer) + } + done <- true + }() + + // Trigger potential race conditions + peer = srv.Peers()[0] + _ = peer.Inbound() + _ = peer.Info() + <-done case <-time.After(1 * time.Second): t.Error("server did not launch peer within one second") } From 6209545083f656f2dccbe4561644a757ff6443b5 Mon Sep 17 00:00:00 2001 From: Andrey Petrov Date: Thu, 7 Jun 2018 21:50:08 -0400 Subject: [PATCH 007/166] p2p: Wrap conn.flags ops with atomic.Load/Store --- p2p/peer.go | 26 ++++++++++++-------------- p2p/server.go | 20 ++++++++++++++++---- p2p/server_test.go | 2 -- 3 files changed, 28 insertions(+), 20 deletions(-) diff --git a/p2p/peer.go b/p2p/peer.go index ff8602602845..c4c1fcd7c724 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -95,11 +95,10 @@ type PeerEvent struct { // Peer represents a connected remote node. type Peer struct { - rw *conn - isInbound bool // Cached from rw.flags to avoid a race condition - running map[string]*protoRW - log log.Logger - created mclock.AbsTime + rw *conn + running map[string]*protoRW + log log.Logger + created mclock.AbsTime wg sync.WaitGroup protoErr chan error @@ -161,20 +160,19 @@ func (p *Peer) String() string { // Inbound returns true if the peer is an inbound connection func (p *Peer) Inbound() bool { - return p.isInbound + return p.rw.is(inboundConn) } func newPeer(conn *conn, protocols []Protocol) *Peer { protomap := matchProtocols(protocols, conn.caps, conn) p := &Peer{ - rw: conn, - isInbound: conn.is(inboundConn), - running: protomap, - created: mclock.Now(), - disc: make(chan DiscReason), - protoErr: make(chan error, len(protomap)+1), // protocols + pingLoop - closed: make(chan struct{}), - log: log.New("id", conn.id, "conn", conn.flags), + rw: conn, + running: protomap, + created: mclock.Now(), + disc: make(chan DiscReason), + protoErr: make(chan error, len(protomap)+1), // protocols + pingLoop + closed: make(chan struct{}), + log: log.New("id", conn.id, "conn", conn.flags), } return p } diff --git a/p2p/server.go b/p2p/server.go index 39ff2f51e862..d2cb949255d9 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -23,6 +23,7 @@ import ( "fmt" "net" "sync" + "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" @@ -187,7 +188,7 @@ type peerDrop struct { requested bool // true if signaled by the peer } -type connFlag int +type connFlag int32 const ( dynDialedConn connFlag = 1 << iota @@ -252,7 +253,18 @@ func (f connFlag) String() string { } func (c *conn) is(f connFlag) bool { - return c.flags&f != 0 + flags := connFlag(atomic.LoadInt32((*int32)(&c.flags))) + return flags&f != 0 +} + +func (c *conn) set(f connFlag, val bool) { + flags := connFlag(atomic.LoadInt32((*int32)(&c.flags))) + if val { + flags |= f + } else { + flags &= ^f + } + atomic.StoreInt32((*int32)(&c.flags), int32(flags)) } // Peers returns all connected peers. 
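Note that conn.set is an atomic load followed by an atomic store rather than a
single compare-and-swap, so it is race-free only under the assumption that all
writers of conn.flags run on the Server.run goroutine (as the
addtrusted/removetrusted handlers below do), while concurrent readers go
through the atomic load in conn.is.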
@@ -632,7 +644,7 @@ running: trusted[n.ID] = true // Mark any already-connected peer as trusted if p, ok := peers[n.ID]; ok { - p.rw.flags |= trustedConn + p.rw.set(trustedConn, true) } case n := <-srv.removetrusted: // This channel is used by RemoveTrustedPeer to remove an enode @@ -643,7 +655,7 @@ running: } // Unmark any already-connected peer as trusted if p, ok := peers[n.ID]; ok { - p.rw.flags &= ^trustedConn + p.rw.set(trustedConn, false) } case op := <-srv.peerOp: // This channel is used by Peers and PeerCount. diff --git a/p2p/server_test.go b/p2p/server_test.go index 65897e018581..3f24a79baef9 100644 --- a/p2p/server_test.go +++ b/p2p/server_test.go @@ -189,12 +189,10 @@ func TestServerDial(t *testing.T) { } done <- true }() - // Trigger potential race conditions peer = srv.Peers()[0] _ = peer.Inbound() _ = peer.Info() - <-done case <-time.After(1 * time.Second): t.Error("server did not launch peer within one second") From 3b0745156428376e4ca4ae0449db5864e672bc3b Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Thu, 5 Jul 2018 01:09:02 +0200 Subject: [PATCH 008/166] params, VERSION: v1.8.13 unstable --- VERSION | 2 +- params/version.go | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/VERSION b/VERSION index 7d2424c90b0f..59009bc578c7 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.8.12 +1.8.13 diff --git a/params/version.go b/params/version.go index 8991622ba6cf..c4d83f0a4929 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 8 // Minor version component of the current release - VersionPatch = 12 // Patch version component of the current release - VersionMeta = "stable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 8 // Minor version component of the current release + VersionPatch = 13 // Patch version component of the current release + VersionMeta = "unstable" // Version metadata to append to the version string ) // Version holds the textual version string. From dbae1dc7b3a3e1ebe1721ee3e324a44ae9a7e2c6 Mon Sep 17 00:00:00 2001 From: LeoLiao Date: Mon, 9 Jul 2018 16:31:59 +0800 Subject: [PATCH 009/166] rpc: fixed comment grammar issue (#17146) --- rpc/client.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rpc/client.go b/rpc/client.go index 1c88cfab8417..a2ef2ed6b63d 100644 --- a/rpc/client.go +++ b/rpc/client.go @@ -304,7 +304,7 @@ func (c *Client) CallContext(ctx context.Context, result interface{}, method str return err } - // dispatch has accepted the request and will close the channel it when it quits. + // dispatch has accepted the request and will close the channel when it quits. 
switch resp, err := op.wait(ctx); { case err != nil: return err From 4dbefc1f2577e46b8fd4a7f670c3319d9e6d7f71 Mon Sep 17 00:00:00 2001 From: LeoLiao Date: Mon, 9 Jul 2018 16:38:52 +0800 Subject: [PATCH 010/166] cmd/geth: fixed comment typo (#17140) --- cmd/geth/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index f4a0ac5df647..52308948fc00 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -202,7 +202,7 @@ func init() { if err := debug.Setup(ctx); err != nil { return err } - // Cap the cache allowance and tune the garbage colelctor + // Cap the cache allowance and tune the garbage collector var mem gosigar.Mem if err := mem.Get(); err == nil { allowance := int(mem.Total / 1024 / 1024 / 3) From 0b20b1a050c2ee08a238b287b2f621ae1b9dec53 Mon Sep 17 00:00:00 2001 From: LeoLiao Date: Mon, 9 Jul 2018 16:39:43 +0800 Subject: [PATCH 011/166] consensus/clique: fixed documentation copy-paste issue (#17137) --- consensus/clique/api.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/clique/api.go b/consensus/clique/api.go index b875eef0126a..6bcf987af55e 100644 --- a/consensus/clique/api.go +++ b/consensus/clique/api.go @@ -75,7 +75,7 @@ func (api *API) GetSigners(number *rpc.BlockNumber) ([]common.Address, error) { return snap.signers(), nil } -// GetSignersAtHash retrieves the state snapshot at a given block. +// GetSignersAtHash retrieves the list of authorized signers at the specified block. func (api *API) GetSignersAtHash(hash common.Hash) ([]common.Address, error) { header := api.chain.GetHeaderByHash(hash) if header == nil { From fbeb4f20f97988c3715347fb06e0cb2f0e08f20b Mon Sep 17 00:00:00 2001 From: Wenbiao Zheng Date: Mon, 9 Jul 2018 16:41:28 +0800 Subject: [PATCH 012/166] cmd/geth: fix usage formatting (#17136) --- cmd/geth/usage.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index 06db64664ee5..6a12a66cc22b 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -83,7 +83,8 @@ var AppHelpFlagGroups = []flagGroup{ utils.LightKDFFlag, }, }, - {Name: "DEVELOPER CHAIN", + { + Name: "DEVELOPER CHAIN", Flags: []cli.Flag{ utils.DeveloperFlag, utils.DeveloperPeriodFlag, From 30bdf817a0d0afb33f3635f1de877f9caf09be05 Mon Sep 17 00:00:00 2001 From: Smilenator Date: Mon, 9 Jul 2018 11:48:54 +0300 Subject: [PATCH 013/166] core/types: polish TxDifference code and docs a bit (#17130) * core: fix func TxDifference fix a typo in func comment; change named return to unnamed as there's explicit return in the body * fix another typo in TxDifference --- core/types/transaction.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/core/types/transaction.go b/core/types/transaction.go index b824a77f61dd..82af9335ff2a 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -266,9 +266,9 @@ func (s Transactions) GetRlp(i int) []byte { return enc } -// TxDifference returns a new set t which is the difference between a to b. -func TxDifference(a, b Transactions) (keep Transactions) { - keep = make(Transactions, 0, len(a)) +// TxDifference returns a new set which is the difference between a and b. 
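+// It keeps every transaction of a whose hash does not appear in b, preserving
+// the order of a; e.g. with a = [t1, t2] and b = [t2, t3] the result is [t1].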
+func TxDifference(a, b Transactions) Transactions { + keep := make(Transactions, 0, len(a)) remove := make(map[common.Hash]struct{}) for _, tx := range b { From b3711af05176f446fad5ee90e2be4bd09c4086a2 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Mon, 9 Jul 2018 14:11:49 +0200 Subject: [PATCH 014/166] swarm: ctx propagation; bmt fixes; pss generic notification framework (#17150) * cmd/swarm: minor cli flag text adjustments * swarm/api/http: sticky footer for swarm landing page using flex * swarm/api/http: sticky footer for error pages and fix for multiple choices * cmd/swarm, swarm/storage, swarm: fix mingw on windows test issues * cmd/swarm: update description of swarm cmd * swarm: added network ID test * cmd/swarm: support for smoke tests on the production swarm cluster * cmd/swarm/swarm-smoke: simplify cluster logic as per suggestion * swarm: propagate ctx to internal apis (#754) * swarm/metrics: collect disk measurements * swarm/bmt: fix io.Writer interface * Write now tolerates arbitrary variable buffers * added variable buffer tests * Write loop and finalise optimisation * refactor / rename * add tests for empty input * swarm/pss: (UPDATE) Generic notifications package (#744) swarm/pss: Generic package for creating pss notification svcs * swarm: Adding context to more functions * swarm/api: change colour of landing page in templates * swarm/api: change landing page to react to enter keypress --- cmd/swarm/fs_test.go | 2 + cmd/swarm/hash.go | 3 +- cmd/swarm/main.go | 4 +- cmd/swarm/swarm-smoke/upload_and_sync.go | 8 +- metrics/metrics.go | 6 + swarm/api/api.go | 75 ++-- swarm/api/api_test.go | 13 +- swarm/api/filesystem.go | 14 +- swarm/api/filesystem_test.go | 20 +- swarm/api/http/error.go | 8 + swarm/api/http/error_templates.go | 132 +++--- swarm/api/http/server.go | 113 ++--- swarm/api/http/server_test.go | 11 +- swarm/api/http/templates.go | 113 +++-- swarm/api/manifest.go | 38 +- swarm/api/storage.go | 17 +- swarm/api/storage_test.go | 11 +- swarm/bmt/bmt.go | 220 +++++----- swarm/bmt/bmt_r.go | 3 +- swarm/bmt/bmt_test.go | 122 ++++-- swarm/fuse/fuse_file.go | 4 +- swarm/fuse/swarmfs_test.go | 3 +- swarm/fuse/swarmfs_unix.go | 3 +- swarm/fuse/swarmfs_util.go | 6 +- swarm/metrics/flags.go | 3 + swarm/network/networkid_test.go | 266 ++++++++++++ swarm/network/stream/common_test.go | 2 +- swarm/network/stream/delivery_test.go | 16 +- swarm/network/stream/intervals_test.go | 8 +- .../network/stream/snapshot_retrieval_test.go | 12 +- swarm/network/stream/snapshot_sync_test.go | 8 +- swarm/network/stream/syncer_test.go | 7 +- swarm/network_test.go | 9 +- swarm/pss/handshake.go | 2 +- swarm/pss/notify/notify.go | 394 ++++++++++++++++++ swarm/pss/notify/notify_test.go | 252 +++++++++++ swarm/pss/protocol.go | 8 + swarm/pss/pss.go | 10 +- swarm/pss/pss_test.go | 8 +- swarm/storage/chunker.go | 13 +- swarm/storage/chunker_test.go | 67 ++- swarm/storage/filestore.go | 9 +- swarm/storage/filestore_test.go | 27 +- swarm/storage/hasherstore.go | 4 +- swarm/storage/hasherstore_test.go | 6 +- swarm/storage/ldbstore_test.go | 4 +- swarm/storage/pyramid.go | 13 +- swarm/storage/types.go | 3 +- swarm/swarm_test.go | 20 +- 49 files changed, 1631 insertions(+), 489 deletions(-) create mode 100644 swarm/network/networkid_test.go create mode 100644 swarm/pss/notify/notify.go create mode 100644 swarm/pss/notify/notify_test.go diff --git a/cmd/swarm/fs_test.go b/cmd/swarm/fs_test.go index 25705c0a49f9..0cbf0eb1378a 100644 --- a/cmd/swarm/fs_test.go +++ b/cmd/swarm/fs_test.go @@ -14,6 +14,8 @@ 
// You should have received a copy of the GNU General Public License // along with go-ethereum. If not, see . +// +build linux darwin freebsd + package main import ( diff --git a/cmd/swarm/hash.go b/cmd/swarm/hash.go index c82456b3cdda..bca4955b158d 100644 --- a/cmd/swarm/hash.go +++ b/cmd/swarm/hash.go @@ -18,6 +18,7 @@ package main import ( + "context" "fmt" "os" @@ -39,7 +40,7 @@ func hash(ctx *cli.Context) { stat, _ := f.Stat() fileStore := storage.NewFileStore(storage.NewMapChunkStore(), storage.NewFileStoreParams()) - addr, _, err := fileStore.Store(f, stat.Size(), false) + addr, _, err := fileStore.Store(context.TODO(), f, stat.Size(), false) if err != nil { utils.Fatalf("%v\n", err) } else { diff --git a/cmd/swarm/main.go b/cmd/swarm/main.go index 9877e9150d47..8e1a69cb2a08 100644 --- a/cmd/swarm/main.go +++ b/cmd/swarm/main.go @@ -143,7 +143,7 @@ var ( } SwarmWantManifestFlag = cli.BoolTFlag{ Name: "manifest", - Usage: "Automatic manifest upload", + Usage: "Automatic manifest upload (default true)", } SwarmUploadDefaultPath = cli.StringFlag{ Name: "defaultpath", @@ -155,7 +155,7 @@ var ( } SwarmUploadMimeType = cli.StringFlag{ Name: "mime", - Usage: "force mime type", + Usage: "Manually specify MIME type", } SwarmEncryptedFlag = cli.BoolFlag{ Name: "encrypt", diff --git a/cmd/swarm/swarm-smoke/upload_and_sync.go b/cmd/swarm/swarm-smoke/upload_and_sync.go index 7f9051e7fecb..b914eecd80b7 100644 --- a/cmd/swarm/swarm-smoke/upload_and_sync.go +++ b/cmd/swarm/swarm-smoke/upload_and_sync.go @@ -37,8 +37,14 @@ import ( ) func generateEndpoints(scheme string, cluster string, from int, to int) { + if cluster == "prod" { + cluster = "" + } else { + cluster = cluster + "." + } + for port := from; port <= to; port++ { - endpoints = append(endpoints, fmt.Sprintf("%s://%v.%s.swarm-gateways.net", scheme, port, cluster)) + endpoints = append(endpoints, fmt.Sprintf("%s://%v.%sswarm-gateways.net", scheme, port, cluster)) } if includeLocalhost { diff --git a/metrics/metrics.go b/metrics/metrics.go index 2356f2b148dc..2a2b804e7c96 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -58,11 +58,14 @@ func CollectProcessMetrics(refresh time.Duration) { memPauses := GetOrRegisterMeter("system/memory/pauses", DefaultRegistry) var diskReads, diskReadBytes, diskWrites, diskWriteBytes Meter + var diskReadBytesCounter, diskWriteBytesCounter Counter if err := ReadDiskStats(diskstats[0]); err == nil { diskReads = GetOrRegisterMeter("system/disk/readcount", DefaultRegistry) diskReadBytes = GetOrRegisterMeter("system/disk/readdata", DefaultRegistry) + diskReadBytesCounter = GetOrRegisterCounter("system/disk/readbytes", DefaultRegistry) diskWrites = GetOrRegisterMeter("system/disk/writecount", DefaultRegistry) diskWriteBytes = GetOrRegisterMeter("system/disk/writedata", DefaultRegistry) + diskWriteBytesCounter = GetOrRegisterCounter("system/disk/writebytes", DefaultRegistry) } else { log.Debug("Failed to read disk metrics", "err", err) } @@ -82,6 +85,9 @@ func CollectProcessMetrics(refresh time.Duration) { diskReadBytes.Mark(diskstats[location1].ReadBytes - diskstats[location2].ReadBytes) diskWrites.Mark(diskstats[location1].WriteCount - diskstats[location2].WriteCount) diskWriteBytes.Mark(diskstats[location1].WriteBytes - diskstats[location2].WriteBytes) + + diskReadBytesCounter.Inc(diskstats[location1].ReadBytes - diskstats[location2].ReadBytes) + diskWriteBytesCounter.Inc(diskstats[location1].WriteBytes - diskstats[location2].WriteBytes) } time.Sleep(refresh) } diff --git a/swarm/api/api.go 
b/swarm/api/api.go index 36f19998af34..efc03d139bc8 100644 --- a/swarm/api/api.go +++ b/swarm/api/api.go @@ -227,28 +227,28 @@ func NewAPI(fileStore *storage.FileStore, dns Resolver, resourceHandler *mru.Han } // Upload to be used only in TEST -func (a *API) Upload(uploadDir, index string, toEncrypt bool) (hash string, err error) { +func (a *API) Upload(ctx context.Context, uploadDir, index string, toEncrypt bool) (hash string, err error) { fs := NewFileSystem(a) hash, err = fs.Upload(uploadDir, index, toEncrypt) return hash, err } // Retrieve FileStore reader API -func (a *API) Retrieve(addr storage.Address) (reader storage.LazySectionReader, isEncrypted bool) { - return a.fileStore.Retrieve(addr) +func (a *API) Retrieve(ctx context.Context, addr storage.Address) (reader storage.LazySectionReader, isEncrypted bool) { + return a.fileStore.Retrieve(ctx, addr) } // Store wraps the Store API call of the embedded FileStore -func (a *API) Store(data io.Reader, size int64, toEncrypt bool) (addr storage.Address, wait func(), err error) { +func (a *API) Store(ctx context.Context, data io.Reader, size int64, toEncrypt bool) (addr storage.Address, wait func(ctx context.Context) error, err error) { log.Debug("api.store", "size", size) - return a.fileStore.Store(data, size, toEncrypt) + return a.fileStore.Store(ctx, data, size, toEncrypt) } // ErrResolve is returned when an URI cannot be resolved from ENS. type ErrResolve error // Resolve resolves a URI to an Address using the MultiResolver. -func (a *API) Resolve(uri *URI) (storage.Address, error) { +func (a *API) Resolve(ctx context.Context, uri *URI) (storage.Address, error) { apiResolveCount.Inc(1) log.Trace("resolving", "uri", uri.Addr) @@ -286,34 +286,37 @@ func (a *API) Resolve(uri *URI) (storage.Address, error) { } // Put provides singleton manifest creation on top of FileStore store -func (a *API) Put(content, contentType string, toEncrypt bool) (k storage.Address, wait func(), err error) { +func (a *API) Put(ctx context.Context, content string, contentType string, toEncrypt bool) (k storage.Address, wait func(context.Context) error, err error) { apiPutCount.Inc(1) r := strings.NewReader(content) - key, waitContent, err := a.fileStore.Store(r, int64(len(content)), toEncrypt) + key, waitContent, err := a.fileStore.Store(ctx, r, int64(len(content)), toEncrypt) if err != nil { apiPutFail.Inc(1) return nil, nil, err } manifest := fmt.Sprintf(`{"entries":[{"hash":"%v","contentType":"%s"}]}`, key, contentType) r = strings.NewReader(manifest) - key, waitManifest, err := a.fileStore.Store(r, int64(len(manifest)), toEncrypt) + key, waitManifest, err := a.fileStore.Store(ctx, r, int64(len(manifest)), toEncrypt) if err != nil { apiPutFail.Inc(1) return nil, nil, err } - return key, func() { - waitContent() - waitManifest() + return key, func(ctx context.Context) error { + err := waitContent(ctx) + if err != nil { + return err + } + return waitManifest(ctx) }, nil } // Get uses iterative manifest retrieval and prefix matching // to resolve basePath to content using FileStore retrieve // it returns a section reader, mimeType, status, the key of the actual content and an error -func (a *API) Get(manifestAddr storage.Address, path string) (reader storage.LazySectionReader, mimeType string, status int, contentAddr storage.Address, err error) { +func (a *API) Get(ctx context.Context, manifestAddr storage.Address, path string) (reader storage.LazySectionReader, mimeType string, status int, contentAddr storage.Address, err error) { log.Debug("api.get", "key", 
manifestAddr, "path", path) apiGetCount.Inc(1) - trie, err := loadManifest(a.fileStore, manifestAddr, nil) + trie, err := loadManifest(ctx, a.fileStore, manifestAddr, nil) if err != nil { apiGetNotFound.Inc(1) status = http.StatusNotFound @@ -375,7 +378,7 @@ func (a *API) Get(manifestAddr storage.Address, path string) (reader storage.Laz log.Trace("resource is multihash", "key", manifestAddr) // get the manifest the multihash digest points to - trie, err := loadManifest(a.fileStore, manifestAddr, nil) + trie, err := loadManifest(ctx, a.fileStore, manifestAddr, nil) if err != nil { apiGetNotFound.Inc(1) status = http.StatusNotFound @@ -410,7 +413,7 @@ func (a *API) Get(manifestAddr storage.Address, path string) (reader storage.Laz } mimeType = entry.ContentType log.Debug("content lookup key", "key", contentAddr, "mimetype", mimeType) - reader, _ = a.fileStore.Retrieve(contentAddr) + reader, _ = a.fileStore.Retrieve(ctx, contentAddr) } else { // no entry found status = http.StatusNotFound @@ -422,10 +425,10 @@ func (a *API) Get(manifestAddr storage.Address, path string) (reader storage.Laz } // Modify loads manifest and checks the content hash before recalculating and storing the manifest. -func (a *API) Modify(addr storage.Address, path, contentHash, contentType string) (storage.Address, error) { +func (a *API) Modify(ctx context.Context, addr storage.Address, path, contentHash, contentType string) (storage.Address, error) { apiModifyCount.Inc(1) quitC := make(chan bool) - trie, err := loadManifest(a.fileStore, addr, quitC) + trie, err := loadManifest(ctx, a.fileStore, addr, quitC) if err != nil { apiModifyFail.Inc(1) return nil, err @@ -449,7 +452,7 @@ func (a *API) Modify(addr storage.Address, path, contentHash, contentType string } // AddFile creates a new manifest entry, adds it to swarm, then adds a file to swarm. -func (a *API) AddFile(mhash, path, fname string, content []byte, nameresolver bool) (storage.Address, string, error) { +func (a *API) AddFile(ctx context.Context, mhash, path, fname string, content []byte, nameresolver bool) (storage.Address, string, error) { apiAddFileCount.Inc(1) uri, err := Parse("bzz:/" + mhash) @@ -457,7 +460,7 @@ func (a *API) AddFile(mhash, path, fname string, content []byte, nameresolver bo apiAddFileFail.Inc(1) return nil, "", err } - mkey, err := a.Resolve(uri) + mkey, err := a.Resolve(ctx, uri) if err != nil { apiAddFileFail.Inc(1) return nil, "", err @@ -476,13 +479,13 @@ func (a *API) AddFile(mhash, path, fname string, content []byte, nameresolver bo ModTime: time.Now(), } - mw, err := a.NewManifestWriter(mkey, nil) + mw, err := a.NewManifestWriter(ctx, mkey, nil) if err != nil { apiAddFileFail.Inc(1) return nil, "", err } - fkey, err := mw.AddEntry(bytes.NewReader(content), entry) + fkey, err := mw.AddEntry(ctx, bytes.NewReader(content), entry) if err != nil { apiAddFileFail.Inc(1) return nil, "", err @@ -496,11 +499,10 @@ func (a *API) AddFile(mhash, path, fname string, content []byte, nameresolver bo } return fkey, newMkey.String(), nil - } // RemoveFile removes a file entry in a manifest. 
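 // It returns the hex hash of the manifest that results from the removal.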
-func (a *API) RemoveFile(mhash, path, fname string, nameresolver bool) (string, error) { +func (a *API) RemoveFile(ctx context.Context, mhash string, path string, fname string, nameresolver bool) (string, error) { apiRmFileCount.Inc(1) uri, err := Parse("bzz:/" + mhash) @@ -508,7 +510,7 @@ func (a *API) RemoveFile(mhash, path, fname string, nameresolver bool) (string, apiRmFileFail.Inc(1) return "", err } - mkey, err := a.Resolve(uri) + mkey, err := a.Resolve(ctx, uri) if err != nil { apiRmFileFail.Inc(1) return "", err @@ -519,7 +521,7 @@ func (a *API) RemoveFile(mhash, path, fname string, nameresolver bool) (string, path = path[1:] } - mw, err := a.NewManifestWriter(mkey, nil) + mw, err := a.NewManifestWriter(ctx, mkey, nil) if err != nil { apiRmFileFail.Inc(1) return "", err @@ -542,7 +544,7 @@ func (a *API) RemoveFile(mhash, path, fname string, nameresolver bool) (string, } // AppendFile removes old manifest, appends file entry to new manifest and adds it to Swarm. -func (a *API) AppendFile(mhash, path, fname string, existingSize int64, content []byte, oldAddr storage.Address, offset int64, addSize int64, nameresolver bool) (storage.Address, string, error) { +func (a *API) AppendFile(ctx context.Context, mhash, path, fname string, existingSize int64, content []byte, oldAddr storage.Address, offset int64, addSize int64, nameresolver bool) (storage.Address, string, error) { apiAppendFileCount.Inc(1) buffSize := offset + addSize @@ -552,7 +554,7 @@ func (a *API) AppendFile(mhash, path, fname string, existingSize int64, content buf := make([]byte, buffSize) - oldReader, _ := a.Retrieve(oldAddr) + oldReader, _ := a.Retrieve(ctx, oldAddr) io.ReadAtLeast(oldReader, buf, int(offset)) newReader := bytes.NewReader(content) @@ -575,7 +577,7 @@ func (a *API) AppendFile(mhash, path, fname string, existingSize int64, content apiAppendFileFail.Inc(1) return nil, "", err } - mkey, err := a.Resolve(uri) + mkey, err := a.Resolve(ctx, uri) if err != nil { apiAppendFileFail.Inc(1) return nil, "", err @@ -586,7 +588,7 @@ func (a *API) AppendFile(mhash, path, fname string, existingSize int64, content path = path[1:] } - mw, err := a.NewManifestWriter(mkey, nil) + mw, err := a.NewManifestWriter(ctx, mkey, nil) if err != nil { apiAppendFileFail.Inc(1) return nil, "", err @@ -606,7 +608,7 @@ func (a *API) AppendFile(mhash, path, fname string, existingSize int64, content ModTime: time.Now(), } - fkey, err := mw.AddEntry(io.Reader(combinedReader), entry) + fkey, err := mw.AddEntry(ctx, io.Reader(combinedReader), entry) if err != nil { apiAppendFileFail.Inc(1) return nil, "", err @@ -620,23 +622,22 @@ func (a *API) AppendFile(mhash, path, fname string, existingSize int64, content } return fkey, newMkey.String(), nil - } // BuildDirectoryTree used by swarmfs_unix -func (a *API) BuildDirectoryTree(mhash string, nameresolver bool) (addr storage.Address, manifestEntryMap map[string]*manifestTrieEntry, err error) { +func (a *API) BuildDirectoryTree(ctx context.Context, mhash string, nameresolver bool) (addr storage.Address, manifestEntryMap map[string]*manifestTrieEntry, err error) { uri, err := Parse("bzz:/" + mhash) if err != nil { return nil, nil, err } - addr, err = a.Resolve(uri) + addr, err = a.Resolve(ctx, uri) if err != nil { return nil, nil, err } quitC := make(chan bool) - rootTrie, err := loadManifest(a.fileStore, addr, quitC) + rootTrie, err := loadManifest(ctx, a.fileStore, addr, quitC) if err != nil { return nil, nil, fmt.Errorf("can't load manifest %v: %v", addr.String(), err) } @@ -725,8 +726,8 @@ 
func (a *API) ResourceIsValidated() bool { } // ResolveResourceManifest retrieves the Mutable Resource manifest for the given address, and returns the address of the metadata chunk. -func (a *API) ResolveResourceManifest(addr storage.Address) (storage.Address, error) { - trie, err := loadManifest(a.fileStore, addr, nil) +func (a *API) ResolveResourceManifest(ctx context.Context, addr storage.Address) (storage.Address, error) { + trie, err := loadManifest(ctx, a.fileStore, addr, nil) if err != nil { return nil, fmt.Errorf("cannot load resource manifest: %v", err) } diff --git a/swarm/api/api_test.go b/swarm/api/api_test.go index e607dd4fc301..d1fd49b5bb49 100644 --- a/swarm/api/api_test.go +++ b/swarm/api/api_test.go @@ -85,7 +85,7 @@ func expResponse(content string, mimeType string, status int) *Response { func testGet(t *testing.T, api *API, bzzhash, path string) *testResponse { addr := storage.Address(common.Hex2Bytes(bzzhash)) - reader, mimeType, status, _, err := api.Get(addr, path) + reader, mimeType, status, _, err := api.Get(context.TODO(), addr, path) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -109,12 +109,15 @@ func TestApiPut(t *testing.T) { testAPI(t, func(api *API, toEncrypt bool) { content := "hello" exp := expResponse(content, "text/plain", 0) - // exp := expResponse([]byte(content), "text/plain", 0) - addr, wait, err := api.Put(content, exp.MimeType, toEncrypt) + ctx := context.TODO() + addr, wait, err := api.Put(ctx, content, exp.MimeType, toEncrypt) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + err = wait(ctx) if err != nil { t.Fatalf("unexpected error: %v", err) } - wait() resp := testGet(t, api, addr.Hex(), "") checkResponse(t, resp, exp) }) @@ -226,7 +229,7 @@ func TestAPIResolve(t *testing.T) { if x.immutable { uri.Scheme = "bzz-immutable" } - res, err := api.Resolve(uri) + res, err := api.Resolve(context.TODO(), uri) if err == nil { if x.expectErr != nil { t.Fatalf("expected error %q, got result %q", x.expectErr, res) diff --git a/swarm/api/filesystem.go b/swarm/api/filesystem.go index 297cbec79f42..adf6bfbafb44 100644 --- a/swarm/api/filesystem.go +++ b/swarm/api/filesystem.go @@ -18,6 +18,7 @@ package api import ( "bufio" + "context" "fmt" "io" "net/http" @@ -113,12 +114,13 @@ func (fs *FileSystem) Upload(lpath, index string, toEncrypt bool) (string, error if err == nil { stat, _ := f.Stat() var hash storage.Address - var wait func() - hash, wait, err = fs.api.fileStore.Store(f, stat.Size(), toEncrypt) + var wait func(context.Context) error + ctx := context.TODO() + hash, wait, err = fs.api.fileStore.Store(ctx, f, stat.Size(), toEncrypt) if hash != nil { list[i].Hash = hash.Hex() } - wait() + err = wait(ctx) awg.Done() if err == nil { first512 := make([]byte, 512) @@ -189,7 +191,7 @@ func (fs *FileSystem) Download(bzzpath, localpath string) error { if err != nil { return err } - addr, err := fs.api.Resolve(uri) + addr, err := fs.api.Resolve(context.TODO(), uri) if err != nil { return err } @@ -200,7 +202,7 @@ func (fs *FileSystem) Download(bzzpath, localpath string) error { } quitC := make(chan bool) - trie, err := loadManifest(fs.api.fileStore, addr, quitC) + trie, err := loadManifest(context.TODO(), fs.api.fileStore, addr, quitC) if err != nil { log.Warn(fmt.Sprintf("fs.Download: loadManifestTrie error: %v", err)) return err @@ -273,7 +275,7 @@ func retrieveToFile(quitC chan bool, fileStore *storage.FileStore, addr storage. 
if err != nil { return err } - reader, _ := fileStore.Retrieve(addr) + reader, _ := fileStore.Retrieve(context.TODO(), addr) writer := bufio.NewWriter(f) size, err := reader.Size(quitC) if err != nil { diff --git a/swarm/api/filesystem_test.go b/swarm/api/filesystem_test.go index 915dc4e0b9e5..84a2989d6eb1 100644 --- a/swarm/api/filesystem_test.go +++ b/swarm/api/filesystem_test.go @@ -18,6 +18,7 @@ package api import ( "bytes" + "context" "io/ioutil" "os" "path/filepath" @@ -63,7 +64,7 @@ func TestApiDirUpload0(t *testing.T) { checkResponse(t, resp, exp) addr := storage.Address(common.Hex2Bytes(bzzhash)) - _, _, _, _, err = api.Get(addr, "") + _, _, _, _, err = api.Get(context.TODO(), addr, "") if err == nil { t.Fatalf("expected error: %v", err) } @@ -95,7 +96,7 @@ func TestApiDirUploadModify(t *testing.T) { } addr := storage.Address(common.Hex2Bytes(bzzhash)) - addr, err = api.Modify(addr, "index.html", "", "") + addr, err = api.Modify(context.TODO(), addr, "index.html", "", "") if err != nil { t.Errorf("unexpected error: %v", err) return @@ -105,18 +106,23 @@ func TestApiDirUploadModify(t *testing.T) { t.Errorf("unexpected error: %v", err) return } - hash, wait, err := api.Store(bytes.NewReader(index), int64(len(index)), toEncrypt) - wait() + ctx := context.TODO() + hash, wait, err := api.Store(ctx, bytes.NewReader(index), int64(len(index)), toEncrypt) if err != nil { t.Errorf("unexpected error: %v", err) return } - addr, err = api.Modify(addr, "index2.html", hash.Hex(), "text/html; charset=utf-8") + err = wait(ctx) if err != nil { t.Errorf("unexpected error: %v", err) return } - addr, err = api.Modify(addr, "img/logo.png", hash.Hex(), "text/html; charset=utf-8") + addr, err = api.Modify(context.TODO(), addr, "index2.html", hash.Hex(), "text/html; charset=utf-8") + if err != nil { + t.Errorf("unexpected error: %v", err) + return + } + addr, err = api.Modify(context.TODO(), addr, "img/logo.png", hash.Hex(), "text/html; charset=utf-8") if err != nil { t.Errorf("unexpected error: %v", err) return @@ -137,7 +143,7 @@ func TestApiDirUploadModify(t *testing.T) { exp = expResponse(content, "text/css", 0) checkResponse(t, resp, exp) - _, _, _, _, err = api.Get(addr, "") + _, _, _, _, err = api.Get(context.TODO(), addr, "") if err == nil { t.Errorf("expected error: %v", err) } diff --git a/swarm/api/http/error.go b/swarm/api/http/error.go index 5fff7575e8a5..254a0e8d48c5 100644 --- a/swarm/api/http/error.go +++ b/swarm/api/http/error.go @@ -147,6 +147,14 @@ func Respond(w http.ResponseWriter, req *Request, msg string, code int) { switch code { case http.StatusInternalServerError: log.Output(msg, log.LvlError, l.CallDepth, "ruid", req.ruid, "code", code) + case http.StatusMultipleChoices: + log.Output(msg, log.LvlDebug, l.CallDepth, "ruid", req.ruid, "code", code) + listURI := api.URI{ + Scheme: "bzz-list", + Addr: req.uri.Addr, + Path: req.uri.Path, + } + additionalMessage = fmt.Sprintf(`multiple choices`, listURI.String()) default: log.Output(msg, log.LvlDebug, l.CallDepth, "ruid", req.ruid, "code", code) } diff --git a/swarm/api/http/error_templates.go b/swarm/api/http/error_templates.go index f3c643c90d39..78f24065a915 100644 --- a/swarm/api/http/error_templates.go +++ b/swarm/api/http/error_templates.go @@ -38,6 +38,26 @@ func GetGenericErrorPage() string { Swarm::HTTP Disambiguation Page @@ -494,7 +505,7 @@ func GetMultipleChoicesErrorPage() string { -
[swarm/api/http/error_templates.go hunks: the inline HTML markup of these
template changes did not survive extraction. The surviving text changes: the
multiple-choices page message becomes "Your request may refer to
{{ .Details}}." (replacing "Your request yields ambiguous results! Your
request may refer to: {{ .Details}}"), and the footer tagline is recased to
"Swarm: Serverless Hosting Incentivised peer-to-peer Storage and Content
Distribution".]
diff --git a/swarm/api/http/server.go b/swarm/api/http/server.go index ba8b2b7ba915..5897a1cb9df3 100644 --- a/swarm/api/http/server.go +++ b/swarm/api/http/server.go @@ -23,6 +23,7 @@ import ( "archive/tar" "bufio" "bytes" + "context" "encoding/json" "errors" "fmt" @@ -120,7 +121,7 @@ type Request struct { // HandlePostRaw handles a POST request to a raw bzz-raw:/ URI, stores the request // body in swarm and returns the resulting storage address as a text/plain response -func (s *Server) HandlePostRaw(w http.ResponseWriter, r *Request) { +func (s *Server) HandlePostRaw(ctx context.Context, w http.ResponseWriter, r *Request) { log.Debug("handle.post.raw", "ruid", r.ruid) postRawCount.Inc(1) @@ -147,7 +148,7 @@ func (s *Server) HandlePostRaw(w http.ResponseWriter, r *Request) { Respond(w, r, "missing Content-Length header in request", http.StatusBadRequest) return } - addr, _, err := s.api.Store(r.Body, r.ContentLength, toEncrypt) + addr, _, err := s.api.Store(ctx, r.Body, r.ContentLength, toEncrypt) if err != nil { postRawFail.Inc(1) Respond(w, r, err.Error(), http.StatusInternalServerError) @@ -166,7 +167,7 @@ func (s *Server) HandlePostRaw(w http.ResponseWriter, r *Request) { // (either a tar archive or multipart form), adds those files either to an // existing manifest or to a new manifest under and returns the // resulting manifest hash as a text/plain response -func (s *Server) HandlePostFiles(w http.ResponseWriter, r *Request) { +func (s *Server) HandlePostFiles(ctx context.Context, w http.ResponseWriter, r *Request) { log.Debug("handle.post.files", "ruid", r.ruid) postFilesCount.Inc(1) @@ -184,7 +185,7 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *Request) { var addr storage.Address if r.uri.Addr != "" && r.uri.Addr != "encrypt" { - addr, err = s.api.Resolve(r.uri) + addr, err = s.api.Resolve(ctx, r.uri) if err != nil { postFilesFail.Inc(1) Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusInternalServerError) @@ -192,7 +193,7 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *Request) { } log.Debug("resolved key", "ruid", r.ruid, "key", addr) } else { - addr, err = s.api.NewManifest(toEncrypt) + addr, err = s.api.NewManifest(ctx, toEncrypt) if err != nil { postFilesFail.Inc(1) Respond(w, r, err.Error(), http.StatusInternalServerError) @@ -201,17 +202,17 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *Request) { log.Debug("new manifest", "ruid", r.ruid, "key", addr) } - newAddr, err := s.updateManifest(addr, func(mw *api.ManifestWriter) error { + newAddr, err := s.updateManifest(ctx, addr, func(mw *api.ManifestWriter) error { switch contentType { case "application/x-tar": - return s.handleTarUpload(r, mw) + return s.handleTarUpload(ctx, r, mw) case "multipart/form-data": - return s.handleMultipartUpload(r, params["boundary"], mw) + return s.handleMultipartUpload(ctx, r, params["boundary"], mw) default: - return s.handleDirectUpload(r, mw) + return s.handleDirectUpload(ctx, r, mw) } }) if err != nil { @@ -227,7 +228,7 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *Request) { fmt.Fprint(w, newAddr) } -func (s *Server) handleTarUpload(req *Request, mw *api.ManifestWriter) error { +func (s *Server) handleTarUpload(ctx context.Context, req *Request, mw *api.ManifestWriter) error { log.Debug("handle.tar.upload", "ruid", req.ruid) tr := tar.NewReader(req.Body) for { @@ -253,7 +254,7 @@ func (s *Server) handleTarUpload(req *Request, mw *api.ManifestWriter) error { ModTime: hdr.ModTime, } log.Debug("adding 
path to new manifest", "ruid", req.ruid, "bytes", entry.Size, "path", entry.Path) - contentKey, err := mw.AddEntry(tr, entry) + contentKey, err := mw.AddEntry(ctx, tr, entry) if err != nil { return fmt.Errorf("error adding manifest entry from tar stream: %s", err) } @@ -261,7 +262,7 @@ func (s *Server) handleTarUpload(req *Request, mw *api.ManifestWriter) error { } } -func (s *Server) handleMultipartUpload(req *Request, boundary string, mw *api.ManifestWriter) error { +func (s *Server) handleMultipartUpload(ctx context.Context, req *Request, boundary string, mw *api.ManifestWriter) error { log.Debug("handle.multipart.upload", "ruid", req.ruid) mr := multipart.NewReader(req.Body, boundary) for { @@ -311,7 +312,7 @@ func (s *Server) handleMultipartUpload(req *Request, boundary string, mw *api.Ma ModTime: time.Now(), } log.Debug("adding path to new manifest", "ruid", req.ruid, "bytes", entry.Size, "path", entry.Path) - contentKey, err := mw.AddEntry(reader, entry) + contentKey, err := mw.AddEntry(ctx, reader, entry) if err != nil { return fmt.Errorf("error adding manifest entry from multipart form: %s", err) } @@ -319,9 +320,9 @@ func (s *Server) handleMultipartUpload(req *Request, boundary string, mw *api.Ma } } -func (s *Server) handleDirectUpload(req *Request, mw *api.ManifestWriter) error { +func (s *Server) handleDirectUpload(ctx context.Context, req *Request, mw *api.ManifestWriter) error { log.Debug("handle.direct.upload", "ruid", req.ruid) - key, err := mw.AddEntry(req.Body, &api.ManifestEntry{ + key, err := mw.AddEntry(ctx, req.Body, &api.ManifestEntry{ Path: req.uri.Path, ContentType: req.Header.Get("Content-Type"), Mode: 0644, @@ -338,18 +339,18 @@ func (s *Server) handleDirectUpload(req *Request, mw *api.ManifestWriter) error // HandleDelete handles a DELETE request to bzz://, removes // from and returns the resulting manifest hash as a // text/plain response -func (s *Server) HandleDelete(w http.ResponseWriter, r *Request) { +func (s *Server) HandleDelete(ctx context.Context, w http.ResponseWriter, r *Request) { log.Debug("handle.delete", "ruid", r.ruid) deleteCount.Inc(1) - key, err := s.api.Resolve(r.uri) + key, err := s.api.Resolve(ctx, r.uri) if err != nil { deleteFail.Inc(1) Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusInternalServerError) return } - newKey, err := s.updateManifest(key, func(mw *api.ManifestWriter) error { + newKey, err := s.updateManifest(ctx, key, func(mw *api.ManifestWriter) error { log.Debug(fmt.Sprintf("removing %s from manifest %s", r.uri.Path, key.Log()), "ruid", r.ruid) return mw.RemoveEntry(r.uri.Path) }) @@ -399,7 +400,7 @@ func resourcePostMode(path string) (isRaw bool, frequency uint64, err error) { // The resource name will be verbatim what is passed as the address part of the url. 
// For example, if a POST is made to /bzz-resource:/foo.eth/raw/13 a new resource with frequency 13 // and name "foo.eth" will be created -func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { +func (s *Server) HandlePostResource(ctx context.Context, w http.ResponseWriter, r *Request) { log.Debug("handle.post.resource", "ruid", r.ruid) var err error var addr storage.Address @@ -428,7 +429,7 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { // we create a manifest so we can retrieve the resource with bzz:// later // this manifest has a special "resource type" manifest, and its hash is the key of the mutable resource // root chunk - m, err := s.api.NewResourceManifest(addr.Hex()) + m, err := s.api.NewResourceManifest(ctx, addr.Hex()) if err != nil { Respond(w, r, fmt.Sprintf("failed to create resource manifest: %v", err), http.StatusInternalServerError) return @@ -448,7 +449,7 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { // that means that we retrieve the manifest and inspect its Hash member. manifestAddr := r.uri.Address() if manifestAddr == nil { - manifestAddr, err = s.api.Resolve(r.uri) + manifestAddr, err = s.api.Resolve(ctx, r.uri) if err != nil { getFail.Inc(1) Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) @@ -459,7 +460,7 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { } // get the root chunk key from the manifest - addr, err = s.api.ResolveResourceManifest(manifestAddr) + addr, err = s.api.ResolveResourceManifest(ctx, manifestAddr) if err != nil { getFail.Inc(1) Respond(w, r, fmt.Sprintf("error resolving resource root chunk for %s: %s", r.uri.Addr, err), http.StatusNotFound) @@ -518,19 +519,19 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { // bzz-resource:/// - get latest update on period n // bzz-resource://// - get update version m of period n // = ens name or hash -func (s *Server) HandleGetResource(w http.ResponseWriter, r *Request) { - s.handleGetResource(w, r) +func (s *Server) HandleGetResource(ctx context.Context, w http.ResponseWriter, r *Request) { + s.handleGetResource(ctx, w, r) } // TODO: Enable pass maxPeriod parameter -func (s *Server) handleGetResource(w http.ResponseWriter, r *Request) { +func (s *Server) handleGetResource(ctx context.Context, w http.ResponseWriter, r *Request) { log.Debug("handle.get.resource", "ruid", r.ruid) var err error // resolve the content key. 
manifestAddr := r.uri.Address() if manifestAddr == nil { - manifestAddr, err = s.api.Resolve(r.uri) + manifestAddr, err = s.api.Resolve(ctx, r.uri) if err != nil { getFail.Inc(1) Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) @@ -541,7 +542,7 @@ func (s *Server) handleGetResource(w http.ResponseWriter, r *Request) { } // get the root chunk key from the manifest - key, err := s.api.ResolveResourceManifest(manifestAddr) + key, err := s.api.ResolveResourceManifest(ctx, manifestAddr) if err != nil { getFail.Inc(1) Respond(w, r, fmt.Sprintf("error resolving resource root chunk for %s: %s", r.uri.Addr, err), http.StatusNotFound) @@ -623,13 +624,13 @@ func (s *Server) translateResourceError(w http.ResponseWriter, r *Request, supEr // given storage key // - bzz-hash://<key> and responds with the hash of the content stored // at the given storage key as a text/plain response -func (s *Server) HandleGet(w http.ResponseWriter, r *Request) { +func (s *Server) HandleGet(ctx context.Context, w http.ResponseWriter, r *Request) { log.Debug("handle.get", "ruid", r.ruid, "uri", r.uri) getCount.Inc(1) var err error addr := r.uri.Address() if addr == nil { - addr, err = s.api.Resolve(r.uri) + addr, err = s.api.Resolve(ctx, r.uri) if err != nil { getFail.Inc(1) Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) @@ -644,7 +645,7 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *Request) { // if path is set, interpret as a manifest and return the // raw entry at the given path if r.uri.Path != "" { - walker, err := s.api.NewManifestWalker(addr, nil) + walker, err := s.api.NewManifestWalker(ctx, addr, nil) if err != nil { getFail.Inc(1) Respond(w, r, fmt.Sprintf("%s is not a manifest", addr), http.StatusBadRequest) @@ -692,7 +693,7 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *Request) { } // check the root chunk exists by retrieving the file's size - reader, isEncrypted := s.api.Retrieve(addr) + reader, isEncrypted := s.api.Retrieve(ctx, addr) if _, err := reader.Size(nil); err != nil { getFail.Inc(1) Respond(w, r, fmt.Sprintf("root chunk not found %s: %s", addr, err), http.StatusNotFound) @@ -721,7 +722,7 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *Request) { // HandleGetFiles handles a GET request to bzz:/<manifest> with an Accept // header of "application/x-tar" and returns a tar stream of all files // contained in the manifest -func (s *Server) HandleGetFiles(w http.ResponseWriter, r *Request) { +func (s *Server) HandleGetFiles(ctx context.Context, w http.ResponseWriter, r *Request) { log.Debug("handle.get.files", "ruid", r.ruid, "uri", r.uri) getFilesCount.Inc(1) if r.uri.Path != "" { @@ -730,7 +731,7 @@ func (s *Server) HandleGetFiles(w http.ResponseWriter, r *Request) { return } - addr, err := s.api.Resolve(r.uri) + addr, err := s.api.Resolve(ctx, r.uri) if err != nil { getFilesFail.Inc(1) Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) @@ -738,7 +739,7 @@ func (s *Server) HandleGetFiles(w http.ResponseWriter, r *Request) { } log.Debug("handle.get.files: resolved", "ruid", r.ruid, "key", addr) - walker, err := s.api.NewManifestWalker(addr, nil) + walker, err := s.api.NewManifestWalker(ctx, addr, nil) if err != nil { getFilesFail.Inc(1) Respond(w, r, err.Error(), http.StatusInternalServerError) @@ -757,7 +758,7 @@ func (s *Server) HandleGetFiles(w http.ResponseWriter, r *Request) { } // retrieve the entry's key and size - reader, isEncrypted :=
s.api.Retrieve(storage.Address(common.Hex2Bytes(entry.Hash))) + reader, isEncrypted := s.api.Retrieve(ctx, storage.Address(common.Hex2Bytes(entry.Hash))) size, err := reader.Size(nil) if err != nil { return err } @@ -797,7 +798,7 @@ func (s *Server) HandleGetFiles(w http.ResponseWriter, r *Request) { // HandleGetList handles a GET request to bzz-list:/<manifest>/<path> and returns // a list of all files contained in <manifest> under <path> grouped into // common prefixes using "/" as a delimiter -func (s *Server) HandleGetList(w http.ResponseWriter, r *Request) { +func (s *Server) HandleGetList(ctx context.Context, w http.ResponseWriter, r *Request) { log.Debug("handle.get.list", "ruid", r.ruid, "uri", r.uri) getListCount.Inc(1) // ensure the root path has a trailing slash so that relative URLs work @@ -806,7 +807,7 @@ func (s *Server) HandleGetList(w http.ResponseWriter, r *Request) { return } - addr, err := s.api.Resolve(r.uri) + addr, err := s.api.Resolve(ctx, r.uri) if err != nil { getListFail.Inc(1) Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) @@ -814,7 +815,7 @@ func (s *Server) HandleGetList(w http.ResponseWriter, r *Request) { } log.Debug("handle.get.list: resolved", "ruid", r.ruid, "key", addr) - list, err := s.getManifestList(addr, r.uri.Path) + list, err := s.getManifestList(ctx, addr, r.uri.Path) if err != nil { getListFail.Inc(1) @@ -845,8 +846,8 @@ func (s *Server) HandleGetList(w http.ResponseWriter, r *Request) { json.NewEncoder(w).Encode(&list) } -func (s *Server) getManifestList(addr storage.Address, prefix string) (list api.ManifestList, err error) { - walker, err := s.api.NewManifestWalker(addr, nil) +func (s *Server) getManifestList(ctx context.Context, addr storage.Address, prefix string) (list api.ManifestList, err error) { + walker, err := s.api.NewManifestWalker(ctx, addr, nil) if err != nil { return } @@ -903,7 +904,7 @@ func (s *Server) getManifestList(addr storage.Address, prefix string) (list api. // HandleGetFile handles a GET request to bzz://<manifest>/<path> and responds // with the content of the file at <path> from the given <manifest> -func (s *Server) HandleGetFile(w http.ResponseWriter, r *Request) { +func (s *Server) HandleGetFile(ctx context.Context, w http.ResponseWriter, r *Request) { log.Debug("handle.get.file", "ruid", r.ruid) getFileCount.Inc(1) // ensure the root path has a trailing slash so that relative URLs work @@ -915,7 +916,7 @@ manifestAddr := r.uri.Address() if manifestAddr == nil { - manifestAddr, err = s.api.Resolve(r.uri) + manifestAddr, err = s.api.Resolve(ctx, r.uri) if err != nil { getFileFail.Inc(1) Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) @@ -927,7 +928,7 @@ log.Debug("handle.get.file: resolved", "ruid", r.ruid, "key", manifestAddr) - reader, contentType, status, contentKey, err := s.api.Get(manifestAddr, r.uri.Path) + reader, contentType, status, contentKey, err := s.api.Get(ctx, manifestAddr, r.uri.Path) etag := common.Bytes2Hex(contentKey) noneMatchEtag := r.Header.Get("If-None-Match") @@ -954,7 +955,7 @@ //the request results in ambiguous files //e.g.
/read with readme.md and readinglist.txt available in manifest if status == http.StatusMultipleChoices { - list, err := s.getManifestList(manifestAddr, r.uri.Path) + list, err := s.getManifestList(ctx, manifestAddr, r.uri.Path) if err != nil { getFileFail.Inc(1) @@ -1011,6 +1012,8 @@ func (b bufferedReadSeeker) Seek(offset int64, whence int) (int64, error) { } func (s *Server) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + ctx := context.TODO() + defer metrics.GetOrRegisterResettingTimer(fmt.Sprintf("http.request.%s.time", r.Method), nil).UpdateSince(time.Now()) req := &Request{Request: *r, ruid: uuid.New()[:8]} metrics.GetOrRegisterCounter(fmt.Sprintf("http.request.%s", r.Method), nil).Inc(1) @@ -1055,16 +1058,16 @@ func (s *Server) ServeHTTP(rw http.ResponseWriter, r *http.Request) { case "POST": if uri.Raw() { log.Debug("handlePostRaw") - s.HandlePostRaw(w, req) + s.HandlePostRaw(ctx, w, req) } else if uri.Resource() { log.Debug("handlePostResource") - s.HandlePostResource(w, req) + s.HandlePostResource(ctx, w, req) } else if uri.Immutable() || uri.List() || uri.Hash() { log.Debug("POST not allowed on immutable, list or hash") Respond(w, req, fmt.Sprintf("POST method on scheme %s not allowed", uri.Scheme), http.StatusMethodNotAllowed) } else { log.Debug("handlePostFiles") - s.HandlePostFiles(w, req) + s.HandlePostFiles(ctx, w, req) } case "PUT": @@ -1076,31 +1079,31 @@ func (s *Server) ServeHTTP(rw http.ResponseWriter, r *http.Request) { Respond(w, req, fmt.Sprintf("DELETE method to %s not allowed", uri), http.StatusBadRequest) return } - s.HandleDelete(w, req) + s.HandleDelete(ctx, w, req) case "GET": if uri.Resource() { - s.HandleGetResource(w, req) + s.HandleGetResource(ctx, w, req) return } if uri.Raw() || uri.Hash() { - s.HandleGet(w, req) + s.HandleGet(ctx, w, req) return } if uri.List() { - s.HandleGetList(w, req) + s.HandleGetList(ctx, w, req) return } if r.Header.Get("Accept") == "application/x-tar" { - s.HandleGetFiles(w, req) + s.HandleGetFiles(ctx, w, req) return } - s.HandleGetFile(w, req) + s.HandleGetFile(ctx, w, req) default: Respond(w, req, fmt.Sprintf("%s method is not supported", r.Method), http.StatusMethodNotAllowed) @@ -1109,8 +1112,8 @@ func (s *Server) ServeHTTP(rw http.ResponseWriter, r *http.Request) { log.Info("served response", "ruid", req.ruid, "code", w.statusCode) } -func (s *Server) updateManifest(addr storage.Address, update func(mw *api.ManifestWriter) error) (storage.Address, error) { - mw, err := s.api.NewManifestWriter(addr, nil) +func (s *Server) updateManifest(ctx context.Context, addr storage.Address, update func(mw *api.ManifestWriter) error) (storage.Address, error) { + mw, err := s.api.NewManifestWriter(ctx, addr, nil) if err != nil { return nil, err } diff --git a/swarm/api/http/server_test.go b/swarm/api/http/server_test.go index 9fb21f7a357a..bfbc0a79dbe7 100644 --- a/swarm/api/http/server_test.go +++ b/swarm/api/http/server_test.go @@ -18,6 +18,7 @@ package http import ( "bytes" + "context" "crypto/rand" "encoding/json" "errors" @@ -382,15 +383,19 @@ func testBzzGetPath(encrypted bool, t *testing.T) { for i, mf := range testmanifest { reader[i] = bytes.NewReader([]byte(mf)) - var wait func() - addr[i], wait, err = srv.FileStore.Store(reader[i], int64(len(mf)), encrypted) + var wait func(context.Context) error + ctx := context.TODO() + addr[i], wait, err = srv.FileStore.Store(ctx, reader[i], int64(len(mf)), encrypted) for j := i + 1; j < len(testmanifest); j++ { testmanifest[j] = strings.Replace(testmanifest[j], fmt.Sprintf("", i), 
addr[i].Hex(), -1) } if err != nil { t.Fatal(err) } - wait() + err = wait(ctx) + if err != nil { + t.Fatal(err) + } } rootRef := addr[2].Hex() diff --git a/swarm/api/http/templates.go b/swarm/api/http/templates.go index ffd816493074..8897b9694604 100644 --- a/swarm/api/http/templates.go +++ b/swarm/api/http/templates.go @@ -79,20 +79,25 @@ var landingPageTemplate = template.Must(template.New("landingPage").Parse(`
[landing page HTML template: the markup did not survive extraction. Both the removed and the added versions render a "Swarm :: Welcome to Swarm" page with a form captioned "Enter the hash or ENS of a Swarm-hosted file below:"; the recoverable change is the footer text, from "Swarm: Serverless Hosting Incentivised Peer-To-Peer Storage And Content Distribution" to "Swarm: Serverless Hosting Incentivised peer-to-peer Storage and Content Distribution".]
+ `[1:])) diff --git a/swarm/api/manifest.go b/swarm/api/manifest.go index 28597636eed6..78d1418bc27d 100644 --- a/swarm/api/manifest.go +++ b/swarm/api/manifest.go @@ -18,6 +18,7 @@ package api import ( "bytes" + "context" "encoding/json" "errors" "fmt" @@ -61,20 +62,20 @@ type ManifestList struct { } // NewManifest creates and stores a new, empty manifest -func (a *API) NewManifest(toEncrypt bool) (storage.Address, error) { +func (a *API) NewManifest(ctx context.Context, toEncrypt bool) (storage.Address, error) { var manifest Manifest data, err := json.Marshal(&manifest) if err != nil { return nil, err } - key, wait, err := a.Store(bytes.NewReader(data), int64(len(data)), toEncrypt) - wait() + key, wait, err := a.Store(ctx, bytes.NewReader(data), int64(len(data)), toEncrypt) + wait(ctx) return key, err } // Manifest hack for supporting Mutable Resource Updates from the bzz: scheme // see swarm/api/api.go:API.Get() for more information -func (a *API) NewResourceManifest(resourceAddr string) (storage.Address, error) { +func (a *API) NewResourceManifest(ctx context.Context, resourceAddr string) (storage.Address, error) { var manifest Manifest entry := ManifestEntry{ Hash: resourceAddr, @@ -85,7 +86,7 @@ func (a *API) NewResourceManifest(resourceAddr string) (storage.Address, error) if err != nil { return nil, err } - key, _, err := a.Store(bytes.NewReader(data), int64(len(data)), false) + key, _, err := a.Store(ctx, bytes.NewReader(data), int64(len(data)), false) return key, err } @@ -96,8 +97,8 @@ type ManifestWriter struct { quitC chan bool } -func (a *API) NewManifestWriter(addr storage.Address, quitC chan bool) (*ManifestWriter, error) { - trie, err := loadManifest(a.fileStore, addr, quitC) +func (a *API) NewManifestWriter(ctx context.Context, addr storage.Address, quitC chan bool) (*ManifestWriter, error) { + trie, err := loadManifest(ctx, a.fileStore, addr, quitC) if err != nil { return nil, fmt.Errorf("error loading manifest %s: %s", addr, err) } @@ -105,9 +106,8 @@ func (a *API) NewManifestWriter(addr storage.Address, quitC chan bool) (*Manifes } // AddEntry stores the given data and adds the resulting key to the manifest -func (m *ManifestWriter) AddEntry(data io.Reader, e *ManifestEntry) (storage.Address, error) { - - key, _, err := m.api.Store(data, e.Size, m.trie.encrypted) +func (m *ManifestWriter) AddEntry(ctx context.Context, data io.Reader, e *ManifestEntry) (storage.Address, error) { + key, _, err := m.api.Store(ctx, data, e.Size, m.trie.encrypted) if err != nil { return nil, err } @@ -136,8 +136,8 @@ type ManifestWalker struct { quitC chan bool } -func (a *API) NewManifestWalker(addr storage.Address, quitC chan bool) (*ManifestWalker, error) { - trie, err := loadManifest(a.fileStore, addr, quitC) +func (a *API) NewManifestWalker(ctx context.Context, addr storage.Address, quitC chan bool) (*ManifestWalker, error) { + trie, err := loadManifest(ctx, a.fileStore, addr, quitC) if err != nil { return nil, fmt.Errorf("error loading manifest %s: %s", addr, err) } @@ -204,10 +204,10 @@ type manifestTrieEntry struct { subtrie *manifestTrie } -func loadManifest(fileStore *storage.FileStore, hash storage.Address, quitC chan bool) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand +func loadManifest(ctx context.Context, fileStore *storage.FileStore, hash storage.Address, quitC chan bool) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand log.Trace("manifest lookup", "key", hash) // retrieve manifest via FileStore - 
manifestReader, isEncrypted := fileStore.Retrieve(hash) + manifestReader, isEncrypted := fileStore.Retrieve(ctx, hash) log.Trace("reader retrieved", "key", hash) return readManifest(manifestReader, hash, fileStore, isEncrypted, quitC) } @@ -382,8 +382,12 @@ func (mt *manifestTrie) recalcAndStore() error { } sr := bytes.NewReader(manifest) - key, wait, err2 := mt.fileStore.Store(sr, int64(len(manifest)), mt.encrypted) - wait() + ctx := context.TODO() + key, wait, err2 := mt.fileStore.Store(ctx, sr, int64(len(manifest)), mt.encrypted) + if err2 != nil { + return err2 + } + err2 = wait(ctx) mt.ref = key return err2 } @@ -391,7 +395,7 @@ func (mt *manifestTrie) recalcAndStore() error { func (mt *manifestTrie) loadSubTrie(entry *manifestTrieEntry, quitC chan bool) (err error) { if entry.subtrie == nil { hash := common.Hex2Bytes(entry.Hash) - entry.subtrie, err = loadManifest(mt.fileStore, hash, quitC) + entry.subtrie, err = loadManifest(context.TODO(), mt.fileStore, hash, quitC) entry.Hash = "" // might not match, should be recalculated } return diff --git a/swarm/api/storage.go b/swarm/api/storage.go index 6ab4af6c4b20..8646dc41f841 100644 --- a/swarm/api/storage.go +++ b/swarm/api/storage.go @@ -17,6 +17,7 @@ package api import ( + "context" "path" "github.com/ethereum/go-ethereum/swarm/storage" @@ -45,8 +46,8 @@ func NewStorage(api *API) *Storage { // its content type // // DEPRECATED: Use the HTTP API instead -func (s *Storage) Put(content, contentType string, toEncrypt bool) (storage.Address, func(), error) { - return s.api.Put(content, contentType, toEncrypt) +func (s *Storage) Put(ctx context.Context, content string, contentType string, toEncrypt bool) (storage.Address, func(context.Context) error, error) { + return s.api.Put(ctx, content, contentType, toEncrypt) } // Get retrieves the content from bzzpath and reads the response in full @@ -57,16 +58,16 @@ func (s *Storage) Put(content, contentType string, toEncrypt bool) (storage.Addr // size is resp.Size // // DEPRECATED: Use the HTTP API instead -func (s *Storage) Get(bzzpath string) (*Response, error) { +func (s *Storage) Get(ctx context.Context, bzzpath string) (*Response, error) { uri, err := Parse(path.Join("bzz:/", bzzpath)) if err != nil { return nil, err } - addr, err := s.api.Resolve(uri) + addr, err := s.api.Resolve(ctx, uri) if err != nil { return nil, err } - reader, mimeType, status, _, err := s.api.Get(addr, uri.Path) + reader, mimeType, status, _, err := s.api.Get(ctx, addr, uri.Path) if err != nil { return nil, err } @@ -87,16 +88,16 @@ func (s *Storage) Get(bzzpath string) (*Response, error) { // and merge on to it. 
creating an entry with contentType (mime) // // DEPRECATED: Use the HTTP API instead -func (s *Storage) Modify(rootHash, path, contentHash, contentType string) (newRootHash string, err error) { +func (s *Storage) Modify(ctx context.Context, rootHash, path, contentHash, contentType string) (newRootHash string, err error) { uri, err := Parse("bzz:/" + rootHash) if err != nil { return "", err } - addr, err := s.api.Resolve(uri) + addr, err := s.api.Resolve(ctx, uri) if err != nil { return "", err } - addr, err = s.api.Modify(addr, path, contentHash, contentType) + addr, err = s.api.Modify(ctx, addr, path, contentHash, contentType) if err != nil { return "", err } diff --git a/swarm/api/storage_test.go b/swarm/api/storage_test.go index 9d23e8f13699..ef96972b68a6 100644 --- a/swarm/api/storage_test.go +++ b/swarm/api/storage_test.go @@ -17,6 +17,7 @@ package api import ( + "context" "testing" ) @@ -31,18 +32,22 @@ func TestStoragePutGet(t *testing.T) { content := "hello" exp := expResponse(content, "text/plain", 0) // exp := expResponse([]byte(content), "text/plain", 0) - bzzkey, wait, err := api.Put(content, exp.MimeType, toEncrypt) + ctx := context.TODO() + bzzkey, wait, err := api.Put(ctx, content, exp.MimeType, toEncrypt) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + err = wait(ctx) if err != nil { t.Fatalf("unexpected error: %v", err) } - wait() bzzhash := bzzkey.Hex() // to check put against the API#Get resp0 := testGet(t, api.api, bzzhash, "") checkResponse(t, resp0, exp) // check storage#Get - resp, err := api.Get(bzzhash) + resp, err := api.Get(context.TODO(), bzzhash) if err != nil { t.Fatalf("unexpected error: %v", err) } diff --git a/swarm/bmt/bmt.go b/swarm/bmt/bmt.go index 71aee24955aa..835587020c39 100644 --- a/swarm/bmt/bmt.go +++ b/swarm/bmt/bmt.go @@ -117,10 +117,7 @@ func NewTreePool(hasher BaseHasherFunc, segmentCount, capacity int) *TreePool { zerohashes[0] = zeros h := hasher() for i := 1; i < depth; i++ { - h.Reset() - h.Write(zeros) - h.Write(zeros) - zeros = h.Sum(nil) + zeros = doHash(h, nil, zeros, zeros) zerohashes[i] = zeros } return &TreePool{ @@ -318,41 +315,19 @@ func (h *Hasher) Sum(b []byte) (r []byte) { // * if sequential write is used (can read sections) func (h *Hasher) sum(b []byte, release, section bool) (r []byte) { t := h.bmt - h.finalise(section) - if t.offset > 0 { // get the last node (double segment) - - // padding the segment with zero - copy(t.segment[t.offset:], h.pool.zerohashes[0]) - } - if section { - if t.cur%2 == 1 { - // if just finished current segment, copy it to the right half of the chunk - copy(t.section[h.pool.SegmentSize:], t.segment) - } else { - // copy segment to front of section, zero pad the right half - copy(t.section, t.segment) - copy(t.section[h.pool.SegmentSize:], h.pool.zerohashes[0]) - } - h.writeSection(t.cur, t.section) - } else { - // TODO: h.writeSegment(t.cur, t.segment) - panic("SegmentWriter not implemented") - } + bh := h.pool.hasher() + go h.writeSection(t.cur, t.section, true) bmtHash := <-t.result span := t.span - + // fmt.Println(t.draw(bmtHash)) if release { h.releaseTree() } - // sha3(span + BMT(pure_chunk)) + // b + sha3(span + BMT(pure_chunk)) if span == nil { - return bmtHash + return append(b, bmtHash...)
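// (a nil span means the caller wants the bare BMT root; otherwise the span,
// swarm's 8-byte chunk length prefix, is folded into the final digest below)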
} - bh := h.pool.hasher() - bh.Reset() - bh.Write(span) - bh.Write(bmtHash) - return bh.Sum(b) + return doHash(bh, b, span, bmtHash) } // Hasher implements the SwarmHash interface @@ -367,37 +342,41 @@ func (h *Hasher) Write(b []byte) (int, error) { return 0, nil } t := h.bmt - need := (h.pool.SegmentCount - t.cur) * h.pool.SegmentSize - if l < need { - need = l - } - // calculate missing bit to complete current open segment - rest := h.pool.SegmentSize - t.offset - if need < rest { - rest = need - } - copy(t.segment[t.offset:], b[:rest]) - need -= rest - size := (t.offset + rest) % h.pool.SegmentSize - // read full segments and the last possibly partial segment - for need > 0 { - // push all finished chunks we read - if t.cur%2 == 0 { - copy(t.section, t.segment) - } else { - copy(t.section[h.pool.SegmentSize:], t.segment) - h.writeSection(t.cur, t.section) + secsize := 2 * h.pool.SegmentSize + // calculate length of missing bit to complete current open section + smax := secsize - t.offset + // if at the beginning of chunk or middle of the section + if t.offset < secsize { + // fill up current segment from buffer + copy(t.section[t.offset:], b) + // if input buffer consumed and open section not complete, then + // advance offset and return + if smax == 0 { + smax = secsize + } + if l <= smax { + t.offset += l + return l, nil } - size = h.pool.SegmentSize - if need < size { - size = need + } else { + if t.cur == h.pool.SegmentCount*2 { + return 0, nil } - copy(t.segment, b[rest:rest+size]) - need -= size - rest += size + } + // read full segments and the last possibly partial segment from the input buffer + for smax < l { + // section complete; push to tree asynchronously + go h.writeSection(t.cur, t.section, false) + // reset section + t.section = make([]byte, secsize) + // copy from input buffer at smax to right half of section + copy(t.section, b[smax:]) + // advance cursor t.cur++ + // smax here represents successive offsets in the input buffer + smax += secsize } - t.offset = size % h.pool.SegmentSize + t.offset = l - smax + secsize return l, nil } @@ -426,6 +405,8 @@ func (h *Hasher) releaseTree() { t.span = nil t.hash = nil h.bmt = nil + t.section = make([]byte, h.pool.SegmentSize*2) + t.segment = make([]byte, h.pool.SegmentSize) h.pool.release(t) } } @@ -435,29 +416,37 @@ // go h.run(h.bmt.leaves[i/2], h.pool.hasher(), i%2 == 0, s) // } -// writeSection writes the hash of i/2-th section into right level 1 node of the BMT tree -func (h *Hasher) writeSection(i int, section []byte) { - n := h.bmt.leaves[i/2] +// writeSection writes the hash of i-th section into level 1 node of the BMT tree +func (h *Hasher) writeSection(i int, section []byte, final bool) { + // select the leaf node for the section + n := h.bmt.leaves[i] isLeft := n.isLeft n = n.parent bh := h.pool.hasher() - bh.Write(section) - go func() { - sum := bh.Sum(nil) - if n == nil { - h.bmt.result <- sum - return - } - h.run(n, bh, isLeft, sum) - }() + // hash the section + s := doHash(bh, nil, section) + // write hash into parent node + if final { + // for the last segment use writeFinalNode + h.writeFinalNode(1, n, bh, isLeft, s) + } else { + h.writeNode(n, bh, isLeft, s) + } } -// run pushes the data to the node +// writeNode pushes the data to the node // if it is the first of 2 sisters written the routine returns // if it is the second, it calculates the hash and writes it // to the parent node recursively -func (h *Hasher) run(n *node, bh hash.Hash, isLeft bool, s []byte) { +func (h
*Hasher) writeNode(n *node, bh hash.Hash, isLeft bool, s []byte) { + level := 1 for { + // at the root of the bmt just write the result to the result channel + if n == nil { + h.bmt.result <- s + return + } + // otherwise assign child hash to branch if isLeft { n.left = s } else { @@ -467,44 +456,68 @@ if n.toggle() { return } - // the second thread now can be sure both left and right children are written - // it calculates the hash of left|right and take it to the next level - bh.Reset() - bh.Write(n.left) - bh.Write(n.right) - s = bh.Sum(nil) - - // at the root of the bmt just write the result to the result channel - if n.parent == nil { - h.bmt.result <- s - return - } - - // otherwise iterate on parent + // the thread coming later now can be sure both left and right children are written + // it calculates the hash of left|right and pushes it to the parent + s = doHash(bh, nil, n.left, n.right) isLeft = n.isLeft n = n.parent + level++ } } -// finalise is following the path starting from the final datasegment to the +// writeFinalNode is following the path starting from the final datasegment to the // BMT root via parents // for unbalanced trees it fills in the missing right sister nodes using // the pool's lookup table for BMT subtree root hashes for all-zero sections -func (h *Hasher) finalise(skip bool) { - t := h.bmt - isLeft := t.cur%2 == 0 - n := t.leaves[t.cur/2] - for level := 0; n != nil; level++ { - // when the final segment's path is going via left child node - // we include an all-zero subtree hash for the right level and toggle the node. - // when the path is going through right child node, nothing to do - if isLeft && !skip { +// otherwise behaves like `writeNode` +func (h *Hasher) writeFinalNode(level int, n *node, bh hash.Hash, isLeft bool, s []byte) { + + for { + // at the root of the bmt just write the result to the result channel + if n == nil { + if s != nil { + h.bmt.result <- s + } + return + } + var noHash bool + if isLeft { + // coming from left sister branch + // when the final section's path is going via left child node + // we include an all-zero subtree hash for the right level and toggle the node.
+ // when the path is going through right child node, nothing to do n.right = h.pool.zerohashes[level] - n.toggle() + if s != nil { + n.left = s + // if a left final node carries a hash, it must be the first (and only thread) + // so the toggle is already in passive state, no need to call it + // yet thread needs to carry on pushing hash to parent + } else { + // if again first thread then propagate nil and calculate no hash + noHash = n.toggle() + } + } else { + // right sister branch + // if s is nil, then thread arrived first at previous node and here there will be two, + // so no need to do anything + if s != nil { + n.right = s + noHash = n.toggle() + } else { + noHash = true + } + } + // the child-thread first arriving will just continue resetting s to nil + // the second thread now can be sure both left and right children are written + // it calculates the hash of left|right and pushes it to the parent + if noHash { + s = nil + } else { + s = doHash(bh, nil, n.left, n.right) } - skip = false isLeft = n.isLeft n = n.parent + level++ } } @@ -525,6 +538,15 @@ func (n *node) toggle() bool { return atomic.AddInt32(&n.state, 1)%2 == 1 } +// calculates the hash of the data using hash.Hash +func doHash(h hash.Hash, b []byte, data ...[]byte) []byte { + h.Reset() + for _, v := range data { + h.Write(v) + } + return h.Sum(b) +} + func hashstr(b []byte) string { end := len(b) if end > 4 { diff --git a/swarm/bmt/bmt_r.go b/swarm/bmt/bmt_r.go index c61d2dc73212..0cb6c146f5d7 100644 --- a/swarm/bmt/bmt_r.go +++ b/swarm/bmt/bmt_r.go @@ -80,6 +80,5 @@ func (rh *RefHasher) hash(data []byte, length int) []byte { } rh.hasher.Reset() rh.hasher.Write(section) - s := rh.hasher.Sum(nil) - return s + return rh.hasher.Sum(nil) } diff --git a/swarm/bmt/bmt_test.go b/swarm/bmt/bmt_test.go index e074d90e73dd..ae40eadab8a0 100644 --- a/swarm/bmt/bmt_test.go +++ b/swarm/bmt/bmt_test.go @@ -34,12 +34,12 @@ import ( // the actual data length generated (could be longer than max datalength of the BMT) const BufferSize = 4128 +var counts = []int{1, 2, 3, 4, 5, 8, 9, 15, 16, 17, 32, 37, 42, 53, 63, 64, 65, 111, 127, 128} + +// calculates the Keccak256 SHA3 hash of the data func sha3hash(data ...[]byte) []byte { h := sha3.NewKeccak256() - for _, v := range data { - h.Write(v) - } - return h.Sum(nil) + return doHash(h, nil, data...) } // TestRefHasher tests that the RefHasher computes the expected BMT hash for @@ -129,31 +129,48 @@ func TestRefHasher(t *testing.T) { } } -func TestHasherCorrectness(t *testing.T) { - err := testHasher(testBaseHasher) - if err != nil { - t.Fatal(err) +// tests if hasher responds with correct hash +func TestHasherEmptyData(t *testing.T) { + hasher := sha3.NewKeccak256 + var data []byte + for _, count := range counts { + t.Run(fmt.Sprintf("%d_segments", count), func(t *testing.T) { + pool := NewTreePool(hasher, count, PoolSize) + defer pool.Drain(0) + bmt := New(pool) + rbmt := NewRefHasher(hasher, count) + refHash := rbmt.Hash(data) + expHash := Hash(bmt, nil, data) + if !bytes.Equal(expHash, refHash) { + t.Fatalf("hash mismatch with reference.
expected %x, got %x", refHash, expHash) + } + }) } } -func testHasher(f func(BaseHasherFunc, []byte, int, int) error) error { +func TestHasherCorrectness(t *testing.T) { data := newData(BufferSize) hasher := sha3.NewKeccak256 size := hasher().Size() - counts := []int{1, 2, 3, 4, 5, 8, 16, 32, 64, 128} var err error for _, count := range counts { - max := count * size - incr := 1 - for n := 1; n <= max; n += incr { - err = f(hasher, data, n, count) - if err != nil { - return err + t.Run(fmt.Sprintf("segments_%v", count), func(t *testing.T) { + max := count * size + incr := 1 + capacity := 1 + pool := NewTreePool(hasher, count, capacity) + defer pool.Drain(0) + for n := 0; n <= max; n += incr { + incr = 1 + rand.Intn(5) + bmt := New(pool) + err = testHasherCorrectness(bmt, hasher, data, n, count) + if err != nil { + t.Fatal(err) + } } - } + }) } - return nil } // Tests that the BMT hasher can be synchronously reused with poolsizes 1 and PoolSize @@ -215,12 +232,69 @@ LOOP: } } -// helper function that creates a tree pool -func testBaseHasher(hasher BaseHasherFunc, d []byte, n, count int) error { - pool := NewTreePool(hasher, count, 1) - defer pool.Drain(0) - bmt := New(pool) - return testHasherCorrectness(bmt, hasher, d, n, count) +// Tests BMT Hasher io.Writer interface is working correctly +// even multiple short random write buffers +func TestBMTHasherWriterBuffers(t *testing.T) { + hasher := sha3.NewKeccak256 + + for _, count := range counts { + t.Run(fmt.Sprintf("%d_segments", count), func(t *testing.T) { + errc := make(chan error) + pool := NewTreePool(hasher, count, PoolSize) + defer pool.Drain(0) + n := count * 32 + bmt := New(pool) + data := newData(n) + rbmt := NewRefHasher(hasher, count) + refHash := rbmt.Hash(data) + expHash := Hash(bmt, nil, data) + if !bytes.Equal(expHash, refHash) { + t.Fatalf("hash mismatch with reference. expected %x, got %x", refHash, expHash) + } + attempts := 10 + f := func() error { + bmt := New(pool) + bmt.Reset() + var buflen int + for offset := 0; offset < n; offset += buflen { + buflen = rand.Intn(n-offset) + 1 + read, err := bmt.Write(data[offset : offset+buflen]) + if err != nil { + return err + } + if read != buflen { + return fmt.Errorf("incorrect read. expected %v bytes, got %v", buflen, read) + } + } + hash := bmt.Sum(nil) + if !bytes.Equal(hash, expHash) { + return fmt.Errorf("hash mismatch. 
expected %x, got %x", hash, expHash) + } + return nil + } + + for j := 0; j < attempts; j++ { + go func() { + errc <- f() + }() + } + timeout := time.NewTimer(2 * time.Second) + for { + select { + case err := <-errc: + if err != nil { + t.Fatal(err) + } + attempts-- + if attempts == 0 { + return + } + case <-timeout.C: + t.Fatalf("timeout") + } + } + }) + } } // helper function that compares reference and optimised implementations on diff --git a/swarm/fuse/fuse_file.go b/swarm/fuse/fuse_file.go index 80c26fe05fcd..be3b01c8c476 100644 --- a/swarm/fuse/fuse_file.go +++ b/swarm/fuse/fuse_file.go @@ -84,7 +84,7 @@ func (sf *SwarmFile) Attr(ctx context.Context, a *fuse.Attr) error { a.Gid = uint32(os.Getegid()) if sf.fileSize == -1 { - reader, _ := sf.mountInfo.swarmApi.Retrieve(sf.addr) + reader, _ := sf.mountInfo.swarmApi.Retrieve(ctx, sf.addr) quitC := make(chan bool) size, err := reader.Size(quitC) if err != nil { @@ -104,7 +104,7 @@ func (sf *SwarmFile) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse sf.lock.RLock() defer sf.lock.RUnlock() if sf.reader == nil { - sf.reader, _ = sf.mountInfo.swarmApi.Retrieve(sf.addr) + sf.reader, _ = sf.mountInfo.swarmApi.Retrieve(ctx, sf.addr) } buf := make([]byte, req.Size) n, err := sf.reader.ReadAt(buf, req.Offset) diff --git a/swarm/fuse/swarmfs_test.go b/swarm/fuse/swarmfs_test.go index ed2021c4e0ee..d579d15a02b8 100644 --- a/swarm/fuse/swarmfs_test.go +++ b/swarm/fuse/swarmfs_test.go @@ -20,6 +20,7 @@ package fuse import ( "bytes" + "context" "crypto/rand" "flag" "fmt" @@ -110,7 +111,7 @@ func createTestFilesAndUploadToSwarm(t *testing.T, api *api.API, files map[strin } //upload directory to swarm and return hash - bzzhash, err := api.Upload(uploadDir, "", toEncrypt) + bzzhash, err := api.Upload(context.TODO(), uploadDir, "", toEncrypt) if err != nil { t.Fatalf("Error uploading directory %v: %vm encryption: %v", uploadDir, err, toEncrypt) } diff --git a/swarm/fuse/swarmfs_unix.go b/swarm/fuse/swarmfs_unix.go index 74dd84a90351..7a913b0dee7d 100644 --- a/swarm/fuse/swarmfs_unix.go +++ b/swarm/fuse/swarmfs_unix.go @@ -19,6 +19,7 @@ package fuse import ( + "context" "errors" "fmt" "os" @@ -104,7 +105,7 @@ func (swarmfs *SwarmFS) Mount(mhash, mountpoint string) (*MountInfo, error) { } log.Trace("swarmfs mount: getting manifest tree") - _, manifestEntryMap, err := swarmfs.swarmApi.BuildDirectoryTree(mhash, true) + _, manifestEntryMap, err := swarmfs.swarmApi.BuildDirectoryTree(context.TODO(), mhash, true) if err != nil { return nil, err } diff --git a/swarm/fuse/swarmfs_util.go b/swarm/fuse/swarmfs_util.go index 9bbb0f6ac0e7..4f2e1416b615 100644 --- a/swarm/fuse/swarmfs_util.go +++ b/swarm/fuse/swarmfs_util.go @@ -47,7 +47,7 @@ func externalUnmount(mountPoint string) error { } func addFileToSwarm(sf *SwarmFile, content []byte, size int) error { - fkey, mhash, err := sf.mountInfo.swarmApi.AddFile(sf.mountInfo.LatestManifest, sf.path, sf.name, content, true) + fkey, mhash, err := sf.mountInfo.swarmApi.AddFile(context.TODO(), sf.mountInfo.LatestManifest, sf.path, sf.name, content, true) if err != nil { return err } @@ -66,7 +66,7 @@ func addFileToSwarm(sf *SwarmFile, content []byte, size int) error { } func removeFileFromSwarm(sf *SwarmFile) error { - mkey, err := sf.mountInfo.swarmApi.RemoveFile(sf.mountInfo.LatestManifest, sf.path, sf.name, true) + mkey, err := sf.mountInfo.swarmApi.RemoveFile(context.TODO(), sf.mountInfo.LatestManifest, sf.path, sf.name, true) if err != nil { return err } @@ -102,7 +102,7 @@ func removeDirectoryFromSwarm(sd 
*SwarmDir) error { } func appendToExistingFileInSwarm(sf *SwarmFile, content []byte, offset int64, length int64) error { - fkey, mhash, err := sf.mountInfo.swarmApi.AppendFile(sf.mountInfo.LatestManifest, sf.path, sf.name, sf.fileSize, content, sf.addr, offset, length, true) + fkey, mhash, err := sf.mountInfo.swarmApi.AppendFile(context.TODO(), sf.mountInfo.LatestManifest, sf.path, sf.name, sf.fileSize, content, sf.addr, offset, length, true) if err != nil { return err } diff --git a/swarm/metrics/flags.go b/swarm/metrics/flags.go index 795fc402ff08..79490fd36012 100644 --- a/swarm/metrics/flags.go +++ b/swarm/metrics/flags.go @@ -81,6 +81,9 @@ func Setup(ctx *cli.Context) { hosttag = ctx.GlobalString(metricsInfluxDBHostTagFlag.Name) ) + // Start system runtime metrics collection + go gethmetrics.CollectProcessMetrics(2 * time.Second) + if enableExport { log.Info("Enabling swarm metrics export to InfluxDB") go influxdb.InfluxDBWithTags(gethmetrics.DefaultRegistry, 10*time.Second, endpoint, database, username, password, "swarm.", map[string]string{ diff --git a/swarm/network/networkid_test.go b/swarm/network/networkid_test.go new file mode 100644 index 000000000000..05134b083b16 --- /dev/null +++ b/swarm/network/networkid_test.go @@ -0,0 +1,266 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package network + +import ( + "bytes" + "context" + "flag" + "fmt" + "math/rand" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/p2p/simulations" + "github.com/ethereum/go-ethereum/p2p/simulations/adapters" + "github.com/ethereum/go-ethereum/rpc" +) + +var ( + currentNetworkID int + cnt int + nodeMap map[int][]discover.NodeID + kademlias map[discover.NodeID]*Kademlia +) + +const ( + NumberOfNets = 4 + MaxTimeout = 6 +) + +func init() { + flag.Parse() + rand.Seed(time.Now().Unix()) +} + +/* +Run the network ID test. +The test creates one simulations.Network instance, +a number of nodes, then connects nodes with each other in this network. + +Each node gets a network ID assigned according to the number of networks. +Having more network IDs is just arbitrary in order to exclude +false positives. + +Nodes should only connect with other nodes with the same network ID. +After the setup phase, the test checks on each node if it has the +expected node connections (excluding those not sharing the network ID). +*/ +func TestNetworkID(t *testing.T) { + log.Debug("Start test") + //arbitrarily set the number of nodes. 
It could be any number + numNodes := 24 + //the nodeMap maps all nodes (slice value) with the same network ID (key) + nodeMap = make(map[int][]discover.NodeID) + //set up the network and connect nodes + net, err := setupNetwork(numNodes) + if err != nil { + t.Fatalf("Error setting up network: %v", err) + } + defer func() { + //shutdown the snapshot network + log.Trace("Shutting down network") + net.Shutdown() + }() + //let's sleep to ensure all nodes are connected + time.Sleep(1 * time.Second) + //for each group sharing the same network ID... + for _, netIDGroup := range nodeMap { + log.Trace("netIDGroup size", "size", len(netIDGroup)) + //...check that their size of the kademlia is of the expected size + //the assumption is that it should be the size of the group minus 1 (the node itself) + for _, node := range netIDGroup { + if kademlias[node].addrs.Size() != len(netIDGroup)-1 { + t.Fatalf("Kademlia size has not expected peer size. Kademlia size: %d, expected size: %d", kademlias[node].addrs.Size(), len(netIDGroup)-1) + } + kademlias[node].EachAddr(nil, 0, func(addr OverlayAddr, _ int, _ bool) bool { + found := false + for _, nd := range netIDGroup { + p := ToOverlayAddr(nd.Bytes()) + if bytes.Equal(p, addr.Address()) { + found = true + } + } + if !found { + t.Fatalf("Expected node not found for node %s", node.String()) + } + return true + }) + } + } + log.Info("Test terminated successfully") +} + +// setup simulated network with bzz/discovery and pss services. +// connects nodes in a circle +// if allowRaw is set, omission of builtin pss encryption is enabled (see PssParams) +func setupNetwork(numnodes int) (net *simulations.Network, err error) { + log.Debug("Setting up network") + quitC := make(chan struct{}) + errc := make(chan error) + nodes := make([]*simulations.Node, numnodes) + if numnodes < 16 { + return nil, fmt.Errorf("Minimum sixteen nodes in network") + } + adapter := adapters.NewSimAdapter(newServices()) + //create the network + net = simulations.NewNetwork(adapter, &simulations.NetworkConfig{ + ID: "NetworkIdTestNet", + DefaultService: "bzz", + }) + log.Debug("Creating networks and nodes") + + var connCount int + + //create nodes and connect them to each other + for i := 0; i < numnodes; i++ { + log.Trace("iteration: ", "i", i) + nodeconf := adapters.RandomNodeConfig() + nodes[i], err = net.NewNodeWithConfig(nodeconf) + if err != nil { + return nil, fmt.Errorf("error creating node %d: %v", i, err) + } + err = net.Start(nodes[i].ID()) + if err != nil { + return nil, fmt.Errorf("error starting node %d: %v", i, err) + } + client, err := nodes[i].Client() + if err != nil { + return nil, fmt.Errorf("create node %d rpc client fail: %v", i, err) + } + //now setup and start event watching in order to know when we can upload + ctx, watchCancel := context.WithTimeout(context.Background(), MaxTimeout*time.Second) + defer watchCancel() + watchSubscriptionEvents(ctx, nodes[i].ID(), client, errc, quitC) + //on every iteration we connect to all previous ones + for k := i - 1; k >= 0; k-- { + connCount++ + log.Debug(fmt.Sprintf("Connecting node %d with node %d; connection count is %d", i, k, connCount)) + err = net.Connect(nodes[i].ID(), nodes[k].ID()) + if err != nil { + if !strings.Contains(err.Error(), "already connected") { + return nil, fmt.Errorf("error connecting nodes: %v", err) + } + } + } + } + //now wait until the number of expected subscriptions has been finished + //`watchSubscriptionEvents` will write with a `nil` value to errc + for err := range errc { + if err != nil { + 
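// a non-nil value from watchSubscriptionEvents means a subscription attempt
// failed or timed out, so the whole network setup is aborted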
return nil, err + } + //`nil` received, decrement count + connCount-- + log.Trace("count down", "cnt", connCount) + //all subscriptions received + if connCount == 0 { + close(quitC) + break + } + } + log.Debug("Network setup phase terminated") + return net, nil +} + +func newServices() adapters.Services { + kademlias = make(map[discover.NodeID]*Kademlia) + kademlia := func(id discover.NodeID) *Kademlia { + if k, ok := kademlias[id]; ok { + return k + } + addr := NewAddrFromNodeID(id) + params := NewKadParams() + params.MinProxBinSize = 2 + params.MaxBinSize = 3 + params.MinBinSize = 1 + params.MaxRetries = 1000 + params.RetryExponent = 2 + params.RetryInterval = 1000000 + kademlias[id] = NewKademlia(addr.Over(), params) + return kademlias[id] + } + return adapters.Services{ + "bzz": func(ctx *adapters.ServiceContext) (node.Service, error) { + addr := NewAddrFromNodeID(ctx.Config.ID) + hp := NewHiveParams() + hp.Discovery = false + cnt++ + //assign the network ID + currentNetworkID = cnt % NumberOfNets + if ok := nodeMap[currentNetworkID]; ok == nil { + nodeMap[currentNetworkID] = make([]discover.NodeID, 0) + } + //add this node to the group sharing the same network ID + nodeMap[currentNetworkID] = append(nodeMap[currentNetworkID], ctx.Config.ID) + log.Debug("current network ID:", "id", currentNetworkID) + config := &BzzConfig{ + OverlayAddr: addr.Over(), + UnderlayAddr: addr.Under(), + HiveParams: hp, + NetworkID: uint64(currentNetworkID), + } + return NewBzz(config, kademlia(ctx.Config.ID), nil, nil, nil), nil + }, + } +} + +func watchSubscriptionEvents(ctx context.Context, id discover.NodeID, client *rpc.Client, errc chan error, quitC chan struct{}) { + events := make(chan *p2p.PeerEvent) + sub, err := client.Subscribe(context.Background(), "admin", events, "peerEvents") + if err != nil { + log.Error(err.Error()) + errc <- fmt.Errorf("error getting peer events for node %v: %s", id, err) + return + } + go func() { + defer func() { + sub.Unsubscribe() + log.Trace("watch subscription events: unsubscribe", "id", id) + }() + + for { + select { + case <-quitC: + return + case <-ctx.Done(): + select { + case errc <- ctx.Err(): + case <-quitC: + } + return + case e := <-events: + if e.Type == p2p.PeerEventTypeAdd { + errc <- nil + } + case err := <-sub.Err(): + if err != nil { + select { + case errc <- fmt.Errorf("error getting peer events for node %v: %v", id, err): + case <-quitC: + } + return + } + } + } + }() +} diff --git a/swarm/network/stream/common_test.go b/swarm/network/stream/common_test.go index 9d1f997f29fb..6a2c27401f86 100644 --- a/swarm/network/stream/common_test.go +++ b/swarm/network/stream/common_test.go @@ -250,7 +250,7 @@ func (r *TestRegistry) APIs() []rpc.API { } func readAll(fileStore *storage.FileStore, hash []byte) (int64, error) { - r, _ := fileStore.Retrieve(hash) + r, _ := fileStore.Retrieve(context.TODO(), hash) buf := make([]byte, 1024) var n int var total int64 diff --git a/swarm/network/stream/delivery_test.go b/swarm/network/stream/delivery_test.go index b03028c88816..cd87557b184d 100644 --- a/swarm/network/stream/delivery_test.go +++ b/swarm/network/stream/delivery_test.go @@ -345,9 +345,13 @@ func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck // here we distribute chunks of a random file into Stores of nodes 1 to nodes rrFileStore := storage.NewFileStore(newRoundRobinStore(sim.Stores[1:]...), storage.NewFileStoreParams()) size := chunkCount * chunkSize - fileHash, wait, err := rrFileStore.Store(io.LimitReader(crand.Reader, 
int64(size)), int64(size), false) + ctx := context.TODO() + fileHash, wait, err := rrFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false) // wait until all chunks stored - wait() + if err != nil { + t.Fatal(err.Error()) + } + err = wait(ctx) if err != nil { t.Fatal(err.Error()) } @@ -627,9 +631,13 @@ Loop: hashes := make([]storage.Address, chunkCount) for i := 0; i < chunkCount; i++ { // create actual size real chunks - hash, wait, err := remoteFileStore.Store(io.LimitReader(crand.Reader, int64(chunkSize)), int64(chunkSize), false) + ctx := context.TODO() + hash, wait, err := remoteFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(chunkSize)), int64(chunkSize), false) + if err != nil { + b.Fatalf("expected no error. got %v", err) + } // wait until all chunks stored - wait() + err = wait(ctx) if err != nil { b.Fatalf("expected no error. got %v", err) } diff --git a/swarm/network/stream/intervals_test.go b/swarm/network/stream/intervals_test.go index 4e2721cb0faa..d996cdc7e533 100644 --- a/swarm/network/stream/intervals_test.go +++ b/swarm/network/stream/intervals_test.go @@ -117,8 +117,12 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) { fileStore := storage.NewFileStore(sim.Stores[0], storage.NewFileStoreParams()) size := chunkCount * chunkSize - _, wait, err := fileStore.Store(io.LimitReader(crand.Reader, int64(size)), int64(size), false) - wait() + ctx := context.TODO() + _, wait, err := fileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false) + if err != nil { + t.Fatal(err) + } + err = wait(ctx) if err != nil { t.Fatal(err) } diff --git a/swarm/network/stream/snapshot_retrieval_test.go b/swarm/network/stream/snapshot_retrieval_test.go index 59c776c3027c..da5253e8af10 100644 --- a/swarm/network/stream/snapshot_retrieval_test.go +++ b/swarm/network/stream/snapshot_retrieval_test.go @@ -410,7 +410,7 @@ func runFileRetrievalTest(nodeCount int) error { fileStore := registries[id].fileStore //check all chunks for i, hash := range conf.hashes { - reader, _ := fileStore.Retrieve(hash) + reader, _ := fileStore.Retrieve(context.TODO(), hash) //check that we can read the file size and that it corresponds to the generated file size if s, err := reader.Size(nil); err != nil || s != int64(len(randomFiles[i])) { allSuccess = false @@ -697,7 +697,7 @@ func runRetrievalTest(chunkCount int, nodeCount int) error { fileStore := registries[id].fileStore //check all chunks for _, chnk := range conf.hashes { - reader, _ := fileStore.Retrieve(chnk) + reader, _ := fileStore.Retrieve(context.TODO(), chnk) //assuming that reading the Size of the chunk is enough to know we found it if s, err := reader.Size(nil); err != nil || s != chunkSize { allSuccess = false @@ -765,9 +765,13 @@ func uploadFilesToNodes(nodes []*simulations.Node) ([]storage.Address, []string, return nil, nil, err } //store it (upload it) on the FileStore - rk, wait, err := fileStore.Store(strings.NewReader(rfiles[i]), int64(len(rfiles[i])), false) + ctx := context.TODO() + rk, wait, err := fileStore.Store(ctx, strings.NewReader(rfiles[i]), int64(len(rfiles[i])), false) log.Debug("Uploaded random string file to node") - wait() + if err != nil { + return nil, nil, err + } + err = wait(ctx) if err != nil { return nil, nil, err } diff --git a/swarm/network/stream/snapshot_sync_test.go b/swarm/network/stream/snapshot_sync_test.go index ff1c39319d17..fd8863d435e4 100644 --- a/swarm/network/stream/snapshot_sync_test.go +++ 
b/swarm/network/stream/snapshot_sync_test.go @@ -581,8 +581,12 @@ func uploadFileToSingleNodeStore(id discover.NodeID, chunkCount int) ([]storage. fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams()) var rootAddrs []storage.Address for i := 0; i < chunkCount; i++ { - rk, wait, err := fileStore.Store(io.LimitReader(crand.Reader, int64(size)), int64(size), false) - wait() + ctx := context.TODO() + rk, wait, err := fileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false) + if err != nil { + return nil, err + } + err = wait(ctx) if err != nil { return nil, err } diff --git a/swarm/network/stream/syncer_test.go b/swarm/network/stream/syncer_test.go index 68e20841dfb8..5fea7befe3ce 100644 --- a/swarm/network/stream/syncer_test.go +++ b/swarm/network/stream/syncer_test.go @@ -202,9 +202,12 @@ func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck // here we distribute chunks of a random file into stores 1...nodes rrFileStore := storage.NewFileStore(newRoundRobinStore(sim.Stores[1:]...), storage.NewFileStoreParams()) size := chunkCount * chunkSize - _, wait, err := rrFileStore.Store(io.LimitReader(crand.Reader, int64(size)), int64(size), false) + _, wait, err := rrFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false) + if err != nil { + t.Fatal(err.Error()) + } // need to wait cos we then immediately collect the relevant bin content - wait() + wait(ctx) if err != nil { t.Fatal(err.Error()) } diff --git a/swarm/network_test.go b/swarm/network_test.go index c291fce3b60a..606a83be2213 100644 --- a/swarm/network_test.go +++ b/swarm/network_test.go @@ -508,14 +508,15 @@ func uploadFile(swarm *Swarm) (storage.Address, string, error) { // File data is very short, but it is ensured that its // uniqueness is very certain. 
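// (time.Now().Round(0) strips the monotonic clock reading so the timestamp
// formats deterministically; b presumably holds random bytes generated above)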
data := fmt.Sprintf("test content %s %x", time.Now().Round(0), b) - k, wait, err := swarm.api.Put(data, "text/plain", false) + ctx := context.TODO() + k, wait, err := swarm.api.Put(ctx, data, "text/plain", false) if err != nil { return nil, "", err } if wait != nil { - wait() + err = wait(ctx) } - return k, data, nil + return k, data, err } // retrieve is the function that is used for checking the availability of @@ -570,7 +571,7 @@ func retrieve( log.Debug("api get: check file", "node", id.String(), "key", f.addr.String(), "total files found", atomic.LoadUint64(totalFoundCount)) - r, _, _, _, err := swarm.api.Get(f.addr, "/") + r, _, _, _, err := swarm.api.Get(context.TODO(), f.addr, "/") if err != nil { errc <- fmt.Errorf("api get: node %s, key %s, kademlia %s: %v", id, f.addr, swarm.bzz.Hive, err) return diff --git a/swarm/pss/handshake.go b/swarm/pss/handshake.go index 3b44847ecc1c..e3ead77d0492 100644 --- a/swarm/pss/handshake.go +++ b/swarm/pss/handshake.go @@ -385,7 +385,7 @@ func (ctl *HandshakeController) sendKey(pubkeyid string, topic *Topic, keycount // generate new keys to send for i := 0; i < len(recvkeyids); i++ { var err error - recvkeyids[i], err = ctl.pss.generateSymmetricKey(*topic, to, true) + recvkeyids[i], err = ctl.pss.GenerateSymmetricKey(*topic, to, true) if err != nil { return []string{}, fmt.Errorf("set receive symkey fail (pubkey %x topic %x): %v", pubkeyid, topic, err) } diff --git a/swarm/pss/notify/notify.go b/swarm/pss/notify/notify.go new file mode 100644 index 000000000000..723092c32d2b --- /dev/null +++ b/swarm/pss/notify/notify.go @@ -0,0 +1,394 @@ +package notify + +import ( + "crypto/ecdsa" + "fmt" + "sync" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/swarm/log" + "github.com/ethereum/go-ethereum/swarm/pss" +) + +const ( + // sent from requester to updater to request start of notifications + MsgCodeStart = iota + + // sent from updater to requester, contains a notification plus a new symkey to replace the old + MsgCodeNotifyWithKey + + // sent from updater to requester, contains a notification + MsgCodeNotify + + // sent from requester to updater to request stop of notifications (currently unused) + MsgCodeStop + MsgCodeMax +) + +const ( + DefaultAddressLength = 1 + symKeyLength = 32 // this should be gotten from source +) + +var ( + // control topic is used before symmetric key issuance completes + controlTopic = pss.Topic{0x00, 0x00, 0x00, 0x01} +) + +// when code is MsgCodeStart, Payload is address +// when code is MsgCodeNotifyWithKey, Payload is notification | symkey +// when code is MsgCodeNotify, Payload is notification +// when code is MsgCodeStop, Payload is address +type Msg struct { + Code byte + Name []byte + Payload []byte + namestring string +} + +// NewMsg creates a new notification message object +func NewMsg(code byte, name string, payload []byte) *Msg { + return &Msg{ + Code: code, + Name: []byte(name), + Payload: payload, + namestring: name, + } +} + +// NewMsgFromPayload decodes a serialized message payload into a new notification message object +func NewMsgFromPayload(payload []byte) (*Msg, error) { + msg := &Msg{} + err := rlp.DecodeBytes(payload, msg) + if err != nil { + return nil, err + } + msg.namestring = string(msg.Name) + return msg, nil +} + +// a notifier has one sendBin entry for each address space it sends 
messages to +type sendBin struct { + address pss.PssAddress + symKeyId string + count int +} + +// represents a single notification service +// only subscription address bins that match the address of a notification client have entries. +type notifier struct { + bins map[string]*sendBin + topic pss.Topic // identifies the resource for pss receiver + threshold int // amount of address bytes used in bins + updateC <-chan []byte + quitC chan struct{} +} + +func (n *notifier) removeSubscription() { + n.quitC <- struct{}{} +} + +// represents an individual subscription made by a public key at a specific address/neighborhood +type subscription struct { + pubkeyId string + address pss.PssAddress + handler func(string, []byte) error +} + +// Controller is the interface to control, add and remove notification services and subscriptions +type Controller struct { + pss *pss.Pss + notifiers map[string]*notifier + subscriptions map[string]*subscription + mu sync.Mutex +} + +// NewController creates a new Controller object +func NewController(ps *pss.Pss) *Controller { + ctrl := &Controller{ + pss: ps, + notifiers: make(map[string]*notifier), + subscriptions: make(map[string]*subscription), + } + ctrl.pss.Register(&controlTopic, ctrl.Handler) + return ctrl +} + +// IsActive is used to check if a notification service exists for a specified id string +// Returns true if exists, false if not +func (c *Controller) IsActive(name string) bool { + c.mu.Lock() + defer c.mu.Unlock() + return c.isActive(name) +} + +func (c *Controller) isActive(name string) bool { + _, ok := c.notifiers[name] + return ok +} + +// Subscribe is used by a client to request notifications from a notification service provider +// It will create a MsgCodeStart message and send asymmetrically to the provider using its public key and routing address +// The handler function is a callback that will be called when notifications are received +// Fails if the request pss cannot be sent or if the update message could not be serialized +func (c *Controller) Subscribe(name string, pubkey *ecdsa.PublicKey, address pss.PssAddress, handler func(string, []byte) error) error { + c.mu.Lock() + defer c.mu.Unlock() + msg := NewMsg(MsgCodeStart, name, c.pss.BaseAddr()) + c.pss.SetPeerPublicKey(pubkey, controlTopic, &address) + pubkeyId := hexutil.Encode(crypto.FromECDSAPub(pubkey)) + smsg, err := rlp.EncodeToBytes(msg) + if err != nil { + return err + } + err = c.pss.SendAsym(pubkeyId, controlTopic, smsg) + if err != nil { + return err + } + c.subscriptions[name] = &subscription{ + pubkeyId: pubkeyId, + address: address, + handler: handler, + } + return nil +} + +// Unsubscribe, perhaps unsurprisingly, undoes the effects of Subscribe +// Fails if the subscription does not exist, if the request pss cannot be sent or if the update message could not be serialized +func (c *Controller) Unsubscribe(name string) error { + c.mu.Lock() + defer c.mu.Unlock() + sub, ok := c.subscriptions[name] + if !ok { + return fmt.Errorf("Unknown subscription '%s'", name) + } + msg := NewMsg(MsgCodeStop, name, sub.address) + smsg, err := rlp.EncodeToBytes(msg) + if err != nil { + return err + } + err = c.pss.SendAsym(sub.pubkeyId, controlTopic, smsg) + if err != nil { + return err + } + delete(c.subscriptions, name) + return nil +} + +// NewNotifier is used by a notification service provider to create a new notification service +// It takes a name as identifier for the resource, a threshold indicating the granularity of the subscription address bin +// It then starts an event 
+ +// NewNotifier is used by a notification service provider to create a new notification service +// It takes a name as identifier for the resource, a threshold indicating the granularity of the subscription address bin +// It then starts an event loop which listens to the supplied update channel and executes notifications on channel receives +// Fails if a notifier is already registered under the name +//func (c *Controller) NewNotifier(name string, threshold int, contentFunc func(string) ([]byte, error)) error { +func (c *Controller) NewNotifier(name string, threshold int, updateC <-chan []byte) (func(), error) { + c.mu.Lock() + if c.isActive(name) { + c.mu.Unlock() + return nil, fmt.Errorf("Notification service %s already exists in controller", name) + } + quitC := make(chan struct{}) + c.notifiers[name] = &notifier{ + bins: make(map[string]*sendBin), + topic: pss.BytesToTopic([]byte(name)), + threshold: threshold, + updateC: updateC, + quitC: quitC, + //contentFunc: contentFunc, + } + c.mu.Unlock() + go func() { + for { + select { + case <-quitC: + return + case data := <-updateC: + c.notify(name, data) + } + } + }() + + return c.notifiers[name].removeSubscription, nil +} + +// RemoveNotifier is used to stop a notification service. +// It cancels the event loop listening to the notification provider's update channel +func (c *Controller) RemoveNotifier(name string) error { + c.mu.Lock() + defer c.mu.Unlock() + currentNotifier, ok := c.notifiers[name] + if !ok { + return fmt.Errorf("Unknown notification service %s", name) + } + currentNotifier.removeSubscription() + delete(c.notifiers, name) + return nil +} + +// notify is called by a notification service provider to issue a new notification +// It takes the name of the notification service and the data to be sent. +// It fails if a notifier with this name does not exist or if the data could not be serialized +// Note that it does NOT fail on failure to send a message +func (c *Controller) notify(name string, data []byte) error { + c.mu.Lock() + defer c.mu.Unlock() + if !c.isActive(name) { + return fmt.Errorf("Notification service %s doesn't exist", name) + } + msg := NewMsg(MsgCodeNotify, name, data) + smsg, err := rlp.EncodeToBytes(msg) + if err != nil { + return err + } + for _, m := range c.notifiers[name].bins { + log.Debug("sending pss notify", "name", name, "addr", fmt.Sprintf("%x", m.address), "topic", fmt.Sprintf("%x", c.notifiers[name].topic), "data", data) + go func(m *sendBin) { + if err := c.pss.SendSym(m.symKeyId, c.notifiers[name].topic, smsg); err != nil { + log.Warn("failed to send notify", "addr", fmt.Sprintf("%x", m.address), "err", err) + } + }(m) + } + return nil +} + +// check if we already have the bin +// if we do, retrieve the symkey from it and increment the count +// if we don't, make a new symkey and a new bin entry +func (c *Controller) addToBin(ntfr *notifier, address []byte) (symKeyId string, pssAddress pss.PssAddress, err error) { + + // parse the address from the message and truncate if longer than our bins' threshold + if len(address) > ntfr.threshold { + address = address[:ntfr.threshold] + } + + pssAddress = pss.PssAddress(address) + hexAddress := fmt.Sprintf("%x", address) + currentBin, ok := ntfr.bins[hexAddress] + if ok { + currentBin.count++ + symKeyId = currentBin.symKeyId + } else { + symKeyId, err = c.pss.GenerateSymmetricKey(ntfr.topic, &pssAddress, false) + if err != nil { + return "", nil, err + } + ntfr.bins[hexAddress] = &sendBin{ + address: address, + symKeyId: symKeyId, + count: 1, + } + } + return symKeyId, pssAddress, nil +}
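To make the binning concrete: with threshold = 2, subscriber addresses are truncated to their first two bytes before lookup, so clients that share an address prefix share a bin, and therefore a symmetric key (illustrative values only, not part of this change):

	// threshold = 2
	// subscriber A, address 0xabcd1234... -> bin "abcd", new symkey generated, count = 1
	// subscriber B, address 0xabcdffff... -> bin "abcd", same symkey reused, count = 2
	// subscriber C, address 0x00ff0000... -> bin "00ff", new symkey generated, count = 1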
+ +func (c *Controller) handleStartMsg(msg *Msg, keyid string) (err error) { + + keyidbytes, err := hexutil.Decode(keyid) + if err != nil { + return err + } + pubkey, err := crypto.UnmarshalPubkey(keyidbytes) + if err != nil { + return err + } + + // if name is not registered for notifications we will not react + currentNotifier, ok := c.notifiers[msg.namestring] + if !ok { + return fmt.Errorf("Subscribe attempted on unknown resource '%s'", msg.namestring) + } + + // add to or open new bin + symKeyId, pssAddress, err := c.addToBin(currentNotifier, msg.Payload) + if err != nil { + return err + } + + // add to address book for sending the initial notify + symkey, err := c.pss.GetSymmetricKey(symKeyId) + if err != nil { + return err + } + err = c.pss.SetPeerPublicKey(pubkey, controlTopic, &pssAddress) + if err != nil { + return err + } + + // TODO this is set to a zero-length byte slice pending a decision on the protocol for the initial message, whether it should include a message or not, and how to trigger the initial message so that the current state of the MRU is sent upon subscription + notify := []byte{} + replyMsg := NewMsg(MsgCodeNotifyWithKey, msg.namestring, make([]byte, len(notify)+symKeyLength)) + copy(replyMsg.Payload, notify) + copy(replyMsg.Payload[len(notify):], symkey) + sReplyMsg, err := rlp.EncodeToBytes(replyMsg) + if err != nil { + return err + } + return c.pss.SendAsym(keyid, controlTopic, sReplyMsg) +} + +func (c *Controller) handleNotifyWithKeyMsg(msg *Msg) error { + symkey := msg.Payload[len(msg.Payload)-symKeyLength:] + topic := pss.BytesToTopic(msg.Name) + + // TODO keep track of and add the actual address + updaterAddr := pss.PssAddress([]byte{}) + c.pss.SetSymmetricKey(symkey, topic, &updaterAddr, true) + c.pss.Register(&topic, c.Handler) + return c.subscriptions[msg.namestring].handler(msg.namestring, msg.Payload[:len(msg.Payload)-symKeyLength]) +} + +func (c *Controller) handleStopMsg(msg *Msg) error { + // if name is not registered for notifications we will not react + currentNotifier, ok := c.notifiers[msg.namestring] + if !ok { + return fmt.Errorf("Unsubscribe attempted on unknown resource '%s'", msg.namestring) + } + + // parse the address from the message and truncate if longer than our bins' address length threshold + address := msg.Payload + if len(msg.Payload) > currentNotifier.threshold { + address = address[:currentNotifier.threshold] + } + + // remove the entry from the bin if it exists, and remove the bin if it's the last remaining one + hexAddress := fmt.Sprintf("%x", address) + currentBin, ok := currentNotifier.bins[hexAddress] + if !ok { + return fmt.Errorf("found no active bin for address %s", hexAddress) + } + currentBin.count-- + if currentBin.count == 0 { // if no more clients in this bin, remove it + delete(currentNotifier.bins, hexAddress) + } + return nil +} + +// Handler is the pss topic handler to be used to process notification service messages +// It should be registered in the pss of both notification service providers and clients using the service +func (c *Controller) Handler(smsg []byte, p *p2p.Peer, asymmetric bool, keyid string) error { + c.mu.Lock() + defer c.mu.Unlock() + log.Debug("notify controller handler", "keyid", keyid) + + // see if the message is valid + msg, err := NewMsgFromPayload(smsg) + if err != nil { + return err + } + + switch msg.Code { + case MsgCodeStart: + return c.handleStartMsg(msg, keyid) + case MsgCodeNotifyWithKey: + return c.handleNotifyWithKeyMsg(msg) + case MsgCodeNotify: + return c.subscriptions[msg.namestring].handler(msg.namestring, msg.Payload) + case MsgCodeStop: + return c.handleStopMsg(msg) + } + + return fmt.Errorf("Invalid message code: %d", msg.Code) +} diff --git a/swarm/pss/notify/notify_test.go b/swarm/pss/notify/notify_test.go new file mode 100644 index
000000000000..3c655f215ccb --- /dev/null +++ b/swarm/pss/notify/notify_test.go @@ -0,0 +1,252 @@ +package notify + +import ( + "bytes" + "context" + "flag" + "fmt" + "os" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/p2p/simulations" + "github.com/ethereum/go-ethereum/p2p/simulations/adapters" + "github.com/ethereum/go-ethereum/swarm/network" + "github.com/ethereum/go-ethereum/swarm/pss" + "github.com/ethereum/go-ethereum/swarm/state" + whisper "github.com/ethereum/go-ethereum/whisper/whisperv5" +) + +var ( + loglevel = flag.Int("l", 3, "loglevel") + psses map[string]*pss.Pss + w *whisper.Whisper + wapi *whisper.PublicWhisperAPI +) + +func init() { + flag.Parse() + hs := log.StreamHandler(os.Stderr, log.TerminalFormat(true)) + hf := log.LvlFilterHandler(log.Lvl(*loglevel), hs) + h := log.CallerFileHandler(hf) + log.Root().SetHandler(h) + + w = whisper.New(&whisper.DefaultConfig) + wapi = whisper.NewPublicWhisperAPI(w) + psses = make(map[string]*pss.Pss) +} + +// Creates a client node and notifier node +// Client sends pss notifications requests +// notifier sends initial notification with symmetric key, and +// second notification symmetrically encrypted +func TestStart(t *testing.T) { + adapter := adapters.NewSimAdapter(newServices(false)) + net := simulations.NewNetwork(adapter, &simulations.NetworkConfig{ + ID: "0", + DefaultService: "bzz", + }) + leftNodeConf := adapters.RandomNodeConfig() + leftNodeConf.Services = []string{"bzz", "pss"} + leftNode, err := net.NewNodeWithConfig(leftNodeConf) + if err != nil { + t.Fatal(err) + } + err = net.Start(leftNode.ID()) + if err != nil { + t.Fatal(err) + } + + rightNodeConf := adapters.RandomNodeConfig() + rightNodeConf.Services = []string{"bzz", "pss"} + rightNode, err := net.NewNodeWithConfig(rightNodeConf) + if err != nil { + t.Fatal(err) + } + err = net.Start(rightNode.ID()) + if err != nil { + t.Fatal(err) + } + + err = net.Connect(rightNode.ID(), leftNode.ID()) + if err != nil { + t.Fatal(err) + } + + leftRpc, err := leftNode.Client() + if err != nil { + t.Fatal(err) + } + + rightRpc, err := rightNode.Client() + if err != nil { + t.Fatal(err) + } + + var leftAddr string + err = leftRpc.Call(&leftAddr, "pss_baseAddr") + if err != nil { + t.Fatal(err) + } + + var rightAddr string + err = rightRpc.Call(&rightAddr, "pss_baseAddr") + if err != nil { + t.Fatal(err) + } + + var leftPub string + err = leftRpc.Call(&leftPub, "pss_getPublicKey") + if err != nil { + t.Fatal(err) + } + + var rightPub string + err = rightRpc.Call(&rightPub, "pss_getPublicKey") + if err != nil { + t.Fatal(err) + } + + rsrcName := "foo.eth" + rsrcTopic := pss.BytesToTopic([]byte(rsrcName)) + + // wait for kademlia table to populate + time.Sleep(time.Second) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) + defer cancel() + rmsgC := make(chan *pss.APIMsg) + rightSub, err := rightRpc.Subscribe(ctx, "pss", rmsgC, "receive", controlTopic) + if err != nil { + t.Fatal(err) + } + defer rightSub.Unsubscribe() + + updateC := make(chan []byte) + updateMsg := []byte{} + ctrlClient := NewController(psses[rightPub]) + ctrlNotifier := NewController(psses[leftPub]) + ctrlNotifier.NewNotifier("foo.eth", 2, updateC) + + pubkeybytes, err := 
hexutil.Decode(leftPub) + if err != nil { + t.Fatal(err) + } + pubkey, err := crypto.UnmarshalPubkey(pubkeybytes) + if err != nil { + t.Fatal(err) + } + addrbytes, err := hexutil.Decode(leftAddr) + if err != nil { + t.Fatal(err) + } + ctrlClient.Subscribe(rsrcName, pubkey, addrbytes, func(s string, b []byte) error { + if s != "foo.eth" || !bytes.Equal(updateMsg, b) { + t.Fatalf("unexpected result in client handler: '%s':'%x'", s, b) + } + log.Info("client handler receive", "s", s, "b", b) + return nil + }) + + var inMsg *pss.APIMsg + select { + case inMsg = <-rmsgC: + case <-ctx.Done(): + t.Fatal(ctx.Err()) + } + + dMsg, err := NewMsgFromPayload(inMsg.Msg) + if err != nil { + t.Fatal(err) + } + if dMsg.namestring != rsrcName { + t.Fatalf("expected name '%s', got '%s'", rsrcName, dMsg.namestring) + } + if !bytes.Equal(dMsg.Payload[:len(updateMsg)], updateMsg) { + t.Fatalf("expected payload first %d bytes '%x', got '%x'", len(updateMsg), updateMsg, dMsg.Payload[:len(updateMsg)]) + } + if len(updateMsg)+symKeyLength != len(dMsg.Payload) { + t.Fatalf("expected payload length %d, have %d", len(updateMsg)+symKeyLength, len(dMsg.Payload)) + } + + rightSubUpdate, err := rightRpc.Subscribe(ctx, "pss", rmsgC, "receive", rsrcTopic) + if err != nil { + t.Fatal(err) + } + defer rightSubUpdate.Unsubscribe() + + updateMsg = []byte("plugh") + updateC <- updateMsg + select { + case inMsg = <-rmsgC: + case <-ctx.Done(): + log.Error("timed out waiting for msg", "topic", fmt.Sprintf("%x", rsrcTopic)) + t.Fatal(ctx.Err()) + } + dMsg, err = NewMsgFromPayload(inMsg.Msg) + if err != nil { + t.Fatal(err) + } + if dMsg.namestring != rsrcName { + t.Fatalf("expected name %s, got %s", rsrcName, dMsg.namestring) + } + if !bytes.Equal(dMsg.Payload, updateMsg) { + t.Fatalf("expected payload '%x', got '%x'", updateMsg, dMsg.Payload) + } + +} + +func newServices(allowRaw bool) adapters.Services { + stateStore := state.NewInmemoryStore() + kademlias := make(map[discover.NodeID]*network.Kademlia) + kademlia := func(id discover.NodeID) *network.Kademlia { + if k, ok := kademlias[id]; ok { + return k + } + addr := network.NewAddrFromNodeID(id) + params := network.NewKadParams() + params.MinProxBinSize = 2 + params.MaxBinSize = 3 + params.MinBinSize = 1 + params.MaxRetries = 1000 + params.RetryExponent = 2 + params.RetryInterval = 1000000 + kademlias[id] = network.NewKademlia(addr.Over(), params) + return kademlias[id] + } + return adapters.Services{ + "pss": func(ctx *adapters.ServiceContext) (node.Service, error) { + ctxlocal, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + keys, err := wapi.NewKeyPair(ctxlocal) + privkey, err := w.GetPrivateKey(keys) + pssp := pss.NewPssParams().WithPrivateKey(privkey) + pssp.MsgTTL = time.Second * 30 + pssp.AllowRaw = allowRaw + pskad := kademlia(ctx.Config.ID) + ps, err := pss.NewPss(pskad, pssp) + if err != nil { + return nil, err + } + //psses[common.ToHex(crypto.FromECDSAPub(&privkey.PublicKey))] = ps + psses[hexutil.Encode(crypto.FromECDSAPub(&privkey.PublicKey))] = ps + return ps, nil + }, + "bzz": func(ctx *adapters.ServiceContext) (node.Service, error) { + addr := network.NewAddrFromNodeID(ctx.Config.ID) + hp := network.NewHiveParams() + hp.Discovery = false + config := &network.BzzConfig{ + OverlayAddr: addr.Over(), + UnderlayAddr: addr.Under(), + HiveParams: hp, + } + return network.NewBzz(config, kademlia(ctx.Config.ID), stateStore, nil, nil), nil + }, + } +} diff --git a/swarm/pss/protocol.go b/swarm/pss/protocol.go index 
bf23e49dafad..5fcae090efb9 100644 --- a/swarm/pss/protocol.go +++ b/swarm/pss/protocol.go @@ -172,6 +172,8 @@ func (p *Protocol) Handle(msg []byte, peer *p2p.Peer, asymmetric bool, keyid str rw, err := p.AddPeer(peer, *p.topic, asymmetric, keyid) if err != nil { return err + } else if rw == nil { + return fmt.Errorf("handle called on nil MsgReadWriter for new key " + keyid) } vrw = rw.(*PssReadWriter) } @@ -181,8 +183,14 @@ func (p *Protocol) Handle(msg []byte, peer *p2p.Peer, asymmetric bool, keyid str return fmt.Errorf("could not decode pssmsg") } if asymmetric { + if p.pubKeyRWPool[keyid] == nil { + return fmt.Errorf("handle called on nil MsgReadWriter for key " + keyid) + } vrw = p.pubKeyRWPool[keyid].(*PssReadWriter) } else { + if p.symKeyRWPool[keyid] == nil { + return fmt.Errorf("handle called on nil MsgReadWriter for key " + keyid) + } vrw = p.symKeyRWPool[keyid].(*PssReadWriter) } vrw.injectMsg(pmsg) diff --git a/swarm/pss/pss.go b/swarm/pss/pss.go index 77191b25a012..dd081e93a569 100644 --- a/swarm/pss/pss.go +++ b/swarm/pss/pss.go @@ -41,7 +41,7 @@ import ( const ( defaultPaddingByteSize = 16 - defaultMsgTTL = time.Second * 120 + DefaultMsgTTL = time.Second * 120 defaultDigestCacheTTL = time.Second * 10 defaultSymKeyCacheCapacity = 512 digestLength = 32 // byte length of digest used for pss cache (currently same as swarm chunk hash) @@ -94,7 +94,7 @@ type PssParams struct { // Sane defaults for Pss func NewPssParams() *PssParams { return &PssParams{ - MsgTTL: defaultMsgTTL, + MsgTTL: DefaultMsgTTL, CacheTTL: defaultDigestCacheTTL, SymKeyCacheCapacity: defaultSymKeyCacheCapacity, } @@ -354,11 +354,11 @@ func (p *Pss) handlePssMsg(msg interface{}) error { } if int64(pssmsg.Expire) < time.Now().Unix() { metrics.GetOrRegisterCounter("pss.expire", nil).Inc(1) - log.Warn("pss filtered expired message", "from", fmt.Sprintf("%x", p.Overlay.BaseAddr()), "to", fmt.Sprintf("%x", common.ToHex(pssmsg.To))) + log.Warn("pss filtered expired message", "from", common.ToHex(p.Overlay.BaseAddr()), "to", common.ToHex(pssmsg.To)) return nil } if p.checkFwdCache(pssmsg) { - log.Trace(fmt.Sprintf("pss relay block-cache match (process): FROM %x TO %x", p.Overlay.BaseAddr(), common.ToHex(pssmsg.To))) + log.Trace("pss relay block-cache match (process)", "from", common.ToHex(p.Overlay.BaseAddr()), "to", (common.ToHex(pssmsg.To))) return nil } p.addFwdCache(pssmsg) @@ -480,7 +480,7 @@ func (p *Pss) SetPeerPublicKey(pubkey *ecdsa.PublicKey, topic Topic, address *Ps } // Automatically generate a new symkey for a topic and address hint -func (p *Pss) generateSymmetricKey(topic Topic, address *PssAddress, addToCache bool) (string, error) { +func (p *Pss) GenerateSymmetricKey(topic Topic, address *PssAddress, addToCache bool) (string, error) { keyid, err := p.w.GenerateSymKey() if err != nil { return "", err diff --git a/swarm/pss/pss_test.go b/swarm/pss/pss_test.go index a59a5e4270a4..c738247f1f4c 100644 --- a/swarm/pss/pss_test.go +++ b/swarm/pss/pss_test.go @@ -470,7 +470,7 @@ func TestKeys(t *testing.T) { } // make a symmetric key that we will send to peer for encrypting messages to us - inkeyid, err := ps.generateSymmetricKey(topicobj, &addr, true) + inkeyid, err := ps.GenerateSymmetricKey(topicobj, &addr, true) if err != nil { t.Fatalf("failed to set 'our' incoming symmetric key") } @@ -1296,7 +1296,7 @@ func benchmarkSymKeySend(b *testing.B) { topic := BytesToTopic([]byte("foo")) to := make(PssAddress, 32) copy(to[:], network.RandomAddr().Over()) - symkeyid, err := ps.generateSymmetricKey(topic, &to, 
true) + symkeyid, err := ps.GenerateSymmetricKey(topic, &to, true) if err != nil { b.Fatalf("could not generate symkey: %v", err) } @@ -1389,7 +1389,7 @@ func benchmarkSymkeyBruteforceChangeaddr(b *testing.B) { for i := 0; i < int(keycount); i++ { to := make(PssAddress, 32) copy(to[:], network.RandomAddr().Over()) - keyid, err = ps.generateSymmetricKey(topic, &to, true) + keyid, err = ps.GenerateSymmetricKey(topic, &to, true) if err != nil { b.Fatalf("can't generate symkey #%d: %v", i, err) } @@ -1471,7 +1471,7 @@ func benchmarkSymkeyBruteforceSameaddr(b *testing.B) { topic := BytesToTopic([]byte("foo")) for i := 0; i < int(keycount); i++ { copy(addr[i], network.RandomAddr().Over()) - keyid, err = ps.generateSymmetricKey(topic, &addr[i], true) + keyid, err = ps.GenerateSymmetricKey(topic, &addr[i], true) if err != nil { b.Fatalf("can't generate symkey #%d: %v", i, err) } diff --git a/swarm/storage/chunker.go b/swarm/storage/chunker.go index 5780742e38a4..2d197fefa930 100644 --- a/swarm/storage/chunker.go +++ b/swarm/storage/chunker.go @@ -16,6 +16,7 @@ package storage import ( + "context" "encoding/binary" "errors" "fmt" @@ -126,7 +127,7 @@ type TreeChunker struct { The chunks are not meant to be validated by the chunker when joining. This is because it is left to the DPA to decide which sources are trusted. */ -func TreeJoin(addr Address, getter Getter, depth int) *LazyChunkReader { +func TreeJoin(ctx context.Context, addr Address, getter Getter, depth int) *LazyChunkReader { jp := &JoinerParams{ ChunkerParams: ChunkerParams{ chunkSize: DefaultChunkSize, @@ -137,14 +138,14 @@ func TreeJoin(addr Address, getter Getter, depth int) *LazyChunkReader { depth: depth, } - return NewTreeJoiner(jp).Join() + return NewTreeJoiner(jp).Join(ctx) } /* When splitting, data is given as a SectionReader, and the key is a hashSize long byte slice (Key), the root hash of the entire content will fill this once processing finishes. New chunks to store are stored using the putter which the caller provides.
*/ -func TreeSplit(data io.Reader, size int64, putter Putter) (k Address, wait func(), err error) { +func TreeSplit(ctx context.Context, data io.Reader, size int64, putter Putter) (k Address, wait func(context.Context) error, err error) { tsp := &TreeSplitterParams{ SplitterParams: SplitterParams{ ChunkerParams: ChunkerParams{ @@ -156,7 +157,7 @@ func TreeSplit(data io.Reader, size int64, putter Putter) (k Address, wait func( }, size: size, } - return NewTreeSplitter(tsp).Split() + return NewTreeSplitter(tsp).Split(ctx) } func NewTreeJoiner(params *JoinerParams) *TreeChunker { @@ -224,7 +225,7 @@ func (tc *TreeChunker) decrementWorkerCount() { tc.workerCount -= 1 } -func (tc *TreeChunker) Split() (k Address, wait func(), err error) { +func (tc *TreeChunker) Split(ctx context.Context) (k Address, wait func(context.Context) error, err error) { if tc.chunkSize <= 0 { panic("chunker must be initialised") } @@ -380,7 +381,7 @@ type LazyChunkReader struct { getter Getter } -func (tc *TreeChunker) Join() *LazyChunkReader { +func (tc *TreeChunker) Join(ctx context.Context) *LazyChunkReader { return &LazyChunkReader{ key: tc.addr, chunkSize: tc.chunkSize, diff --git a/swarm/storage/chunker_test.go b/swarm/storage/chunker_test.go index d8be13ef6bb2..69c388b39eba 100644 --- a/swarm/storage/chunker_test.go +++ b/swarm/storage/chunker_test.go @@ -18,6 +18,7 @@ package storage import ( "bytes" + "context" "crypto/rand" "encoding/binary" "errors" @@ -81,7 +82,7 @@ func testRandomBrokenData(n int, tester *chunkerTester) { putGetter := newTestHasherStore(NewMapChunkStore(), SHA3Hash) expectedError := fmt.Errorf("Broken reader") - addr, _, err := TreeSplit(brokendata, int64(n), putGetter) + addr, _, err := TreeSplit(context.TODO(), brokendata, int64(n), putGetter) if err == nil || err.Error() != expectedError.Error() { tester.t.Fatalf("Not receiving the correct error! 
Expected %v, received %v", expectedError, err) } @@ -104,20 +105,24 @@ func testRandomData(usePyramid bool, hash string, n int, tester *chunkerTester) putGetter := newTestHasherStore(NewMapChunkStore(), hash) var addr Address - var wait func() + var wait func(context.Context) error var err error + ctx := context.TODO() if usePyramid { - addr, wait, err = PyramidSplit(data, putGetter, putGetter) + addr, wait, err = PyramidSplit(ctx, data, putGetter, putGetter) } else { - addr, wait, err = TreeSplit(data, int64(n), putGetter) + addr, wait, err = TreeSplit(ctx, data, int64(n), putGetter) } if err != nil { tester.t.Fatalf(err.Error()) } tester.t.Logf(" Key = %v\n", addr) - wait() + err = wait(ctx) + if err != nil { + tester.t.Fatalf(err.Error()) + } - reader := TreeJoin(addr, putGetter, 0) + reader := TreeJoin(context.TODO(), addr, putGetter, 0) output := make([]byte, n) r, err := reader.Read(output) if r != n || err != io.EOF { @@ -200,11 +205,15 @@ func TestDataAppend(t *testing.T) { chunkStore := NewMapChunkStore() putGetter := newTestHasherStore(chunkStore, SHA3Hash) - addr, wait, err := PyramidSplit(data, putGetter, putGetter) + ctx := context.TODO() + addr, wait, err := PyramidSplit(ctx, data, putGetter, putGetter) + if err != nil { + tester.t.Fatalf(err.Error()) + } + err = wait(ctx) if err != nil { tester.t.Fatalf(err.Error()) } - wait() //create a append data stream appendInput, found := tester.inputs[uint64(m)] @@ -217,13 +226,16 @@ func TestDataAppend(t *testing.T) { } putGetter = newTestHasherStore(chunkStore, SHA3Hash) - newAddr, wait, err := PyramidAppend(addr, appendData, putGetter, putGetter) + newAddr, wait, err := PyramidAppend(ctx, addr, appendData, putGetter, putGetter) + if err != nil { + tester.t.Fatalf(err.Error()) + } + err = wait(ctx) if err != nil { tester.t.Fatalf(err.Error()) } - wait() - reader := TreeJoin(newAddr, putGetter, 0) + reader := TreeJoin(ctx, newAddr, putGetter, 0) newOutput := make([]byte, n+m) r, err := reader.Read(newOutput) if r != (n + m) { @@ -282,12 +294,16 @@ func benchmarkSplitJoin(n int, t *testing.B) { data := testDataReader(n) putGetter := newTestHasherStore(NewMapChunkStore(), SHA3Hash) - key, wait, err := PyramidSplit(data, putGetter, putGetter) + ctx := context.TODO() + key, wait, err := PyramidSplit(ctx, data, putGetter, putGetter) if err != nil { t.Fatalf(err.Error()) } - wait() - reader := TreeJoin(key, putGetter, 0) + err = wait(ctx) + if err != nil { + t.Fatalf(err.Error()) + } + reader := TreeJoin(ctx, key, putGetter, 0) benchReadAll(reader) } } @@ -298,7 +314,7 @@ func benchmarkSplitTreeSHA3(n int, t *testing.B) { data := testDataReader(n) putGetter := newTestHasherStore(&fakeChunkStore{}, SHA3Hash) - _, _, err := TreeSplit(data, int64(n), putGetter) + _, _, err := TreeSplit(context.TODO(), data, int64(n), putGetter) if err != nil { t.Fatalf(err.Error()) } @@ -311,7 +327,7 @@ func benchmarkSplitTreeBMT(n int, t *testing.B) { data := testDataReader(n) putGetter := newTestHasherStore(&fakeChunkStore{}, BMTHash) - _, _, err := TreeSplit(data, int64(n), putGetter) + _, _, err := TreeSplit(context.TODO(), data, int64(n), putGetter) if err != nil { t.Fatalf(err.Error()) } @@ -324,7 +340,7 @@ func benchmarkSplitPyramidSHA3(n int, t *testing.B) { data := testDataReader(n) putGetter := newTestHasherStore(&fakeChunkStore{}, SHA3Hash) - _, _, err := PyramidSplit(data, putGetter, putGetter) + _, _, err := PyramidSplit(context.TODO(), data, putGetter, putGetter) if err != nil { t.Fatalf(err.Error()) } @@ -338,7 +354,7 @@ func 
benchmarkSplitPyramidBMT(n int, t *testing.B) { data := testDataReader(n) putGetter := newTestHasherStore(&fakeChunkStore{}, BMTHash) - _, _, err := PyramidSplit(data, putGetter, putGetter) + _, _, err := PyramidSplit(context.TODO(), data, putGetter, putGetter) if err != nil { t.Fatalf(err.Error()) } @@ -354,18 +370,25 @@ func benchmarkSplitAppendPyramid(n, m int, t *testing.B) { chunkStore := NewMapChunkStore() putGetter := newTestHasherStore(chunkStore, SHA3Hash) - key, wait, err := PyramidSplit(data, putGetter, putGetter) + ctx := context.TODO() + key, wait, err := PyramidSplit(ctx, data, putGetter, putGetter) + if err != nil { + t.Fatalf(err.Error()) + } + err = wait(ctx) if err != nil { t.Fatalf(err.Error()) } - wait() putGetter = newTestHasherStore(chunkStore, SHA3Hash) - _, wait, err = PyramidAppend(key, data1, putGetter, putGetter) + _, wait, err = PyramidAppend(ctx, key, data1, putGetter, putGetter) + if err != nil { + t.Fatalf(err.Error()) + } + err = wait(ctx) if err != nil { t.Fatalf(err.Error()) } - wait() } } diff --git a/swarm/storage/filestore.go b/swarm/storage/filestore.go index c0b463debdd1..2d8d82d95a50 100644 --- a/swarm/storage/filestore.go +++ b/swarm/storage/filestore.go @@ -17,6 +17,7 @@ package storage import ( + "context" "io" ) @@ -78,18 +79,18 @@ func NewFileStore(store ChunkStore, params *FileStoreParams) *FileStore { // Chunk retrieval blocks on netStore requests with a timeout so reader will // report error if retrieval of chunks within requested range time out. // It returns a reader with the chunk data and whether the content was encrypted -func (f *FileStore) Retrieve(addr Address) (reader *LazyChunkReader, isEncrypted bool) { +func (f *FileStore) Retrieve(ctx context.Context, addr Address) (reader *LazyChunkReader, isEncrypted bool) { isEncrypted = len(addr) > f.hashFunc().Size() getter := NewHasherStore(f.ChunkStore, f.hashFunc, isEncrypted) - reader = TreeJoin(addr, getter, 0) + reader = TreeJoin(ctx, addr, getter, 0) return } // Public API. Main entry point for document storage directly. 
Used by the // FS-aware API and httpaccess -func (f *FileStore) Store(data io.Reader, size int64, toEncrypt bool) (addr Address, wait func(), err error) { +func (f *FileStore) Store(ctx context.Context, data io.Reader, size int64, toEncrypt bool) (addr Address, wait func(context.Context) error, err error) { putter := NewHasherStore(f.ChunkStore, f.hashFunc, toEncrypt) - return PyramidSplit(data, putter, putter) + return PyramidSplit(ctx, data, putter, putter) } func (f *FileStore) HashSize() int { diff --git a/swarm/storage/filestore_test.go b/swarm/storage/filestore_test.go index 1aaec5e5cc4b..f3f597255884 100644 --- a/swarm/storage/filestore_test.go +++ b/swarm/storage/filestore_test.go @@ -18,6 +18,7 @@ package storage import ( "bytes" + "context" "io" "io/ioutil" "os" @@ -49,12 +50,16 @@ func testFileStoreRandom(toEncrypt bool, t *testing.T) { defer os.RemoveAll("/tmp/bzz") reader, slice := generateRandomData(testDataSize) - key, wait, err := fileStore.Store(reader, testDataSize, toEncrypt) + ctx := context.TODO() + key, wait, err := fileStore.Store(ctx, reader, testDataSize, toEncrypt) if err != nil { t.Errorf("Store error: %v", err) } - wait() - resultReader, isEncrypted := fileStore.Retrieve(key) + err = wait(ctx) + if err != nil { + t.Fatalf("Store wait error: %v", err.Error()) + } + resultReader, isEncrypted := fileStore.Retrieve(context.TODO(), key) if isEncrypted != toEncrypt { t.Fatalf("isEncrypted expected %v got %v", toEncrypt, isEncrypted) } @@ -72,7 +77,7 @@ func testFileStoreRandom(toEncrypt bool, t *testing.T) { ioutil.WriteFile("/tmp/slice.bzz.16M", slice, 0666) ioutil.WriteFile("/tmp/result.bzz.16M", resultSlice, 0666) localStore.memStore = NewMemStore(NewDefaultStoreParams(), db) - resultReader, isEncrypted = fileStore.Retrieve(key) + resultReader, isEncrypted = fileStore.Retrieve(context.TODO(), key) if isEncrypted != toEncrypt { t.Fatalf("isEncrypted expected %v got %v", toEncrypt, isEncrypted) } @@ -110,12 +115,16 @@ func testFileStoreCapacity(toEncrypt bool, t *testing.T) { } fileStore := NewFileStore(localStore, NewFileStoreParams()) reader, slice := generateRandomData(testDataSize) - key, wait, err := fileStore.Store(reader, testDataSize, toEncrypt) + ctx := context.TODO() + key, wait, err := fileStore.Store(ctx, reader, testDataSize, toEncrypt) + if err != nil { + t.Errorf("Store error: %v", err) + } + err = wait(ctx) if err != nil { t.Errorf("Store error: %v", err) } - wait() - resultReader, isEncrypted := fileStore.Retrieve(key) + resultReader, isEncrypted := fileStore.Retrieve(context.TODO(), key) if isEncrypted != toEncrypt { t.Fatalf("isEncrypted expected %v got %v", toEncrypt, isEncrypted) } @@ -134,7 +143,7 @@ func testFileStoreCapacity(toEncrypt bool, t *testing.T) { memStore.setCapacity(0) // check whether it is, indeed, empty fileStore.ChunkStore = memStore - resultReader, isEncrypted = fileStore.Retrieve(key) + resultReader, isEncrypted = fileStore.Retrieve(context.TODO(), key) if isEncrypted != toEncrypt { t.Fatalf("isEncrypted expected %v got %v", toEncrypt, isEncrypted) } @@ -144,7 +153,7 @@ func testFileStoreCapacity(toEncrypt bool, t *testing.T) { // check how it works with localStore fileStore.ChunkStore = localStore // localStore.dbStore.setCapacity(0) - resultReader, isEncrypted = fileStore.Retrieve(key) + resultReader, isEncrypted = fileStore.Retrieve(context.TODO(), key) if isEncrypted != toEncrypt { t.Fatalf("isEncrypted expected %v got %v", toEncrypt, isEncrypted) }
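Taken together, the filestore changes above thread a context from the public Store/Retrieve entry points down to the chunker. A minimal sketch of the new calling convention, assuming fileStore, reader and size are set up as in the tests above:

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	key, wait, err := fileStore.Store(ctx, reader, size, false)
	if err != nil {
		// the split could not be started
	}
	if err := wait(ctx); err != nil {
		// not every chunk was persisted
	}
	r, isEncrypted := fileStore.Retrieve(ctx, key)
	_ = isEncrypted
	data, err := ioutil.ReadAll(r)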
diff --git a/swarm/storage/hasherstore.go b/swarm/storage/hasherstore.go index e659c3681ef6..e18b66ddcb9f 100644 --- a/swarm/storage/hasherstore.go +++ b/swarm/storage/hasherstore.go @@ -17,6 +17,7 @@ package storage import ( + "context" "fmt" "sync" @@ -126,9 +127,10 @@ func (h *hasherStore) Close() { // Wait returns when // 1) the Close() function has been called and // 2) all the chunks which have been Put have been stored -func (h *hasherStore) Wait() { +func (h *hasherStore) Wait(ctx context.Context) error { <-h.closed h.wg.Wait() + return nil } func (h *hasherStore) createHash(chunkData ChunkData) Address { diff --git a/swarm/storage/hasherstore_test.go b/swarm/storage/hasherstore_test.go index ccb37524a01b..cf7b0dcc343c 100644 --- a/swarm/storage/hasherstore_test.go +++ b/swarm/storage/hasherstore_test.go @@ -18,6 +18,7 @@ package storage import ( "bytes" + "context" "testing" "github.com/ethereum/go-ethereum/swarm/storage/encryption" @@ -60,7 +61,10 @@ func TestHasherStore(t *testing.T) { hasherStore.Close() // Wait until chunks are really stored - hasherStore.Wait() + err = hasherStore.Wait(context.TODO()) + if err != nil { + t.Fatalf("Expected no error got \"%v\"", err) + } // Get the first chunk retrievedChunkData1, err := hasherStore.Get(key1) diff --git a/swarm/storage/ldbstore_test.go b/swarm/storage/ldbstore_test.go index 2c706a75bd31..2453d2f30b95 100644 --- a/swarm/storage/ldbstore_test.go +++ b/swarm/storage/ldbstore_test.go @@ -59,12 +59,12 @@ func newTestDbStore(mock bool, trusted bool) (*testDbStore, func(), error) { } cleanup := func() { - if err != nil { + if db != nil { db.Close() } err = os.RemoveAll(dir) if err != nil { - panic("db cleanup failed") + panic(fmt.Sprintf("db cleanup failed: %v", err)) } }
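The hasherstore hunk above widens Wait to the new Wait(context.Context) error contract, but the implementation still blocks unconditionally and ignores cancellation. A hypothetical cancellation-aware variant, not part of this change and using only the fields shown above (h.closed, h.wg), might look like:

	func (h *hasherStore) Wait(ctx context.Context) error {
		done := make(chan struct{})
		go func() {
			<-h.closed  // wait for Close() to be called
			h.wg.Wait() // wait for all pending chunk stores
			close(done)
		}()
		select {
		case <-done:
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	}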
diff --git a/swarm/storage/pyramid.go b/swarm/storage/pyramid.go index 01172cb77a47..6643e989a1c6 100644 --- a/swarm/storage/pyramid.go +++ b/swarm/storage/pyramid.go @@ -17,6 +17,7 @@ package storage import ( + "context" "encoding/binary" "errors" "io" @@ -99,12 +100,12 @@ func NewPyramidSplitterParams(addr Address, reader io.Reader, putter Putter, get When splitting, data is given as a SectionReader, and the key is a hashSize long byte slice (Key), the root hash of the entire content will fill this once processing finishes. New chunks to store are stored using the putter which the caller provides. */ -func PyramidSplit(reader io.Reader, putter Putter, getter Getter) (Address, func(), error) { - return NewPyramidSplitter(NewPyramidSplitterParams(nil, reader, putter, getter, DefaultChunkSize)).Split() +func PyramidSplit(ctx context.Context, reader io.Reader, putter Putter, getter Getter) (Address, func(context.Context) error, error) { + return NewPyramidSplitter(NewPyramidSplitterParams(nil, reader, putter, getter, DefaultChunkSize)).Split(ctx) } -func PyramidAppend(addr Address, reader io.Reader, putter Putter, getter Getter) (Address, func(), error) { - return NewPyramidSplitter(NewPyramidSplitterParams(addr, reader, putter, getter, DefaultChunkSize)).Append() +func PyramidAppend(ctx context.Context, addr Address, reader io.Reader, putter Putter, getter Getter) (Address, func(context.Context) error, error) { + return NewPyramidSplitter(NewPyramidSplitterParams(addr, reader, putter, getter, DefaultChunkSize)).Append(ctx) } // Entry to create a tree node @@ -203,7 +204,7 @@ func (pc *PyramidChunker) decrementWorkerCount() { pc.workerCount -= 1 } -func (pc *PyramidChunker) Split() (k Address, wait func(), err error) { +func (pc *PyramidChunker) Split(ctx context.Context) (k Address, wait func(context.Context) error, err error) { log.Debug("pyramid.chunker: Split()") pc.wg.Add(1) @@ -235,7 +236,7 @@ func (pc *PyramidChunker) Split() (k Address, wait func(), err error) { } -func (pc *PyramidChunker) Append() (k Address, wait func(), err error) { +func (pc *PyramidChunker) Append(ctx context.Context) (k Address, wait func(context.Context) error, err error) { log.Debug("pyramid.chunker: Append()") // Load the right most unfinished tree chunks in every level pc.loadTree() diff --git a/swarm/storage/types.go b/swarm/storage/types.go index b75f64205ff1..32880ead760f 100644 --- a/swarm/storage/types.go +++ b/swarm/storage/types.go @@ -18,6 +18,7 @@ package storage import ( "bytes" + "context" "crypto" "crypto/rand" "encoding/binary" @@ -303,7 +304,7 @@ type Putter interface { // Close is to indicate that no more chunk data will be Put on this Putter Close() // Wait returns when all data has been stored and Close() was called.
- Wait() + Wait(context.Context) error } // Getter is an interface to retrieve a chunk's data by its reference diff --git a/swarm/swarm_test.go b/swarm/swarm_test.go index f82a9c6fac04..0827748ae200 100644 --- a/swarm/swarm_test.go +++ b/swarm/swarm_test.go @@ -17,10 +17,13 @@ package swarm import ( + "context" + "encoding/hex" "io/ioutil" "math/rand" "os" "path" + "runtime" "strings" "testing" "time" @@ -42,6 +45,13 @@ func TestNewSwarm(t *testing.T) { // a simple rpc endpoint for testing dialing ipcEndpoint := path.Join(dir, "TestSwarm.ipc") + // windows namedpipes are not on filesystem but on NPFS + if runtime.GOOS == "windows" { + b := make([]byte, 8) + rand.Read(b) + ipcEndpoint = `\\.\pipe\TestSwarm-` + hex.EncodeToString(b) + } + _, server, err := rpc.StartIPCEndpoint(ipcEndpoint, nil) if err != nil { t.Error(err) @@ -338,15 +348,19 @@ func testLocalStoreAndRetrieve(t *testing.T, swarm *Swarm, n int, randomData boo } dataPut := string(slice) - k, wait, err := swarm.api.Store(strings.NewReader(dataPut), int64(len(dataPut)), false) + ctx := context.TODO() + k, wait, err := swarm.api.Store(ctx, strings.NewReader(dataPut), int64(len(dataPut)), false) if err != nil { t.Fatal(err) } if wait != nil { - wait() + err = wait(ctx) + if err != nil { + t.Fatal(err) + } } - r, _ := swarm.api.Retrieve(k) + r, _ := swarm.api.Retrieve(context.TODO(), k) d, err := ioutil.ReadAll(r) if err != nil { From 2eedbe799f5eb8766e4808d8a1810cc1c90c4b93 Mon Sep 17 00:00:00 2001 From: Wenbiao Zheng Date: Mon, 9 Jul 2018 22:34:59 +0800 Subject: [PATCH 015/166] cmd: typo fixed, isntance -> instance (#17149) --- cmd/utils/flags.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index a191e4430660..46ea7b96bf86 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -998,7 +998,7 @@ func setEthash(ctx *cli.Context, cfg *eth.Config) { } } -// checkExclusive verifies that only a single isntance of the provided flags was +// checkExclusive verifies that only a single instance of the provided flags was // set by the user. Each flag might optionally be followed by a string type to // specialize it further. 
func checkExclusive(ctx *cli.Context, args ...interface{}) { From a9835c1816bc49ee54c82b4f2a5b05cbcd89881b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kurk=C3=B3=20Mih=C3=A1ly?= Date: Wed, 11 Jul 2018 10:59:04 +0300 Subject: [PATCH 016/166] cmd, dashboard, log: log collection and exploration (#17097) * cmd, dashboard, internal, log, node: logging feature * cmd, dashboard, internal, log: requested changes * dashboard, vendor: gofmt, govendor, use vendored file watcher * dashboard, log: gofmt -s -w, goimports * dashboard, log: gosimple --- cmd/geth/main.go | 7 +- cmd/swarm/main.go | 2 +- cmd/utils/flags.go | 4 +- dashboard/assets.go | 17652 +++++++++------- dashboard/assets/common.jsx | 2 +- dashboard/assets/components/Body.jsx | 10 +- dashboard/assets/components/CustomTooltip.jsx | 2 +- dashboard/assets/components/Dashboard.jsx | 45 +- dashboard/assets/components/Logs.jsx | 310 + dashboard/assets/components/Main.jsx | 54 +- dashboard/assets/index.html | 3 + dashboard/assets/types/content.jsx | 60 +- dashboard/assets/yarn.lock | 195 +- dashboard/dashboard.go | 171 +- dashboard/log.go | 288 + dashboard/message.go | 25 +- internal/debug/flags.go | 21 +- log/format.go | 46 +- log/handler.go | 110 + log/handler_glog.go | 5 + log/logger.go | 3 + node/config.go | 12 +- node/node.go | 4 +- node/service.go | 4 +- vendor/github.com/mohae/deepcopy/LICENSE | 21 + vendor/github.com/mohae/deepcopy/README.md | 8 + vendor/github.com/mohae/deepcopy/deepcopy.go | 125 + vendor/vendor.json | 6 + 28 files changed, 11214 insertions(+), 7981 deletions(-) create mode 100644 dashboard/assets/components/Logs.jsx create mode 100644 dashboard/log.go create mode 100644 vendor/github.com/mohae/deepcopy/LICENSE create mode 100644 vendor/github.com/mohae/deepcopy/README.md create mode 100644 vendor/github.com/mohae/deepcopy/deepcopy.go diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 52308948fc00..e42aab30acd9 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -199,7 +199,12 @@ func init() { app.Before = func(ctx *cli.Context) error { runtime.GOMAXPROCS(runtime.NumCPU()) - if err := debug.Setup(ctx); err != nil { + + logdir := "" + if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) { + logdir = (&node.Config{DataDir: utils.MakeDataDir(ctx)}).ResolvePath("logs") + } + if err := debug.Setup(ctx, logdir); err != nil { return err } // Cap the cache allowance and tune the garbage collector diff --git a/cmd/swarm/main.go b/cmd/swarm/main.go index 8e1a69cb2a08..7a058b0cb5ea 100644 --- a/cmd/swarm/main.go +++ b/cmd/swarm/main.go @@ -432,7 +432,7 @@ pv(1) tool to get a progress bar: app.Flags = append(app.Flags, swarmmetrics.Flags...) app.Before = func(ctx *cli.Context) error { runtime.GOMAXPROCS(runtime.NumCPU()) - if err := debug.Setup(ctx); err != nil { + if err := debug.Setup(ctx, ""); err != nil { return err } swarmmetrics.Setup(ctx) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 46ea7b96bf86..fb122136511e 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -193,7 +193,7 @@ var ( } // Dashboard settings DashboardEnabledFlag = cli.BoolFlag{ - Name: "dashboard", + Name: metrics.DashboardEnabledFlag, Usage: "Enable the dashboard", } DashboardAddrFlag = cli.StringFlag{ @@ -1185,7 +1185,7 @@ func RegisterEthService(stack *node.Node, cfg *eth.Config) { // RegisterDashboardService adds a dashboard to the stack. 
func RegisterDashboardService(stack *node.Node, cfg *dashboard.Config, commit string) { stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { - return dashboard.New(cfg, commit) + return dashboard.New(cfg, commit, ctx.ResolvePath("logs")), nil }) } diff --git a/dashboard/assets.go b/dashboard/assets.go index 521d134a6ab1..07bc9c4e4d82 100644 --- a/dashboard/assets.go +++ b/dashboard/assets.go @@ -64,6 +64,9 @@ var _indexHtml = []byte(` ::-webkit-scrollbar-thumb { background: #212121; } + ::-webkit-scrollbar-corner { + background: transparent; + } @@ -84,7 +87,7 @@ func indexHtml() (*asset, error) { } info := bindataFileInfo{name: "index.html", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6b, 0xd9, 0xa6, 0xeb, 0x32, 0x49, 0x9b, 0xe5, 0x3a, 0xcb, 0x99, 0xd3, 0xb6, 0x69, 0x7f, 0xde, 0x35, 0x9d, 0x5, 0x96, 0x84, 0xc0, 0x14, 0xef, 0xbe, 0x58, 0x10, 0x5e, 0x40, 0xf2, 0x12, 0x97}} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x22, 0xc8, 0x3d, 0x86, 0x2f, 0xb4, 0x6a, 0x1f, 0xda, 0xd, 0x54, 0x14, 0xa3, 0x6e, 0x80, 0x56, 0x28, 0xea, 0x44, 0xcf, 0xf5, 0xf2, 0xe, 0xad, 0x19, 0xf5, 0x93, 0xd6, 0x8d, 0x6d, 0x2f, 0x35}} return a, nil } @@ -116,11 +119,11 @@ var _bundleJs = []byte((((((((((`!function(modules) { return __webpack_require__.d(getter, "a", getter), getter; }, __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); - }, __webpack_require__.p = "", __webpack_require__(__webpack_require__.s = 336); + }, __webpack_require__.p = "", __webpack_require__(__webpack_require__.s = 375); }([ function(module, exports, __webpack_require__) { "use strict"; (function(process) { - "production" === process.env.NODE_ENV ? module.exports = __webpack_require__(337) : module.exports = __webpack_require__(338); + "production" === process.env.NODE_ENV ? 
module.exports = __webpack_require__(376) : module.exports = __webpack_require__(377); }).call(exports, __webpack_require__(2)); }, function(module, exports, __webpack_require__) { (function(process) { @@ -128,8 +131,8 @@ var _bundleJs = []byte((((((((((`!function(modules) { var REACT_ELEMENT_TYPE = "function" == typeof Symbol && Symbol.for && Symbol.for("react.element") || 60103, isValidElement = function(object) { return "object" == typeof object && null !== object && object.$$typeof === REACT_ELEMENT_TYPE; }; - module.exports = __webpack_require__(379)(isValidElement, !0); - } else module.exports = __webpack_require__(380)(); + module.exports = __webpack_require__(418)(isValidElement, !0); + } else module.exports = __webpack_require__(419)(); }).call(exports, __webpack_require__(2)); }, function(module, exports) { function defaultSetTimout() { @@ -289,7 +292,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }), __webpack_require__.d(__webpack_exports__, "o", function() { return parseChildIndex; }); - var __WEBPACK_IMPORTED_MODULE_0_lodash_isNil__ = __webpack_require__(20), __WEBPACK_IMPORTED_MODULE_0_lodash_isNil___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_isNil__), __WEBPACK_IMPORTED_MODULE_1_lodash_isString__ = __webpack_require__(163), __WEBPACK_IMPORTED_MODULE_1_lodash_isString___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_isString__), __WEBPACK_IMPORTED_MODULE_2_lodash_isObject__ = __webpack_require__(31), __WEBPACK_IMPORTED_MODULE_2_lodash_isObject___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_lodash_isObject__), __WEBPACK_IMPORTED_MODULE_3_lodash_isFunction__ = __webpack_require__(8), __WEBPACK_IMPORTED_MODULE_3_lodash_isFunction___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_lodash_isFunction__), __WEBPACK_IMPORTED_MODULE_4_lodash_isArray__ = __webpack_require__(11), __WEBPACK_IMPORTED_MODULE_4_lodash_isArray___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_lodash_isArray__), __WEBPACK_IMPORTED_MODULE_5_react__ = __webpack_require__(0), __WEBPACK_IMPORTED_MODULE_5_react___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_5_react__), __WEBPACK_IMPORTED_MODULE_6_prop_types__ = __webpack_require__(1), __WEBPACK_IMPORTED_MODULE_6_prop_types___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_6_prop_types__), __WEBPACK_IMPORTED_MODULE_7__DataUtils__ = __webpack_require__(9), __WEBPACK_IMPORTED_MODULE_8__PureRender__ = __webpack_require__(5), PRESENTATION_ATTRIBUTES = { + var __WEBPACK_IMPORTED_MODULE_0_lodash_isNil__ = __webpack_require__(20), __WEBPACK_IMPORTED_MODULE_0_lodash_isNil___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_isNil__), __WEBPACK_IMPORTED_MODULE_1_lodash_isString__ = __webpack_require__(173), __WEBPACK_IMPORTED_MODULE_1_lodash_isString___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_isString__), __WEBPACK_IMPORTED_MODULE_2_lodash_isObject__ = __webpack_require__(32), __WEBPACK_IMPORTED_MODULE_2_lodash_isObject___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_lodash_isObject__), __WEBPACK_IMPORTED_MODULE_3_lodash_isFunction__ = __webpack_require__(8), __WEBPACK_IMPORTED_MODULE_3_lodash_isFunction___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_lodash_isFunction__), __WEBPACK_IMPORTED_MODULE_4_lodash_isArray__ = __webpack_require__(13), __WEBPACK_IMPORTED_MODULE_4_lodash_isArray___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_lodash_isArray__), 
__WEBPACK_IMPORTED_MODULE_5_react__ = __webpack_require__(0), __WEBPACK_IMPORTED_MODULE_5_react___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_5_react__), __WEBPACK_IMPORTED_MODULE_6_prop_types__ = __webpack_require__(1), __WEBPACK_IMPORTED_MODULE_6_prop_types___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_6_prop_types__), __WEBPACK_IMPORTED_MODULE_7__DataUtils__ = __webpack_require__(9), __WEBPACK_IMPORTED_MODULE_8__PureRender__ = __webpack_require__(5), PRESENTATION_ATTRIBUTES = { alignmentBaseline: __WEBPACK_IMPORTED_MODULE_6_prop_types___default.a.string, angle: __WEBPACK_IMPORTED_MODULE_6_prop_types___default.a.number, baselineShift: __WEBPACK_IMPORTED_MODULE_6_prop_types___default.a.string, @@ -502,7 +505,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }, function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = !0; - var _assign = __webpack_require__(204), _assign2 = function(obj) { + var _assign = __webpack_require__(222), _assign2 = function(obj) { return obj && obj.__esModule ? obj : { default: obj }; @@ -527,7 +530,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { var tag = baseGetTag(value); return tag == funcTag || tag == genTag || tag == asyncTag || tag == proxyTag; } - var baseGetTag = __webpack_require__(41), isObject = __webpack_require__(31), asyncTag = "[object AsyncFunction]", funcTag = "[object Function]", genTag = "[object GeneratorFunction]", proxyTag = "[object Proxy]"; + var baseGetTag = __webpack_require__(41), isObject = __webpack_require__(32), asyncTag = "[object AsyncFunction]", funcTag = "[object Function]", genTag = "[object GeneratorFunction]", proxyTag = "[object Proxy]"; module.exports = isFunction; }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; @@ -554,7 +557,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }), __webpack_require__.d(__webpack_exports__, "c", function() { return getLinearRegression; }); - var __WEBPACK_IMPORTED_MODULE_0_lodash_get__ = __webpack_require__(164), __WEBPACK_IMPORTED_MODULE_0_lodash_get___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_get__), __WEBPACK_IMPORTED_MODULE_1_lodash_isArray__ = __webpack_require__(11), __WEBPACK_IMPORTED_MODULE_1_lodash_isArray___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_isArray__), __WEBPACK_IMPORTED_MODULE_2_lodash_isNaN__ = __webpack_require__(116), __WEBPACK_IMPORTED_MODULE_2_lodash_isNaN___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_lodash_isNaN__), __WEBPACK_IMPORTED_MODULE_3_lodash_isNumber__ = __webpack_require__(169), __WEBPACK_IMPORTED_MODULE_3_lodash_isNumber___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_lodash_isNumber__), __WEBPACK_IMPORTED_MODULE_4_lodash_isString__ = __webpack_require__(163), __WEBPACK_IMPORTED_MODULE_4_lodash_isString___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_lodash_isString__), mathSign = function(value) { + var __WEBPACK_IMPORTED_MODULE_0_lodash_get__ = __webpack_require__(174), __WEBPACK_IMPORTED_MODULE_0_lodash_get___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_get__), __WEBPACK_IMPORTED_MODULE_1_lodash_isArray__ = __webpack_require__(13), __WEBPACK_IMPORTED_MODULE_1_lodash_isArray___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_isArray__), __WEBPACK_IMPORTED_MODULE_2_lodash_isNaN__ = __webpack_require__(120), __WEBPACK_IMPORTED_MODULE_2_lodash_isNaN___default = 
__webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_lodash_isNaN__), __WEBPACK_IMPORTED_MODULE_3_lodash_isNumber__ = __webpack_require__(272), __WEBPACK_IMPORTED_MODULE_3_lodash_isNumber___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_lodash_isNumber__), __WEBPACK_IMPORTED_MODULE_4_lodash_isString__ = __webpack_require__(173), __WEBPACK_IMPORTED_MODULE_4_lodash_isString___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_lodash_isString__), mathSign = function(value) { return 0 === value ? 0 : value > 0 ? 1 : -1; }, isPercent = function(value) { return __WEBPACK_IMPORTED_MODULE_4_lodash_isString___default()(value) && value.indexOf("%") === value.length - 1; @@ -623,12 +626,12 @@ var _bundleJs = []byte((((((((((`!function(modules) { Object.defineProperty(exports, "__esModule", { value: !0 }), exports.sheetsManager = void 0; - var _keys = __webpack_require__(50), _keys2 = _interopRequireDefault(_keys), _extends2 = __webpack_require__(6), _extends3 = _interopRequireDefault(_extends2), _getPrototypeOf = __webpack_require__(26), _getPrototypeOf2 = _interopRequireDefault(_getPrototypeOf), _classCallCheck2 = __webpack_require__(27), _classCallCheck3 = _interopRequireDefault(_classCallCheck2), _createClass2 = __webpack_require__(28), _createClass3 = _interopRequireDefault(_createClass2), _possibleConstructorReturn2 = __webpack_require__(29), _possibleConstructorReturn3 = _interopRequireDefault(_possibleConstructorReturn2), _inherits2 = __webpack_require__(30), _inherits3 = _interopRequireDefault(_inherits2), _objectWithoutProperties2 = __webpack_require__(7), _objectWithoutProperties3 = _interopRequireDefault(_objectWithoutProperties2), _map = __webpack_require__(401), _map2 = _interopRequireDefault(_map), _minSafeInteger = __webpack_require__(417), _minSafeInteger2 = _interopRequireDefault(_minSafeInteger), _react = __webpack_require__(0), _react2 = _interopRequireDefault(_react), _propTypes = __webpack_require__(1), _propTypes2 = _interopRequireDefault(_propTypes), _warning = __webpack_require__(12), _warning2 = _interopRequireDefault(_warning), _hoistNonReactStatics = __webpack_require__(151), _hoistNonReactStatics2 = _interopRequireDefault(_hoistNonReactStatics), _getDisplayName = __webpack_require__(226), _getDisplayName2 = _interopRequireDefault(_getDisplayName), _wrapDisplayName = __webpack_require__(75), _wrapDisplayName2 = _interopRequireDefault(_wrapDisplayName), _contextTypes = __webpack_require__(420), _contextTypes2 = _interopRequireDefault(_contextTypes), _jss = __webpack_require__(228), _ns = __webpack_require__(227), ns = function(obj) { + var _keys = __webpack_require__(55), _keys2 = _interopRequireDefault(_keys), _extends2 = __webpack_require__(6), _extends3 = _interopRequireDefault(_extends2), _getPrototypeOf = __webpack_require__(26), _getPrototypeOf2 = _interopRequireDefault(_getPrototypeOf), _classCallCheck2 = __webpack_require__(27), _classCallCheck3 = _interopRequireDefault(_classCallCheck2), _createClass2 = __webpack_require__(28), _createClass3 = _interopRequireDefault(_createClass2), _possibleConstructorReturn2 = __webpack_require__(29), _possibleConstructorReturn3 = _interopRequireDefault(_possibleConstructorReturn2), _inherits2 = __webpack_require__(30), _inherits3 = _interopRequireDefault(_inherits2), _objectWithoutProperties2 = __webpack_require__(7), _objectWithoutProperties3 = _interopRequireDefault(_objectWithoutProperties2), _map = __webpack_require__(440), _map2 = _interopRequireDefault(_map), _minSafeInteger = __webpack_require__(456), 
_minSafeInteger2 = _interopRequireDefault(_minSafeInteger), _react = __webpack_require__(0), _react2 = _interopRequireDefault(_react), _propTypes = __webpack_require__(1), _propTypes2 = _interopRequireDefault(_propTypes), _warning = __webpack_require__(11), _warning2 = _interopRequireDefault(_warning), _hoistNonReactStatics = __webpack_require__(162), _hoistNonReactStatics2 = _interopRequireDefault(_hoistNonReactStatics), _getDisplayName = __webpack_require__(244), _getDisplayName2 = _interopRequireDefault(_getDisplayName), _wrapDisplayName = __webpack_require__(79), _wrapDisplayName2 = _interopRequireDefault(_wrapDisplayName), _contextTypes = __webpack_require__(459), _contextTypes2 = _interopRequireDefault(_contextTypes), _jss = __webpack_require__(246), _ns = __webpack_require__(245), ns = function(obj) { if (obj && obj.__esModule) return obj; var newObj = {}; if (null != obj) for (var key in obj) Object.prototype.hasOwnProperty.call(obj, key) && (newObj[key] = obj[key]); return newObj.default = obj, newObj; - }(_ns), _jssPreset = __webpack_require__(442), _jssPreset2 = _interopRequireDefault(_jssPreset), _createMuiTheme = __webpack_require__(150), _createMuiTheme2 = _interopRequireDefault(_createMuiTheme), _themeListener = __webpack_require__(149), _themeListener2 = _interopRequireDefault(_themeListener), _createGenerateClassName = __webpack_require__(455), _createGenerateClassName2 = _interopRequireDefault(_createGenerateClassName), _getStylesCreator = __webpack_require__(456), _getStylesCreator2 = _interopRequireDefault(_getStylesCreator), jss = (0, + }(_ns), _jssPreset = __webpack_require__(481), _jssPreset2 = _interopRequireDefault(_jssPreset), _createMuiTheme = __webpack_require__(161), _createMuiTheme2 = _interopRequireDefault(_createMuiTheme), _themeListener = __webpack_require__(160), _themeListener2 = _interopRequireDefault(_themeListener), _createGenerateClassName = __webpack_require__(494), _createGenerateClassName2 = _interopRequireDefault(_createGenerateClassName), _getStylesCreator = __webpack_require__(495), _getStylesCreator2 = _interopRequireDefault(_getStylesCreator), jss = (0, _jss.create)((0, _jssPreset2.default)()), generateClassName = (0, _createGenerateClassName2.default)(), indexCounter = _minSafeInteger2.default, sheetsManager = exports.sheetsManager = new _map2.default(), noopTheme = {}, defaultTheme = void 0, withStyles = function(stylesOrCreator) { var options = arguments.length > 1 && void 0 !== arguments[1] ? arguments[1] : {}; return function(Component) { @@ -730,10 +733,10 @@ var _bundleJs = []byte((((((((((`!function(modules) { renderedClasses = sheetsManagerTheme.sheet.classes; } classes = classesProp ? 
(0, _extends3.default)({}, renderedClasses, (0, _keys2.default)(classesProp).reduce(function(accumulator, key) { - return "production" !== process.env.NODE_ENV && (0, _warning2.default)(renderedClasses[key] || _this3.disableStylesGeneration, [ "Material-UI: the key ` + "`") + (`" + key + "` + ("`" + ` provided to the classes property is not implemented in " + (0, + return "production" !== process.env.NODE_ENV && (0, _warning2.default)(renderedClasses[key] || _this3.disableStylesGeneration, [ "Material-UI: the key ` + ("`" + `" + key + "`)) + ("`" + (` provided to the classes property is not implemented in " + (0, _getDisplayName2.default)(Component) + ".", "You can only override one of the following: " + (0, _keys2.default)(renderedClasses).join(",") ].join("\n")), "production" !== process.env.NODE_ENV && (0, - _warning2.default)(!classesProp[key] || "string" == typeof classesProp[key], [ "Material-UI: the key `))) + (("`" + (`" + key + "` + "`")) + (` provided to the classes property is not valid for " + (0, + _warning2.default)(!classesProp[key] || "string" == typeof classesProp[key], [ "Material-UI: the key ` + "`"))) + ((`" + key + "` + ("`" + ` provided to the classes property is not valid for " + (0, _getDisplayName2.default)(Component) + ".", "You need to provide a non empty string instead of: " + classesProp[key] + "." ].join("\n")), classesProp[key] && (accumulator[key] = renderedClasses[key] + " " + classesProp[key]), accumulator; @@ -761,9 +764,6 @@ var _bundleJs = []byte((((((((((`!function(modules) { }; exports.default = withStyles; }).call(exports, __webpack_require__(2)); -}, function(module, exports) { - var isArray = Array.isArray; - module.exports = isArray; }, function(module, exports, __webpack_require__) { "use strict"; (function(process) { @@ -772,7 +772,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { var len = arguments.length; args = new Array(len > 2 ? len - 2 : 0); for (var key = 2; key < len; key++) args[key - 2] = arguments[key]; - if (void 0 === format) throw new Error("` + ("`" + `warning(condition, format, ...args)`)))) + ((("`" + (` requires a warning message argument"); + if (void 0 === format) throw new Error("`)) + ("`" + (`warning(condition, format, ...args)` + "`")))) + (((` requires a warning message argument"); if (format.length < 10 || /^[s\W]*$/.test(format)) throw new Error("The warning format should be able to uniquely identify this warning. Please, use a more descriptive format than: " + format); if (!condition) { var argIndex = 0, message = "Warning: " + format.replace(/%s/g, function() { @@ -788,7 +788,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }, function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = !0; - var _defineProperty = __webpack_require__(142), _defineProperty2 = function(obj) { + var _defineProperty = __webpack_require__(154), _defineProperty2 = function(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; @@ -801,6 +801,9 @@ var _bundleJs = []byte((((((((((`!function(modules) { writable: !0 }) : obj[key] = value, obj; }; +}, function(module, exports) { + var isArray = Array.isArray; + module.exports = isArray; }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; function _objectWithoutProperties(obj, keys) { @@ -826,7 +829,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }; Layer.propTypes = propTypes, __webpack_exports__.a = Layer; }, function(module, exports, __webpack_require__) { - var global = __webpack_require__(157), core = __webpack_require__(158), hide = __webpack_require__(245), redefine = __webpack_require__(536), ctx = __webpack_require__(539), $export = function(type, name, source) { + var global = __webpack_require__(167), core = __webpack_require__(168), hide = __webpack_require__(266), redefine = __webpack_require__(580), ctx = __webpack_require__(583), $export = function(type, name, source) { var key, own, out, exp, IS_FORCED = type & $export.F, IS_GLOBAL = type & $export.G, IS_STATIC = type & $export.S, IS_PROTO = type & $export.P, IS_BIND = type & $export.B, target = IS_GLOBAL ? global : IS_STATIC ? global[name] || (global[name] = {}) : (global[name] || {}).prototype, exports = IS_GLOBAL ? core : core[name] || (core[name] = {}), expProto = exports.prototype || (exports.prototype = {}); IS_GLOBAL && (source = name); for (key in source) own = !IS_FORCED && target && void 0 !== target[key], out = (own ? target : source)[key], @@ -914,8 +917,8 @@ var _bundleJs = []byte((((((((((`!function(modules) { }), __webpack_require__.d(__webpack_exports__, "y", function() { return parseDomainOfCategoryAxis; }); - var __WEBPACK_IMPORTED_MODULE_0_lodash_isEqual__ = __webpack_require__(34), __WEBPACK_IMPORTED_MODULE_0_lodash_isEqual___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_isEqual__), __WEBPACK_IMPORTED_MODULE_1_lodash_sortBy__ = __webpack_require__(284), __WEBPACK_IMPORTED_MODULE_1_lodash_sortBy___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_sortBy__), __WEBPACK_IMPORTED_MODULE_2_lodash_isNaN__ = __webpack_require__(116), __WEBPACK_IMPORTED_MODULE_2_lodash_isNaN___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_lodash_isNaN__), __WEBPACK_IMPORTED_MODULE_3_lodash_isString__ = __webpack_require__(163), __WEBPACK_IMPORTED_MODULE_3_lodash_isString___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_lodash_isString__), __WEBPACK_IMPORTED_MODULE_4_lodash_max__ = __webpack_require__(700), __WEBPACK_IMPORTED_MODULE_4_lodash_max___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_lodash_max__), __WEBPACK_IMPORTED_MODULE_5_lodash_min__ = __webpack_require__(289), __WEBPACK_IMPORTED_MODULE_5_lodash_min___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_5_lodash_min__), __WEBPACK_IMPORTED_MODULE_6_lodash_isArray__ = __webpack_require__(11), __WEBPACK_IMPORTED_MODULE_6_lodash_isArray___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_6_lodash_isArray__), __WEBPACK_IMPORTED_MODULE_7_lodash_flatMap__ = __webpack_require__(701), __WEBPACK_IMPORTED_MODULE_7_lodash_flatMap___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_7_lodash_flatMap__), __WEBPACK_IMPORTED_MODULE_8_lodash_isFunction__ = __webpack_require__(8), __WEBPACK_IMPORTED_MODULE_8_lodash_isFunction___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_8_lodash_isFunction__), __WEBPACK_IMPORTED_MODULE_9_lodash_get__ = __webpack_require__(164), 
__WEBPACK_IMPORTED_MODULE_9_lodash_get___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_9_lodash_get__), __WEBPACK_IMPORTED_MODULE_10_lodash_isNil__ = __webpack_require__(20), __WEBPACK_IMPORTED_MODULE_10_lodash_isNil___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_10_lodash_isNil__), __WEBPACK_IMPORTED_MODULE_11_recharts_scale__ = __webpack_require__(703), __WEBPACK_IMPORTED_MODULE_12_d3_scale__ = (__webpack_require__.n(__WEBPACK_IMPORTED_MODULE_11_recharts_scale__), - __webpack_require__(292)), __WEBPACK_IMPORTED_MODULE_13_d3_shape__ = __webpack_require__(172), __WEBPACK_IMPORTED_MODULE_14__DataUtils__ = __webpack_require__(9), __WEBPACK_IMPORTED_MODULE_15__cartesian_ReferenceDot__ = __webpack_require__(325), __WEBPACK_IMPORTED_MODULE_16__cartesian_ReferenceLine__ = __webpack_require__(326), __WEBPACK_IMPORTED_MODULE_17__cartesian_ReferenceArea__ = __webpack_require__(327), __WEBPACK_IMPORTED_MODULE_18__cartesian_ErrorBar__ = __webpack_require__(92), __WEBPACK_IMPORTED_MODULE_19__component_Legend__ = __webpack_require__(170), __WEBPACK_IMPORTED_MODULE_20__ReactUtils__ = __webpack_require__(4), _extends = Object.assign || function(target) { + var __WEBPACK_IMPORTED_MODULE_0_lodash_isEqual__ = __webpack_require__(45), __WEBPACK_IMPORTED_MODULE_0_lodash_isEqual___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_isEqual__), __WEBPACK_IMPORTED_MODULE_1_lodash_sortBy__ = __webpack_require__(321), __WEBPACK_IMPORTED_MODULE_1_lodash_sortBy___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_sortBy__), __WEBPACK_IMPORTED_MODULE_2_lodash_isNaN__ = __webpack_require__(120), __WEBPACK_IMPORTED_MODULE_2_lodash_isNaN___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_lodash_isNaN__), __WEBPACK_IMPORTED_MODULE_3_lodash_isString__ = __webpack_require__(173), __WEBPACK_IMPORTED_MODULE_3_lodash_isString___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_lodash_isString__), __WEBPACK_IMPORTED_MODULE_4_lodash_max__ = __webpack_require__(840), __WEBPACK_IMPORTED_MODULE_4_lodash_max___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_lodash_max__), __WEBPACK_IMPORTED_MODULE_5_lodash_min__ = __webpack_require__(328), __WEBPACK_IMPORTED_MODULE_5_lodash_min___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_5_lodash_min__), __WEBPACK_IMPORTED_MODULE_6_lodash_isArray__ = __webpack_require__(13), __WEBPACK_IMPORTED_MODULE_6_lodash_isArray___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_6_lodash_isArray__), __WEBPACK_IMPORTED_MODULE_7_lodash_flatMap__ = __webpack_require__(841), __WEBPACK_IMPORTED_MODULE_7_lodash_flatMap___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_7_lodash_flatMap__), __WEBPACK_IMPORTED_MODULE_8_lodash_isFunction__ = __webpack_require__(8), __WEBPACK_IMPORTED_MODULE_8_lodash_isFunction___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_8_lodash_isFunction__), __WEBPACK_IMPORTED_MODULE_9_lodash_get__ = __webpack_require__(174), __WEBPACK_IMPORTED_MODULE_9_lodash_get___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_9_lodash_get__), __WEBPACK_IMPORTED_MODULE_10_lodash_isNil__ = __webpack_require__(20), __WEBPACK_IMPORTED_MODULE_10_lodash_isNil___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_10_lodash_isNil__), __WEBPACK_IMPORTED_MODULE_11_recharts_scale__ = __webpack_require__(843), __WEBPACK_IMPORTED_MODULE_12_d3_scale__ = (__webpack_require__.n(__WEBPACK_IMPORTED_MODULE_11_recharts_scale__), + __webpack_require__(331)), 
__WEBPACK_IMPORTED_MODULE_13_d3_shape__ = __webpack_require__(182), __WEBPACK_IMPORTED_MODULE_14__DataUtils__ = __webpack_require__(9), __WEBPACK_IMPORTED_MODULE_15__cartesian_ReferenceDot__ = __webpack_require__(364), __WEBPACK_IMPORTED_MODULE_16__cartesian_ReferenceLine__ = __webpack_require__(365), __WEBPACK_IMPORTED_MODULE_17__cartesian_ReferenceArea__ = __webpack_require__(366), __WEBPACK_IMPORTED_MODULE_18__cartesian_ErrorBar__ = __webpack_require__(95), __WEBPACK_IMPORTED_MODULE_19__component_Legend__ = __webpack_require__(180), __WEBPACK_IMPORTED_MODULE_20__ReactUtils__ = __webpack_require__(4), _extends = Object.assign || function(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) Object.prototype.hasOwnProperty.call(source, key) && (target[key] = source[key]); @@ -1406,7 +1409,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }; }, function(module, exports) { var core = module.exports = { - version: "2.5.3" + version: "2.5.7" }; "number" == typeof __e && (__e = core); }, function(module, __webpack_exports__, __webpack_require__) { @@ -1449,10 +1452,10 @@ var _bundleJs = []byte((((((((((`!function(modules) { __webpack_exports__.a = newInterval; var t0 = new Date(), t1 = new Date(); }, function(module, exports, __webpack_require__) { - var global = __webpack_require__(24), core = __webpack_require__(17), ctx = __webpack_require__(46), hide = __webpack_require__(40), $export = function(type, name, source) { + var global = __webpack_require__(24), core = __webpack_require__(17), ctx = __webpack_require__(51), hide = __webpack_require__(39), has = __webpack_require__(54), $export = function(type, name, source) { var key, own, out, IS_FORCED = type & $export.F, IS_GLOBAL = type & $export.G, IS_STATIC = type & $export.S, IS_PROTO = type & $export.P, IS_BIND = type & $export.B, IS_WRAP = type & $export.W, exports = IS_GLOBAL ? core : core[name] || (core[name] = {}), expProto = exports.prototype, target = IS_GLOBAL ? global : IS_STATIC ? global[name] : (global[name] || {}).prototype; IS_GLOBAL && (source = name); - for (key in source) (own = !IS_FORCED && target && void 0 !== target[key]) && key in exports || (out = own ? target[key] : source[key], + for (key in source) (own = !IS_FORCED && target && void 0 !== target[key]) && has(exports, key) || (out = own ? target[key] : source[key], exports[key] = IS_GLOBAL && "function" != typeof target[key] ? source[key] : IS_BIND && own ? ctx(out, global) : IS_WRAP && target[key] == out ? function(C) { var F = function(a, b, c) { if (this instanceof C) { @@ -1482,12 +1485,12 @@ var _bundleJs = []byte((((((((((`!function(modules) { } module.exports = isNil; }, function(module, exports, __webpack_require__) { - var store = __webpack_require__(139)("wks"), uid = __webpack_require__(99), Symbol = __webpack_require__(24).Symbol, USE_SYMBOL = "function" == typeof Symbol; + var store = __webpack_require__(151)("wks"), uid = __webpack_require__(103), Symbol = __webpack_require__(24).Symbol, USE_SYMBOL = "function" == typeof Symbol; (module.exports = function(name) { return store[name] || (store[name] = USE_SYMBOL && Symbol[name] || (USE_SYMBOL ? Symbol : uid)("Symbol." 
+ name)); }).store = store; }, function(module, exports, __webpack_require__) { - var anObject = __webpack_require__(47), IE8_DOM_DEFINE = __webpack_require__(206), toPrimitive = __webpack_require__(133), dP = Object.defineProperty; + var anObject = __webpack_require__(52), IE8_DOM_DEFINE = __webpack_require__(224), toPrimitive = __webpack_require__(145), dP = Object.defineProperty; exports.f = __webpack_require__(25) ? Object.defineProperty : function(O, P, Attributes) { if (anObject(O), P = toPrimitive(P, !0), anObject(Attributes), IE8_DOM_DEFINE) try { return dP(O, P, Attributes); @@ -1617,7 +1620,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { var global = module.exports = "undefined" != typeof window && window.Math == Math ? window : "undefined" != typeof self && self.Math == Math ? self : Function("return this")(); "number" == typeof __g && (__g = global); }, function(module, exports, __webpack_require__) { - module.exports = !__webpack_require__(48)(function() { + module.exports = !__webpack_require__(53)(function() { return 7 != Object.defineProperty({}, "a", { get: function() { return 7; @@ -1626,7 +1629,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }); }, function(module, exports, __webpack_require__) { module.exports = { - default: __webpack_require__(355), + default: __webpack_require__(394), __esModule: !0 }; }, function(module, exports, __webpack_require__) { @@ -1637,7 +1640,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }, function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = !0; - var _defineProperty = __webpack_require__(142), _defineProperty2 = function(obj) { + var _defineProperty = __webpack_require__(154), _defineProperty2 = function(obj) { return obj && obj.__esModule ? obj : { default: obj }; @@ -1658,7 +1661,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }, function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = !0; - var _typeof2 = __webpack_require__(101), _typeof3 = function(obj) { + var _typeof2 = __webpack_require__(105), _typeof3 = function(obj) { return obj && obj.__esModule ? obj : { default: obj }; @@ -1675,7 +1678,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }; } exports.__esModule = !0; - var _setPrototypeOf = __webpack_require__(372), _setPrototypeOf2 = _interopRequireDefault(_setPrototypeOf), _create = __webpack_require__(376), _create2 = _interopRequireDefault(_create), _typeof2 = __webpack_require__(101), _typeof3 = _interopRequireDefault(_typeof2); + var _setPrototypeOf = __webpack_require__(411), _setPrototypeOf2 = _interopRequireDefault(_setPrototypeOf), _create = __webpack_require__(415), _create2 = _interopRequireDefault(_create), _typeof2 = __webpack_require__(105), _typeof3 = _interopRequireDefault(_typeof2); exports.default = function(subClass, superClass) { if ("function" != typeof superClass && null !== superClass) throw new TypeError("Super expression must either be null or a function, not " + (void 0 === superClass ? "undefined" : (0, _typeof3.default)(superClass))); @@ -1688,15 +1691,15 @@ var _bundleJs = []byte((((((((((`!function(modules) { } }), superClass && (_setPrototypeOf2.default ? 
(0, _setPrototypeOf2.default)(subClass, superClass) : subClass.__proto__ = superClass); }; +}, function(module, exports, __webpack_require__) { + var freeGlobal = __webpack_require__(268), freeSelf = "object" == typeof self && self && self.Object === Object && self, root = freeGlobal || freeSelf || Function("return this")(); + module.exports = root; }, function(module, exports) { function isObject(value) { var type = typeof value; return null != value && ("object" == type || "function" == type); } module.exports = isObject; -}, function(module, exports, __webpack_require__) { - var freeGlobal = __webpack_require__(243), freeSelf = "object" == typeof self && self && self.Object === Object && self, root = freeGlobal || freeSelf || Function("return this")(); - module.exports = root; }, function(module, exports, __webpack_require__) { "use strict"; function _interopRequireDefault(obj) { @@ -1707,53 +1710,48 @@ var _bundleJs = []byte((((((((((`!function(modules) { Object.defineProperty(exports, "__esModule", { value: !0 }), exports.translateStyle = exports.AnimateGroup = exports.configBezier = exports.configSpring = void 0; - var _Animate = __webpack_require__(264), _Animate2 = _interopRequireDefault(_Animate), _easing = __webpack_require__(277), _util = __webpack_require__(122), _AnimateGroup = __webpack_require__(681), _AnimateGroup2 = _interopRequireDefault(_AnimateGroup); + var _Animate = __webpack_require__(287), _Animate2 = _interopRequireDefault(_Animate), _easing = __webpack_require__(305), _util = __webpack_require__(132), _AnimateGroup = __webpack_require__(761), _AnimateGroup2 = _interopRequireDefault(_AnimateGroup); exports.configSpring = _easing.configSpring, exports.configBezier = _easing.configBezier, exports.AnimateGroup = _AnimateGroup2.default, exports.translateStyle = _util.translateStyle, exports.default = _Animate2.default; -}, function(module, exports, __webpack_require__) { - function isEqual(value, other) { - return baseIsEqual(value, other); - } - var baseIsEqual = __webpack_require__(177); - module.exports = isEqual; +}, function(module, exports) { + var isArray = Array.isArray; + module.exports = isArray; }, function(module, exports) { module.exports = function(it) { return "object" == typeof it ? 
null !== it : "function" == typeof it; }; -}, function(module, exports) { - function isObjectLike(value) { - return null != value && "object" == typeof value; - } - module.exports = isObjectLike; +}, function(module, exports, __webpack_require__) { + var freeGlobal = __webpack_require__(292), freeSelf = "object" == typeof self && self && self.Object === Object && self, root = freeGlobal || freeSelf || Function("return this")(); + module.exports = root; }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; - var __WEBPACK_IMPORTED_MODULE_0__src_bisect__ = __webpack_require__(293); + var __WEBPACK_IMPORTED_MODULE_0__src_bisect__ = __webpack_require__(332); __webpack_require__.d(__webpack_exports__, "b", function() { return __WEBPACK_IMPORTED_MODULE_0__src_bisect__.a; }); - var __WEBPACK_IMPORTED_MODULE_1__src_ascending__ = __webpack_require__(64); + var __WEBPACK_IMPORTED_MODULE_1__src_ascending__ = __webpack_require__(69); __webpack_require__.d(__webpack_exports__, "a", function() { return __WEBPACK_IMPORTED_MODULE_1__src_ascending__.a; }); - var __WEBPACK_IMPORTED_MODULE_2__src_bisector__ = __webpack_require__(294); + var __WEBPACK_IMPORTED_MODULE_2__src_bisector__ = __webpack_require__(333); __webpack_require__.d(__webpack_exports__, "c", function() { return __WEBPACK_IMPORTED_MODULE_2__src_bisector__.a; }); - var __WEBPACK_IMPORTED_MODULE_18__src_quantile__ = (__webpack_require__(707), __webpack_require__(708), - __webpack_require__(296), __webpack_require__(298), __webpack_require__(709), __webpack_require__(712), - __webpack_require__(713), __webpack_require__(302), __webpack_require__(714), __webpack_require__(715), - __webpack_require__(716), __webpack_require__(717), __webpack_require__(303), __webpack_require__(295), - __webpack_require__(718), __webpack_require__(184)); + var __WEBPACK_IMPORTED_MODULE_18__src_quantile__ = (__webpack_require__(847), __webpack_require__(848), + __webpack_require__(335), __webpack_require__(337), __webpack_require__(849), __webpack_require__(852), + __webpack_require__(853), __webpack_require__(341), __webpack_require__(854), __webpack_require__(855), + __webpack_require__(856), __webpack_require__(857), __webpack_require__(342), __webpack_require__(334), + __webpack_require__(858), __webpack_require__(204)); __webpack_require__.d(__webpack_exports__, "d", function() { return __WEBPACK_IMPORTED_MODULE_18__src_quantile__.a; }); - var __WEBPACK_IMPORTED_MODULE_19__src_range__ = __webpack_require__(300); + var __WEBPACK_IMPORTED_MODULE_19__src_range__ = __webpack_require__(339); __webpack_require__.d(__webpack_exports__, "e", function() { return __WEBPACK_IMPORTED_MODULE_19__src_range__.a; }); - var __WEBPACK_IMPORTED_MODULE_23__src_ticks__ = (__webpack_require__(719), __webpack_require__(720), - __webpack_require__(721), __webpack_require__(301)); + var __WEBPACK_IMPORTED_MODULE_23__src_ticks__ = (__webpack_require__(859), __webpack_require__(860), + __webpack_require__(861), __webpack_require__(340)); __webpack_require__.d(__webpack_exports__, "h", function() { return __WEBPACK_IMPORTED_MODULE_23__src_ticks__.a; }), __webpack_require__.d(__webpack_exports__, "f", function() { @@ -1761,7 +1759,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }), __webpack_require__.d(__webpack_exports__, "g", function() { return __WEBPACK_IMPORTED_MODULE_23__src_ticks__.c; }); - __webpack_require__(304), __webpack_require__(297), __webpack_require__(722); + __webpack_require__(343), __webpack_require__(336), __webpack_require__(862); 
}, function(module, __webpack_exports__, __webpack_require__) { "use strict"; __webpack_require__.d(__webpack_exports__, "d", function() { @@ -1777,33 +1775,39 @@ var _bundleJs = []byte((((((((((`!function(modules) { }); var durationSecond = 1e3, durationMinute = 6e4, durationHour = 36e5, durationDay = 864e5, durationWeek = 6048e5; }, function(module, exports, __webpack_require__) { - "use strict"; - function makeEmptyFunction(arg) { - return function() { - return arg; - }; - } - var emptyFunction = function() {}; - emptyFunction.thatReturns = makeEmptyFunction, emptyFunction.thatReturnsFalse = makeEmptyFunction(!1), - emptyFunction.thatReturnsTrue = makeEmptyFunction(!0), emptyFunction.thatReturnsNull = makeEmptyFunction(null), - emptyFunction.thatReturnsThis = function() { - return this; - }, emptyFunction.thatReturnsArgument = function(arg) { - return arg; - }, module.exports = emptyFunction; -}, function(module, exports, __webpack_require__) { - var dP = __webpack_require__(22), createDesc = __webpack_require__(71); + var dP = __webpack_require__(22), createDesc = __webpack_require__(75); module.exports = __webpack_require__(25) ? function(object, key, value) { return dP.f(object, key, createDesc(1, value)); } : function(object, key, value) { return object[key] = value, object; }; +}, function(module, exports) { + var g; + g = function() { + return this; + }(); + try { + g = g || Function("return this")() || (0, eval)("this"); + } catch (e) { + "object" == typeof window && (g = window); + } + module.exports = g; }, function(module, exports, __webpack_require__) { function baseGetTag(value) { return null == value ? void 0 === value ? undefinedTag : nullTag : symToStringTag && symToStringTag in Object(value) ? getRawTag(value) : objectToString(value); } - var Symbol = __webpack_require__(78), getRawTag = __webpack_require__(522), objectToString = __webpack_require__(523), nullTag = "[object Null]", undefinedTag = "[object Undefined]", symToStringTag = Symbol ? Symbol.toStringTag : void 0; + var Symbol = __webpack_require__(83), getRawTag = __webpack_require__(602), objectToString = __webpack_require__(603), nullTag = "[object Null]", undefinedTag = "[object Undefined]", symToStringTag = Symbol ? 
Symbol.toStringTag : void 0; module.exports = baseGetTag; +}, function(module, exports) { + function isObjectLike(value) { + return null != value && "object" == typeof value; + } + module.exports = isObjectLike; +}, function(module, exports) { + function isObjectLike(value) { + return null != value && "object" == typeof value; + } + module.exports = isObjectLike; }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; function _toConsumableArray(arr) { @@ -1828,7 +1832,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { className: __WEBPACK_IMPORTED_MODULE_5_classnames___default()("recharts-label", className) }, attrs, positionAttrs), label); } - var __WEBPACK_IMPORTED_MODULE_0_lodash_isObject__ = __webpack_require__(31), __WEBPACK_IMPORTED_MODULE_0_lodash_isObject___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_isObject__), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__ = __webpack_require__(8), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__), __WEBPACK_IMPORTED_MODULE_2_lodash_isNil__ = __webpack_require__(20), __WEBPACK_IMPORTED_MODULE_2_lodash_isNil___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_lodash_isNil__), __WEBPACK_IMPORTED_MODULE_3_react__ = __webpack_require__(0), __WEBPACK_IMPORTED_MODULE_3_react___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_react__), __WEBPACK_IMPORTED_MODULE_4_prop_types__ = __webpack_require__(1), __WEBPACK_IMPORTED_MODULE_4_prop_types___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_prop_types__), __WEBPACK_IMPORTED_MODULE_5_classnames__ = __webpack_require__(3), __WEBPACK_IMPORTED_MODULE_5_classnames___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_5_classnames__), __WEBPACK_IMPORTED_MODULE_6__Text__ = __webpack_require__(54), __WEBPACK_IMPORTED_MODULE_7__util_ReactUtils__ = __webpack_require__(4), __WEBPACK_IMPORTED_MODULE_8__util_DataUtils__ = __webpack_require__(9), __WEBPACK_IMPORTED_MODULE_9__util_PolarUtils__ = __webpack_require__(23), _extends = Object.assign || function(target) { + var __WEBPACK_IMPORTED_MODULE_0_lodash_isObject__ = __webpack_require__(32), __WEBPACK_IMPORTED_MODULE_0_lodash_isObject___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_isObject__), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__ = __webpack_require__(8), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__), __WEBPACK_IMPORTED_MODULE_2_lodash_isNil__ = __webpack_require__(20), __WEBPACK_IMPORTED_MODULE_2_lodash_isNil___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_lodash_isNil__), __WEBPACK_IMPORTED_MODULE_3_react__ = __webpack_require__(0), __WEBPACK_IMPORTED_MODULE_3_react___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_react__), __WEBPACK_IMPORTED_MODULE_4_prop_types__ = __webpack_require__(1), __WEBPACK_IMPORTED_MODULE_4_prop_types___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_prop_types__), __WEBPACK_IMPORTED_MODULE_5_classnames__ = __webpack_require__(3), __WEBPACK_IMPORTED_MODULE_5_classnames___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_5_classnames__), __WEBPACK_IMPORTED_MODULE_6__Text__ = __webpack_require__(61), __WEBPACK_IMPORTED_MODULE_7__util_ReactUtils__ = __webpack_require__(4), __WEBPACK_IMPORTED_MODULE_8__util_DataUtils__ = __webpack_require__(9), __WEBPACK_IMPORTED_MODULE_9__util_PolarUtils__ = 
__webpack_require__(23), _extends = Object.assign || function(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) Object.prototype.hasOwnProperty.call(source, key) && (target[key] = source[key]); @@ -2038,9 +2042,15 @@ var _bundleJs = []byte((((((((((`!function(modules) { }; Label.parseViewBox = parseViewBox, Label.renderCallByParent = renderCallByParent, __webpack_exports__.a = Label; +}, function(module, exports, __webpack_require__) { + function isEqual(value, other) { + return baseIsEqual(value, other); + } + var baseIsEqual = __webpack_require__(199); + module.exports = isEqual; }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; - var __WEBPACK_IMPORTED_MODULE_0__src_color__ = __webpack_require__(187); + var __WEBPACK_IMPORTED_MODULE_0__src_color__ = __webpack_require__(207); __webpack_require__.d(__webpack_exports__, "a", function() { return __WEBPACK_IMPORTED_MODULE_0__src_color__.e; }), __webpack_require__.d(__webpack_exports__, "f", function() { @@ -2048,13 +2058,13 @@ var _bundleJs = []byte((((((((((`!function(modules) { }), __webpack_require__.d(__webpack_exports__, "d", function() { return __WEBPACK_IMPORTED_MODULE_0__src_color__.f; }); - var __WEBPACK_IMPORTED_MODULE_1__src_lab__ = __webpack_require__(730); + var __WEBPACK_IMPORTED_MODULE_1__src_lab__ = __webpack_require__(870); __webpack_require__.d(__webpack_exports__, "e", function() { return __WEBPACK_IMPORTED_MODULE_1__src_lab__.a; }), __webpack_require__.d(__webpack_exports__, "c", function() { return __WEBPACK_IMPORTED_MODULE_1__src_lab__.b; }); - var __WEBPACK_IMPORTED_MODULE_2__src_cubehelix__ = __webpack_require__(731); + var __WEBPACK_IMPORTED_MODULE_2__src_cubehelix__ = __webpack_require__(871); __webpack_require__.d(__webpack_exports__, "b", function() { return __WEBPACK_IMPORTED_MODULE_2__src_cubehelix__.a; }); @@ -2090,7 +2100,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { })); })) : null; } - var __WEBPACK_IMPORTED_MODULE_0_lodash_isObject__ = __webpack_require__(31), __WEBPACK_IMPORTED_MODULE_0_lodash_isObject___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_isObject__), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__ = __webpack_require__(8), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__), __WEBPACK_IMPORTED_MODULE_2_lodash_isNil__ = __webpack_require__(20), __WEBPACK_IMPORTED_MODULE_2_lodash_isNil___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_lodash_isNil__), __WEBPACK_IMPORTED_MODULE_3_lodash_last__ = __webpack_require__(781), __WEBPACK_IMPORTED_MODULE_3_lodash_last___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_lodash_last__), __WEBPACK_IMPORTED_MODULE_4_lodash_isArray__ = __webpack_require__(11), __WEBPACK_IMPORTED_MODULE_4_lodash_isArray___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_lodash_isArray__), __WEBPACK_IMPORTED_MODULE_5_react__ = __webpack_require__(0), __WEBPACK_IMPORTED_MODULE_5_react___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_5_react__), __WEBPACK_IMPORTED_MODULE_6_prop_types__ = __webpack_require__(1), __WEBPACK_IMPORTED_MODULE_6_prop_types___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_6_prop_types__), __WEBPACK_IMPORTED_MODULE_7__Label__ = __webpack_require__(42), __WEBPACK_IMPORTED_MODULE_8__container_Layer__ = __webpack_require__(14), __WEBPACK_IMPORTED_MODULE_9__util_ReactUtils__ = __webpack_require__(4), 
__WEBPACK_IMPORTED_MODULE_10__util_ChartUtils__ = __webpack_require__(16), _extends = Object.assign || function(target) { + var __WEBPACK_IMPORTED_MODULE_0_lodash_isObject__ = __webpack_require__(32), __WEBPACK_IMPORTED_MODULE_0_lodash_isObject___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_isObject__), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__ = __webpack_require__(8), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__), __WEBPACK_IMPORTED_MODULE_2_lodash_isNil__ = __webpack_require__(20), __WEBPACK_IMPORTED_MODULE_2_lodash_isNil___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_lodash_isNil__), __WEBPACK_IMPORTED_MODULE_3_lodash_last__ = __webpack_require__(921), __WEBPACK_IMPORTED_MODULE_3_lodash_last___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_lodash_last__), __WEBPACK_IMPORTED_MODULE_4_lodash_isArray__ = __webpack_require__(13), __WEBPACK_IMPORTED_MODULE_4_lodash_isArray___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_lodash_isArray__), __WEBPACK_IMPORTED_MODULE_5_react__ = __webpack_require__(0), __WEBPACK_IMPORTED_MODULE_5_react___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_5_react__), __WEBPACK_IMPORTED_MODULE_6_prop_types__ = __webpack_require__(1), __WEBPACK_IMPORTED_MODULE_6_prop_types___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_6_prop_types__), __WEBPACK_IMPORTED_MODULE_7__Label__ = __webpack_require__(44), __WEBPACK_IMPORTED_MODULE_8__container_Layer__ = __webpack_require__(14), __WEBPACK_IMPORTED_MODULE_9__util_ReactUtils__ = __webpack_require__(4), __WEBPACK_IMPORTED_MODULE_10__util_ChartUtils__ = __webpack_require__(16), _extends = Object.assign || function(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) Object.prototype.hasOwnProperty.call(source, key) && (target[key] = source[key]); @@ -2174,7 +2184,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { } }), superClass && (Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass); } - var __WEBPACK_IMPORTED_MODULE_0_lodash_sortBy__ = __webpack_require__(284), __WEBPACK_IMPORTED_MODULE_0_lodash_sortBy___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_sortBy__), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__ = __webpack_require__(8), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__), __WEBPACK_IMPORTED_MODULE_2_lodash_range__ = __webpack_require__(334), __WEBPACK_IMPORTED_MODULE_2_lodash_range___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_lodash_range__), __WEBPACK_IMPORTED_MODULE_3_lodash_throttle__ = __webpack_require__(790), __WEBPACK_IMPORTED_MODULE_3_lodash_throttle___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_lodash_throttle__), __WEBPACK_IMPORTED_MODULE_4_lodash_isNil__ = __webpack_require__(20), __WEBPACK_IMPORTED_MODULE_4_lodash_isNil___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_lodash_isNil__), __WEBPACK_IMPORTED_MODULE_5_react__ = __webpack_require__(0), __WEBPACK_IMPORTED_MODULE_5_react___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_5_react__), __WEBPACK_IMPORTED_MODULE_6_prop_types__ = __webpack_require__(1), __WEBPACK_IMPORTED_MODULE_6_prop_types___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_6_prop_types__), __WEBPACK_IMPORTED_MODULE_7_classnames__ = __webpack_require__(3), __WEBPACK_IMPORTED_MODULE_7_classnames___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_7_classnames__), __WEBPACK_IMPORTED_MODULE_8__container_Surface__ = __webpack_require__(79), __WEBPACK_IMPORTED_MODULE_9__container_Layer__ = __webpack_require__(14), __WEBPACK_IMPORTED_MODULE_10__component_Tooltip__ = __webpack_require__(121), __WEBPACK_IMPORTED_MODULE_11__component_Legend__ = __webpack_require__(170), __WEBPACK_IMPORTED_MODULE_12__shape_Curve__ = __webpack_require__(66), __WEBPACK_IMPORTED_MODULE_13__shape_Cross__ = __webpack_require__(328), __WEBPACK_IMPORTED_MODULE_14__shape_Sector__ = __webpack_require__(127), __WEBPACK_IMPORTED_MODULE_15__shape_Dot__ = __webpack_require__(56), __WEBPACK_IMPORTED_MODULE_16__shape_Rectangle__ = __webpack_require__(65), __WEBPACK_IMPORTED_MODULE_17__util_ReactUtils__ = __webpack_require__(4), __WEBPACK_IMPORTED_MODULE_18__cartesian_CartesianAxis__ = __webpack_require__(335), __WEBPACK_IMPORTED_MODULE_19__cartesian_Brush__ = __webpack_require__(333), __WEBPACK_IMPORTED_MODULE_20__util_DOMUtils__ = __webpack_require__(183), __WEBPACK_IMPORTED_MODULE_21__util_DataUtils__ = __webpack_require__(9), __WEBPACK_IMPORTED_MODULE_22__util_ChartUtils__ = __webpack_require__(16), __WEBPACK_IMPORTED_MODULE_23__util_PolarUtils__ = __webpack_require__(23), __WEBPACK_IMPORTED_MODULE_24__util_PureRender__ = __webpack_require__(5), __WEBPACK_IMPORTED_MODULE_25__util_Events__ = __webpack_require__(791), _extends = Object.assign || function(target) { + var __WEBPACK_IMPORTED_MODULE_0_lodash_sortBy__ = __webpack_require__(321), __WEBPACK_IMPORTED_MODULE_0_lodash_sortBy___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_sortBy__), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__ = __webpack_require__(8), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__), __WEBPACK_IMPORTED_MODULE_2_lodash_range__ = __webpack_require__(373), __WEBPACK_IMPORTED_MODULE_2_lodash_range___default = 
__webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_lodash_range__), __WEBPACK_IMPORTED_MODULE_3_lodash_throttle__ = __webpack_require__(932), __WEBPACK_IMPORTED_MODULE_3_lodash_throttle___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_lodash_throttle__), __WEBPACK_IMPORTED_MODULE_4_lodash_isNil__ = __webpack_require__(20), __WEBPACK_IMPORTED_MODULE_4_lodash_isNil___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_lodash_isNil__), __WEBPACK_IMPORTED_MODULE_5_react__ = __webpack_require__(0), __WEBPACK_IMPORTED_MODULE_5_react___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_5_react__), __WEBPACK_IMPORTED_MODULE_6_prop_types__ = __webpack_require__(1), __WEBPACK_IMPORTED_MODULE_6_prop_types___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_6_prop_types__), __WEBPACK_IMPORTED_MODULE_7_classnames__ = __webpack_require__(3), __WEBPACK_IMPORTED_MODULE_7_classnames___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_7_classnames__), __WEBPACK_IMPORTED_MODULE_8__container_Surface__ = __webpack_require__(82), __WEBPACK_IMPORTED_MODULE_9__container_Layer__ = __webpack_require__(14), __WEBPACK_IMPORTED_MODULE_10__component_Tooltip__ = __webpack_require__(125), __WEBPACK_IMPORTED_MODULE_11__component_Legend__ = __webpack_require__(180), __WEBPACK_IMPORTED_MODULE_12__shape_Curve__ = __webpack_require__(71), __WEBPACK_IMPORTED_MODULE_13__shape_Cross__ = __webpack_require__(367), __WEBPACK_IMPORTED_MODULE_14__shape_Sector__ = __webpack_require__(139), __WEBPACK_IMPORTED_MODULE_15__shape_Dot__ = __webpack_require__(63), __WEBPACK_IMPORTED_MODULE_16__shape_Rectangle__ = __webpack_require__(70), __WEBPACK_IMPORTED_MODULE_17__util_ReactUtils__ = __webpack_require__(4), __WEBPACK_IMPORTED_MODULE_18__cartesian_CartesianAxis__ = __webpack_require__(374), __WEBPACK_IMPORTED_MODULE_19__cartesian_Brush__ = __webpack_require__(372), __WEBPACK_IMPORTED_MODULE_20__util_DOMUtils__ = __webpack_require__(198), __WEBPACK_IMPORTED_MODULE_21__util_DataUtils__ = __webpack_require__(9), __WEBPACK_IMPORTED_MODULE_22__util_ChartUtils__ = __webpack_require__(16), __WEBPACK_IMPORTED_MODULE_23__util_PolarUtils__ = __webpack_require__(23), __WEBPACK_IMPORTED_MODULE_24__util_PureRender__ = __webpack_require__(5), __WEBPACK_IMPORTED_MODULE_25__util_Events__ = __webpack_require__(933), _extends = Object.assign || function(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) Object.prototype.hasOwnProperty.call(source, key) && (target[key] = source[key]); @@ -3194,7 +3204,42 @@ var _bundleJs = []byte((((((((((`!function(modules) { }; __webpack_exports__.a = generateCategoricalChart; }, function(module, exports, __webpack_require__) { - var aFunction = __webpack_require__(205); + "use strict"; + (function(process) { + function invariant(condition, format, a, b, c, d, e, f) { + if (validateFormat(format), !condition) { + var error; + if (void 0 === format) error = new Error("Minified exception occurred; use the non-minified dev environment for the full error message and additional helpful warnings."); else { + var args = [ a, b, c, d, e, f ], argIndex = 0; + error = new Error(format.replace(/%s/g, function() { + return args[argIndex++]; + })), error.name = "Invariant Violation"; + } + throw error.framesToPop = 1, error; + } + } + var validateFormat = function(format) {}; + "production" !== process.env.NODE_ENV && (validateFormat = function(format) { + if (void 0 === format) throw new Error("invariant requires an error message 
argument"); + }), module.exports = invariant; + }).call(exports, __webpack_require__(2)); +}, function(module, exports, __webpack_require__) { + "use strict"; + function makeEmptyFunction(arg) { + return function() { + return arg; + }; + } + var emptyFunction = function() {}; + emptyFunction.thatReturns = makeEmptyFunction, emptyFunction.thatReturnsFalse = makeEmptyFunction(!1), + emptyFunction.thatReturnsTrue = makeEmptyFunction(!0), emptyFunction.thatReturnsNull = makeEmptyFunction(null), + emptyFunction.thatReturnsThis = function() { + return this; + }, emptyFunction.thatReturnsArgument = function(arg) { + return arg; + }, module.exports = emptyFunction; +}, function(module, exports, __webpack_require__) { + var aFunction = __webpack_require__(223); module.exports = function(fn, that, length) { if (aFunction(fn), void 0 === that) return fn; switch (length) { @@ -3238,7 +3283,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }; }, function(module, exports, __webpack_require__) { module.exports = { - default: __webpack_require__(382), + default: __webpack_require__(421), __esModule: !0 }; }, function(module, exports, __webpack_require__) { @@ -3285,17 +3330,17 @@ var _bundleJs = []byte((((((((((`!function(modules) { Object.defineProperty(exports, "__esModule", { value: !0 }); - var _typeof2 = __webpack_require__(101), _typeof3 = _interopRequireDefault(_typeof2), _keys = __webpack_require__(50), _keys2 = _interopRequireDefault(_keys); + var _typeof2 = __webpack_require__(105), _typeof3 = _interopRequireDefault(_typeof2), _keys = __webpack_require__(55), _keys2 = _interopRequireDefault(_keys); exports.capitalize = capitalize, exports.contains = contains, exports.findIndex = findIndex, exports.find = find, exports.createChainedFunction = createChainedFunction; - var _warning = __webpack_require__(12), _warning2 = _interopRequireDefault(_warning); + var _warning = __webpack_require__(11), _warning2 = _interopRequireDefault(_warning); }).call(exports, __webpack_require__(2)); }, function(module, exports, __webpack_require__) { function getNative(object, key) { var value = getValue(object, key); return baseIsNative(value) ? value : void 0; } - var baseIsNative = __webpack_require__(564), getValue = __webpack_require__(567); + var baseIsNative = __webpack_require__(610), getValue = __webpack_require__(613); module.exports = getNative; }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; @@ -3304,6 +3349,19 @@ var _bundleJs = []byte((((((((((`!function(modules) { return x; }; }; +}, function(module, exports, __webpack_require__) { + function getNative(object, key) { + var value = getValue(object, key); + return baseIsNative(value) ? value : void 0; + } + var baseIsNative = __webpack_require__(667), getValue = __webpack_require__(672); + module.exports = getNative; +}, function(module, exports, __webpack_require__) { + function baseGetTag(value) { + return null == value ? void 0 === value ? undefinedTag : nullTag : symToStringTag && symToStringTag in Object(value) ? getRawTag(value) : objectToString(value); + } + var Symbol = __webpack_require__(128), getRawTag = __webpack_require__(668), objectToString = __webpack_require__(669), nullTag = "[object Null]", undefinedTag = "[object Undefined]", symToStringTag = Symbol ? 
Symbol.toStringTag : void 0; + module.exports = baseGetTag; }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; function _objectWithoutProperties(obj, keys) { @@ -3329,7 +3387,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { } }), superClass && (Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass); } - var _class, _temp2, __WEBPACK_IMPORTED_MODULE_0_lodash_isNil__ = __webpack_require__(20), __WEBPACK_IMPORTED_MODULE_0_lodash_isNil___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_isNil__), __WEBPACK_IMPORTED_MODULE_1_react__ = __webpack_require__(0), __WEBPACK_IMPORTED_MODULE_1_react___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_react__), __WEBPACK_IMPORTED_MODULE_2_prop_types__ = __webpack_require__(1), __WEBPACK_IMPORTED_MODULE_2_prop_types___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_prop_types__), __WEBPACK_IMPORTED_MODULE_3_reduce_css_calc__ = __webpack_require__(688), __WEBPACK_IMPORTED_MODULE_3_reduce_css_calc___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_reduce_css_calc__), __WEBPACK_IMPORTED_MODULE_4_classnames__ = __webpack_require__(3), __WEBPACK_IMPORTED_MODULE_4_classnames___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_classnames__), __WEBPACK_IMPORTED_MODULE_5__util_DataUtils__ = __webpack_require__(9), __WEBPACK_IMPORTED_MODULE_6__util_ReactUtils__ = __webpack_require__(4), __WEBPACK_IMPORTED_MODULE_7__util_DOMUtils__ = __webpack_require__(183), _extends = Object.assign || function(target) { + var _class, _temp2, __WEBPACK_IMPORTED_MODULE_0_lodash_isNil__ = __webpack_require__(20), __WEBPACK_IMPORTED_MODULE_0_lodash_isNil___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_isNil__), __WEBPACK_IMPORTED_MODULE_1_react__ = __webpack_require__(0), __WEBPACK_IMPORTED_MODULE_1_react___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_react__), __WEBPACK_IMPORTED_MODULE_2_prop_types__ = __webpack_require__(1), __WEBPACK_IMPORTED_MODULE_2_prop_types___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_prop_types__), __WEBPACK_IMPORTED_MODULE_3_reduce_css_calc__ = __webpack_require__(770), __WEBPACK_IMPORTED_MODULE_3_reduce_css_calc___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_reduce_css_calc__), __WEBPACK_IMPORTED_MODULE_4_classnames__ = __webpack_require__(3), __WEBPACK_IMPORTED_MODULE_4_classnames___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_classnames__), __WEBPACK_IMPORTED_MODULE_5__util_DataUtils__ = __webpack_require__(9), __WEBPACK_IMPORTED_MODULE_6__util_ReactUtils__ = __webpack_require__(4), __WEBPACK_IMPORTED_MODULE_7__util_DOMUtils__ = __webpack_require__(198), _extends = Object.assign || function(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) Object.prototype.hasOwnProperty.call(source, key) && (target[key] = source[key]); @@ -3549,12 +3607,12 @@ var _bundleJs = []byte((((((((((`!function(modules) { }, _class = _temp)) || _class; __webpack_exports__.a = Dot; }, function(module, exports, __webpack_require__) { - var IObject = __webpack_require__(134), defined = __webpack_require__(136); + var IObject = __webpack_require__(146), defined = __webpack_require__(148); module.exports = function(it) { return IObject(defined(it)); }; }, function(module, exports, __webpack_require__) { - var defined = __webpack_require__(136); + var defined = __webpack_require__(148); module.exports = 
function(it) { return Object(defined(it)); }; @@ -3593,7 +3651,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { return protoProps && defineProperties(Constructor.prototype, protoProps), staticProps && defineProperties(Constructor, staticProps), Constructor; }; - }(), _warning = __webpack_require__(12), _warning2 = _interopRequireDefault(_warning), _toCss = __webpack_require__(152), _toCss2 = _interopRequireDefault(_toCss), _toCssValue = __webpack_require__(106), _toCssValue2 = _interopRequireDefault(_toCssValue), StyleRule = function() { + }(), _warning = __webpack_require__(11), _warning2 = _interopRequireDefault(_warning), _toCss = __webpack_require__(163), _toCss2 = _interopRequireDefault(_toCss), _toCssValue = __webpack_require__(110), _toCssValue2 = _interopRequireDefault(_toCssValue), StyleRule = function() { function StyleRule(key, style, options) { _classCallCheck(this, StyleRule), this.type = "style", this.isProcessed = !1; var sheet = options.sheet, Renderer = options.Renderer, selector = options.selector; @@ -3657,34 +3715,17 @@ var _bundleJs = []byte((((((((((`!function(modules) { } ]), StyleRule; }(); exports.default = StyleRule; -}, function(module, exports) { - var g; - g = function() { - return this; - }(); - try { - g = g || Function("return this")() || (0, eval)("this"); - } catch (e) { - "object" == typeof window && (g = window); - } - module.exports = g; }, function(module, exports, __webpack_require__) { function isSymbol(value) { return "symbol" == typeof value || isObjectLike(value) && baseGetTag(value) == symbolTag; } - var baseGetTag = __webpack_require__(41), isObjectLike = __webpack_require__(36), symbolTag = "[object Symbol]"; + var baseGetTag = __webpack_require__(41), isObjectLike = __webpack_require__(42), symbolTag = "[object Symbol]"; module.exports = isSymbol; }, function(module, exports) { function identity(value) { return value; } module.exports = identity; -}, function(module, exports, __webpack_require__) { - function baseIteratee(value) { - return "function" == typeof value ? value : null == value ? identity : "object" == typeof value ? isArray(value) ? baseMatchesProperty(value[0], value[1]) : baseMatches(value) : property(value); - } - var baseMatches = __webpack_require__(671), baseMatchesProperty = __webpack_require__(674), identity = __webpack_require__(62), isArray = __webpack_require__(11), property = __webpack_require__(678); - module.exports = baseIteratee; }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; __webpack_exports__.a = function(a, b) { @@ -3855,7 +3896,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { } }), superClass && (Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass); } - var _class, _class2, _temp, __WEBPACK_IMPORTED_MODULE_0_lodash_isArray__ = __webpack_require__(11), __WEBPACK_IMPORTED_MODULE_0_lodash_isArray___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_isArray__), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__ = __webpack_require__(8), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__), __WEBPACK_IMPORTED_MODULE_2_react__ = __webpack_require__(0), __WEBPACK_IMPORTED_MODULE_2_react___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_react__), __WEBPACK_IMPORTED_MODULE_3_prop_types__ = __webpack_require__(1), __WEBPACK_IMPORTED_MODULE_3_prop_types___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_prop_types__), __WEBPACK_IMPORTED_MODULE_4_d3_shape__ = __webpack_require__(172), __WEBPACK_IMPORTED_MODULE_5_classnames__ = __webpack_require__(3), __WEBPACK_IMPORTED_MODULE_5_classnames___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_5_classnames__), __WEBPACK_IMPORTED_MODULE_6__util_PureRender__ = __webpack_require__(5), __WEBPACK_IMPORTED_MODULE_7__util_ReactUtils__ = __webpack_require__(4), __WEBPACK_IMPORTED_MODULE_8__util_DataUtils__ = __webpack_require__(9), _extends = Object.assign || function(target) { + var _class, _class2, _temp, __WEBPACK_IMPORTED_MODULE_0_lodash_isArray__ = __webpack_require__(13), __WEBPACK_IMPORTED_MODULE_0_lodash_isArray___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_lodash_isArray__), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__ = __webpack_require__(8), __WEBPACK_IMPORTED_MODULE_1_lodash_isFunction___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_1_lodash_isFunction__), __WEBPACK_IMPORTED_MODULE_2_react__ = __webpack_require__(0), __WEBPACK_IMPORTED_MODULE_2_react___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_react__), __WEBPACK_IMPORTED_MODULE_3_prop_types__ = __webpack_require__(1), __WEBPACK_IMPORTED_MODULE_3_prop_types___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_prop_types__), __WEBPACK_IMPORTED_MODULE_4_d3_shape__ = __webpack_require__(182), __WEBPACK_IMPORTED_MODULE_5_classnames__ = __webpack_require__(3), __WEBPACK_IMPORTED_MODULE_5_classnames___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_5_classnames__), __WEBPACK_IMPORTED_MODULE_6__util_PureRender__ = __webpack_require__(5), __WEBPACK_IMPORTED_MODULE_7__util_ReactUtils__ = __webpack_require__(4), __WEBPACK_IMPORTED_MODULE_8__util_DataUtils__ = __webpack_require__(9), _extends = Object.assign || function(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) Object.prototype.hasOwnProperty.call(source, key) && (target[key] = source[key]); @@ -4173,26 +4214,6 @@ var _bundleJs = []byte((((((((((`!function(modules) { } return to; }; -}, function(module, exports, __webpack_require__) { - "use strict"; - (function(process) { - function invariant(condition, format, a, b, c, d, e, f) { - if (validateFormat(format), !condition) { - var error; - if (void 0 === format) error = new Error("Minified exception occurred; use the non-minified dev environment for the full error message and additional helpful warnings."); else { - var args = [ a, b, c, d, e, f ], argIndex = 0; - error = new Error(format.replace(/%s/g, function() { - return args[argIndex++]; - })), error.name = "Invariant Violation"; - } - throw error.framesToPop = 1, error; - } 
- } - var validateFormat = function(format) {}; - "production" !== process.env.NODE_ENV && (validateFormat = function(format) { - if (void 0 === format) throw new Error("invariant requires an error message argument"); - }), module.exports = invariant; - }).call(exports, __webpack_require__(2)); }, function(module, exports) { module.exports = function(bitmap, value) { return { @@ -4203,7 +4224,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }; }; }, function(module, exports, __webpack_require__) { - var $keys = __webpack_require__(208), enumBugKeys = __webpack_require__(140); + var $keys = __webpack_require__(226), enumBugKeys = __webpack_require__(152); module.exports = Object.keys || function(O) { return $keys(O, enumBugKeys); }; @@ -4261,7 +4282,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }, function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = !0; - var _getDisplayName = __webpack_require__(226), _getDisplayName2 = function(obj) { + var _getDisplayName = __webpack_require__(244), _getDisplayName2 = function(obj) { return obj && obj.__esModule ? obj : { default: obj }; @@ -4300,7 +4321,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { return protoProps && defineProperties(Constructor.prototype, protoProps), staticProps && defineProperties(Constructor, staticProps), Constructor; }; - }(), _createRule = __webpack_require__(107), _createRule2 = _interopRequireDefault(_createRule), _linkRule = __webpack_require__(231), _linkRule2 = _interopRequireDefault(_linkRule), _StyleRule = __webpack_require__(59), _StyleRule2 = _interopRequireDefault(_StyleRule), _escape = __webpack_require__(428), _escape2 = _interopRequireDefault(_escape), RuleList = function() { + }(), _createRule = __webpack_require__(111), _createRule2 = _interopRequireDefault(_createRule), _linkRule = __webpack_require__(249), _linkRule2 = _interopRequireDefault(_linkRule), _StyleRule = __webpack_require__(66), _StyleRule2 = _interopRequireDefault(_StyleRule), _escape = __webpack_require__(467), _escape2 = _interopRequireDefault(_escape), RuleList = function() { function RuleList(options) { _classCallCheck(this, RuleList), this.map = {}, this.raw = {}, this.index = [], this.options = options, this.classes = options.classes; @@ -4444,9 +4465,6 @@ var _bundleJs = []byte((((((((((`!function(modules) { color: "rgba(255, 255, 255, 0.54)" } }; -}, function(module, exports, __webpack_require__) { - var root = __webpack_require__(32), Symbol = root.Symbol; - module.exports = Symbol; }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; function _objectWithoutProperties(obj, keys) { @@ -4490,15 +4508,12 @@ var _bundleJs = []byte((((((((((`!function(modules) { children: __WEBPACK_IMPORTED_MODULE_1_prop_types___default.a.oneOfType([ __WEBPACK_IMPORTED_MODULE_1_prop_types___default.a.arrayOf(__WEBPACK_IMPORTED_MODULE_1_prop_types___default.a.node), __WEBPACK_IMPORTED_MODULE_1_prop_types___default.a.node ]) }; Surface.propTypes = propTypes, __webpack_exports__.a = Surface; -}, function(module, exports) { - function arrayMap(array, iteratee) { - for (var index = -1, length = null == array ? 
0 : array.length, result = Array(length); ++index < length; ) result[index] = iteratee(array[index], index, array); - return result; - } - module.exports = arrayMap; +}, function(module, exports, __webpack_require__) { + var root = __webpack_require__(31), Symbol = root.Symbol; + module.exports = Symbol; }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; - var __WEBPACK_IMPORTED_MODULE_0__src_path__ = __webpack_require__(586); + var __WEBPACK_IMPORTED_MODULE_0__src_path__ = __webpack_require__(632); __webpack_require__.d(__webpack_exports__, "a", function() { return __WEBPACK_IMPORTED_MODULE_0__src_path__.a; }); @@ -4546,12 +4561,6 @@ var _bundleJs = []byte((((((((((`!function(modules) { for (var n = series.length, o = new Array(n); --n >= 0; ) o[n] = n; return o; }; -}, function(module, exports, __webpack_require__) { - function isArrayLike(value) { - return null != value && isLength(value.length) && !isFunction(value); - } - var isFunction = __webpack_require__(8), isLength = __webpack_require__(181); - module.exports = isArrayLike; }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; function Cell() { @@ -4567,6 +4576,12 @@ var _bundleJs = []byte((((((((((`!function(modules) { }; Cell.propTypes = _extends({}, __WEBPACK_IMPORTED_MODULE_1__util_ReactUtils__.c), Cell.displayName = "Cell", __webpack_exports__.a = Cell; +}, function(module, exports, __webpack_require__) { + function baseIteratee(value) { + return "function" == typeof value ? value : null == value ? identity : "object" == typeof value ? isArray(value) ? baseMatchesProperty(value[0], value[1]) : baseMatches(value) : property(value); + } + var baseMatches = __webpack_require__(814), baseMatchesProperty = __webpack_require__(817), identity = __webpack_require__(68), isArray = __webpack_require__(13), property = __webpack_require__(821); + module.exports = baseIteratee; }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; __webpack_exports__.a = function(x) { @@ -4601,29 +4616,29 @@ var _bundleJs = []byte((((((((((`!function(modules) { }, linearish(scale); } __webpack_exports__.b = linearish, __webpack_exports__.a = linear; - var __WEBPACK_IMPORTED_MODULE_0_d3_array__ = __webpack_require__(37), __WEBPACK_IMPORTED_MODULE_1_d3_interpolate__ = __webpack_require__(89), __WEBPACK_IMPORTED_MODULE_2__continuous__ = __webpack_require__(125), __WEBPACK_IMPORTED_MODULE_3__tickFormat__ = __webpack_require__(742); + var __WEBPACK_IMPORTED_MODULE_0_d3_array__ = __webpack_require__(37), __WEBPACK_IMPORTED_MODULE_1_d3_interpolate__ = __webpack_require__(92), __WEBPACK_IMPORTED_MODULE_2__continuous__ = __webpack_require__(137), __WEBPACK_IMPORTED_MODULE_3__tickFormat__ = __webpack_require__(882); }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; - var __WEBPACK_IMPORTED_MODULE_0__src_value__ = __webpack_require__(186); + var __WEBPACK_IMPORTED_MODULE_0__src_value__ = __webpack_require__(206); __webpack_require__.d(__webpack_exports__, "a", function() { return __WEBPACK_IMPORTED_MODULE_0__src_value__.a; }); - var __WEBPACK_IMPORTED_MODULE_5__src_number__ = (__webpack_require__(310), __webpack_require__(189), - __webpack_require__(308), __webpack_require__(311), __webpack_require__(124)); + var __WEBPACK_IMPORTED_MODULE_5__src_number__ = (__webpack_require__(349), __webpack_require__(209), + __webpack_require__(347), __webpack_require__(350), __webpack_require__(136)); __webpack_require__.d(__webpack_exports__, "c", function() { return 
__WEBPACK_IMPORTED_MODULE_5__src_number__.a; }); - var __WEBPACK_IMPORTED_MODULE_7__src_round__ = (__webpack_require__(312), __webpack_require__(732)); + var __WEBPACK_IMPORTED_MODULE_7__src_round__ = (__webpack_require__(351), __webpack_require__(872)); __webpack_require__.d(__webpack_exports__, "d", function() { return __WEBPACK_IMPORTED_MODULE_7__src_round__.a; }); - var __WEBPACK_IMPORTED_MODULE_15__src_cubehelix__ = (__webpack_require__(313), __webpack_require__(733), - __webpack_require__(736), __webpack_require__(307), __webpack_require__(737), __webpack_require__(738), - __webpack_require__(739), __webpack_require__(740)); + var __WEBPACK_IMPORTED_MODULE_15__src_cubehelix__ = (__webpack_require__(352), __webpack_require__(873), + __webpack_require__(876), __webpack_require__(346), __webpack_require__(877), __webpack_require__(878), + __webpack_require__(879), __webpack_require__(880)); __webpack_require__.d(__webpack_exports__, "b", function() { return __WEBPACK_IMPORTED_MODULE_15__src_cubehelix__.a; }); - __webpack_require__(741); + __webpack_require__(881); }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; function linear(a, d) { @@ -4650,7 +4665,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { return d ? linear(a, d) : Object(__WEBPACK_IMPORTED_MODULE_0__constant__.a)(isNaN(a) ? b : a); } __webpack_exports__.c = hue, __webpack_exports__.b = gamma, __webpack_exports__.a = nogamma; - var __WEBPACK_IMPORTED_MODULE_0__constant__ = __webpack_require__(309); + var __WEBPACK_IMPORTED_MODULE_0__constant__ = __webpack_require__(348); }, function(module, __webpack_exports__, __webpack_require__) { "use strict"; __webpack_exports__.a = function(s) { @@ -4845,7 +4860,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }, function(module, exports, __webpack_require__) { "use strict"; (function(process) { - var emptyFunction = __webpack_require__(39), warning = emptyFunction; + var emptyFunction = __webpack_require__(50), warning = emptyFunction; if ("production" !== process.env.NODE_ENV) { var printWarning = function(format) { for (var _len = arguments.length, args = Array(_len > 1 ? _len - 1 : 0), _key = 1; _key < _len; _key++) args[_key - 1] = arguments[_key]; @@ -4858,7 +4873,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { } catch (x) {} }; warning = function(condition, format) { - if (void 0 === format) throw new Error("` + "`")) + (`warning(condition, format, ...args)` + ("`" + ` requires a warning message argument"); + if (void 0 === format) throw new Error("` + ("`" + `warning(condition, format, ...args)`)) + ("`" + (` requires a warning message argument"); if (0 !== format.indexOf("Failed Composite propType: ") && !condition) { for (var _len2 = arguments.length, args = Array(_len2 > 2 ? _len2 - 2 : 0), _key2 = 2; _key2 < _len2; _key2++) args[_key2 - 2] = arguments[_key2]; printWarning.apply(void 0, [ format ].concat(args)); @@ -4880,7 +4895,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { } } } - "production" === process.env.NODE_ENV ? (checkDCE(), module.exports = __webpack_require__(339)) : module.exports = __webpack_require__(342); + "production" === process.env.NODE_ENV ? 
(checkDCE(), module.exports = __webpack_require__(378)) : module.exports = __webpack_require__(381); }).call(exports, __webpack_require__(2)); }, function(module, exports, __webpack_require__) { "use strict"; @@ -4898,10 +4913,12 @@ var _bundleJs = []byte((((((((((`!function(modules) { var hasOwnProperty = Object.prototype.hasOwnProperty; module.exports = shallowEqual; }, function(module, exports, __webpack_require__) { - var toInteger = __webpack_require__(137), min = Math.min; + var toInteger = __webpack_require__(149), min = Math.min; module.exports = function(it) { return it > 0 ? min(toInteger(it), 9007199254740991) : 0; }; +}, function(module, exports) { + module.exports = !0; }, function(module, exports) { var id = 0, px = Math.random(); module.exports = function(key) { @@ -4917,7 +4934,7 @@ var _bundleJs = []byte((((((((((`!function(modules) { }; } exports.__esModule = !0; - var _iterator = __webpack_require__(357), _iterator2 = _interopRequireDefault(_iterator), _symbol = __webpack_require__(365), _symbol2 = _interopRequireDefault(_symbol), _typeof = "function" == typeof _symbol2.default && "symbol" == typeof _iterator2.default ? function(obj) { + var _iterator = __webpack_require__(396), _iterator2 = _interopRequireDefault(_iterator), _symbol = __webpack_require__(404), _symbol2 = _interopRequireDefault(_symbol), _typeof = "function" == typeof _symbol2.default && "symbol" == typeof _iterator2.default ? function(obj) { return typeof obj; } : function(obj) { return obj && "function" == typeof _symbol2.default && obj.constructor === _symbol2.default && obj !== _symbol2.default.prototype ? "symbol" : typeof obj; @@ -4928,9 +4945,9 @@ var _bundleJs = []byte((((((((((`!function(modules) { return obj && "function" == typeof _symbol2.default && obj.constructor === _symbol2.default && obj !== _symbol2.default.prototype ? "symbol" : void 0 === obj ? "undefined" : _typeof(obj); }; }, function(module, exports, __webpack_require__) { - var anObject = __webpack_require__(47), dPs = __webpack_require__(361), enumBugKeys = __webpack_require__(140), IE_PROTO = __webpack_require__(138)("IE_PROTO"), Empty = function() {}, createDict = function() { - var iframeDocument, iframe = __webpack_require__(207)("iframe"), i = enumBugKeys.length; - for (iframe.style.display = "none", __webpack_require__(362).appendChild(iframe), + var anObject = __webpack_require__(52), dPs = __webpack_require__(400), enumBugKeys = __webpack_require__(152), IE_PROTO = __webpack_require__(150)("IE_PROTO"), Empty = function() {}, createDict = function() { + var iframeDocument, iframe = __webpack_require__(225)("iframe"), i = enumBugKeys.length; + for (iframe.style.display = "none", __webpack_require__(401).appendChild(iframe), iframe.src = "javascript:", iframeDocument = iframe.contentWindow.document, iframeDocument.open(), iframeDocument.write("`, "/"+uriCopy.String()) - }, - }} -} - -//ValidateCaseErrors is a method that process the request object through certain validators -//that assert if certain conditions are met for further information to log as an error -func ValidateCaseErrors(r *Request) string { - for _, err := range caseErrors { - if err.Validator(r) { - return err.Msg(r) - } - } - - return "" -} - -//ShowMultipeChoices is used when a user requests a resource in a manifest which results -//in ambiguous results. It returns a HTML page with clickable links of each of the entry -//in the manifest which fits the request URI ambiguity. 
-//For example, if the user requests bzz://read and that manifest contains entries
-//"readme.md" and "readinglist.txt", an HTML page is returned with these two links.
-//This only applies if the manifest has no default entry
-func ShowMultipleChoices(w http.ResponseWriter, req *Request, list api.ManifestList) {
-	msg := ""
-	if list.Entries == nil {
-		Respond(w, req, "Could not resolve", http.StatusInternalServerError)
-		return
-	}
-	//make links relative
-	//requestURI comes with the prefix of the ambiguous path, e.g. "read" for "readme.md" and "readinglist.txt"
-	//to get clickable links, need to remove the ambiguous path, i.e. "read"
-	idx := strings.LastIndex(req.RequestURI, "/")
-	if idx == -1 {
-		Respond(w, req, "Internal Server Error", http.StatusInternalServerError)
-		return
-	}
-	//remove ambiguous part
-	base := req.RequestURI[:idx+1]
-	for _, e := range list.Entries {
-		//create clickable link for each entry
-		msg += "<a href='" + base + e.Path + "'>" + e.Path + "</a><br/>"
-	}
-	Respond(w, req, msg, http.StatusMultipleChoices)
-}
-
-//Respond is used to show an HTML page to a client.
-//If there is an `Accept` header of `application/json`, JSON will be returned instead.
-//The function just takes a string message which will be displayed in the error page.
-//The code is used to evaluate which template will be displayed
-//(and return the correct HTTP status code)
-func Respond(w http.ResponseWriter, req *Request, msg string, code int) {
-	additionalMessage := ValidateCaseErrors(req)
-	switch code {
-	case http.StatusInternalServerError:
-		log.Output(msg, log.LvlError, l.CallDepth, "ruid", req.ruid, "code", code)
-	case http.StatusMultipleChoices:
-		log.Output(msg, log.LvlDebug, l.CallDepth, "ruid", req.ruid, "code", code)
-		listURI := api.URI{
-			Scheme: "bzz-list",
-			Addr:   req.uri.Addr,
-			Path:   req.uri.Path,
-		}
-		additionalMessage = fmt.Sprintf(`<a href="/%s">multiple choices</a>`, listURI.String())
-	default:
-		log.Output(msg, log.LvlDebug, l.CallDepth, "ruid", req.ruid, "code", code)
-	}
-
-	if code >= 400 {
-		w.Header().Del("Cache-Control") //avoid sending cache headers for errors!
-		w.Header().Del("ETag")
-	}
-
-	respond(w, &req.Request, &ResponseParams{
-		Code:      code,
-		Msg:       msg,
-		Details:   template.HTML(additionalMessage),
-		Timestamp: time.Now().Format(time.RFC1123),
-		template:  getTemplate(code),
-	})
-}
-
-//evaluate if client accepts html or json response
-func respond(w http.ResponseWriter, r *http.Request, params *ResponseParams) {
-	w.WriteHeader(params.Code)
-	if r.Header.Get("Accept") == "application/json" {
-		respondJSON(w, params)
-	} else {
-		respondHTML(w, params)
-	}
-}
-
-//return a HTML page
-func respondHTML(w http.ResponseWriter, params *ResponseParams) {
-	htmlCounter.Inc(1)
-	err := params.template.Execute(w, params)
-	if err != nil {
-		log.Error(err.Error())
-	}
-}
-
-//return JSON
-func respondJSON(w http.ResponseWriter, params *ResponseParams) {
-	jsonCounter.Inc(1)
-	w.Header().Set("Content-Type", "application/json")
-	json.NewEncoder(w).Encode(params)
-}
-
-//get the HTML template for a given code
-func getTemplate(code int) *template.Template {
-	if val, tmpl := templateMap[code]; tmpl {
-		return val
-	}
-	return templateMap[0]
-}
diff --git a/swarm/api/http/error_templates.go b/swarm/api/http/error_templates.go
deleted file mode 100644
index 78f24065a915..000000000000
--- a/swarm/api/http/error_templates.go
+++ /dev/null
@@ -1,559 +0,0 @@
-// Copyright 2017 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-/*
-We use html templates to handle simple but as informative as possible error pages.
-
-To eliminate circular dependency in case of an error, we don't store error pages on swarm.
-We can't save the error pages as html files on disk, or when deploying compiled binaries
-they won't be found.
-
-For this reason we resort to save the HTML error pages as strings, which then can be
-parsed by Go's html/template package
-*/
-package http
-
-//This returns the HTML for generic errors
-func GetGenericErrorPage() string {
-	page := `[inline HTML/CSS omitted: a "Swarm::HTTP Error Page" showing the banner "There was a problem serving the requested page", the {{.Timestamp}}, and a table with "Hmmmmm....Swarm was not able to serve your request!", the error message {{.Msg}}, {{.Details}} and the error code {{.Code}}]`
-	return page
-}
-
-//This returns the HTML for a 404 Not Found error
-func GetNotFoundErrorPage() string {
-	page := `[inline HTML/CSS omitted: a "Swarm::404 HTTP Not Found" page showing the banner "Resource Not Found", the {{.Timestamp}}, and a table with "Unfortunately, the resource you were trying to access could not be found on swarm.", {{.Msg}}, {{.Details}} and the error code {{.Code}}]`
-	return page
-}
-
-//This returns the HTML for a page listing disambiguation options
-//i.e. if user requested bzz://read and the manifest contains "readme.md" and "readinglist.txt",
-//this page is returned with a clickable list of the existing disambiguation links in the manifest
-func GetMultipleChoicesErrorPage() string {
-	page := `[inline HTML/CSS omitted: a "Swarm::HTTP Disambiguation Page" showing the banner "Swarm: disambiguation", the {{.Timestamp}}, the text "Your request may refer to {{ .Details}}." and the error code {{.Code}}]`
-	return page
-}
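The deleted file above embedded entire HTML pages as raw strings; after this patch, response.go (added below) instead looks templates up by name in a TemplatesMap and executes them with a ResponseParams value. A minimal sketch of that lookup-and-execute flow; the map contents and the template body here are illustrative stand-ins, not the real markup:

package main

import (
	"fmt"
	"html/template"
	"os"
)

// A stand-in for the name-keyed template map that RespondTemplate consults.
var TemplatesMap = map[string]*template.Template{
	"error": template.Must(template.New("error").Parse(
		`<h1>{{.Msg}}</h1><p>Error code: {{.Code}}</p><p>{{.Timestamp}}</p>`)),
}

func main() {
	// Msg is template.HTML, so pre-sanitized markup passes through unescaped,
	// matching the Msg field of ResponseParams in response.go.
	params := struct {
		Msg       template.HTML
		Code      int
		Timestamp string
	}{Msg: "Resource Not Found", Code: 404, Timestamp: "Mon, 02 Jan 2006 15:04:05 MST"}

	if err := TemplatesMap["error"].Execute(os.Stdout, params); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}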
diff --git a/swarm/api/http/middleware.go b/swarm/api/http/middleware.go
new file mode 100644
index 000000000000..d338a782cefe
--- /dev/null
+++ b/swarm/api/http/middleware.go
@@ -0,0 +1,95 @@
+package http
+
+import (
+	"fmt"
+	"net/http"
+	"runtime/debug"
+	"strings"
+
+	"github.com/ethereum/go-ethereum/metrics"
+	"github.com/ethereum/go-ethereum/swarm/api"
+	"github.com/ethereum/go-ethereum/swarm/log"
+	"github.com/ethereum/go-ethereum/swarm/spancontext"
+	"github.com/pborman/uuid"
+)
+
+// Adapt chains h (the main request handler) to the given adapters (middleware handlers).
+// Please note that the order of execution for `adapters` is FIFO (adapters[0] will be executed first)
+func Adapt(h http.Handler, adapters ...Adapter) http.Handler {
+	for i := range adapters {
+		adapter := adapters[len(adapters)-1-i]
+		h = adapter(h)
+	}
+	return h
+}
+
+type Adapter func(http.Handler) http.Handler
+
+func SetRequestID(h http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		r = r.WithContext(SetRUID(r.Context(), uuid.New()[:8]))
+		metrics.GetOrRegisterCounter(fmt.Sprintf("http.request.%s", r.Method), nil).Inc(1)
+		log.Info("created ruid for request", "ruid", GetRUID(r.Context()), "method", r.Method, "url", r.RequestURI)
+
+		h.ServeHTTP(w, r)
+	})
+}
+
+func ParseURI(h http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		uri, err := api.Parse(strings.TrimLeft(r.URL.Path, "/"))
+		if err != nil {
+			w.WriteHeader(http.StatusBadRequest)
+			RespondError(w, r, fmt.Sprintf("invalid URI %q", r.URL.Path), http.StatusBadRequest)
+			return
+		}
+		if uri.Addr != "" && strings.HasPrefix(uri.Addr, "0x") {
+			uri.Addr = strings.TrimPrefix(uri.Addr, "0x")
+
+			msg := fmt.Sprintf(`The requested hash seems to be prefixed with '0x'. You will be redirected to the correct URL within 5 seconds.<br/>
+			Please click <a href='%s'>here</a> if your browser does not redirect you within 5 seconds.`, "/"+uri.String())
+			w.WriteHeader(http.StatusNotFound)
+			w.Write([]byte(msg))
+			return
+		}
+
+		ctx := r.Context()
+		r = r.WithContext(SetURI(ctx, uri))
+		log.Debug("parsed request path", "ruid", GetRUID(r.Context()), "method", r.Method, "uri.Addr", uri.Addr, "uri.Path", uri.Path, "uri.Scheme", uri.Scheme)
+
+		h.ServeHTTP(w, r)
+	})
+}
+
+func InitLoggingResponseWriter(h http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		writer := newLoggingResponseWriter(w)
+
+		h.ServeHTTP(writer, r)
+	})
+}
+
+func InstrumentOpenTracing(h http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		uri := GetURI(r.Context())
+		if uri == nil || r.Method == "" || (uri != nil && uri.Scheme == "") {
+			h.ServeHTTP(w, r) // soft fail
+			return
+		}
+		spanName := fmt.Sprintf("http.%s.%s", r.Method, uri.Scheme)
+		ctx, sp := spancontext.StartSpan(r.Context(), spanName)
+		defer sp.Finish()
+		h.ServeHTTP(w, r.WithContext(ctx))
+	})
+}
+
+func RecoverPanic(h http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		defer func() {
+			if err := recover(); err != nil {
+				log.Error("panic recovery!", "stack trace", debug.Stack(), "url", r.URL.String(), "headers", r.Header)
+			}
+		}()
+		h.ServeHTTP(w, r)
+	})
+}
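A standalone sketch of the FIFO guarantee that Adapt documents; Adapt and Adapter are copied from middleware.go above, while the tag adapter and the recorded request are illustrative:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

type Adapter func(http.Handler) http.Handler

// Adapt wraps h so that adapters[0] ends up outermost and therefore runs first.
func Adapt(h http.Handler, adapters ...Adapter) http.Handler {
	for i := range adapters {
		adapter := adapters[len(adapters)-1-i]
		h = adapter(h)
	}
	return h
}

// tag returns an adapter that prints its name before delegating,
// which makes the execution order visible.
func tag(name string) Adapter {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Println("enter", name)
			next.ServeHTTP(w, r)
		})
	}
}

func main() {
	h := Adapt(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Println("handler")
	}), tag("first"), tag("second"))

	h.ServeHTTP(httptest.NewRecorder(), httptest.NewRequest("GET", "/", nil))
	// Prints: enter first, enter second, handler — FIFO order, as documented.
}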
diff --git a/swarm/api/http/response.go b/swarm/api/http/response.go
new file mode 100644
index 000000000000..32c09b1f5729
--- /dev/null
+++ b/swarm/api/http/response.go
@@ -0,0 +1,139 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package http
+
+import (
+	"encoding/json"
+	"fmt"
+	"html/template"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/metrics"
+	"github.com/ethereum/go-ethereum/swarm/api"
+)
+
+//metrics variables
+var (
+	htmlCounter      = metrics.NewRegisteredCounter("api.http.errorpage.html.count", nil)
+	jsonCounter      = metrics.NewRegisteredCounter("api.http.errorpage.json.count", nil)
+	plaintextCounter = metrics.NewRegisteredCounter("api.http.errorpage.plaintext.count", nil)
+)
+
+//parameters needed for formatting the correct HTML page
+type ResponseParams struct {
+	Msg       template.HTML
+	Code      int
+	Timestamp string
+	template  *template.Template
+	Details   template.HTML
+}
+
+//ShowMultipleChoices is used when a user requests a resource in a manifest which results
+//in ambiguous results. It returns an HTML page with clickable links to each of the entries
+//in the manifest which fit the request URI ambiguity.
+//For example, if the user requests bzz://read and that manifest contains entries
+//"readme.md" and "readinglist.txt", an HTML page is returned with these two links.
+//This only applies if the manifest has no default entry
+func ShowMultipleChoices(w http.ResponseWriter, r *http.Request, list api.ManifestList) {
+	log.Debug("ShowMultipleChoices", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context()))
+	msg := ""
+	if list.Entries == nil {
+		RespondError(w, r, "Could not resolve", http.StatusInternalServerError)
+		return
+	}
+	requestUri := strings.TrimPrefix(r.RequestURI, "/")
+
+	uri, err := api.Parse(requestUri)
+	if err != nil {
+		RespondError(w, r, "Bad Request", http.StatusBadRequest)
+		return
+	}
+
+	uri.Scheme = "bzz-list"
+	//request the same url just with bzz-list
+	msg += fmt.Sprintf("Disambiguation:<br/>Your request may refer to multiple choices.<br/>Click <a href='%s'>here</a> if your browser does not redirect you within 5 seconds.<br/>", "/"+uri.String())
+	RespondTemplate(w, r, "error", msg, http.StatusMultipleChoices)
+}
+
+func RespondTemplate(w http.ResponseWriter, r *http.Request, templateName, msg string, code int) {
+	log.Debug("RespondTemplate", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context()))
+	respond(w, r, &ResponseParams{
+		Code:      code,
+		Msg:       template.HTML(msg),
+		Timestamp: time.Now().Format(time.RFC1123),
+		template:  TemplatesMap[templateName],
+	})
+}
+
+func RespondError(w http.ResponseWriter, r *http.Request, msg string, code int) {
+	log.Debug("RespondError", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context()))
+	RespondTemplate(w, r, "error", msg, code)
+}
+
+//evaluate if client accepts html or json response
+func respond(w http.ResponseWriter, r *http.Request, params *ResponseParams) {
+	w.WriteHeader(params.Code)
+
+	if params.Code >= 400 {
+		w.Header().Del("Cache-Control") //avoid sending cache headers for errors!
+		w.Header().Del("ETag")
+	}
+
+	acceptHeader := r.Header.Get("Accept")
+	// this cannot be in a switch form since an Accept header can be in the form of "Accept: */*, text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8"
+	if strings.Contains(acceptHeader, "application/json") {
+		if err := respondJSON(w, r, params); err != nil {
+			RespondError(w, r, "Internal server error", http.StatusInternalServerError)
+		}
+	} else if strings.Contains(acceptHeader, "text/html") {
+		respondHTML(w, r, params)
+	} else {
+		respondPlaintext(w, r, params) //returns nice errors for curl
+	}
+}
+
+//return a HTML page
+func respondHTML(w http.ResponseWriter, r *http.Request, params *ResponseParams) {
+	htmlCounter.Inc(1)
+	log.Debug("respondHTML", "ruid", GetRUID(r.Context()))
+	err := params.template.Execute(w, params)
+	if err != nil {
+		log.Error(err.Error())
+	}
+}
+
+//return JSON
+func respondJSON(w http.ResponseWriter, r *http.Request, params *ResponseParams) error {
+	jsonCounter.Inc(1)
+	log.Debug("respondJSON", "ruid", GetRUID(r.Context()))
+	w.Header().Set("Content-Type", "application/json")
+	return json.NewEncoder(w).Encode(params)
+}
+
+//return plaintext
+func respondPlaintext(w http.ResponseWriter, r *http.Request, params *ResponseParams) error {
+	plaintextCounter.Inc(1)
+	log.Debug("respondPlaintext", "ruid", GetRUID(r.Context()))
+	w.Header().Set("Content-Type", "text/plain")
+	strToWrite := "Code: " + fmt.Sprintf("%d", params.Code) + "\n"
+	strToWrite += "Message: " + string(params.Msg) + "\n"
+	strToWrite += "Timestamp: " + params.Timestamp + "\n"
+	_, err := w.Write([]byte(strToWrite))
+	return err
+}
diff --git a/swarm/api/http/error_test.go b/swarm/api/http/response_test.go
similarity index 98%
rename from swarm/api/http/error_test.go
rename to swarm/api/http/response_test.go
index 990961f60edf..2e24bda6d1dd 100644
--- a/swarm/api/http/error_test.go
+++ b/swarm/api/http/response_test.go
@@ -44,7 +44,7 @@ func TestError(t *testing.T) {
 	defer resp.Body.Close()
 	respbody, err = ioutil.ReadAll(resp.Body)
 
-	if resp.StatusCode != 400 && !strings.Contains(string(respbody), "Invalid URI \"/this_should_fail_as_no_bzz_protocol_present\": unknown scheme") {
+	if resp.StatusCode != 404 && !strings.Contains(string(respbody), "Invalid URI \"/this_should_fail_as_no_bzz_protocol_present\": unknown scheme") {
 		t.Fatalf("Response body does not match, expected: %v, to contain: %v; received code %d, expected code: %d", string(respbody), "Invalid bzz URI: unknown scheme", 400, resp.StatusCode)
 	}
 
diff --git a/swarm/api/http/sctx.go b/swarm/api/http/sctx.go
new file mode 100644
index 
000000000000..431e11735409 --- /dev/null +++ b/swarm/api/http/sctx.go @@ -0,0 +1,38 @@ +package http + +import ( + "context" + + "github.com/ethereum/go-ethereum/swarm/api" + "github.com/ethereum/go-ethereum/swarm/sctx" +) + +type contextKey int + +const ( + uriKey contextKey = iota +) + +func GetRUID(ctx context.Context) string { + v, ok := ctx.Value(sctx.HTTPRequestIDKey).(string) + if ok { + return v + } + return "xxxxxxxx" +} + +func SetRUID(ctx context.Context, ruid string) context.Context { + return context.WithValue(ctx, sctx.HTTPRequestIDKey, ruid) +} + +func GetURI(ctx context.Context) *api.URI { + v, ok := ctx.Value(uriKey).(*api.URI) + if ok { + return v + } + return nil +} + +func SetURI(ctx context.Context, uri *api.URI) context.Context { + return context.WithValue(ctx, uriKey, uri) +} diff --git a/swarm/api/http/server.go b/swarm/api/http/server.go index 3122654b6c4c..bd6949de6caf 100644 --- a/swarm/api/http/server.go +++ b/swarm/api/http/server.go @@ -41,12 +41,9 @@ import ( "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/swarm/api" "github.com/ethereum/go-ethereum/swarm/log" - "github.com/ethereum/go-ethereum/swarm/spancontext" "github.com/ethereum/go-ethereum/swarm/storage" "github.com/ethereum/go-ethereum/swarm/storage/mru" - opentracing "github.com/opentracing/opentracing-go" - "github.com/pborman/uuid" "github.com/rs/cors" ) @@ -72,6 +69,17 @@ var ( getListFail = metrics.NewRegisteredCounter("api.http.get.list.fail", nil) ) +type methodHandler map[string]http.Handler + +func (m methodHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + v, ok := m[r.Method] + if ok { + v.ServeHTTP(rw, r) + return + } + rw.WriteHeader(http.StatusMethodNotAllowed) +} + func NewServer(api *api.API, corsString string) *Server { var allowedOrigins []string for _, domain := range strings.Split(corsString, ",") { @@ -84,20 +92,79 @@ func NewServer(api *api.API, corsString string) *Server { AllowedHeaders: []string{"*"}, }) - mux := http.NewServeMux() server := &Server{api: api} - mux.HandleFunc("/bzz:/", server.WrapHandler(true, server.HandleBzz)) - mux.HandleFunc("/bzz-raw:/", server.WrapHandler(true, server.HandleBzzRaw)) - mux.HandleFunc("/bzz-immutable:/", server.WrapHandler(true, server.HandleBzzImmutable)) - mux.HandleFunc("/bzz-hash:/", server.WrapHandler(true, server.HandleBzzHash)) - mux.HandleFunc("/bzz-list:/", server.WrapHandler(true, server.HandleBzzList)) - mux.HandleFunc("/bzz-resource:/", server.WrapHandler(true, server.HandleBzzResource)) - mux.HandleFunc("/", server.WrapHandler(false, server.HandleRootPaths)) - mux.HandleFunc("/robots.txt", server.WrapHandler(false, server.HandleRootPaths)) - mux.HandleFunc("/favicon.ico", server.WrapHandler(false, server.HandleRootPaths)) + defaultMiddlewares := []Adapter{ + RecoverPanic, + SetRequestID, + InitLoggingResponseWriter, + ParseURI, + InstrumentOpenTracing, + } + mux := http.NewServeMux() + mux.Handle("/bzz:/", methodHandler{ + "GET": Adapt( + http.HandlerFunc(server.HandleBzzGet), + defaultMiddlewares..., + ), + "POST": Adapt( + http.HandlerFunc(server.HandlePostFiles), + defaultMiddlewares..., + ), + "DELETE": Adapt( + http.HandlerFunc(server.HandleDelete), + defaultMiddlewares..., + ), + }) + mux.Handle("/bzz-raw:/", methodHandler{ + "GET": Adapt( + http.HandlerFunc(server.HandleGet), + defaultMiddlewares..., + ), + "POST": Adapt( + http.HandlerFunc(server.HandlePostRaw), + defaultMiddlewares..., + ), + 
}) + mux.Handle("/bzz-immutable:/", methodHandler{ + "GET": Adapt( + http.HandlerFunc(server.HandleGet), + defaultMiddlewares..., + ), + }) + mux.Handle("/bzz-hash:/", methodHandler{ + "GET": Adapt( + http.HandlerFunc(server.HandleGet), + defaultMiddlewares..., + ), + }) + mux.Handle("/bzz-list:/", methodHandler{ + "GET": Adapt( + http.HandlerFunc(server.HandleGetList), + defaultMiddlewares..., + ), + }) + mux.Handle("/bzz-resource:/", methodHandler{ + "GET": Adapt( + http.HandlerFunc(server.HandleGetResource), + defaultMiddlewares..., + ), + "POST": Adapt( + http.HandlerFunc(server.HandlePostResource), + defaultMiddlewares..., + ), + }) + + mux.Handle("/", methodHandler{ + "GET": Adapt( + http.HandlerFunc(server.HandleRootPaths), + SetRequestID, + InitLoggingResponseWriter, + ), + }) server.Handler = c.Handler(mux) + return server } @@ -105,139 +172,6 @@ func (s *Server) ListenAndServe(addr string) error { return http.ListenAndServe(addr, s) } -func (s *Server) HandleRootPaths(w http.ResponseWriter, r *Request) { - switch r.Method { - case http.MethodGet: - if r.RequestURI == "/" { - if strings.Contains(r.Header.Get("Accept"), "text/html") { - err := landingPageTemplate.Execute(w, nil) - if err != nil { - log.Error(fmt.Sprintf("error rendering landing page: %s", err)) - } - return - } - if strings.Contains(r.Header.Get("Accept"), "application/json") { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode("Welcome to Swarm!") - return - } - } - - if r.URL.Path == "/robots.txt" { - w.Header().Set("Last-Modified", time.Now().Format(http.TimeFormat)) - fmt.Fprintf(w, "User-agent: *\nDisallow: /") - return - } - Respond(w, r, "Bad Request", http.StatusBadRequest) - default: - Respond(w, r, "Not Found", http.StatusNotFound) - } -} - -func (s *Server) HandleBzz(w http.ResponseWriter, r *Request) { - switch r.Method { - case http.MethodGet: - log.Debug("handleGetBzz") - if r.Header.Get("Accept") == "application/x-tar" { - reader, err := s.api.GetDirectoryTar(r.Context(), r.uri) - if err != nil { - Respond(w, r, fmt.Sprintf("Had an error building the tarball: %v", err), http.StatusInternalServerError) - } - defer reader.Close() - - w.Header().Set("Content-Type", "application/x-tar") - w.WriteHeader(http.StatusOK) - io.Copy(w, reader) - return - } - s.HandleGetFile(w, r) - case http.MethodPost: - log.Debug("handlePostFiles") - s.HandlePostFiles(w, r) - case http.MethodDelete: - log.Debug("handleBzzDelete") - s.HandleDelete(w, r) - default: - Respond(w, r, "Method not allowed", http.StatusMethodNotAllowed) - } -} -func (s *Server) HandleBzzRaw(w http.ResponseWriter, r *Request) { - switch r.Method { - case http.MethodGet: - log.Debug("handleGetRaw") - s.HandleGet(w, r) - case http.MethodPost: - log.Debug("handlePostRaw") - s.HandlePostRaw(w, r) - default: - Respond(w, r, "Method not allowed", http.StatusMethodNotAllowed) - } -} -func (s *Server) HandleBzzImmutable(w http.ResponseWriter, r *Request) { - switch r.Method { - case http.MethodGet: - log.Debug("handleGetHash") - s.HandleGetList(w, r) - default: - Respond(w, r, "Method not allowed", http.StatusMethodNotAllowed) - } -} -func (s *Server) HandleBzzHash(w http.ResponseWriter, r *Request) { - switch r.Method { - case http.MethodGet: - log.Debug("handleGetHash") - s.HandleGet(w, r) - default: - Respond(w, r, "Method not allowed", http.StatusMethodNotAllowed) - } -} -func (s *Server) HandleBzzList(w http.ResponseWriter, r *Request) { - switch r.Method { - case http.MethodGet: - 
log.Debug("handleGetHash") - s.HandleGetList(w, r) - default: - Respond(w, r, "Method not allowed", http.StatusMethodNotAllowed) - } -} -func (s *Server) HandleBzzResource(w http.ResponseWriter, r *Request) { - switch r.Method { - case http.MethodGet: - log.Debug("handleGetResource") - s.HandleGetResource(w, r) - case http.MethodPost: - log.Debug("handlePostResource") - s.HandlePostResource(w, r) - default: - Respond(w, r, "Method not allowed", http.StatusMethodNotAllowed) - } -} -func (s *Server) WrapHandler(parseBzzUri bool, h func(http.ResponseWriter, *Request)) http.HandlerFunc { - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - defer metrics.GetOrRegisterResettingTimer(fmt.Sprintf("http.request.%s.time", r.Method), nil).UpdateSince(time.Now()) - req := &Request{Request: *r, ruid: uuid.New()[:8]} - metrics.GetOrRegisterCounter(fmt.Sprintf("http.request.%s", r.Method), nil).Inc(1) - log.Info("serving request", "ruid", req.ruid, "method", r.Method, "url", r.RequestURI) - - // wrapping the ResponseWriter, so that we get the response code set by http.ServeContent - w := newLoggingResponseWriter(rw) - if parseBzzUri { - uri, err := api.Parse(strings.TrimLeft(r.URL.Path, "/")) - if err != nil { - Respond(w, req, fmt.Sprintf("invalid URI %q", r.URL.Path), http.StatusBadRequest) - return - } - req.uri = uri - - log.Debug("parsed request path", "ruid", req.ruid, "method", req.Method, "uri.Addr", req.uri.Addr, "uri.Path", req.uri.Path, "uri.Scheme", req.uri.Scheme) - } - - h(w, req) // call original - log.Info("served response", "ruid", req.ruid, "code", w.statusCode) - }) -} - // browser API for registering bzz url scheme handlers: // https://developer.mozilla.org/en/docs/Web-based_protocol_handlers // electron (chromium) api for registering bzz url scheme handlers: @@ -247,59 +181,81 @@ type Server struct { api *api.API } -// Request wraps http.Request and also includes the parsed bzz URI -type Request struct { - http.Request +func (s *Server) HandleBzzGet(w http.ResponseWriter, r *http.Request) { + log.Debug("handleBzzGet", "ruid", GetRUID(r.Context())) + if r.Header.Get("Accept") == "application/x-tar" { + uri := GetURI(r.Context()) + reader, err := s.api.GetDirectoryTar(r.Context(), uri) + if err != nil { + RespondError(w, r, fmt.Sprintf("Had an error building the tarball: %v", err), http.StatusInternalServerError) + } + defer reader.Close() + + w.Header().Set("Content-Type", "application/x-tar") + w.WriteHeader(http.StatusOK) + io.Copy(w, reader) + return + } + + s.HandleGetFile(w, r) +} - uri *api.URI - ruid string // request unique id +func (s *Server) HandleRootPaths(w http.ResponseWriter, r *http.Request) { + switch r.RequestURI { + case "/": + RespondTemplate(w, r, "landing-page", "Swarm: Please request a valid ENS or swarm hash with the appropriate bzz scheme", 200) + return + case "/robots.txt": + w.Header().Set("Last-Modified", time.Now().Format(http.TimeFormat)) + fmt.Fprintf(w, "User-agent: *\nDisallow: /") + case "/favicon.ico": + w.WriteHeader(http.StatusOK) + w.Write(faviconBytes) + default: + RespondError(w, r, "Not Found", http.StatusNotFound) + } } // HandlePostRaw handles a POST request to a raw bzz-raw:/ URI, stores the request // body in swarm and returns the resulting storage address as a text/plain response -func (s *Server) HandlePostRaw(w http.ResponseWriter, r *Request) { - log.Debug("handle.post.raw", "ruid", r.ruid) +func (s *Server) HandlePostRaw(w http.ResponseWriter, r *http.Request) { + ruid := GetRUID(r.Context()) + 
log.Debug("handle.post.raw", "ruid", ruid) postRawCount.Inc(1) - ctx := r.Context() - var sp opentracing.Span - ctx, sp = spancontext.StartSpan( - ctx, - "http.post.raw") - defer sp.Finish() - toEncrypt := false - if r.uri.Addr == "encrypt" { + uri := GetURI(r.Context()) + if uri.Addr == "encrypt" { toEncrypt = true } - if r.uri.Path != "" { + if uri.Path != "" { postRawFail.Inc(1) - Respond(w, r, "raw POST request cannot contain a path", http.StatusBadRequest) + RespondError(w, r, "raw POST request cannot contain a path", http.StatusBadRequest) return } - if r.uri.Addr != "" && r.uri.Addr != "encrypt" { + if uri.Addr != "" && uri.Addr != "encrypt" { postRawFail.Inc(1) - Respond(w, r, "raw POST request addr can only be empty or \"encrypt\"", http.StatusBadRequest) + RespondError(w, r, "raw POST request addr can only be empty or \"encrypt\"", http.StatusBadRequest) return } if r.Header.Get("Content-Length") == "" { postRawFail.Inc(1) - Respond(w, r, "missing Content-Length header in request", http.StatusBadRequest) + RespondError(w, r, "missing Content-Length header in request", http.StatusBadRequest) return } - addr, _, err := s.api.Store(ctx, r.Body, r.ContentLength, toEncrypt) + addr, _, err := s.api.Store(r.Context(), r.Body, r.ContentLength, toEncrypt) if err != nil { postRawFail.Inc(1) - Respond(w, r, err.Error(), http.StatusInternalServerError) + RespondError(w, r, err.Error(), http.StatusInternalServerError) return } - log.Debug("stored content", "ruid", r.ruid, "key", addr) + log.Debug("stored content", "ruid", ruid, "key", addr) w.Header().Set("Content-Type", "text/plain") w.WriteHeader(http.StatusOK) @@ -311,55 +267,49 @@ func (s *Server) HandlePostRaw(w http.ResponseWriter, r *Request) { // (either a tar archive or multipart form), adds those files either to an // existing manifest or to a new manifest under and returns the // resulting manifest hash as a text/plain response -func (s *Server) HandlePostFiles(w http.ResponseWriter, r *Request) { - log.Debug("handle.post.files", "ruid", r.ruid) +func (s *Server) HandlePostFiles(w http.ResponseWriter, r *http.Request) { + ruid := GetRUID(r.Context()) + log.Debug("handle.post.files", "ruid", ruid) postFilesCount.Inc(1) - var sp opentracing.Span - ctx := r.Context() - ctx, sp = spancontext.StartSpan( - ctx, - "http.post.files") - defer sp.Finish() - contentType, params, err := mime.ParseMediaType(r.Header.Get("Content-Type")) if err != nil { postFilesFail.Inc(1) - Respond(w, r, err.Error(), http.StatusBadRequest) + RespondError(w, r, err.Error(), http.StatusBadRequest) return } toEncrypt := false - if r.uri.Addr == "encrypt" { + uri := GetURI(r.Context()) + if uri.Addr == "encrypt" { toEncrypt = true } var addr storage.Address - if r.uri.Addr != "" && r.uri.Addr != "encrypt" { - addr, err = s.api.Resolve(r.Context(), r.uri) + if uri.Addr != "" && uri.Addr != "encrypt" { + addr, err = s.api.Resolve(r.Context(), uri) if err != nil { postFilesFail.Inc(1) - Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusInternalServerError) + RespondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusInternalServerError) return } - log.Debug("resolved key", "ruid", r.ruid, "key", addr) + log.Debug("resolved key", "ruid", ruid, "key", addr) } else { addr, err = s.api.NewManifest(r.Context(), toEncrypt) if err != nil { postFilesFail.Inc(1) - Respond(w, r, err.Error(), http.StatusInternalServerError) + RespondError(w, r, err.Error(), http.StatusInternalServerError) return } - log.Debug("new manifest", 
"ruid", r.ruid, "key", addr) + log.Debug("new manifest", "ruid", ruid, "key", addr) } - newAddr, err := s.api.UpdateManifest(ctx, addr, func(mw *api.ManifestWriter) error { + newAddr, err := s.api.UpdateManifest(r.Context(), addr, func(mw *api.ManifestWriter) error { switch contentType { - case "application/x-tar": _, err := s.handleTarUpload(r, mw) if err != nil { - Respond(w, r, fmt.Sprintf("error uploading tarball: %v", err), http.StatusInternalServerError) + RespondError(w, r, fmt.Sprintf("error uploading tarball: %v", err), http.StatusInternalServerError) return err } return nil @@ -372,30 +322,31 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *Request) { }) if err != nil { postFilesFail.Inc(1) - Respond(w, r, fmt.Sprintf("cannot create manifest: %s", err), http.StatusInternalServerError) + RespondError(w, r, fmt.Sprintf("cannot create manifest: %s", err), http.StatusInternalServerError) return } - log.Debug("stored content", "ruid", r.ruid, "key", newAddr) + log.Debug("stored content", "ruid", ruid, "key", newAddr) w.Header().Set("Content-Type", "text/plain") w.WriteHeader(http.StatusOK) fmt.Fprint(w, newAddr) } -func (s *Server) handleTarUpload(r *Request, mw *api.ManifestWriter) (storage.Address, error) { - log.Debug("handle.tar.upload", "ruid", r.ruid) +func (s *Server) handleTarUpload(r *http.Request, mw *api.ManifestWriter) (storage.Address, error) { + log.Debug("handle.tar.upload", "ruid", GetRUID(r.Context())) - key, err := s.api.UploadTar(r.Context(), r.Body, r.uri.Path, mw) + key, err := s.api.UploadTar(r.Context(), r.Body, GetURI(r.Context()).Path, mw) if err != nil { return nil, err } return key, nil } -func (s *Server) handleMultipartUpload(req *Request, boundary string, mw *api.ManifestWriter) error { - log.Debug("handle.multipart.upload", "ruid", req.ruid) - mr := multipart.NewReader(req.Body, boundary) +func (s *Server) handleMultipartUpload(r *http.Request, boundary string, mw *api.ManifestWriter) error { + ruid := GetRUID(r.Context()) + log.Debug("handle.multipart.upload", "ruid", ruid) + mr := multipart.NewReader(r.Body, boundary) for { part, err := mr.NextPart() if err == io.EOF { @@ -435,48 +386,52 @@ func (s *Server) handleMultipartUpload(req *Request, boundary string, mw *api.Ma if name == "" { name = part.FormName() } - path := path.Join(req.uri.Path, name) + uri := GetURI(r.Context()) + path := path.Join(uri.Path, name) entry := &api.ManifestEntry{ Path: path, ContentType: part.Header.Get("Content-Type"), Size: size, ModTime: time.Now(), } - log.Debug("adding path to new manifest", "ruid", req.ruid, "bytes", entry.Size, "path", entry.Path) - contentKey, err := mw.AddEntry(req.Context(), reader, entry) + log.Debug("adding path to new manifest", "ruid", ruid, "bytes", entry.Size, "path", entry.Path) + contentKey, err := mw.AddEntry(r.Context(), reader, entry) if err != nil { return fmt.Errorf("error adding manifest entry from multipart form: %s", err) } - log.Debug("stored content", "ruid", req.ruid, "key", contentKey) + log.Debug("stored content", "ruid", ruid, "key", contentKey) } } -func (s *Server) handleDirectUpload(req *Request, mw *api.ManifestWriter) error { - log.Debug("handle.direct.upload", "ruid", req.ruid) - key, err := mw.AddEntry(req.Context(), req.Body, &api.ManifestEntry{ - Path: req.uri.Path, - ContentType: req.Header.Get("Content-Type"), +func (s *Server) handleDirectUpload(r *http.Request, mw *api.ManifestWriter) error { + ruid := GetRUID(r.Context()) + log.Debug("handle.direct.upload", "ruid", ruid) + key, err := 
mw.AddEntry(r.Context(), r.Body, &api.ManifestEntry{ + Path: GetURI(r.Context()).Path, + ContentType: r.Header.Get("Content-Type"), Mode: 0644, - Size: req.ContentLength, + Size: r.ContentLength, ModTime: time.Now(), }) if err != nil { return err } - log.Debug("stored content", "ruid", req.ruid, "key", key) + log.Debug("stored content", "ruid", ruid, "key", key) return nil } // HandleDelete handles a DELETE request to bzz://, removes // from and returns the resulting manifest hash as a // text/plain response -func (s *Server) HandleDelete(w http.ResponseWriter, r *Request) { - log.Debug("handle.delete", "ruid", r.ruid) +func (s *Server) HandleDelete(w http.ResponseWriter, r *http.Request) { + ruid := GetRUID(r.Context()) + uri := GetURI(r.Context()) + log.Debug("handle.delete", "ruid", ruid) deleteCount.Inc(1) - newKey, err := s.api.Delete(r.Context(), r.uri.Addr, r.uri.Path) + newKey, err := s.api.Delete(r.Context(), uri.Addr, uri.Path) if err != nil { deleteFail.Inc(1) - Respond(w, r, fmt.Sprintf("could not delete from manifest: %v", err), http.StatusInternalServerError) + RespondError(w, r, fmt.Sprintf("could not delete from manifest: %v", err), http.StatusInternalServerError) return } @@ -519,27 +474,20 @@ func resourcePostMode(path string) (isRaw bool, frequency uint64, err error) { // // The POST request admits a JSON structure as defined in the mru package: `mru.updateRequestJSON` // The requests can be to a) create a resource, b) update a resource or c) both a+b: create a resource and set the initial content -func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { - log.Debug("handle.post.resource", "ruid", r.ruid) - - var sp opentracing.Span - ctx := r.Context() - ctx, sp = spancontext.StartSpan( - ctx, - "http.post.resource") - defer sp.Finish() - +func (s *Server) HandlePostResource(w http.ResponseWriter, r *http.Request) { + ruid := GetRUID(r.Context()) + log.Debug("handle.post.resource", "ruid", ruid) var err error // Creation and update must send mru.updateRequestJSON JSON structure body, err := ioutil.ReadAll(r.Body) if err != nil { - Respond(w, r, err.Error(), http.StatusInternalServerError) + RespondError(w, r, err.Error(), http.StatusInternalServerError) return } var updateRequest mru.Request if err := updateRequest.UnmarshalJSON(body); err != nil { // decodes request JSON - Respond(w, r, err.Error(), http.StatusBadRequest) //TODO: send different status response depending on error + RespondError(w, r, err.Error(), http.StatusBadRequest) //TODO: send different status response depending on error return } @@ -548,7 +496,7 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { // to update this resource // Check this early, to avoid creating a resource and then not being able to set its first update. 
if err = updateRequest.Verify(); err != nil { - Respond(w, r, err.Error(), http.StatusForbidden) + RespondError(w, r, err.Error(), http.StatusForbidden) return } } @@ -557,7 +505,7 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { err = s.api.ResourceCreate(r.Context(), &updateRequest) if err != nil { code, err2 := s.translateResourceError(w, r, "resource creation fail", err) - Respond(w, r, err2.Error(), code) + RespondError(w, r, err2.Error(), code) return } } @@ -565,7 +513,7 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { if updateRequest.IsUpdate() { _, err = s.api.ResourceUpdate(r.Context(), &updateRequest.SignedResourceUpdate) if err != nil { - Respond(w, r, err.Error(), http.StatusInternalServerError) + RespondError(w, r, err.Error(), http.StatusInternalServerError) return } } @@ -579,7 +527,7 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { // metadata chunk (rootAddr) m, err := s.api.NewResourceManifest(r.Context(), updateRequest.RootAddr().Hex()) if err != nil { - Respond(w, r, fmt.Sprintf("failed to create resource manifest: %v", err), http.StatusInternalServerError) + RespondError(w, r, fmt.Sprintf("failed to create resource manifest: %v", err), http.StatusInternalServerError) return } @@ -589,7 +537,7 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { // \TODO update manifest key automatically in ENS outdata, err := json.Marshal(m) if err != nil { - Respond(w, r, fmt.Sprintf("failed to create json response: %s", err), http.StatusInternalServerError) + RespondError(w, r, fmt.Sprintf("failed to create json response: %s", err), http.StatusInternalServerError) return } fmt.Fprint(w, string(outdata)) @@ -604,17 +552,19 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { // bzz-resource:///meta - get metadata and next version information // = ens name or hash // TODO: Enable pass maxPeriod parameter -func (s *Server) HandleGetResource(w http.ResponseWriter, r *Request) { - log.Debug("handle.get.resource", "ruid", r.ruid) +func (s *Server) HandleGetResource(w http.ResponseWriter, r *http.Request) { + ruid := GetRUID(r.Context()) + uri := GetURI(r.Context()) + log.Debug("handle.get.resource", "ruid", ruid) var err error // resolve the content key. 
- manifestAddr := r.uri.Address() + manifestAddr := uri.Address() if manifestAddr == nil { - manifestAddr, err = s.api.Resolve(r.Context(), r.uri) + manifestAddr, err = s.api.Resolve(r.Context(), uri) if err != nil { getFail.Inc(1) - Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) + RespondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound) return } } else { @@ -625,25 +575,25 @@ func (s *Server) HandleGetResource(w http.ResponseWriter, r *Request) { rootAddr, err := s.api.ResolveResourceManifest(r.Context(), manifestAddr) if err != nil { getFail.Inc(1) - Respond(w, r, fmt.Sprintf("error resolving resource root chunk for %s: %s", r.uri.Addr, err), http.StatusNotFound) + RespondError(w, r, fmt.Sprintf("error resolving resource root chunk for %s: %s", uri.Addr, err), http.StatusNotFound) return } - log.Debug("handle.get.resource: resolved", "ruid", r.ruid, "manifestkey", manifestAddr, "rootchunk addr", rootAddr) + log.Debug("handle.get.resource: resolved", "ruid", ruid, "manifestkey", manifestAddr, "rootchunk addr", rootAddr) // determine if the query specifies period and version or it is a metadata query var params []string - if len(r.uri.Path) > 0 { - if r.uri.Path == "meta" { + if len(uri.Path) > 0 { + if uri.Path == "meta" { unsignedUpdateRequest, err := s.api.ResourceNewRequest(r.Context(), rootAddr) if err != nil { getFail.Inc(1) - Respond(w, r, fmt.Sprintf("cannot retrieve resource metadata for rootAddr=%s: %s", rootAddr.Hex(), err), http.StatusNotFound) + RespondError(w, r, fmt.Sprintf("cannot retrieve resource metadata for rootAddr=%s: %s", rootAddr.Hex(), err), http.StatusNotFound) return } rawResponse, err := unsignedUpdateRequest.MarshalJSON() if err != nil { - Respond(w, r, fmt.Sprintf("cannot encode unsigned UpdateRequest: %v", err), http.StatusInternalServerError) + RespondError(w, r, fmt.Sprintf("cannot encode unsigned UpdateRequest: %v", err), http.StatusInternalServerError) return } w.Header().Add("Content-type", "application/json") @@ -653,7 +603,7 @@ func (s *Server) HandleGetResource(w http.ResponseWriter, r *Request) { } - params = strings.Split(r.uri.Path, "/") + params = strings.Split(uri.Path, "/") } var name string @@ -689,17 +639,17 @@ func (s *Server) HandleGetResource(w http.ResponseWriter, r *Request) { // any error from the switch statement will end up here if err != nil { code, err2 := s.translateResourceError(w, r, "mutable resource lookup fail", err) - Respond(w, r, err2.Error(), code) + RespondError(w, r, err2.Error(), code) return } // All ok, serve the retrieved update - log.Debug("Found update", "name", name, "ruid", r.ruid) + log.Debug("Found update", "name", name, "ruid", ruid) w.Header().Set("Content-Type", "application/octet-stream") - http.ServeContent(w, &r.Request, "", now, bytes.NewReader(data)) + http.ServeContent(w, r, "", now, bytes.NewReader(data)) } -func (s *Server) translateResourceError(w http.ResponseWriter, r *Request, supErr string, err error) (int, error) { +func (s *Server) translateResourceError(w http.ResponseWriter, r *http.Request, supErr string, err error) (int, error) { code := 0 defaultErr := fmt.Errorf("%s: %v", supErr, err) rsrcErr, ok := err.(*mru.Error) @@ -725,46 +675,41 @@ func (s *Server) translateResourceError(w http.ResponseWriter, r *Request, supEr // given storage key // - bzz-hash:// and responds with the hash of the content stored // at the given storage key as a text/plain response -func (s *Server) HandleGet(w http.ResponseWriter, r 
*Request) { - log.Debug("handle.get", "ruid", r.ruid, "uri", r.uri) +func (s *Server) HandleGet(w http.ResponseWriter, r *http.Request) { + ruid := GetRUID(r.Context()) + uri := GetURI(r.Context()) + log.Debug("handle.get", "ruid", ruid, "uri", uri) getCount.Inc(1) - var sp opentracing.Span - ctx := r.Context() - ctx, sp = spancontext.StartSpan( - ctx, - "http.get") - defer sp.Finish() - var err error - addr := r.uri.Address() + addr := uri.Address() if addr == nil { - addr, err = s.api.Resolve(r.Context(), r.uri) + addr, err = s.api.Resolve(r.Context(), uri) if err != nil { getFail.Inc(1) - Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) + RespondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound) return } } else { w.Header().Set("Cache-Control", "max-age=2147483648, immutable") // url was of type bzz:///path, so we are sure it is immutable. } - log.Debug("handle.get: resolved", "ruid", r.ruid, "key", addr) + log.Debug("handle.get: resolved", "ruid", ruid, "key", addr) // if path is set, interpret as a manifest and return the // raw entry at the given path - if r.uri.Path != "" { + if uri.Path != "" { walker, err := s.api.NewManifestWalker(r.Context(), addr, nil) if err != nil { getFail.Inc(1) - Respond(w, r, fmt.Sprintf("%s is not a manifest", addr), http.StatusBadRequest) + RespondError(w, r, fmt.Sprintf("%s is not a manifest", addr), http.StatusBadRequest) return } var entry *api.ManifestEntry walker.Walk(func(e *api.ManifestEntry) error { // if the entry matches the path, set entry and stop // the walk - if e.Path == r.uri.Path { + if e.Path == uri.Path { entry = e // return an error to cancel the walk return errors.New("found") @@ -778,7 +723,7 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *Request) { // if the manifest's path is a prefix of the // requested path, recurse into it by returning // nil and continuing the walk - if strings.HasPrefix(r.uri.Path, e.Path) { + if strings.HasPrefix(uri.Path, e.Path) { return nil } @@ -786,7 +731,7 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *Request) { }) if entry == nil { getFail.Inc(1) - Respond(w, r, fmt.Sprintf("manifest entry could not be loaded"), http.StatusNotFound) + RespondError(w, r, fmt.Sprintf("manifest entry could not be loaded"), http.StatusNotFound) return } addr = storage.Address(common.Hex2Bytes(entry.Hash)) @@ -796,23 +741,23 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *Request) { w.Header().Set("ETag", fmt.Sprintf("%q", etag)) // set etag to manifest key or raw entry key. 
if noneMatchEtag != "" { if bytes.Equal(storage.Address(common.Hex2Bytes(noneMatchEtag)), addr) { - Respond(w, r, "Not Modified", http.StatusNotModified) + w.WriteHeader(http.StatusNotModified) return } } // check the root chunk exists by retrieving the file's size - reader, isEncrypted := s.api.Retrieve(ctx, addr) - if _, err := reader.Size(ctx, nil); err != nil { + reader, isEncrypted := s.api.Retrieve(r.Context(), addr) + if _, err := reader.Size(r.Context(), nil); err != nil { getFail.Inc(1) - Respond(w, r, fmt.Sprintf("root chunk not found %s: %s", addr, err), http.StatusNotFound) + RespondError(w, r, fmt.Sprintf("root chunk not found %s: %s", addr, err), http.StatusNotFound) return } w.Header().Set("X-Decrypted", fmt.Sprintf("%v", isEncrypted)) switch { - case r.uri.Raw(): + case uri.Raw(): // allow the request to overwrite the content type using a query // parameter contentType := "application/octet-stream" @@ -820,8 +765,8 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *Request) { contentType = typ } w.Header().Set("Content-Type", contentType) - http.ServeContent(w, &r.Request, "", time.Now(), reader) - case r.uri.Hash(): + http.ServeContent(w, r, "", time.Now(), reader) + case uri.Hash(): w.Header().Set("Content-Type", "text/plain") w.WriteHeader(http.StatusOK) fmt.Fprint(w, addr) @@ -831,35 +776,30 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *Request) { // HandleGetList handles a GET request to bzz-list:// and returns // a list of all files contained in under grouped into // common prefixes using "/" as a delimiter -func (s *Server) HandleGetList(w http.ResponseWriter, r *Request) { - log.Debug("handle.get.list", "ruid", r.ruid, "uri", r.uri) +func (s *Server) HandleGetList(w http.ResponseWriter, r *http.Request) { + ruid := GetRUID(r.Context()) + uri := GetURI(r.Context()) + log.Debug("handle.get.list", "ruid", ruid, "uri", uri) getListCount.Inc(1) - var sp opentracing.Span - ctx := r.Context() - ctx, sp = spancontext.StartSpan( - ctx, - "http.get.list") - defer sp.Finish() - // ensure the root path has a trailing slash so that relative URLs work - if r.uri.Path == "" && !strings.HasSuffix(r.URL.Path, "/") { - http.Redirect(w, &r.Request, r.URL.Path+"/", http.StatusMovedPermanently) + if uri.Path == "" && !strings.HasSuffix(r.URL.Path, "/") { + http.Redirect(w, r, r.URL.Path+"/", http.StatusMovedPermanently) return } - addr, err := s.api.Resolve(r.Context(), r.uri) + addr, err := s.api.Resolve(r.Context(), uri) if err != nil { getListFail.Inc(1) - Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) + RespondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound) return } - log.Debug("handle.get.list: resolved", "ruid", r.ruid, "key", addr) + log.Debug("handle.get.list: resolved", "ruid", ruid, "key", addr) - list, err := s.api.GetManifestList(ctx, addr, r.uri.Path) + list, err := s.api.GetManifestList(r.Context(), addr, uri.Path) if err != nil { getListFail.Inc(1) - Respond(w, r, err.Error(), http.StatusInternalServerError) + RespondError(w, r, err.Error(), http.StatusInternalServerError) return } @@ -867,11 +807,11 @@ func (s *Server) HandleGetList(w http.ResponseWriter, r *Request) { // HTML index with relative URLs if strings.Contains(r.Header.Get("Accept"), "text/html") { w.Header().Set("Content-Type", "text/html") - err := htmlListTemplate.Execute(w, &htmlListData{ + err := TemplatesMap["bzz-list"].Execute(w, &htmlListData{ URI: &api.URI{ Scheme: "bzz", - Addr: r.uri.Addr, - Path: 
r.uri.Path, + Addr: uri.Addr, + Path: uri.Path, }, List: &list, }) @@ -888,45 +828,40 @@ func (s *Server) HandleGetList(w http.ResponseWriter, r *Request) { // HandleGetFile handles a GET request to bzz:/// and responds // with the content of the file at from the given -func (s *Server) HandleGetFile(w http.ResponseWriter, r *Request) { - log.Debug("handle.get.file", "ruid", r.ruid) +func (s *Server) HandleGetFile(w http.ResponseWriter, r *http.Request) { + ruid := GetRUID(r.Context()) + uri := GetURI(r.Context()) + log.Debug("handle.get.file", "ruid", ruid) getFileCount.Inc(1) - var sp opentracing.Span - ctx := r.Context() - ctx, sp = spancontext.StartSpan( - ctx, - "http.get.file") - defer sp.Finish() - // ensure the root path has a trailing slash so that relative URLs work - if r.uri.Path == "" && !strings.HasSuffix(r.URL.Path, "/") { - http.Redirect(w, &r.Request, r.URL.Path+"/", http.StatusMovedPermanently) + if uri.Path == "" && !strings.HasSuffix(r.URL.Path, "/") { + http.Redirect(w, r, r.URL.Path+"/", http.StatusMovedPermanently) return } var err error - manifestAddr := r.uri.Address() + manifestAddr := uri.Address() if manifestAddr == nil { - manifestAddr, err = s.api.Resolve(r.Context(), r.uri) + manifestAddr, err = s.api.Resolve(r.Context(), uri) if err != nil { getFileFail.Inc(1) - Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) + RespondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound) return } } else { w.Header().Set("Cache-Control", "max-age=2147483648, immutable") // url was of type bzz:///path, so we are sure it is immutable. } - log.Debug("handle.get.file: resolved", "ruid", r.ruid, "key", manifestAddr) - reader, contentType, status, contentKey, err := s.api.Get(r.Context(), manifestAddr, r.uri.Path) + log.Debug("handle.get.file: resolved", "ruid", ruid, "key", manifestAddr) + reader, contentType, status, contentKey, err := s.api.Get(r.Context(), manifestAddr, uri.Path) etag := common.Bytes2Hex(contentKey) noneMatchEtag := r.Header.Get("If-None-Match") w.Header().Set("ETag", fmt.Sprintf("%q", etag)) // set etag to actual content key. if noneMatchEtag != "" { if bytes.Equal(storage.Address(common.Hex2Bytes(noneMatchEtag)), contentKey) { - Respond(w, r, "Not Modified", http.StatusNotModified) + w.WriteHeader(http.StatusNotModified) return } } @@ -935,10 +870,10 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *Request) { switch status { case http.StatusNotFound: getFileNotFound.Inc(1) - Respond(w, r, err.Error(), http.StatusNotFound) + RespondError(w, r, err.Error(), http.StatusNotFound) default: getFileFail.Inc(1) - Respond(w, r, err.Error(), http.StatusInternalServerError) + RespondError(w, r, err.Error(), http.StatusInternalServerError) } return } @@ -946,28 +881,28 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *Request) { //the request results in ambiguous files //e.g. /read with readme.md and readinglist.txt available in manifest if status == http.StatusMultipleChoices { - list, err := s.api.GetManifestList(ctx, manifestAddr, r.uri.Path) + list, err := s.api.GetManifestList(r.Context(), manifestAddr, uri.Path) if err != nil { getFileFail.Inc(1) - Respond(w, r, err.Error(), http.StatusInternalServerError) + RespondError(w, r, err.Error(), http.StatusInternalServerError) return } - log.Debug(fmt.Sprintf("Multiple choices! --> %v", list), "ruid", r.ruid) + log.Debug(fmt.Sprintf("Multiple choices! 
diff --git a/swarm/api/http/server_test.go b/swarm/api/http/server_test.go
index e8bc1bdaea90..dfa8a51877cb 100644
--- a/swarm/api/http/server_test.go
+++ b/swarm/api/http/server_test.go
@@ -443,11 +443,6 @@ func TestBzzGetPath(t *testing.T) {
 	testBzzGetPath(true, t)
 }
 
-func TestBzzTar(t *testing.T) {
-	testBzzTar(false, t)
-	testBzzTar(true, t)
-}
-
 func testBzzGetPath(encrypted bool, t *testing.T) {
 	var err error
 
@@ -561,24 +556,35 @@ func testBzzGetPath(encrypted bool, t *testing.T) {
 	ref := addr[2].Hex()
 
 	for _, c := range []struct {
-		path string
-		json string
-		html string
+		path          string
+		json          string
+		pageFragments []string
 	}{
 		{
 			path: "/",
 			json: `{"common_prefixes":["a/"]}`,
-			html: fmt.Sprintf("…Swarm index of bzz:/%s/…", ref, ref), // full expected HTML page; markup lost in extraction
+			pageFragments: []string{
+				fmt.Sprintf("Swarm index of bzz:/%s/", ref),
+				`a/`,
+			},
 		},
 		{
 			path: "/a/",
 			json: `{"common_prefixes":["a/b/"],"entries":[{"hash":"011b4d03dd8c01f1049143cf9c4c817e4b167f1d1b83e5c6f0f10d89ba1e7bce","path":"a/a","mod_time":"0001-01-01T00:00:00Z"}]}`,
-			html: fmt.Sprintf("…Swarm index of bzz:/%s/a/…", ref, ref),
+			pageFragments: []string{
+				fmt.Sprintf("Swarm index of bzz:/%s/a/", ref),
+				`b/`,
+				`a`,
+			},
 		},
 		{
 			path: "/a/b/",
 			json: `{"entries":[{"hash":"011b4d03dd8c01f1049143cf9c4c817e4b167f1d1b83e5c6f0f10d89ba1e7bce","path":"a/b/b","mod_time":"0001-01-01T00:00:00Z"},{"hash":"011b4d03dd8c01f1049143cf9c4c817e4b167f1d1b83e5c6f0f10d89ba1e7bce","path":"a/b/c","mod_time":"0001-01-01T00:00:00Z"}]}`,
-			html: fmt.Sprintf("…Swarm index of bzz:/%s/a/b/…", ref, ref),
+			pageFragments: []string{
+				fmt.Sprintf("Swarm index of bzz:/%s/a/b/", ref),
+				`b`,
+				`c`,
+			},
 		},
 		{
 			path: "/x",
@@ -628,21 +634,25 @@ func testBzzGetPath(encrypted bool, t *testing.T) {
 			t.Fatalf("HTTP request: %v", err)
 		}
 		defer resp.Body.Close()
-		respbody, err := ioutil.ReadAll(resp.Body)
+		b, err := ioutil.ReadAll(resp.Body)
 		if err != nil {
 			t.Fatalf("Read response body: %v", err)
 		}
 
-		if string(respbody) != c.html {
-			isexpectedfailrequest := false
+		body := string(b)
 
-			for _, r := range expectedfailrequests {
-				if k[:] == r {
-					isexpectedfailrequest = true
+		for _, f := range c.pageFragments {
+			if !strings.Contains(body, f) {
+				isexpectedfailrequest := false
+
+				for _, r := range expectedfailrequests {
+					if k[:] == r {
+						isexpectedfailrequest = true
+					}
+				}
+				if !isexpectedfailrequest {
+					t.Errorf("Response list body %q does not contain %q: body %q", k, f, body)
 				}
-			}
-			if !isexpectedfailrequest {
-				t.Errorf("Response list body %q does not match, expected: %q, got %q", k, c.html, string(respbody))
 			}
 		}
 	})
@@ -657,11 +667,11 @@ func testBzzGetPath(encrypted bool, t *testing.T) {
 	}
 
 	nonhashresponses := []string{
-		"cannot resolve name: no DNS to resolve name: \"name\"",
-		"cannot resolve nonhash: immutable address not a content hash: \"nonhash\"",
-		"cannot resolve nonhash: no DNS to resolve name: \"nonhash\"",
-		"cannot resolve nonhash: no DNS to resolve name: \"nonhash\"",
-		"cannot resolve nonhash: no DNS to resolve name: \"nonhash\"",
+		`cannot resolve name: no DNS to resolve name: "name"`,
+		`cannot resolve nonhash: immutable address not a content hash: "nonhash"`,
+		`cannot resolve nonhash: no DNS to resolve name: "nonhash"`,
+		`cannot resolve nonhash: no DNS to resolve name: "nonhash"`,
+		`cannot resolve nonhash: no DNS to resolve name: "nonhash"`,
 	}
 
 	for i, url := range nonhashtests {
@@ -684,6 +694,11 @@ func testBzzGetPath(encrypted bool, t *testing.T) {
 	}
 }
 
+func TestBzzTar(t *testing.T) {
+	testBzzTar(false, t)
+	testBzzTar(true, t)
+}
+
 func testBzzTar(encrypted bool, t *testing.T) {
 	srv := testutil.NewTestSwarmServer(t, serverFunc)
 	defer srv.Close()
@@ -738,7 +753,6 @@ func testBzzTar(encrypted bool, t *testing.T) {
 	}
 	swarmHash, err := ioutil.ReadAll(resp2.Body)
 	resp2.Body.Close()
-	t.Logf("uploaded tarball successfully and got manifest address at %s", string(swarmHash))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -887,7 +901,7 @@ func TestMethodsNotAllowed(t *testing.T) {
 	} {
 		res, _ := http.Post(c.url, "text/plain", bytes.NewReader([]byte(databytes)))
 		if res.StatusCode != c.code {
-			t.Fatal("should have failed")
+			t.Fatalf("should have failed. requested url: %s, expected code %d, got %d", c.url, c.code, res.StatusCode)
 		}
 	}
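The test change above trades exact-HTML assertions for fragment checks, which keeps testBzzGetPath stable across cosmetic template edits. The same pattern, factored as a helper (a sketch, not code from this patch):

package http

import (
	"strings"
	"testing"
)

// checkFragments asserts that every expected fragment occurs in the body,
// mirroring the pageFragments loop introduced above.
func checkFragments(t *testing.T, body string, fragments []string) {
	t.Helper()
	for _, f := range fragments {
		if !strings.Contains(body, f) {
			t.Errorf("body does not contain %q", f)
		}
	}
}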
diff --git a/swarm/api/http/templates.go b/swarm/api/http/templates.go
index 8897b9694604..1cd42ca3714a 100644
--- a/swarm/api/http/templates.go
+++ b/swarm/api/http/templates.go
@@ -17,6 +17,7 @@
 package http
 
 import (
+	"encoding/hex"
 	"html/template"
 	"path"
 
@@ -28,178 +29,269 @@ type htmlListData struct {
 	List *api.ManifestList
 }
 
-var htmlListTemplate = template.Must(template.New("html-list").Funcs(template.FuncMap{"basename": path.Base}).Parse(`
-[removed: start of the old inline HTML list template; its markup was lost in extraction]
+var TemplatesMap = make(map[string]*template.Template)
+var faviconBytes []byte
+
+func init() {
+	for _, v := range []struct {
+		templateName string
+		partial      string
+		funcs        template.FuncMap
+	}{
+		{
+			templateName: "error",
+			partial:      errorResponse,
+		},
+		{
+			templateName: "bzz-list",
+			partial:      bzzList,
+			funcs:        template.FuncMap{"basename": path.Base},
+		},
+		{
+			templateName: "landing-page",
+			partial:      landing,
+		},
+	} {
+		TemplatesMap[v.templateName] = template.Must(template.New(v.templateName).Funcs(v.funcs).Parse(baseTemplate + css + v.partial + logo))
+	}
+
+	bytes, err := hex.DecodeString(favicon)
+	if err != nil {
+		panic(err)
+	}
+	faviconBytes = bytes
+}
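Every template in TemplatesMap is parsed from baseTemplate + css + partial + logo, so executing one renders a complete page with the partial substituted at {{ template "content" . }}. A sketch of the handler-side use for the "error" entry (the data struct here is illustrative; the real response type lives elsewhere in this series):

package http

import "net/http"

type errorData struct {
	Msg  string
	Code int
}

func renderError(w http.ResponseWriter, msg string, code int) {
	w.Header().Set("Content-Type", "text/html")
	w.WriteHeader(code)
	// The "error" template was composed in init() above; {{.Msg}} and
	// {{.Code}} feed the error partial defined below.
	_ = TemplatesMap["error"].Execute(w, errorData{Msg: msg, Code: code})
}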
+const bzzList = `{{ define "content" }}
+[bzz-list "content" partial: an HTML index table with Path / Type / Size columns, one row per {{ range .List.CommonPrefixes }} ({{ basename . }}/, DIR, -) and one per {{ range .List.Entries }} ({{ basename .Path }}, {{ .ContentType }}, {{ .Size }}); the literal markup was lost in extraction]
+{{ end }}`
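The bzz-list partial calls the custom basename function, which is why init() attaches the FuncMap before Parse: functions referenced by a template must be registered first or parsing fails. A standalone demonstration of that constraint:

package main

import (
	"html/template"
	"os"
	"path"
)

func main() {
	// Without the Funcs call, Parse would return an error:
	// function "basename" not defined.
	t := template.Must(template.New("demo").
		Funcs(template.FuncMap{"basename": path.Base}).
		Parse(`{{ basename . }}`))
	_ = t.Execute(os.Stdout, "a/b/c.txt") // prints: c.txt
}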
+
+const errorResponse = `{{ define "content" }}
+[error "content" partial: renders the error message {{.Msg}} and a line "Error code: {{.Code}}"; the literal markup was lost in extraction]
+{{ end }}`
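Because these pages are built with html/template rather than text/template, anything interpolated into {{.Msg}} is auto-escaped, so error messages that echo request input cannot inject markup. A minimal standalone illustration:

package main

import (
	"html/template"
	"os"
)

func main() {
	t := template.Must(template.New("e").Parse(`<div>{{.Msg}}</div>`))
	// Prints: <div>&lt;script&gt;alert(1)&lt;/script&gt;</div>
	_ = t.Execute(os.Stdout, struct{ Msg string }{`<script>alert(1)</script>`})
}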
+
+const landing = `{{ define "content" }}
+[landing-page "content" partial: Swarm search form and links; the literal markup was lost in extraction]
+{{ end }}`
+
+const baseTemplate = `
+[shared page skeleton: doctype, head, css/logo hooks and body wrappers; the literal markup was lost in extraction]
+  {{ template "content" . }}
+[closing wrappers; markup lost in extraction]
+`
-[removed: body of the old inline htmlListTemplate ("Swarm index of {{ .URI }}" with a Path/Type/Size table over .List.CommonPrefixes and .List.Entries); markup lost in extraction]
-`[1:]))
-
-var landingPageTemplate = template.Must(template.New("landingPage").Parse(`
-[removed: body of the old landing page ("Swarm :: Welcome to Swarm", "Welcome to Swarm", "Enter the hash or ENS of a Swarm-hosted file below:", "Swarm: Serverless Hosting Incentivised peer-to-peer Storage and Content Distribution"); markup lost in extraction]
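The remaining constants embed the stylesheet, logo and favicon directly in the binary; init() above hex-decodes favicon into faviconBytes once at startup. A handler can then serve it with no filesystem access; a sketch (the route wiring is not part of this hunk):

package http

import "net/http"

func serveFavicon(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "image/x-icon")
	_, _ = w.Write(faviconBytes)
}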
-`[1:]))
+
+const css = `{{ define "css" }}
+html {
+	font-size: 18px;
+	font-size: 1.13rem;
+	-webkit-text-size-adjust: 100%;
+	-ms-text-size-adjust: 100%;
+	font-family: Helvetica, Arial, sans-serif;
+}
+
+body {
+	background: #f6f6f6;
+	color: #333;
+}
+
+a, a:visited, a:active {
+	color: darkorange;
+}
+
+a.normal-link, a.normal-link:active { color: #0000EE; }
+a.normal-link:visited { color: #551A8B; }
+
+table {
+	border-collapse: separate;
+}
+
+td {
+	padding: 3px 10px;
+}
+
+
+.container {
+	max-width: 600px;
+	margin: 40px auto 40px;
+	text-align: center;
+}
+
+.separate-block {
+	margin: 40px 0;
+	word-wrap: break-word;
+}
+
+.footer {
+	font-size: 12px;
+	font-size: 0.75rem;
+	text-align: center;
+}
+
+.orange {
+	color: #ffa500;
+}
+
+.top-space {
+	margin-top: 20px;
+	margin-bottom: 20px;
+}
+
+/* SVG Logos, editable */
+
+.searchbar {
+	padding: 20px 20px 0;
+}
+
+.logo {
+	margin: 100px 80px 0;
+}
+
+.logo a img {
+	max-width: 140px;
+}
+
+/* Tablet < 600p*/
+
+@media only screen and (max-width: 600px) {}
+
+/* Mobile phone < 360p*/
+
+@media only screen and (max-width: 360px) {
+	h1 {
+		font-size: 20px;
+		font-size: 1.5rem;
+	}
+	h2 {
+		font-size: 0.88rem;
+		margin: 0;
+	}
+	.logo {
+		margin: 50px 40px 0;
+	}
+	.footer {
+		font-size: 0.63rem;
+		text-align: center;
+	}
+}
+
+input[type=text] {
+	width: 100%;
+	box-sizing: border-box;
+	border: 2px solid #777;
+	border-radius: 2px;
+	font-size: 16px;
+	padding: 12px 20px 12px 20px;
+	transition: border 250ms ease-in-out;
+}
+
+input[type=text]:focus {
+	border: 2px solid #ffce73;
+}
+
+.button {
+	background-color: #ffa500;
+	margin: 20px 0;
+	border: none;
+	border-radius: 2px;
+	color: #222;
+	padding: 15px 32px;
+	text-align: center;
+	text-decoration: none;
+	display: inline-block;
+	font-size: 16px;
+}
+{{ end }}`
+
+const logo = `{{ define "logo" }}
+[inline SVG logo; markup lost in extraction]
+{{ end }}`
+
+const favicon = 
`000001000400101000000000200068040000460000002020000000002000a8100000ae0400003030000000002000a825000056150000404000000000200028420000fe3a000028000000100000002000000001002000000000004004000000000000000000000000000000000000ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff017e7e7e0362626263545454c548484849ffffff01ffffff01ffffff01ffffff01646464375b5b5bbf4545457758585809ffffff01ffffff01ffffff0164646443626262cf626262ff535353ff454545ff454545b74949492b6868681d626262a5626262fd5c5c5cff464646ff454545dd47474755ffffff01ffffff013f3f3feb565656ff636363ff535353ff464646ff3f3f3fff373737ab393939894d4d4dff626262ff5c5c5cff464646ff424242ff3a3a3af7ffffff01ffffff01383838e9353535ff424242ff474747ff383838ff353535ff363636ab35353587363636ff3a3a3aff4a4a4aff3b3b3bff353535ff363636f5ffffff01ffffff01383838e9303030ff181818ff131313ff232323ff343434ff363636ab35353587343434ff202020ff101010ff1d1d1dff303030ff373737f5ffffff01ffffff01232323c50c0c0cff0d0d0dff131313ff171717ff171717ff2929298b2727276b0f0f0ffd0d0d0dff101010ff171717ff161616ff232323d9ffffff01ffffff014d4d4d030f0f0f650c0c0ce7131313ff161616d51d1d1d4b63636363464646691717173b0d0d0dc50f0f0fff161616ef171717752e2e2e07ffffff01ffffff01ffffff01ffffff011d1d1d0f1515155360606045626262cf636363ff464646ff454545d3484848491414144d24242417ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff013c3c3c374f4f4fff636363ff636363ff464646ff464646ff3f3f3fff3c3c3c41ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff013636363d353535ff3c3c3cff575757ff363636ff181818ff282828ff37373747ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff013636363d363636ff303030ff181818ff292929ff131313ef17171771696969136565653bffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01323232371e1e1eff0d0d0dff0c0c0cff363636ff363636a3ffffff0185858515606060ff4747476bffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01111111450d0d0dd10c0c0cff1b1b1bff2a2a2a993e3e3e0b30303085292929ff37373787ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01363636030e0e0e671616166b45454505323232432e2e2ed9151515c31d1d1d2dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff014e4e4e05ffffff01ffffff01ffffff01ffffff010000ffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff28000000200000004000000001002000000000008010000000000000000000000000000000000000ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff017272721b646464a54646466f72727205ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0168686845575757b74f4f4f39ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff017e7e7e0b6262627d616161f3636363ff424242ff444444d74f4f4f49ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff016c6c6c27636363b5616161ff555555ff434343ff464646a35858581dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff016666665d616161e3626262
ff636363ff636363ff444444ff464646ff434343ff454545b95252522bffffff01ffffff01ffffff01ffffff016c6c6c1363636393616161fb636363ff636363ff555555ff464646ff464646ff444444f5464646836666660bffffff01ffffff01ffffff01ffffff01ffffff016a6a6a3f626262c9616161ff636363ff636363ff636363ff636363ff444444ff464646ff464646ff464646ff434343fb48484897545454135b5b5b036868686f616161ef626262ff636363ff636363ff636363ff555555ff464646ff464646ff464646ff454545ff444444e54a4a4a5fffffff01ffffff01ffffff01ffffff013b3b3bd7505050ff646464ff636363ff636363ff636363ff636363ff444444ff464646ff464646ff464646ff454545ff3a3a3aff33333357313131113c3c3cff5a5a5aff646464ff636363ff636363ff636363ff555555ff464646ff464646ff464646ff464646ff424242ff383838f1ffffff01ffffff01ffffff01ffffff013a3a3ad5353535ff3a3a3aff575757ff646464ff626262ff636363ff444444ff464646ff464646ff3d3d3dff353535ff363636ff3636365535353511363636ff343434ff434343ff606060ff636363ff636363ff555555ff464646ff464646ff444444ff393939ff353535ff373737edffffff01ffffff01ffffff01ffffff013a3a3ad5363636ff363636ff343434ff3f3f3fff5d5d5dff646464ff444444ff404040ff363636ff353535ff363636ff363636ff3636365535353511363636ff363636ff363636ff343434ff4a4a4aff636363ff555555ff454545ff3c3c3cff353535ff363636ff363636ff373737edffffff01ffffff01ffffff01ffffff013a3a3ad5363636ff363636ff363636ff363636ff353535ff3f3f3fff363636ff353535ff363636ff363636ff363636ff363636ff3636365535353511363636ff363636ff363636ff363636ff353535ff383838ff3a3a3aff373737ff353535ff363636ff363636ff363636ff373737edffffff01ffffff01ffffff01ffffff013a3a3ad5363636ff363636ff363636ff323232ff181818ff0e0e0eff171717ff282828ff373737ff363636ff363636ff363636ff3636365535353511363636ff363636ff353535ff373737ff292929ff0f0f0fff111111ff1b1b1bff2f2f2fff373737ff363636ff363636ff373737edffffff01ffffff01ffffff01ffffff013a3a3ad5363636ff363636ff1e1e1eff0b0b0bff0d0d0dff0f0f0fff171717ff161616ff191919ff2c2c2cff373737ff363636ff3636365535353511363636ff373737ff2f2f2fff141414ff0b0b0bff0d0d0dff131313ff171717ff151515ff1f1f1fff333333ff363636ff373737edffffff01ffffff01ffffff01ffffff013b3b3bd5252525ff0d0d0dff0c0c0cff0d0d0dff0d0d0dff0f0f0fff171717ff171717ff171717ff151515ff1c1c1cff313131ff3535355734343411333333ff1a1a1aff0b0b0bff0d0d0dff0d0d0dff0d0d0dff131313ff171717ff171717ff171717ff161616ff242424ff373737efffffff01ffffff01ffffff01ffffff012020205d0b0b0be50b0b0bff0d0d0dff0d0d0dff0d0d0dff0f0f0fff171717ff171717ff171717ff171717ff131313ff161616b73333331f3b3b3b05111111970a0a0afb0d0d0dff0d0d0dff0d0d0dff0d0d0dff131313ff171717ff171717ff171717ff161616ff141414f51c1c1c7fffffff01ffffff01ffffff01ffffff01ffffff014d4d4d0b1212127f0a0a0af50d0d0dff0d0d0dff0f0f0fff171717ff171717ff151515ff151515d522222249ffffff017373731b51515121ffffff011d1d1d2b101010b50a0a0aff0d0d0dff0d0d0dff131313ff171717ff171717ff131313ff181818a12e2e2e1dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff012c2c2c1b0f0f0fa10a0a0afd0f0f0fff161616ff141414e91b1b1b69656565057878780b6363637b626262f3464646f7454545896969690fffffff011c1c1c470c0c0cd30b0b0bff131313ff141414ff151515c32a2a2a37ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff011d1d1d35111111bd1a1a1a8d2f2f2f11ffffff0166666659616161e1626262ff646464ff474747ff454545ff444444e9494949677b7b7b054040400517171769131313cd24242455ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0169696939626262c7616161ff636363ff636363ff646464ff474747ff464646ff464646ff444444ff454545d14e4e4e45ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01f
fffff01ffffff01ffffff01ffffff01ffffff01ffffff01424242615e5e5eff636363ff636363ff636363ff636363ff646464ff474747ff464646ff464646ff464646ff464646ff434343ff3f3f3f77ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0136363679343434ff494949ff636363ff636363ff636363ff646464ff474747ff464646ff464646ff474747ff3d3d3dff353535ff3a3a3a8dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0136363679363636ff353535ff363636ff505050ff646464ff636363ff474747ff484848ff2f2f2fff1c1c1cff323232ff363636ff3a3a3a8dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0136363679363636ff363636ff363636ff353535ff3a3a3aff5a5a5aff393939ff0f0f0fff040404ff111111ff151515ff232323ff3535358fffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0136363679363636ff363636ff363636ff363636ff323232ff171717ff2a2a2aff0c0c0cff030303ff111111ff141414fb171717992e2e2e17a3a3a305ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0136363679363636ff363636ff363636ff1f1f1fff0b0b0bff0d0d0dff363636ff383838ff242424ff121212bf2a2a2a2dffffff01ffffff018484842b636363bf6d6d6d2fffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0136363679373737ff252525ff0d0d0dff0c0c0cff0d0d0dff0d0d0dff373737ff363636ff353535ff39393949ffffff01ffffff01ffffff0186868629646464ff656565fb6464649b55555505ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff012e2e2e650e0e0eff0c0c0cff0d0d0dff0d0d0dff0d0d0dff0c0c0cff353535ff363636ff353535ff37373749ffffff01ffffff01ffffff0185858529656565ff525252ff353535ff4b4b4b0fffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff011c1c1c430d0d0dcf0b0b0bff0d0d0dff0d0d0dff0d0d0dff171717ff282828ff363636ff37373749ffffff01ffffff01ffffff0144444459363636ff353535ff353535ff4e4e4e0fffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0162626203161616630b0b0be70c0c0cff0d0d0dff171717ff161616ff171717ed3737372fffffff013e3e3e2b303030b72a2a2aff151515ff262626ff363636ff4b4b4b0fffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff013636360d101010850a0a0af7141414f91717178f45454511ffffff014c4c4c252c2c2cdb303030ff2d2d2dff151515ff131313ff1b1b1bad5a5a5a07ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff012b2b2b2121212127ffffff01ffffff01ffffff01ffffff0161616109313131752b2b2bf1131313cd26262641ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff014e4e4e1359595903ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffff
ff01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000028000000300000006000000001002000000000008025000000000000000000000000000000000000ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0173737357545454997c7c7c11ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0176767663515151916c6c6c0dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff017676762d636363bb636363ff4d4d4dff434343eb4f4f4f6d7f7f7f05ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0176767635616161c3626262ff494949ff424242e94f4f4f6392929203ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff017e7e7e19626262955f5f5ffd626262ff666666ff4f4f4fff464646ff424242ff434343d75a5a5a49ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff017777771d6464649f5f5f5fff636363ff656565ff4b4b4bff464646ff424242ff444444d158585841ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff018585850966666677606060ef626262ff636363ff636363ff666666ff4f4f4fff464646ff464646ff464646ff414141ff464646b75d5d5d2dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff018989890d6868687f5f5f5ff5626262ff636363ff636363ff656565ff4b4b4bff464646ff464646ff464646ff404040ff484848b160606027ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff016a6a6a55626262df606060ff636363ff636363ff636363ff636363ff666666ff4f4f4fff464646ff464646ff464646ff464646ff454545ff424242fd484848956a6a6a17ffffff01ffffff01ffffff01ffffff01ffffff016969695f606060e3606060ff636363ff636363ff636363ff636363ff656565ff4b4b4bff464646ff464646ff464646ff464646ff454545ff414141f94a4a4a8d65656513ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff016e6e6e3b656565c15f5f5fff636363ff636363ff636363ff636363ff636363ff636363ff666666ff4f4f4fff464646ff464646ff464646ff464646f
f464646ff464646ff444444ff424242ed52525277ffffff01ffffff016c6c6c37676767c95f5f5fff636363ff636363ff636363ff636363ff636363ff636363ff656565ff4b4b4bff464646ff464646ff464646ff464646ff464646ff464646ff434343ff444444e94d4d4d6dffffff01ffffff01ffffff01ffffff01ffffff01ffffff013c3c3cc5454545ff646464ff646464ff636363ff636363ff636363ff636363ff636363ff666666ff4f4f4fff464646ff464646ff464646ff464646ff464646ff464646ff474747ff424242ff333333fb34343409ffffff0131313199494949ff656565ff646464ff636363ff636363ff636363ff636363ff636363ff656565ff4b4b4bff464646ff464646ff464646ff464646ff464646ff464646ff474747ff414141ff373737ebffffff01ffffff01ffffff01ffffff01ffffff01ffffff01404040bf333333ff343434ff4f4f4fff666666ff636363ff636363ff636363ff636363ff666666ff4f4f4fff464646ff464646ff464646ff464646ff474747ff444444ff383838ff343434ff363636f737373707ffffff0135353597343434ff343434ff525252ff666666ff636363ff636363ff636363ff636363ff656565ff4b4b4bff464646ff464646ff464646ff464646ff474747ff444444ff383838ff343434ff393939e3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01404040bf343434ff363636ff333333ff383838ff585858ff676767ff636363ff636363ff666666ff4f4f4fff464646ff464646ff474747ff464646ff3b3b3bff343434ff363636ff363636ff363636f737373707ffffff0135353597363636ff363636ff333333ff383838ff5a5a5aff666666ff636363ff636363ff656565ff4b4b4bff464646ff464646ff474747ff454545ff3a3a3aff343434ff363636ff363636ff393939e3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01404040bf343434ff363636ff363636ff363636ff323232ff3d3d3dff5d5d5dff666666ff666666ff4f4f4fff464646ff474747ff3e3e3eff353535ff353535ff363636ff363636ff363636ff363636f737373707ffffff0135353597363636ff363636ff363636ff363636ff313131ff3f3f3fff5f5f5fff666666ff656565ff4b4b4bff464646ff474747ff3d3d3dff353535ff353535ff363636ff363636ff363636ff393939e3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01404040bf343434ff363636ff363636ff363636ff363636ff353535ff323232ff444444ff676767ff525252ff404040ff363636ff353535ff363636ff363636ff363636ff363636ff363636ff363636f737373707ffffff0135353597363636ff363636ff363636ff363636ff363636ff353535ff323232ff464646ff676767ff4e4e4eff404040ff363636ff353535ff363636ff363636ff363636ff363636ff363636ff393939e3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01404040bf343434ff363636ff363636ff363636ff363636ff363636ff353535ff383838ff2d2d2dff2b2b2bff373737ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636f737373707ffffff0135353597363636ff363636ff363636ff363636ff363636ff363636ff363636ff383838ff2c2c2cff2a2a2aff373737ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff393939e3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01404040bf343434ff363636ff363636ff363636ff353535ff383838ff343434ff171717ff090909ff151515ff171717ff2d2d2dff383838ff363636ff363636ff363636ff363636ff363636ff363636f737373707ffffff0135353597363636ff363636ff363636ff363636ff353535ff383838ff333333ff151515ff090909ff151515ff181818ff2f2f2fff383838ff363636ff363636ff363636ff363636ff363636ff393939e3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01404040bf343434ff363636ff363636ff373737ff373737ff1f1f1fff090909ff0c0c0cff0c0c0cff171717ff171717ff141414ff1b1b1bff323232ff383838ff363636ff363636ff363636ff363636f737373707ffffff0135353597363636ff363636ff363636ff373737ff373737ff1d1d1dff0a0a0aff0c0c0cff0c0c0cff171717ff171717ff141414ff1c1c1cff333333ff383838ff353535ff363636ff363636ff393939e3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01404040bf343434ff363636ff393939ff272727ff0c0c0cff0b0b0bff0d0d0dff0d0d0dff0c0c0cff171717ff171717ff171717ff161616ff141414ff202020ff353535ff373737ff363636ff363636f737373707ffffff0135353597363636ff363636ff383838ff252525ff0b
0b0bff0b0b0bff0d0d0dff0d0d0dff0c0c0cff171717ff171717ff171717ff161616ff141414ff222222ff363636ff373737ff363636ff393939e3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01404040bf383838ff2d2d2dff101010ff0a0a0aff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff171717ff171717ff171717ff171717ff171717ff161616ff141414ff262626ff373737ff373737f737373707ffffff0136363697393939ff2b2b2bff0f0f0fff0a0a0aff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff171717ff171717ff171717ff171717ff171717ff161616ff151515ff272727ff383838ff393939e3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff013a3a3abd131313ff090909ff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff171717ff171717ff171717ff171717ff171717ff171717ff171717ff151515ff171717ff262626fb38383807ffffff012a2a2a97121212ff090909ff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff171717ff171717ff171717ff171717ff171717ff171717ff171717ff151515ff161616ff2a2a2ae7ffffff01ffffff01ffffff01ffffff01ffffff01ffffff015f5f5f0b1616167b090909ef0a0a0aff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff171717ff171717ff171717ff171717ff171717ff171717ff171717ff0f0f0fff181818b74040402dffffff01ffffff014646461118181883080808f30b0b0bff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff171717ff171717ff171717ff171717ff171717ff171717ff161616ff101010ff181818b141414127ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff014d4d4d171212129b090909fd0c0c0cff0d0d0dff0d0d0dff0d0d0dff0c0c0cff171717ff171717ff171717ff171717ff171717ff111111ff141414d335353547ffffff01ffffff01ffffff01ffffff01ffffff01ffffff013838381d131313a5060606ff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff171717ff171717ff171717ff171717ff171717ff111111ff181818cd2e2e2e3dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01333333310f0f0fbb070707ff0d0d0dff0d0d0dff0c0c0cff171717ff171717ff171717ff141414ff121212e72424246d86868603ffffff01ffffff017373732b656565b9464646c95e5e5e3bffffff01ffffff01ffffff01323232370e0e0ec3080808ff0d0d0dff0d0d0dff0c0c0cff171717ff171717ff171717ff121212ff161616e525252563ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff012525254d0e0e0ed9090909ff0c0c0cff171717ff151515ff121212f91d1d1d894d4d4d13ffffff01ffffff0178787815656565935f5f5ffb646464ff484848ff404040ff454545a96a6a6a1fffffff01ffffff01ffffff011b1b1b570e0e0edf080808ff0d0d0dff171717ff151515ff0f0f0ff3212121815656560dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01636363071a1a1a710a0a0aed0f0f0fff1b1b1bad2f2f2f23ffffff01ffffff018d8d8d0566666675616161eb616161ff636363ff646464ff484848ff464646ff454545ff424242f54c4c4c856262620fffffff01ffffff014040400b21212179080808f10f0f0fff1b1b1ba15757571dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff014141411740404037ffffff01ffffff01ffffff016a6a6a4d616161db606060ff636363ff636363ff636363ff646464ff484848ff464646ff464646ff464646ff434343ff434343e751515167ffffff01ffffff01ffffff014646461d30303033ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0176767631616161c35f5f5fff636363ff636363ff636363ff636363ff636363ff646464ff484848ff464646ff464646ff464646ff464646ff464646ff424242ff454545d158585841ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01fffff
f01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff015252527f636363ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff646464ff484848ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff434343ff454545a1ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01313131b53b3b3bff5b5b5bff676767ff636363ff636363ff636363ff636363ff636363ff646464ff484848ff464646ff464646ff464646ff464646ff464646ff474747ff444444ff393939ff383838d3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01363636b3363636ff323232ff404040ff616161ff656565ff626262ff636363ff636363ff646464ff484848ff464646ff464646ff454545ff494949ff474747ff3b3b3bff343434ff353535ff3a3a3ad3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01363636b3363636ff363636ff353535ff323232ff484848ff656565ff646464ff636363ff646464ff484848ff464646ff474747ff494949ff242424ff282828ff383838ff363636ff363636ff3a3a3ad3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01363636b3363636ff363636ff363636ff363636ff343434ff343434ff515151ff666666ff656565ff484848ff4b4b4bff323232ff070707ff040404ff151515ff181818ff2f2f2fff383838ff3a3a3ad3ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01363636b3363636ff363636ff363636ff363636ff363636ff363636ff333333ff383838ff5f5f5fff3c3c3cff0f0f0fff020202ff050505ff050505ff171717ff171717ff141414ff1c1c1cff323232d7ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01363636b3363636ff363636ff363636ff363636ff363636ff353535ff383838ff343434ff161616ff2a2a2aff0c0c0cff020202ff050505ff050505ff171717ff171717ff101010ff161616bf2e2e2e35ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01363636b3363636ff363636ff363636ff363636ff373737ff383838ff1f1f1fff0a0a0aff0c0c0cff373737ff3a3a3aff262626ff060606ff040404ff121212ff151515dd30303051ffffff01ffffff01ffffff018787872d6b6b6b47ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01363636b3363636ff363636ff363636ff393939ff272727ff0d0d0dff0b0b0bff0d0d0dff0d0d0dff373737ff363636ff373737ff383838ff1c1c1cf92020207568686807ffffff01ffffff01ffffff01ffffff018686863d5f5f5fff676767af77777721ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01
ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01363636b3363636ff393939ff2e2e2eff101010ff0a0a0aff0d0d0dff0d0d0dff0d0d0dff0d0d0dff373737ff363636ff363636ff353535ff373737ebffffff01ffffff01ffffff01ffffff01ffffff01ffffff018686863d626262ff666666ff646464f76969698d9494940fffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01383838b5333333ff161616ff090909ff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff373737ff363636ff363636ff363636ff353535ebffffff01ffffff01ffffff01ffffff01ffffff01ffffff018686863d626262ff676767ff6b6b6bff555555ff3a3a3a93ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0125252589030303ff0c0c0cff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff333333ff383838ff353535ff363636ff353535ebffffff01ffffff01ffffff01ffffff01ffffff01ffffff018585853d666666ff5f5f5fff3c3c3cff313131ff3a3a3a93ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff012d2d2d3f0e0e0ecb080808ff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff141414ff222222ff363636ff373737ff353535ebffffff01ffffff01ffffff01ffffff01ffffff01ffffff0177777741414141ff313131ff363636ff353535ff3a3a3a93ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff011e1e1e5f0a0a0ae50a0a0aff0d0d0dff0d0d0dff0d0d0dff0d0d0dff171717ff161616ff151515ff282828ff353535f3ffffff01ffffff01ffffff01ffffff016e6e6e0b37373781242424f1191919ff333333ff383838ff343434ff3a3a3a93ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff015a5a5a0d1919197f0a0a0af30b0b0bff0d0d0dff0d0d0dff171717ff171717ff161616ff0f0f0ffb24242489ffffff01ffffff01ffffff013e3e3e5d2d2d2de52e2e2eff2b2b2bff151515ff141414ff212121ff363636ff3b3b3b95ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff013636361b111111a3080808ff0c0c0cff181818ff0f0f0fff171717b545454525ffffff01ffffff017f7f7f05363636c7282828ff313131ff313131ff2b2b2bff151515ff171717ff161616ff0c0c0cfb3434346bffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01303030350f0f0fc7121212d337373741ffffff01ffffff01ffffff01ffffff01ffffff016b6b6b0b3a3a3a7d2c2c2cf12f2f2fff2b2b2bff151515ff101010ff171717bb4646462dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01515151193535359b242424ff131313d72828284bffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01fff
fff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff014e4e4e2b59595905ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff000000000000ffff28000000400000008000000001002000000000000042000000000000000000000000000000000000ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff
01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0176767635666666914e4e4e457c7c7c09ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff018080801569696989545454696c6c6c0bffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff018484840d70707061616161d5606060fb3d3d3ddf4e4e4e9172727213ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff017070704d626262b35f5f5ffb464646f1454545a16a6a6a33ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff017676760f67676753646464cf5e5e5eff656565ff626262ff414141ff404040ff444444e54b4b4b7b69696919ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01979797036c6c6c45676767a95d5d5dff616161ff626262ff484848ff424242ff3e3e3efd4e4e4e8958585831ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff017e7e7e2b616161a75f5f5fef616161ff636363ff656565ff626262ff424242ff464646ff444444ff414141fd434343b961616153ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff017777771969696981606060e7606060ff636363ff636363ff626262ff484848ff464646ff454545ff424242fd414141d95656566569696911ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01858585056e6e6e29656565995f5f5ff1616161ff636363ff636363ff636363ff656565ff626262ff424242ff464646ff464646ff464646ff444444ff3f3f3fff484848af5353534b86868607ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01797979216a6a6a6f616161ed5e5e5eff636363ff636363ff636363ff636363ff626262ff484848ff464646ff464646ff464646ff464646ff3e3e3eff474747d75151515762626213ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01838383036f6f6f755f5f5fd3606060ff626262ff636363ff636363ff636363ff636363ff656565ff626262ff424242ff464646ff464646ff464646ff464646ff454545ff434343ff404040e94e4e4e8d5f5f5f1bffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff018f8f8f056b6b6b45616161c95f5f5ff7616161ff636363ff636363ff636363ff636363ff636363ff626262ff484848ff464646ff464646ff464646ff464646ff464646ff444444ff424242f1434343b16666662dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff017070700f6969695f626262d35e5e5eff626262ff636363ff636363ff636363ff636363ff636363ff636363ff656565ff626262ff4
24242ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff404040ff444444f14d4d4d776a6a6a23ffffff01ffffff01ffffff01ffffff017b7b7b096c6c6c39636363c15f5f5ffb626262ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff626262ff484848ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff434343ff414141f54a4a4aa35b5b5b2d70707007ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0171717143676767a7616161f3616161ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff656565ff626262ff424242ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff444444ff414141f7474747cd54545447ffffff01ffffff015b5b5b096b6b6b99646464e1606060ff626262ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff626262ff484848ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff444444ff424242ff414141d552525277ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01404040b33b3b3bff5c5c5cff656565ff646464ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff656565ff626262ff424242ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff474747ff454545ff3a3a3aff313131ad34343407ffffff012e2e2e25383838ff535353ff656565ff656565ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff626262ff484848ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff474747ff464646ff3b3b3bff3a3a3ae9ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444a9313131ff363636ff484848ff636363ff676767ff636363ff636363ff636363ff636363ff636363ff636363ff656565ff626262ff424242ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff404040ff363636ff343434ff353535a537373705ffffff0135353521333333ff333333ff434343ff5c5c5cff686868ff636363ff636363ff636363ff636363ff636363ff636363ff636363ff626262ff484848ff464646ff464646ff464646ff464646ff464646ff464646ff464646ff484848ff414141ff393939ff313131ff3c3c3cdbffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444a9343434ff363636ff323232ff353535ff4b4b4bff636363ff656565ff636363ff626262ff636363ff636363ff656565ff626262ff424242ff464646ff464646ff464646ff464646ff474747ff464646ff414141ff363636ff343434ff363636ff363636ff363636a537373705ffffff0135353521363636ff363636ff333333ff313131ff484848ff5e5e5eff666666ff646464ff626262ff636363ff636363ff636363ff626262ff484848ff464646ff464646ff464646ff464646ff464646ff474747ff424242ff3a3a3aff343434ff353535ff333333ff3d3d3ddbffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444a9343434ff363636ff363636ff343434ff333333ff3d3d3dff555555ff686868ff656565ff626262ff636363ff656565ff626262ff424242ff464646ff464646ff464646ff484848ff444444ff393939ff353535ff353535ff363636ff363636ff363636ff363636a537373705ffffff0135353521363636ff363636ff363636ff353535ff323232ff363636ff515151ff646464ff656565ff636363ff636363ff636363ff626262ff484848ff464646ff464646ff464646ff484848ff454545ff3d3d3dff353535ff343434ff363636ff363636ff333333ff3d3d3ddbffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444a9343434ff363636ff363636ff363636ff363636ff343434ff303030ff3f3f3fff575757ff666666ff656565ff646464ff626262ff424242ff464646ff474747ff454545ff3a3a3aff343434ff353535ff363636ff363636ff363636ff363636ff363636ff363636a537373705ffffff0135353521363636ff363636ff363636ff363636ff363636ff363636ff303030ff373737ff535353ff636363ff656565ff636363ff626262ff484848ff464646ff474747ff454545ff3e3e3eff353535ff343434ff363636ff363636ff363636ff363636ff333333ff3d3d3ddbffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444a9343434ff363636ff363636ff363636ff363636ff363636ff363636ff333333ff333333ff4848
48ff606060ff696969ff626262ff434343ff474747ff3e3e3eff363636ff353535ff353535ff363636ff363636ff363636ff363636ff363636ff363636ff363636a537373705ffffff0135353521363636ff363636ff363636ff363636ff363636ff363636ff353535ff343434ff333333ff3e3e3eff5d5d5dff686868ff626262ff484848ff474747ff424242ff373737ff353535ff353535ff363636ff363636ff363636ff363636ff363636ff333333ff3d3d3ddbffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444a9343434ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff323232ff323232ff505050ff616161ff3d3d3dff373737ff343434ff353535ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636a537373705ffffff0135353521363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff343434ff313131ff434343ff606060ff464646ff383838ff343434ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff333333ff3d3d3ddbffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444a9343434ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff3a3a3aff2b2b2bff1e1e1eff2d2d2dff383838ff373737ff353535ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636a537373705ffffff0135353521363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff393939ff323232ff1c1c1cff262626ff373737ff383838ff353535ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff333333ff3d3d3ddbffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444a9343434ff363636ff363636ff363636ff363636ff363636ff353535ff373737ff383838ff303030ff191919ff080808ff101010ff141414ff1a1a1aff303030ff383838ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636ff363636a537373705ffffff0135353521363636ff363636ff363636ff363636ff363636ff363636ff353535ff363636ff383838ff363636ff1d1d1dff0b0b0bff0c0c0cff141414ff181818ff292929ff373737ff373737ff363636ff363636ff363636ff363636ff363636ff363636ff333333ff3d3d3ddbffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444a9343434ff363636ff363636ff363636ff363636ff353535ff393939ff363636ff222222ff0c0c0cff0a0a0aff0c0c0cff121212ff171717ff151515ff161616ff212121ff353535ff393939ff363636ff363636ff363636ff363636ff363636ff363636ff363636a537373705ffffff0135353521363636ff363636ff363636ff363636ff363636ff353535ff383838ff3a3a3aff262626ff121212ff0a0a0aff0c0c0cff0f0f0fff171717ff151515ff151515ff1e1e1eff2f2f2fff3a3a3aff363636ff363636ff363636ff363636ff363636ff333333ff3d3d3ddbffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444a9343434ff363636ff363636ff363636ff383838ff363636ff262626ff0d0d0dff090909ff0d0d0dff0d0d0dff0c0c0cff121212ff171717ff171717ff171717ff141414ff151515ff232323ff353535ff383838ff363636ff353535ff363636ff363636ff363636a537373705ffffff0135353521363636ff363636ff363636ff353535ff383838ff383838ff292929ff131313ff080808ff0c0c0cff0d0d0dff0d0d0dff0f0f0fff171717ff171717ff171717ff151515ff131313ff202020ff313131ff383838ff363636ff363636ff363636ff333333ff3d3d3ddbffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444a9343434ff353535ff363636ff3a3a3aff2e2e2eff131313ff0a0a0aff0b0b0bff0d0d0dff0d0d0dff0d0d0dff0c0c0cff121212ff171717ff171717ff171717ff171717ff161616ff141414ff1a1a1aff2a2a2aff393939ff373737ff363636ff363636ff363636a537373705ffffff0135353521363636ff363636ff363636ff3a3a3aff313131ff1c1c1cff0a0a0aff0a0a0aff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0f0f0fff171717ff171717ff171717ff171717ff161616ff151515ff161616ff282828ff363636ff383838ff363636ff333333ff3d3d3ddbffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444a9353535ff383838ff313131ff151515ff080808ff0b0b0bf
f0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff121212ff171717ff171717ff171717ff171717ff171717ff171717ff161616ff131313ff1b1b1bff2d2d2dff373737ff373737ff363636a537373705ffffff0134343421363636ff383838ff333333ff1e1e1eff090909ff0a0a0aff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0f0f0fff171717ff171717ff171717ff171717ff171717ff171717ff171717ff131313ff171717ff2a2a2aff363636ff353535ff3d3d3ddbffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01444444af353535ff1e1e1eff0d0d0dff0a0a0aff0c0c0cff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff121212ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff151515ff151515ff222222ff333333ff353535ad30303007ffffff0134343423373737ff282828ff0d0d0dff0a0a0aff0c0c0cff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0f0f0fff171717ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff151515ff141414ff1b1b1bff2e2e2eff3e3e3ee1ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff013e3e3e6f0f0f0fd5040404ff0b0b0bff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff121212ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff101010ff0e0e0ee72f2f2f7347474703ffffff013b3b3b13141414cd050505f70a0a0aff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0f0f0fff171717ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff121212ff0c0c0cf12a2a2aa5ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff015f5f5f052020202b1a1a1aa1080808f1070707ff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff121212ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff141414ff0c0c0cff212121af2a2a2a496d6d6d07ffffff01ffffff01ffffff01333333231d1d1d730b0b0beb060606ff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0f0f0fff171717ff171717ff171717ff171717ff171717ff171717ff171717ff171717ff151515ff0e0e0eff181818d72626265546464615ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff014d4d4d29121212af080808ef0a0a0aff0c0c0cff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff121212ff171717ff171717ff171717ff171717ff171717ff171717ff141414ff121212f9141414b93b3b3b4fffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0138383819151515890a0a0ae5080808ff0c0c0cff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0f0f0fff171717ff171717ff171717ff171717ff171717ff171717ff161616ff101010fb151515d72c2c2c614444440dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff0133333311262626510f0f0fd7050505ff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0c0c0cff121212ff171717ff171717ff171717ff171717ff171717ff101010ff141414e7242424733a3a3a19ffffff01ffffff01ffffff01878787097272725f4d4d4d736a6a6a11ffffff01ffffff01ffffff016060600524242445191919ad040404ff0a0a0aff0d0d0dff0d0d0dff0d0d0dff0d0d0dff0f0f0fff171717ff171717ff171717ff171717ff171717ff111111ff0e0e0efd242424873232322dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff015c5c5c0d2525255f090909d7080808fb0b0b0bff0d0d0dff0c0c0cff121212ff171717ff171717ff161616ff121212ff121212df2121218965656511ffffff01ffffff01ffffff018080800d6767674b646464d1606060ff454545ff464646df4f4f4f6165656517ffffff01ffffff01ffffff01ffffff012d2d2d4b101010b5060606fb0a0a0aff0d0d0dff0d0d0dff0f0f0fff171717ff171717ff161616ff131313ff101010ef2020209d4242422dffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ffffff01ff
[... hex-encoded favicon image data truncated ...]
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000` diff --git a/swarm/sctx/sctx.go b/swarm/sctx/sctx.go new file mode 100644 index 000000000000..8619f6e19c5a --- /dev/null +++ b/swarm/sctx/sctx.go @@ -0,0 +1,7 @@ +package sctx + +type ContextKey int + +const ( + HTTPRequestIDKey ContextKey = iota +) From 042191338d28065b0f73f9e253ec6402600816ae Mon Sep 17 00:00:00 2001 From: Andrew Chiw Date: Tue, 7 Aug 2018 12:00:12 +0200 Subject: [PATCH 103/166] swarm/api/http: GET/PUT/PATCH/DELETE/POST multipart form unit tests. (#17277) httpDo has a verbose option that dumps the HTTP request --- swarm/api/http/server_test.go | 268 ++++++++++++++++++++++++++++++++++ 1 file changed, 268 insertions(+) diff --git a/swarm/api/http/server_test.go b/swarm/api/http/server_test.go index dfa8a51877cb..f23f236b221f 100644 --- a/swarm/api/http/server_test.go +++ b/swarm/api/http/server_test.go @@ -27,8 +27,10 @@ import ( "fmt" "io" "io/ioutil" + "mime/multipart" "net/http" "os" + "strconv" "strings" "testing" "time" @@ -906,3 +908,269 @@ func TestMethodsNotAllowed(t *testing.T) { } } + +// HTTP convenience function +func httpDo(httpMethod string, url string, reqBody io.Reader, headers map[string]string, verbose bool, t *testing.T) (*http.Response, string) { + // Build the Request + req, err := http.NewRequest(httpMethod, url, reqBody) + if err != nil { + t.Fatal(err) + } + for key, value := range headers { + req.Header.Set(key, value) + } + if verbose { + t.Log(req.Method, req.URL, req.Header, req.Body) + } + + // Send Request out + httpClient := &http.Client{} + res, err := httpClient.Do(req) + if err != nil { + t.Fatal(err) + } + + // Read the HTTP Body + buffer, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + body := string(buffer) + + return res, body +} + +func TestGet(t *testing.T) { + // Setup Swarm + srv := testutil.NewTestSwarmServer(t, serverFunc) + defer srv.Close() + + testCases := []struct { + uri string + method string + headers map[string]string + expectedStatusCode int + assertResponseBody string + verbose bool + }{ + { + // Accept: text/html GET / -> 200 HTML, Swarm Landing Page + uri: fmt.Sprintf("%s/", srv.URL), + method: "GET", + headers: map[string]string{"Accept": "text/html"}, + expectedStatusCode: 200, + assertResponseBody: "Swarm: Serverless Hosting Incentivised peer-to-peer Storage and Content Distribution", + verbose: false, + }, + { + // Accept: application/json GET / -> 200 'Welcome to Swarm' + uri: fmt.Sprintf("%s/", srv.URL), + method: "GET", + headers: map[string]string{"Accept": "application/json"}, + expectedStatusCode: 200, + assertResponseBody: "Welcome to Swarm!", + verbose: false, + }, + { + // GET /robots.txt -> 200 + uri: fmt.Sprintf("%s/robots.txt", srv.URL), + method: "GET", + headers: map[string]string{"Accept": "text/html"}, + expectedStatusCode: 200, + assertResponseBody: "User-agent: *\nDisallow: /", + verbose: false, + }, + { + // GET /path_that_doesnt exist -> 400 + uri: fmt.Sprintf("%s/nonexistent_path", srv.URL), + method: "GET", + headers: map[string]string{}, + expectedStatusCode: 400, + verbose: false, + }, + { + // GET bzz-invalid:/ -> 400 + uri: fmt.Sprintf("%s/bzz:asdf/", srv.URL), + method: "GET", + headers: map[string]string{}, + expectedStatusCode: 400, + verbose: false, + }, + { + // GET bzz-invalid:/ -> 400 + uri: fmt.Sprintf("%s/tbz2/", srv.URL), + method: "GET", + headers: map[string]string{}, + expectedStatusCode: 400, + verbose: false, + }, + { + // GET 
bzz-invalid:/ -> 400 + uri: fmt.Sprintf("%s/bzz-rack:/", srv.URL), + method: "GET", + headers: map[string]string{}, + expectedStatusCode: 400, + verbose: false, + }, + { + // GET bzz-invalid:/ -> 400 + uri: fmt.Sprintf("%s/bzz-ls", srv.URL), + method: "GET", + headers: map[string]string{}, + expectedStatusCode: 400, + verbose: false, + }, + } + + for _, testCase := range testCases { + t.Run("GET "+testCase.uri, func(t *testing.T) { + res, body := httpDo(testCase.method, testCase.uri, nil, testCase.headers, testCase.verbose, t) + if res.StatusCode != testCase.expectedStatusCode { + t.Fatalf("expected %s %s to return a %v but it didn't", testCase.method, testCase.uri, testCase.expectedStatusCode) + } + if testCase.assertResponseBody != "" && !strings.Contains(body, testCase.assertResponseBody) { + t.Fatalf("expected %s %s to have %s within HTTP response body but it didn't", testCase.method, testCase.uri, testCase.assertResponseBody) + } + }) + } +} + +func TestModify(t *testing.T) { + // Setup Swarm and upload a test file to it + srv := testutil.NewTestSwarmServer(t, serverFunc) + defer srv.Close() + + swarmClient := swarm.NewClient(srv.URL) + data := []byte("data") + file := &swarm.File{ + ReadCloser: ioutil.NopCloser(bytes.NewReader(data)), + ManifestEntry: api.ManifestEntry{ + Path: "", + ContentType: "text/plain", + Size: int64(len(data)), + }, + } + + hash, err := swarmClient.Upload(file, "", false) + if err != nil { + t.Fatal(err) + } + + testCases := []struct { + uri string + method string + headers map[string]string + requestBody []byte + expectedStatusCode int + assertResponseBody string + assertResponseHeaders map[string]string + verbose bool + }{ + { + // DELETE bzz:/hash -> 200 OK + uri: fmt.Sprintf("%s/bzz:/%s", srv.URL, hash), + method: "DELETE", + headers: map[string]string{}, + expectedStatusCode: 200, + assertResponseBody: "8b634aea26eec353ac0ecbec20c94f44d6f8d11f38d4578a4c207a84c74ef731", + verbose: false, + }, + { + // PUT bzz:/hash -> 405 Method Not Allowed + uri: fmt.Sprintf("%s/bzz:/%s", srv.URL, hash), + method: "PUT", + headers: map[string]string{}, + expectedStatusCode: 405, + verbose: false, + }, + { + // PUT bzz-raw:/hash -> 405 Method Not Allowed + uri: fmt.Sprintf("%s/bzz-raw:/%s", srv.URL, hash), + method: "PUT", + headers: map[string]string{}, + expectedStatusCode: 405, + verbose: false, + }, + { + // PATCH bzz:/hash -> 405 Method Not Allowed + uri: fmt.Sprintf("%s/bzz:/%s", srv.URL, hash), + method: "PATCH", + headers: map[string]string{}, + expectedStatusCode: 405, + verbose: false, + }, + { + // POST bzz-raw:/ -> 200 OK + uri: fmt.Sprintf("%s/bzz-raw:/", srv.URL), + method: "POST", + headers: map[string]string{}, + requestBody: []byte("POSTdata"), + expectedStatusCode: 200, + assertResponseHeaders: map[string]string{"Content-Length": "64"}, + verbose: false, + }, + { + // POST bzz-raw:/encrypt -> 200 OK + uri: fmt.Sprintf("%s/bzz-raw:/encrypt", srv.URL), + method: "POST", + headers: map[string]string{}, + requestBody: []byte("POSTdata"), + expectedStatusCode: 200, + assertResponseHeaders: map[string]string{"Content-Length": "128"}, + verbose: false, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.method+" "+testCase.uri, func(t *testing.T) { + reqBody := bytes.NewReader(testCase.requestBody) + res, body := httpDo(testCase.method, testCase.uri, reqBody, testCase.headers, testCase.verbose, t) + + if res.StatusCode != testCase.expectedStatusCode { + t.Fatalf("expected %s %s to return a %v but it returned a %v instead", testCase.method, 
testCase.uri, testCase.expectedStatusCode, res.StatusCode) + } + if testCase.assertResponseBody != "" && !strings.Contains(body, testCase.assertResponseBody) { + t.Log(body) + t.Fatalf("expected %s %s to have %s within HTTP response body but it didn't", testCase.method, testCase.uri, testCase.assertResponseBody) + } + for key, value := range testCase.assertResponseHeaders { + if res.Header.Get(key) != value { + t.Logf("expected %s=%s in HTTP response header but got %s", key, value, res.Header.Get(key)) + } + } + }) + } +} + +func TestMultiPartUpload(t *testing.T) { + // POST /bzz:/ Content-Type: multipart/form-data + verbose := false + // Setup Swarm + srv := testutil.NewTestSwarmServer(t, serverFunc) + defer srv.Close() + + url := fmt.Sprintf("%s/bzz:/", srv.URL) + + buf := new(bytes.Buffer) + form := multipart.NewWriter(buf) + form.WriteField("name", "John Doe") + file1, _ := form.CreateFormFile("cv", "cv.txt") + file1.Write([]byte("John Doe's Credentials")) + file2, _ := form.CreateFormFile("profile_picture", "profile.jpg") + file2.Write([]byte("imaginethisisjpegdata")) + form.Close() + + headers := map[string]string{ + "Content-Type": form.FormDataContentType(), + "Content-Length": strconv.Itoa(buf.Len()), + } + res, body := httpDo("POST", url, buf, headers, verbose, t) + + if res.StatusCode != 200 { + t.Fatalf("expected POST multipart/form-data to return 200, but it returned %d", res.StatusCode) + } + if len(body) != 64 { + t.Fatalf("expected POST multipart/form-data to return a 64 char manifest but the answer was %d chars long", len(body)) + } +} From de9b0660acf26edc3b261b805c1a3454e3c76321 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Tue, 7 Aug 2018 12:56:01 +0200 Subject: [PATCH 104/166] swarm/README: add more sections to easily onboard developers (#17333) --- swarm/README.md | 191 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 190 insertions(+), 1 deletion(-) diff --git a/swarm/README.md b/swarm/README.md index f2d189043406..e81963217f97 100644 --- a/swarm/README.md +++ b/swarm/README.md @@ -7,6 +7,21 @@ Swarm is a distributed storage platform and content distribution service, a nati [![Travis](https://travis-ci.org/ethereum/go-ethereum.svg?branch=master)](https://travis-ci.org/ethereum/go-ethereum) [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/ethersphere/orange-lounge?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) +## Table of Contents + +* [Building the source](#building-the-source) +* [Running Swarm](#running-swarm) +* [Documentation](#documentation) +* [Developers Guide](#developers-guide) + * [Go Environment](#development-environment) + * [Vendored Dependencies](#vendored-dependencies) + * [Testing](#testing) + * [Profiling Swarm](#profiling-swarm) + * [Metrics and Instrumentation in Swarm](#metrics-and-instrumentation-in-swarm) +* [Public Gateways](#public-gateways) +* [Swarm Dapps](#swarm-dapps) +* [Contributing](#contributing) +* [License](#license) ## Building the source @@ -16,13 +31,187 @@ Building Swarm requires Go (version 1.10 or later). go install github.com/ethereum/go-ethereum/cmd/swarm +## Running Swarm + +Going through all the possible command line flags is out of scope here, but we've enumerated a few common parameter combos to get you up to speed quickly on how you can run your own Swarm node. + +To run Swarm you need an Ethereum account. 
+You can create a new account by running the following command:
+
+    geth account new
+
+You will be prompted for a password:
+
+    Your new account is locked with a password. Please give a password. Do not forget this password.
+    Passphrase:
+    Repeat passphrase:
+
+Once you have specified the password, the output will be the Ethereum address representing that account. For example:
+
+    Address: {2f1cd699b0bf461dcfbf0098ad8f5587b038f0f1}
+
+Using this account, connect to Swarm with:
+
+    swarm --bzzaccount <your-account-here>
+
+    # in our example
+
+    swarm --bzzaccount 2f1cd699b0bf461dcfbf0098ad8f5587b038f0f1
+
+
+### Verifying that your local Swarm node is running
+
+When running, Swarm is accessible through an HTTP API on port 8500.
+
+Confirm that it is up and running by pointing your browser to http://localhost:8500
+
+### Ethereum Name Service resolution
+
+The Ethereum Name Service is the Ethereum equivalent of DNS in the classic web. In order to use ENS to resolve names to Swarm content hashes (e.g. `bzz://theswarm.eth`), `swarm` has to connect to a `geth` instance that is synced with the Ethereum mainnet. This is done using the `--ens-api` flag.
+
+    swarm --bzzaccount <your-account-here> \
+          --ens-api '$HOME/.ethereum/geth.ipc'
+
+    # in our example
+
+    swarm --bzzaccount 2f1cd699b0bf461dcfbf0098ad8f5587b038f0f1 \
+          --ens-api '$HOME/.ethereum/geth.ipc'
+
+For more information on usage, features or command line flags, please consult the Documentation.
+
 ## Documentation
 
 Swarm documentation can be found at [https://swarm-guide.readthedocs.io](https://swarm-guide.readthedocs.io).
 
-## Contribution
+## Developers Guide
+
+### Go Environment
+
+We assume that you have Go v1.10 installed, and `GOPATH` is set.
+
+You must have your working copy under `$GOPATH/src/github.com/ethereum/go-ethereum`.
+
+Most likely you will be working from your fork of `go-ethereum`, for example from `github.com/nirname/go-ethereum`. Clone or move your fork into the right place:
+
+```
+git clone git@github.com:nirname/go-ethereum.git $GOPATH/src/github.com/ethereum/go-ethereum
+```
+
+
+### Vendored Dependencies
+
+All dependencies are tracked in the `vendor` directory. We use `govendor` to manage them.
+
+If you want to add a new dependency, run `govendor fetch <import-path>`, then commit the result.
+
+If you want to update all dependencies to their latest upstream version, run `govendor fetch +v`.
+
+
+### Testing
+
+This section explains how to run unit, integration, and end-to-end tests in your development sandbox.
+
+Testing one library:
+
+```
+go test -v -cpu 4 ./swarm/api
+```
+
+Note: using the options -cpu (number of cores allowed) and -v (verbose logging even if there is no error) is recommended.
+
+Testing only some methods:
+
+```
+go test -v -cpu 4 ./eth -run TestMethod
+```
+
+Note: all tests whose names begin with TestMethod will be run, so if you have both TestMethod and TestMethod1, both will be executed.
+
+Running benchmarks:
+
+```
+go test -v -cpu 4 -bench . -run BenchmarkJoin
+```
+
+
+### Profiling Swarm
+
+This section explains how to use the Go `pprof` profiler with Swarm.
+
+If `swarm` is started with the `--pprof` option, a debugging HTTP server is made available on port 6060.
+
+You can bring up http://localhost:6060/debug/pprof to see the heap profile, the running goroutines, and more.
+
+Clicking "full goroutine stack dump" (http://localhost:6060/debug/pprof/goroutine?debug=2) generates a trace that is useful for debugging.
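For instance, once a node is running with `--pprof`, the standard Go tooling can pull profiles straight from that debug server. A minimal sketch, assuming a node listening on the default pprof port 6060:

```
# interactively explore a 30-second CPU profile of the running node
go tool pprof http://localhost:6060/debug/pprof/profile?seconds=30

# inspect a snapshot of the current heap allocations
go tool pprof http://localhost:6060/debug/pprof/heap
```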
+
+
+### Metrics and Instrumentation in Swarm
+
+This section explains how to visualize and use existing Swarm metrics and how to instrument Swarm with a new metric.
+
+The Swarm metrics system is based on the `go-metrics` library.
+
+The most common types of measurements we use in Swarm are `counters` and `resetting timers`. Consult the `go-metrics` documentation for a full reference of the available types.
+
+```
+// incrementing a counter
+metrics.GetOrRegisterCounter("network.stream.received_chunks", nil).Inc(1)
+
+// measuring latency with a resetting timer
+start := time.Now()
+t := metrics.GetOrRegisterResettingTimer("http.request.GET.time", nil)
+...
+t.UpdateSince(start)
+```
+
+#### Visualizing metrics
+
+Swarm supports an InfluxDB exporter. Consult the help section to learn about the command line arguments used to configure it:
+
+```
+swarm --help | grep metrics
+```
+
+We use Grafana and InfluxDB to visualise metrics reported by Swarm. We keep our Grafana dashboards under version control at `./swarm/grafana_dashboards`. You could use them or design your own.
+
+We have built a tool that helps with automatically starting Grafana and InfluxDB and provisioning dashboards at https://github.com/nonsense/stateth, which requires that you have Docker installed.
+
+Once you have `stateth` installed, and you have Docker running locally, you have to:
+
+1. Run `stateth` and keep it running in the background
+```
+stateth --rm --grafana-dashboards-folder $GOPATH/src/github.com/ethereum/go-ethereum/swarm/grafana_dashboards --influxdb-database metrics
+```
+
+2. Run `swarm` with at least the following params:
+```
+--metrics \
+--metrics.influxdb.export \
+--metrics.influxdb.endpoint "http://localhost:8086" \
+--metrics.influxdb.username "admin" \
+--metrics.influxdb.password "admin" \
+--metrics.influxdb.database "metrics"
+```
+
+3. Open Grafana at http://localhost:3000 and view the dashboards to gain insight into Swarm.
+
+
+## Public Gateways
+
+Swarm offers a local HTTP proxy API that Dapps can use to interact with Swarm. The Ethereum Foundation is hosting a public gateway, which allows free access so that people can try Swarm without running their own node.
+
+The Swarm public gateways are temporary and users should not rely on their existence for production services.
+
+The Swarm public gateway can be found at https://swarm-gateways.net and is always running the latest `stable` Swarm release.
+
+## Swarm Dapps
+
+You can find a few reference Swarm decentralised applications at: https://swarm-gateways.net/bzz:/swarmapps.eth
+
+Their source code can be found at: https://github.com/ethersphere/swarm-dapps
+
+## Contributing
 
 Thank you for considering to help out with the source code! We welcome contributions from anyone on the internet, and are grateful for even the smallest of fixes!
From cf05ef9106779da0df62c0c03312fc489171aaa5 Mon Sep 17 00:00:00 2001 From: Oleg Kovalov Date: Tue, 7 Aug 2018 12:56:40 +0200 Subject: [PATCH 105/166] p2p, swarm, trie: avoid copying slices in loops (#17265) --- p2p/discover/table.go | 8 ++++---- p2p/discv5/net_test.go | 2 +- p2p/discv5/table.go | 8 ++++---- swarm/api/manifest.go | 6 +++--- trie/node.go | 4 ++-- trie/trie.go | 2 +- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/p2p/discover/table.go b/p2p/discover/table.go index 8803daa56e1e..0a554bbeb4d1 100644 --- a/p2p/discover/table.go +++ b/p2p/discover/table.go @@ -160,7 +160,7 @@ func (tab *Table) ReadRandomNodes(buf []*Node) (n int) { // Find all non-empty buckets and get a fresh slice of their entries. var buckets [][]*Node - for _, b := range tab.buckets { + for _, b := range &tab.buckets { if len(b.entries) > 0 { buckets = append(buckets, b.entries[:]) } @@ -508,7 +508,7 @@ func (tab *Table) copyLiveNodes() { defer tab.mutex.Unlock() now := time.Now() - for _, b := range tab.buckets { + for _, b := range &tab.buckets { for _, n := range b.entries { if now.Sub(n.addedAt) >= seedMinTableTime { tab.db.updateNode(n) @@ -524,7 +524,7 @@ func (tab *Table) closest(target common.Hash, nresults int) *nodesByDistance { // obviously correct. I believe that tree-based buckets would make // this easier to implement efficiently. close := &nodesByDistance{target: target} - for _, b := range tab.buckets { + for _, b := range &tab.buckets { for _, n := range b.entries { close.push(n, nresults) } @@ -533,7 +533,7 @@ func (tab *Table) closest(target common.Hash, nresults int) *nodesByDistance { } func (tab *Table) len() (n int) { - for _, b := range tab.buckets { + for _, b := range &tab.buckets { n += len(b.entries) } return n diff --git a/p2p/discv5/net_test.go b/p2p/discv5/net_test.go index 001d193cc9f1..1a8137673d38 100644 --- a/p2p/discv5/net_test.go +++ b/p2p/discv5/net_test.go @@ -355,7 +355,7 @@ func (tn *preminedTestnet) mine(target NodeID) { fmt.Printf(" target: %#v,\n", tn.target) fmt.Printf(" targetSha: %#v,\n", tn.targetSha) fmt.Printf(" dists: [%d][]NodeID{\n", len(tn.dists)) - for ld, ns := range tn.dists { + for ld, ns := range &tn.dists { if len(ns) == 0 { continue } diff --git a/p2p/discv5/table.go b/p2p/discv5/table.go index c8d234b936d4..c793be50828d 100644 --- a/p2p/discv5/table.go +++ b/p2p/discv5/table.go @@ -81,7 +81,7 @@ func (tab *Table) chooseBucketRefreshTarget() common.Hash { if printTable { fmt.Println() } - for i, b := range tab.buckets { + for i, b := range &tab.buckets { entries += len(b.entries) if printTable { for _, e := range b.entries { @@ -93,7 +93,7 @@ func (tab *Table) chooseBucketRefreshTarget() common.Hash { prefix := binary.BigEndian.Uint64(tab.self.sha[0:8]) dist := ^uint64(0) entry := int(randUint(uint32(entries + 1))) - for _, b := range tab.buckets { + for _, b := range &tab.buckets { if entry < len(b.entries) { n := b.entries[entry] dist = binary.BigEndian.Uint64(n.sha[0:8]) ^ prefix @@ -121,7 +121,7 @@ func (tab *Table) readRandomNodes(buf []*Node) (n int) { // TODO: tree-based buckets would help here // Find all non-empty buckets and get a fresh slice of their entries. var buckets [][]*Node - for _, b := range tab.buckets { + for _, b := range &tab.buckets { if len(b.entries) > 0 { buckets = append(buckets, b.entries[:]) } @@ -175,7 +175,7 @@ func (tab *Table) closest(target common.Hash, nresults int) *nodesByDistance { // obviously correct. I believe that tree-based buckets would make // this easier to implement efficiently. 
close := &nodesByDistance{target: target} - for _, b := range tab.buckets { + for _, b := range &tab.buckets { for _, n := range b.entries { close.push(n, nresults) } diff --git a/swarm/api/manifest.go b/swarm/api/manifest.go index 198ca22ce099..fbd143f295a1 100644 --- a/swarm/api/manifest.go +++ b/swarm/api/manifest.go @@ -159,7 +159,7 @@ func (m *ManifestWalker) Walk(walkFn WalkFn) error { } func (m *ManifestWalker) walk(trie *manifestTrie, prefix string, walkFn WalkFn) error { - for _, entry := range trie.entries { + for _, entry := range &trie.entries { if entry == nil { continue } @@ -308,7 +308,7 @@ func (mt *manifestTrie) addEntry(entry *manifestTrieEntry, quitC chan bool) { } func (mt *manifestTrie) getCountLast() (cnt int, entry *manifestTrieEntry) { - for _, e := range mt.entries { + for _, e := range &mt.entries { if e != nil { cnt++ entry = e @@ -362,7 +362,7 @@ func (mt *manifestTrie) recalcAndStore() error { buffer.WriteString(`{"entries":[`) list := &Manifest{} - for _, entry := range mt.entries { + for _, entry := range &mt.entries { if entry != nil { if entry.Hash == "" { // TODO: paralellize err := entry.subtrie.recalcAndStore() diff --git a/trie/node.go b/trie/node.go index a06f1b3898f3..1fafb7a53825 100644 --- a/trie/node.go +++ b/trie/node.go @@ -55,7 +55,7 @@ var nilValueNode = valueNode(nil) func (n *fullNode) EncodeRLP(w io.Writer) error { var nodes [17]node - for i, child := range n.Children { + for i, child := range &n.Children { if child != nil { nodes[i] = child } else { @@ -98,7 +98,7 @@ func (n valueNode) String() string { return n.fstring("") } func (n *fullNode) fstring(ind string) string { resp := fmt.Sprintf("[\n%s ", ind) - for i, node := range n.Children { + for i, node := range &n.Children { if node == nil { resp += fmt.Sprintf("%s: ", indices[i]) } else { diff --git a/trie/trie.go b/trie/trie.go index 4284e30ad40e..e920ccd23f10 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -356,7 +356,7 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) { // value that is left in n or -2 if n contains at least two // values. pos := -1 - for i, cld := range n.Children { + for i, cld := range &n.Children { if cld != nil { if pos == -1 { pos = i From 4bb2dc3d09a76025292b36ac48f6fffa5450adb8 Mon Sep 17 00:00:00 2001 From: Elad Date: Tue, 7 Aug 2018 13:40:38 +0200 Subject: [PATCH 106/166] swarm/api/http: test fixes (#17334) --- swarm/api/http/response.go | 19 +++++-------- swarm/api/http/server_test.go | 51 +++++++++++------------------------ 2 files changed, 21 insertions(+), 49 deletions(-) diff --git a/swarm/api/http/response.go b/swarm/api/http/response.go index 32c09b1f5729..9f4788d35e00 100644 --- a/swarm/api/http/response.go +++ b/swarm/api/http/response.go @@ -29,14 +29,12 @@ import ( "github.com/ethereum/go-ethereum/swarm/api" ) -//metrics variables var ( htmlCounter = metrics.NewRegisteredCounter("api.http.errorpage.html.count", nil) jsonCounter = metrics.NewRegisteredCounter("api.http.errorpage.json.count", nil) plaintextCounter = metrics.NewRegisteredCounter("api.http.errorpage.plaintext.count", nil) ) -//parameters needed for formatting the correct HTML page type ResponseParams struct { Msg template.HTML Code int @@ -45,12 +43,12 @@ type ResponseParams struct { Details template.HTML } -//ShowMultipeChoices is used when a user requests a resource in a manifest which results -//in ambiguous results. It returns a HTML page with clickable links of each of the entry -//in the manifest which fits the request URI ambiguity. 
-//For example, if the user requests bzz://read and that manifest contains entries -//"readme.md" and "readinglist.txt", a HTML page is returned with this two links. -//This only applies if the manifest has no default entry +// ShowMultipleChoices is used when a user requests a resource in a manifest that results +// in ambiguous results. It returns an HTML page with a clickable link for each of the entries +// in the manifest that fit the request URI ambiguity. +// For example, if the user requests bzz://read and that manifest contains the entries +// "readme.md" and "readinglist.txt", an HTML page is returned with these two links. +// This only applies if the manifest has no default entry func ShowMultipleChoices(w http.ResponseWriter, r *http.Request, list api.ManifestList) { log.Debug("ShowMultipleChoices", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context())) msg := "" @@ -66,7 +64,6 @@ } uri.Scheme = "bzz-list" - //request the same url just with bzz-list msg += fmt.Sprintf("Disambiguation:
<br/>Your request may refer to multiple choices.<br/>Click <a href='%s'>here</a> if your browser does not redirect you within 5 seconds.<br/>
", "/"+uri.String()) RespondTemplate(w, r, "error", msg, http.StatusMultipleChoices) } @@ -86,7 +83,6 @@ func RespondError(w http.ResponseWriter, r *http.Request, msg string, code int) RespondTemplate(w, r, "error", msg, code) } -//evaluate if client accepts html or json response func respond(w http.ResponseWriter, r *http.Request, params *ResponseParams) { w.WriteHeader(params.Code) @@ -108,7 +104,6 @@ func respond(w http.ResponseWriter, r *http.Request, params *ResponseParams) { } } -//return a HTML page func respondHTML(w http.ResponseWriter, r *http.Request, params *ResponseParams) { htmlCounter.Inc(1) log.Debug("respondHTML", "ruid", GetRUID(r.Context())) @@ -118,7 +113,6 @@ func respondHTML(w http.ResponseWriter, r *http.Request, params *ResponseParams) } } -//return JSON func respondJSON(w http.ResponseWriter, r *http.Request, params *ResponseParams) error { jsonCounter.Inc(1) log.Debug("respondJSON", "ruid", GetRUID(r.Context())) @@ -126,7 +120,6 @@ func respondJSON(w http.ResponseWriter, r *http.Request, params *ResponseParams) return json.NewEncoder(w).Encode(params) } -//return plaintext func respondPlaintext(w http.ResponseWriter, r *http.Request, params *ResponseParams) error { plaintextCounter.Inc(1) log.Debug("respondPlaintext", "ruid", GetRUID(r.Context())) diff --git a/swarm/api/http/server_test.go b/swarm/api/http/server_test.go index f23f236b221f..3ac60596b5de 100644 --- a/swarm/api/http/server_test.go +++ b/swarm/api/http/server_test.go @@ -909,7 +909,6 @@ func TestMethodsNotAllowed(t *testing.T) { } -// HTTP convenience function func httpDo(httpMethod string, url string, reqBody io.Reader, headers map[string]string, verbose bool, t *testing.T) (*http.Response, string) { // Build the Request req, err := http.NewRequest(httpMethod, url, reqBody) @@ -942,11 +941,10 @@ func httpDo(httpMethod string, url string, reqBody io.Reader, headers map[string } func TestGet(t *testing.T) { - // Setup Swarm srv := testutil.NewTestSwarmServer(t, serverFunc) defer srv.Close() - testCases := []struct { + for _, testCase := range []struct { uri string method string headers map[string]string @@ -955,25 +953,22 @@ func TestGet(t *testing.T) { verbose bool }{ { - // Accept: text/html GET / -> 200 HTML, Swarm Landing Page uri: fmt.Sprintf("%s/", srv.URL), method: "GET", headers: map[string]string{"Accept": "text/html"}, expectedStatusCode: 200, - assertResponseBody: "Swarm: Serverless Hosting Incentivised peer-to-peer Storage and Content Distribution", + assertResponseBody: "Swarm: Serverless Hosting Incentivised Peer-To-Peer Storage And Content Distribution", verbose: false, }, { - // Accept: application/json GET / -> 200 'Welcome to Swarm' uri: fmt.Sprintf("%s/", srv.URL), method: "GET", headers: map[string]string{"Accept": "application/json"}, expectedStatusCode: 200, - assertResponseBody: "Welcome to Swarm!", + assertResponseBody: "Swarm: Please request a valid ENS or swarm hash with the appropriate bzz scheme", verbose: false, }, { - // GET /robots.txt -> 200 uri: fmt.Sprintf("%s/robots.txt", srv.URL), method: "GET", headers: map[string]string{"Accept": "text/html"}, @@ -982,62 +977,54 @@ func TestGet(t *testing.T) { verbose: false, }, { - // GET /path_that_doesnt exist -> 400 uri: fmt.Sprintf("%s/nonexistent_path", srv.URL), method: "GET", headers: map[string]string{}, - expectedStatusCode: 400, + expectedStatusCode: 404, verbose: false, }, { - // GET bzz-invalid:/ -> 400 uri: fmt.Sprintf("%s/bzz:asdf/", srv.URL), method: "GET", headers: map[string]string{}, - expectedStatusCode: 400, + 
expectedStatusCode: 404, verbose: false, }, { - // GET bzz-invalid:/ -> 400 uri: fmt.Sprintf("%s/tbz2/", srv.URL), method: "GET", headers: map[string]string{}, - expectedStatusCode: 400, + expectedStatusCode: 404, verbose: false, }, { - // GET bzz-invalid:/ -> 400 uri: fmt.Sprintf("%s/bzz-rack:/", srv.URL), method: "GET", headers: map[string]string{}, - expectedStatusCode: 400, + expectedStatusCode: 404, verbose: false, }, { - // GET bzz-invalid:/ -> 400 uri: fmt.Sprintf("%s/bzz-ls", srv.URL), method: "GET", headers: map[string]string{}, - expectedStatusCode: 400, + expectedStatusCode: 404, verbose: false, }, - } - - for _, testCase := range testCases { + } { t.Run("GET "+testCase.uri, func(t *testing.T) { res, body := httpDo(testCase.method, testCase.uri, nil, testCase.headers, testCase.verbose, t) if res.StatusCode != testCase.expectedStatusCode { - t.Fatalf("expected %s %s to return a %v but it didn't", testCase.method, testCase.uri, testCase.expectedStatusCode) + t.Fatalf("expected status code %d but got %d", testCase.expectedStatusCode, res.StatusCode) } if testCase.assertResponseBody != "" && !strings.Contains(body, testCase.assertResponseBody) { - t.Fatalf("expected %s %s to have %s within HTTP response body but it didn't", testCase.method, testCase.uri, testCase.assertResponseBody) + t.Fatalf("expected response to be: %s but got: %s", testCase.assertResponseBody, body) } }) } } func TestModify(t *testing.T) { - // Setup Swarm and upload a test file to it srv := testutil.NewTestSwarmServer(t, serverFunc) defer srv.Close() @@ -1057,7 +1044,7 @@ func TestModify(t *testing.T) { t.Fatal(err) } - testCases := []struct { + for _, testCase := range []struct { uri string method string headers map[string]string @@ -1068,7 +1055,6 @@ func TestModify(t *testing.T) { verbose bool }{ { - // DELETE bzz:/hash -> 200 OK uri: fmt.Sprintf("%s/bzz:/%s", srv.URL, hash), method: "DELETE", headers: map[string]string{}, @@ -1077,7 +1063,6 @@ func TestModify(t *testing.T) { verbose: false, }, { - // PUT bzz:/hash -> 405 Method Not Allowed uri: fmt.Sprintf("%s/bzz:/%s", srv.URL, hash), method: "PUT", headers: map[string]string{}, @@ -1085,7 +1070,6 @@ func TestModify(t *testing.T) { verbose: false, }, { - // PUT bzz-raw:/hash -> 405 Method Not Allowed uri: fmt.Sprintf("%s/bzz-raw:/%s", srv.URL, hash), method: "PUT", headers: map[string]string{}, @@ -1093,7 +1077,6 @@ func TestModify(t *testing.T) { verbose: false, }, { - // PATCH bzz:/hash -> 405 Method Not Allowed uri: fmt.Sprintf("%s/bzz:/%s", srv.URL, hash), method: "PATCH", headers: map[string]string{}, @@ -1101,7 +1084,6 @@ func TestModify(t *testing.T) { verbose: false, }, { - // POST bzz-raw:/ -> 200 OK uri: fmt.Sprintf("%s/bzz-raw:/", srv.URL), method: "POST", headers: map[string]string{}, @@ -1111,7 +1093,6 @@ func TestModify(t *testing.T) { verbose: false, }, { - // POST bzz-raw:/encrypt -> 200 OK uri: fmt.Sprintf("%s/bzz-raw:/encrypt", srv.URL), method: "POST", headers: map[string]string{}, @@ -1120,19 +1101,17 @@ func TestModify(t *testing.T) { assertResponseHeaders: map[string]string{"Content-Length": "128"}, verbose: false, }, - } - - for _, testCase := range testCases { + } { t.Run(testCase.method+" "+testCase.uri, func(t *testing.T) { reqBody := bytes.NewReader(testCase.requestBody) res, body := httpDo(testCase.method, testCase.uri, reqBody, testCase.headers, testCase.verbose, t) if res.StatusCode != testCase.expectedStatusCode { - t.Fatalf("expected %s %s to return a %v but it returned a %v instead", testCase.method, testCase.uri, 
testCase.expectedStatusCode, res.StatusCode) + t.Fatalf("expected status code %d but got %d", testCase.expectedStatusCode, res.StatusCode) } if testCase.assertResponseBody != "" && !strings.Contains(body, testCase.assertResponseBody) { t.Log(body) - t.Fatalf("expected %s %s to have %s within HTTP response body but it didn't", testCase.method, testCase.uri, testCase.assertResponseBody) + t.Fatalf("expected response %s but got %s", testCase.assertResponseBody, body) } for key, value := range testCase.assertResponseHeaders { if res.Header.Get(key) != value { From 8461fea44b140fcb04905be7acc4539f42c0444f Mon Sep 17 00:00:00 2001 From: b00ris Date: Tue, 7 Aug 2018 16:16:56 +0300 Subject: [PATCH 107/166] whisper: remove unused error (#17315) --- whisper/whisperv6/doc.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/whisper/whisperv6/doc.go b/whisper/whisperv6/doc.go index 066a9766d4d8..529bf3d2de0c 100644 --- a/whisper/whisperv6/doc.go +++ b/whisper/whisperv6/doc.go @@ -33,7 +33,6 @@ particularly the notion of singular endpoints. package whisperv6 import ( - "fmt" "time" ) @@ -79,12 +78,6 @@ const ( DefaultSyncAllowance = 10 // seconds ) -type unknownVersionError uint64 - -func (e unknownVersionError) Error() string { - return fmt.Sprintf("invalid envelope version %d", uint64(e)) -} - // MailServer represents a mail server, capable of // archiving the old messages for subsequent delivery // to the peers. Any implementation must ensure that both From 9df16f34689956121ebc360857f91242291b7f0c Mon Sep 17 00:00:00 2001 From: Attila Gazso Date: Tue, 7 Aug 2018 15:34:11 +0200 Subject: [PATCH 108/166] swarm: Added lightnode flag (#17291) * swarm: Added lightnode flag Added --lightnode command line parameter Added LightNode to Handshake message * swarm/config: Fixed variable naming * cmd/swarm: Changed BoolTFlag to BoolFlag for SwarmLightNodeEnabled * swarm/network: Changed logging * swarm/network: Changed protocol version testing * swarm/network: Renamed DefaultNetworkID variable to TestProtocolNetworkID * swarm/network: Bumped protocol version * swarm/network: Changed LightNode handshake test to table driven * swarm/network: Changed back TestProtocolVersion to 5 for now * swarm/network: Moved the test configuration inside the test function scope --- cmd/swarm/config.go | 11 +++++ cmd/swarm/main.go | 6 +++ swarm/api/config.go | 1 + swarm/network/protocol.go | 13 +++++- swarm/network/protocol_test.go | 80 +++++++++++++++++++++++++++------- swarm/swarm.go | 1 + 6 files changed, 95 insertions(+), 17 deletions(-) diff --git a/cmd/swarm/config.go b/cmd/swarm/config.go index ce2acdcc9753..ff085fd94fe4 100644 --- a/cmd/swarm/config.go +++ b/cmd/swarm/config.go @@ -68,6 +68,7 @@ const ( SWARM_ENV_SWAP_API = "SWARM_SWAP_API" SWARM_ENV_SYNC_DISABLE = "SWARM_SYNC_DISABLE" SWARM_ENV_SYNC_UPDATE_DELAY = "SWARM_ENV_SYNC_UPDATE_DELAY" + SWARM_ENV_LIGHT_NODE_ENABLE = "SWARM_LIGHT_NODE_ENABLE" SWARM_ENV_DELIVERY_SKIP_CHECK = "SWARM_DELIVERY_SKIP_CHECK" SWARM_ENV_ENS_API = "SWARM_ENS_API" SWARM_ENV_ENS_ADDR = "SWARM_ENS_ADDR" @@ -204,6 +205,10 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con currentConfig.SyncUpdateDelay = d } + if ctx.GlobalIsSet(SwarmLightNodeEnabled.Name) { + currentConfig.LightNodeEnabled = true + } + if ctx.GlobalIsSet(SwarmDeliverySkipCheckFlag.Name) { currentConfig.DeliverySkipCheck = true } @@ -301,6 +306,12 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) { } } + if lne := os.Getenv(SWARM_ENV_LIGHT_NODE_ENABLE); lne != "" 
{ + if lightnode, err := strconv.ParseBool(lne); err == nil { + currentConfig.LightNodeEnabled = lightnode + } + } + if swapapi := os.Getenv(SWARM_ENV_SWAP_API); swapapi != "" { currentConfig.SwapAPI = swapapi } diff --git a/cmd/swarm/main.go b/cmd/swarm/main.go index 9185af980f70..258f24d3204d 100644 --- a/cmd/swarm/main.go +++ b/cmd/swarm/main.go @@ -123,6 +123,11 @@ var ( Usage: "Duration for sync subscriptions update after no new peers are added (default 15s)", EnvVar: SWARM_ENV_SYNC_UPDATE_DELAY, } + SwarmLightNodeEnabled = cli.BoolFlag{ + Name: "lightnode", + Usage: "Enable Swarm LightNode (default false)", + EnvVar: SWARM_ENV_LIGHT_NODE_ENABLE, + } SwarmDeliverySkipCheckFlag = cli.BoolFlag{ Name: "delivery-skip-check", Usage: "Skip chunk delivery check (default false)", @@ -464,6 +469,7 @@ pv(1) tool to get a progress bar: SwarmSwapAPIFlag, SwarmSyncDisabledFlag, SwarmSyncUpdateDelay, + SwarmLightNodeEnabled, SwarmDeliverySkipCheckFlag, SwarmListenAddrFlag, SwarmPortFlag, diff --git a/swarm/api/config.go b/swarm/api/config.go index 939285e09cf3..bdfffdd05f63 100644 --- a/swarm/api/config.go +++ b/swarm/api/config.go @@ -63,6 +63,7 @@ type Config struct { SwapEnabled bool SyncEnabled bool DeliverySkipCheck bool + LightNodeEnabled bool SyncUpdateDelay time.Duration SwapAPI string Cors string diff --git a/swarm/network/protocol.go b/swarm/network/protocol.go index 49ae5a15bf73..7e7fee8efe02 100644 --- a/swarm/network/protocol.go +++ b/swarm/network/protocol.go @@ -94,12 +94,14 @@ type BzzConfig struct { UnderlayAddr []byte // node's underlay address HiveParams *HiveParams NetworkID uint64 + LightNode bool } // Bzz is the swarm protocol bundle type Bzz struct { *Hive NetworkID uint64 + LightNode bool localAddr *BzzAddr mtx sync.Mutex handshakes map[discover.NodeID]*HandshakeMsg @@ -116,6 +118,7 @@ func NewBzz(config *BzzConfig, kad Overlay, store state.Store, streamerSpec *pro return &Bzz{ Hive: NewHive(config.HiveParams, kad, store), NetworkID: config.NetworkID, + LightNode: config.LightNode, localAddr: &BzzAddr{config.OverlayAddr, config.UnderlayAddr}, handshakes: make(map[discover.NodeID]*HandshakeMsg), streamerRun: streamerRun, @@ -209,7 +212,11 @@ func (b *Bzz) RunProtocol(spec *protocols.Spec, run func(*BzzPeer) error) func(* localAddr: b.localAddr, BzzAddr: handshake.peerAddr, lastActive: time.Now(), + LightNode: handshake.LightNode, } + + log.Debug("peer created", "addr", handshake.peerAddr.String()) + return run(peer) } } @@ -228,6 +235,7 @@ func (b *Bzz) performHandshake(p *protocols.Peer, handshake *HandshakeMsg) error return err } handshake.peerAddr = rsh.(*HandshakeMsg).Addr + handshake.LightNode = rsh.(*HandshakeMsg).LightNode return nil } @@ -263,6 +271,7 @@ type BzzPeer struct { localAddr *BzzAddr // local Peers address *BzzAddr // remote address -> implements Addr interface = protocols.Peer lastActive time.Time // time is updated whenever mutexes are releasing + LightNode bool } func NewBzzTestPeer(p *protocols.Peer, addr *BzzAddr) *BzzPeer { @@ -294,6 +303,7 @@ type HandshakeMsg struct { Version uint64 NetworkID uint64 Addr *BzzAddr + LightNode bool // peerAddr is the address received in the peer handshake peerAddr *BzzAddr @@ -305,7 +315,7 @@ type HandshakeMsg struct { // String pretty prints the handshake func (bh *HandshakeMsg) String() string { - return fmt.Sprintf("Handshake: Version: %v, NetworkID: %v, Addr: %v", bh.Version, bh.NetworkID, bh.Addr) + return fmt.Sprintf("Handshake: Version: %v, NetworkID: %v, Addr: %v, LightNode: %v, peerAddr: %v", bh.Version, 
bh.NetworkID, bh.Addr, bh.LightNode, bh.peerAddr) } // Perform initiates the handshake and validates the remote handshake message @@ -338,6 +348,7 @@ func (b *Bzz) GetHandshake(peerID discover.NodeID) (*HandshakeMsg, bool) { Version: uint64(BzzSpec.Version), NetworkID: b.NetworkID, Addr: b.localAddr, + LightNode: b.LightNode, init: make(chan bool, 1), done: make(chan struct{}), } diff --git a/swarm/network/protocol_test.go b/swarm/network/protocol_test.go index 63faf3108850..b74b72c68b8b 100644 --- a/swarm/network/protocol_test.go +++ b/swarm/network/protocol_test.go @@ -30,6 +30,11 @@ import ( p2ptest "github.com/ethereum/go-ethereum/p2p/testing" ) +const ( + TestProtocolVersion = 5 + TestProtocolNetworkID = 3 +) + var ( loglevel = flag.Int("loglevel", 2, "verbosity of logs") ) @@ -127,23 +132,30 @@ type bzzTester struct { *p2ptest.ProtocolTester addr *BzzAddr cs map[string]chan bool + bzz *Bzz } -func newBzzHandshakeTester(t *testing.T, n int, addr *BzzAddr) *bzzTester { +func newBzz(addr *BzzAddr, lightNode bool) *Bzz { config := &BzzConfig{ OverlayAddr: addr.Over(), UnderlayAddr: addr.Under(), HiveParams: NewHiveParams(), NetworkID: DefaultNetworkID, + LightNode: lightNode, } kad := NewKademlia(addr.OAddr, NewKadParams()) bzz := NewBzz(config, kad, nil, nil, nil) + return bzz +} - s := p2ptest.NewProtocolTester(t, NewNodeIDFromAddr(addr), 1, bzz.runBzz) +func newBzzHandshakeTester(t *testing.T, n int, addr *BzzAddr, lightNode bool) *bzzTester { + bzz := newBzz(addr, lightNode) + pt := p2ptest.NewProtocolTester(t, NewNodeIDFromAddr(addr), n, bzz.runBzz) return &bzzTester{ addr: addr, - ProtocolTester: s, + ProtocolTester: pt, + bzz: bzz, } } @@ -184,22 +196,24 @@ func (s *bzzTester) testHandshake(lhs, rhs *HandshakeMsg, disconnects ...*p2ptes return nil } -func correctBzzHandshake(addr *BzzAddr) *HandshakeMsg { +func correctBzzHandshake(addr *BzzAddr, lightNode bool) *HandshakeMsg { return &HandshakeMsg{ - Version: 5, - NetworkID: DefaultNetworkID, + Version: TestProtocolVersion, + NetworkID: TestProtocolNetworkID, Addr: addr, + LightNode: lightNode, } } func TestBzzHandshakeNetworkIDMismatch(t *testing.T) { + lightNode := false addr := RandomAddr() - s := newBzzHandshakeTester(t, 1, addr) + s := newBzzHandshakeTester(t, 1, addr, lightNode) id := s.IDs[0] err := s.testHandshake( - correctBzzHandshake(addr), - &HandshakeMsg{Version: 5, NetworkID: 321, Addr: NewAddrFromNodeID(id)}, + correctBzzHandshake(addr, lightNode), + &HandshakeMsg{Version: TestProtocolVersion, NetworkID: 321, Addr: NewAddrFromNodeID(id)}, &p2ptest.Disconnect{Peer: id, Error: fmt.Errorf("Handshake error: Message handler error: (msg code 0): network id mismatch 321 (!= 3)")}, ) @@ -209,14 +223,15 @@ func TestBzzHandshakeNetworkIDMismatch(t *testing.T) { } func TestBzzHandshakeVersionMismatch(t *testing.T) { + lightNode := false addr := RandomAddr() - s := newBzzHandshakeTester(t, 1, addr) + s := newBzzHandshakeTester(t, 1, addr, lightNode) id := s.IDs[0] err := s.testHandshake( - correctBzzHandshake(addr), - &HandshakeMsg{Version: 0, NetworkID: 3, Addr: NewAddrFromNodeID(id)}, - &p2ptest.Disconnect{Peer: id, Error: fmt.Errorf("Handshake error: Message handler error: (msg code 0): version mismatch 0 (!= 5)")}, + correctBzzHandshake(addr, lightNode), + &HandshakeMsg{Version: 0, NetworkID: TestProtocolNetworkID, Addr: NewAddrFromNodeID(id)}, + &p2ptest.Disconnect{Peer: id, Error: fmt.Errorf("Handshake error: Message handler error: (msg code 0): version mismatch 0 (!= %d)", TestProtocolVersion)}, ) if err != nil { 
@@ -225,16 +240,49 @@ func TestBzzHandshakeVersionMismatch(t *testing.T) {
 }
 
 func TestBzzHandshakeSuccess(t *testing.T) {
+	lightNode := false
 	addr := RandomAddr()
-	s := newBzzHandshakeTester(t, 1, addr)
+	s := newBzzHandshakeTester(t, 1, addr, lightNode)
 	id := s.IDs[0]
 
 	err := s.testHandshake(
-		correctBzzHandshake(addr),
-		&HandshakeMsg{Version: 5, NetworkID: 3, Addr: NewAddrFromNodeID(id)},
+		correctBzzHandshake(addr, lightNode),
+		&HandshakeMsg{Version: TestProtocolVersion, NetworkID: TestProtocolNetworkID, Addr: NewAddrFromNodeID(id)},
 	)
 
 	if err != nil {
 		t.Fatal(err)
 	}
 }
+
+func TestBzzHandshakeLightNode(t *testing.T) {
+	var lightNodeTests = []struct {
+		name      string
+		lightNode bool
+	}{
+		{"on", true},
+		{"off", false},
+	}
+
+	for _, test := range lightNodeTests {
+		t.Run(test.name, func(t *testing.T) {
+			randomAddr := RandomAddr()
+			pt := newBzzHandshakeTester(t, 1, randomAddr, false)
+			id := pt.IDs[0]
+			addr := NewAddrFromNodeID(id)
+
+			err := pt.testHandshake(
+				correctBzzHandshake(randomAddr, false),
+				&HandshakeMsg{Version: TestProtocolVersion, NetworkID: TestProtocolNetworkID, Addr: addr, LightNode: test.lightNode},
+			)
+
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			if pt.bzz.handshakes[id].LightNode != test.lightNode {
+				t.Fatalf("peer LightNode flag is %v, should be %v", pt.bzz.handshakes[id].LightNode, test.lightNode)
+			}
+		})
+	}
+}
diff --git a/swarm/swarm.go b/swarm/swarm.go
index db7d2dfedc82..c380a376f67e 100644
--- a/swarm/swarm.go
+++ b/swarm/swarm.go
@@ -143,6 +143,7 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
 		OverlayAddr:  addr.OAddr,
 		UnderlayAddr: addr.UAddr,
 		HiveParams:   config.HiveParams,
+		LightNode:    config.LightNodeEnabled,
 	}
 
 	stateStore, err := state.NewDBStore(filepath.Join(config.Path, "state-store.db"))

From 00e6da9704b2cd7ddcc1cd31ed3f6bbaa8e1e284 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jano=C5=A1=20Gulja=C5=A1?=
Date: Tue, 7 Aug 2018 15:34:33 +0200
Subject: [PATCH 109/166] swarm/bmt: ignore data longer than 4096 bytes in
 Hasher.Write (#17338)

---
 swarm/bmt/bmt.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/swarm/bmt/bmt.go b/swarm/bmt/bmt.go
index 1a141047aeba..97e0e141edba 100644
--- a/swarm/bmt/bmt.go
+++ b/swarm/bmt/bmt.go
@@ -318,7 +318,7 @@ func (h *Hasher) Sum(b []byte) (s []byte) {
 // with every full segment calls writeSection in a goroutine
 func (h *Hasher) Write(b []byte) (int, error) {
 	l := len(b)
-	if l == 0 {
+	if l == 0 || l > 4096 {
 		return 0, nil
 	}
 	t := h.getTree()

From a1eb9c7d13240fd250866219a502d0cdc9924e06 Mon Sep 17 00:00:00 2001
From: Giulio M
Date: Wed, 8 Aug 2018 09:33:06 +0200
Subject: [PATCH 110/166] swarm/api/http: fixed list leaf links (#17342)

---
 swarm/api/http/server_test.go |  6 +++---
 swarm/api/http/templates.go   | 15 ++++++++++++---
 2 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/swarm/api/http/server_test.go b/swarm/api/http/server_test.go
index 3ac60596b5de..7934e37eba31 100644
--- a/swarm/api/http/server_test.go
+++ b/swarm/api/http/server_test.go
@@ -576,7 +576,7 @@ func testBzzGetPath(encrypted bool, t *testing.T) {
 			pageFragments: []string{
 				fmt.Sprintf("Swarm index of bzz:/%s/a/", ref),
 				`b/`,
-				`a`,
+				fmt.Sprintf(`a`, ref),
 			},
 		},
 		{
@@ -584,8 +584,8 @@ func testBzzGetPath(encrypted bool, t *testing.T) {
 			json:
`{"entries":[{"hash":"011b4d03dd8c01f1049143cf9c4c817e4b167f1d1b83e5c6f0f10d89ba1e7bce","path":"a/b/b","mod_time":"0001-01-01T00:00:00Z"},{"hash":"011b4d03dd8c01f1049143cf9c4c817e4b167f1d1b83e5c6f0f10d89ba1e7bce","path":"a/b/c","mod_time":"0001-01-01T00:00:00Z"}]}`, pageFragments: []string{ fmt.Sprintf("Swarm index of bzz:/%s/a/b/", ref), - `b`, - `c`, + fmt.Sprintf(`b`, ref), + fmt.Sprintf(`c`, ref), }, }, { diff --git a/swarm/api/http/templates.go b/swarm/api/http/templates.go index 1cd42ca3714a..986f5f8873d3 100644 --- a/swarm/api/http/templates.go +++ b/swarm/api/http/templates.go @@ -18,6 +18,7 @@ package http import ( "encoding/hex" + "fmt" "html/template" "path" @@ -45,7 +46,10 @@ func init() { { templateName: "bzz-list", partial: bzzList, - funcs: template.FuncMap{"basename": path.Base}, + funcs: template.FuncMap{ + "basename": path.Base, + "leaflink": leafLink, + }, }, { templateName: "landing-page", @@ -62,6 +66,10 @@ func init() { faviconBytes = bytes } +func leafLink(URI api.URI, manifestEntry api.ManifestEntry) string { + return fmt.Sprintf("/bzz:/%s/%s", URI.Addr, manifestEntry.Path) +} + const bzzList = `{{ define "content" }}

Swarm index of {{ .URI }}


@@ -83,10 +91,11 @@ const bzzList = `{{ define "content" }} DIR - - {{ end }} {{ range .List.Entries }} + {{ end }} + {{ range .List.Entries }} - {{ basename .Path }} + {{ basename .Path }} {{ .ContentType }} {{ .Size }} From 8051a0768a2af6c36b04ffa6fb225a45986d9b89 Mon Sep 17 00:00:00 2001 From: Mymskmkt <1847234666@qq.com> Date: Wed, 8 Aug 2018 21:08:40 +0800 Subject: [PATCH 111/166] trie: fix comment typo (#17350) --- trie/database.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trie/database.go b/trie/database.go index 8675b9f0a0c0..7df45fe2df3d 100644 --- a/trie/database.go +++ b/trie/database.go @@ -51,7 +51,7 @@ const secureKeyLength = 11 + 32 // DatabaseReader wraps the Get and Has method of a backing store for the trie. type DatabaseReader interface { - // Get retrieves the value associated with key form the database. + // Get retrieves the value associated with key from the database. Get(key []byte) (value []byte, err error) // Has retrieves whether a key is present in the database. From abbb219933504a2aa739353a1cb6157cdd0cf145 Mon Sep 17 00:00:00 2001 From: Jay Date: Thu, 9 Aug 2018 14:56:35 +0900 Subject: [PATCH 112/166] rpc: fix a subscription name (#17345) --- rpc/client_example_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rpc/client_example_test.go b/rpc/client_example_test.go index 8276a9eadd13..9c21c12d5dff 100644 --- a/rpc/client_example_test.go +++ b/rpc/client_example_test.go @@ -66,7 +66,7 @@ func subscribeBlocks(client *rpc.Client, subch chan Block) { defer cancel() // Subscribe to new blocks. - sub, err := client.EthSubscribe(ctx, subch, "newBlocks") + sub, err := client.EthSubscribe(ctx, subch, "newHeads") if err != nil { fmt.Println("subscribe error:", err) return From 834057592f68eecc45382794c0fed96e594e14d1 Mon Sep 17 00:00:00 2001 From: libotony Date: Thu, 9 Aug 2018 15:03:42 +0800 Subject: [PATCH 113/166] p2p/discv5: fix negative index after uint convert to int (#17274) --- p2p/discv5/net.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/p2p/discv5/net.go b/p2p/discv5/net.go index 9b0bd0c80ac6..4c39c055333b 100644 --- a/p2p/discv5/net.go +++ b/p2p/discv5/net.go @@ -1228,7 +1228,7 @@ func (net *Network) checkTopicRegister(data *topicRegister) (*pong, error) { if rlpHash(data.Topics) != pongpkt.data.(*pong).TopicHash { return nil, errors.New("topic hash mismatch") } - if data.Idx < 0 || int(data.Idx) >= len(data.Topics) { + if int(data.Idx) < 0 || int(data.Idx) >= len(data.Topics) { return nil, errors.New("topic index out of range") } return pongpkt.data.(*pong), nil From 11bbc660823246b9fc25e4b994121e30a9f17306 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 8 Aug 2018 17:16:38 +0300 Subject: [PATCH 114/166] eth, trie: fix tracer GC which accidentally pruned the metaroot --- eth/api_tracer.go | 8 ++++++-- trie/database.go | 5 +++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/eth/api_tracer.go b/eth/api_tracer.go index 623e5ed1bd4f..722e2a6e329c 100644 --- a/eth/api_tracer.go +++ b/eth/api_tracer.go @@ -297,7 +297,9 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl database.TrieDB().Reference(root, common.Hash{}) } // Dereference all past tries we ourselves are done working with - database.TrieDB().Dereference(proot) + if proot != (common.Hash{}) { + database.TrieDB().Dereference(proot) + } proot = root // TODO(karalabe): Do we need the preimages? Won't they accumulate too much? 
@@ -526,7 +528,9 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (* return nil, err } database.TrieDB().Reference(root, common.Hash{}) - database.TrieDB().Dereference(proot) + if proot != (common.Hash{}) { + database.TrieDB().Dereference(proot) + } proot = root } nodes, imgs := database.TrieDB().Size() diff --git a/trie/database.go b/trie/database.go index 7df45fe2df3d..d0691b637e3a 100644 --- a/trie/database.go +++ b/trie/database.go @@ -431,6 +431,11 @@ func (db *Database) reference(child common.Hash, parent common.Hash) { // Dereference removes an existing reference from a root node. func (db *Database) Dereference(root common.Hash) { + // Sanity check to ensure that the meta-root is not removed + if root == (common.Hash{}) { + log.Error("Attempted to dereference the trie cache meta root") + return + } db.lock.Lock() defer db.lock.Unlock() From 7b5c3758250ffc78c7a5ce14c1b736a38d548423 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Thu, 9 Aug 2018 11:37:00 +0200 Subject: [PATCH 115/166] cmd/swarm: remove shadow err (#17360) --- cmd/swarm/config.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cmd/swarm/config.go b/cmd/swarm/config.go index ff085fd94fe4..cda8c41c32cd 100644 --- a/cmd/swarm/config.go +++ b/cmd/swarm/config.go @@ -132,7 +132,7 @@ func initSwarmNode(config *bzzapi.Config, stack *node.Node, ctx *cli.Context) { log.Debug(printConfig(config)) } -//override the current config with whatever is in the config file, if a config file has been provided +//configFileOverride overrides the current config with the config file, if a config file has been provided func configFileOverride(config *bzzapi.Config, ctx *cli.Context) (*bzzapi.Config, error) { var err error @@ -142,7 +142,8 @@ func configFileOverride(config *bzzapi.Config, ctx *cli.Context) (*bzzapi.Config if filepath = ctx.GlobalString(SwarmTomlConfigPathFlag.Name); filepath == "" { utils.Fatalf("Config file flag provided with invalid file path") } - f, err := os.Open(filepath) + var f *os.File + f, err = os.Open(filepath) if err != nil { return nil, err } From d3e4c2dcb00eb61c32dd3a9b94a317727c2449a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jano=C5=A1=20Gulja=C5=A1?= Date: Thu, 9 Aug 2018 16:14:30 +0200 Subject: [PATCH 116/166] cmd/swarm: disable TestCLISwarmFs fuse test on darwin (#17340) --- cmd/swarm/fs_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cmd/swarm/fs_test.go b/cmd/swarm/fs_test.go index a2b730bd5d18..4f38b094bbf2 100644 --- a/cmd/swarm/fs_test.go +++ b/cmd/swarm/fs_test.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with go-ethereum. If not, see . -// +build linux darwin freebsd +// +build linux freebsd package main @@ -43,12 +43,12 @@ type testFile struct { } // TestCLISwarmFs is a high-level test of swarmfs +// +// This test fails on travis for macOS as this executable exits with code 1 +// and without any log messages in the log: +// /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse. +// This is the reason for this file not being built on darwin architecture. func TestCLISwarmFs(t *testing.T) { - // This test fails on travis as this executable exits with code 1 - // and without any log messages in the log. 
- // /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse - t.Skip() - cluster := newTestCluster(t, 3) defer cluster.Shutdown() From 3bcb501c8fefeec1bc8aac26686460cae86f2ccb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jano=C5=A1=20Gulja=C5=A1?= Date: Thu, 9 Aug 2018 16:15:07 +0200 Subject: [PATCH 117/166] swarm/api: close tar writer in GetDirectoryTar to flush and clean (#17339) --- swarm/api/api.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/swarm/api/api.go b/swarm/api/api.go index ad4bd7dcb10c..b418c45e1ce6 100644 --- a/swarm/api/api.go +++ b/swarm/api/api.go @@ -525,6 +525,10 @@ func (a *API) GetDirectoryTar(ctx context.Context, uri *URI) (io.ReadCloser, err return nil }) + // close tar writer before closing pipew + // to flush remaining data to pipew + // regardless of error value + tw.Close() if err != nil { apiGetTarFail.Inc(1) pipew.CloseWithError(err) From 45eaef24319897f5b1679c9d1aa7d88702cce905 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jano=C5=A1=20Gulja=C5=A1?= Date: Thu, 9 Aug 2018 16:15:59 +0200 Subject: [PATCH 118/166] cmd/swarm: solve rare cases of using the same random port in tests (#17352) --- cmd/swarm/config_test.go | 14 ++++ cmd/swarm/run_test.go | 136 +++++++++++++++++++++++++++++++++------ 2 files changed, 131 insertions(+), 19 deletions(-) diff --git a/cmd/swarm/config_test.go b/cmd/swarm/config_test.go index d5011e3a7069..02198f878ec4 100644 --- a/cmd/swarm/config_test.go +++ b/cmd/swarm/config_test.go @@ -20,6 +20,7 @@ import ( "fmt" "io" "io/ioutil" + "net" "os" "os/exec" "testing" @@ -559,3 +560,16 @@ func TestValidateConfig(t *testing.T) { } } } + +func assignTCPPort() (string, error) { + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return "", err + } + l.Close() + _, port, err := net.SplitHostPort(l.Addr().String()) + if err != nil { + return "", err + } + return port, nil +} diff --git a/cmd/swarm/run_test.go b/cmd/swarm/run_test.go index a70c4686dd51..90d3c98ba688 100644 --- a/cmd/swarm/run_test.go +++ b/cmd/swarm/run_test.go @@ -17,12 +17,15 @@ package main import ( + "context" "fmt" "io/ioutil" "net" "os" "path/filepath" "runtime" + "sync" + "syscall" "testing" "time" @@ -218,14 +221,12 @@ func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode { } // assign ports - httpPort, err := assignTCPPort() - if err != nil { - t.Fatal(err) - } - p2pPort, err := assignTCPPort() + ports, err := getAvailableTCPPorts(2) if err != nil { t.Fatal(err) } + p2pPort := ports[0] + httpPort := ports[1] // start the node node.Cmd = runSwarm(t, @@ -246,6 +247,17 @@ func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode { } }() + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // ensure that all ports have active listeners + // so that the next node will not get the same + // when calling getAvailableTCPPorts + err = waitTCPPorts(ctx, ports...) 
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	// wait for the node to start
 	for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
 		node.Client, err = rpc.Dial(conf.IPCEndpoint())
@@ -280,14 +292,12 @@ func newTestNode(t *testing.T, dir string) *testNode {
 	node := &testNode{Dir: dir}
 
 	// assign ports
-	httpPort, err := assignTCPPort()
-	if err != nil {
-		t.Fatal(err)
-	}
-	p2pPort, err := assignTCPPort()
+	ports, err := getAvailableTCPPorts(2)
 	if err != nil {
 		t.Fatal(err)
 	}
+	p2pPort := ports[0]
+	httpPort := ports[1]
 
 	// start the node
 	node.Cmd = runSwarm(t,
@@ -308,6 +318,17 @@ func newTestNode(t *testing.T, dir string) *testNode {
 		}
 	}()
 
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+
+	// ensure that all ports have active listeners
+	// so that the next node will not get the same ones
+	// when calling getAvailableTCPPorts
+	err = waitTCPPorts(ctx, ports...)
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	// wait for the node to start
 	for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
 		node.Client, err = rpc.Dial(conf.IPCEndpoint())
@@ -343,15 +364,92 @@ func (n *testNode) Shutdown() {
 	}
 }
 
-func assignTCPPort() (string, error) {
-	l, err := net.Listen("tcp", "127.0.0.1:0")
-	if err != nil {
-		return "", err
+// getAvailableTCPPorts returns a set of ports that
+// nothing is listening on at the time.
+//
+// Function assignTCPPort cannot be called in sequence
+// and guarantee that the same port will be returned in
+// different calls as the listener is closed within the function,
+// not after all listeners are started and selected unique
+// available ports.
+func getAvailableTCPPorts(count int) (ports []string, err error) {
+	for i := 0; i < count; i++ {
+		l, err := net.Listen("tcp", "127.0.0.1:0")
+		if err != nil {
+			return nil, err
+		}
+		// defer close in the loop to be sure the same port will not
+		// be selected in the next iteration
+		defer l.Close()
+
+		_, port, err := net.SplitHostPort(l.Addr().String())
+		if err != nil {
+			return nil, err
+		}
+		ports = append(ports, port)
 	}
-	l.Close()
-	_, port, err := net.SplitHostPort(l.Addr().String())
-	if err != nil {
-		return "", err
+	return ports, nil
+}
+
+// waitTCPPorts blocks until tcp connections can be
+// established on all provided ports. It runs all
+// port dialers in parallel, and returns the first
+// encountered error.
+// See waitTCPPort also.
+func waitTCPPorts(ctx context.Context, ports ...string) error {
+	var err error
+	// mu locks err variable that is assigned in
+	// other goroutines
+	var mu sync.Mutex
+
+	// cancel is canceling all goroutines
+	// when the first error is returned
+	// to prevent unnecessary waiting
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	var wg sync.WaitGroup
+	for _, port := range ports {
+		wg.Add(1)
+		go func(port string) {
+			defer wg.Done()
+
+			e := waitTCPPort(ctx, port)
+
+			mu.Lock()
+			defer mu.Unlock()
+			if e != nil && err == nil {
+				err = e
+				cancel()
+			}
+		}(port)
+	}
+	wg.Wait()
+
+	return err
+}
+
+// waitTCPPort blocks until a tcp connection can be established
+// on a provided port. It has a 3 minute maximum timeout
+// to prevent long waiting, but it can be shortened with
+// a provided context instance. The dialer has a 10 second timeout
+// in every iteration, and a connection refused error will be
+// retried in 100 millisecond periods.
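+//
+// A minimal usage sketch (the startNode helper and the one minute timeout
+// are hypothetical, for illustration only):
+//
+//	ports, _ := getAvailableTCPPorts(2)
+//	startNode(ports...) // begin listening on the reserved ports
+//	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+//	defer cancel()
+//	if err := waitTCPPorts(ctx, ports...); err != nil {
+//		// a listener did not come up in time
+//	}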
+func waitTCPPort(ctx context.Context, port string) error { + ctx, cancel := context.WithTimeout(ctx, 3*time.Minute) + defer cancel() + + for { + c, err := (&net.Dialer{Timeout: 10 * time.Second}).DialContext(ctx, "tcp", "127.0.0.1:"+port) + if err != nil { + if operr, ok := err.(*net.OpError); ok { + if syserr, ok := operr.Err.(*os.SyscallError); ok && syserr.Err == syscall.ECONNREFUSED { + time.Sleep(100 * time.Millisecond) + continue + } + } + return err + } + return c.Close() } - return port, nil } From f0998415ba9a73f0add32f9b5aed2aec98b9a7f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 8 Aug 2018 12:15:08 +0300 Subject: [PATCH 119/166] cmd, consensus/ethash, eth: miner push notifications --- cmd/geth/main.go | 3 +- cmd/geth/usage.go | 1 + cmd/utils/flags.go | 14 +++- consensus/ethash/algorithm_test.go | 2 +- consensus/ethash/consensus.go | 2 +- consensus/ethash/ethash.go | 18 +++-- consensus/ethash/ethash_test.go | 43 +++++------ consensus/ethash/sealer.go | 88 ++++++++++++++-------- consensus/ethash/sealer_test.go | 115 +++++++++++++++++++++++++++++ eth/backend.go | 8 +- eth/config.go | 1 + les/backend.go | 2 +- 12 files changed, 225 insertions(+), 72 deletions(-) create mode 100644 consensus/ethash/sealer_test.go diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 77ef6afe245b..d556ad92c343 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -98,8 +98,9 @@ var ( utils.MaxPendingPeersFlag, utils.EtherbaseFlag, utils.GasPriceFlag, - utils.MinerThreadsFlag, utils.MiningEnabledFlag, + utils.MinerThreadsFlag, + utils.MinerNotifyFlag, utils.TargetGasLimitFlag, utils.NATFlag, utils.NoDiscoverFlag, diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index 6a12a66cc22b..9d63c68f7fba 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -185,6 +185,7 @@ var AppHelpFlagGroups = []flagGroup{ Flags: []cli.Flag{ utils.MiningEnabledFlag, utils.MinerThreadsFlag, + utils.MinerNotifyFlag, utils.EtherbaseFlag, utils.TargetGasLimitFlag, utils.GasPriceFlag, diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 522ad06b61c2..d6142f246c16 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -24,7 +24,6 @@ import ( "math/big" "os" "path/filepath" - "runtime" "strconv" "strings" "time" @@ -318,9 +317,13 @@ var ( Usage: "Enable mining", } MinerThreadsFlag = cli.IntFlag{ - Name: "minerthreads", + Name: "miner.threads", Usage: "Number of CPU threads to use for mining", - Value: runtime.NumCPU(), + Value: 0, + } + MinerNotifyFlag = cli.StringFlag{ + Name: "miner.notify", + Usage: "Comma separated HTTP URL list to notify of new work packages", } TargetGasLimitFlag = cli.Uint64Flag{ Name: "targetgaslimit", @@ -1093,6 +1096,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { if ctx.GlobalIsSet(MinerThreadsFlag.Name) { cfg.MinerThreads = ctx.GlobalInt(MinerThreadsFlag.Name) } + if ctx.GlobalIsSet(MinerNotifyFlag.Name) { + cfg.MinerNotify = strings.Split(ctx.GlobalString(MinerNotifyFlag.Name), ",") + } if ctx.GlobalIsSet(DocRootFlag.Name) { cfg.DocRoot = ctx.GlobalString(DocRootFlag.Name) } @@ -1293,7 +1299,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai DatasetDir: stack.ResolvePath(eth.DefaultConfig.Ethash.DatasetDir), DatasetsInMem: eth.DefaultConfig.Ethash.DatasetsInMem, DatasetsOnDisk: eth.DefaultConfig.Ethash.DatasetsOnDisk, - }) + }, nil) } } if gcmode := ctx.GlobalString(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" { diff --git a/consensus/ethash/algorithm_test.go 
b/consensus/ethash/algorithm_test.go index e7625f7c00ce..db22cccd0c87 100644 --- a/consensus/ethash/algorithm_test.go +++ b/consensus/ethash/algorithm_test.go @@ -729,7 +729,7 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) { go func(idx int) { defer pend.Done() - ethash := New(Config{cachedir, 0, 1, "", 0, 0, ModeNormal}) + ethash := New(Config{cachedir, 0, 1, "", 0, 0, ModeNormal}, nil) defer ethash.Close() if err := ethash.VerifySeal(nil, block.Header()); err != nil { t.Errorf("proc %d: block verification failed: %v", idx, err) diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index eb0f73d98bb3..e18a06d52af0 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -493,7 +493,7 @@ func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Head if !bytes.Equal(header.MixDigest[:], digest) { return errInvalidMixDigest } - target := new(big.Int).Div(maxUint256, header.Difficulty) + target := new(big.Int).Div(two256, header.Difficulty) if new(big.Int).SetBytes(result).Cmp(target) > 0 { return errInvalidPoW } diff --git a/consensus/ethash/ethash.go b/consensus/ethash/ethash.go index 0cb3059b9dfd..19c94deb6bbf 100644 --- a/consensus/ethash/ethash.go +++ b/consensus/ethash/ethash.go @@ -45,11 +45,11 @@ import ( var ErrInvalidDumpMagic = errors.New("invalid dump magic") var ( - // maxUint256 is a big integer representing 2^256-1 - maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0)) + // two256 is a big integer representing 2^256 + two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0)) // sharedEthash is a full instance that can be shared between multiple users. - sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal}) + sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal}, nil) // algorithmRevision is the data structure version used for file naming. algorithmRevision = 23 @@ -447,8 +447,10 @@ type Ethash struct { exitCh chan chan error // Notification channel to exiting backend threads } -// New creates a full sized ethash PoW scheme and starts a background thread for remote mining. -func New(config Config) *Ethash { +// New creates a full sized ethash PoW scheme and starts a background thread for +// remote mining, also optionally notifying a batch of remote services of new work +// packages. +func New(config Config, notify []string) *Ethash { if config.CachesInMem <= 0 { log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem) config.CachesInMem = 1 @@ -473,13 +475,13 @@ func New(config Config) *Ethash { submitRateCh: make(chan *hashrate), exitCh: make(chan chan error), } - go ethash.remote() + go ethash.remote(notify) return ethash } // NewTester creates a small sized ethash PoW scheme useful only for testing // purposes. -func NewTester() *Ethash { +func NewTester(notify []string) *Ethash { ethash := &Ethash{ config: Config{PowMode: ModeTest}, caches: newlru("cache", 1, newCache), @@ -494,7 +496,7 @@ func NewTester() *Ethash { submitRateCh: make(chan *hashrate), exitCh: make(chan chan error), } - go ethash.remote() + go ethash.remote(notify) return ethash } diff --git a/consensus/ethash/ethash_test.go b/consensus/ethash/ethash_test.go index ccdd30fb0f99..87ac17c2b203 100644 --- a/consensus/ethash/ethash_test.go +++ b/consensus/ethash/ethash_test.go @@ -32,17 +32,18 @@ import ( // Tests that ethash works correctly in test mode. 
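 //
 // In test mode ethash uses much smaller caches and datasets, so sealing
 // completes quickly. The round-trip below (seal a block, copy its nonce and
 // mix digest back into the header, then verify the seal) mirrors what a
 // real miner and verifier pair would do.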
func TestTestMode(t *testing.T) { - head := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)} + header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)} - ethash := NewTester() + ethash := NewTester(nil) defer ethash.Close() - block, err := ethash.Seal(nil, types.NewBlockWithHeader(head), nil) + + block, err := ethash.Seal(nil, types.NewBlockWithHeader(header), nil) if err != nil { t.Fatalf("failed to seal block: %v", err) } - head.Nonce = types.EncodeNonce(block.Nonce()) - head.MixDigest = block.MixDigest() - if err := ethash.VerifySeal(nil, head); err != nil { + header.Nonce = types.EncodeNonce(block.Nonce()) + header.MixDigest = block.MixDigest() + if err := ethash.VerifySeal(nil, header); err != nil { t.Fatalf("unexpected verification error: %v", err) } } @@ -55,7 +56,7 @@ func TestCacheFileEvict(t *testing.T) { t.Fatal(err) } defer os.RemoveAll(tmpdir) - e := New(Config{CachesInMem: 3, CachesOnDisk: 10, CacheDir: tmpdir, PowMode: ModeTest}) + e := New(Config{CachesInMem: 3, CachesOnDisk: 10, CacheDir: tmpdir, PowMode: ModeTest}, nil) defer e.Close() workers := 8 @@ -78,21 +79,21 @@ func verifyTest(wg *sync.WaitGroup, e *Ethash, workerIndex, epochs int) { if block < 0 { block = 0 } - head := &types.Header{Number: big.NewInt(block), Difficulty: big.NewInt(100)} - e.VerifySeal(nil, head) + header := &types.Header{Number: big.NewInt(block), Difficulty: big.NewInt(100)} + e.VerifySeal(nil, header) } } func TestRemoteSealer(t *testing.T) { - ethash := NewTester() + ethash := NewTester(nil) defer ethash.Close() + api := &API{ethash} if _, err := api.GetWork(); err != errNoMiningWork { t.Error("expect to return an error indicate there is no mining work") } - - head := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)} - block := types.NewBlockWithHeader(head) + header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)} + block := types.NewBlockWithHeader(header) // Push new work. ethash.Seal(nil, block, nil) @@ -108,16 +109,14 @@ func TestRemoteSealer(t *testing.T) { if res := api.SubmitWork(types.BlockNonce{}, block.HashNoNonce(), common.Hash{}); res { t.Error("expect to return false when submit a fake solution") } - // Push new block with same block number to replace the original one. - head = &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(1000)} - block = types.NewBlockWithHeader(head) + header = &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(1000)} + block = types.NewBlockWithHeader(header) ethash.Seal(nil, block, nil) if work, err = api.GetWork(); err != nil || work[0] != block.HashNoNonce().Hex() { t.Error("expect to return the latest pushed work") } - // Push block with higher block number. 
newHead := &types.Header{Number: big.NewInt(2), Difficulty: big.NewInt(100)} newBlock := types.NewBlockWithHeader(newHead) @@ -130,19 +129,18 @@ func TestRemoteSealer(t *testing.T) { func TestHashRate(t *testing.T) { var ( - ethash = NewTester() - api = &API{ethash} hashrate = []hexutil.Uint64{100, 200, 300} expect uint64 ids = []common.Hash{common.HexToHash("a"), common.HexToHash("b"), common.HexToHash("c")} ) - + ethash := NewTester(nil) defer ethash.Close() if tot := ethash.Hashrate(); tot != 0 { t.Error("expect the result should be zero") } + api := &API{ethash} for i := 0; i < len(hashrate); i += 1 { if res := api.SubmitHashRate(hashrate[i], ids[i]); !res { t.Error("remote miner submit hashrate failed") @@ -155,9 +153,8 @@ func TestHashRate(t *testing.T) { } func TestClosedRemoteSealer(t *testing.T) { - ethash := NewTester() - // Make sure exit channel has been listened - time.Sleep(1 * time.Second) + ethash := NewTester(nil) + time.Sleep(1 * time.Second) // ensure exit channel is listening ethash.Close() api := &API{ethash} diff --git a/consensus/ethash/sealer.go b/consensus/ethash/sealer.go index a9449d406079..03d84847392e 100644 --- a/consensus/ethash/sealer.go +++ b/consensus/ethash/sealer.go @@ -17,11 +17,14 @@ package ethash import ( + "bytes" crand "crypto/rand" + "encoding/json" "errors" "math" "math/big" "math/rand" + "net/http" "runtime" "sync" "time" @@ -109,7 +112,7 @@ func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan s var ( header = block.Header() hash = header.HashNoNonce().Bytes() - target = new(big.Int).Div(maxUint256, header.Difficulty) + target = new(big.Int).Div(two256, header.Difficulty) number = header.Number.Uint64() dataset = ethash.dataset(number) ) @@ -161,40 +164,65 @@ search: runtime.KeepAlive(dataset) } -// remote starts a standalone goroutine to handle remote mining related stuff. -func (ethash *Ethash) remote() { +// remote is a standalone goroutine to handle remote mining related stuff. +func (ethash *Ethash) remote(notify []string) { var ( - works = make(map[common.Hash]*types.Block) - rates = make(map[common.Hash]hashrate) - currentWork *types.Block + works = make(map[common.Hash]*types.Block) + rates = make(map[common.Hash]hashrate) + + currentBlock *types.Block + currentWork [3]string + + notifyTransport = &http.Transport{} + notifyClient = &http.Client{ + Transport: notifyTransport, + Timeout: time.Second, + } + notifyReqs = make([]*http.Request, len(notify)) ) + // notifyWork notifies all the specified mining endpoints of the availability of + // new work to be processed. + notifyWork := func() { + work := currentWork + blob, _ := json.Marshal(work) - // getWork returns a work package for external miner. + for i, url := range notify { + // Terminate any previously pending request and create the new work + if notifyReqs[i] != nil { + notifyTransport.CancelRequest(notifyReqs[i]) + } + notifyReqs[i], _ = http.NewRequest("POST", url, bytes.NewReader(blob)) + notifyReqs[i].Header.Set("Content-Type", "application/json") + + // Push the new work concurrently to all the remote nodes + go func(req *http.Request, url string) { + res, err := notifyClient.Do(req) + if err != nil { + log.Warn("Failed to notify remote miner", "err", err) + } else { + log.Trace("Notified remote miner", "miner", url, "hash", log.Lazy{Fn: func() common.Hash { return common.HexToHash(work[0]) }}, "target", work[2]) + res.Body.Close() + } + }(notifyReqs[i], url) + } + } + // makeWork creates a work package for external miner. 
 	//
 	// The work package consists of 3 strings:
 	//   result[0], 32 bytes hex encoded current block header pow-hash
 	//   result[1], 32 bytes hex encoded seed hash used for DAG
 	//   result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
-	getWork := func() ([3]string, error) {
-		var res [3]string
-		if currentWork == nil {
-			return res, errNoMiningWork
-		}
-		res[0] = currentWork.HashNoNonce().Hex()
-		res[1] = common.BytesToHash(SeedHash(currentWork.NumberU64())).Hex()
+	makeWork := func(block *types.Block) {
+		hash := block.HashNoNonce()
 
-		// Calculate the "target" to be returned to the external sealer.
-		n := big.NewInt(1)
-		n.Lsh(n, 255)
-		n.Div(n, currentWork.Difficulty())
-		n.Lsh(n, 1)
-		res[2] = common.BytesToHash(n.Bytes()).Hex()
+		currentWork[0] = hash.Hex()
+		currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex()
+		currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex()
 
 		// Trace the seal work fetched by remote sealer.
-		works[currentWork.HashNoNonce()] = currentWork
-		return res, nil
+		currentBlock = block
+		works[hash] = block
 	}
-
 	// submitWork verifies the submitted pow solution, returning
 	// whether the solution was accepted or not (a rejection can be both a bad pow as well as
 	// any other error, like no pending work or a stale mining result).
@@ -238,21 +266,23 @@ func (ethash *Ethash) remote() {
 	for {
 		select {
 		case block := <-ethash.workCh:
-			if currentWork != nil && block.ParentHash() != currentWork.ParentHash() {
+			if currentBlock != nil && block.ParentHash() != currentBlock.ParentHash() {
 				// Start new round mining, throw out all previous work.
 				works = make(map[common.Hash]*types.Block)
 			}
 			// Update current work with new received block.
 			// Note same work can be passed twice, happens when changing CPU threads.
-			currentWork = block
+			makeWork(block)
+
+			// Notify all requested URLs of the new work availability
+			notifyWork()
 
 		case work := <-ethash.fetchWorkCh:
 			// Return current mining work to remote miner.
-			miningWork, err := getWork()
-			if err != nil {
-				work.errc <- err
+			if currentBlock == nil {
+				work.errc <- errNoMiningWork
 			} else {
-				work.res <- miningWork
+				work.res <- currentWork
 			}
 
 		case result := <-ethash.submitWorkCh:
diff --git a/consensus/ethash/sealer_test.go b/consensus/ethash/sealer_test.go
new file mode 100644
index 000000000000..6d8a77049545
--- /dev/null
+++ b/consensus/ethash/sealer_test.go
@@ -0,0 +1,115 @@
+package ethash
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"math/big"
+	"net"
+	"net/http"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+)
+
+// Tests whether remote HTTP servers are correctly notified of new work.
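+//
+// A remote miner would consume such notifications with a plain HTTP handler.
+// A minimal sketch (startMining is a hypothetical helper; the payload is the
+// three-string work package described in sealer.go):
+//
+//	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+//		var work [3]string // pow-hash, seed hash, target
+//		if err := json.NewDecoder(r.Body).Decode(&work); err == nil {
+//			go startMining(work)
+//		}
+//	})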
+func TestRemoteNotify(t *testing.T) {
+	// Start a simple webserver to capture notifications
+	sink := make(chan [3]string)
+
+	server := &http.Server{
+		Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+			blob, err := ioutil.ReadAll(req.Body)
+			if err != nil {
+				t.Fatalf("failed to read miner notification: %v", err)
+			}
+			var work [3]string
+			if err := json.Unmarshal(blob, &work); err != nil {
+				t.Fatalf("failed to unmarshal miner notification: %v", err)
+			}
+			sink <- work
+		}),
+	}
+	// Open a custom listener to extract its local address
+	listener, err := net.Listen("tcp", "localhost:0")
+	if err != nil {
+		t.Fatalf("failed to open notification server: %v", err)
+	}
+	defer listener.Close()
+
+	go server.Serve(listener)
+
+	// Create the custom ethash engine
+	ethash := NewTester([]string{"http://" + listener.Addr().String()})
+	defer ethash.Close()
+
+	// Stream a work task and ensure the notification bubbles out
+	header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
+	block := types.NewBlockWithHeader(header)
+
+	ethash.Seal(nil, block, nil)
+	select {
+	case work := <-sink:
+		if want := header.HashNoNonce().Hex(); work[0] != want {
+			t.Errorf("work packet hash mismatch: have %s, want %s", work[0], want)
+		}
+		if want := common.BytesToHash(SeedHash(header.Number.Uint64())).Hex(); work[1] != want {
+			t.Errorf("work packet seed mismatch: have %s, want %s", work[1], want)
+		}
+		target := new(big.Int).Div(new(big.Int).Lsh(big.NewInt(1), 256), header.Difficulty)
+		if want := common.BytesToHash(target.Bytes()).Hex(); work[2] != want {
+			t.Errorf("work packet target mismatch: have %s, want %s", work[2], want)
+		}
+	case <-time.After(time.Second):
+		t.Fatalf("notification timed out")
+	}
+}
+
+// Tests that pushing work packages fast to the miner doesn't cause any data race
+// issues in the notifications.
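+//
+// Every Seal call below replaces the pending work package, so this test
+// mostly exercises the shared notification state (the request list and the
+// HTTP transport) under rapid updates rather than the payload contents.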
+func TestRemoteMultiNotify(t *testing.T) { + // Start a simple webserver to capture notifications + sink := make(chan [3]string, 1024) + + server := &http.Server{ + Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + blob, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Fatalf("failed to read miner notification: %v", err) + } + var work [3]string + if err := json.Unmarshal(blob, &work); err != nil { + t.Fatalf("failed to unmarshal miner notification: %v", err) + } + sink <- work + }), + } + // Open a custom listener to extract its local address + listener, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("failed to open notification server: %v", err) + } + defer listener.Close() + + go server.Serve(listener) + + // Create the custom ethash engine + ethash := NewTester([]string{"http://" + listener.Addr().String()}) + defer ethash.Close() + + // Stream a lot of work task and ensure all the notifications bubble out + for i := 0; i < cap(sink); i++ { + header := &types.Header{Number: big.NewInt(int64(i)), Difficulty: big.NewInt(100)} + block := types.NewBlockWithHeader(header) + + ethash.Seal(nil, block, nil) + } + for i := 0; i < cap(sink); i++ { + select { + case <-sink: + case <-time.After(250 * time.Millisecond): + t.Fatalf("notification %d timed out", i) + } + } +} diff --git a/eth/backend.go b/eth/backend.go index 32946a0ab3bf..865534b1988b 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -124,7 +124,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) { chainConfig: chainConfig, eventMux: ctx.EventMux, accountManager: ctx.AccountManager, - engine: CreateConsensusEngine(ctx, &config.Ethash, chainConfig, chainDb), + engine: CreateConsensusEngine(ctx, chainConfig, &config.Ethash, config.MinerNotify, chainDb), shutdownChan: make(chan bool), networkID: config.NetworkId, gasPrice: config.GasPrice, @@ -210,7 +210,7 @@ func CreateDB(ctx *node.ServiceContext, config *Config, name string) (ethdb.Data } // CreateConsensusEngine creates the required type of consensus engine instance for an Ethereum service -func CreateConsensusEngine(ctx *node.ServiceContext, config *ethash.Config, chainConfig *params.ChainConfig, db ethdb.Database) consensus.Engine { +func CreateConsensusEngine(ctx *node.ServiceContext, chainConfig *params.ChainConfig, config *ethash.Config, notify []string, db ethdb.Database) consensus.Engine { // If proof-of-authority is requested, set it up if chainConfig.Clique != nil { return clique.New(chainConfig.Clique, db) @@ -222,7 +222,7 @@ func CreateConsensusEngine(ctx *node.ServiceContext, config *ethash.Config, chai return ethash.NewFaker() case ethash.ModeTest: log.Warn("Ethash used in test mode") - return ethash.NewTester() + return ethash.NewTester(nil) case ethash.ModeShared: log.Warn("Ethash used in shared mode") return ethash.NewShared() @@ -234,7 +234,7 @@ func CreateConsensusEngine(ctx *node.ServiceContext, config *ethash.Config, chai DatasetDir: config.DatasetDir, DatasetsInMem: config.DatasetsInMem, DatasetsOnDisk: config.DatasetsOnDisk, - }) + }, notify) engine.SetThreads(-1) // Disable CPU mining return engine } diff --git a/eth/config.go b/eth/config.go index 426d2bf1ef63..0c82f2923259 100644 --- a/eth/config.go +++ b/eth/config.go @@ -97,6 +97,7 @@ type Config struct { // Mining-related options Etherbase common.Address `toml:",omitempty"` MinerThreads int `toml:",omitempty"` + MinerNotify []string `toml:",omitempty"` ExtraData []byte `toml:",omitempty"` GasPrice *big.Int diff --git 
a/les/backend.go b/les/backend.go index 952d92cc2a22..178bc1e0e49b 100644 --- a/les/backend.go +++ b/les/backend.go @@ -102,7 +102,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) { peers: peers, reqDist: newRequestDistributor(peers, quitSync), accountManager: ctx.AccountManager, - engine: eth.CreateConsensusEngine(ctx, &config.Ethash, chainConfig, chainDb), + engine: eth.CreateConsensusEngine(ctx, chainConfig, &config.Ethash, nil, chainDb), shutdownChan: make(chan bool), networkId: config.NetworkId, bloomRequests: make(chan chan *bloombits.Retrieval), From 3ec5dda4d2dd0dec6d5bd465752f30e8f6ce208c Mon Sep 17 00:00:00 2001 From: Elad Date: Fri, 10 Aug 2018 13:49:37 +0200 Subject: [PATCH 120/166] swarm/api/http: added logging to denote request ended (#17371) --- swarm/api/http/middleware.go | 2 +- swarm/api/http/response.go | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/swarm/api/http/middleware.go b/swarm/api/http/middleware.go index d338a782cefe..c0d8d1a4085a 100644 --- a/swarm/api/http/middleware.go +++ b/swarm/api/http/middleware.go @@ -64,8 +64,8 @@ func ParseURI(h http.Handler) http.Handler { func InitLoggingResponseWriter(h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { writer := newLoggingResponseWriter(w) - h.ServeHTTP(writer, r) + log.Debug("request served", "ruid", GetRUID(r.Context()), "code", writer.statusCode) }) } diff --git a/swarm/api/http/response.go b/swarm/api/http/response.go index 9f4788d35e00..f050e706a85b 100644 --- a/swarm/api/http/response.go +++ b/swarm/api/http/response.go @@ -84,15 +84,16 @@ func RespondError(w http.ResponseWriter, r *http.Request, msg string, code int) } func respond(w http.ResponseWriter, r *http.Request, params *ResponseParams) { + w.WriteHeader(params.Code) if params.Code >= 400 { - w.Header().Del("Cache-Control") //avoid sending cache headers for errors! 
+		w.Header().Del("Cache-Control")
 		w.Header().Del("ETag")
 	}
 
 	acceptHeader := r.Header.Get("Accept")
-	// this cannot be in a switch form since an Accept header can be in the form of "Accept: */*, text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8"
+	// this cannot be in a switch since an Accept header can have multiple values: "Accept: */*, text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8"
 	if strings.Contains(acceptHeader, "application/json") {
 		if err := respondJSON(w, r, params); err != nil {
 			RespondError(w, r, "Internal server error", http.StatusInternalServerError)

From 6d1e292eefa70b5cb76cd03ff61fc6c4550d7c36 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jano=C5=A1=20Gulja=C5=A1?=
Date: Fri, 10 Aug 2018 16:12:55 +0200
Subject: [PATCH 121/166] Manifest cli fix and upload defaultpath only once
 (#17375)

* cmd/swarm: fix manifest subcommands and add tests

* cmd/swarm: manifest update: update default entry for non-encrypted uploads

* swarm/api: upload defaultpath file only once

* swarm/api/client: improve UploadDirectory default path handling

* cmd/swarm: support absolute and relative default path values

* cmd/swarm: fix a typo in test

* cmd/swarm: check encrypted uploads in manifest update tests
---
 cmd/swarm/main.go               |  10 +-
 cmd/swarm/manifest.go           | 234 ++++++-------
 cmd/swarm/manifest_test.go      | 579 ++++++++++++++++++++++++++++++++
 cmd/swarm/upload.go             |  11 +
 cmd/swarm/upload_test.go        |  81 +++++
 swarm/api/api.go                |  22 +-
 swarm/api/client/client.go      |  31 +-
 swarm/api/client/client_test.go |   2 +-
 swarm/api/http/server.go        |   4 +-
 swarm/api/manifest.go           |  17 +-
 10 files changed, 840 insertions(+), 151 deletions(-)
 create mode 100644 cmd/swarm/manifest_test.go

diff --git a/cmd/swarm/main.go b/cmd/swarm/main.go
index 258f24d3204d..ac09ae998156 100644
--- a/cmd/swarm/main.go
+++ b/cmd/swarm/main.go
@@ -322,23 +322,23 @@ Downloads a swarm bzz uri to the given dir. When no dir is provided, working dir
 			Description: "Updates a MANIFEST by adding/removing/updating the hash of a path.\nCOMMAND could be: add, update, remove",
 			Subcommands: []cli.Command{
 				{
-					Action:             add,
+					Action:             manifestAdd,
 					CustomHelpTemplate: helpTemplate,
 					Name:               "add",
 					Usage:              "add a new path to the manifest",
-					ArgsUsage:          "<MHASH> <path> <hash> [<content-type>]",
+					ArgsUsage:          "<MHASH> <path> <hash>",
 					Description:        "Adds a new path to the manifest",
 				},
 				{
-					Action:             update,
+					Action:             manifestUpdate,
 					CustomHelpTemplate: helpTemplate,
 					Name:               "update",
 					Usage:              "update the hash for an already existing path in the manifest",
-					ArgsUsage:          "<MHASH> <path> <hash> [<content-type>]",
+					ArgsUsage:          "<MHASH> <path> <hash>",
 					Description:        "Update the hash for an already existing path in the manifest",
 				},
 				{
-					Action:             remove,
+					Action:             manifestRemove,
 					CustomHelpTemplate: helpTemplate,
 					Name:               "remove",
 					Usage:              "removes a path from the manifest",
diff --git a/cmd/swarm/manifest.go b/cmd/swarm/manifest.go
index 82166edf6c80..0216ffc1dd70 100644
--- a/cmd/swarm/manifest.go
+++ b/cmd/swarm/manifest.go
@@ -18,10 +18,8 @@ package main
 
 import (
-	"encoding/json"
 	"fmt"
-	"mime"
-	"path/filepath"
+	"os"
 	"strings"
 
 	"github.com/ethereum/go-ethereum/cmd/utils"
@@ -30,127 +28,118 @@ import (
 	"gopkg.in/urfave/cli.v1"
 )
 
-const bzzManifestJSON = "application/bzz-manifest+json"
-
-func add(ctx *cli.Context) {
+// manifestAdd adds a new entry to the manifest at the given path.
+// The new entry hash, the last argument, must be the hash of a manifest
+// with only one entry, whose metadata will be added to the original manifest.
+// On success, this function will print the new (updated) manifest's hash.
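+//
+// Shell usage sketch (hashes are abbreviated and illustrative):
+//
+//	$ swarm up humans.txt
+//	<entry-manifest-hash>
+//	$ swarm manifest add <root-manifest-hash> humans.txt <entry-manifest-hash>
+//	<new-root-manifest-hash>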
+func manifestAdd(ctx *cli.Context) {
 	args := ctx.Args()
-	if len(args) < 3 {
-		utils.Fatalf("Need at least three arguments <MHASH> <path> <hash> [<content-type>]")
+	if len(args) != 3 {
+		utils.Fatalf("Need exactly three arguments <MHASH> <path> <HASH>")
 	}
 
 	var (
 		mhash = args[0]
 		path  = args[1]
 		hash  = args[2]
-
-		ctype        string
-		wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
-		mroot        api.Manifest
 	)
 
-	if len(args) > 3 {
-		ctype = args[3]
-	} else {
-		ctype = mime.TypeByExtension(filepath.Ext(path))
+	bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
+	client := swarm.NewClient(bzzapi)
+
+	m, _, err := client.DownloadManifest(hash)
+	if err != nil {
+		utils.Fatalf("Error downloading manifest to add: %v", err)
+	}
+	l := len(m.Entries)
+	if l == 0 {
+		utils.Fatalf("No entries in manifest %s", hash)
+	} else if l > 1 {
+		utils.Fatalf("Too many entries in manifest %s", hash)
 	}
 
-	newManifest := addEntryToManifest(ctx, mhash, path, hash, ctype)
+	newManifest := addEntryToManifest(client, mhash, path, m.Entries[0])
 	fmt.Println(newManifest)
-
-	if !wantManifest {
-		// Print the manifest. This is the only output to stdout.
-		mrootJSON, _ := json.MarshalIndent(mroot, "", "  ")
-		fmt.Println(string(mrootJSON))
-		return
-	}
 }
 
-func update(ctx *cli.Context) {
-
+// manifestUpdate replaces an existing entry of the manifest at the given path.
+// The new entry hash, the last argument, must be the hash of a manifest
+// with only one entry, whose metadata will be added to the original manifest.
+// On success, this function will print the hash of the updated manifest.
+func manifestUpdate(ctx *cli.Context) {
 	args := ctx.Args()
-	if len(args) < 3 {
-		utils.Fatalf("Need at least three arguments <MHASH> <path> <hash>")
+	if len(args) != 3 {
+		utils.Fatalf("Need exactly three arguments <MHASH> <path> <HASH>")
 	}
 
 	var (
 		mhash = args[0]
 		path  = args[1]
 		hash  = args[2]
-
-		ctype        string
-		wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
-		mroot        api.Manifest
 	)
 
-	if len(args) > 3 {
-		ctype = args[3]
-	} else {
-		ctype = mime.TypeByExtension(filepath.Ext(path))
-	}
-	newManifest := updateEntryInManifest(ctx, mhash, path, hash, ctype)
-	fmt.Println(newManifest)
+	bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
+	client := swarm.NewClient(bzzapi)
 
-	if !wantManifest {
-		// Print the manifest. This is the only output to stdout.
-		mrootJSON, _ := json.MarshalIndent(mroot, "", "  ")
-		fmt.Println(string(mrootJSON))
-		return
+	m, _, err := client.DownloadManifest(hash)
+	if err != nil {
+		utils.Fatalf("Error downloading manifest to update: %v", err)
+	}
+	l := len(m.Entries)
+	if l == 0 {
+		utils.Fatalf("No entries in manifest %s", hash)
+	} else if l > 1 {
+		utils.Fatalf("Too many entries in manifest %s", hash)
 	}
+
+	newManifest, _, defaultEntryUpdated := updateEntryInManifest(client, mhash, path, m.Entries[0], true)
+	if defaultEntryUpdated {
+		// Print informational message to stderr
+		// allowing the user to get the new manifest hash from stdout
+		// without the need to parse the complete output.
+		fmt.Fprintln(os.Stderr, "Manifest default entry is updated, too")
+	}
+	fmt.Println(newManifest)
 }
 
-func remove(ctx *cli.Context) {
+// manifestRemove removes an existing entry of the manifest at the given path.
+// On success, this function will print the hash of the manifest which does not
+// contain the path.
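+//
+// Shell usage sketch (hashes are abbreviated and illustrative):
+//
+//	$ swarm manifest remove <root-manifest-hash> humans.txt
+//	<new-root-manifest-hash>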
+func manifestRemove(ctx *cli.Context) {
 	args := ctx.Args()
-	if len(args) < 2 {
-		utils.Fatalf("Need at least two arguments <MHASH> <path>")
+	if len(args) != 2 {
+		utils.Fatalf("Need exactly two arguments <MHASH> <path>")
 	}
 
 	var (
 		mhash = args[0]
 		path  = args[1]
-
-		wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
-		mroot        api.Manifest
 	)
 
-	newManifest := removeEntryFromManifest(ctx, mhash, path)
-	fmt.Println(newManifest)
+	bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
+	client := swarm.NewClient(bzzapi)
 
-	if !wantManifest {
-		// Print the manifest. This is the only output to stdout.
-		mrootJSON, _ := json.MarshalIndent(mroot, "", "  ")
-		fmt.Println(string(mrootJSON))
-		return
-	}
+	newManifest := removeEntryFromManifest(client, mhash, path)
+	fmt.Println(newManifest)
 }
 
-func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) string {
-
-	var (
-		bzzapi           = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
-		client           = swarm.NewClient(bzzapi)
-		longestPathEntry = api.ManifestEntry{}
-	)
+func addEntryToManifest(client *swarm.Client, mhash, path string, entry api.ManifestEntry) string {
+	var longestPathEntry = api.ManifestEntry{}
 
 	mroot, isEncrypted, err := client.DownloadManifest(mhash)
 	if err != nil {
 		utils.Fatalf("Manifest download failed: %v", err)
 	}
 
-	//TODO: check if the "hash" to add is valid and present in swarm
-	_, _, err = client.DownloadManifest(hash)
-	if err != nil {
-		utils.Fatalf("Hash to add is not present: %v", err)
-	}
-
 	// See if we path is in this Manifest or do we have to dig deeper
-	for _, entry := range mroot.Entries {
-		if path == entry.Path {
+	for _, e := range mroot.Entries {
+		if path == e.Path {
 			utils.Fatalf("Path %s already present, not adding anything", path)
 		} else {
-			if entry.ContentType == bzzManifestJSON {
-				prfxlen := strings.HasPrefix(path, entry.Path)
+			if e.ContentType == api.ManifestType {
+				prfxlen := strings.HasPrefix(path, e.Path)
 				if prfxlen && len(path) > len(longestPathEntry.Path) {
-					longestPathEntry = entry
+					longestPathEntry = e
 				}
 			}
 		}
@@ -159,25 +148,21 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) strin
 	if longestPathEntry.Path != "" {
 		// Load the child Manifest add the entry there
 		newPath := path[len(longestPathEntry.Path):]
-		newHash := addEntryToManifest(ctx, longestPathEntry.Hash, newPath, hash, ctype)
+		newHash := addEntryToManifest(client, longestPathEntry.Hash, newPath, entry)
 
 		// Replace the hash for parent Manifests
 		newMRoot := &api.Manifest{}
-		for _, entry := range mroot.Entries {
-			if longestPathEntry.Path == entry.Path {
-				entry.Hash = newHash
+		for _, e := range mroot.Entries {
+			if longestPathEntry.Path == e.Path {
+				e.Hash = newHash
 			}
-			newMRoot.Entries = append(newMRoot.Entries, entry)
+			newMRoot.Entries = append(newMRoot.Entries, e)
 		}
 		mroot = newMRoot
 	} else {
 		// Add the entry in the leaf Manifest
-		newEntry := api.ManifestEntry{
-			Hash:        hash,
-			Path:        path,
-			ContentType: ctype,
-		}
-		mroot.Entries = append(mroot.Entries, newEntry)
+		entry.Path = path
+		mroot.Entries = append(mroot.Entries, entry)
 	}
 
 	newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
@@ -185,14 +170,16 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) strin
 		utils.Fatalf("Manifest upload failed: %v", err)
 	}
 	return newManifestHash
-
 }
 
-func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) string {
-
+// updateEntryInManifest updates an existing entry at path with a new one in the manifest with the provided mhash,
+// finding the path recursively through all nested manifests. Argument isRoot is used for default
+// entry update detection. If the updated entry has the same hash as the default entry, then the
+// default entry in the root manifest will be updated too.
+// Returned values are the new manifest hash, the hash of the entry that was replaced by the new entry and
+// a bool that is true if the default entry is updated.
+func updateEntryInManifest(client *swarm.Client, mhash, path string, entry api.ManifestEntry, isRoot bool) (newManifestHash, oldHash string, defaultEntryUpdated bool) {
 	var (
-		bzzapi           = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
-		client           = swarm.NewClient(bzzapi)
 		newEntry         = api.ManifestEntry{}
 		longestPathEntry = api.ManifestEntry{}
 	)
@@ -202,17 +189,18 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) st
 		utils.Fatalf("Manifest download failed: %v", err)
 	}
 
-	//TODO: check if the "hash" with which to update is valid and present in swarm
-
 	// See if we path is in this Manifest or do we have to dig deeper
-	for _, entry := range mroot.Entries {
-		if path == entry.Path {
-			newEntry = entry
+	for _, e := range mroot.Entries {
+		if path == e.Path {
+			newEntry = e
+			// keep the reference of the hash of the entry that should be replaced
+			// for default entry detection
+			oldHash = e.Hash
 		} else {
-			if entry.ContentType == bzzManifestJSON {
-				prfxlen := strings.HasPrefix(path, entry.Path)
+			if e.ContentType == api.ManifestType {
+				prfxlen := strings.HasPrefix(path, e.Path)
 				if prfxlen && len(path) > len(longestPathEntry.Path) {
-					longestPathEntry = entry
+					longestPathEntry = e
 				}
 			}
 		}
@@ -225,50 +213,50 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) st
 	if longestPathEntry.Path != "" {
 		// Load the child Manifest add the entry there
 		newPath := path[len(longestPathEntry.Path):]
-		newHash := updateEntryInManifest(ctx, longestPathEntry.Hash, newPath, hash, ctype)
+		var newHash string
+		newHash, oldHash, _ = updateEntryInManifest(client, longestPathEntry.Hash, newPath, entry, false)
 
 		// Replace the hash for parent Manifests
 		newMRoot := &api.Manifest{}
-		for _, entry := range mroot.Entries {
-			if longestPathEntry.Path == entry.Path {
-				entry.Hash = newHash
+		for _, e := range mroot.Entries {
+			if longestPathEntry.Path == e.Path {
+				e.Hash = newHash
 			}
-			newMRoot.Entries = append(newMRoot.Entries, entry)
+			newMRoot.Entries = append(newMRoot.Entries, e)
 		}
 		mroot = newMRoot
 	}
 
-	if newEntry.Path != "" {
+	// update the manifest if the new entry is found and
+	// check if default entry should be updated
+	if newEntry.Path != "" || isRoot {
 		// Replace the hash for leaf Manifest
 		newMRoot := &api.Manifest{}
-		for _, entry := range mroot.Entries {
-			if newEntry.Path == entry.Path {
-				myEntry := api.ManifestEntry{
-					Hash:        hash,
-					Path:        entry.Path,
-					ContentType: ctype,
-				}
-				newMRoot.Entries = append(newMRoot.Entries, myEntry)
-			} else {
+		for _, e := range mroot.Entries {
+			if newEntry.Path == e.Path {
+				entry.Path = e.Path
 				newMRoot.Entries = append(newMRoot.Entries, entry)
+			} else if isRoot && e.Path == "" && e.Hash == oldHash {
+				entry.Path = e.Path
+				newMRoot.Entries = append(newMRoot.Entries, entry)
+				defaultEntryUpdated = true
+			} else {
+				newMRoot.Entries = append(newMRoot.Entries, e)
 			}
 		}
 		mroot = newMRoot
 	}
 
-	newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
+	newManifestHash, err = client.UploadManifest(mroot, isEncrypted)
 	if err != nil {
 		utils.Fatalf("Manifest upload failed: %v", err)
 	}
-	return newManifestHash
+	return newManifestHash, oldHash, defaultEntryUpdated
 }
 
-func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
-
+func removeEntryFromManifest(client *swarm.Client, mhash, path string) string {
 	var (
-		bzzapi           = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
-		client           = swarm.NewClient(bzzapi)
 		entryToRemove    = api.ManifestEntry{}
 		longestPathEntry = api.ManifestEntry{}
 	)
@@ -283,7 +271,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
 		if path == entry.Path {
 			entryToRemove = entry
 		} else {
-			if entry.ContentType == bzzManifestJSON {
+			if entry.ContentType == api.ManifestType {
 				prfxlen := strings.HasPrefix(path, entry.Path)
 				if prfxlen && len(path) > len(longestPathEntry.Path) {
 					longestPathEntry = entry
@@ -299,7 +287,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
 	if longestPathEntry.Path != "" {
 		// Load the child Manifest remove the entry there
 		newPath := path[len(longestPathEntry.Path):]
-		newHash := removeEntryFromManifest(ctx, longestPathEntry.Hash, newPath)
+		newHash := removeEntryFromManifest(client, longestPathEntry.Hash, newPath)
 
 		// Replace the hash for parent Manifests
 		newMRoot := &api.Manifest{}
diff --git a/cmd/swarm/manifest_test.go b/cmd/swarm/manifest_test.go
new file mode 100644
index 000000000000..08fe0b2eb7ab
--- /dev/null
+++ b/cmd/swarm/manifest_test.go
@@ -0,0 +1,579 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+	"bytes"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/swarm/api"
+	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
+)
+
+// TestManifestChange tests manifest add, update and remove
+// cli commands without encryption.
+func TestManifestChange(t *testing.T) {
+	testManifestChange(t, false)
+}
+
+// TestManifestChangeEncrypted tests manifest add, update and remove
+// cli commands with encryption enabled.
+func TestManifestChangeEncrypted(t *testing.T) {
+	testManifestChange(t, true)
+}
+
+// testManifestChange performs cli commands:
+// - manifest add
+// - manifest update
+// - manifest remove
+// on a manifest, testing the functionality of these
+// commands on paths that are in the root manifest or a nested one.
+// Argument encrypt controls whether to use encryption or not.
+func testManifestChange(t *testing.T, encrypt bool) {
+	t.Parallel()
+	cluster := newTestCluster(t, 1)
+	defer cluster.Shutdown()
+
+	tmp, err := ioutil.TempDir("", "swarm-manifest-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmp)
+
+	origDir := filepath.Join(tmp, "orig")
+	if err := os.Mkdir(origDir, 0777); err != nil {
+		t.Fatal(err)
+	}
+
+	indexDataFilename := filepath.Join(origDir, "index.html")
+	err = ioutil.WriteFile(indexDataFilename, []byte("
+	if err != nil {
+		t.Fatal(err)
+	}
+	// File paths robots.txt and robots.html share the same prefix "robots."
+	// which will result in a manifest with a nested manifest under path "robots.".
+	// This will allow testing manifest changes on both the root and nested manifests.
+	err = ioutil.WriteFile(filepath.Join(origDir, "robots.txt"), []byte("Disallow: /"), 0666)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = ioutil.WriteFile(filepath.Join(origDir, "robots.html"), []byte("No Robots Allowed"), 0666)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = ioutil.WriteFile(filepath.Join(origDir, "mutants.txt"), []byte("Frank\nMarcus"), 0666)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	args := []string{
+		"--bzzapi",
+		cluster.Nodes[0].URL,
+		"--recursive",
+		"--defaultpath",
+		indexDataFilename,
+		"up",
+		origDir,
+	}
+	if encrypt {
+		args = append(args, "--encrypt")
+	}
+
+	origManifestHash := runSwarmExpectHash(t, args...)
+
+	checkHashLength(t, origManifestHash, encrypt)
+
+	client := swarm.NewClient(cluster.Nodes[0].URL)
+
+	// upload a new file and use its manifest to add it to the original manifest.
+	t.Run("add", func(t *testing.T) {
+		humansData := []byte("Ann\nBob")
+		humansDataFilename := filepath.Join(tmp, "humans.txt")
+		err = ioutil.WriteFile(humansDataFilename, humansData, 0666)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		humansManifestHash := runSwarmExpectHash(t,
+			"--bzzapi",
+			cluster.Nodes[0].URL,
+			"up",
+			humansDataFilename,
+		)
+
+		newManifestHash := runSwarmExpectHash(t,
+			"--bzzapi",
+			cluster.Nodes[0].URL,
+			"manifest",
+			"add",
+			origManifestHash,
+			"humans.txt",
+			humansManifestHash,
+		)
+
+		checkHashLength(t, newManifestHash, encrypt)
+
+		newManifest := downloadManifest(t, client, newManifestHash, encrypt)
+
+		var found bool
+		for _, e := range newManifest.Entries {
+			if e.Path == "humans.txt" {
+				found = true
+				if e.Size != int64(len(humansData)) {
+					t.Errorf("expected humans.txt size %v, got %v", len(humansData), e.Size)
+				}
+				if e.ModTime.IsZero() {
+					t.Errorf("got zero mod time for humans.txt")
+				}
+				ct := "text/plain; charset=utf-8"
+				if e.ContentType != ct {
+					t.Errorf("expected content type %q, got %q", ct, e.ContentType)
+				}
+				break
+			}
+		}
+		if !found {
+			t.Fatal("no humans.txt in new manifest")
+		}
+
+		checkFile(t, client, newManifestHash, "humans.txt", humansData)
+	})
+
+	// upload a new file and use its manifest to add it to the original manifest,
+	// but ensure that the file will be in the nested manifest of the original one.
+	t.Run("add nested", func(t *testing.T) {
+		robotsData := []byte(`{"disallow": "/"}`)
+		robotsDataFilename := filepath.Join(tmp, "robots.json")
+		err = ioutil.WriteFile(robotsDataFilename, robotsData, 0666)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		robotsManifestHash := runSwarmExpectHash(t,
+			"--bzzapi",
+			cluster.Nodes[0].URL,
+			"up",
+			robotsDataFilename,
+		)
+
+		newManifestHash := runSwarmExpectHash(t,
+			"--bzzapi",
+			cluster.Nodes[0].URL,
+			"manifest",
+			"add",
+			origManifestHash,
+			"robots.json",
+			robotsManifestHash,
+		)
+
+		checkHashLength(t, newManifestHash, encrypt)
+
+		newManifest := downloadManifest(t, client, newManifestHash, encrypt)
+
+		var found bool
+	loop:
+		for _, e := range newManifest.Entries {
+			if e.Path == "robots." {
+				nestedManifest := downloadManifest(t, client, e.Hash, encrypt)
+				for _, e := range nestedManifest.Entries {
+					if e.Path == "json" {
+						found = true
+						if e.Size != int64(len(robotsData)) {
+							t.Errorf("expected robots.json size %v, got %v", len(robotsData), e.Size)
+						}
+						if e.ModTime.IsZero() {
+							t.Errorf("got zero mod time for robots.json")
+						}
+						ct := "application/json"
+						if e.ContentType != ct {
+							t.Errorf("expected content type %q, got %q", ct, e.ContentType)
+						}
+						break loop
+					}
+				}
+			}
+		}
+		if !found {
+			t.Fatal("no robots.json in new manifest")
+		}
+
+		checkFile(t, client, newManifestHash, "robots.json", robotsData)
+	})
+
+	// upload a new file and use its manifest to change the file in the original manifest.
+	t.Run("update", func(t *testing.T) {
+		indexData := []byte("<h1>Ethereum Swarm</h1>")
+		indexDataFilename := filepath.Join(tmp, "index.html")
+		err = ioutil.WriteFile(indexDataFilename, indexData, 0666)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		indexManifestHash := runSwarmExpectHash(t,
+			"--bzzapi",
+			cluster.Nodes[0].URL,
+			"up",
+			indexDataFilename,
+		)
+
+		newManifestHash := runSwarmExpectHash(t,
+			"--bzzapi",
+			cluster.Nodes[0].URL,
+			"manifest",
+			"update",
+			origManifestHash,
+			"index.html",
+			indexManifestHash,
+		)
+
+		checkHashLength(t, newManifestHash, encrypt)
+
+		newManifest := downloadManifest(t, client, newManifestHash, encrypt)
+
+		var found bool
+		for _, e := range newManifest.Entries {
+			if e.Path == "index.html" {
+				found = true
+				if e.Size != int64(len(indexData)) {
+					t.Errorf("expected index.html size %v, got %v", len(indexData), e.Size)
+				}
+				if e.ModTime.IsZero() {
+					t.Errorf("got zero mod time for index.html")
+				}
+				ct := "text/html; charset=utf-8"
+				if e.ContentType != ct {
+					t.Errorf("expected content type %q, got %q", ct, e.ContentType)
+				}
+				break
+			}
+		}
+		if !found {
+			t.Fatal("no index.html in new manifest")
+		}
+
+		checkFile(t, client, newManifestHash, "index.html", indexData)
+
+		// check default entry change
+		checkFile(t, client, newManifestHash, "", indexData)
+	})
+
+	// upload a new file and use its manifest to change the file in the original manifest,
+	// but ensure that the file is in the nested manifest of the original one.
+	t.Run("update nested", func(t *testing.T) {
+		robotsData := []byte(`Only humans allowed!!!`)
+		robotsDataFilename := filepath.Join(tmp, "robots.html")
+		err = ioutil.WriteFile(robotsDataFilename, robotsData, 0666)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		robotsManifestHash := runSwarmExpectHash(t,
+			"--bzzapi",
+			cluster.Nodes[0].URL,
+			"up",
+			robotsDataFilename,
+		)
+
+		newManifestHash := runSwarmExpectHash(t,
+			"--bzzapi",
+			cluster.Nodes[0].URL,
+			"manifest",
+			"update",
+			origManifestHash,
+			"robots.html",
+			robotsManifestHash,
+		)
+
+		checkHashLength(t, newManifestHash, encrypt)
+
+		newManifest := downloadManifest(t, client, newManifestHash, encrypt)
+
+		var found bool
+	loop:
+		for _, e := range newManifest.Entries {
+			if e.Path == "robots." {
+				nestedManifest := downloadManifest(t, client, e.Hash, encrypt)
+				for _, e := range nestedManifest.Entries {
+					if e.Path == "html" {
+						found = true
+						if e.Size != int64(len(robotsData)) {
+							t.Errorf("expected robots.html size %v, got %v", len(robotsData), e.Size)
+						}
+						if e.ModTime.IsZero() {
+							t.Errorf("got zero mod time for robots.html")
+						}
+						ct := "text/html; charset=utf-8"
+						if e.ContentType != ct {
+							t.Errorf("expected content type %q, got %q", ct, e.ContentType)
+						}
+						break loop
+					}
+				}
+			}
+		}
+		if !found {
+			t.Fatal("no robots.html in new manifest")
+		}
+
+		checkFile(t, client, newManifestHash, "robots.html", robotsData)
+	})
+
+	// remove a file from the manifest.
+	t.Run("remove", func(t *testing.T) {
+		newManifestHash := runSwarmExpectHash(t,
+			"--bzzapi",
+			cluster.Nodes[0].URL,
+			"manifest",
+			"remove",
+			origManifestHash,
+			"mutants.txt",
+		)
+
+		checkHashLength(t, newManifestHash, encrypt)
+
+		newManifest := downloadManifest(t, client, newManifestHash, encrypt)
+
+		var found bool
+		for _, e := range newManifest.Entries {
+			if e.Path == "mutants.txt" {
+				found = true
+				break
+			}
+		}
+		if found {
+			t.Fatal("mutants.txt is not removed")
+		}
+	})
+
+	// remove a file from the manifest, but ensure that the file is in
+	// the nested manifest of the original one.
+	t.Run("remove nested", func(t *testing.T) {
+		newManifestHash := runSwarmExpectHash(t,
+			"--bzzapi",
+			cluster.Nodes[0].URL,
+			"manifest",
+			"remove",
+			origManifestHash,
+			"robots.html",
+		)
+
+		checkHashLength(t, newManifestHash, encrypt)
+
+		newManifest := downloadManifest(t, client, newManifestHash, encrypt)
+
+		var found bool
+	loop:
+		for _, e := range newManifest.Entries {
+			if e.Path == "robots." {
+				nestedManifest := downloadManifest(t, client, e.Hash, encrypt)
+				for _, e := range nestedManifest.Entries {
+					if e.Path == "html" {
+						found = true
+						break loop
+					}
+				}
+			}
+		}
+		if found {
+			t.Fatal("robots.html is not removed")
+		}
+	})
+}
+
+// TestNestedDefaultEntryUpdate tests if the default entry is updated
+// if the file in the nested manifest used for it is also updated.
+func TestNestedDefaultEntryUpdate(t *testing.T) {
+	testNestedDefaultEntryUpdate(t, false)
+}
+
+// TestNestedDefaultEntryUpdateEncrypted tests if the default entry
+// of an encrypted upload is updated if the file in the nested manifest
+// used for it is also updated.
+func TestNestedDefaultEntryUpdateEncrypted(t *testing.T) {
+	testNestedDefaultEntryUpdate(t, true)
+}
+
+func testNestedDefaultEntryUpdate(t *testing.T, encrypt bool) {
+	t.Parallel()
+	cluster := newTestCluster(t, 1)
+	defer cluster.Shutdown()
+
+	tmp, err := ioutil.TempDir("", "swarm-manifest-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmp)
+
+	origDir := filepath.Join(tmp, "orig")
+	if err := os.Mkdir(origDir, 0777); err != nil {
+		t.Fatal(err)
+	}
+
+	indexData := []byte("<h1>Test</h1>")
+	indexDataFilename := filepath.Join(origDir, "index.html")
+	err = ioutil.WriteFile(indexDataFilename, indexData, 0666)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Add another file with a path that shares a common prefix with the default
+	// entry, to test updates of the default entry with nested manifests.
+	err = ioutil.WriteFile(filepath.Join(origDir, "index.txt"), []byte("Test"), 0666)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	args := []string{
+		"--bzzapi",
+		cluster.Nodes[0].URL,
+		"--recursive",
+		"--defaultpath",
+		indexDataFilename,
+		"up",
+		origDir,
+	}
+	if encrypt {
+		args = append(args, "--encrypt")
+	}
+
+	origManifestHash := runSwarmExpectHash(t, args...)
+
+	checkHashLength(t, origManifestHash, encrypt)
+
+	client := swarm.NewClient(cluster.Nodes[0].URL)
+
+	newIndexData := []byte("
<h1>Ethereum Swarm</h1>
") + newIndexDataFilename := filepath.Join(tmp, "index.html") + err = ioutil.WriteFile(newIndexDataFilename, newIndexData, 0666) + if err != nil { + t.Fatal(err) + } + + newIndexManifestHash := runSwarmExpectHash(t, + "--bzzapi", + cluster.Nodes[0].URL, + "up", + newIndexDataFilename, + ) + + newManifestHash := runSwarmExpectHash(t, + "--bzzapi", + cluster.Nodes[0].URL, + "manifest", + "update", + origManifestHash, + "index.html", + newIndexManifestHash, + ) + + checkHashLength(t, newManifestHash, encrypt) + + newManifest := downloadManifest(t, client, newManifestHash, encrypt) + + var found bool + for _, e := range newManifest.Entries { + if e.Path == "index." { + found = true + newManifest = downloadManifest(t, client, e.Hash, encrypt) + break + } + } + if !found { + t.Fatal("no index. path in new manifest") + } + + found = false + for _, e := range newManifest.Entries { + if e.Path == "html" { + found = true + if e.Size != int64(len(newIndexData)) { + t.Errorf("expected index.html size %v, got %v", len(newIndexData), e.Size) + } + if e.ModTime.IsZero() { + t.Errorf("got zero mod time for index.html") + } + ct := "text/html; charset=utf-8" + if e.ContentType != ct { + t.Errorf("expected content type %q, got %q", ct, e.ContentType) + } + break + } + } + if !found { + t.Fatal("no html in new manifest") + } + + checkFile(t, client, newManifestHash, "index.html", newIndexData) + + // check default entry change + checkFile(t, client, newManifestHash, "", newIndexData) +} + +func runSwarmExpectHash(t *testing.T, args ...string) (hash string) { + t.Helper() + hashRegexp := `[a-f\d]{64,128}` + up := runSwarm(t, args...) + _, matches := up.ExpectRegexp(hashRegexp) + up.ExpectExit() + + if len(matches) < 1 { + t.Fatal("no matches found") + } + return matches[0] +} + +func checkHashLength(t *testing.T, hash string, encrypted bool) { + t.Helper() + l := len(hash) + if encrypted && l != 128 { + t.Errorf("expected hash length 128, got %v", l) + } + if !encrypted && l != 64 { + t.Errorf("expected hash length 64, got %v", l) + } +} + +func downloadManifest(t *testing.T, client *swarm.Client, hash string, encrypted bool) (manifest *api.Manifest) { + t.Helper() + m, isEncrypted, err := client.DownloadManifest(hash) + if err != nil { + t.Fatal(err) + } + + if encrypted != isEncrypted { + t.Error("new manifest encryption flag is not correct") + } + return m +} + +func checkFile(t *testing.T, client *swarm.Client, hash, path string, expected []byte) { + t.Helper() + f, err := client.Download(hash, path) + if err != nil { + t.Fatal(err) + } + + got, err := ioutil.ReadAll(f) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(got, expected) { + t.Errorf("expected file content %q, got %q", expected, got) + } +} diff --git a/cmd/swarm/upload.go b/cmd/swarm/upload.go index 8ba0e7c5f0c0..9eae2a3f806b 100644 --- a/cmd/swarm/upload.go +++ b/cmd/swarm/upload.go @@ -98,6 +98,17 @@ func upload(ctx *cli.Context) { if !recursive { return "", errors.New("Argument is a directory and recursive upload is disabled") } + if defaultPath != "" { + // construct absolute default path + absDefaultPath, _ := filepath.Abs(defaultPath) + absFile, _ := filepath.Abs(file) + // make sure absolute directory ends with only one "/" + // to trim it from absolute default path and get relative default path + absFile = strings.TrimRight(absFile, "/") + "/" + if absDefaultPath != "" && absFile != "" && strings.HasPrefix(absDefaultPath, absFile) { + defaultPath = strings.TrimPrefix(absDefaultPath, absFile) + } + } return 
client.UploadDirectory(file, defaultPath, "", toEncrypt) } } else { diff --git a/cmd/swarm/upload_test.go b/cmd/swarm/upload_test.go index 2afc9b3a1186..c3199dadc6ba 100644 --- a/cmd/swarm/upload_test.go +++ b/cmd/swarm/upload_test.go @@ -273,3 +273,84 @@ func testCLISwarmUpRecursive(toEncrypt bool, t *testing.T) { } } } + +// TestCLISwarmUpDefaultPath tests swarm recursive upload with relative and absolute +// default paths and with encryption. +func TestCLISwarmUpDefaultPath(t *testing.T) { + testCLISwarmUpDefaultPath(false, false, t) + testCLISwarmUpDefaultPath(false, true, t) + testCLISwarmUpDefaultPath(true, false, t) + testCLISwarmUpDefaultPath(true, true, t) +} + +func testCLISwarmUpDefaultPath(toEncrypt bool, absDefaultPath bool, t *testing.T) { + cluster := newTestCluster(t, 1) + defer cluster.Shutdown() + + tmp, err := ioutil.TempDir("", "swarm-defaultpath-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + err = ioutil.WriteFile(filepath.Join(tmp, "index.html"), []byte("
<h1>Test</h1>
"), 0666) + if err != nil { + t.Fatal(err) + } + err = ioutil.WriteFile(filepath.Join(tmp, "robots.txt"), []byte("Disallow: /"), 0666) + if err != nil { + t.Fatal(err) + } + + defaultPath := "index.html" + if absDefaultPath { + defaultPath = filepath.Join(tmp, defaultPath) + } + + args := []string{ + "--bzzapi", + cluster.Nodes[0].URL, + "--recursive", + "--defaultpath", + defaultPath, + "up", + tmp, + } + if toEncrypt { + args = append(args, "--encrypt") + } + + up := runSwarm(t, args...) + hashRegexp := `[a-f\d]{64,128}` + _, matches := up.ExpectRegexp(hashRegexp) + up.ExpectExit() + hash := matches[0] + + client := swarm.NewClient(cluster.Nodes[0].URL) + + m, isEncrypted, err := client.DownloadManifest(hash) + if err != nil { + t.Fatal(err) + } + + if toEncrypt != isEncrypted { + t.Error("downloaded manifest is not encrypted") + } + + var found bool + var entriesCount int + for _, e := range m.Entries { + entriesCount++ + if e.Path == "" { + found = true + } + } + + if !found { + t.Error("manifest default entry was not found") + } + + if entriesCount != 3 { + t.Errorf("manifest contains %v entries, expected %v", entriesCount, 3) + } +} diff --git a/swarm/api/api.go b/swarm/api/api.go index b418c45e1ce6..99d971b105ce 100644 --- a/swarm/api/api.go +++ b/swarm/api/api.go @@ -704,11 +704,12 @@ func (a *API) AddFile(ctx context.Context, mhash, path, fname string, content [] return fkey, newMkey.String(), nil } -func (a *API) UploadTar(ctx context.Context, bodyReader io.ReadCloser, manifestPath string, mw *ManifestWriter) (storage.Address, error) { +func (a *API) UploadTar(ctx context.Context, bodyReader io.ReadCloser, manifestPath, defaultPath string, mw *ManifestWriter) (storage.Address, error) { apiUploadTarCount.Inc(1) var contentKey storage.Address tr := tar.NewReader(bodyReader) defer bodyReader.Close() + var defaultPathFound bool for { hdr, err := tr.Next() if err == io.EOF { @@ -737,6 +738,25 @@ func (a *API) UploadTar(ctx context.Context, bodyReader io.ReadCloser, manifestP apiUploadTarFail.Inc(1) return nil, fmt.Errorf("error adding manifest entry from tar stream: %s", err) } + if hdr.Name == defaultPath { + entry := &ManifestEntry{ + Hash: contentKey.Hex(), + Path: "", // default entry + ContentType: hdr.Xattrs["user.swarm.content-type"], + Mode: hdr.Mode, + Size: hdr.Size, + ModTime: hdr.ModTime, + } + contentKey, err = mw.AddEntry(ctx, nil, entry) + if err != nil { + apiUploadTarFail.Inc(1) + return nil, fmt.Errorf("error adding default manifest entry from tar stream: %s", err) + } + defaultPathFound = true + } + } + if defaultPath != "" && !defaultPathFound { + return contentKey, fmt.Errorf("default path %q not found", defaultPath) } return contentKey, nil } diff --git a/swarm/api/client/client.go b/swarm/api/client/client.go index b3a5e929d016..8a9efe3608c9 100644 --- a/swarm/api/client/client.go +++ b/swarm/api/client/client.go @@ -138,7 +138,7 @@ func (c *Client) Upload(file *File, manifest string, toEncrypt bool) (string, er if file.Size <= 0 { return "", errors.New("file size must be greater than zero") } - return c.TarUpload(manifest, &FileUploader{file}, toEncrypt) + return c.TarUpload(manifest, &FileUploader{file}, "", toEncrypt) } // Download downloads a file with the given path from the swarm manifest with @@ -175,7 +175,15 @@ func (c *Client) UploadDirectory(dir, defaultPath, manifest string, toEncrypt bo } else if !stat.IsDir() { return "", fmt.Errorf("not a directory: %s", dir) } - return c.TarUpload(manifest, &DirectoryUploader{dir, defaultPath}, toEncrypt) + if 
defaultPath != "" { + if _, err := os.Stat(filepath.Join(dir, defaultPath)); err != nil { + if os.IsNotExist(err) { + return "", fmt.Errorf("the default path %q was not found in the upload directory %q", defaultPath, dir) + } + return "", fmt.Errorf("default path: %v", err) + } + } + return c.TarUpload(manifest, &DirectoryUploader{dir}, defaultPath, toEncrypt) } // DownloadDirectory downloads the files contained in a swarm manifest under @@ -389,21 +397,11 @@ func (u UploaderFunc) Upload(upload UploadFn) error { // DirectoryUploader uploads all files in a directory, optionally uploading // a file to the default path type DirectoryUploader struct { - Dir string - DefaultPath string + Dir string } // Upload performs the upload of the directory and default path func (d *DirectoryUploader) Upload(upload UploadFn) error { - if d.DefaultPath != "" { - file, err := Open(d.DefaultPath) - if err != nil { - return err - } - if err := upload(file); err != nil { - return err - } - } return filepath.Walk(d.Dir, func(path string, f os.FileInfo, err error) error { if err != nil { return err @@ -441,7 +439,7 @@ type UploadFn func(file *File) error // TarUpload uses the given Uploader to upload files to swarm as a tar stream, // returning the resulting manifest hash -func (c *Client) TarUpload(hash string, uploader Uploader, toEncrypt bool) (string, error) { +func (c *Client) TarUpload(hash string, uploader Uploader, defaultPath string, toEncrypt bool) (string, error) { reqR, reqW := io.Pipe() defer reqR.Close() addr := hash @@ -458,6 +456,11 @@ func (c *Client) TarUpload(hash string, uploader Uploader, toEncrypt bool) (stri return "", err } req.Header.Set("Content-Type", "application/x-tar") + if defaultPath != "" { + q := req.URL.Query() + q.Set("defaultpath", defaultPath) + req.URL.RawQuery = q.Encode() + } // use 'Expect: 100-continue' so we don't send the request body if // the server refuses the request diff --git a/swarm/api/client/client_test.go b/swarm/api/client/client_test.go index dc608e3f1f86..ae82a91d798a 100644 --- a/swarm/api/client/client_test.go +++ b/swarm/api/client/client_test.go @@ -194,7 +194,7 @@ func TestClientUploadDownloadDirectory(t *testing.T) { // upload the directory client := NewClient(srv.URL) - defaultPath := filepath.Join(dir, testDirFiles[0]) + defaultPath := testDirFiles[0] hash, err := client.UploadDirectory(dir, defaultPath, "", false) if err != nil { t.Fatalf("error uploading directory: %s", err) diff --git a/swarm/api/http/server.go b/swarm/api/http/server.go index bd6949de6caf..5a5c42adc073 100644 --- a/swarm/api/http/server.go +++ b/swarm/api/http/server.go @@ -336,7 +336,9 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *http.Request) { func (s *Server) handleTarUpload(r *http.Request, mw *api.ManifestWriter) (storage.Address, error) { log.Debug("handle.tar.upload", "ruid", GetRUID(r.Context())) - key, err := s.api.UploadTar(r.Context(), r.Body, GetURI(r.Context()).Path, mw) + defaultPath := r.URL.Query().Get("defaultpath") + + key, err := s.api.UploadTar(r.Context(), r.Body, GetURI(r.Context()).Path, defaultPath, mw) if err != nil { return nil, err } diff --git a/swarm/api/manifest.go b/swarm/api/manifest.go index fbd143f295a1..2a163dd39c9e 100644 --- a/swarm/api/manifest.go +++ b/swarm/api/manifest.go @@ -106,13 +106,18 @@ func (a *API) NewManifestWriter(ctx context.Context, addr storage.Address, quitC } // AddEntry stores the given data and adds the resulting key to the manifest -func (m *ManifestWriter) AddEntry(ctx context.Context, data 
io.Reader, e *ManifestEntry) (storage.Address, error) {
-	key, _, err := m.api.Store(ctx, data, e.Size, m.trie.encrypted)
-	if err != nil {
-		return nil, err
-	}
+func (m *ManifestWriter) AddEntry(ctx context.Context, data io.Reader, e *ManifestEntry) (key storage.Address, err error) {
 	entry := newManifestTrieEntry(e, nil)
-	entry.Hash = key.Hex()
+	if data != nil {
+		key, _, err = m.api.Store(ctx, data, e.Size, m.trie.encrypted)
+		if err != nil {
+			return nil, err
+		}
+		entry.Hash = key.Hex()
+	}
+	if entry.Hash == "" {
+		return key, errors.New("missing entry hash")
+	}
 	m.trie.addEntry(entry, m.quitC)
 	return key, nil
 }

From fb368723acf83e64c71e1eaa403e7cda06e6ce5e Mon Sep 17 00:00:00 2001
From: Mymskmkt <1847234666@qq.com>
Date: Mon, 13 Aug 2018 16:40:52 +0800
Subject: [PATCH 122/166] core: fix comment typo (#17376)

---
 core/tx_cacher.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/core/tx_cacher.go b/core/tx_cacher.go
index 6d989c83d98c..bcaa5ead3813 100644
--- a/core/tx_cacher.go
+++ b/core/tx_cacher.go
@@ -22,7 +22,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 )
 
-// senderCacher is a concurrent tranaction sender recoverer anc cacher.
+// senderCacher is a concurrent transaction sender recoverer and cacher.
 var senderCacher = newTxSenderCacher(runtime.NumCPU())
 
 // txSenderCacherRequest is a request for recovering transaction senders with a
@@ -45,7 +45,7 @@ type txSenderCacher struct {
 }
 
 // newTxSenderCacher creates a new transaction sender background cacher and starts
-// as many procesing goroutines as allowed by the GOMAXPROCS on construction.
+// as many processing goroutines as allowed by the GOMAXPROCS on construction.
 func newTxSenderCacher(threads int) *txSenderCacher {
 	cacher := &txSenderCacher{
 		tasks: make(chan *txSenderCacherRequest, threads),

From e07e507d1af687cd64b263038ebd3cb7be74fb40 Mon Sep 17 00:00:00 2001
From: Eugene Valeyev
Date: Mon, 13 Aug 2018 17:27:25 +0300
Subject: [PATCH 123/166] whisper: fixed broken partial topic filtering

Changes in #15811 broke partial topic filtering. Re-enable it.
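For illustration only (not part of the upstream patch): a minimal sketch, in the
style of the whisperv5 filter tests below, of the behaviour this change restores.
It assumes placement inside the whisperv5 package, since matchSingleTopic is
unexported; BytesToTopic and the 4-byte TopicLength are the package's own.

    func TestMatchSingleTopic_PartialTopic(t *testing.T) {
    	topic := BytesToTopic([]byte("test")) // full 4-byte envelope topic
    	// A partial (shorter than TopicLength) filter topic is treated as a
    	// prefix: "tes" matches "test" again after this fix.
    	if !matchSingleTopic(topic, []byte("tes")) {
    		t.Fatal(`partial topic "tes" should match full topic "test"`)
    	}
    	// The zero-length case is still rejected.
    	if matchSingleTopic(topic, nil) {
    		t.Fatal("empty partial topic should never match")
    	}
    }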
--- whisper/whisperv5/filter.go | 2 +- whisper/whisperv5/filter_test.go | 8 +++---- whisper/whisperv6/filter.go | 17 --------------- whisper/whisperv6/filter_test.go | 36 -------------------------------- 4 files changed, 5 insertions(+), 58 deletions(-) diff --git a/whisper/whisperv5/filter.go b/whisper/whisperv5/filter.go index 3190334ebbe3..9550a7e3893d 100644 --- a/whisper/whisperv5/filter.go +++ b/whisper/whisperv5/filter.go @@ -220,7 +220,7 @@ func matchSingleTopic(topic TopicType, bt []byte) bool { bt = bt[:TopicLength] } - if len(bt) < TopicLength { + if len(bt) == 0 { return false } diff --git a/whisper/whisperv5/filter_test.go b/whisper/whisperv5/filter_test.go index 01034a351386..c01c22668cf2 100644 --- a/whisper/whisperv5/filter_test.go +++ b/whisper/whisperv5/filter_test.go @@ -829,16 +829,16 @@ func TestMatchSingleTopic_WithTail_ReturnTrue(t *testing.T) { } } -func TestMatchSingleTopic_NotEquals_ReturnFalse(t *testing.T) { +func TestMatchSingleTopic_PartialTopic_ReturnTrue(t *testing.T) { bt := []byte("tes") - topic := BytesToTopic(bt) + topic := BytesToTopic([]byte("test")) - if matchSingleTopic(topic, bt) { + if !matchSingleTopic(topic, bt) { t.FailNow() } } -func TestMatchSingleTopic_InsufficientLength_ReturnFalse(t *testing.T) { +func TestMatchSingleTopic_NotEquals_ReturnFalse(t *testing.T) { bt := []byte("test") topic := BytesToTopic([]byte("not_equal")) diff --git a/whisper/whisperv6/filter.go b/whisper/whisperv6/filter.go index 2f170ddebefe..6a5b79674b5c 100644 --- a/whisper/whisperv6/filter.go +++ b/whisper/whisperv6/filter.go @@ -250,23 +250,6 @@ func (f *Filter) MatchEnvelope(envelope *Envelope) bool { return f.PoW <= 0 || envelope.pow >= f.PoW } -func matchSingleTopic(topic TopicType, bt []byte) bool { - if len(bt) > TopicLength { - bt = bt[:TopicLength] - } - - if len(bt) < TopicLength { - return false - } - - for j, b := range bt { - if topic[j] != b { - return false - } - } - return true -} - // IsPubKeyEqual checks that two public keys are equal func IsPubKeyEqual(a, b *ecdsa.PublicKey) bool { if !ValidatePublicKey(a) { diff --git a/whisper/whisperv6/filter_test.go b/whisper/whisperv6/filter_test.go index 0bb7986c390b..82e4aa024148 100644 --- a/whisper/whisperv6/filter_test.go +++ b/whisper/whisperv6/filter_test.go @@ -829,39 +829,3 @@ func TestVariableTopics(t *testing.T) { } } } - -func TestMatchSingleTopic_ReturnTrue(t *testing.T) { - bt := []byte("test") - topic := BytesToTopic(bt) - - if !matchSingleTopic(topic, bt) { - t.FailNow() - } -} - -func TestMatchSingleTopic_WithTail_ReturnTrue(t *testing.T) { - bt := []byte("test with tail") - topic := BytesToTopic([]byte("test")) - - if !matchSingleTopic(topic, bt) { - t.FailNow() - } -} - -func TestMatchSingleTopic_NotEquals_ReturnFalse(t *testing.T) { - bt := []byte("tes") - topic := BytesToTopic(bt) - - if matchSingleTopic(topic, bt) { - t.FailNow() - } -} - -func TestMatchSingleTopic_InsufficientLength_ReturnFalse(t *testing.T) { - bt := []byte("test") - topic := BytesToTopic([]byte("not_equal")) - - if matchSingleTopic(topic, bt) { - t.FailNow() - } -} From 8a040de60bd6b740ebe87cd8e1fe6bfdb6635d2f Mon Sep 17 00:00:00 2001 From: Yao Zengzeng Date: Tue, 14 Aug 2018 19:25:36 +0800 Subject: [PATCH 124/166] README.md: fix some typos (#17381) Signed-off-by: YaoZengzeng --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 86fcdeee4dde..c6bc91af174b 100644 --- a/README.md +++ b/README.md @@ -69,7 +69,7 @@ This command will: * Start up Geth's built-in interactive 
[JavaScript console](https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console), (via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://github.com/ethereum/wiki/wiki/JavaScript-API) as well as Geth's own [management APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs). - This too is optional and if you leave it out you can always attach to an already running Geth instance + This tool is optional and if you leave it out you can always attach to an already running Geth instance with `geth attach`. ### Full node on the Ethereum test network From 97887d98da703a31040bceee13bce9ee77fca673 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Tue, 14 Aug 2018 16:03:56 +0200 Subject: [PATCH 125/166] swarm/network, swarm/storage: validate chunk size (#17397) * swarm/network, swarm/storage: validate default chunk size * swarm/bmt, swarm/network, swarm/storage: update BMT hash initialisation * swarm/bmt: move segmentCount to tests * swarm/chunk: change chunk.DefaultSize to be untyped const * swarm/storage: add size validator * swarm/storage: add chunk size validation to localstore * swarm/storage: move validation from localstore to validator * swarm/storage: global chunk rules in MRU --- swarm/bmt/bmt.go | 5 +---- swarm/bmt/bmt_test.go | 19 +++++++++++++------ swarm/chunk/chunk.go | 5 +++++ swarm/network/stream/delivery.go | 7 +++++++ swarm/storage/chunker.go | 9 +++------ swarm/storage/hasherstore.go | 9 +++++---- swarm/storage/ldbstore_test.go | 11 ++++++----- swarm/storage/localstore.go | 10 +++------- swarm/storage/localstore_test.go | 8 +++++--- swarm/storage/mru/handler.go | 25 ++++++++----------------- swarm/storage/mru/resource_test.go | 8 +++----- swarm/storage/mru/testutil.go | 5 +---- swarm/storage/mru/update.go | 3 ++- swarm/storage/pyramid.go | 5 +++-- swarm/storage/types.go | 13 ++++++++++--- swarm/swarm.go | 13 ++++--------- 16 files changed, 79 insertions(+), 76 deletions(-) create mode 100644 swarm/chunk/chunk.go diff --git a/swarm/bmt/bmt.go b/swarm/bmt/bmt.go index 97e0e141edba..a85d4369e5b5 100644 --- a/swarm/bmt/bmt.go +++ b/swarm/bmt/bmt.go @@ -55,9 +55,6 @@ Two implementations are provided: */ const ( - // SegmentCount is the maximum number of segments of the underlying chunk - // Should be equal to max-chunk-data-size / hash-size - SegmentCount = 128 // PoolSize is the maximum number of bmt trees used by the hashers, i.e, // the maximum number of concurrent BMT hashing operations performed by the same hasher PoolSize = 8 @@ -318,7 +315,7 @@ func (h *Hasher) Sum(b []byte) (s []byte) { // with every full segment calls writeSection in a go routine func (h *Hasher) Write(b []byte) (int, error) { l := len(b) - if l == 0 || l > 4096 { + if l == 0 || l > h.pool.Size { return 0, nil } t := h.getTree() diff --git a/swarm/bmt/bmt_test.go b/swarm/bmt/bmt_test.go index 891d8cbb2940..760aa11d8b0f 100644 --- a/swarm/bmt/bmt_test.go +++ b/swarm/bmt/bmt_test.go @@ -34,6 +34,13 @@ import ( // the actual data length generated (could be longer than max datalength of the BMT) const BufferSize = 4128 +const ( + // segmentCount is the maximum number of segments of the underlying chunk + // Should be equal to max-chunk-data-size / hash-size + // Currently set to 128 == 4096 (default chunk size) / 32 (sha3.keccak256 size) + segmentCount = 128 +) + var counts = []int{1, 2, 3, 4, 5, 8, 9, 15, 16, 17, 32, 37, 42, 53, 63, 64, 65, 111, 127, 128} // calculates the Keccak256 SHA3 hash of the data @@ -224,14 
+231,14 @@ func TestHasherReuse(t *testing.T) { // tests if bmt reuse is not corrupting result func testHasherReuse(poolsize int, t *testing.T) { hasher := sha3.NewKeccak256 - pool := NewTreePool(hasher, SegmentCount, poolsize) + pool := NewTreePool(hasher, segmentCount, poolsize) defer pool.Drain(0) bmt := New(pool) for i := 0; i < 100; i++ { data := newData(BufferSize) n := rand.Intn(bmt.Size()) - err := testHasherCorrectness(bmt, hasher, data, n, SegmentCount) + err := testHasherCorrectness(bmt, hasher, data, n, segmentCount) if err != nil { t.Fatal(err) } @@ -241,7 +248,7 @@ func testHasherReuse(poolsize int, t *testing.T) { // Tests if pool can be cleanly reused even in concurrent use by several hasher func TestBMTConcurrentUse(t *testing.T) { hasher := sha3.NewKeccak256 - pool := NewTreePool(hasher, SegmentCount, PoolSize) + pool := NewTreePool(hasher, segmentCount, PoolSize) defer pool.Drain(0) cycles := 100 errc := make(chan error) @@ -451,7 +458,7 @@ func benchmarkBMTBaseline(t *testing.B, n int) { func benchmarkBMT(t *testing.B, n int) { data := newData(n) hasher := sha3.NewKeccak256 - pool := NewTreePool(hasher, SegmentCount, PoolSize) + pool := NewTreePool(hasher, segmentCount, PoolSize) bmt := New(pool) t.ReportAllocs() @@ -465,7 +472,7 @@ func benchmarkBMT(t *testing.B, n int) { func benchmarkBMTAsync(t *testing.B, n int, wh whenHash, double bool) { data := newData(n) hasher := sha3.NewKeccak256 - pool := NewTreePool(hasher, SegmentCount, PoolSize) + pool := NewTreePool(hasher, segmentCount, PoolSize) bmt := New(pool).NewAsyncWriter(double) idxs, segments := splitAndShuffle(bmt.SectionSize(), data) shuffle(len(idxs), func(i int, j int) { @@ -483,7 +490,7 @@ func benchmarkBMTAsync(t *testing.B, n int, wh whenHash, double bool) { func benchmarkPool(t *testing.B, poolsize, n int) { data := newData(n) hasher := sha3.NewKeccak256 - pool := NewTreePool(hasher, SegmentCount, poolsize) + pool := NewTreePool(hasher, segmentCount, poolsize) cycles := 100 t.ReportAllocs() diff --git a/swarm/chunk/chunk.go b/swarm/chunk/chunk.go new file mode 100644 index 000000000000..1449efccd0ef --- /dev/null +++ b/swarm/chunk/chunk.go @@ -0,0 +1,5 @@ +package chunk + +const ( + DefaultSize = 4096 +) diff --git a/swarm/network/stream/delivery.go b/swarm/network/stream/delivery.go index fa210e300741..36040339d3d8 100644 --- a/swarm/network/stream/delivery.go +++ b/swarm/network/stream/delivery.go @@ -24,6 +24,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p/discover" + cp "github.com/ethereum/go-ethereum/swarm/chunk" "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/network" "github.com/ethereum/go-ethereum/swarm/spancontext" @@ -229,6 +230,11 @@ R: for req := range d.receiveC { processReceivedChunksCount.Inc(1) + if len(req.SData) > cp.DefaultSize+8 { + log.Warn("received chunk is bigger than expected", "len", len(req.SData)) + continue R + } + // this should be has locally chunk, err := d.db.Get(context.TODO(), req.Addr) if err == nil { @@ -244,6 +250,7 @@ R: continue R default: } + chunk.SData = req.SData d.db.Put(context.TODO(), chunk) diff --git a/swarm/storage/chunker.go b/swarm/storage/chunker.go index b9b5022734c0..6d805b8e2903 100644 --- a/swarm/storage/chunker.go +++ b/swarm/storage/chunker.go @@ -25,6 +25,7 @@ import ( "time" "github.com/ethereum/go-ethereum/metrics" + 
"github.com/ethereum/go-ethereum/swarm/chunk" "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/spancontext" opentracing "github.com/opentracing/opentracing-go" @@ -69,10 +70,6 @@ var ( errOperationTimedOut = errors.New("operation timed out") ) -const ( - DefaultChunkSize int64 = 4096 -) - type ChunkerParams struct { chunkSize int64 hashSize int64 @@ -136,7 +133,7 @@ type TreeChunker struct { func TreeJoin(ctx context.Context, addr Address, getter Getter, depth int) *LazyChunkReader { jp := &JoinerParams{ ChunkerParams: ChunkerParams{ - chunkSize: DefaultChunkSize, + chunkSize: chunk.DefaultSize, hashSize: int64(len(addr)), }, addr: addr, @@ -156,7 +153,7 @@ func TreeSplit(ctx context.Context, data io.Reader, size int64, putter Putter) ( tsp := &TreeSplitterParams{ SplitterParams: SplitterParams{ ChunkerParams: ChunkerParams{ - chunkSize: DefaultChunkSize, + chunkSize: chunk.DefaultSize, hashSize: putter.RefSize(), }, reader: data, diff --git a/swarm/storage/hasherstore.go b/swarm/storage/hasherstore.go index 139c0ee031e6..bc23077c1832 100644 --- a/swarm/storage/hasherstore.go +++ b/swarm/storage/hasherstore.go @@ -22,6 +22,7 @@ import ( "sync" "github.com/ethereum/go-ethereum/crypto/sha3" + "github.com/ethereum/go-ethereum/swarm/chunk" "github.com/ethereum/go-ethereum/swarm/storage/encryption" ) @@ -57,7 +58,7 @@ func NewHasherStore(chunkStore ChunkStore, hashFunc SwarmHasher, toEncrypt bool) refSize := int64(hashSize) if toEncrypt { refSize += encryption.KeyLength - chunkEncryption = newChunkEncryption(DefaultChunkSize, refSize) + chunkEncryption = newChunkEncryption(chunk.DefaultSize, refSize) } return &hasherStore{ @@ -190,9 +191,9 @@ func (h *hasherStore) decryptChunkData(chunkData ChunkData, encryptionKey encryp // removing extra bytes which were just added for padding length := ChunkData(decryptedSpan).Size() - for length > DefaultChunkSize { - length = length + (DefaultChunkSize - 1) - length = length / DefaultChunkSize + for length > chunk.DefaultSize { + length = length + (chunk.DefaultSize - 1) + length = length / chunk.DefaultSize length *= h.refSize } diff --git a/swarm/storage/ldbstore_test.go b/swarm/storage/ldbstore_test.go index baf9e8c14276..5ee88baa5126 100644 --- a/swarm/storage/ldbstore_test.go +++ b/swarm/storage/ldbstore_test.go @@ -27,6 +27,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/swarm/chunk" "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/storage/mock/mem" @@ -184,7 +185,7 @@ func testIterator(t *testing.T, mock bool) { t.Fatalf("init dbStore failed: %v", err) } - chunks := GenerateRandomChunks(DefaultChunkSize, chunkcount) + chunks := GenerateRandomChunks(chunk.DefaultSize, chunkcount) wg := &sync.WaitGroup{} wg.Add(len(chunks)) @@ -294,7 +295,7 @@ func TestLDBStoreWithoutCollectGarbage(t *testing.T) { chunks := []*Chunk{} for i := 0; i < n; i++ { - c := GenerateRandomChunk(DefaultChunkSize) + c := GenerateRandomChunk(chunk.DefaultSize) chunks = append(chunks, c) log.Trace("generate random chunk", "idx", i, "chunk", c) } @@ -344,7 +345,7 @@ func TestLDBStoreCollectGarbage(t *testing.T) { chunks := []*Chunk{} for i := 0; i < n; i++ { - c := GenerateRandomChunk(DefaultChunkSize) + c := GenerateRandomChunk(chunk.DefaultSize) chunks = append(chunks, c) log.Trace("generate random chunk", "idx", i, "chunk", c) } @@ -398,7 +399,7 @@ func TestLDBStoreAddRemove(t 
*testing.T) { chunks := []*Chunk{} for i := 0; i < n; i++ { - c := GenerateRandomChunk(DefaultChunkSize) + c := GenerateRandomChunk(chunk.DefaultSize) chunks = append(chunks, c) log.Trace("generate random chunk", "idx", i, "chunk", c) } @@ -460,7 +461,7 @@ func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) { chunks := []*Chunk{} for i := 0; i < capacity; i++ { - c := GenerateRandomChunk(DefaultChunkSize) + c := GenerateRandomChunk(chunk.DefaultSize) chunks = append(chunks, c) log.Trace("generate random chunk", "idx", i, "chunk", c) } diff --git a/swarm/storage/localstore.go b/swarm/storage/localstore.go index 096d150ae393..9e34749797c5 100644 --- a/swarm/storage/localstore.go +++ b/swarm/storage/localstore.go @@ -98,20 +98,16 @@ func NewTestLocalStoreForAddr(params *LocalStoreParams) (*LocalStore, error) { // After the LDBStore.Put, it is ensured that the MemStore // contains the chunk with the same data, but nil ReqC channel. func (ls *LocalStore) Put(ctx context.Context, chunk *Chunk) { - if l := len(chunk.SData); l < 9 { - log.Debug("incomplete chunk data", "addr", chunk.Addr, "length", l) - chunk.SetErrored(ErrChunkInvalid) - chunk.markAsStored() - return - } valid := true + // ls.Validators contains a list of one validator per chunk type. + // if one validator succeeds, then the chunk is valid for _, v := range ls.Validators { if valid = v.Validate(chunk.Addr, chunk.SData); valid { break } } if !valid { - log.Trace("invalid content address", "addr", chunk.Addr) + log.Trace("invalid chunk", "addr", chunk.Addr, "len", len(chunk.SData)) chunk.SetErrored(ErrChunkInvalid) chunk.markAsStored() return diff --git a/swarm/storage/localstore_test.go b/swarm/storage/localstore_test.go index 2bb81efa3a03..ae62218fe8ac 100644 --- a/swarm/storage/localstore_test.go +++ b/swarm/storage/localstore_test.go @@ -20,6 +20,8 @@ import ( "io/ioutil" "os" "testing" + + "github.com/ethereum/go-ethereum/swarm/chunk" ) var ( @@ -61,7 +63,7 @@ func TestValidator(t *testing.T) { // add content address validator and check puts // bad should fail, good should pass store.Validators = append(store.Validators, NewContentAddressValidator(hashfunc)) - chunks = GenerateRandomChunks(DefaultChunkSize, 2) + chunks = GenerateRandomChunks(chunk.DefaultSize, 2) goodChunk = chunks[0] badChunk = chunks[1] copy(badChunk.SData, goodChunk.SData) @@ -79,7 +81,7 @@ func TestValidator(t *testing.T) { var negV boolTestValidator store.Validators = append(store.Validators, negV) - chunks = GenerateRandomChunks(DefaultChunkSize, 2) + chunks = GenerateRandomChunks(chunk.DefaultSize, 2) goodChunk = chunks[0] badChunk = chunks[1] copy(badChunk.SData, goodChunk.SData) @@ -97,7 +99,7 @@ func TestValidator(t *testing.T) { var posV boolTestValidator = true store.Validators = append(store.Validators, posV) - chunks = GenerateRandomChunks(DefaultChunkSize, 2) + chunks = GenerateRandomChunks(chunk.DefaultSize, 2) goodChunk = chunks[0] badChunk = chunks[1] copy(badChunk.SData, goodChunk.SData) diff --git a/swarm/storage/mru/handler.go b/swarm/storage/mru/handler.go index 32f43d502953..57561fd14be2 100644 --- a/swarm/storage/mru/handler.go +++ b/swarm/storage/mru/handler.go @@ -21,17 +21,15 @@ package mru import ( "bytes" "context" - "fmt" "sync" "time" "unsafe" + "github.com/ethereum/go-ethereum/swarm/chunk" "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/storage" ) -const chunkSize = 4096 // temporary until we implement FileStore in the resourcehandler - type Handler struct { 
chunkStore *storage.NetStore HashSize int @@ -66,8 +64,7 @@ func init() { } // NewHandler creates a new Mutable Resource API -func NewHandler(params *HandlerParams) (*Handler, error) { - +func NewHandler(params *HandlerParams) *Handler { rh := &Handler{ resources: make(map[uint64]*resource), storeTimeout: defaultStoreTimeout, @@ -82,7 +79,7 @@ func NewHandler(params *HandlerParams) (*Handler, error) { hashPool.Put(hashfunc) } - return rh, nil + return rh } // SetStore sets the store backend for the Mutable Resource API @@ -94,9 +91,8 @@ func (h *Handler) SetStore(store *storage.NetStore) { // If it looks like a resource update, the chunk address is checked against the ownerAddr of the update's signature // It implements the storage.ChunkValidator interface func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { - dataLength := len(data) - if dataLength < minimumChunkLength { + if dataLength < minimumChunkLength || dataLength > chunk.DefaultSize+8 { return false } @@ -106,7 +102,7 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { rootAddr, _ := metadataHash(data) valid := bytes.Equal(chunkAddr, rootAddr) if !valid { - log.Debug(fmt.Sprintf("Invalid root metadata chunk with address: %s", chunkAddr.Hex())) + log.Debug("Invalid root metadata chunk with address", "addr", chunkAddr.Hex()) } return valid } @@ -118,7 +114,7 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { // First, deserialize the chunk var r SignedResourceUpdate if err := r.fromChunk(chunkAddr, data); err != nil { - log.Debug("Invalid resource chunk with address %s: %s ", chunkAddr.Hex(), err.Error()) + log.Debug("Invalid resource chunk", "addr", chunkAddr.Hex(), "err", err.Error()) return false } @@ -126,7 +122,7 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { // that was used to retrieve this chunk // if this validation fails, someone forged a chunk. if !bytes.Equal(chunkAddr, r.updateHeader.UpdateAddr()) { - log.Debug("period,version,rootAddr contained in update chunk do not match updateAddr %s", chunkAddr.Hex()) + log.Debug("period,version,rootAddr contained in update chunk do not match updateAddr", "addr", chunkAddr.Hex()) return false } @@ -134,7 +130,7 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { // If it fails, it means either the signature is not valid, data is corrupted // or someone is trying to update someone else's resource. if err := r.Verify(); err != nil { - log.Debug("Invalid signature: %v", err) + log.Debug("Invalid signature", "err", err) return false } @@ -172,11 +168,6 @@ func (h *Handler) GetVersion(rootAddr storage.Address) (uint32, error) { return rsrc.version, nil } -// \TODO should be hashsize * branches from the chosen chunker, implement with FileStore -func (h *Handler) chunkSize() int64 { - return chunkSize -} - // New creates a new metadata chunk out of the request passed in. 
func (h *Handler) New(ctx context.Context, request *Request) error { diff --git a/swarm/storage/mru/resource_test.go b/swarm/storage/mru/resource_test.go index 95c9eccdfc7b..76d7c58a1e89 100644 --- a/swarm/storage/mru/resource_test.go +++ b/swarm/storage/mru/resource_test.go @@ -30,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/contracts/ens" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/swarm/chunk" "github.com/ethereum/go-ethereum/swarm/multihash" "github.com/ethereum/go-ethereum/swarm/storage" ) @@ -776,14 +777,11 @@ func TestValidatorInStore(t *testing.T) { // set up resource handler and add is as a validator to the localstore rhParams := &HandlerParams{} - rh, err := NewHandler(rhParams) - if err != nil { - t.Fatal(err) - } + rh := NewHandler(rhParams) store.Validators = append(store.Validators, rh) // create content addressed chunks, one good, one faulty - chunks := storage.GenerateRandomChunks(storage.DefaultChunkSize, 2) + chunks := storage.GenerateRandomChunks(chunk.DefaultSize, 2) goodChunk := chunks[0] badChunk := chunks[1] badChunk.SData = goodChunk.SData diff --git a/swarm/storage/mru/testutil.go b/swarm/storage/mru/testutil.go index 751f51af3267..6efcba9aba3a 100644 --- a/swarm/storage/mru/testutil.go +++ b/swarm/storage/mru/testutil.go @@ -38,10 +38,7 @@ func (t *TestHandler) Close() { // NewTestHandler creates Handler object to be used for testing purposes. func NewTestHandler(datadir string, params *HandlerParams) (*TestHandler, error) { path := filepath.Join(datadir, testDbDirName) - rh, err := NewHandler(params) - if err != nil { - return nil, fmt.Errorf("resource handler create fail: %v", err) - } + rh := NewHandler(params) localstoreparams := storage.NewDefaultLocalStoreParams() localstoreparams.Init(path) localStore, err := storage.NewLocalStore(localstoreparams, nil) diff --git a/swarm/storage/mru/update.go b/swarm/storage/mru/update.go index 88c4ac4e5962..d1bd37ddff02 100644 --- a/swarm/storage/mru/update.go +++ b/swarm/storage/mru/update.go @@ -20,6 +20,7 @@ import ( "encoding/binary" "errors" + "github.com/ethereum/go-ethereum/swarm/chunk" "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/multihash" ) @@ -42,7 +43,7 @@ const chunkPrefixLength = 2 + 2 // // Minimum size is Header + 1 (minimum data length, enforced) const minimumUpdateDataLength = updateHeaderLength + 1 -const maxUpdateDataLength = chunkSize - signatureLength - updateHeaderLength - chunkPrefixLength +const maxUpdateDataLength = chunk.DefaultSize - signatureLength - updateHeaderLength - chunkPrefixLength // binaryPut serializes the resource update information into the given slice func (r *resourceUpdate) binaryPut(serializedData []byte) error { diff --git a/swarm/storage/pyramid.go b/swarm/storage/pyramid.go index 2923c81c5a3d..36ff66d045d5 100644 --- a/swarm/storage/pyramid.go +++ b/swarm/storage/pyramid.go @@ -25,6 +25,7 @@ import ( "sync" "time" + "github.com/ethereum/go-ethereum/swarm/chunk" "github.com/ethereum/go-ethereum/swarm/log" ) @@ -101,11 +102,11 @@ func NewPyramidSplitterParams(addr Address, reader io.Reader, putter Putter, get New chunks to store are store using the putter which the caller provides. 
*/ func PyramidSplit(ctx context.Context, reader io.Reader, putter Putter, getter Getter) (Address, func(context.Context) error, error) { - return NewPyramidSplitter(NewPyramidSplitterParams(nil, reader, putter, getter, DefaultChunkSize)).Split(ctx) + return NewPyramidSplitter(NewPyramidSplitterParams(nil, reader, putter, getter, chunk.DefaultSize)).Split(ctx) } func PyramidAppend(ctx context.Context, addr Address, reader io.Reader, putter Putter, getter Getter) (Address, func(context.Context) error, error) { - return NewPyramidSplitter(NewPyramidSplitterParams(addr, reader, putter, getter, DefaultChunkSize)).Append(ctx) + return NewPyramidSplitter(NewPyramidSplitterParams(addr, reader, putter, getter, chunk.DefaultSize)).Append(ctx) } // Entry to create a tree node diff --git a/swarm/storage/types.go b/swarm/storage/types.go index 3114ef5767a2..53e3af485a70 100644 --- a/swarm/storage/types.go +++ b/swarm/storage/types.go @@ -30,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto/sha3" "github.com/ethereum/go-ethereum/swarm/bmt" + "github.com/ethereum/go-ethereum/swarm/chunk" ) const MaxPO = 16 @@ -114,7 +115,9 @@ func MakeHashFunc(hash string) SwarmHasher { case "BMT": return func() SwarmHash { hasher := sha3.NewKeccak256 - pool := bmt.NewTreePool(hasher, bmt.SegmentCount, bmt.PoolSize) + hasherSize := hasher().Size() + segmentCount := chunk.DefaultSize / hasherSize + pool := bmt.NewTreePool(hasher, segmentCount, bmt.PoolSize) return bmt.New(pool) } } @@ -230,8 +233,8 @@ func GenerateRandomChunk(dataSize int64) *Chunk { func GenerateRandomChunks(dataSize int64, count int) (chunks []*Chunk) { var i int hasher := MakeHashFunc(DefaultHash)() - if dataSize > DefaultChunkSize { - dataSize = DefaultChunkSize + if dataSize > chunk.DefaultSize { + dataSize = chunk.DefaultSize } for i = 0; i < count; i++ { @@ -345,6 +348,10 @@ func NewContentAddressValidator(hasher SwarmHasher) *ContentAddressValidator { // Validate that the given key is a valid content address for the given data func (v *ContentAddressValidator) Validate(addr Address, data []byte) bool { + if l := len(data); l < 9 || l > chunk.DefaultSize+8 { + return false + } + hasher := v.Hasher() hasher.ResetWithLength(data[:8]) hasher.Write(data[8:]) diff --git a/swarm/swarm.go b/swarm/swarm.go index c380a376f67e..f731ff33d7d8 100644 --- a/swarm/swarm.go +++ b/swarm/swarm.go @@ -195,18 +195,13 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e var resourceHandler *mru.Handler rhparams := &mru.HandlerParams{} - resourceHandler, err = mru.NewHandler(rhparams) - if err != nil { - return nil, err - } + resourceHandler = mru.NewHandler(rhparams) resourceHandler.SetStore(netStore) - var validators []storage.ChunkValidator - validators = append(validators, storage.NewContentAddressValidator(storage.MakeHashFunc(storage.DefaultHash))) - if resourceHandler != nil { - validators = append(validators, resourceHandler) + self.lstore.Validators = []storage.ChunkValidator{ + storage.NewContentAddressValidator(storage.MakeHashFunc(storage.DefaultHash)), + resourceHandler, } - self.lstore.Validators = validators // setup local store log.Debug(fmt.Sprintf("Set up local storage")) From e0e0e53401e93733d921338b6d794162c40a7883 Mon Sep 17 00:00:00 2001 From: gary rong Date: Tue, 14 Aug 2018 23:30:42 +0800 Subject: [PATCH 126/166] crypto: change formula for create2 (#17393) --- core/vm/evm.go | 2 +- crypto/crypto.go | 4 ++-- 2 files changed, 3 
insertions(+), 3 deletions(-)

diff --git a/core/vm/evm.go b/core/vm/evm.go
index a2722537ddfd..a24f6f38650a 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -427,7 +427,7 @@ func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.I
 
 // Create2 creates a new contract using code as deployment code.
 //
-// The different between Create2 with Create is Create2 uses sha3(msg.sender ++ salt ++ init_code)[12:]
+// The difference between Create2 and Create is that Create2 uses sha3(0xff ++ msg.sender ++ salt ++ sha3(init_code))[12:]
 // instead of the usual sender-and-nonce-hash as the address where the contract is initialized at.
 func (evm *EVM) Create2(caller ContractRef, code []byte, gas uint64, endowment *big.Int, salt *big.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
 	contractAddr = crypto.CreateAddress2(caller.Address(), common.BigToHash(salt), code)
diff --git a/crypto/crypto.go b/crypto/crypto.go
index dec6e3c19e71..3211957e0ab9 100644
--- a/crypto/crypto.go
+++ b/crypto/crypto.go
@@ -78,8 +78,8 @@ func CreateAddress(b common.Address, nonce uint64) common.Address {
 
 // CreateAddress2 creates an ethereum address given the address bytes, initial
 // contract code and a salt.
-func CreateAddress2(b common.Address, salt common.Hash, code []byte) common.Address {
-	return common.BytesToAddress(Keccak256([]byte{0xff}, b.Bytes(), salt.Bytes(), code)[12:])
+func CreateAddress2(b common.Address, salt [32]byte, code []byte) common.Address {
+	return common.BytesToAddress(Keccak256([]byte{0xff}, b.Bytes(), salt[:], Keccak256(code))[12:])
 }
 
 // ToECDSA creates a private key with the given D value.

From a1783d169732dd34aa8c7d68f411ce741c1a5015 Mon Sep 17 00:00:00 2001
From: gary rong
Date: Tue, 14 Aug 2018 23:34:33 +0800
Subject: [PATCH 127/166] miner: move agent logic to worker (#17351)

* miner: move agent logic to worker

* miner: polish

* core: persist block before reorg

---
 core/blockchain.go   |   7 +-
 miner/agent.go       | 116 -------
 miner/miner.go       |  64 ++--
 miner/worker.go      | 721 ++++++++++++++++++++++++-------------------
 miner/worker_test.go | 212 +++++++++++++
 5 files changed, 646 insertions(+), 474 deletions(-)
 delete mode 100644 miner/agent.go
 create mode 100644 miner/worker_test.go

diff --git a/core/blockchain.go b/core/blockchain.go
index 62dc26125051..0461da7fd937 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -899,9 +899,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
 	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
 		return NonStatTy, err
 	}
-	// Write other block data using a batch.
-	batch := bc.db.NewBatch()
-	rawdb.WriteBlock(batch, block)
+	rawdb.WriteBlock(bc.db, block)
 
 	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
 	if err != nil {
@@ -955,6 +953,9 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
 			}
 		}
 	}
+
+	// Write other block data using a batch.
+	batch := bc.db.NewBatch()
 	rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
 
 	// If the total difficulty is higher than our known, add it to the canonical chain
diff --git a/miner/agent.go b/miner/agent.go
deleted file mode 100644
index e922ea153c2e..000000000000
--- a/miner/agent.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package miner - -import ( - "sync" - "sync/atomic" - - "github.com/ethereum/go-ethereum/consensus" - "github.com/ethereum/go-ethereum/log" -) - -type CpuAgent struct { - mu sync.Mutex - - taskCh chan *Package - returnCh chan<- *Package - stop chan struct{} - quitCurrentOp chan struct{} - - chain consensus.ChainReader - engine consensus.Engine - - started int32 // started indicates whether the agent is currently started -} - -func NewCpuAgent(chain consensus.ChainReader, engine consensus.Engine) *CpuAgent { - agent := &CpuAgent{ - chain: chain, - engine: engine, - stop: make(chan struct{}, 1), - taskCh: make(chan *Package, 1), - } - return agent -} - -func (self *CpuAgent) AssignTask(p *Package) { - if atomic.LoadInt32(&self.started) == 1 { - self.taskCh <- p - } -} -func (self *CpuAgent) DeliverTo(ch chan<- *Package) { self.returnCh = ch } - -func (self *CpuAgent) Start() { - if !atomic.CompareAndSwapInt32(&self.started, 0, 1) { - return // agent already started - } - go self.update() -} - -func (self *CpuAgent) Stop() { - if !atomic.CompareAndSwapInt32(&self.started, 1, 0) { - return // agent already stopped - } - self.stop <- struct{}{} -done: - // Empty work channel - for { - select { - case <-self.taskCh: - default: - break done - } - } -} - -func (self *CpuAgent) update() { -out: - for { - select { - case p := <-self.taskCh: - self.mu.Lock() - if self.quitCurrentOp != nil { - close(self.quitCurrentOp) - } - self.quitCurrentOp = make(chan struct{}) - go self.mine(p, self.quitCurrentOp) - self.mu.Unlock() - case <-self.stop: - self.mu.Lock() - if self.quitCurrentOp != nil { - close(self.quitCurrentOp) - self.quitCurrentOp = nil - } - self.mu.Unlock() - break out - } - } -} - -func (self *CpuAgent) mine(p *Package, stop <-chan struct{}) { - var err error - if p.Block, err = self.engine.Seal(self.chain, p.Block, stop); p.Block != nil { - log.Info("Successfully sealed new block", "number", p.Block.Number(), "hash", p.Block.Hash()) - self.returnCh <- p - } else { - if err != nil { - log.Warn("Block sealing failed", "err", err) - } - self.returnCh <- nil - } -} diff --git a/miner/miner.go b/miner/miner.go index 4c5717c8ad60..e350e456e977 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -21,14 +21,12 @@ import ( "fmt" "sync/atomic" - "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth/downloader" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" @@ -36,10 +34,8 @@ 
import ( // Backend wraps all methods required for mining. type Backend interface { - AccountManager() *accounts.Manager BlockChain() *core.BlockChain TxPool() *core.TxPool - ChainDb() ethdb.Database } // Miner creates blocks and searches for proof-of-work values. @@ -49,6 +45,7 @@ type Miner struct { coinbase common.Address eth Backend engine consensus.Engine + exitCh chan struct{} canStart int32 // can start indicates whether we can start the mining operation shouldStart int32 // should start indicates whether we should start after sync @@ -59,10 +56,10 @@ func New(eth Backend, config *params.ChainConfig, mux *event.TypeMux, engine con eth: eth, mux: mux, engine: engine, + exitCh: make(chan struct{}), worker: newWorker(config, engine, eth, mux), canStart: 1, } - miner.Register(NewCpuAgent(eth.BlockChain(), engine)) go miner.update() return miner @@ -74,28 +71,35 @@ func New(eth Backend, config *params.ChainConfig, mux *event.TypeMux, engine con // and halt your mining operation for as long as the DOS continues. func (self *Miner) update() { events := self.mux.Subscribe(downloader.StartEvent{}, downloader.DoneEvent{}, downloader.FailedEvent{}) -out: - for ev := range events.Chan() { - switch ev.Data.(type) { - case downloader.StartEvent: - atomic.StoreInt32(&self.canStart, 0) - if self.Mining() { - self.Stop() - atomic.StoreInt32(&self.shouldStart, 1) - log.Info("Mining aborted due to sync") - } - case downloader.DoneEvent, downloader.FailedEvent: - shouldStart := atomic.LoadInt32(&self.shouldStart) == 1 + defer events.Unsubscribe() - atomic.StoreInt32(&self.canStart, 1) - atomic.StoreInt32(&self.shouldStart, 0) - if shouldStart { - self.Start(self.coinbase) + for { + select { + case ev := <-events.Chan(): + if ev == nil { + return + } + switch ev.Data.(type) { + case downloader.StartEvent: + atomic.StoreInt32(&self.canStart, 0) + if self.Mining() { + self.Stop() + atomic.StoreInt32(&self.shouldStart, 1) + log.Info("Mining aborted due to sync") + } + case downloader.DoneEvent, downloader.FailedEvent: + shouldStart := atomic.LoadInt32(&self.shouldStart) == 1 + + atomic.StoreInt32(&self.canStart, 1) + atomic.StoreInt32(&self.shouldStart, 0) + if shouldStart { + self.Start(self.coinbase) + } + // stop immediately and ignore all further pending events + return } - // unsubscribe. 
we're only interested in this event once - events.Unsubscribe() - // stop immediately and ignore all further pending events - break out + case <-self.exitCh: + return } } } @@ -109,7 +113,6 @@ func (self *Miner) Start(coinbase common.Address) { return } self.worker.start() - self.worker.commitNewWork() } func (self *Miner) Stop() { @@ -117,12 +120,9 @@ func (self *Miner) Stop() { atomic.StoreInt32(&self.shouldStart, 0) } -func (self *Miner) Register(agent Agent) { - self.worker.register(agent) -} - -func (self *Miner) Unregister(agent Agent) { - self.worker.unregister(agent) +func (self *Miner) Close() { + self.worker.close() + close(self.exitCh) } func (self *Miner) Mining() bool { diff --git a/miner/worker.go b/miner/worker.go index ae695f019818..81a63c29a4f9 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -32,16 +32,14 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" ) const ( - resultQueueSize = 10 - miningLogAtDepth = 5 - + // resultQueueSize is the size of channel listening to sealing result. + resultQueueSize = 10 // txChanSize is the size of channel listening to NewTxsEvent. // The number is referenced from the size of tx pool. txChanSize = 4096 @@ -49,17 +47,10 @@ const ( chainHeadChanSize = 10 // chainSideChanSize is the size of channel listening to ChainSideEvent. chainSideChanSize = 10 + miningLogAtDepth = 5 ) -// Agent can register themselves with the worker -type Agent interface { - AssignTask(*Package) - DeliverTo(chan<- *Package) - Start() - Stop() -} - -// Env is the workers current environment and holds all of the current state information. +// Env is the worker's current environment and holds all of the current state information. type Env struct { config *params.ChainConfig signer types.Signer @@ -74,25 +65,124 @@ type Env struct { header *types.Header txs []*types.Transaction receipts []*types.Receipt +} - createdAt time.Time +func (env *Env) commitTransaction(tx *types.Transaction, bc *core.BlockChain, coinbase common.Address, gp *core.GasPool) (error, []*types.Log) { + snap := env.state.Snapshot() + + receipt, _, err := core.ApplyTransaction(env.config, bc, &coinbase, gp, env.state, env.header, tx, &env.header.GasUsed, vm.Config{}) + if err != nil { + env.state.RevertToSnapshot(snap) + return err, nil + } + env.txs = append(env.txs, tx) + env.receipts = append(env.receipts, receipt) + + return nil, receipt.Logs } -// Package contains all information for consensus engine sealing and result submitting. -type Package struct { - Receipts []*types.Receipt - State *state.StateDB - Block *types.Block +func (env *Env) commitTransactions(mux *event.TypeMux, txs *types.TransactionsByPriceAndNonce, bc *core.BlockChain, coinbase common.Address) { + if env.gasPool == nil { + env.gasPool = new(core.GasPool).AddGas(env.header.GasLimit) + } + + var coalescedLogs []*types.Log + + for { + // If we don't have enough gas for any further transactions then we're done + if env.gasPool.Gas() < params.TxGas { + log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas) + break + } + // Retrieve the next transaction and abort if all done + tx := txs.Peek() + if tx == nil { + break + } + // Error may be ignored here. 
The error has already been checked
+			// during transaction acceptance in the transaction pool.
+			//
+			// We use the eip155 signer regardless of the current hf.
+			from, _ := types.Sender(env.signer, tx)
+			// Check whether the tx is replay protected. If we're not in the EIP155 hf
+			// phase, start ignoring the sender until we do.
+			if tx.Protected() && !env.config.IsEIP155(env.header.Number) {
+				log.Trace("Ignoring replay protected transaction", "hash", tx.Hash(), "eip155", env.config.EIP155Block)
+
+				txs.Pop()
+				continue
+			}
+			// Start executing the transaction
+			env.state.Prepare(tx.Hash(), common.Hash{}, env.tcount)
+
+			err, logs := env.commitTransaction(tx, bc, coinbase, env.gasPool)
+			switch err {
+			case core.ErrGasLimitReached:
+				// Pop the current out-of-gas transaction without shifting in the next from the account
+				log.Trace("Gas limit exceeded for current block", "sender", from)
+				txs.Pop()
+
+			case core.ErrNonceTooLow:
+				// New head notification data race between the transaction pool and miner, shift
+				log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
+				txs.Shift()
+
+			case core.ErrNonceTooHigh:
+				// Reorg notification data race between the transaction pool and miner, skip account
+				log.Trace("Skipping account with high nonce", "sender", from, "nonce", tx.Nonce())
+				txs.Pop()
+
+			case nil:
+				// Everything ok, collect the logs and shift in the next transaction from the same account
+				coalescedLogs = append(coalescedLogs, logs...)
+				env.tcount++
+				txs.Shift()
+
+			default:
+				// Strange error, discard the transaction and get the next in line (note, the
+				// nonce-too-high clause will prevent us from executing in vain).
+				log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
+				txs.Shift()
+			}
+	}
+
+	if len(coalescedLogs) > 0 || env.tcount > 0 {
+		// make a copy, the state caches the logs and these logs get "upgraded" from pending to mined
+		// logs by filling in the block hash when the block was mined by the local miner. This can
+		// cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed.
+		cpy := make([]*types.Log, len(coalescedLogs))
+		for i, l := range coalescedLogs {
+			cpy[i] = new(types.Log)
+			*cpy[i] = *l
+		}
+		go func(logs []*types.Log, tcount int) {
+			if len(logs) > 0 {
+				mux.Post(core.PendingLogsEvent{Logs: logs})
+			}
+			if tcount > 0 {
+				mux.Post(core.PendingStateEvent{})
+			}
+		}(cpy, env.tcount)
+	}
 }

-// worker is the main object which takes care of applying messages to the new state
+// task contains all information for consensus engine sealing and result submitting.
+type task struct {
+	receipts  []*types.Receipt
+	state     *state.StateDB
+	block     *types.Block
+	createdAt time.Time
+}
+
+// worker is the main object which takes care of submitting new work to consensus engine
+// and gathering the sealing result.
 type worker struct {
 	config *params.ChainConfig
 	engine consensus.Engine
+	eth    Backend
+	chain  *core.BlockChain

-	mu sync.Mutex
-
-	// update loop
+	// Subscriptions
 	mux          *event.TypeMux
 	txsCh        chan core.NewTxsEvent
 	txsSub       event.Subscription
@@ -101,31 +191,30 @@ type worker struct {
 	chainSideCh  chan core.ChainSideEvent
 	chainSideSub event.Subscription

-	agents map[Agent]struct{}
-	recv   chan *Package
+	// Channels
+	newWork  chan struct{}
+	taskCh   chan *task
+	resultCh chan *task
+	exitCh   chan struct{}

-	eth     Backend
-	chain   *core.BlockChain
-	proc    core.Validator
-	chainDb ethdb.Database
+	current        *Env // An environment for current running cycle.
+ possibleUncles map[common.Hash]*types.Block // A set of side blocks as the possible uncle blocks. + unconfirmed *unconfirmedBlocks // A set of locally mined blocks pending canonicalness confirmations. + mu sync.RWMutex // The lock used to protect the coinbase and extra fields coinbase common.Address extra []byte - currentMu sync.Mutex - current *Env - - snapshotMu sync.RWMutex + snapshotMu sync.RWMutex // The lock used to protect the block snapshot and state snapshot snapshotBlock *types.Block snapshotState *state.StateDB - uncleMu sync.Mutex - possibleUncles map[common.Hash]*types.Block - - unconfirmed *unconfirmedBlocks // set of locally mined blocks pending canonicalness confirmations - // atomic status counters running int32 // The indicator whether the consensus engine is running or not. + + // Test hooks + newTaskHook func(*task) // Method to call upon receiving a new sealing task + fullTaskInterval func() // Method to call before pushing the full sealing task } func newWorker(config *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux) *worker { @@ -134,220 +223,274 @@ func newWorker(config *params.ChainConfig, engine consensus.Engine, eth Backend, engine: engine, eth: eth, mux: mux, - txsCh: make(chan core.NewTxsEvent, txChanSize), - chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize), - chainSideCh: make(chan core.ChainSideEvent, chainSideChanSize), - chainDb: eth.ChainDb(), - recv: make(chan *Package, resultQueueSize), chain: eth.BlockChain(), - proc: eth.BlockChain().Validator(), possibleUncles: make(map[common.Hash]*types.Block), - agents: make(map[Agent]struct{}), unconfirmed: newUnconfirmedBlocks(eth.BlockChain(), miningLogAtDepth), + txsCh: make(chan core.NewTxsEvent, txChanSize), + chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize), + chainSideCh: make(chan core.ChainSideEvent, chainSideChanSize), + newWork: make(chan struct{}, 1), + taskCh: make(chan *task), + resultCh: make(chan *task, resultQueueSize), + exitCh: make(chan struct{}), } // Subscribe NewTxsEvent for tx pool worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh) // Subscribe events for blockchain worker.chainHeadSub = eth.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh) worker.chainSideSub = eth.BlockChain().SubscribeChainSideEvent(worker.chainSideCh) - go worker.update() - go worker.wait() - worker.commitNewWork() + go worker.mainLoop() + go worker.resultLoop() + go worker.taskLoop() + // Submit first work to initialize pending state. + worker.newWork <- struct{}{} return worker } -func (self *worker) setEtherbase(addr common.Address) { - self.mu.Lock() - defer self.mu.Unlock() - self.coinbase = addr +// setEtherbase sets the etherbase used to initialize the block coinbase field. +func (w *worker) setEtherbase(addr common.Address) { + w.mu.Lock() + defer w.mu.Unlock() + w.coinbase = addr } -func (self *worker) setExtra(extra []byte) { - self.mu.Lock() - defer self.mu.Unlock() - self.extra = extra +// setExtra sets the content used to initialize the block extra field. +func (w *worker) setExtra(extra []byte) { + w.mu.Lock() + defer w.mu.Unlock() + w.extra = extra } -func (self *worker) pending() (*types.Block, *state.StateDB) { +// pending returns the pending state and corresponding block. 
+func (w *worker) pending() (*types.Block, *state.StateDB) { // return a snapshot to avoid contention on currentMu mutex - self.snapshotMu.RLock() - defer self.snapshotMu.RUnlock() - return self.snapshotBlock, self.snapshotState.Copy() + w.snapshotMu.RLock() + defer w.snapshotMu.RUnlock() + if w.snapshotState == nil { + return nil, nil + } + return w.snapshotBlock, w.snapshotState.Copy() } -func (self *worker) pendingBlock() *types.Block { +// pendingBlock returns pending block. +func (w *worker) pendingBlock() *types.Block { // return a snapshot to avoid contention on currentMu mutex - self.snapshotMu.RLock() - defer self.snapshotMu.RUnlock() - return self.snapshotBlock + w.snapshotMu.RLock() + defer w.snapshotMu.RUnlock() + return w.snapshotBlock } -func (self *worker) start() { - self.mu.Lock() - defer self.mu.Unlock() - atomic.StoreInt32(&self.running, 1) - for agent := range self.agents { - agent.Start() - } +// start sets the running status as 1 and triggers new work submitting. +func (w *worker) start() { + atomic.StoreInt32(&w.running, 1) + w.newWork <- struct{}{} } -func (self *worker) stop() { - self.mu.Lock() - defer self.mu.Unlock() - - atomic.StoreInt32(&self.running, 0) - for agent := range self.agents { - agent.Stop() - } +// stop sets the running status as 0. +func (w *worker) stop() { + atomic.StoreInt32(&w.running, 0) } -func (self *worker) isRunning() bool { - return atomic.LoadInt32(&self.running) == 1 +// isRunning returns an indicator whether worker is running or not. +func (w *worker) isRunning() bool { + return atomic.LoadInt32(&w.running) == 1 } -func (self *worker) register(agent Agent) { - self.mu.Lock() - defer self.mu.Unlock() - self.agents[agent] = struct{}{} - agent.DeliverTo(self.recv) - if self.isRunning() { - agent.Start() +// close terminates all background threads maintained by the worker and cleans up buffered channels. +// Note the worker does not support being closed multiple times. +func (w *worker) close() { + close(w.exitCh) + // Clean up buffered channels + for empty := false; !empty; { + select { + case <-w.resultCh: + default: + empty = true + } } } -func (self *worker) unregister(agent Agent) { - self.mu.Lock() - defer self.mu.Unlock() - delete(self.agents, agent) - agent.Stop() -} - -func (self *worker) update() { - defer self.txsSub.Unsubscribe() - defer self.chainHeadSub.Unsubscribe() - defer self.chainSideSub.Unsubscribe() +// mainLoop is a standalone goroutine to regenerate the sealing task based on the received event. +func (w *worker) mainLoop() { + defer w.txsSub.Unsubscribe() + defer w.chainHeadSub.Unsubscribe() + defer w.chainSideSub.Unsubscribe() for { - // A real event arrived, process interesting content select { - // Handle ChainHeadEvent - case <-self.chainHeadCh: - self.commitNewWork() - - // Handle ChainSideEvent - case ev := <-self.chainSideCh: - self.uncleMu.Lock() - self.possibleUncles[ev.Block.Hash()] = ev.Block - self.uncleMu.Unlock() - - // Handle NewTxsEvent - case ev := <-self.txsCh: + case <-w.newWork: + // Submit a work when the worker is created or started. + w.commitNewWork() + + case <-w.chainHeadCh: + // Resubmit a work for new cycle once worker receives chain head event. + w.commitNewWork() + + case ev := <-w.chainSideCh: + // Add side block to possible uncle block set. + w.possibleUncles[ev.Block.Hash()] = ev.Block + + case ev := <-w.txsCh: // Apply transactions to the pending state if we're not mining. 
 			//
 			// Note all transactions received may not be continuous with transactions
 			// already included in the current mining block. These transactions will
 			// be automatically eliminated.
-			if !self.isRunning() {
-				self.currentMu.Lock()
+			if !w.isRunning() && w.current != nil {
+				w.mu.Lock()
+				coinbase := w.coinbase
+				w.mu.Unlock()
+
 				txs := make(map[common.Address]types.Transactions)
 				for _, tx := range ev.Txs {
-					acc, _ := types.Sender(self.current.signer, tx)
+					acc, _ := types.Sender(w.current.signer, tx)
 					txs[acc] = append(txs[acc], tx)
 				}
-				txset := types.NewTransactionsByPriceAndNonce(self.current.signer, txs)
-				self.current.commitTransactions(self.mux, txset, self.chain, self.coinbase)
-				self.updateSnapshot()
-				self.currentMu.Unlock()
+				txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs)
+				w.current.commitTransactions(w.mux, txset, w.chain, coinbase)
+				w.updateSnapshot()
 			} else {
 				// If we're mining, but nothing is being processed, wake on new transactions
-				if self.config.Clique != nil && self.config.Clique.Period == 0 {
-					self.commitNewWork()
+				if w.config.Clique != nil && w.config.Clique.Period == 0 {
+					w.commitNewWork()
 				}
 			}

 		// System stopped
-		case <-self.txsSub.Err():
+		case <-w.exitCh:
 			return
-		case <-self.chainHeadSub.Err():
+		case <-w.txsSub.Err():
 			return
-		case <-self.chainSideSub.Err():
+		case <-w.chainHeadSub.Err():
+			return
+		case <-w.chainSideSub.Err():
 			return
 		}
 	}
 }

-func (self *worker) wait() {
+// seal pushes a sealing task to consensus engine and submits the result.
+func (w *worker) seal(t *task, stop <-chan struct{}) {
+	var (
+		err error
+		res *task
+	)
+
+	if t.block, err = w.engine.Seal(w.chain, t.block, stop); t.block != nil {
+		log.Info("Successfully sealed new block", "number", t.block.Number(), "hash", t.block.Hash(),
+			"elapsed", common.PrettyDuration(time.Since(t.createdAt)))
+		res = t
+	} else {
+		if err != nil {
+			log.Warn("Block sealing failed", "err", err)
+		}
+		res = nil
+	}
+	select {
+	case w.resultCh <- res:
+	case <-w.exitCh:
+	}
+}
+
+// taskLoop is a standalone goroutine to fetch sealing tasks from the generator and
+// push them to the consensus engine.
+func (w *worker) taskLoop() {
+	var stopCh chan struct{}
+
+	// interrupt aborts the in-flight sealing task.
+	interrupt := func() {
+		if stopCh != nil {
+			close(stopCh)
+			stopCh = nil
+		}
+	}
 	for {
-		for result := range self.recv {
+		select {
+		case task := <-w.taskCh:
+			if w.newTaskHook != nil {
+				w.newTaskHook(task)
+			}
+			interrupt()
+			stopCh = make(chan struct{})
+			go w.seal(task, stopCh)
+		case <-w.exitCh:
+			interrupt()
+			return
+		}
+	}
+}
+// resultLoop is a standalone goroutine which handles sealing result submission
+// and flushes the relevant data to the database.
+func (w *worker) resultLoop() {
+	for {
+		select {
+		case result := <-w.resultCh:
 			if result == nil {
 				continue
 			}
-			block := result.Block
+			block := result.block

 			// Update the block hash in all logs since it is now available and not when the
 			// receipt/log of individual transactions were created.
-			for _, r := range result.Receipts {
+			for _, r := range result.receipts {
 				for _, l := range r.Logs {
 					l.BlockHash = block.Hash()
 				}
 			}
-			for _, log := range result.State.Logs() {
+			for _, log := range result.state.Logs() {
 				log.BlockHash = block.Hash()
 			}
-			self.currentMu.Lock()
-			stat, err := self.chain.WriteBlockWithState(block, result.Receipts, result.State)
-			self.currentMu.Unlock()
+			// Commit block and state to database.
+			stat, err := w.chain.WriteBlockWithState(block, result.receipts, result.state)
 			if err != nil {
 				log.Error("Failed writing block to chain", "err", err)
 				continue
 			}
 			// Broadcast the block and announce chain insertion event
-			self.mux.Post(core.NewMinedBlockEvent{Block: block})
+			w.mux.Post(core.NewMinedBlockEvent{Block: block})
 			var (
 				events []interface{}
-				logs   = result.State.Logs()
+				logs   = result.state.Logs()
 			)
-			events = append(events, core.ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
-			if stat == core.CanonStatTy {
+			switch stat {
+			case core.CanonStatTy:
+				events = append(events, core.ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
 				events = append(events, core.ChainHeadEvent{Block: block})
+			case core.SideStatTy:
+				events = append(events, core.ChainSideEvent{Block: block})
 			}
-			self.chain.PostChainEvents(events, logs)
+			w.chain.PostChainEvents(events, logs)

-			// Insert the block into the set of pending ones to wait for confirmations
-			self.unconfirmed.Insert(block.NumberU64(), block.Hash())
-		}
-	}
-}
+			// Insert the block into the set of pending ones to wait for confirmations
+			w.unconfirmed.Insert(block.NumberU64(), block.Hash())

-// push sends a new work task to currently live miner agents.
-func (self *worker) push(p *Package) {
-	for agent := range self.agents {
-		agent.AssignTask(p)
+		case <-w.exitCh:
+			return
 	}
 }

 // makeCurrent creates a new environment for the current cycle.
-func (self *worker) makeCurrent(parent *types.Block, header *types.Header) error {
-	state, err := self.chain.StateAt(parent.Root())
+func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error {
+	state, err := w.chain.StateAt(parent.Root())
 	if err != nil {
 		return err
 	}
 	env := &Env{
-		config:    self.config,
-		signer:    types.NewEIP155Signer(self.config.ChainID),
+		config:    w.config,
+		signer:    types.NewEIP155Signer(w.config.ChainID),
 		state:     state,
 		ancestors: mapset.NewSet(),
 		family:    mapset.NewSet(),
 		uncles:    mapset.NewSet(),
 		header:    header,
-		createdAt: time.Now(),
 	}

 	// when 08 is processed ancestors contain 07 (quick block)
-	for _, ancestor := range self.chain.GetBlocksFromHash(parent.Hash(), 7) {
+	for _, ancestor := range w.chain.GetBlocksFromHash(parent.Hash(), 7) {
 		for _, uncle := range ancestor.Uncles() {
 			env.family.Add(uncle.Hash())
 		}
@@ -357,20 +500,63 @@ func (self *worker) makeCurrent(parent *types.Block, header *types.Header) error
 	// Keep track of transactions which return errors so they can be removed
 	env.tcount = 0
-	self.current = env
+	w.current = env
 	return nil
 }

-func (self *worker) commitNewWork() {
-	self.mu.Lock()
-	defer self.mu.Unlock()
-	self.uncleMu.Lock()
-	defer self.uncleMu.Unlock()
-	self.currentMu.Lock()
-	defer self.currentMu.Unlock()
+// commitUncle adds the given block to the uncle block set; it returns an error if the addition fails.
+func (w *worker) commitUncle(env *Env, uncle *types.Header) error {
+	hash := uncle.Hash()
+	if env.uncles.Contains(hash) {
+		return fmt.Errorf("uncle not unique")
+	}
+	if !env.ancestors.Contains(uncle.ParentHash) {
+		return fmt.Errorf("uncle's parent unknown (%x)", uncle.ParentHash[0:4])
+	}
+	if env.family.Contains(hash) {
+		return fmt.Errorf("uncle already in family (%x)", hash)
+	}
+	env.uncles.Add(uncle.Hash())
+	return nil
+}
+
+// updateSnapshot updates pending snapshot block and state.
+// Note this function assumes the current variable is thread safe.
+func (w *worker) updateSnapshot() { + w.snapshotMu.Lock() + defer w.snapshotMu.Unlock() + + var uncles []*types.Header + w.current.uncles.Each(func(item interface{}) bool { + hash, ok := item.(common.Hash) + if !ok { + return false + } + uncle, exist := w.possibleUncles[hash] + if !exist { + return false + } + uncles = append(uncles, uncle.Header()) + return true + }) + + w.snapshotBlock = types.NewBlock( + w.current.header, + w.current.txs, + uncles, + w.current.receipts, + ) + + w.snapshotState = w.current.state.Copy() +} + +// commitNewWork generates several new sealing tasks based on the parent block. +func (w *worker) commitNewWork() { + w.mu.RLock() + defer w.mu.RUnlock() tstart := time.Now() - parent := self.chain.CurrentBlock() + parent := w.chain.CurrentBlock() tstamp := tstart.Unix() if parent.Time().Cmp(new(big.Int).SetInt64(tstamp)) >= 0 { @@ -388,28 +574,28 @@ func (self *worker) commitNewWork() { ParentHash: parent.Hash(), Number: num.Add(num, common.Big1), GasLimit: core.CalcGasLimit(parent), - Extra: self.extra, + Extra: w.extra, Time: big.NewInt(tstamp), } // Only set the coinbase if our consensus engine is running (avoid spurious block rewards) - if self.isRunning() { - if self.coinbase == (common.Address{}) { + if w.isRunning() { + if w.coinbase == (common.Address{}) { log.Error("Refusing to mine without etherbase") return } - header.Coinbase = self.coinbase + header.Coinbase = w.coinbase } - if err := self.engine.Prepare(self.chain, header); err != nil { + if err := w.engine.Prepare(w.chain, header); err != nil { log.Error("Failed to prepare header for mining", "err", err) return } // If we are care about TheDAO hard-fork check whether to override the extra-data or not - if daoBlock := self.config.DAOForkBlock; daoBlock != nil { + if daoBlock := w.config.DAOForkBlock; daoBlock != nil { // Check whether the block is among the fork extra-override range limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange) if header.Number.Cmp(daoBlock) >= 0 && header.Number.Cmp(limit) < 0 { // Depending whether we support or oppose the fork, override differently - if self.config.DAOForkSupport { + if w.config.DAOForkSupport { header.Extra = common.CopyBytes(params.DAOForkBlockExtra) } else if bytes.Equal(header.Extra, params.DAOForkBlockExtra) { header.Extra = []byte{} // If miner opposes, don't let it use the reserved extra-data @@ -417,14 +603,14 @@ func (self *worker) commitNewWork() { } } // Could potentially happen if starting to mine in an odd state. 
- err := self.makeCurrent(parent, header) + err := w.makeCurrent(parent, header) if err != nil { log.Error("Failed to create mining context", "err", err) return } // Create the current work task and check any fork transitions needed - env := self.current - if self.config.DAOForkSupport && self.config.DAOForkBlock != nil && self.config.DAOForkBlock.Cmp(header.Number) == 0 { + env := w.current + if w.config.DAOForkSupport && w.config.DAOForkBlock != nil && w.config.DAOForkBlock.Cmp(header.Number) == 0 { misc.ApplyDAOHardFork(env.state) } @@ -433,11 +619,11 @@ func (self *worker) commitNewWork() { uncles []*types.Header badUncles []common.Hash ) - for hash, uncle := range self.possibleUncles { + for hash, uncle := range w.possibleUncles { if len(uncles) == 2 { break } - if err := self.commitUncle(env, uncle.Header()); err != nil { + if err := w.commitUncle(env, uncle.Header()); err != nil { log.Trace("Bad uncle found and will be removed", "hash", hash) log.Trace(fmt.Sprint(uncle)) @@ -448,184 +634,73 @@ func (self *worker) commitNewWork() { } } for _, hash := range badUncles { - delete(self.possibleUncles, hash) + delete(w.possibleUncles, hash) } var ( - emptyBlock *types.Block - fullBlock *types.Block + emptyBlock, fullBlock *types.Block + emptyState, fullState *state.StateDB ) // Create an empty block based on temporary copied state for sealing in advance without waiting block // execution finished. - emptyState := env.state.Copy() - if emptyBlock, err = self.engine.Finalize(self.chain, header, emptyState, nil, uncles, nil); err != nil { + emptyState = env.state.Copy() + if emptyBlock, err = w.engine.Finalize(w.chain, header, emptyState, nil, uncles, nil); err != nil { log.Error("Failed to finalize block for temporary sealing", "err", err) } else { // Push empty work in advance without applying pending transaction. // The reason is transactions execution can cost a lot and sealer need to // take advantage of this part time. - if self.isRunning() { - log.Info("Commit new empty mining work", "number", emptyBlock.Number(), "uncles", len(uncles)) - self.push(&Package{nil, emptyState, emptyBlock}) + if w.isRunning() { + select { + case w.taskCh <- &task{receipts: nil, state: emptyState, block: emptyBlock, createdAt: time.Now()}: + log.Info("Commit new empty mining work", "number", emptyBlock.Number(), "uncles", len(uncles)) + case <-w.exitCh: + log.Info("Worker has exited") + return + } } } // Fill the block with all available pending transactions. - pending, err := self.eth.TxPool().Pending() + pending, err := w.eth.TxPool().Pending() if err != nil { log.Error("Failed to fetch pending transactions", "err", err) return } - txs := types.NewTransactionsByPriceAndNonce(self.current.signer, pending) - env.commitTransactions(self.mux, txs, self.chain, self.coinbase) + // Short circuit if there is no available pending transactions + if len(pending) == 0 { + w.updateSnapshot() + return + } + txs := types.NewTransactionsByPriceAndNonce(w.current.signer, pending) + env.commitTransactions(w.mux, txs, w.chain, w.coinbase) // Create the full block to seal with the consensus engine - if fullBlock, err = self.engine.Finalize(self.chain, header, env.state, env.txs, uncles, env.receipts); err != nil { + fullState = env.state.Copy() + if fullBlock, err = w.engine.Finalize(w.chain, header, fullState, env.txs, uncles, env.receipts); err != nil { log.Error("Failed to finalize block for sealing", "err", err) return } - // We only care about logging if we're actually mining. 
- if self.isRunning() { - log.Info("Commit new full mining work", "number", fullBlock.Number(), "txs", env.tcount, "uncles", len(uncles), "elapsed", common.PrettyDuration(time.Since(tstart))) - self.unconfirmed.Shift(fullBlock.NumberU64() - 1) - self.push(&Package{env.receipts, env.state, fullBlock}) - } - self.updateSnapshot() -} - -func (self *worker) commitUncle(env *Env, uncle *types.Header) error { - hash := uncle.Hash() - if env.uncles.Contains(hash) { - return fmt.Errorf("uncle not unique") - } - if !env.ancestors.Contains(uncle.ParentHash) { - return fmt.Errorf("uncle's parent unknown (%x)", uncle.ParentHash[0:4]) - } - if env.family.Contains(hash) { - return fmt.Errorf("uncle already in family (%x)", hash) - } - env.uncles.Add(uncle.Hash()) - return nil -} - -func (self *worker) updateSnapshot() { - self.snapshotMu.Lock() - defer self.snapshotMu.Unlock() - - var uncles []*types.Header - self.current.uncles.Each(func(item interface{}) bool { - if header, ok := item.(*types.Header); ok { - uncles = append(uncles, header) - return true - } - return false - }) - - self.snapshotBlock = types.NewBlock( - self.current.header, - self.current.txs, - uncles, - self.current.receipts, - ) - self.snapshotState = self.current.state.Copy() -} - -func (env *Env) commitTransactions(mux *event.TypeMux, txs *types.TransactionsByPriceAndNonce, bc *core.BlockChain, coinbase common.Address) { - if env.gasPool == nil { - env.gasPool = new(core.GasPool).AddGas(env.header.GasLimit) + // Deep copy receipts here to avoid interaction between different tasks. + cpy := make([]*types.Receipt, len(env.receipts)) + for i, l := range env.receipts { + cpy[i] = new(types.Receipt) + *cpy[i] = *l } - - var coalescedLogs []*types.Log - - for { - // If we don't have enough gas for any further transactions then we're done - if env.gasPool.Gas() < params.TxGas { - log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas) - break - } - // Retrieve the next transaction and abort if all done - tx := txs.Peek() - if tx == nil { - break - } - // Error may be ignored here. The error has already been checked - // during transaction acceptance is the transaction pool. - // - // We use the eip155 signer regardless of the current hf. - from, _ := types.Sender(env.signer, tx) - // Check whether the tx is replay protected. If we're not in the EIP155 hf - // phase, start ignoring the sender until we do. 
- if tx.Protected() && !env.config.IsEIP155(env.header.Number) { - log.Trace("Ignoring reply protected transaction", "hash", tx.Hash(), "eip155", env.config.EIP155Block) - - txs.Pop() - continue - } - // Start executing the transaction - env.state.Prepare(tx.Hash(), common.Hash{}, env.tcount) - - err, logs := env.commitTransaction(tx, bc, coinbase, env.gasPool) - switch err { - case core.ErrGasLimitReached: - // Pop the current out-of-gas transaction without shifting in the next from the account - log.Trace("Gas limit exceeded for current block", "sender", from) - txs.Pop() - - case core.ErrNonceTooLow: - // New head notification data race between the transaction pool and miner, shift - log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce()) - txs.Shift() - - case core.ErrNonceTooHigh: - // Reorg notification data race between the transaction pool and miner, skip account = - log.Trace("Skipping account with hight nonce", "sender", from, "nonce", tx.Nonce()) - txs.Pop() - - case nil: - // Everything ok, collect the logs and shift in the next transaction from the same account - coalescedLogs = append(coalescedLogs, logs...) - env.tcount++ - txs.Shift() - - default: - // Strange error, discard the transaction and get the next in line (note, the - // nonce-too-high clause will prevent us from executing in vain). - log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err) - txs.Shift() + // We only care about logging if we're actually mining. + if w.isRunning() { + if w.fullTaskInterval != nil { + w.fullTaskInterval() } - } - if len(coalescedLogs) > 0 || env.tcount > 0 { - // make a copy, the state caches the logs and these logs get "upgraded" from pending to mined - // logs by filling in the block hash when the block was mined by the local miner. This can - // cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed. - cpy := make([]*types.Log, len(coalescedLogs)) - for i, l := range coalescedLogs { - cpy[i] = new(types.Log) - *cpy[i] = *l + select { + case w.taskCh <- &task{receipts: cpy, state: fullState, block: fullBlock, createdAt: time.Now()}: + w.unconfirmed.Shift(fullBlock.NumberU64() - 1) + log.Info("Commit new full mining work", "number", fullBlock.Number(), "txs", env.tcount, "uncles", len(uncles), "elapsed", common.PrettyDuration(time.Since(tstart))) + case <-w.exitCh: + log.Info("Worker has exited") } - go func(logs []*types.Log, tcount int) { - if len(logs) > 0 { - mux.Post(core.PendingLogsEvent{Logs: logs}) - } - if tcount > 0 { - mux.Post(core.PendingStateEvent{}) - } - }(cpy, env.tcount) - } -} - -func (env *Env) commitTransaction(tx *types.Transaction, bc *core.BlockChain, coinbase common.Address, gp *core.GasPool) (error, []*types.Log) { - snap := env.state.Snapshot() - - receipt, _, err := core.ApplyTransaction(env.config, bc, &coinbase, gp, env.state, env.header, tx, &env.header.GasUsed, vm.Config{}) - if err != nil { - env.state.RevertToSnapshot(snap) - return err, nil } - env.txs = append(env.txs, tx) - env.receipts = append(env.receipts, receipt) - - return nil, receipt.Logs + w.updateSnapshot() } diff --git a/miner/worker_test.go b/miner/worker_test.go new file mode 100644 index 000000000000..5823a608ef6b --- /dev/null +++ b/miner/worker_test.go @@ -0,0 +1,212 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. 
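Before the new test file, a sketch of how the reworked worker is meant to be driven may help. The helper below is hypothetical glue, not repo code: it uses only methods introduced by this patch and assumes config, engine and eth are wired up elsewhere, much as the test backend that follows does.

package miner

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/params"
)

// exampleWorkerLifecycle shows the intended call order for the new worker.
func exampleWorkerLifecycle(config *params.ChainConfig, engine consensus.Engine, eth Backend, coinbase common.Address) {
	w := newWorker(config, engine, eth, new(event.TypeMux))
	w.setEtherbase(coinbase) // commitNewWork refuses to seal without an etherbase

	w.start() // flips the running flag and submits the first sealing task

	if block, state := w.pending(); block != nil {
		_ = block.NumberU64() // snapshot of the block currently being assembled
		_ = state
	}

	w.stop()  // stops sealing; the background loops keep running
	w.close() // tears the loops down; closing is supported only once
}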
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package miner + +import ( + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/clique" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/params" +) + +var ( + // Test chain configurations + testTxPoolConfig core.TxPoolConfig + ethashChainConfig *params.ChainConfig + cliqueChainConfig *params.ChainConfig + + // Test accounts + testBankKey, _ = crypto.GenerateKey() + testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey) + testBankFunds = big.NewInt(1000000000000000000) + + acc1Key, _ = crypto.GenerateKey() + acc1Addr = crypto.PubkeyToAddress(acc1Key.PublicKey) + + // Test transactions + pendingTxs []*types.Transaction + newTxs []*types.Transaction +) + +func init() { + testTxPoolConfig = core.DefaultTxPoolConfig + testTxPoolConfig.Journal = "" + ethashChainConfig = params.TestChainConfig + cliqueChainConfig = params.TestChainConfig + cliqueChainConfig.Clique = ¶ms.CliqueConfig{ + Period: 1, + Epoch: 30000, + } + tx1, _ := types.SignTx(types.NewTransaction(0, acc1Addr, big.NewInt(1000), params.TxGas, nil, nil), types.HomesteadSigner{}, testBankKey) + pendingTxs = append(pendingTxs, tx1) + tx2, _ := types.SignTx(types.NewTransaction(1, acc1Addr, big.NewInt(1000), params.TxGas, nil, nil), types.HomesteadSigner{}, testBankKey) + newTxs = append(newTxs, tx2) +} + +// testWorkerBackend implements worker.Backend interfaces and wraps all information needed during the testing. 
+type testWorkerBackend struct {
+	db         ethdb.Database
+	txPool     *core.TxPool
+	chain      *core.BlockChain
+	testTxFeed event.Feed
+}
+
+func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) *testWorkerBackend {
+	var (
+		db    = ethdb.NewMemDatabase()
+		gspec = core.Genesis{
+			Config: chainConfig,
+			Alloc:  core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
+		}
+	)
+
+	switch engine.(type) {
+	case *clique.Clique:
+		gspec.ExtraData = make([]byte, 32+common.AddressLength+65)
+		copy(gspec.ExtraData[32:], testBankAddress[:])
+	case *ethash.Ethash:
+	default:
+		t.Fatal("unexpected consensus engine type")
+	}
+	gspec.MustCommit(db)
+
+	chain, _ := core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{})
+	txpool := core.NewTxPool(testTxPoolConfig, chainConfig, chain)
+
+	return &testWorkerBackend{
+		db:     db,
+		chain:  chain,
+		txPool: txpool,
+	}
+}
+
+func (b *testWorkerBackend) BlockChain() *core.BlockChain { return b.chain }
+func (b *testWorkerBackend) TxPool() *core.TxPool         { return b.txPool }
+func (b *testWorkerBackend) PostChainEvents(events []interface{}) {
+	b.chain.PostChainEvents(events, nil)
+}
+
+func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) (*worker, *testWorkerBackend) {
+	backend := newTestWorkerBackend(t, chainConfig, engine)
+	backend.txPool.AddLocals(pendingTxs)
+	w := newWorker(chainConfig, engine, backend, new(event.TypeMux))
+	w.setEtherbase(testBankAddress)
+	return w, backend
+}
+
+func TestPendingStateAndBlockEthash(t *testing.T) {
+	testPendingStateAndBlock(t, ethashChainConfig, ethash.NewFaker())
+}
+func TestPendingStateAndBlockClique(t *testing.T) {
+	testPendingStateAndBlock(t, cliqueChainConfig, clique.New(cliqueChainConfig.Clique, ethdb.NewMemDatabase()))
+}
+
+func testPendingStateAndBlock(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) {
+	defer engine.Close()
+
+	w, b := newTestWorker(t, chainConfig, engine)
+	defer w.close()
+
+	// Ensure snapshot has been updated.
+	time.Sleep(100 * time.Millisecond)
+	block, state := w.pending()
+	if block.NumberU64() != 1 {
+		t.Errorf("block number mismatch, has %d, want %d", block.NumberU64(), 1)
+	}
+	if balance := state.GetBalance(acc1Addr); balance.Cmp(big.NewInt(1000)) != 0 {
+		t.Errorf("account balance mismatch, has %d, want %d", balance, 1000)
+	}
+	b.txPool.AddLocals(newTxs)
+	// Ensure the new tx events have been processed
+	time.Sleep(100 * time.Millisecond)
+	block, state = w.pending()
+	if balance := state.GetBalance(acc1Addr); balance.Cmp(big.NewInt(2000)) != 0 {
+		t.Errorf("account balance mismatch, has %d, want %d", balance, 2000)
+	}
+}
+
+func TestEmptyWorkEthash(t *testing.T) {
+	testEmptyWork(t, ethashChainConfig, ethash.NewFaker())
+}
+func TestEmptyWorkClique(t *testing.T) {
+	testEmptyWork(t, cliqueChainConfig, clique.New(cliqueChainConfig.Clique, ethdb.NewMemDatabase()))
+}
+
+func testEmptyWork(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) {
+	defer engine.Close()
+
+	w, _ := newTestWorker(t, chainConfig, engine)
+	defer w.close()
+
+	var (
+		taskCh    = make(chan struct{}, 2)
+		taskIndex int
+	)
+
+	checkEqual := func(t *testing.T, task *task, index int) {
+		receiptLen, balance := 0, big.NewInt(0)
+		if index == 1 {
+			receiptLen, balance = 1, big.NewInt(1000)
+		}
+		if len(task.receipts) != receiptLen {
+			t.Errorf("receipt number mismatch has %d, want %d", len(task.receipts), receiptLen)
+		}
+		if task.state.GetBalance(acc1Addr).Cmp(balance) != 0 {
+			t.Errorf("account balance mismatch has %d, want %d", task.state.GetBalance(acc1Addr), balance)
+		}
+	}
+
+	w.newTaskHook = func(task *task) {
+		if task.block.NumberU64() == 1 {
+			checkEqual(t, task, taskIndex)
+			taskIndex += 1
+			taskCh <- struct{}{}
+		}
+	}
+	w.fullTaskInterval = func() {
+		time.Sleep(100 * time.Millisecond)
+	}
+
+	// Ensure worker has finished initialization
+	for {
+		b := w.pendingBlock()
+		if b != nil && b.NumberU64() == 1 {
+			break
+		}
+	}
+
+	w.start()
+	for i := 0; i < 2; i += 1 {
+		to := time.NewTimer(time.Second)
+		select {
+		case <-taskCh:
+		case <-to.C:
+			t.Error("new task timeout")
+		}
+	}
+}
From b2ddb1fcbf77771d0693ee5a00f8ae1cd4c0f87c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Felf=C3=B6ldi=20Zsolt?=
Date: Tue, 14 Aug 2018 22:44:46 +0200
Subject: [PATCH 128/166] les: implement client connection logic (#16899)

This PR implements les.freeClientPool. It also adds a simulated clock
in common/mclock, which enables time-sensitive tests to run quickly
and still produce accurate results, and package common/prque which is
a generalised variant of prque that enables removing elements other
than the top one from the queue.

les.freeClientPool implements a client database that limits the
connection time of each client and manages accepting/rejecting
incoming connections and even kicking out some connected clients. The
pool calculates recent usage time for each known client (a value that
increases linearly when the client is connected and decreases
exponentially when not connected). Clients with lower recent usage
are preferred, unknown nodes have the highest priority. Already
connected nodes receive a small bias in their favor in order to avoid
accepting and instantly kicking out clients.

Note: the pool can use any string for client identification. Using
signature keys for that purpose would not make sense when being known
has a negative value for the client. Currently the LES protocol
manager uses IP addresses (without port address) to identify clients.
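A usage sketch may make the shape of the pool clearer before the diffs. The wiring below is hypothetical (the real caller is the LES protocol manager's handshake path in les/handler.go), but it sticks to the API added by this patch, and drives the simulated clock so virtual connection time can be advanced deterministically.

package les

import (
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/ethdb"
)

// exampleFreeClientPool is an illustrative driver for the new pool.
func exampleFreeClientPool() {
	var clock mclock.Simulated // zero value is usable; Run advances it
	pool := newFreeClientPool(ethdb.NewMemDatabase(), 10, 100, &clock)
	defer pool.stop()

	// connect reports whether the client was accepted; the callback fires
	// only when the pool itself kicks the client out, and must not block.
	if pool.connect("203.0.113.7", func() { /* drop the peer */ }) {
		clock.Run(time.Minute) // one minute of virtual connection time accrues usage
		pool.disconnect("203.0.113.7")
	}
}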
--- common/mclock/mclock.go | 31 +++++ common/mclock/simclock.go | 129 ++++++++++++++++++ common/prque/prque.go | 57 ++++++++ common/prque/sstack.go | 106 +++++++++++++++ les/freeclient.go | 278 ++++++++++++++++++++++++++++++++++++++ les/freeclient_test.go | 139 +++++++++++++++++++ les/handler.go | 22 ++- 7 files changed, 761 insertions(+), 1 deletion(-) create mode 100644 common/mclock/simclock.go create mode 100755 common/prque/prque.go create mode 100755 common/prque/sstack.go create mode 100644 les/freeclient.go create mode 100644 les/freeclient_test.go diff --git a/common/mclock/mclock.go b/common/mclock/mclock.go index 02608d17b0be..dcac59c6ceaa 100644 --- a/common/mclock/mclock.go +++ b/common/mclock/mclock.go @@ -30,3 +30,34 @@ type AbsTime time.Duration func Now() AbsTime { return AbsTime(monotime.Now()) } + +// Add returns t + d. +func (t AbsTime) Add(d time.Duration) AbsTime { + return t + AbsTime(d) +} + +// Clock interface makes it possible to replace the monotonic system clock with +// a simulated clock. +type Clock interface { + Now() AbsTime + Sleep(time.Duration) + After(time.Duration) <-chan time.Time +} + +// System implements Clock using the system clock. +type System struct{} + +// Now implements Clock. +func (System) Now() AbsTime { + return AbsTime(monotime.Now()) +} + +// Sleep implements Clock. +func (System) Sleep(d time.Duration) { + time.Sleep(d) +} + +// After implements Clock. +func (System) After(d time.Duration) <-chan time.Time { + return time.After(d) +} diff --git a/common/mclock/simclock.go b/common/mclock/simclock.go new file mode 100644 index 000000000000..e014f56150ea --- /dev/null +++ b/common/mclock/simclock.go @@ -0,0 +1,129 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package mclock + +import ( + "sync" + "time" +) + +// Simulated implements a virtual Clock for reproducible time-sensitive tests. It +// simulates a scheduler on a virtual timescale where actual processing takes zero time. +// +// The virtual clock doesn't advance on its own, call Run to advance it and execute timers. +// Since there is no way to influence the Go scheduler, testing timeout behaviour involving +// goroutines needs special care. A good way to test such timeouts is as follows: First +// perform the action that is supposed to time out. Ensure that the timer you want to test +// is created. Then run the clock until after the timeout. Finally observe the effect of +// the timeout using a channel or semaphore. +type Simulated struct { + now AbsTime + scheduled []event + mu sync.RWMutex + cond *sync.Cond +} + +type event struct { + do func() + at AbsTime +} + +// Run moves the clock by the given duration, executing all timers before that duration. 
+func (s *Simulated) Run(d time.Duration) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.init()
+
+	end := s.now + AbsTime(d)
+	for len(s.scheduled) > 0 {
+		ev := s.scheduled[0]
+		if ev.at > end {
+			break
+		}
+		s.now = ev.at
+		ev.do()
+		s.scheduled = s.scheduled[1:]
+	}
+	s.now = end
+}
+
+func (s *Simulated) ActiveTimers() int {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+
+	return len(s.scheduled)
+}
+
+func (s *Simulated) WaitForTimers(n int) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.init()
+
+	for len(s.scheduled) < n {
+		s.cond.Wait()
+	}
+}
+
+// Now implements Clock.
+func (s *Simulated) Now() AbsTime {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+
+	return s.now
+}
+
+// Sleep implements Clock.
+func (s *Simulated) Sleep(d time.Duration) {
+	<-s.After(d)
+}
+
+// After implements Clock.
+func (s *Simulated) After(d time.Duration) <-chan time.Time {
+	after := make(chan time.Time, 1)
+	s.insert(d, func() {
+		after <- (time.Time{}).Add(time.Duration(s.now))
+	})
+	return after
+}
+
+func (s *Simulated) insert(d time.Duration, do func()) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.init()
+
+	at := s.now + AbsTime(d)
+	l, h := 0, len(s.scheduled)
+	ll := h
+	for l != h {
+		m := (l + h) / 2
+		if at < s.scheduled[m].at {
+			h = m
+		} else {
+			l = m + 1
+		}
+	}
+	s.scheduled = append(s.scheduled, event{})
+	copy(s.scheduled[l+1:], s.scheduled[l:ll])
+	s.scheduled[l] = event{do: do, at: at}
+	s.cond.Broadcast()
+}
+
+func (s *Simulated) init() {
+	if s.cond == nil {
+		s.cond = sync.NewCond(&s.mu)
+	}
+}
diff --git a/common/prque/prque.go b/common/prque/prque.go
new file mode 100755
index 000000000000..9fd31a2e5d1f
--- /dev/null
+++ b/common/prque/prque.go
@@ -0,0 +1,57 @@
+// This is a duplicated and slightly modified version of "gopkg.in/karalabe/cookiejar.v2/collections/prque".
+
+package prque
+
+import (
+	"container/heap"
+)
+
+// Priority queue data structure.
+type Prque struct {
+	cont *sstack
+}
+
+// Creates a new priority queue.
+func New(setIndex setIndexCallback) *Prque {
+	return &Prque{newSstack(setIndex)}
+}
+
+// Pushes a value with a given priority into the queue, expanding if necessary.
+func (p *Prque) Push(data interface{}, priority int64) {
+	heap.Push(p.cont, &item{data, priority})
+}
+
+// Pops the value with the greatest priority off the stack and returns it.
+// Currently no shrinking is done.
+func (p *Prque) Pop() (interface{}, int64) {
+	item := heap.Pop(p.cont).(*item)
+	return item.value, item.priority
+}
+
+// Pops only the item from the queue, dropping the associated priority value.
+func (p *Prque) PopItem() interface{} {
+	return heap.Pop(p.cont).(*item).value
+}
+
+// Remove removes the element with the given index.
+func (p *Prque) Remove(i int) interface{} {
+	if i < 0 {
+		return nil
+	}
+	return heap.Remove(p.cont, i)
+}
+
+// Checks whether the priority queue is empty.
+func (p *Prque) Empty() bool {
+	return p.cont.Len() == 0
+}
+
+// Returns the number of elements in the priority queue.
+func (p *Prque) Size() int {
+	return p.cont.Len()
+}
+
+// Clears the contents of the priority queue.
+func (p *Prque) Reset() {
+	*p = *New(p.cont.setIndex)
+}
diff --git a/common/prque/sstack.go b/common/prque/sstack.go
new file mode 100755
index 000000000000..4875dae99d96
--- /dev/null
+++ b/common/prque/sstack.go
@@ -0,0 +1,106 @@
+// This is a duplicated and slightly modified version of "gopkg.in/karalabe/cookiejar.v2/collections/prque".
+
+package prque
+
+// The size of a block of data
+const blockSize = 4096
+
+// A prioritized item in the sorted stack.
+// +// Note: priorities can "wrap around" the int64 range, a comes before b if (a.priority - b.priority) > 0. +// The difference between the lowest and highest priorities in the queue at any point should be less than 2^63. +type item struct { + value interface{} + priority int64 +} + +// setIndexCallback is called when the element is moved to a new index. +// Providing setIndexCallback is optional, it is needed only if the application needs +// to delete elements other than the top one. +type setIndexCallback func(a interface{}, i int) + +// Internal sortable stack data structure. Implements the Push and Pop ops for +// the stack (heap) functionality and the Len, Less and Swap methods for the +// sortability requirements of the heaps. +type sstack struct { + setIndex setIndexCallback + size int + capacity int + offset int + + blocks [][]*item + active []*item +} + +// Creates a new, empty stack. +func newSstack(setIndex setIndexCallback) *sstack { + result := new(sstack) + result.setIndex = setIndex + result.active = make([]*item, blockSize) + result.blocks = [][]*item{result.active} + result.capacity = blockSize + return result +} + +// Pushes a value onto the stack, expanding it if necessary. Required by +// heap.Interface. +func (s *sstack) Push(data interface{}) { + if s.size == s.capacity { + s.active = make([]*item, blockSize) + s.blocks = append(s.blocks, s.active) + s.capacity += blockSize + s.offset = 0 + } else if s.offset == blockSize { + s.active = s.blocks[s.size/blockSize] + s.offset = 0 + } + if s.setIndex != nil { + s.setIndex(data.(*item).value, s.size) + } + s.active[s.offset] = data.(*item) + s.offset++ + s.size++ +} + +// Pops a value off the stack and returns it. Currently no shrinking is done. +// Required by heap.Interface. +func (s *sstack) Pop() (res interface{}) { + s.size-- + s.offset-- + if s.offset < 0 { + s.offset = blockSize - 1 + s.active = s.blocks[s.size/blockSize] + } + res, s.active[s.offset] = s.active[s.offset], nil + if s.setIndex != nil { + s.setIndex(res.(*item).value, -1) + } + return +} + +// Returns the length of the stack. Required by sort.Interface. +func (s *sstack) Len() int { + return s.size +} + +// Compares the priority of two elements of the stack (higher is first). +// Required by sort.Interface. +func (s *sstack) Less(i, j int) bool { + return (s.blocks[i/blockSize][i%blockSize].priority - s.blocks[j/blockSize][j%blockSize].priority) > 0 +} + +// Swaps two elements in the stack. Required by sort.Interface. +func (s *sstack) Swap(i, j int) { + ib, io, jb, jo := i/blockSize, i%blockSize, j/blockSize, j%blockSize + a, b := s.blocks[jb][jo], s.blocks[ib][io] + if s.setIndex != nil { + s.setIndex(a.value, i) + s.setIndex(b.value, j) + } + s.blocks[ib][io], s.blocks[jb][jo] = a, b +} + +// Resets the stack, effectively clearing its contents. +func (s *sstack) Reset() { + *s = *newSstack(s.setIndex) +} diff --git a/les/freeclient.go b/les/freeclient.go new file mode 100644 index 000000000000..5ee607be8fb7 --- /dev/null +++ b/les/freeclient.go @@ -0,0 +1,278 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
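What the modification adds over the cookiejar original is the setIndexCallback: each element learns its current heap index, so callers can remove entries that are not at the top, which is exactly what the client pool below needs. A tiny hypothetical usage sketch:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/prque"
)

// entry records its own heap index via the setIndex callback.
type entry struct {
	name  string
	index int
}

func main() {
	q := prque.New(func(a interface{}, i int) { a.(*entry).index = i })

	low, high := &entry{name: "low"}, &entry{name: "high"}
	q.Push(low, 10)
	q.Push(high, 20)

	// Remove a non-top element by its tracked index; without the callback
	// there would be no way to locate it inside the heap.
	q.Remove(low.index)

	v, prio := q.Pop()                 // highest priority comes out first
	fmt.Println(v.(*entry).name, prio) // -> high 20
}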
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package les implements the Light Ethereum Subprotocol. +package les + +import ( + "io" + "math" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common/mclock" + "github.com/ethereum/go-ethereum/common/prque" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" +) + +// freeClientPool implements a client database that limits the connection time +// of each client and manages accepting/rejecting incoming connections and even +// kicking out some connected clients. The pool calculates recent usage time +// for each known client (a value that increases linearly when the client is +// connected and decreases exponentially when not connected). Clients with lower +// recent usage are preferred, unknown nodes have the highest priority. Already +// connected nodes receive a small bias in their favor in order to avoid accepting +// and instantly kicking out clients. +// +// Note: the pool can use any string for client identification. Using signature +// keys for that purpose would not make sense when being known has a negative +// value for the client. Currently the LES protocol manager uses IP addresses +// (without port address) to identify clients. +type freeClientPool struct { + db ethdb.Database + lock sync.Mutex + clock mclock.Clock + closed bool + + connectedLimit, totalLimit int + + addressMap map[string]*freeClientPoolEntry + connPool, disconnPool *prque.Prque + startupTime mclock.AbsTime + logOffsetAtStartup int64 +} + +const ( + recentUsageExpTC = time.Hour // time constant of the exponential weighting window for "recent" server usage + fixedPointMultiplier = 0x1000000 // constant to convert logarithms to fixed point format + connectedBias = time.Minute // this bias is applied in favor of already connected clients in order to avoid kicking them out very soon +) + +// newFreeClientPool creates a new free client pool +func newFreeClientPool(db ethdb.Database, connectedLimit, totalLimit int, clock mclock.Clock) *freeClientPool { + pool := &freeClientPool{ + db: db, + clock: clock, + addressMap: make(map[string]*freeClientPoolEntry), + connPool: prque.New(poolSetIndex), + disconnPool: prque.New(poolSetIndex), + connectedLimit: connectedLimit, + totalLimit: totalLimit, + } + pool.loadFromDb() + return pool +} + +func (f *freeClientPool) stop() { + f.lock.Lock() + f.closed = true + f.saveToDb() + f.lock.Unlock() +} + +// connect should be called after a successful handshake. If the connection was +// rejected, there is no need to call disconnect. +// +// Note: the disconnectFn callback should not block. 
+func (f *freeClientPool) connect(address string, disconnectFn func()) bool { + f.lock.Lock() + defer f.lock.Unlock() + + if f.closed { + return false + } + e := f.addressMap[address] + now := f.clock.Now() + var recentUsage int64 + if e == nil { + e = &freeClientPoolEntry{address: address, index: -1} + f.addressMap[address] = e + } else { + if e.connected { + log.Debug("Client already connected", "address", address) + return false + } + recentUsage = int64(math.Exp(float64(e.logUsage-f.logOffset(now)) / fixedPointMultiplier)) + } + e.linUsage = recentUsage - int64(now) + // check whether (linUsage+connectedBias) is smaller than the highest entry in the connected pool + if f.connPool.Size() == f.connectedLimit { + i := f.connPool.PopItem().(*freeClientPoolEntry) + if e.linUsage+int64(connectedBias)-i.linUsage < 0 { + // kick it out and accept the new client + f.connPool.Remove(i.index) + f.calcLogUsage(i, now) + i.connected = false + f.disconnPool.Push(i, -i.logUsage) + log.Debug("Client kicked out", "address", i.address) + i.disconnectFn() + } else { + // keep the old client and reject the new one + f.connPool.Push(i, i.linUsage) + log.Debug("Client rejected", "address", address) + return false + } + } + f.disconnPool.Remove(e.index) + e.connected = true + e.disconnectFn = disconnectFn + f.connPool.Push(e, e.linUsage) + if f.connPool.Size()+f.disconnPool.Size() > f.totalLimit { + f.disconnPool.Pop() + } + log.Debug("Client accepted", "address", address) + return true +} + +// disconnect should be called when a connection is terminated. If the disconnection +// was initiated by the pool itself using disconnectFn then calling disconnect is +// not necessary but permitted. +func (f *freeClientPool) disconnect(address string) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.closed { + return + } + e := f.addressMap[address] + now := f.clock.Now() + if !e.connected { + log.Debug("Client already disconnected", "address", address) + return + } + + f.connPool.Remove(e.index) + f.calcLogUsage(e, now) + e.connected = false + f.disconnPool.Push(e, -e.logUsage) + log.Debug("Client disconnected", "address", address) +} + +// logOffset calculates the time-dependent offset for the logarithmic +// representation of recent usage +func (f *freeClientPool) logOffset(now mclock.AbsTime) int64 { + // Note: fixedPointMultiplier acts as a multiplier here; the reason for dividing the divisor + // is to avoid int64 overflow. We assume that int64(recentUsageExpTC) >> fixedPointMultiplier. 
+ logDecay := int64((time.Duration(now - f.startupTime)) / (recentUsageExpTC / fixedPointMultiplier)) + return f.logOffsetAtStartup + logDecay +} + +// calcLogUsage converts recent usage from linear to logarithmic representation +// when disconnecting a peer or closing the client pool +func (f *freeClientPool) calcLogUsage(e *freeClientPoolEntry, now mclock.AbsTime) { + dt := e.linUsage + int64(now) + if dt < 1 { + dt = 1 + } + e.logUsage = int64(math.Log(float64(dt))*fixedPointMultiplier) + f.logOffset(now) +} + +// freeClientPoolStorage is the RLP representation of the pool's database storage +type freeClientPoolStorage struct { + LogOffset uint64 + List []*freeClientPoolEntry +} + +// loadFromDb restores pool status from the database storage +// (automatically called at initialization) +func (f *freeClientPool) loadFromDb() { + enc, err := f.db.Get([]byte("freeClientPool")) + if err != nil { + return + } + var storage freeClientPoolStorage + err = rlp.DecodeBytes(enc, &storage) + if err != nil { + log.Error("Failed to decode client list", "err", err) + return + } + f.logOffsetAtStartup = int64(storage.LogOffset) + f.startupTime = f.clock.Now() + for _, e := range storage.List { + log.Debug("Loaded free client record", "address", e.address, "logUsage", e.logUsage) + f.addressMap[e.address] = e + f.disconnPool.Push(e, -e.logUsage) + } +} + +// saveToDb saves pool status to the database storage +// (automatically called during shutdown) +func (f *freeClientPool) saveToDb() { + now := f.clock.Now() + storage := freeClientPoolStorage{ + LogOffset: uint64(f.logOffset(now)), + List: make([]*freeClientPoolEntry, len(f.addressMap)), + } + i := 0 + for _, e := range f.addressMap { + if e.connected { + f.calcLogUsage(e, now) + } + storage.List[i] = e + i++ + } + enc, err := rlp.EncodeToBytes(storage) + if err != nil { + log.Error("Failed to encode client list", "err", err) + } else { + f.db.Put([]byte("freeClientPool"), enc) + } +} + +// freeClientPoolEntry represents a client address known by the pool. +// When connected, recent usage is calculated as linUsage + int64(clock.Now()) +// When disconnected, it is calculated as exp(logUsage - logOffset) where logOffset +// also grows linearly with time while the server is running. +// Conversion between linear and logarithmic representation happens when connecting +// or disconnecting the node. +// +// Note: linUsage and logUsage are values used with constantly growing offsets so +// even though they are close to each other at any time they may wrap around int64 +// limits over time. Comparison should be performed accordingly. +type freeClientPoolEntry struct { + address string + connected bool + disconnectFn func() + linUsage, logUsage int64 + index int +} + +func (e *freeClientPoolEntry) EncodeRLP(w io.Writer) error { + return rlp.Encode(w, []interface{}{e.address, uint64(e.logUsage)}) +} + +func (e *freeClientPoolEntry) DecodeRLP(s *rlp.Stream) error { + var entry struct { + Address string + LogUsage uint64 + } + if err := s.Decode(&entry); err != nil { + return err + } + e.address = entry.Address + e.logUsage = int64(entry.LogUsage) + e.connected = false + e.index = -1 + return nil +} + +// poolSetIndex callback is used by both priority queues to set/update the index of +// the element in the queue. Index is needed to remove elements other than the top one. 
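+// (both connPool and disconnPool are constructed with prque.New(poolSetIndex)
+// in newFreeClientPool, so every queued entry keeps its index up to date)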
+func poolSetIndex(a interface{}, i int) {
+	a.(*freeClientPoolEntry).index = i
+}
diff --git a/les/freeclient_test.go b/les/freeclient_test.go
new file mode 100644
index 000000000000..e95abc7aad01
--- /dev/null
+++ b/les/freeclient_test.go
@@ -0,0 +1,139 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package les implements the Light Ethereum Subprotocol.
+package les
+
+import (
+	"fmt"
+	"math/rand"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common/mclock"
+	"github.com/ethereum/go-ethereum/ethdb"
+)
+
+func TestFreeClientPoolL10C100(t *testing.T) {
+	testFreeClientPool(t, 10, 100)
+}
+
+func TestFreeClientPoolL40C200(t *testing.T) {
+	testFreeClientPool(t, 40, 200)
+}
+
+func TestFreeClientPoolL100C300(t *testing.T) {
+	testFreeClientPool(t, 100, 300)
+}
+
+const testFreeClientPoolTicks = 500000
+
+func testFreeClientPool(t *testing.T, connLimit, clientCount int) {
+	var (
+		clock     mclock.Simulated
+		db        = ethdb.NewMemDatabase()
+		pool      = newFreeClientPool(db, connLimit, 10000, &clock)
+		connected = make([]bool, clientCount)
+		connTicks = make([]int, clientCount)
+		disconnCh = make(chan int, clientCount)
+	)
+	peerId := func(i int) string {
+		return fmt.Sprintf("test peer #%d", i)
+	}
+	disconnFn := func(i int) func() {
+		return func() {
+			disconnCh <- i
+		}
+	}
+
+	// pool should accept new peers up to its connected limit
+	for i := 0; i < connLimit; i++ {
+		if pool.connect(peerId(i), disconnFn(i)) {
+			connected[i] = true
+		} else {
+			t.Fatalf("Test peer #%d rejected", i)
+		}
+	}
+	// since all accepted peers are new and should not be kicked out, the next one should be rejected
+	if pool.connect(peerId(connLimit), disconnFn(connLimit)) {
+		connected[connLimit] = true
+		t.Fatalf("Peer accepted over connected limit")
+	}
+
+	// randomly connect and disconnect peers, expect to have a similar total connection time at the end
+	for tickCounter := 0; tickCounter < testFreeClientPoolTicks; tickCounter++ {
+		clock.Run(1 * time.Second)
+
+		i := rand.Intn(clientCount)
+		if connected[i] {
+			pool.disconnect(peerId(i))
+			connected[i] = false
+			connTicks[i] += tickCounter
+		} else {
+			if pool.connect(peerId(i), disconnFn(i)) {
+				connected[i] = true
+				connTicks[i] -= tickCounter
+			}
+		}
+	pollDisconnects:
+		for {
+			select {
+			case i := <-disconnCh:
+				pool.disconnect(peerId(i))
+				if connected[i] {
+					connTicks[i] += tickCounter
+					connected[i] = false
+				}
+			default:
+				break pollDisconnects
+			}
+		}
+	}
+
+	expTicks := testFreeClientPoolTicks * connLimit / clientCount
+	expMin := expTicks - expTicks/10
+	expMax := expTicks + expTicks/10
+
+	// check that the total connected times of the peers all fall in the expected range
+	for i, c := range connected {
+		if c {
+			connTicks[i] += testFreeClientPoolTicks
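+			// (peer still connected at the end of the run: the line above credits
+			// the ticks accumulated since its last connect)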
+ } + if connTicks[i] < expMin || connTicks[i] > expMax { + t.Errorf("Total connected time of test node #%d (%d) outside expected range (%d to %d)", i, connTicks[i], expMin, expMax) + } + } + + // a previously unknown peer should be accepted now + if !pool.connect("newPeer", func() {}) { + t.Fatalf("Previously unknown peer rejected") + } + + // close and restart pool + pool.stop() + pool = newFreeClientPool(db, connLimit, 10000, &clock) + + // try connecting all known peers (connLimit should be filled up) + for i := 0; i < clientCount; i++ { + pool.connect(peerId(i), func() {}) + } + // expect pool to remember known nodes and kick out one of them to accept a new one + if !pool.connect("newPeer2", func() {}) { + t.Errorf("Previously unknown peer rejected after restarting pool") + } + pool.stop() +} diff --git a/les/handler.go b/les/handler.go index 2fc4cde34941..91a235bf0a91 100644 --- a/les/handler.go +++ b/les/handler.go @@ -28,6 +28,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" @@ -104,6 +105,7 @@ type ProtocolManager struct { odr *LesOdr server *LesServer serverPool *serverPool + clientPool *freeClientPool lesTopic discv5.Topic reqDist *requestDistributor retriever *retrieveManager @@ -226,6 +228,7 @@ func (pm *ProtocolManager) Start(maxPeers int) { if pm.lightSync { go pm.syncer() } else { + pm.clientPool = newFreeClientPool(pm.chainDb, maxPeers, 10000, mclock.System{}) go func() { for range pm.newPeerCh { } @@ -243,6 +246,9 @@ func (pm *ProtocolManager) Stop() { pm.noMorePeers <- struct{}{} close(pm.quitSync) // quits syncer, fetcher + if pm.clientPool != nil { + pm.clientPool.stop() + } // Disconnect existing sessions. // This also closes the gate for any new registrations on the peer set. @@ -264,7 +270,8 @@ func (pm *ProtocolManager) newPeer(pv int, nv uint64, p *p2p.Peer, rw p2p.MsgRea // this function terminates, the peer is disconnected. 
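+// In LES server mode, admission of non-trusted free clients is additionally
+// gated by the freeClientPool (keyed by client IP), as wired up below.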
 func (pm *ProtocolManager) handle(p *peer) error {
 	// Ignore maxPeers if this is a trusted peer
-	if pm.peers.Len() >= pm.maxPeers && !p.Peer.Info().Network.Trusted {
+	// In server mode, the client pool takes over this check after the handshake
+	if pm.lightSync && pm.peers.Len() >= pm.maxPeers && !p.Peer.Info().Network.Trusted {
 		return p2p.DiscTooManyPeers
 	}
 
@@ -282,6 +289,19 @@ func (pm *ProtocolManager) handle(p *peer) error {
 		p.Log().Debug("Light Ethereum handshake failed", "err", err)
 		return err
 	}
+
+	if !pm.lightSync && !p.Peer.Info().Network.Trusted {
+		addr, ok := p.RemoteAddr().(*net.TCPAddr)
+		// if the remote address is not a TCP address, the typecast fails and the
+		// IP-keyed client pool is skipped
+		if ok {
+			id := addr.IP.String()
+			if !pm.clientPool.connect(id, func() { go pm.removePeer(p.id) }) {
+				return p2p.DiscTooManyPeers
+			}
+			defer pm.clientPool.disconnect(id)
+		}
+	}
+
 	if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
 		rw.Init(p.version)
 	}
From b52bb31b767a3e31266e6138e24f7226ebbd8882 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Felf=C3=B6ldi=20Zsolt?=
Date: Tue, 14 Aug 2018 22:59:18 +0200
Subject: [PATCH 129/166] p2p/discv5: add delay to refresh cycle when no seed
 nodes are found (#16994)

---
 p2p/discv5/net.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/p2p/discv5/net.go b/p2p/discv5/net.go
index 4c39c055333b..b93c93d6486b 100644
--- a/p2p/discv5/net.go
+++ b/p2p/discv5/net.go
@@ -678,7 +678,7 @@ func (net *Network) refresh(done chan<- struct{}) {
 	}
 	if len(seeds) == 0 {
 		log.Trace("no seed nodes found")
-		close(done)
+		time.AfterFunc(time.Second*10, func() { close(done) })
 		return
 	}
 	for _, n := range seeds {
From 212bba47ff13812ddabb642da463e58cda4ff20f Mon Sep 17 00:00:00 2001
From: Jeff Prestes
Date: Wed, 15 Aug 2018 04:15:42 -0300
Subject: [PATCH 130/166] backends: configurable gas limit to allow testing
 large contracts (#17358)

* backends: increase gaslimit in order to allow tests of large contracts

* backends: increase gaslimit in order to allow tests of large contracts

* backends: increase gaslimit in order to allow tests of large contracts

---
 accounts/abi/bind/backends/simulated.go |  4 ++--
 accounts/abi/bind/bind_test.go          | 24 ++++++++++++------------
 accounts/abi/bind/util_test.go          |  8 +++++---
 contracts/chequebook/cheque_test.go     |  2 +-
 contracts/chequebook/gencode.go         |  2 +-
 contracts/ens/ens_test.go               |  2 +-
 6 files changed, 22 insertions(+), 20 deletions(-)

diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index fa8828f61bc6..1d14f8c6f117 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -65,9 +65,9 @@ type SimulatedBackend struct {
 
 // NewSimulatedBackend creates a new binding backend using a simulated blockchain
 // for testing purposes.
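+// The new gasLimit argument seeds the genesis block gas limit. A minimal call
+// under this patch (alloc being any funded core.GenesisAlloc) would look like:
+//
+//	sim := backends.NewSimulatedBackend(alloc, 10000000)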
-func NewSimulatedBackend(alloc core.GenesisAlloc) *SimulatedBackend { +func NewSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend { database := ethdb.NewMemDatabase() - genesis := core.Genesis{Config: params.AllEthashProtocolChanges, Alloc: alloc} + genesis := core.Genesis{Config: params.AllEthashProtocolChanges, GasLimit: gasLimit, Alloc: alloc} genesis.MustCommit(database) blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{}) diff --git a/accounts/abi/bind/bind_test.go b/accounts/abi/bind/bind_test.go index 2a5a88648771..0e5b1c1616f1 100644 --- a/accounts/abi/bind/bind_test.go +++ b/accounts/abi/bind/bind_test.go @@ -229,7 +229,7 @@ var bindTests = []struct { // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() auth := bind.NewKeyedTransactor(key) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) // Deploy an interaction tester contract and call a transaction on it _, _, interactor, err := DeployInteractor(auth, sim, "Deploy string") @@ -270,7 +270,7 @@ var bindTests = []struct { // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() auth := bind.NewKeyedTransactor(key) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) // Deploy a tuple tester contract and execute a structured call on it _, _, getter, err := DeployGetter(auth, sim) @@ -302,7 +302,7 @@ var bindTests = []struct { // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() auth := bind.NewKeyedTransactor(key) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) // Deploy a tuple tester contract and execute a structured call on it _, _, tupler, err := DeployTupler(auth, sim) @@ -344,7 +344,7 @@ var bindTests = []struct { // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() auth := bind.NewKeyedTransactor(key) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) // Deploy a slice tester contract and execute a n array call on it _, _, slicer, err := DeploySlicer(auth, sim) @@ -378,7 +378,7 @@ var bindTests = []struct { // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() auth := bind.NewKeyedTransactor(key) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) // Deploy a default method invoker contract and execute its default method _, _, defaulter, err := DeployDefaulter(auth, sim) @@ -411,7 +411,7 @@ var bindTests = []struct { `[{"constant":true,"inputs":[],"name":"String","outputs":[{"name":"","type":"string"}],"type":"function"}]`, ` // Create a simulator and wrap a non-deployed contract - sim := backends.NewSimulatedBackend(nil) + sim := backends.NewSimulatedBackend(nil, 
uint64(10000000000)) nonexistent, err := NewNonExistent(common.Address{}, sim) if err != nil { @@ -447,7 +447,7 @@ var bindTests = []struct { // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() auth := bind.NewKeyedTransactor(key) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) // Deploy a funky gas pattern contract _, _, limiter, err := DeployFunkyGasPattern(auth, sim) @@ -482,7 +482,7 @@ var bindTests = []struct { // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() auth := bind.NewKeyedTransactor(key) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) // Deploy a sender tester contract and execute a structured call on it _, _, callfrom, err := DeployCallFrom(auth, sim) @@ -542,7 +542,7 @@ var bindTests = []struct { // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() auth := bind.NewKeyedTransactor(key) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) // Deploy a underscorer tester contract and execute a structured call on it _, _, underscorer, err := DeployUnderscorer(auth, sim) @@ -612,7 +612,7 @@ var bindTests = []struct { // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() auth := bind.NewKeyedTransactor(key) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) // Deploy an eventer contract _, _, eventer, err := DeployEventer(auth, sim) @@ -761,7 +761,7 @@ var bindTests = []struct { // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() auth := bind.NewKeyedTransactor(key) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) //deploy the test contract _, _, testContract, err := DeployDeeplyNestedArray(auth, sim) @@ -820,7 +820,7 @@ func TestBindings(t *testing.T) { t.Skip("go sdk not found for testing") } // Skip the test if the go-ethereum sources are symlinked (https://github.com/golang/go/issues/14845) - linkTestCode := fmt.Sprintf("package linktest\nfunc CheckSymlinks(){\nfmt.Println(backends.NewSimulatedBackend(nil))\n}") + linkTestCode := fmt.Sprintf("package linktest\nfunc CheckSymlinks(){\nfmt.Println(backends.NewSimulatedBackend(nil,uint64(10000000000)))\n}") linkTestDeps, err := imports.Process(os.TempDir(), []byte(linkTestCode), nil) if err != nil { t.Fatalf("failed check for goimports symlink bug: %v", err) diff --git a/accounts/abi/bind/util_test.go b/accounts/abi/bind/util_test.go index 49e6dc813a33..8f4092971f2f 100644 --- a/accounts/abi/bind/util_test.go +++ b/accounts/abi/bind/util_test.go @@ -53,9 +53,11 @@ var waitDeployedTests = map[string]struct { func TestWaitDeployed(t *testing.T) { for name, test := range waitDeployedTests { - backend := 
backends.NewSimulatedBackend(core.GenesisAlloc{ - crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000)}, - }) + backend := backends.NewSimulatedBackend( + core.GenesisAlloc{ + crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000)}, + }, 10000000, + ) // Create the transaction. tx := types.NewContractCreation(0, big.NewInt(0), test.gas, big.NewInt(1), common.FromHex(test.code)) diff --git a/contracts/chequebook/cheque_test.go b/contracts/chequebook/cheque_test.go index 6b6b28e6577b..4bd2e176b14f 100644 --- a/contracts/chequebook/cheque_test.go +++ b/contracts/chequebook/cheque_test.go @@ -46,7 +46,7 @@ func newTestBackend() *backends.SimulatedBackend { addr0: {Balance: big.NewInt(1000000000)}, addr1: {Balance: big.NewInt(1000000000)}, addr2: {Balance: big.NewInt(1000000000)}, - }) + }, 10000000) } func deploy(prvKey *ecdsa.PrivateKey, amount *big.Int, backend *backends.SimulatedBackend) (common.Address, error) { diff --git a/contracts/chequebook/gencode.go b/contracts/chequebook/gencode.go index 45f6d68f3eb9..ddfe8d151274 100644 --- a/contracts/chequebook/gencode.go +++ b/contracts/chequebook/gencode.go @@ -40,7 +40,7 @@ var ( ) func main() { - backend := backends.NewSimulatedBackend(testAlloc) + backend := backends.NewSimulatedBackend(testAlloc, uint64(100000000)) auth := bind.NewKeyedTransactor(testKey) // Deploy the contract, get the code. diff --git a/contracts/ens/ens_test.go b/contracts/ens/ens_test.go index 6ad844708202..411b04197cd2 100644 --- a/contracts/ens/ens_test.go +++ b/contracts/ens/ens_test.go @@ -35,7 +35,7 @@ var ( ) func TestENS(t *testing.T) { - contractBackend := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}) + contractBackend := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}, 10000000) transactOpts := bind.NewKeyedTransactor(key) ensAddr, ens, err := DeployENS(transactOpts, contractBackend) From 2a17fe25612b57d943862459dba88666685ffd69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 15 Aug 2018 11:01:49 +0300 Subject: [PATCH 131/166] cmd: polish miner flags, deprecate olds, add upgrade path --- cmd/geth/chaincmd.go | 12 ++--- cmd/geth/main.go | 29 +++++++---- cmd/geth/usage.go | 15 +++--- cmd/puppeth/module_node.go | 2 +- cmd/utils/flags.go | 104 ++++++++++++++++++++++++------------- 5 files changed, 102 insertions(+), 60 deletions(-) diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index ff27a9dfb4a6..87548865be61 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -48,7 +48,6 @@ var ( ArgsUsage: "", Flags: []cli.Flag{ utils.DataDirFlag, - utils.LightModeFlag, }, Category: "BLOCKCHAIN COMMANDS", Description: ` @@ -66,7 +65,7 @@ It expects the genesis file as argument.`, Flags: []cli.Flag{ utils.DataDirFlag, utils.CacheFlag, - utils.LightModeFlag, + utils.SyncModeFlag, utils.GCModeFlag, utils.CacheDatabaseFlag, utils.CacheGCFlag, @@ -87,7 +86,7 @@ processing will proceed even if an individual RLP-file import failure occurs.`, Flags: []cli.Flag{ utils.DataDirFlag, utils.CacheFlag, - utils.LightModeFlag, + utils.SyncModeFlag, }, Category: "BLOCKCHAIN COMMANDS", Description: ` @@ -105,7 +104,7 @@ be gzipped.`, Flags: []cli.Flag{ utils.DataDirFlag, utils.CacheFlag, - utils.LightModeFlag, + utils.SyncModeFlag, }, Category: "BLOCKCHAIN COMMANDS", Description: ` @@ -119,7 +118,7 @@ be gzipped.`, Flags: []cli.Flag{ utils.DataDirFlag, utils.CacheFlag, - utils.LightModeFlag, + utils.SyncModeFlag, 
}, Category: "BLOCKCHAIN COMMANDS", Description: ` @@ -149,7 +148,6 @@ The first argument must be the directory containing the blockchain to download f ArgsUsage: " ", Flags: []cli.Flag{ utils.DataDirFlag, - utils.LightModeFlag, }, Category: "BLOCKCHAIN COMMANDS", Description: ` @@ -163,7 +161,7 @@ Remove blockchain and state databases`, Flags: []cli.Flag{ utils.DataDirFlag, utils.CacheFlag, - utils.LightModeFlag, + utils.SyncModeFlag, }, Category: "BLOCKCHAIN COMMANDS", Description: ` diff --git a/cmd/geth/main.go b/cmd/geth/main.go index d556ad92c343..a0638605132e 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -82,8 +82,6 @@ var ( utils.TxPoolAccountQueueFlag, utils.TxPoolGlobalQueueFlag, utils.TxPoolLifetimeFlag, - utils.FastSyncFlag, - utils.LightModeFlag, utils.SyncModeFlag, utils.GCModeFlag, utils.LightServFlag, @@ -96,12 +94,18 @@ var ( utils.ListenPortFlag, utils.MaxPeersFlag, utils.MaxPendingPeersFlag, - utils.EtherbaseFlag, - utils.GasPriceFlag, utils.MiningEnabledFlag, utils.MinerThreadsFlag, + utils.MinerLegacyThreadsFlag, utils.MinerNotifyFlag, - utils.TargetGasLimitFlag, + utils.MinerGasTargetFlag, + utils.MinerLegacyGasTargetFlag, + utils.MinerGasPriceFlag, + utils.MinerLegacyGasPriceFlag, + utils.MinerEtherbaseFlag, + utils.MinerLegacyEtherbaseFlag, + utils.MinerExtraDataFlag, + utils.MinerLegacyExtraDataFlag, utils.NATFlag, utils.NoDiscoverFlag, utils.DiscoveryV5Flag, @@ -122,7 +126,6 @@ var ( utils.NoCompactionFlag, utils.GpoBlocksFlag, utils.GpoPercentileFlag, - utils.ExtraDataFlag, configFileFlag, } @@ -324,7 +327,7 @@ func startNode(ctx *cli.Context, stack *node.Node) { // Start auxiliary services if enabled if ctx.GlobalBool(utils.MiningEnabledFlag.Name) || ctx.GlobalBool(utils.DeveloperFlag.Name) { // Mining only makes sense if a full Ethereum node is running - if ctx.GlobalBool(utils.LightModeFlag.Name) || ctx.GlobalString(utils.SyncModeFlag.Name) == "light" { + if ctx.GlobalString(utils.SyncModeFlag.Name) == "light" { utils.Fatalf("Light clients do not support mining") } var ethereum *eth.Ethereum @@ -332,7 +335,11 @@ func startNode(ctx *cli.Context, stack *node.Node) { utils.Fatalf("Ethereum service not running: %v", err) } // Use a reduced number of threads if requested - if threads := ctx.GlobalInt(utils.MinerThreadsFlag.Name); threads > 0 { + threads := ctx.GlobalInt(utils.MinerLegacyThreadsFlag.Name) + if ctx.GlobalIsSet(utils.MinerThreadsFlag.Name) { + threads = ctx.GlobalInt(utils.MinerThreadsFlag.Name) + } + if threads > 0 { type threaded interface { SetThreads(threads int) } @@ -341,7 +348,11 @@ func startNode(ctx *cli.Context, stack *node.Node) { } } // Set the gas price to the limits from the CLI and start mining - ethereum.TxPool().SetGasPrice(utils.GlobalBig(ctx, utils.GasPriceFlag.Name)) + gasprice := utils.GlobalBig(ctx, utils.MinerLegacyGasPriceFlag.Name) + if ctx.IsSet(utils.MinerGasPriceFlag.Name) { + gasprice = utils.GlobalBig(ctx, utils.MinerGasPriceFlag.Name) + } + ethereum.TxPool().SetGasPrice(gasprice) if err := ethereum.StartMining(true); err != nil { utils.Fatalf("Failed to start mining: %v", err) } diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index 9d63c68f7fba..9e18f7047279 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -186,10 +186,10 @@ var AppHelpFlagGroups = []flagGroup{ utils.MiningEnabledFlag, utils.MinerThreadsFlag, utils.MinerNotifyFlag, - utils.EtherbaseFlag, - utils.TargetGasLimitFlag, - utils.GasPriceFlag, - utils.ExtraDataFlag, + utils.MinerGasPriceFlag, + utils.MinerGasTargetFlag, + 
utils.MinerEtherbaseFlag,
+			utils.MinerExtraDataFlag,
 		},
 	},
 	{
@@ -231,8 +231,11 @@ var AppHelpFlagGroups = []flagGroup{
 	{
 		Name: "DEPRECATED",
 		Flags: []cli.Flag{
-			utils.FastSyncFlag,
-			utils.LightModeFlag,
+			utils.MinerLegacyThreadsFlag,
+			utils.MinerLegacyGasTargetFlag,
+			utils.MinerLegacyGasPriceFlag,
+			utils.MinerLegacyEtherbaseFlag,
+			utils.MinerLegacyExtraDataFlag,
 		},
 	},
 	{
diff --git a/cmd/puppeth/module_node.go b/cmd/puppeth/module_node.go
index a480a894ebb2..8ad41555e118 100644
--- a/cmd/puppeth/module_node.go
+++ b/cmd/puppeth/module_node.go
@@ -42,7 +42,7 @@ ADD genesis.json /genesis.json
 RUN \
   echo 'geth --cache 512 init /genesis.json' > geth.sh && \{{if .Unlock}}
 	echo 'mkdir -p /root/.ethereum/keystore/ && cp /signer.json /root/.ethereum/keystore/' >> geth.sh && \{{end}}
-	echo $'exec geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine --minerthreads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --targetgaslimit {{.GasTarget}} --gasprice {{.GasPrice}}' >> geth.sh
+	echo $'exec geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--miner.etherbase {{.Etherbase}} --mine --miner.threads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --miner.gastarget {{.GasTarget}} --miner.gasprice {{.GasPrice}}' >> geth.sh
 
 ENTRYPOINT ["/bin/sh", "geth.sh"]
 `
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index d6142f246c16..e3a8cc2eac9e 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -157,14 +157,6 @@ var (
 		Usage: "Document Root for HTTPClient file scheme",
 		Value: DirectoryString{homeDir()},
 	}
-	FastSyncFlag = cli.BoolFlag{
-		Name:  "fast",
-		Usage: "Enable fast syncing through state downloads (replaced by --syncmode)",
-	}
-	LightModeFlag = cli.BoolFlag{
-		Name:  "light",
-		Usage: "Enable light client mode (replaced by --syncmode)",
-	}
 	defaultSyncMode = eth.DefaultConfig.SyncMode
 	SyncModeFlag    = TextMarshalerFlag{
 		Name:  "syncmode",
@@ -321,29 +313,53 @@ var (
 		Usage: "Number of CPU threads to use for mining",
 		Value: 0,
 	}
+	MinerLegacyThreadsFlag = cli.IntFlag{
+		Name:  "minerthreads",
+		Usage: "Number of CPU threads to use for mining (deprecated, use --miner.threads)",
+		Value: 0,
+	}
 	MinerNotifyFlag = cli.StringFlag{
 		Name:  "miner.notify",
 		Usage: "Comma separated HTTP URL list to notify of new work packages",
 	}
-	TargetGasLimitFlag = cli.Uint64Flag{
+	MinerGasTargetFlag = cli.Uint64Flag{
+		Name:  "miner.gastarget",
+		Usage: "Target gas floor for mined blocks",
+		Value: params.GenesisGasLimit,
+	}
+	MinerLegacyGasTargetFlag = cli.Uint64Flag{
 		Name:  "targetgaslimit",
-		Usage: "Target gas limit sets the artificial target gas floor for the blocks to mine",
+		Usage: "Target gas floor for mined blocks (deprecated, use --miner.gastarget)",
 		Value: params.GenesisGasLimit,
 	}
-	EtherbaseFlag = cli.StringFlag{
-		Name:  "etherbase",
-		Usage: "Public address for block mining rewards (default = first account created)",
-		Value: "0",
+	MinerGasPriceFlag = BigFlag{
+		Name:  "miner.gasprice",
+		Usage: "Minimum gas price for mining a transaction",
+		Value: eth.DefaultConfig.GasPrice,
 	}
-	GasPriceFlag = BigFlag{
+	MinerLegacyGasPriceFlag = BigFlag{
 		Name:  "gasprice",
-		Usage: "Minimal gas price to accept for mining a transactions",
+		Usage: 
"Minimal gas price for mining a transactions (deprecated, use --miner.gasprice)", Value: eth.DefaultConfig.GasPrice, } - ExtraDataFlag = cli.StringFlag{ - Name: "extradata", + MinerEtherbaseFlag = cli.StringFlag{ + Name: "miner.etherbase", + Usage: "Public address for block mining rewards (default = first account)", + Value: "0", + } + MinerLegacyEtherbaseFlag = cli.StringFlag{ + Name: "etherbase", + Usage: "Public address for block mining rewards (default = first account, deprecated, use --miner.etherbase)", + Value: "0", + } + MinerExtraDataFlag = cli.StringFlag{ + Name: "miner.extradata", Usage: "Block extra data set by the miner (default = client version)", } + MinerLegacyExtraDataFlag = cli.StringFlag{ + Name: "extradata", + Usage: "Block extra data set by the miner (default = client version, deprecated, use --miner.extradata)", + } // Account settings UnlockedAccountFlag = cli.StringFlag{ Name: "unlock", @@ -813,10 +829,19 @@ func MakeAddress(ks *keystore.KeyStore, account string) (accounts.Account, error // setEtherbase retrieves the etherbase either from the directly specified // command line flags or from the keystore if CLI indexed. func setEtherbase(ctx *cli.Context, ks *keystore.KeyStore, cfg *eth.Config) { - if ctx.GlobalIsSet(EtherbaseFlag.Name) { - account, err := MakeAddress(ks, ctx.GlobalString(EtherbaseFlag.Name)) + // Extract the current etherbase, new flag overriding legacy one + var etherbase string + if ctx.GlobalIsSet(MinerLegacyEtherbaseFlag.Name) { + etherbase = ctx.GlobalString(MinerLegacyEtherbaseFlag.Name) + } + if ctx.GlobalIsSet(MinerEtherbaseFlag.Name) { + etherbase = ctx.GlobalString(MinerEtherbaseFlag.Name) + } + // Convert the etherbase into an address and configure it + if etherbase != "" { + account, err := MakeAddress(ks, etherbase) if err != nil { - Fatalf("Option %q: %v", EtherbaseFlag.Name, err) + Fatalf("Invalid miner etherbase: %v", err) } cfg.Etherbase = account.Address } @@ -847,7 +872,7 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) { setBootstrapNodes(ctx, cfg) setBootstrapNodesV5(ctx, cfg) - lightClient := ctx.GlobalBool(LightModeFlag.Name) || ctx.GlobalString(SyncModeFlag.Name) == "light" + lightClient := ctx.GlobalString(SyncModeFlag.Name) == "light" lightServer := ctx.GlobalInt(LightServFlag.Name) != 0 lightPeers := ctx.GlobalInt(LightPeersFlag.Name) @@ -1052,8 +1077,6 @@ func SetShhConfig(ctx *cli.Context, stack *node.Node, cfg *whisper.Config) { func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { // Avoid conflicting network flags checkExclusive(ctx, DeveloperFlag, TestnetFlag, RinkebyFlag) - checkExclusive(ctx, FastSyncFlag, LightModeFlag, SyncModeFlag) - checkExclusive(ctx, LightServFlag, LightModeFlag) checkExclusive(ctx, LightServFlag, SyncModeFlag, "light") ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) @@ -1062,13 +1085,8 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { setTxPool(ctx, &cfg.TxPool) setEthash(ctx, cfg) - switch { - case ctx.GlobalIsSet(SyncModeFlag.Name): + if ctx.GlobalIsSet(SyncModeFlag.Name) { cfg.SyncMode = *GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode) - case ctx.GlobalBool(FastSyncFlag.Name): - cfg.SyncMode = downloader.FastSync - case ctx.GlobalBool(LightModeFlag.Name): - cfg.SyncMode = downloader.LightSync } if ctx.GlobalIsSet(LightServFlag.Name) { cfg.LightServ = ctx.GlobalInt(LightServFlag.Name) @@ -1093,6 +1111,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { if 
ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) { cfg.TrieCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100 } + if ctx.GlobalIsSet(MinerLegacyThreadsFlag.Name) { + cfg.MinerThreads = ctx.GlobalInt(MinerLegacyThreadsFlag.Name) + } if ctx.GlobalIsSet(MinerThreadsFlag.Name) { cfg.MinerThreads = ctx.GlobalInt(MinerThreadsFlag.Name) } @@ -1102,11 +1123,17 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { if ctx.GlobalIsSet(DocRootFlag.Name) { cfg.DocRoot = ctx.GlobalString(DocRootFlag.Name) } - if ctx.GlobalIsSet(ExtraDataFlag.Name) { - cfg.ExtraData = []byte(ctx.GlobalString(ExtraDataFlag.Name)) + if ctx.GlobalIsSet(MinerLegacyExtraDataFlag.Name) { + cfg.ExtraData = []byte(ctx.GlobalString(MinerLegacyExtraDataFlag.Name)) } - if ctx.GlobalIsSet(GasPriceFlag.Name) { - cfg.GasPrice = GlobalBig(ctx, GasPriceFlag.Name) + if ctx.GlobalIsSet(MinerExtraDataFlag.Name) { + cfg.ExtraData = []byte(ctx.GlobalString(MinerExtraDataFlag.Name)) + } + if ctx.GlobalIsSet(MinerLegacyGasPriceFlag.Name) { + cfg.GasPrice = GlobalBig(ctx, MinerLegacyGasPriceFlag.Name) + } + if ctx.GlobalIsSet(MinerGasPriceFlag.Name) { + cfg.GasPrice = GlobalBig(ctx, MinerGasPriceFlag.Name) } if ctx.GlobalIsSet(VMEnableDebugFlag.Name) { // TODO(fjl): force-enable this in --dev mode @@ -1148,7 +1175,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { log.Info("Using developer account", "address", developer.Address) cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.GlobalInt(DeveloperPeriodFlag.Name)), developer.Address) - if !ctx.GlobalIsSet(GasPriceFlag.Name) { + if !ctx.GlobalIsSet(MinerGasPriceFlag.Name) && !ctx.GlobalIsSet(MinerLegacyGasPriceFlag.Name) { cfg.GasPrice = big.NewInt(1) } } @@ -1223,7 +1250,10 @@ func RegisterEthStatsService(stack *node.Node, url string) { // SetupNetwork configures the system for either the main net or some test network. func SetupNetwork(ctx *cli.Context) { // TODO(fjl): move target gas limit into config - params.TargetGasLimit = ctx.GlobalUint64(TargetGasLimitFlag.Name) + params.TargetGasLimit = ctx.GlobalUint64(MinerLegacyGasTargetFlag.Name) + if ctx.GlobalIsSet(MinerGasTargetFlag.Name) { + params.TargetGasLimit = ctx.GlobalUint64(MinerGasTargetFlag.Name) + } } func SetupMetrics(ctx *cli.Context) { @@ -1254,7 +1284,7 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database { handles = makeDatabaseHandles() ) name := "chaindata" - if ctx.GlobalBool(LightModeFlag.Name) { + if ctx.GlobalString(SyncModeFlag.Name) == "light" { name = "lightchaindata" } chainDb, err := stack.OpenDatabase(name, cache, handles) From 040aa2bb101e5e602308b24812bfbf2451b21174 Mon Sep 17 00:00:00 2001 From: gary rong Date: Wed, 15 Aug 2018 19:09:17 +0800 Subject: [PATCH 132/166] miner: streaming uncle blocks (#17320) * miner: stream uncle block * miner: polish --- miner/worker.go | 99 +++++++++++++++++++++++++------------------- miner/worker_test.go | 76 ++++++++++++++++++++++++++++++---- 2 files changed, 125 insertions(+), 50 deletions(-) diff --git a/miner/worker.go b/miner/worker.go index 81a63c29a4f9..fae480c84cca 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -213,8 +213,9 @@ type worker struct { running int32 // The indicator whether the consensus engine is running or not. 
 	// Test hooks
-	newTaskHook      func(*task) // Method to call upon receiving a new sealing task
-	fullTaskInterval func()      // Method to call before pushing the full sealing task
+	newTaskHook  func(*task)      // Method to call upon receiving a new sealing task
+	skipSealHook func(*task) bool // Method to decide whether to skip the sealing step
+	fullTaskHook func()           // Method to call before pushing the full sealing task
 }
 
 func newWorker(config *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux) *worker {
@@ -329,8 +330,32 @@ func (w *worker) mainLoop() {
 			w.commitNewWork()
 
 		case ev := <-w.chainSideCh:
+			if _, exist := w.possibleUncles[ev.Block.Hash()]; exist {
+				continue
+			}
 			// Add side block to possible uncle block set.
 			w.possibleUncles[ev.Block.Hash()] = ev.Block
+			// If our mining block contains fewer than 2 uncle blocks,
+			// add the new uncle block if valid and regenerate a mining block.
+			if w.isRunning() && w.current != nil && w.current.uncles.Cardinality() < 2 {
+				start := time.Now()
+				if err := w.commitUncle(w.current, ev.Block.Header()); err == nil {
+					var uncles []*types.Header
+					w.current.uncles.Each(func(item interface{}) bool {
+						hash, ok := item.(common.Hash)
+						if !ok {
+							return false
+						}
+						uncle, exist := w.possibleUncles[hash]
+						if !exist {
+							return false
+						}
+						uncles = append(uncles, uncle.Header())
+						return true
+					})
+					w.commit(uncles, nil, true, start)
+				}
+			}
 
 		case ev := <-w.txsCh:
 			// Apply transactions to the pending state if we're not mining.
@@ -378,6 +403,10 @@ func (w *worker) seal(t *task, stop <-chan struct{}) {
 		res *task
 	)
 
+	if w.skipSealHook != nil && w.skipSealHook(t) {
+		return
+	}
+
 	if t.block, err = w.engine.Seal(w.chain, t.block, stop); t.block != nil {
 		log.Info("Successfully sealed new block", "number", t.block.Number(), "hash", t.block.Hash(),
 			"elapsed", common.PrettyDuration(time.Since(t.createdAt)))
@@ -637,30 +666,9 @@ func (w *worker) commitNewWork() {
 		delete(w.possibleUncles, hash)
 	}
 
-	var (
-		emptyBlock, fullBlock *types.Block
-		emptyState, fullState *state.StateDB
-	)
-
 	// Create an empty block based on temporary copied state for sealing in advance without waiting block
 	// execution finished.
-	emptyState = env.state.Copy()
-	if emptyBlock, err = w.engine.Finalize(w.chain, header, emptyState, nil, uncles, nil); err != nil {
-		log.Error("Failed to finalize block for temporary sealing", "err", err)
-	} else {
-		// Push empty work in advance without applying pending transaction.
-		// The reason is transactions execution can cost a lot and sealer need to
-		// take advantage of this part time.
-		if w.isRunning() {
-			select {
-			case w.taskCh <- &task{receipts: nil, state: emptyState, block: emptyBlock, createdAt: time.Now()}:
-				log.Info("Commit new empty mining work", "number", emptyBlock.Number(), "uncles", len(uncles))
-			case <-w.exitCh:
-				log.Info("Worker has exited")
-				return
-			}
-		}
-	}
+	w.commit(uncles, nil, false, tstart)
 
 	// Fill the block with all available pending transactions.
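 	// (the empty block committed above already gives the sealer something to
 	// work on while the pending transactions below are executed)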
 	pending, err := w.eth.TxPool().Pending()
@@ -676,31 +684,38 @@ func (w *worker) commitNewWork() {
 	txs := types.NewTransactionsByPriceAndNonce(w.current.signer, pending)
 	env.commitTransactions(w.mux, txs, w.chain, w.coinbase)
 
-	// Create the full block to seal with the consensus engine
-	fullState = env.state.Copy()
-	if fullBlock, err = w.engine.Finalize(w.chain, header, fullState, env.txs, uncles, env.receipts); err != nil {
-		log.Error("Failed to finalize block for sealing", "err", err)
-		return
-	}
+	w.commit(uncles, w.fullTaskHook, true, tstart)
+}
+
+// commit runs any post-transaction state modifications, assembles the final block
+// and commits new work if the consensus engine is running.
+func (w *worker) commit(uncles []*types.Header, interval func(), update bool, start time.Time) error {
 	// Deep copy receipts here to avoid interaction between different tasks.
-	cpy := make([]*types.Receipt, len(env.receipts))
-	for i, l := range env.receipts {
-		cpy[i] = new(types.Receipt)
-		*cpy[i] = *l
+	receipts := make([]*types.Receipt, len(w.current.receipts))
+	for i, l := range w.current.receipts {
+		receipts[i] = new(types.Receipt)
+		*receipts[i] = *l
+	}
+	s := w.current.state.Copy()
+	block, err := w.engine.Finalize(w.chain, w.current.header, s, w.current.txs, uncles, w.current.receipts)
+	if err != nil {
+		return err
 	}
-	// We only care about logging if we're actually mining.
 	if w.isRunning() {
-		if w.fullTaskInterval != nil {
-			w.fullTaskInterval()
+		if interval != nil {
+			interval()
 		}
-
 		select {
-		case w.taskCh <- &task{receipts: cpy, state: fullState, block: fullBlock, createdAt: time.Now()}:
-			w.unconfirmed.Shift(fullBlock.NumberU64() - 1)
-			log.Info("Commit new full mining work", "number", fullBlock.Number(), "txs", env.tcount, "uncles", len(uncles), "elapsed", common.PrettyDuration(time.Since(tstart)))
+		case w.taskCh <- &task{receipts: receipts, state: s, block: block, createdAt: time.Now()}:
+			w.unconfirmed.Shift(block.NumberU64() - 1)
+			log.Info("Commit new mining work", "number", block.Number(), "txs", w.current.tcount, "uncles", len(uncles),
+				"elapsed", common.PrettyDuration(time.Since(start)))
 		case <-w.exitCh:
 			log.Info("Worker has exited")
 		}
 	}
-	w.updateSnapshot()
+	if update {
+		w.updateSnapshot()
+	}
+	return nil
 }
diff --git a/miner/worker_test.go b/miner/worker_test.go
index 5823a608ef6b..408c47e3b38b 100644
--- a/miner/worker_test.go
+++ b/miner/worker_test.go
@@ -59,7 +59,7 @@ func init() {
 	ethashChainConfig = params.TestChainConfig
 	cliqueChainConfig = params.TestChainConfig
 	cliqueChainConfig.Clique = &params.CliqueConfig{
-		Period: 1,
+		Period: 10,
 		Epoch:  30000,
 	}
 	tx1, _ := types.SignTx(types.NewTransaction(0, acc1Addr, big.NewInt(1000), params.TxGas, nil, nil), types.HomesteadSigner{}, testBankKey)
@@ -74,6 +74,7 @@ type testWorkerBackend struct {
 	txPool     *core.TxPool
 	chain      *core.BlockChain
 	testTxFeed event.Feed
+	uncleBlock *types.Block
 }
 
 func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) *testWorkerBackend {
@@ -93,15 +94,19 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine
 	default:
 		t.Fatal("unexpected consensus engine type")
 	}
-	gspec.MustCommit(db)
+	genesis := gspec.MustCommit(db)
 
 	chain, _ := core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{})
 	txpool := core.NewTxPool(testTxPoolConfig, chainConfig, chain)
 
+	blocks, _ := core.GenerateChain(chainConfig, genesis, engine, db, 1, func(i int, gen *core.BlockGen) {
+		gen.SetCoinbase(acc1Addr)
+	})
+
 	return &testWorkerBackend{
-		db:     db,
-		
chain: chain, - txPool: txpool, + db: db, + chain: chain, + txPool: txpool, + uncleBlock: blocks[0], } } @@ -188,7 +193,7 @@ func testEmptyWork(t *testing.T, chainConfig *params.ChainConfig, engine consens taskCh <- struct{}{} } } - w.fullTaskInterval = func() { + w.fullTaskHook = func() { time.Sleep(100 * time.Millisecond) } @@ -202,11 +207,66 @@ func testEmptyWork(t *testing.T, chainConfig *params.ChainConfig, engine consens w.start() for i := 0; i < 2; i += 1 { - to := time.NewTimer(time.Second) select { case <-taskCh: - case <-to.C: + case <-time.NewTimer(time.Second).C: t.Error("new task timeout") } } } + +func TestStreamUncleBlock(t *testing.T) { + ethash := ethash.NewFaker() + defer ethash.Close() + + w, b := newTestWorker(t, ethashChainConfig, ethash) + defer w.close() + + var taskCh = make(chan struct{}) + + taskIndex := 0 + w.newTaskHook = func(task *task) { + if task.block.NumberU64() == 1 { + if taskIndex == 2 { + has := task.block.Header().UncleHash + want := types.CalcUncleHash([]*types.Header{b.uncleBlock.Header()}) + if has != want { + t.Errorf("uncle hash mismatch, has %s, want %s", has.Hex(), want.Hex()) + } + } + taskCh <- struct{}{} + taskIndex += 1 + } + } + w.skipSealHook = func(task *task) bool { + return true + } + w.fullTaskHook = func() { + time.Sleep(100 * time.Millisecond) + } + + // Ensure worker has finished initialization + for { + b := w.pendingBlock() + if b != nil && b.NumberU64() == 1 { + break + } + } + + w.start() + // Ignore the first two works + for i := 0; i < 2; i += 1 { + select { + case <-taskCh: + case <-time.NewTimer(time.Second).C: + t.Error("new task timeout") + } + } + b.PostChainEvents([]interface{}{core.ChainSideEvent{Block: b.uncleBlock}}) + + select { + case <-taskCh: + case <-time.NewTimer(time.Second).C: + t.Error("new task timeout") + } +} From d8541a9f99c58d97ba4908c3a768e518f28d2441 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 15 Aug 2018 13:50:16 +0300 Subject: [PATCH 133/166] consensus/ethash: use DAGs for remote mining, generate async --- consensus/ethash/consensus.go | 50 ++++++++++++++++++++++++++++------- consensus/ethash/ethash.go | 46 +++++++++++++++++++++++++------- consensus/ethash/sealer.go | 11 ++++---- 3 files changed, 83 insertions(+), 24 deletions(-) diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index e18a06d52af0..86fd997ae440 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -461,6 +461,13 @@ func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int { // VerifySeal implements consensus.Engine, checking whether the given block satisfies // the PoW difficulty requirements. func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Header) error { + return ethash.verifySeal(chain, header, false) +} + +// verifySeal checks whether a block satisfies the PoW difficulty requirements, +// either using the usual ethash cache for it, or alternatively using a full DAG +// to make remote mining fast. 
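+//
+// (as wired up in this change: VerifySeal calls verifySeal with fulldag=false
+// for the cheap cache-based check, while the remote sealer passes fulldag=true)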
+func (ethash *Ethash) verifySeal(chain consensus.ChainReader, header *types.Header, fulldag bool) error { // If we're running a fake PoW, accept any seal as valid if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake { time.Sleep(ethash.fakeDelay) @@ -471,25 +478,48 @@ func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Head } // If we're running a shared PoW, delegate verification to it if ethash.shared != nil { - return ethash.shared.VerifySeal(chain, header) + return ethash.shared.verifySeal(chain, header, fulldag) } // Ensure that we have a valid difficulty for the block if header.Difficulty.Sign() <= 0 { return errInvalidDifficulty } - // Recompute the digest and PoW value and verify against the header + // Recompute the digest and PoW values number := header.Number.Uint64() - cache := ethash.cache(number) - size := datasetSize(number) - if ethash.config.PowMode == ModeTest { - size = 32 * 1024 + var ( + digest []byte + result []byte + ) + // If fast-but-heavy PoW verification was requested, use an ethash dataset + if fulldag { + dataset := ethash.dataset(number, true) + if dataset.generated() { + digest, result = hashimotoFull(dataset.dataset, header.HashNoNonce().Bytes(), header.Nonce.Uint64()) + + // Datasets are unmapped in a finalizer. Ensure that the dataset stays alive + // until after the call to hashimotoFull so it's not unmapped while being used. + runtime.KeepAlive(dataset) + } else { + // Dataset not yet generated, don't hang, use a cache instead + fulldag = false + } } - digest, result := hashimotoLight(size, cache.cache, header.HashNoNonce().Bytes(), header.Nonce.Uint64()) - // Caches are unmapped in a finalizer. Ensure that the cache stays live - // until after the call to hashimotoLight so it's not unmapped while being used. - runtime.KeepAlive(cache) + // If slow-but-light PoW verification was requested (or DAG not yet ready), use an ethash cache + if !fulldag { + cache := ethash.cache(number) + + size := datasetSize(number) + if ethash.config.PowMode == ModeTest { + size = 32 * 1024 + } + digest, result = hashimotoLight(size, cache.cache, header.HashNoNonce().Bytes(), header.Nonce.Uint64()) + // Caches are unmapped in a finalizer. Ensure that the cache stays alive + // until after the call to hashimotoLight so it's not unmapped while being used. + runtime.KeepAlive(cache) + } + // Verify the calculated values against the ones provided in the header if !bytes.Equal(header.MixDigest[:], digest) { return errInvalidMixDigest } diff --git a/consensus/ethash/ethash.go b/consensus/ethash/ethash.go index 19c94deb6bbf..d98c3371c560 100644 --- a/consensus/ethash/ethash.go +++ b/consensus/ethash/ethash.go @@ -29,6 +29,7 @@ import ( "runtime" "strconv" "sync" + "sync/atomic" "time" "unsafe" @@ -281,6 +282,7 @@ type dataset struct { mmap mmap.MMap // Memory map itself to unmap before releasing dataset []uint32 // The actual cache data content once sync.Once // Ensures the cache is generated only once + done uint32 // Atomic flag to determine generation status } // newDataset creates a new ethash mining dataset and returns it as a plain Go @@ -292,6 +294,9 @@ func newDataset(epoch uint64) interface{} { // generate ensures that the dataset content is generated before use. func (d *dataset) generate(dir string, limit int, test bool) { d.once.Do(func() { + // Mark the dataset generated after we're done. 
This is needed for remote mining (see generated below).
+		defer atomic.StoreUint32(&d.done, 1)
+
 		csize := cacheSize(d.epoch*epochLength + 1)
 		dsize := datasetSize(d.epoch*epochLength + 1)
 		seed := seedHash(d.epoch*epochLength + 1)
@@ -306,6 +311,8 @@ func (d *dataset) generate(dir string, limit int, test bool) {
 			d.dataset = make([]uint32, dsize/4)
 
 			generateDataset(d.dataset, d.epoch, cache)
+
+			return
 		}
 		// Disk storage is needed, this will get fancy
 		var endian string
@@ -348,6 +355,13 @@ func (d *dataset) generate(dir string, limit int, test bool) {
 	})
 }
 
+// generated returns whether this particular dataset finished generating already
+// or not (it may not have been started at all). This is useful for remote miners
+// to default to verification caches instead of blocking on DAG generations.
+func (d *dataset) generated() bool {
+	return atomic.LoadUint32(&d.done) == 1
+}
+
 // finalizer closes any file handlers and memory maps open.
 func (d *dataset) finalizer() {
 	if d.mmap != nil {
@@ -589,20 +603,34 @@ func (ethash *Ethash) cache(block uint64) *cache {
 // dataset tries to retrieve a mining dataset for the specified block number
 // by first checking against a list of in-memory datasets, then against DAGs
 // stored on disk, and finally generating one if none can be found.
-func (ethash *Ethash) dataset(block uint64) *dataset {
+//
+// If async is specified, not only the future but also the current DAG is
+// generated on a background thread.
+func (ethash *Ethash) dataset(block uint64, async bool) *dataset {
+	// Retrieve the requested ethash dataset
 	epoch := block / epochLength
 	currentI, futureI := ethash.datasets.get(epoch)
 	current := currentI.(*dataset)
 
-	// Wait for generation finish.
-	current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
-
-	// If we need a new future dataset, now's a good time to regenerate it.
- if futureI != nil { - future := futureI.(*dataset) - go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest) + // If async is specified, generate everything in a background thread + if async && !current.generated() { + go func() { + current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest) + + if futureI != nil { + future := futureI.(*dataset) + future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest) + } + }() + } else { + // Either blocking generation was requested, or already done + current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest) + + if futureI != nil { + future := futureI.(*dataset) + go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest) + } } - return current } diff --git a/consensus/ethash/sealer.go b/consensus/ethash/sealer.go index 03d84847392e..c3b2c86d1202 100644 --- a/consensus/ethash/sealer.go +++ b/consensus/ethash/sealer.go @@ -114,7 +114,7 @@ func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan s hash = header.HashNoNonce().Bytes() target = new(big.Int).Div(two256, header.Difficulty) number = header.Number.Uint64() - dataset = ethash.dataset(number) + dataset = ethash.dataset(number, false) ) // Start generating random nonces until we abort or find a good one var ( @@ -233,21 +233,22 @@ func (ethash *Ethash) remote(notify []string) { log.Info("Work submitted but none pending", "hash", hash) return false } - // Verify the correctness of submitted result. header := block.Header() header.Nonce = nonce header.MixDigest = mixDigest - if err := ethash.VerifySeal(nil, header); err != nil { - log.Warn("Invalid proof-of-work submitted", "hash", hash, "err", err) + + start := time.Now() + if err := ethash.verifySeal(nil, header, true); err != nil { + log.Warn("Invalid proof-of-work submitted", "hash", hash, "elapsed", time.Since(start), "err", err) return false } - // Make sure the result channel is created. if ethash.resultCh == nil { log.Warn("Ethash result channel is empty, submitted mining result is rejected") return false } + log.Trace("Verified correct proof-of-work", "hash", hash, "elapsed", time.Since(start)) // Solutions seems to be valid, return to the miner and notify acceptance. 
 		select {
From e8752f4e9f9be3d2932cd4835a5d72d17ac2338b Mon Sep 17 00:00:00 2001
From: Elad
Date: Wed, 15 Aug 2018 17:41:52 +0200
Subject: [PATCH 134/166] cmd/swarm, swarm: added access control functionality
 (#17404)

Co-authored-by: Janos Guljas
Co-authored-by: Anton Evangelatov
Co-authored-by: Balint Gabor
---
 cmd/swarm/access.go             | 219 ++++++++++++
 cmd/swarm/access_test.go        | 581 ++++++++++++++++++++++++++++++++
 cmd/swarm/config.go             |   1 +
 cmd/swarm/download.go           |  40 ++-
 cmd/swarm/list.go               |   2 +-
 cmd/swarm/main.go               |  95 +++++-
 cmd/swarm/run_test.go           |  25 +-
 swarm/api/act.go                | 468 +++++++++++++++++++++++++
 swarm/api/api.go                | 129 +++++--
 swarm/api/api_test.go           |  68 +++-
 swarm/api/client/client.go      |  48 ++-
 swarm/api/client/client_test.go |   4 +-
 swarm/api/encrypt.go            |  76 +++++
 swarm/api/filesystem.go         |   4 +-
 swarm/api/filesystem_test.go    |   4 +-
 swarm/api/http/middleware.go    |  12 +-
 swarm/api/http/response.go      |   2 +-
 swarm/api/http/server.go        | 111 +++---
 swarm/api/manifest.go           |  69 ++--
 swarm/api/manifest_test.go      |   8 +-
 swarm/api/storage.go            |   6 +-
 swarm/api/uri.go                |  13 +
 swarm/fuse/swarmfs_test.go      |   2 +-
 swarm/network_test.go           |   2 +-
 swarm/sctx/sctx.go              |  15 +
 swarm/swarm.go                  |   4 +-
 swarm/testutil/http.go          |   2 +-
 27 files changed, 1826 insertions(+), 184 deletions(-)
 create mode 100644 cmd/swarm/access.go
 create mode 100644 cmd/swarm/access_test.go
 create mode 100644 swarm/api/act.go
 create mode 100644 swarm/api/encrypt.go

diff --git a/cmd/swarm/access.go b/cmd/swarm/access.go
new file mode 100644
index 000000000000..12cfbfc1a46b
--- /dev/null
+++ b/cmd/swarm/access.go
@@ -0,0 +1,219 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+package main
+
+import (
+	"crypto/rand"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"strings"
+
+	"github.com/ethereum/go-ethereum/cmd/utils"
+	"github.com/ethereum/go-ethereum/swarm/api"
+	"github.com/ethereum/go-ethereum/swarm/api/client"
+	"gopkg.in/urfave/cli.v1"
+)
+
+var salt = make([]byte, 32)
+
+func init() {
+	if _, err := io.ReadFull(rand.Reader, salt); err != nil {
+		panic("reading from crypto/rand failed: " + err.Error())
+	}
+}
+
+func accessNewPass(ctx *cli.Context) {
+	args := ctx.Args()
+	if len(args) != 1 {
+		utils.Fatalf("Expected 1 argument - the ref")
+	}
+
+	var (
+		ae        *api.AccessEntry
+		accessKey []byte
+		err       error
+		ref       = args[0]
+		password  = getPassPhrase("", 0, makePasswordList(ctx))
+		dryRun    = ctx.Bool(SwarmDryRunFlag.Name)
+	)
+	accessKey, ae, err = api.DoPasswordNew(ctx, password, salt)
+	if err != nil {
+		utils.Fatalf("error getting session key: %v", err)
+	}
+	m, err := api.GenerateAccessControlManifest(ctx, ref, accessKey, ae)
+	if err != nil {
+		utils.Fatalf("error generating root access manifest: %v", err)
+	}
+	if dryRun {
+		err = printManifests(m, nil)
+		if err != nil {
+			utils.Fatalf("had an error printing the manifests: %v", err)
+		}
+	} else {
+		err = uploadManifests(ctx, m, nil)
+		if err != nil {
+			utils.Fatalf("had an error uploading the manifests: %v", err)
+		}
+	}
+}
+
+func accessNewPK(ctx *cli.Context) {
+	args := ctx.Args()
+	if len(args) != 1 {
+		utils.Fatalf("Expected 1 argument - the ref")
+	}
+
+	var (
+		ae               *api.AccessEntry
+		sessionKey       []byte
+		err              error
+		ref              = args[0]
+		privateKey       = getPrivKey(ctx)
+		granteePublicKey = ctx.String(SwarmAccessGrantKeyFlag.Name)
+		dryRun           = ctx.Bool(SwarmDryRunFlag.Name)
+	)
+	sessionKey, ae, err = api.DoPKNew(ctx, privateKey, granteePublicKey, salt)
+	if err != nil {
+		utils.Fatalf("error getting session key: %v", err)
+	}
+	m, err := api.GenerateAccessControlManifest(ctx, ref, sessionKey, ae)
+	if err != nil {
+		utils.Fatalf("error generating root access manifest: %v", err)
+	}
+	if dryRun {
+		err = printManifests(m, nil)
+		if err != nil {
+			utils.Fatalf("had an error printing the manifests: %v", err)
+		}
+	} else {
+		err = uploadManifests(ctx, m, nil)
+		if err != nil {
+			utils.Fatalf("had an error uploading the manifests: %v", err)
+		}
+	}
+}
+
+func accessNewACT(ctx *cli.Context) {
+	args := ctx.Args()
+	if len(args) != 1 {
+		utils.Fatalf("Expected 1 argument - the ref")
+	}
+
+	var (
+		ae          *api.AccessEntry
+		actManifest *api.Manifest
+		accessKey   []byte
+		err         error
+		ref         = args[0]
+		grantees    = []string{}
+		actFilename = ctx.String(SwarmAccessGrantKeysFlag.Name)
+		privateKey  = getPrivKey(ctx)
+		dryRun      = ctx.Bool(SwarmDryRunFlag.Name)
+	)
+
+	bytes, err := ioutil.ReadFile(actFilename)
+	if err != nil {
+		utils.Fatalf("had an error reading the grantee public key list")
+	}
+	grantees = strings.Split(string(bytes), "\n")
+	accessKey, ae, actManifest, err = api.DoACTNew(ctx, privateKey, salt, grantees)
+	if err != nil {
+		utils.Fatalf("error generating ACT manifest: %v", err)
+	}
+
+	m, err := api.GenerateAccessControlManifest(ctx, ref, accessKey, ae)
+	if err != nil {
+		utils.Fatalf("error generating root access manifest: %v", err)
+	}
+
+	if dryRun {
+		err = printManifests(m, actManifest)
+		if err != nil {
+			utils.Fatalf("had an error printing the manifests: %v", err)
+		}
+	} else {
+		err = uploadManifests(ctx, m, actManifest)
+		if err != nil {
+			utils.Fatalf("had an error uploading the manifests: %v", err)
+		}
+	}
+}
+
+func printManifests(rootAccessManifest, actManifest *api.Manifest) error {
+	js, err := 
+
+func printManifests(rootAccessManifest, actManifest *api.Manifest) error {
+	js, err := json.Marshal(rootAccessManifest)
+	if err != nil {
+		return err
+	}
+	fmt.Println(string(js))
+
+	if actManifest != nil {
+		js, err := json.Marshal(actManifest)
+		if err != nil {
+			return err
+		}
+		fmt.Println(string(js))
+	}
+	return nil
+}
+
+func uploadManifests(ctx *cli.Context, rootAccessManifest, actManifest *api.Manifest) error {
+	bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
+	client := client.NewClient(bzzapi)
+
+	var (
+		key string
+		err error
+	)
+	if actManifest != nil {
+		key, err = client.UploadManifest(actManifest, false)
+		if err != nil {
+			return err
+		}
+
+		rootAccessManifest.Entries[0].Access.Act = key
+	}
+	key, err = client.UploadManifest(rootAccessManifest, false)
+	if err != nil {
+		return err
+	}
+	fmt.Println(key)
+	return nil
+}
+
+// makePasswordList reads password lines from the file specified by the global --password flag
+// and also by the same subcommand --password flag.
+// This function is a fork of utils.MakePasswordList that also looks up the subcommand's cli context.
+// Function ctx.SetGlobal is not setting the global flag value that can be accessed
+// by ctx.GlobalString using the current version of the cli package.
+func makePasswordList(ctx *cli.Context) []string {
+	path := ctx.GlobalString(utils.PasswordFileFlag.Name)
+	if path == "" {
+		path = ctx.String(utils.PasswordFileFlag.Name)
+		if path == "" {
+			return nil
+		}
+	}
+	text, err := ioutil.ReadFile(path)
+	if err != nil {
+		utils.Fatalf("Failed to read password file: %v", err)
+	}
+	lines := strings.Split(string(text), "\n")
+	// Sanitise DOS line endings.
+	for i := range lines {
+		lines[i] = strings.TrimRight(lines[i], "\r")
+	}
+	return lines
+}
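Putting the new subcommands together, the intended workflow looks roughly as follows, a hedged sketch mirroring the tests below; file names and placeholders are illustrative:

    # upload encrypted content; prints a 128-hex-character reference
    swarm up --encrypt data.txt

    # wrap the reference behind a password; --dry-run prints the root manifest instead of uploading it
    swarm access new pass --dry-run --password password.txt <ref>

    # wrap the reference for a single grantee, identified by a compressed public key
    swarm access new pk --dry-run --grant-key <hex-compressed-pubkey> <ref>

    # wrap the reference for a whole grantee list (ACT)
    swarm access new act --dry-run --grant-keys grantees.txt <ref>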
diff --git a/cmd/swarm/access_test.go b/cmd/swarm/access_test.go
new file mode 100644
index 000000000000..163eb2b4d6f2
--- /dev/null
+++ b/cmd/swarm/access_test.go
@@ -0,0 +1,581 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+package main
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/hex"
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	gorand "math/rand"
+	"net/http"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/crypto/sha3"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/swarm/api"
+	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
+)
+
+// TestAccessPassword tests for the correct creation of an ACT manifest protected by a password.
+// The test creates bogus content, uploads it encrypted, then creates the wrapping manifest with the Access entry.
+// The publisher node uploads to a second node and then disappears; the uploaded content is afterwards
+// fetched through the second node. Since the tested code is not key-aware, we can simply fetch from the
+// second node using HTTP basic auth.
+func TestAccessPassword(t *testing.T) {
+	cluster := newTestCluster(t, 1)
+	defer cluster.Shutdown()
+	proxyNode := cluster.Nodes[0]
+
+	// create a tmp file
+	tmp, err := ioutil.TempDir("", "swarm-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmp)
+
+	// write data to file
+	data := "notsorandomdata"
+	dataFilename := filepath.Join(tmp, "data.txt")
+
+	err = ioutil.WriteFile(dataFilename, []byte(data), 0666)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	hashRegexp := `[a-f\d]{128}`
+
+	// upload the file with 'swarm up' and expect a hash
+	up := runSwarm(t,
+		"--bzzapi",
+		proxyNode.URL, // it doesn't matter through which node we upload content
+		"up",
+		"--encrypt",
+		dataFilename)
+	_, matches := up.ExpectRegexp(hashRegexp)
+	up.ExpectExit()
+
+	if len(matches) < 1 {
+		t.Fatal("no matches found")
+	}
+
+	ref := matches[0]
+
+	password := "smth"
+	passwordFilename := filepath.Join(tmp, "password.txt")
+
+	err = ioutil.WriteFile(passwordFilename, []byte(password), 0666)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	up = runSwarm(t,
+		"access",
+		"new",
+		"pass",
+		"--dry-run",
+		"--password",
+		passwordFilename,
+		ref,
+	)
+
+	_, matches = up.ExpectRegexp(".+")
+	up.ExpectExit()
+
+	if len(matches) == 0 {
+		t.Fatalf("stdout not matched")
+	}
+
+	var m api.Manifest
+
+	err = json.Unmarshal([]byte(matches[0]), &m)
+	if err != nil {
+		t.Fatalf("unmarshal manifest: %v", err)
+	}
+
+	if len(m.Entries) != 1 {
+		t.Fatalf("expected one manifest entry, got %v", len(m.Entries))
+	}
+
+	e := m.Entries[0]
+
+	ct := "application/bzz-manifest+json"
+	if e.ContentType != ct {
+		t.Errorf("expected %q content type, got %q", ct, e.ContentType)
+	}
+
+	if e.Access == nil {
+		t.Fatal("manifest access is nil")
+	}
+
+	a := e.Access
+
+	if a.Type != "pass" {
+		t.Errorf(`got access type %q, expected "pass"`, a.Type)
+	}
+	if len(a.Salt) < 32 {
+		t.Errorf(`got salt with length %v, expected at least 32 bytes`, len(a.Salt))
+	}
+	if a.KdfParams == nil {
+		t.Fatal("manifest access kdf params is nil")
+	}
+
+	client := swarm.NewClient(cluster.Nodes[0].URL)
+
+	hash, err := client.UploadManifest(&m, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	httpClient := &http.Client{}
+
+	url := cluster.Nodes[0].URL + "/" + "bzz:/" + hash
+	response, err := httpClient.Get(url)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if response.StatusCode != http.StatusUnauthorized {
+		t.Fatalf("expected status %v, got %v", http.StatusUnauthorized, response.StatusCode)
+	}
+	authHeader := response.Header.Get("WWW-Authenticate")
+	if authHeader == "" {
+		t.Fatal("expected WWW-Authenticate header to be set")
+	}
+
+	req, err := http.NewRequest(http.MethodGet, url, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	req.SetBasicAuth("", password)
+
+	response, err = http.DefaultClient.Do(req)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer response.Body.Close()
+
+	if response.StatusCode != http.StatusOK {
+		t.Errorf("expected status %v, got %v", http.StatusOK, response.StatusCode)
+	}
+	d, err := ioutil.ReadAll(response.Body)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if string(d) != data {
+		t.Errorf("expected decrypted data %q, got %q", data, string(d))
+	}
+
+	wrongPasswordFilename := filepath.Join(tmp, "password-wrong.txt")
+
+	err = ioutil.WriteFile(wrongPasswordFilename, []byte("just wr0ng"), 0666)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// download the file with 'swarm down' using the wrong password
+	up = runSwarm(t,
+		"--bzzapi",
+		proxyNode.URL,
+		"down",
+		"bzz:/"+hash,
+		tmp,
+		"--password",
+		wrongPasswordFilename)
+
+	_, matches = up.ExpectRegexp("unauthorized")
+	if len(matches) != 1 || matches[0] != "unauthorized" {
+		t.Fatal(`"unauthorized" not found in output`)
+	}
+	up.ExpectExit()
+}
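The password handshake exercised above can be driven from any HTTP client: an unauthenticated GET yields 401 with a WWW-Authenticate challenge, and the password is then resent in the basic-auth password field with an empty username. A standalone sketch; the gateway URL, hash placeholder and password are illustrative:

    package main

    import (
        "fmt"
        "io/ioutil"
        "net/http"
    )

    func main() {
        // <hash> stands for a root access manifest reference (illustrative).
        url := "http://localhost:8500/bzz:/<hash>/"
        req, err := http.NewRequest(http.MethodGet, url, nil)
        if err != nil {
            panic(err)
        }
        // Empty username, password in the basic-auth password field.
        req.SetBasicAuth("", "smth")
        res, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        defer res.Body.Close()
        body, _ := ioutil.ReadAll(res.Body)
        fmt.Println(res.Status, string(body))
    }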
+
+// TestAccessPK tests for the correct creation of an ACT manifest between two parties (publisher and grantee).
+// The test creates bogus content, uploads it encrypted, then creates the wrapping manifest with the Access entry.
+// The publisher node uploads to a second node (which is also the grantee) and then disappears; the uploaded
+// content is afterwards fetched through the grantee's HTTP proxy. Since the tested code is private-key aware,
+// the test will fail if the proxy's given private key is not granted on the ACT.
+func TestAccessPK(t *testing.T) {
+	// Setup Swarm and upload a test file to it
+	cluster := newTestCluster(t, 1)
+	defer cluster.Shutdown()
+
+	// create a tmp file
+	tmp, err := ioutil.TempFile("", "swarm-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer tmp.Close()
+	defer os.Remove(tmp.Name())
+
+	// write data to file
+	data := "notsorandomdata"
+	_, err = io.WriteString(tmp, data)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	hashRegexp := `[a-f\d]{128}`
+
+	// upload the file with 'swarm up' and expect a hash
+	up := runSwarm(t,
+		"--bzzapi",
+		cluster.Nodes[0].URL,
+		"up",
+		"--encrypt",
+		tmp.Name())
+	_, matches := up.ExpectRegexp(hashRegexp)
+	up.ExpectExit()
+
+	if len(matches) < 1 {
+		t.Fatal("no matches found")
+	}
+
+	ref := matches[0]
+
+	pk := cluster.Nodes[0].PrivateKey
+	granteePubKey := crypto.CompressPubkey(&pk.PublicKey)
+
+	publisherDir, err := ioutil.TempDir("", "swarm-account-dir-temp")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	passFile, err := ioutil.TempFile("", "swarm-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer passFile.Close()
+	defer os.Remove(passFile.Name())
+	_, err = io.WriteString(passFile, testPassphrase)
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, publisherAccount := getTestAccount(t, publisherDir)
+	up = runSwarm(t,
+		"--bzzaccount",
+		publisherAccount.Address.String(),
+		"--password",
+		passFile.Name(),
+		"--datadir",
+		publisherDir,
+		"--bzzapi",
+		cluster.Nodes[0].URL,
+		"access",
+		"new",
+		"pk",
+		"--dry-run",
+		"--grant-key",
+		hex.EncodeToString(granteePubKey),
+		ref,
+	)
+
+	_, matches = up.ExpectRegexp(".+")
+	up.ExpectExit()
+
+	if len(matches) == 0 {
+		t.Fatalf("stdout not matched")
+	}
+
+	var m api.Manifest
+
+	err = json.Unmarshal([]byte(matches[0]), &m)
+	if err != nil {
+		t.Fatalf("unmarshal manifest: %v", err)
+	}
+
+	if len(m.Entries) != 1 {
+		t.Fatalf("expected one manifest entry, got %v", len(m.Entries))
+	}
+
+	e := m.Entries[0]
+
+	ct := "application/bzz-manifest+json"
+	if e.ContentType != ct {
+		t.Errorf("expected %q content type, got %q", ct, e.ContentType)
+	}
+
+	if e.Access == nil {
+		t.Fatal("manifest access is nil")
+	}
+
+	a := e.Access
+
+	if a.Type != "pk" {
+		t.Errorf(`got access type %q, expected "pk"`, a.Type)
+	}
+	if len(a.Salt) < 32 {
+		t.Errorf(`got salt with length %v, expected at least 32 bytes`, len(a.Salt))
+	}
+	if a.KdfParams != nil {
+		t.Fatal("manifest access kdf params should be nil")
+	}
+
+	client := swarm.NewClient(cluster.Nodes[0].URL)
+
+	hash, err := client.UploadManifest(&m, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	httpClient := &http.Client{}
+
+	url := cluster.Nodes[0].URL + "/" + "bzz:/" + hash
+	response, err := httpClient.Get(url)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if response.StatusCode != http.StatusOK {
+		t.Fatalf("expected status %v, got %v", http.StatusOK, response.StatusCode)
+	}
+	d, err :=
ioutil.ReadAll(response.Body) + if err != nil { + t.Fatal(err) + } + if string(d) != data { + t.Errorf("expected decrypted data %q, got %q", data, string(d)) + } +} + +// TestAccessACT tests the e2e creation, uploading and downloading of an ACT type access control +// the test fires up a 3 node cluster, then randomly picks 2 nodes which will be acting as grantees to the data +// set. the third node should fail decoding the reference as it will not be granted access. the publisher uploads through +// one of the nodes then disappears. +func TestAccessACT(t *testing.T) { + // Setup Swarm and upload a test file to it + cluster := newTestCluster(t, 3) + defer cluster.Shutdown() + + var uploadThroughNode = cluster.Nodes[0] + client := swarm.NewClient(uploadThroughNode.URL) + + r1 := gorand.New(gorand.NewSource(time.Now().UnixNano())) + nodeToSkip := r1.Intn(3) // a number between 0 and 2 (node indices in `cluster`) + // create a tmp file + tmp, err := ioutil.TempFile("", "swarm-test") + if err != nil { + t.Fatal(err) + } + defer tmp.Close() + defer os.Remove(tmp.Name()) + + // write data to file + data := "notsorandomdata" + _, err = io.WriteString(tmp, data) + if err != nil { + t.Fatal(err) + } + + hashRegexp := `[a-f\d]{128}` + + // upload the file with 'swarm up' and expect a hash + up := runSwarm(t, + "--bzzapi", + cluster.Nodes[0].URL, + "up", + "--encrypt", + tmp.Name()) + _, matches := up.ExpectRegexp(hashRegexp) + up.ExpectExit() + + if len(matches) < 1 { + t.Fatal("no matches found") + } + + ref := matches[0] + grantees := []string{} + for i, v := range cluster.Nodes { + if i == nodeToSkip { + continue + } + pk := v.PrivateKey + granteePubKey := crypto.CompressPubkey(&pk.PublicKey) + grantees = append(grantees, hex.EncodeToString(granteePubKey)) + } + + granteesPubkeyListFile, err := ioutil.TempFile("", "grantees-pubkey-list.csv") + if err != nil { + t.Fatal(err) + } + + _, err = granteesPubkeyListFile.WriteString(strings.Join(grantees, "\n")) + if err != nil { + t.Fatal(err) + } + + defer granteesPubkeyListFile.Close() + defer os.Remove(granteesPubkeyListFile.Name()) + + publisherDir, err := ioutil.TempDir("", "swarm-account-dir-temp") + if err != nil { + t.Fatal(err) + } + + passFile, err := ioutil.TempFile("", "swarm-test") + if err != nil { + t.Fatal(err) + } + defer passFile.Close() + defer os.Remove(passFile.Name()) + _, err = io.WriteString(passFile, testPassphrase) + if err != nil { + t.Fatal(err) + } + + _, publisherAccount := getTestAccount(t, publisherDir) + up = runSwarm(t, + "--bzzaccount", + publisherAccount.Address.String(), + "--password", + passFile.Name(), + "--datadir", + publisherDir, + "--bzzapi", + cluster.Nodes[0].URL, + "access", + "new", + "act", + "--grant-keys", + granteesPubkeyListFile.Name(), + ref, + ) + + _, matches = up.ExpectRegexp(`[a-f\d]{64}`) + up.ExpectExit() + + if len(matches) == 0 { + t.Fatalf("stdout not matched") + } + hash := matches[0] + m, _, err := client.DownloadManifest(hash) + if err != nil { + t.Fatalf("unmarshal manifest: %v", err) + } + + if len(m.Entries) != 1 { + t.Fatalf("expected one manifest entry, got %v", len(m.Entries)) + } + + e := m.Entries[0] + + ct := "application/bzz-manifest+json" + if e.ContentType != ct { + t.Errorf("expected %q content type, got %q", ct, e.ContentType) + } + + if e.Access == nil { + t.Fatal("manifest access is nil") + } + + a := e.Access + + if a.Type != "act" { + t.Fatalf(`got access type %q, expected "act"`, a.Type) + } + if len(a.Salt) < 32 { + t.Fatalf(`got salt with length %v, expected not less 
than 32 bytes`, len(a.Salt))
+	}
+	if a.KdfParams != nil {
+		t.Fatal("manifest access kdf params should be nil")
+	}
+
+	httpClient := &http.Client{}
+
+	// all nodes except the skipped node should be able to decrypt the content
+	for i, node := range cluster.Nodes {
+		log.Debug("trying to fetch from node", "node index", i)
+
+		url := node.URL + "/" + "bzz:/" + hash
+		response, err := httpClient.Get(url)
+		if err != nil {
+			t.Fatal(err)
+		}
+		log.Debug("got response from node", "response code", response.StatusCode)
+
+		if i == nodeToSkip {
+			log.Debug("reached node to skip", "status code", response.StatusCode)
+
+			if response.StatusCode != http.StatusUnauthorized {
+				t.Fatalf("expected status %v, got %v", http.StatusUnauthorized, response.StatusCode)
+			}
+
+			continue
+		}
+
+		if response.StatusCode != http.StatusOK {
+			t.Fatalf("expected status %v, got %v", http.StatusOK, response.StatusCode)
+		}
+		d, err := ioutil.ReadAll(response.Body)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if string(d) != data {
+			t.Errorf("expected decrypted data %q, got %q", data, string(d))
+		}
+	}
+}
+
+// TestKeypairSanity is a sanity test for the crypto scheme for ACT. It asserts the correct shared secret according to
+// the specs at https://github.com/ethersphere/swarm-docs/blob/eb857afda906c6e7bb90d37f3f334ccce5eef230/act.md
+func TestKeypairSanity(t *testing.T) {
+	salt := make([]byte, 32)
+	if _, err := io.ReadFull(rand.Reader, salt); err != nil {
+		t.Fatalf("reading from crypto/rand failed: %v", err.Error())
+	}
+	sharedSecret := "a85586744a1ddd56a7ed9f33fa24f40dd745b3a941be296a0d60e329dbdb896d"
+
+	for i, v := range []struct {
+		publisherPriv string
+		granteePub    string
+	}{
+		{
+			publisherPriv: "ec5541555f3bc6376788425e9d1a62f55a82901683fd7062c5eddcc373a73459",
+			granteePub:    "0226f213613e843a413ad35b40f193910d26eb35f00154afcde9ded57479a6224a",
+		},
+		{
+			publisherPriv: "70c7a73011aa56584a0009ab874794ee7e5652fd0c6911cd02f8b6267dd82d2d",
+			granteePub:    "02e6f8d5e28faaa899744972bb847b6eb805a160494690c9ee7197ae9f619181db",
+		},
+	} {
+		b, _ := hex.DecodeString(v.granteePub)
+		granteePub, _ := crypto.DecompressPubkey(b)
+		publisherPrivate, _ := crypto.HexToECDSA(v.publisherPriv)
+
+		ssKey, err := api.NewSessionKeyPK(publisherPrivate, granteePub, salt)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		hasher := sha3.NewKeccak256()
+		hasher.Write(salt)
+		shared, err := hex.DecodeString(sharedSecret)
+		if err != nil {
+			t.Fatal(err)
+		}
+		hasher.Write(shared)
+		sum := hasher.Sum(nil)
+
+		if !bytes.Equal(ssKey, sum) {
+			t.Fatalf("%d: got a session key mismatch", i)
+		}
+	}
+}
diff --git a/cmd/swarm/config.go b/cmd/swarm/config.go
index cda8c41c32cd..1183f8bc8169 100644
--- a/cmd/swarm/config.go
+++ b/cmd/swarm/config.go
@@ -78,6 +78,7 @@ const (
 	SWARM_ENV_STORE_PATH           = "SWARM_STORE_PATH"
 	SWARM_ENV_STORE_CAPACITY       = "SWARM_STORE_CAPACITY"
 	SWARM_ENV_STORE_CACHE_CAPACITY = "SWARM_STORE_CACHE_CAPACITY"
+	SWARM_ACCESS_PASSWORD          = "SWARM_ACCESS_PASSWORD"
 	GETH_ENV_DATADIR               = "GETH_DATADIR"
 )
diff --git a/cmd/swarm/download.go b/cmd/swarm/download.go
index c2418f744c84..91bc2c93abc3 100644
--- a/cmd/swarm/download.go
+++ b/cmd/swarm/download.go
@@ -68,18 +68,36 @@ func download(ctx *cli.Context) {
 		utils.Fatalf("could not parse uri argument: %v", err)
 	}
 
-	// assume behaviour according to --recursive switch
-	if isRecursive {
-		if err := client.DownloadDirectory(uri.Addr, uri.Path, dest); err != nil {
-			utils.Fatalf("encoutered an error while downloading directory: %v", err)
-		}
-	} else {
-		// we are downloading a file
-		log.Debug(fmt.Sprintf("downloading file/path from a manifest. 
hash: %s, path:%s", uri.Addr, uri.Path)) + dl := func(credentials string) error { + // assume behaviour according to --recursive switch + if isRecursive { + if err := client.DownloadDirectory(uri.Addr, uri.Path, dest, credentials); err != nil { + if err == swarm.ErrUnauthorized { + return err + } + return fmt.Errorf("directory %s: %v", uri.Path, err) + } + } else { + // we are downloading a file + log.Debug("downloading file/path from a manifest", "uri.Addr", uri.Addr, "uri.Path", uri.Path) - err := client.DownloadFile(uri.Addr, uri.Path, dest) - if err != nil { - utils.Fatalf("could not download %s from given address: %s. error: %v", uri.Path, uri.Addr, err) + err := client.DownloadFile(uri.Addr, uri.Path, dest, credentials) + if err != nil { + if err == swarm.ErrUnauthorized { + return err + } + return fmt.Errorf("file %s from address: %s: %v", uri.Path, uri.Addr, err) + } } + return nil + } + if passwords := makePasswordList(ctx); passwords != nil { + password := getPassPhrase(fmt.Sprintf("Downloading %s is restricted", uri), 0, passwords) + err = dl(password) + } else { + err = dl("") + } + if err != nil { + utils.Fatalf("download: %v", err) } } diff --git a/cmd/swarm/list.go b/cmd/swarm/list.go index 57b5517c6ef5..01b3f4ab6c5b 100644 --- a/cmd/swarm/list.go +++ b/cmd/swarm/list.go @@ -44,7 +44,7 @@ func list(ctx *cli.Context) { bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") client := swarm.NewClient(bzzapi) - list, err := client.List(manifest, prefix) + list, err := client.List(manifest, prefix, "") if err != nil { utils.Fatalf("Failed to generate file and directory list: %s", err) } diff --git a/cmd/swarm/main.go b/cmd/swarm/main.go index ac09ae998156..76be60cb683c 100644 --- a/cmd/swarm/main.go +++ b/cmd/swarm/main.go @@ -155,6 +155,14 @@ var ( Name: "defaultpath", Usage: "path to file served for empty url path (none)", } + SwarmAccessGrantKeyFlag = cli.StringFlag{ + Name: "grant-key", + Usage: "grants a given public key access to an ACT", + } + SwarmAccessGrantKeysFlag = cli.StringFlag{ + Name: "grant-keys", + Usage: "grants a given list of public keys in the following file (separated by line breaks) access to an ACT", + } SwarmUpFromStdinFlag = cli.BoolFlag{ Name: "stdin", Usage: "reads data to be uploaded from stdin", @@ -167,6 +175,15 @@ var ( Name: "encrypt", Usage: "use encrypted upload", } + SwarmAccessPasswordFlag = cli.StringFlag{ + Name: "password", + Usage: "Password", + EnvVar: SWARM_ACCESS_PASSWORD, + } + SwarmDryRunFlag = cli.BoolFlag{ + Name: "dry-run", + Usage: "dry-run", + } CorsStringFlag = cli.StringFlag{ Name: "corsdomain", Usage: "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')", @@ -252,6 +269,61 @@ func init() { Flags: []cli.Flag{SwarmEncryptedFlag}, Description: "uploads a file or directory to swarm using the HTTP API and prints the root hash", }, + { + CustomHelpTemplate: helpTemplate, + Name: "access", + Usage: "encrypts a reference and embeds it into a root manifest", + ArgsUsage: "", + Description: "encrypts a reference and embeds it into a root manifest", + Subcommands: []cli.Command{ + { + CustomHelpTemplate: helpTemplate, + Name: "new", + Usage: "encrypts a reference and embeds it into a root manifest", + ArgsUsage: "", + Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest", + Subcommands: []cli.Command{ + { + Action: accessNewPass, + CustomHelpTemplate: helpTemplate, + Flags: []cli.Flag{ + 
utils.PasswordFileFlag, + SwarmDryRunFlag, + }, + Name: "pass", + Usage: "encrypts a reference with a password and embeds it into a root manifest", + ArgsUsage: "", + Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest", + }, + { + Action: accessNewPK, + CustomHelpTemplate: helpTemplate, + Flags: []cli.Flag{ + utils.PasswordFileFlag, + SwarmDryRunFlag, + SwarmAccessGrantKeyFlag, + }, + Name: "pk", + Usage: "encrypts a reference with the node's private key and a given grantee's public key and embeds it into a root manifest", + ArgsUsage: "", + Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest", + }, + { + Action: accessNewACT, + CustomHelpTemplate: helpTemplate, + Flags: []cli.Flag{ + SwarmAccessGrantKeysFlag, + SwarmDryRunFlag, + }, + Name: "act", + Usage: "encrypts a reference with the node's private key and a given grantee's public key and embeds it into a root manifest", + ArgsUsage: "", + Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest", + }, + }, + }, + }, + }, { CustomHelpTemplate: helpTemplate, Name: "resource", @@ -304,16 +376,13 @@ func init() { Description: "Prints the swarm hash of file or directory", }, { - Action: download, - Name: "down", - Flags: []cli.Flag{SwarmRecursiveFlag}, - Usage: "downloads a swarm manifest or a file inside a manifest", - ArgsUsage: " []", - Description: ` -Downloads a swarm bzz uri to the given dir. When no dir is provided, working directory is assumed. --recursive flag is expected when downloading a manifest with multiple entries. -`, + Action: download, + Name: "down", + Flags: []cli.Flag{SwarmRecursiveFlag, SwarmAccessPasswordFlag}, + Usage: "downloads a swarm manifest or a file inside a manifest", + ArgsUsage: " []", + Description: `Downloads a swarm bzz uri to the given dir. When no dir is provided, working directory is assumed. --recursive flag is expected when downloading a manifest with multiple entries.`, }, - { Name: "manifest", CustomHelpTemplate: helpTemplate, @@ -413,16 +482,14 @@ pv(1) tool to get a progress bar: Name: "import", Usage: "import chunks from a tar archive into a local chunk database (use - to read from stdin)", ArgsUsage: " ", - Description: ` -Import chunks from a tar archive into a local chunk database (use - to read from stdin). + Description: `Import chunks from a tar archive into a local chunk database (use - to read from stdin). 
swarm db import ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar The import may be quite large, consider piping the input through the Unix pv(1) tool to get a progress bar: - pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks - -`, + pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -`, }, { Action: dbClean, @@ -535,6 +602,7 @@ func version(ctx *cli.Context) error { func bzzd(ctx *cli.Context) error { //build a valid bzzapi.Config from all available sources: //default config, file config, command line and env vars + bzzconfig, err := buildConfig(ctx) if err != nil { utils.Fatalf("unable to configure swarm: %v", err) @@ -557,6 +625,7 @@ func bzzd(ctx *cli.Context) error { if err != nil { utils.Fatalf("can't create node: %v", err) } + //a few steps need to be done after the config phase is completed, //due to overriding behavior initSwarmNode(bzzconfig, stack, ctx) diff --git a/cmd/swarm/run_test.go b/cmd/swarm/run_test.go index 90d3c98ba688..3e766dc10d58 100644 --- a/cmd/swarm/run_test.go +++ b/cmd/swarm/run_test.go @@ -18,10 +18,12 @@ package main import ( "context" + "crypto/ecdsa" "fmt" "io/ioutil" "net" "os" + "path" "path/filepath" "runtime" "sync" @@ -175,14 +177,15 @@ func (c *testCluster) Cleanup() { } type testNode struct { - Name string - Addr string - URL string - Enode string - Dir string - IpcPath string - Client *rpc.Client - Cmd *cmdtest.TestCmd + Name string + Addr string + URL string + Enode string + Dir string + IpcPath string + PrivateKey *ecdsa.PrivateKey + Client *rpc.Client + Cmd *cmdtest.TestCmd } const testPassphrase = "swarm-test-passphrase" @@ -289,7 +292,11 @@ func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode { func newTestNode(t *testing.T, dir string) *testNode { conf, account := getTestAccount(t, dir) - node := &testNode{Dir: dir} + ks := keystore.NewKeyStore(path.Join(dir, "keystore"), 1<<18, 1) + + pk := decryptStoreAccount(ks, account.Address.Hex(), []string{testPassphrase}) + + node := &testNode{Dir: dir, PrivateKey: pk} // assign ports ports, err := getAvailableTCPPorts(2) diff --git a/swarm/api/act.go b/swarm/api/act.go new file mode 100644 index 000000000000..b1a59478315f --- /dev/null +++ b/swarm/api/act.go @@ -0,0 +1,468 @@ +package api + +import ( + "context" + "crypto/ecdsa" + "crypto/rand" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "strings" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/ecies" + "github.com/ethereum/go-ethereum/crypto/sha3" + "github.com/ethereum/go-ethereum/swarm/log" + "github.com/ethereum/go-ethereum/swarm/sctx" + "github.com/ethereum/go-ethereum/swarm/storage" + "golang.org/x/crypto/scrypt" + cli "gopkg.in/urfave/cli.v1" +) + +var ( + ErrDecrypt = errors.New("cant decrypt - forbidden") + ErrUnknownAccessType = errors.New("unknown access type (or not implemented)") + ErrDecryptDomainForbidden = errors.New("decryption request domain forbidden - can only decrypt on localhost") + AllowedDecryptDomains = []string{ + "localhost", + "127.0.0.1", + } +) + +const EMPTY_CREDENTIALS = "" + +type AccessEntry struct { + Type AccessType + Publisher string + Salt []byte + Act string + KdfParams *KdfParams +} + +type DecryptFunc func(*ManifestEntry) error + +func (a *AccessEntry) MarshalJSON() (out []byte, err error) { + + return json.Marshal(struct { + Type AccessType `json:"type,omitempty"` + Publisher string 
`json:"publisher,omitempty"` + Salt string `json:"salt,omitempty"` + Act string `json:"act,omitempty"` + KdfParams *KdfParams `json:"kdf_params,omitempty"` + }{ + Type: a.Type, + Publisher: a.Publisher, + Salt: hex.EncodeToString(a.Salt), + Act: a.Act, + KdfParams: a.KdfParams, + }) + +} + +func (a *AccessEntry) UnmarshalJSON(value []byte) error { + v := struct { + Type AccessType `json:"type,omitempty"` + Publisher string `json:"publisher,omitempty"` + Salt string `json:"salt,omitempty"` + Act string `json:"act,omitempty"` + KdfParams *KdfParams `json:"kdf_params,omitempty"` + }{} + + err := json.Unmarshal(value, &v) + if err != nil { + return err + } + a.Act = v.Act + a.KdfParams = v.KdfParams + a.Publisher = v.Publisher + a.Salt, err = hex.DecodeString(v.Salt) + if err != nil { + return err + } + if len(a.Salt) != 32 { + return errors.New("salt should be 32 bytes long") + } + a.Type = v.Type + return nil +} + +type KdfParams struct { + N int `json:"n"` + P int `json:"p"` + R int `json:"r"` +} + +type AccessType string + +const AccessTypePass = AccessType("pass") +const AccessTypePK = AccessType("pk") +const AccessTypeACT = AccessType("act") + +func NewAccessEntryPassword(salt []byte, kdfParams *KdfParams) (*AccessEntry, error) { + if len(salt) != 32 { + return nil, fmt.Errorf("salt should be 32 bytes long") + } + return &AccessEntry{ + Type: AccessTypePass, + Salt: salt, + KdfParams: kdfParams, + }, nil +} + +func NewAccessEntryPK(publisher string, salt []byte) (*AccessEntry, error) { + if len(publisher) != 66 { + return nil, fmt.Errorf("publisher should be 66 characters long, got %d", len(publisher)) + } + if len(salt) != 32 { + return nil, fmt.Errorf("salt should be 32 bytes long") + } + return &AccessEntry{ + Type: AccessTypePK, + Publisher: publisher, + Salt: salt, + }, nil +} + +func NewAccessEntryACT(publisher string, salt []byte, act string) (*AccessEntry, error) { + if len(salt) != 32 { + return nil, fmt.Errorf("salt should be 32 bytes long") + } + if len(publisher) != 66 { + return nil, fmt.Errorf("publisher should be 66 characters long") + } + + return &AccessEntry{ + Type: AccessTypeACT, + Publisher: publisher, + Salt: salt, + Act: act, + }, nil +} + +func NOOPDecrypt(*ManifestEntry) error { + return nil +} + +var DefaultKdfParams = NewKdfParams(262144, 1, 8) + +func NewKdfParams(n, p, r int) *KdfParams { + + return &KdfParams{ + N: n, + P: p, + R: r, + } +} + +// NewSessionKeyPassword creates a session key based on a shared secret (password) and the given salt +// and kdf parameters in the access entry +func NewSessionKeyPassword(password string, accessEntry *AccessEntry) ([]byte, error) { + if accessEntry.Type != AccessTypePass { + return nil, errors.New("incorrect access entry type") + } + return scrypt.Key( + []byte(password), + accessEntry.Salt, + accessEntry.KdfParams.N, + accessEntry.KdfParams.R, + accessEntry.KdfParams.P, + 32, + ) +} + +// NewSessionKeyPK creates a new ACT Session Key using an ECDH shared secret for the given key pair and the given salt value +func NewSessionKeyPK(private *ecdsa.PrivateKey, public *ecdsa.PublicKey, salt []byte) ([]byte, error) { + granteePubEcies := ecies.ImportECDSAPublic(public) + privateKey := ecies.ImportECDSA(private) + + bytes, err := privateKey.GenerateShared(granteePubEcies, 16, 16) + if err != nil { + return nil, err + } + bytes = append(salt, bytes...) 
+ sessionKey := crypto.Keccak256(bytes) + return sessionKey, nil +} + +func (a *API) NodeSessionKey(privateKey *ecdsa.PrivateKey, publicKey *ecdsa.PublicKey, salt []byte) ([]byte, error) { + return NewSessionKeyPK(privateKey, publicKey, salt) +} +func (a *API) doDecrypt(ctx context.Context, credentials string, pk *ecdsa.PrivateKey) DecryptFunc { + return func(m *ManifestEntry) error { + if m.Access == nil { + return nil + } + + allowed := false + requestDomain := sctx.GetHost(ctx) + for _, v := range AllowedDecryptDomains { + if strings.Contains(requestDomain, v) { + allowed = true + } + } + + if !allowed { + return ErrDecryptDomainForbidden + } + + switch m.Access.Type { + case "pass": + if credentials != "" { + key, err := NewSessionKeyPassword(credentials, m.Access) + if err != nil { + return err + } + + ref, err := hex.DecodeString(m.Hash) + if err != nil { + return err + } + + enc := NewRefEncryption(len(ref) - 8) + decodedRef, err := enc.Decrypt(ref, key) + if err != nil { + return ErrDecrypt + } + + m.Hash = hex.EncodeToString(decodedRef) + m.Access = nil + return nil + } + return ErrDecrypt + case "pk": + publisherBytes, err := hex.DecodeString(m.Access.Publisher) + if err != nil { + return ErrDecrypt + } + publisher, err := crypto.DecompressPubkey(publisherBytes) + if err != nil { + return ErrDecrypt + } + key, err := a.NodeSessionKey(pk, publisher, m.Access.Salt) + if err != nil { + return ErrDecrypt + } + ref, err := hex.DecodeString(m.Hash) + if err != nil { + return err + } + + enc := NewRefEncryption(len(ref) - 8) + decodedRef, err := enc.Decrypt(ref, key) + if err != nil { + return ErrDecrypt + } + + m.Hash = hex.EncodeToString(decodedRef) + m.Access = nil + return nil + case "act": + publisherBytes, err := hex.DecodeString(m.Access.Publisher) + if err != nil { + return ErrDecrypt + } + publisher, err := crypto.DecompressPubkey(publisherBytes) + if err != nil { + return ErrDecrypt + } + + sessionKey, err := a.NodeSessionKey(pk, publisher, m.Access.Salt) + if err != nil { + return ErrDecrypt + } + + hasher := sha3.NewKeccak256() + hasher.Write(append(sessionKey, 0)) + lookupKey := hasher.Sum(nil) + + hasher.Reset() + + hasher.Write(append(sessionKey, 1)) + accessKeyDecryptionKey := hasher.Sum(nil) + + lk := hex.EncodeToString(lookupKey) + list, err := a.GetManifestList(ctx, NOOPDecrypt, storage.Address(common.Hex2Bytes(m.Access.Act)), lk) + + found := "" + for _, v := range list.Entries { + if v.Path == lk { + found = v.Hash + } + } + + if found == "" { + return ErrDecrypt + } + + v, err := hex.DecodeString(found) + if err != nil { + return err + } + enc := NewRefEncryption(len(v) - 8) + decodedRef, err := enc.Decrypt(v, accessKeyDecryptionKey) + if err != nil { + return ErrDecrypt + } + + ref, err := hex.DecodeString(m.Hash) + if err != nil { + return err + } + + enc = NewRefEncryption(len(ref) - 8) + decodedMainRef, err := enc.Decrypt(ref, decodedRef) + if err != nil { + return ErrDecrypt + } + m.Hash = hex.EncodeToString(decodedMainRef) + m.Access = nil + return nil + } + return ErrUnknownAccessType + } +} + +func GenerateAccessControlManifest(ctx *cli.Context, ref string, accessKey []byte, ae *AccessEntry) (*Manifest, error) { + refBytes, err := hex.DecodeString(ref) + if err != nil { + return nil, err + } + // encrypt ref with accessKey + enc := NewRefEncryption(len(refBytes)) + encrypted, err := enc.Encrypt(refBytes, accessKey) + if err != nil { + return nil, err + } + + m := &Manifest{ + Entries: []ManifestEntry{ + { + Hash: hex.EncodeToString(encrypted), + 
ContentType: ManifestType, + ModTime: time.Now(), + Access: ae, + }, + }, + } + + return m, nil +} + +func DoPKNew(ctx *cli.Context, privateKey *ecdsa.PrivateKey, granteePublicKey string, salt []byte) (sessionKey []byte, ae *AccessEntry, err error) { + if granteePublicKey == "" { + return nil, nil, errors.New("need a grantee Public Key") + } + b, err := hex.DecodeString(granteePublicKey) + if err != nil { + log.Error("error decoding grantee public key", "err", err) + return nil, nil, err + } + + granteePub, err := crypto.DecompressPubkey(b) + if err != nil { + log.Error("error decompressing grantee public key", "err", err) + return nil, nil, err + } + + sessionKey, err = NewSessionKeyPK(privateKey, granteePub, salt) + if err != nil { + log.Error("error getting session key", "err", err) + return nil, nil, err + } + + ae, err = NewAccessEntryPK(hex.EncodeToString(crypto.CompressPubkey(&privateKey.PublicKey)), salt) + if err != nil { + log.Error("error generating access entry", "err", err) + return nil, nil, err + } + + return sessionKey, ae, nil +} + +func DoACTNew(ctx *cli.Context, privateKey *ecdsa.PrivateKey, salt []byte, grantees []string) (accessKey []byte, ae *AccessEntry, actManifest *Manifest, err error) { + if len(grantees) == 0 { + return nil, nil, nil, errors.New("did not get any grantee public keys") + } + + publisherPub := hex.EncodeToString(crypto.CompressPubkey(&privateKey.PublicKey)) + grantees = append(grantees, publisherPub) + + accessKey = make([]byte, 32) + if _, err := io.ReadFull(rand.Reader, salt); err != nil { + panic("reading from crypto/rand failed: " + err.Error()) + } + if _, err := io.ReadFull(rand.Reader, accessKey); err != nil { + panic("reading from crypto/rand failed: " + err.Error()) + } + + lookupPathEncryptedAccessKeyMap := make(map[string]string) + i := 0 + for _, v := range grantees { + i++ + if v == "" { + return nil, nil, nil, errors.New("need a grantee Public Key") + } + b, err := hex.DecodeString(v) + if err != nil { + log.Error("error decoding grantee public key", "err", err) + return nil, nil, nil, err + } + + granteePub, err := crypto.DecompressPubkey(b) + if err != nil { + log.Error("error decompressing grantee public key", "err", err) + return nil, nil, nil, err + } + sessionKey, err := NewSessionKeyPK(privateKey, granteePub, salt) + + hasher := sha3.NewKeccak256() + hasher.Write(append(sessionKey, 0)) + lookupKey := hasher.Sum(nil) + + hasher.Reset() + hasher.Write(append(sessionKey, 1)) + + accessKeyEncryptionKey := hasher.Sum(nil) + + enc := NewRefEncryption(len(accessKey)) + encryptedAccessKey, err := enc.Encrypt(accessKey, accessKeyEncryptionKey) + + lookupPathEncryptedAccessKeyMap[hex.EncodeToString(lookupKey)] = hex.EncodeToString(encryptedAccessKey) + } + + m := &Manifest{ + Entries: []ManifestEntry{}, + } + + for k, v := range lookupPathEncryptedAccessKeyMap { + m.Entries = append(m.Entries, ManifestEntry{ + Path: k, + Hash: v, + ContentType: "text/plain", + }) + } + + ae, err = NewAccessEntryACT(hex.EncodeToString(crypto.CompressPubkey(&privateKey.PublicKey)), salt, "") + if err != nil { + return nil, nil, nil, err + } + + return accessKey, ae, m, nil +} + +func DoPasswordNew(ctx *cli.Context, password string, salt []byte) (sessionKey []byte, ae *AccessEntry, err error) { + ae, err = NewAccessEntryPassword(salt, DefaultKdfParams) + if err != nil { + return nil, nil, err + } + + sessionKey, err = NewSessionKeyPassword(password, ae) + if err != nil { + return nil, nil, err + } + return sessionKey, ae, nil +} diff --git a/swarm/api/api.go 
b/swarm/api/api.go index 99d971b105ce..adf469cfaa82 100644 --- a/swarm/api/api.go +++ b/swarm/api/api.go @@ -19,6 +19,9 @@ package api import ( "archive/tar" "context" + "crypto/ecdsa" + "encoding/hex" + "errors" "fmt" "io" "math/big" @@ -43,6 +46,10 @@ import ( opentracing "github.com/opentracing/opentracing-go" ) +var ( + ErrNotFound = errors.New("not found") +) + var ( apiResolveCount = metrics.NewRegisteredCounter("api.resolve.count", nil) apiResolveFail = metrics.NewRegisteredCounter("api.resolve.fail", nil) @@ -227,14 +234,18 @@ type API struct { resource *mru.Handler fileStore *storage.FileStore dns Resolver + Decryptor func(context.Context, string) DecryptFunc } // NewAPI the api constructor initialises a new API instance. -func NewAPI(fileStore *storage.FileStore, dns Resolver, resourceHandler *mru.Handler) (self *API) { +func NewAPI(fileStore *storage.FileStore, dns Resolver, resourceHandler *mru.Handler, pk *ecdsa.PrivateKey) (self *API) { self = &API{ fileStore: fileStore, dns: dns, resource: resourceHandler, + Decryptor: func(ctx context.Context, credentials string) DecryptFunc { + return self.doDecrypt(ctx, credentials, pk) + }, } return } @@ -260,8 +271,30 @@ func (a *API) Store(ctx context.Context, data io.Reader, size int64, toEncrypt b // ErrResolve is returned when an URI cannot be resolved from ENS. type ErrResolve error +// Resolve a name into a content-addressed hash +// where address could be an ENS name, or a content addressed hash +func (a *API) Resolve(ctx context.Context, address string) (storage.Address, error) { + // if DNS is not configured, return an error + if a.dns == nil { + if hashMatcher.MatchString(address) { + return common.Hex2Bytes(address), nil + } + apiResolveFail.Inc(1) + return nil, fmt.Errorf("no DNS to resolve name: %q", address) + } + // try and resolve the address + resolved, err := a.dns.Resolve(address) + if err != nil { + if hashMatcher.MatchString(address) { + return common.Hex2Bytes(address), nil + } + return nil, err + } + return resolved[:], nil +} + // Resolve resolves a URI to an Address using the MultiResolver. 
-func (a *API) Resolve(ctx context.Context, uri *URI) (storage.Address, error) { +func (a *API) ResolveURI(ctx context.Context, uri *URI, credentials string) (storage.Address, error) { apiResolveCount.Inc(1) log.Trace("resolving", "uri", uri.Addr) @@ -280,28 +313,44 @@ func (a *API) Resolve(ctx context.Context, uri *URI) (storage.Address, error) { return key, nil } - // if DNS is not configured, check if the address is a hash - if a.dns == nil { - key := uri.Address() - if key == nil { - apiResolveFail.Inc(1) - return nil, fmt.Errorf("no DNS to resolve name: %q", uri.Addr) - } - return key, nil + addr, err := a.Resolve(ctx, uri.Addr) + if err != nil { + return nil, err } - // try and resolve the address - resolved, err := a.dns.Resolve(uri.Addr) - if err == nil { - return resolved[:], nil + if uri.Path == "" { + return addr, nil } - - key := uri.Address() - if key == nil { - apiResolveFail.Inc(1) + walker, err := a.NewManifestWalker(ctx, addr, a.Decryptor(ctx, credentials), nil) + if err != nil { return nil, err } - return key, nil + var entry *ManifestEntry + walker.Walk(func(e *ManifestEntry) error { + // if the entry matches the path, set entry and stop + // the walk + if e.Path == uri.Path { + entry = e + // return an error to cancel the walk + return errors.New("found") + } + // ignore non-manifest files + if e.ContentType != ManifestType { + return nil + } + // if the manifest's path is a prefix of the + // requested path, recurse into it by returning + // nil and continuing the walk + if strings.HasPrefix(uri.Path, e.Path) { + return nil + } + return ErrSkipManifest + }) + if entry == nil { + return nil, errors.New("not found") + } + addr = storage.Address(common.Hex2Bytes(entry.Hash)) + return addr, nil } // Put provides singleton manifest creation on top of FileStore store @@ -332,10 +381,10 @@ func (a *API) Put(ctx context.Context, content string, contentType string, toEnc // Get uses iterative manifest retrieval and prefix matching // to resolve basePath to content using FileStore retrieve // it returns a section reader, mimeType, status, the key of the actual content and an error -func (a *API) Get(ctx context.Context, manifestAddr storage.Address, path string) (reader storage.LazySectionReader, mimeType string, status int, contentAddr storage.Address, err error) { +func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage.Address, path string) (reader storage.LazySectionReader, mimeType string, status int, contentAddr storage.Address, err error) { log.Debug("api.get", "key", manifestAddr, "path", path) apiGetCount.Inc(1) - trie, err := loadManifest(ctx, a.fileStore, manifestAddr, nil) + trie, err := loadManifest(ctx, a.fileStore, manifestAddr, nil, decrypt) if err != nil { apiGetNotFound.Inc(1) status = http.StatusNotFound @@ -347,6 +396,16 @@ func (a *API) Get(ctx context.Context, manifestAddr storage.Address, path string if entry != nil { log.Debug("trie got entry", "key", manifestAddr, "path", path, "entry.Hash", entry.Hash) + + if entry.ContentType == ManifestType { + log.Debug("entry is manifest", "key", manifestAddr, "new key", entry.Hash) + adr, err := hex.DecodeString(entry.Hash) + if err != nil { + return nil, "", 0, nil, err + } + return a.Get(ctx, decrypt, adr, entry.Path) + } + // we need to do some extra work if this is a mutable resource manifest if entry.ContentType == ResourceContentType { @@ -398,7 +457,7 @@ func (a *API) Get(ctx context.Context, manifestAddr storage.Address, path string log.Trace("resource is multihash", "key", 
manifestAddr) // get the manifest the multihash digest points to - trie, err := loadManifest(ctx, a.fileStore, manifestAddr, nil) + trie, err := loadManifest(ctx, a.fileStore, manifestAddr, nil, decrypt) if err != nil { apiGetNotFound.Inc(1) status = http.StatusNotFound @@ -451,7 +510,7 @@ func (a *API) Delete(ctx context.Context, addr string, path string) (storage.Add apiDeleteFail.Inc(1) return nil, err } - key, err := a.Resolve(ctx, uri) + key, err := a.ResolveURI(ctx, uri, EMPTY_CREDENTIALS) if err != nil { return nil, err @@ -470,13 +529,13 @@ func (a *API) Delete(ctx context.Context, addr string, path string) (storage.Add // GetDirectoryTar fetches a requested directory as a tarstream // it returns an io.Reader and an error. Do not forget to Close() the returned ReadCloser -func (a *API) GetDirectoryTar(ctx context.Context, uri *URI) (io.ReadCloser, error) { +func (a *API) GetDirectoryTar(ctx context.Context, decrypt DecryptFunc, uri *URI) (io.ReadCloser, error) { apiGetTarCount.Inc(1) - addr, err := a.Resolve(ctx, uri) + addr, err := a.Resolve(ctx, uri.Addr) if err != nil { return nil, err } - walker, err := a.NewManifestWalker(ctx, addr, nil) + walker, err := a.NewManifestWalker(ctx, addr, decrypt, nil) if err != nil { apiGetTarFail.Inc(1) return nil, err @@ -542,9 +601,9 @@ func (a *API) GetDirectoryTar(ctx context.Context, uri *URI) (io.ReadCloser, err // GetManifestList lists the manifest entries for the specified address and prefix // and returns it as a ManifestList -func (a *API) GetManifestList(ctx context.Context, addr storage.Address, prefix string) (list ManifestList, err error) { +func (a *API) GetManifestList(ctx context.Context, decryptor DecryptFunc, addr storage.Address, prefix string) (list ManifestList, err error) { apiManifestListCount.Inc(1) - walker, err := a.NewManifestWalker(ctx, addr, nil) + walker, err := a.NewManifestWalker(ctx, addr, decryptor, nil) if err != nil { apiManifestListFail.Inc(1) return ManifestList{}, err @@ -631,7 +690,7 @@ func (a *API) UpdateManifest(ctx context.Context, addr storage.Address, update f func (a *API) Modify(ctx context.Context, addr storage.Address, path, contentHash, contentType string) (storage.Address, error) { apiModifyCount.Inc(1) quitC := make(chan bool) - trie, err := loadManifest(ctx, a.fileStore, addr, quitC) + trie, err := loadManifest(ctx, a.fileStore, addr, quitC, NOOPDecrypt) if err != nil { apiModifyFail.Inc(1) return nil, err @@ -663,7 +722,7 @@ func (a *API) AddFile(ctx context.Context, mhash, path, fname string, content [] apiAddFileFail.Inc(1) return nil, "", err } - mkey, err := a.Resolve(ctx, uri) + mkey, err := a.ResolveURI(ctx, uri, EMPTY_CREDENTIALS) if err != nil { apiAddFileFail.Inc(1) return nil, "", err @@ -770,7 +829,7 @@ func (a *API) RemoveFile(ctx context.Context, mhash string, path string, fname s apiRmFileFail.Inc(1) return "", err } - mkey, err := a.Resolve(ctx, uri) + mkey, err := a.ResolveURI(ctx, uri, EMPTY_CREDENTIALS) if err != nil { apiRmFileFail.Inc(1) return "", err @@ -837,7 +896,7 @@ func (a *API) AppendFile(ctx context.Context, mhash, path, fname string, existin apiAppendFileFail.Inc(1) return nil, "", err } - mkey, err := a.Resolve(ctx, uri) + mkey, err := a.ResolveURI(ctx, uri, EMPTY_CREDENTIALS) if err != nil { apiAppendFileFail.Inc(1) return nil, "", err @@ -891,13 +950,13 @@ func (a *API) BuildDirectoryTree(ctx context.Context, mhash string, nameresolver if err != nil { return nil, nil, err } - addr, err = a.Resolve(ctx, uri) + addr, err = a.Resolve(ctx, uri.Addr) if err != 
nil { return nil, nil, err } quitC := make(chan bool) - rootTrie, err := loadManifest(ctx, a.fileStore, addr, quitC) + rootTrie, err := loadManifest(ctx, a.fileStore, addr, quitC, NOOPDecrypt) if err != nil { return nil, nil, fmt.Errorf("can't load manifest %v: %v", addr.String(), err) } @@ -955,7 +1014,7 @@ func (a *API) ResourceHashSize() int { // ResolveResourceManifest retrieves the Mutable Resource manifest for the given address, and returns the address of the metadata chunk. func (a *API) ResolveResourceManifest(ctx context.Context, addr storage.Address) (storage.Address, error) { - trie, err := loadManifest(ctx, a.fileStore, addr, nil) + trie, err := loadManifest(ctx, a.fileStore, addr, nil, NOOPDecrypt) if err != nil { return nil, fmt.Errorf("cannot load resource manifest: %v", err) } diff --git a/swarm/api/api_test.go b/swarm/api/api_test.go index 78fab9508a70..a65bf07e2e2d 100644 --- a/swarm/api/api_test.go +++ b/swarm/api/api_test.go @@ -19,6 +19,7 @@ package api import ( "context" "errors" + "flag" "fmt" "io" "io/ioutil" @@ -28,10 +29,17 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/swarm/log" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/swarm/sctx" "github.com/ethereum/go-ethereum/swarm/storage" ) +func init() { + loglevel := flag.Int("loglevel", 2, "loglevel") + flag.Parse() + log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))) +} + func testAPI(t *testing.T, f func(*API, bool)) { datadir, err := ioutil.TempDir("", "bzz-test") if err != nil { @@ -42,7 +50,7 @@ func testAPI(t *testing.T, f func(*API, bool)) { if err != nil { return } - api := NewAPI(fileStore, nil, nil) + api := NewAPI(fileStore, nil, nil, nil) f(api, false) f(api, true) } @@ -85,7 +93,7 @@ func expResponse(content string, mimeType string, status int) *Response { func testGet(t *testing.T, api *API, bzzhash, path string) *testResponse { addr := storage.Address(common.Hex2Bytes(bzzhash)) - reader, mimeType, status, _, err := api.Get(context.TODO(), addr, path) + reader, mimeType, status, _, err := api.Get(context.TODO(), NOOPDecrypt, addr, path) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -229,7 +237,7 @@ func TestAPIResolve(t *testing.T) { if x.immutable { uri.Scheme = "bzz-immutable" } - res, err := api.Resolve(context.TODO(), uri) + res, err := api.ResolveURI(context.TODO(), uri, "") if err == nil { if x.expectErr != nil { t.Fatalf("expected error %q, got result %q", x.expectErr, res) @@ -373,3 +381,55 @@ func TestMultiResolver(t *testing.T) { }) } } + +func TestDecryptOriginForbidden(t *testing.T) { + ctx := context.TODO() + ctx = sctx.SetHost(ctx, "swarm-gateways.net") + + me := &ManifestEntry{ + Access: &AccessEntry{Type: AccessTypePass}, + } + + api := NewAPI(nil, nil, nil, nil) + + f := api.Decryptor(ctx, "") + err := f(me) + if err != ErrDecryptDomainForbidden { + t.Fatalf("should fail with ErrDecryptDomainForbidden, got %v", err) + } +} + +func TestDecryptOrigin(t *testing.T) { + for _, v := range []struct { + host string + expectError error + }{ + { + host: "localhost", + expectError: ErrDecrypt, + }, + { + host: "127.0.0.1", + expectError: ErrDecrypt, + }, + { + host: "swarm-gateways.net", + expectError: ErrDecryptDomainForbidden, + }, + } { + ctx := context.TODO() + ctx = sctx.SetHost(ctx, v.host) + + me := &ManifestEntry{ + Access: 
&AccessEntry{Type: AccessTypePass}, + } + + api := NewAPI(nil, nil, nil, nil) + + f := api.Decryptor(ctx, "") + err := f(me) + if err != v.expectError { + t.Fatalf("should fail with %v, got %v", v.expectError, err) + } + } +} diff --git a/swarm/api/client/client.go b/swarm/api/client/client.go index 8a9efe3608c9..3d06e9e1ccc5 100644 --- a/swarm/api/client/client.go +++ b/swarm/api/client/client.go @@ -43,6 +43,10 @@ var ( DefaultClient = NewClient(DefaultGateway) ) +var ( + ErrUnauthorized = errors.New("unauthorized") +) + func NewClient(gateway string) *Client { return &Client{ Gateway: gateway, @@ -188,7 +192,7 @@ func (c *Client) UploadDirectory(dir, defaultPath, manifest string, toEncrypt bo // DownloadDirectory downloads the files contained in a swarm manifest under // the given path into a local directory (existing files will be overwritten) -func (c *Client) DownloadDirectory(hash, path, destDir string) error { +func (c *Client) DownloadDirectory(hash, path, destDir, credentials string) error { stat, err := os.Stat(destDir) if err != nil { return err @@ -201,13 +205,20 @@ func (c *Client) DownloadDirectory(hash, path, destDir string) error { if err != nil { return err } + if credentials != "" { + req.SetBasicAuth("", credentials) + } req.Header.Set("Accept", "application/x-tar") res, err := http.DefaultClient.Do(req) if err != nil { return err } defer res.Body.Close() - if res.StatusCode != http.StatusOK { + switch res.StatusCode { + case http.StatusOK: + case http.StatusUnauthorized: + return ErrUnauthorized + default: return fmt.Errorf("unexpected HTTP status: %s", res.Status) } tr := tar.NewReader(res.Body) @@ -248,7 +259,7 @@ func (c *Client) DownloadDirectory(hash, path, destDir string) error { // DownloadFile downloads a single file into the destination directory // if the manifest entry does not specify a file name - it will fallback // to the hash of the file as a filename -func (c *Client) DownloadFile(hash, path, dest string) error { +func (c *Client) DownloadFile(hash, path, dest, credentials string) error { hasDestinationFilename := false if stat, err := os.Stat(dest); err == nil { hasDestinationFilename = !stat.IsDir() @@ -261,9 +272,9 @@ func (c *Client) DownloadFile(hash, path, dest string) error { } } - manifestList, err := c.List(hash, path) + manifestList, err := c.List(hash, path, credentials) if err != nil { - return fmt.Errorf("could not list manifest: %v", err) + return err } switch len(manifestList.Entries) { @@ -280,13 +291,19 @@ func (c *Client) DownloadFile(hash, path, dest string) error { if err != nil { return err } + if credentials != "" { + req.SetBasicAuth("", credentials) + } res, err := http.DefaultClient.Do(req) if err != nil { return err } defer res.Body.Close() - - if res.StatusCode != http.StatusOK { + switch res.StatusCode { + case http.StatusOK: + case http.StatusUnauthorized: + return ErrUnauthorized + default: return fmt.Errorf("unexpected HTTP status: expected 200 OK, got %d", res.StatusCode) } filename := "" @@ -367,13 +384,24 @@ func (c *Client) DownloadManifest(hash string) (*api.Manifest, bool, error) { // - a prefix of "dir1/" would return [dir1/dir2/, dir1/file3.txt] // // where entries ending with "/" are common prefixes. 
-func (c *Client) List(hash, prefix string) (*api.ManifestList, error) { - res, err := http.DefaultClient.Get(c.Gateway + "/bzz-list:/" + hash + "/" + prefix) +func (c *Client) List(hash, prefix, credentials string) (*api.ManifestList, error) { + req, err := http.NewRequest(http.MethodGet, c.Gateway+"/bzz-list:/"+hash+"/"+prefix, nil) + if err != nil { + return nil, err + } + if credentials != "" { + req.SetBasicAuth("", credentials) + } + res, err := http.DefaultClient.Do(req) if err != nil { return nil, err } defer res.Body.Close() - if res.StatusCode != http.StatusOK { + switch res.StatusCode { + case http.StatusOK: + case http.StatusUnauthorized: + return nil, ErrUnauthorized + default: return nil, fmt.Errorf("unexpected HTTP status: %s", res.Status) } var list api.ManifestList diff --git a/swarm/api/client/client_test.go b/swarm/api/client/client_test.go index ae82a91d798a..2212f5c4c3c0 100644 --- a/swarm/api/client/client_test.go +++ b/swarm/api/client/client_test.go @@ -228,7 +228,7 @@ func TestClientUploadDownloadDirectory(t *testing.T) { t.Fatal(err) } defer os.RemoveAll(tmp) - if err := client.DownloadDirectory(hash, "", tmp); err != nil { + if err := client.DownloadDirectory(hash, "", tmp, ""); err != nil { t.Fatal(err) } for _, file := range testDirFiles { @@ -265,7 +265,7 @@ func testClientFileList(toEncrypt bool, t *testing.T) { } ls := func(prefix string) []string { - list, err := client.List(hash, prefix) + list, err := client.List(hash, prefix, "") if err != nil { t.Fatal(err) } diff --git a/swarm/api/encrypt.go b/swarm/api/encrypt.go new file mode 100644 index 000000000000..9a2e369149c5 --- /dev/null +++ b/swarm/api/encrypt.go @@ -0,0 +1,76 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package api + +import ( + "encoding/binary" + "errors" + + "github.com/ethereum/go-ethereum/crypto/sha3" + "github.com/ethereum/go-ethereum/swarm/storage/encryption" +) + +type RefEncryption struct { + spanEncryption encryption.Encryption + dataEncryption encryption.Encryption + span []byte +} + +func NewRefEncryption(refSize int) *RefEncryption { + span := make([]byte, 8) + binary.LittleEndian.PutUint64(span, uint64(refSize)) + return &RefEncryption{ + spanEncryption: encryption.New(0, uint32(refSize/32), sha3.NewKeccak256), + dataEncryption: encryption.New(refSize, 0, sha3.NewKeccak256), + span: span, + } +} + +func (re *RefEncryption) Encrypt(ref []byte, key []byte) ([]byte, error) { + encryptedSpan, err := re.spanEncryption.Encrypt(re.span, key) + if err != nil { + return nil, err + } + encryptedData, err := re.dataEncryption.Encrypt(ref, key) + if err != nil { + return nil, err + } + encryptedRef := make([]byte, len(ref)+8) + copy(encryptedRef[:8], encryptedSpan) + copy(encryptedRef[8:], encryptedData) + + return encryptedRef, nil +} + +func (re *RefEncryption) Decrypt(ref []byte, key []byte) ([]byte, error) { + decryptedSpan, err := re.spanEncryption.Decrypt(ref[:8], key) + if err != nil { + return nil, err + } + + size := binary.LittleEndian.Uint64(decryptedSpan) + if size != uint64(len(ref)-8) { + return nil, errors.New("invalid span in encrypted reference") + } + + decryptedRef, err := re.dataEncryption.Decrypt(ref[8:], key) + if err != nil { + return nil, err + } + + return decryptedRef, nil +} diff --git a/swarm/api/filesystem.go b/swarm/api/filesystem.go index aacd266998d8..8251ebc4dc48 100644 --- a/swarm/api/filesystem.go +++ b/swarm/api/filesystem.go @@ -191,7 +191,7 @@ func (fs *FileSystem) Download(bzzpath, localpath string) error { if err != nil { return err } - addr, err := fs.api.Resolve(context.TODO(), uri) + addr, err := fs.api.Resolve(context.TODO(), uri.Addr) if err != nil { return err } @@ -202,7 +202,7 @@ func (fs *FileSystem) Download(bzzpath, localpath string) error { } quitC := make(chan bool) - trie, err := loadManifest(context.TODO(), fs.api.fileStore, addr, quitC) + trie, err := loadManifest(context.TODO(), fs.api.fileStore, addr, quitC, NOOPDecrypt) if err != nil { log.Warn(fmt.Sprintf("fs.Download: loadManifestTrie error: %v", err)) return err diff --git a/swarm/api/filesystem_test.go b/swarm/api/filesystem_test.go index 84a2989d6eb1..fe7527b1f1a1 100644 --- a/swarm/api/filesystem_test.go +++ b/swarm/api/filesystem_test.go @@ -64,7 +64,7 @@ func TestApiDirUpload0(t *testing.T) { checkResponse(t, resp, exp) addr := storage.Address(common.Hex2Bytes(bzzhash)) - _, _, _, _, err = api.Get(context.TODO(), addr, "") + _, _, _, _, err = api.Get(context.TODO(), NOOPDecrypt, addr, "") if err == nil { t.Fatalf("expected error: %v", err) } @@ -143,7 +143,7 @@ func TestApiDirUploadModify(t *testing.T) { exp = expResponse(content, "text/css", 0) checkResponse(t, resp, exp) - _, _, _, _, err = api.Get(context.TODO(), addr, "") + _, _, _, _, err = api.Get(context.TODO(), nil, addr, "") if err == nil { t.Errorf("expected error: %v", err) } diff --git a/swarm/api/http/middleware.go b/swarm/api/http/middleware.go index c0d8d1a4085a..3b2dcc7d5986 100644 --- a/swarm/api/http/middleware.go +++ b/swarm/api/http/middleware.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/swarm/api" "github.com/ethereum/go-ethereum/swarm/log" + "github.com/ethereum/go-ethereum/swarm/sctx" 
"github.com/ethereum/go-ethereum/swarm/spancontext" "github.com/pborman/uuid" ) @@ -35,6 +36,15 @@ func SetRequestID(h http.Handler) http.Handler { }) } +func SetRequestHost(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + r = r.WithContext(sctx.SetHost(r.Context(), r.Host)) + log.Info("setting request host", "ruid", GetRUID(r.Context()), "host", sctx.GetHost(r.Context())) + + h.ServeHTTP(w, r) + }) +} + func ParseURI(h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { uri, err := api.Parse(strings.TrimLeft(r.URL.Path, "/")) @@ -87,7 +97,7 @@ func RecoverPanic(h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { defer func() { if err := recover(); err != nil { - log.Error("panic recovery!", "stack trace", debug.Stack(), "url", r.URL.String(), "headers", r.Header) + log.Error("panic recovery!", "stack trace", string(debug.Stack()), "url", r.URL.String(), "headers", r.Header) } }() h.ServeHTTP(w, r) diff --git a/swarm/api/http/response.go b/swarm/api/http/response.go index f050e706a85b..c9fb9d2855b8 100644 --- a/swarm/api/http/response.go +++ b/swarm/api/http/response.go @@ -79,7 +79,7 @@ func RespondTemplate(w http.ResponseWriter, r *http.Request, templateName, msg s } func RespondError(w http.ResponseWriter, r *http.Request, msg string, code int) { - log.Debug("RespondError", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context())) + log.Debug("RespondError", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context()), "code", code) RespondTemplate(w, r, "error", msg, code) } diff --git a/swarm/api/http/server.go b/swarm/api/http/server.go index 5a5c42adc073..b5ea0c23d47a 100644 --- a/swarm/api/http/server.go +++ b/swarm/api/http/server.go @@ -23,7 +23,6 @@ import ( "bufio" "bytes" "encoding/json" - "errors" "fmt" "io" "io/ioutil" @@ -97,6 +96,7 @@ func NewServer(api *api.API, corsString string) *Server { defaultMiddlewares := []Adapter{ RecoverPanic, SetRequestID, + SetRequestHost, InitLoggingResponseWriter, ParseURI, InstrumentOpenTracing, @@ -169,6 +169,7 @@ func NewServer(api *api.API, corsString string) *Server { } func (s *Server) ListenAndServe(addr string) error { + s.listenAddr = addr return http.ListenAndServe(addr, s) } @@ -178,16 +179,24 @@ func (s *Server) ListenAndServe(addr string) error { // https://github.com/atom/electron/blob/master/docs/api/protocol.md type Server struct { http.Handler - api *api.API + api *api.API + listenAddr string } func (s *Server) HandleBzzGet(w http.ResponseWriter, r *http.Request) { - log.Debug("handleBzzGet", "ruid", GetRUID(r.Context())) + log.Debug("handleBzzGet", "ruid", GetRUID(r.Context()), "uri", r.RequestURI) if r.Header.Get("Accept") == "application/x-tar" { uri := GetURI(r.Context()) - reader, err := s.api.GetDirectoryTar(r.Context(), uri) + _, credentials, _ := r.BasicAuth() + reader, err := s.api.GetDirectoryTar(r.Context(), s.api.Decryptor(r.Context(), credentials), uri) if err != nil { + if isDecryptError(err) { + w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", uri.Address().String())) + RespondError(w, r, err.Error(), http.StatusUnauthorized) + return + } RespondError(w, r, fmt.Sprintf("Had an error building the tarball: %v", err), http.StatusInternalServerError) + return } defer reader.Close() @@ -287,7 +296,7 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *http.Request) { var addr storage.Address if uri.Addr != "" && 
uri.Addr != "encrypt" { - addr, err = s.api.Resolve(r.Context(), uri) + addr, err = s.api.Resolve(r.Context(), uri.Addr) if err != nil { postFilesFail.Inc(1) RespondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusInternalServerError) @@ -563,7 +572,7 @@ func (s *Server) HandleGetResource(w http.ResponseWriter, r *http.Request) { // resolve the content key. manifestAddr := uri.Address() if manifestAddr == nil { - manifestAddr, err = s.api.Resolve(r.Context(), uri) + manifestAddr, err = s.api.Resolve(r.Context(), uri.Addr) if err != nil { getFail.Inc(1) RespondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound) @@ -682,62 +691,21 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *http.Request) { uri := GetURI(r.Context()) log.Debug("handle.get", "ruid", ruid, "uri", uri) getCount.Inc(1) + _, pass, _ := r.BasicAuth() - var err error - addr := uri.Address() - if addr == nil { - addr, err = s.api.Resolve(r.Context(), uri) - if err != nil { - getFail.Inc(1) - RespondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound) - return - } - } else { - w.Header().Set("Cache-Control", "max-age=2147483648, immutable") // url was of type bzz:///path, so we are sure it is immutable. + addr, err := s.api.ResolveURI(r.Context(), uri, pass) + if err != nil { + getFail.Inc(1) + RespondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound) + return } + w.Header().Set("Cache-Control", "max-age=2147483648, immutable") // url was of type bzz:///path, so we are sure it is immutable. log.Debug("handle.get: resolved", "ruid", ruid, "key", addr) // if path is set, interpret as a manifest and return the // raw entry at the given path - if uri.Path != "" { - walker, err := s.api.NewManifestWalker(r.Context(), addr, nil) - if err != nil { - getFail.Inc(1) - RespondError(w, r, fmt.Sprintf("%s is not a manifest", addr), http.StatusBadRequest) - return - } - var entry *api.ManifestEntry - walker.Walk(func(e *api.ManifestEntry) error { - // if the entry matches the path, set entry and stop - // the walk - if e.Path == uri.Path { - entry = e - // return an error to cancel the walk - return errors.New("found") - } - - // ignore non-manifest files - if e.ContentType != api.ManifestType { - return nil - } - - // if the manifest's path is a prefix of the - // requested path, recurse into it by returning - // nil and continuing the walk - if strings.HasPrefix(uri.Path, e.Path) { - return nil - } - return api.ErrSkipManifest - }) - if entry == nil { - getFail.Inc(1) - RespondError(w, r, fmt.Sprintf("manifest entry could not be loaded"), http.StatusNotFound) - return - } - addr = storage.Address(common.Hex2Bytes(entry.Hash)) - } etag := common.Bytes2Hex(addr) noneMatchEtag := r.Header.Get("If-None-Match") w.Header().Set("ETag", fmt.Sprintf("%q", etag)) // set etag to manifest key or raw entry key. 
@@ -781,6 +749,7 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *http.Request) { func (s *Server) HandleGetList(w http.ResponseWriter, r *http.Request) { ruid := GetRUID(r.Context()) uri := GetURI(r.Context()) + _, credentials, _ := r.BasicAuth() log.Debug("handle.get.list", "ruid", ruid, "uri", uri) getListCount.Inc(1) @@ -790,7 +759,7 @@ func (s *Server) HandleGetList(w http.ResponseWriter, r *http.Request) { return } - addr, err := s.api.Resolve(r.Context(), uri) + addr, err := s.api.Resolve(r.Context(), uri.Addr) if err != nil { getListFail.Inc(1) RespondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound) @@ -798,9 +767,14 @@ func (s *Server) HandleGetList(w http.ResponseWriter, r *http.Request) { } log.Debug("handle.get.list: resolved", "ruid", ruid, "key", addr) - list, err := s.api.GetManifestList(r.Context(), addr, uri.Path) + list, err := s.api.GetManifestList(r.Context(), s.api.Decryptor(r.Context(), credentials), addr, uri.Path) if err != nil { getListFail.Inc(1) + if isDecryptError(err) { + w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", addr.String())) + RespondError(w, r, err.Error(), http.StatusUnauthorized) + return + } RespondError(w, r, err.Error(), http.StatusInternalServerError) return } @@ -833,7 +807,8 @@ func (s *Server) HandleGetList(w http.ResponseWriter, r *http.Request) { func (s *Server) HandleGetFile(w http.ResponseWriter, r *http.Request) { ruid := GetRUID(r.Context()) uri := GetURI(r.Context()) - log.Debug("handle.get.file", "ruid", ruid) + _, credentials, _ := r.BasicAuth() + log.Debug("handle.get.file", "ruid", ruid, "uri", r.RequestURI) getFileCount.Inc(1) // ensure the root path has a trailing slash so that relative URLs work @@ -845,7 +820,7 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *http.Request) { manifestAddr := uri.Address() if manifestAddr == nil { - manifestAddr, err = s.api.Resolve(r.Context(), uri) + manifestAddr, err = s.api.ResolveURI(r.Context(), uri, credentials) if err != nil { getFileFail.Inc(1) RespondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound) @@ -856,7 +831,8 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *http.Request) { } log.Debug("handle.get.file: resolved", "ruid", ruid, "key", manifestAddr) - reader, contentType, status, contentKey, err := s.api.Get(r.Context(), manifestAddr, uri.Path) + + reader, contentType, status, contentKey, err := s.api.Get(r.Context(), s.api.Decryptor(r.Context(), credentials), manifestAddr, uri.Path) etag := common.Bytes2Hex(contentKey) noneMatchEtag := r.Header.Get("If-None-Match") @@ -869,6 +845,12 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *http.Request) { } if err != nil { + if isDecryptError(err) { + w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", manifestAddr)) + RespondError(w, r, err.Error(), http.StatusUnauthorized) + return + } + switch status { case http.StatusNotFound: getFileNotFound.Inc(1) @@ -883,9 +865,14 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *http.Request) { //the request results in ambiguous files //e.g. 
/read with readme.md and readinglist.txt available in manifest if status == http.StatusMultipleChoices { - list, err := s.api.GetManifestList(r.Context(), manifestAddr, uri.Path) + list, err := s.api.GetManifestList(r.Context(), s.api.Decryptor(r.Context(), credentials), manifestAddr, uri.Path) if err != nil { getFileFail.Inc(1) + if isDecryptError(err) { + w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", manifestAddr)) + RespondError(w, r, err.Error(), http.StatusUnauthorized) + return + } RespondError(w, r, err.Error(), http.StatusInternalServerError) return } @@ -951,3 +938,7 @@ func (lrw *loggingResponseWriter) WriteHeader(code int) { lrw.statusCode = code lrw.ResponseWriter.WriteHeader(code) } + +func isDecryptError(err error) bool { + return strings.Contains(err.Error(), api.ErrDecrypt.Error()) +} diff --git a/swarm/api/manifest.go b/swarm/api/manifest.go index 2a163dd39c9e..a1329a800fe2 100644 --- a/swarm/api/manifest.go +++ b/swarm/api/manifest.go @@ -46,13 +46,14 @@ type Manifest struct { // ManifestEntry represents an entry in a swarm manifest type ManifestEntry struct { - Hash string `json:"hash,omitempty"` - Path string `json:"path,omitempty"` - ContentType string `json:"contentType,omitempty"` - Mode int64 `json:"mode,omitempty"` - Size int64 `json:"size,omitempty"` - ModTime time.Time `json:"mod_time,omitempty"` - Status int `json:"status,omitempty"` + Hash string `json:"hash,omitempty"` + Path string `json:"path,omitempty"` + ContentType string `json:"contentType,omitempty"` + Mode int64 `json:"mode,omitempty"` + Size int64 `json:"size,omitempty"` + ModTime time.Time `json:"mod_time,omitempty"` + Status int `json:"status,omitempty"` + Access *AccessEntry `json:"access,omitempty"` } // ManifestList represents the result of listing files in a manifest @@ -98,7 +99,7 @@ type ManifestWriter struct { } func (a *API) NewManifestWriter(ctx context.Context, addr storage.Address, quitC chan bool) (*ManifestWriter, error) { - trie, err := loadManifest(ctx, a.fileStore, addr, quitC) + trie, err := loadManifest(ctx, a.fileStore, addr, quitC, NOOPDecrypt) if err != nil { return nil, fmt.Errorf("error loading manifest %s: %s", addr, err) } @@ -141,8 +142,8 @@ type ManifestWalker struct { quitC chan bool } -func (a *API) NewManifestWalker(ctx context.Context, addr storage.Address, quitC chan bool) (*ManifestWalker, error) { - trie, err := loadManifest(ctx, a.fileStore, addr, quitC) +func (a *API) NewManifestWalker(ctx context.Context, addr storage.Address, decrypt DecryptFunc, quitC chan bool) (*ManifestWalker, error) { + trie, err := loadManifest(ctx, a.fileStore, addr, quitC, decrypt) if err != nil { return nil, fmt.Errorf("error loading manifest %s: %s", addr, err) } @@ -194,6 +195,7 @@ type manifestTrie struct { entries [257]*manifestTrieEntry // indexed by first character of basePath, entries[256] is the empty basePath entry ref storage.Address // if ref != nil, it is stored encrypted bool + decrypt DecryptFunc } func newManifestTrieEntry(entry *ManifestEntry, subtrie *manifestTrie) *manifestTrieEntry { @@ -209,15 +211,15 @@ type manifestTrieEntry struct { subtrie *manifestTrie } -func loadManifest(ctx context.Context, fileStore *storage.FileStore, hash storage.Address, quitC chan bool) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand +func loadManifest(ctx context.Context, fileStore *storage.FileStore, hash storage.Address, quitC chan bool, decrypt DecryptFunc) (trie *manifestTrie, err error) { // non-recursive, subtrees are 
downloaded on-demand log.Trace("manifest lookup", "key", hash) // retrieve manifest via FileStore manifestReader, isEncrypted := fileStore.Retrieve(ctx, hash) log.Trace("reader retrieved", "key", hash) - return readManifest(manifestReader, hash, fileStore, isEncrypted, quitC) + return readManifest(manifestReader, hash, fileStore, isEncrypted, quitC, decrypt) } -func readManifest(mr storage.LazySectionReader, hash storage.Address, fileStore *storage.FileStore, isEncrypted bool, quitC chan bool) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand +func readManifest(mr storage.LazySectionReader, hash storage.Address, fileStore *storage.FileStore, isEncrypted bool, quitC chan bool, decrypt DecryptFunc) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand // TODO check size for oversized manifests size, err := mr.Size(mr.Context(), quitC) @@ -258,26 +260,41 @@ func readManifest(mr storage.LazySectionReader, hash storage.Address, fileStore trie = &manifestTrie{ fileStore: fileStore, encrypted: isEncrypted, + decrypt: decrypt, } for _, entry := range man.Entries { - trie.addEntry(entry, quitC) + err = trie.addEntry(entry, quitC) + if err != nil { + return + } } return } -func (mt *manifestTrie) addEntry(entry *manifestTrieEntry, quitC chan bool) { +func (mt *manifestTrie) addEntry(entry *manifestTrieEntry, quitC chan bool) error { mt.ref = nil // trie modified, hash needs to be re-calculated on demand + if entry.ManifestEntry.Access != nil { + if mt.decrypt == nil { + return errors.New("dont have decryptor") + } + + err := mt.decrypt(&entry.ManifestEntry) + if err != nil { + return err + } + } + if len(entry.Path) == 0 { mt.entries[256] = entry - return + return nil } b := entry.Path[0] oldentry := mt.entries[b] if (oldentry == nil) || (oldentry.Path == entry.Path && oldentry.ContentType != ManifestType) { mt.entries[b] = entry - return + return nil } cpl := 0 @@ -287,12 +304,12 @@ func (mt *manifestTrie) addEntry(entry *manifestTrieEntry, quitC chan bool) { if (oldentry.ContentType == ManifestType) && (cpl == len(oldentry.Path)) { if mt.loadSubTrie(oldentry, quitC) != nil { - return + return nil } entry.Path = entry.Path[cpl:] oldentry.subtrie.addEntry(entry, quitC) oldentry.Hash = "" - return + return nil } commonPrefix := entry.Path[:cpl] @@ -310,6 +327,7 @@ func (mt *manifestTrie) addEntry(entry *manifestTrieEntry, quitC chan bool) { Path: commonPrefix, ContentType: ManifestType, }, subtrie) + return nil } func (mt *manifestTrie) getCountLast() (cnt int, entry *manifestTrieEntry) { @@ -398,9 +416,20 @@ func (mt *manifestTrie) recalcAndStore() error { } func (mt *manifestTrie) loadSubTrie(entry *manifestTrieEntry, quitC chan bool) (err error) { + if entry.ManifestEntry.Access != nil { + if mt.decrypt == nil { + return errors.New("dont have decryptor") + } + + err := mt.decrypt(&entry.ManifestEntry) + if err != nil { + return err + } + } + if entry.subtrie == nil { hash := common.Hex2Bytes(entry.Hash) - entry.subtrie, err = loadManifest(context.TODO(), mt.fileStore, hash, quitC) + entry.subtrie, err = loadManifest(context.TODO(), mt.fileStore, hash, quitC, mt.decrypt) entry.Hash = "" // might not match, should be recalculated } return diff --git a/swarm/api/manifest_test.go b/swarm/api/manifest_test.go index d65f023f85cc..1c8e53c43308 100644 --- a/swarm/api/manifest_test.go +++ b/swarm/api/manifest_test.go @@ -44,7 +44,7 @@ func testGetEntry(t *testing.T, path, match string, multiple bool, paths ...stri quitC := make(chan bool) 
fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams()) ref := make([]byte, fileStore.HashSize()) - trie, err := readManifest(manifest(paths...), ref, fileStore, false, quitC) + trie, err := readManifest(manifest(paths...), ref, fileStore, false, quitC, NOOPDecrypt) if err != nil { t.Errorf("unexpected error making manifest: %v", err) } @@ -101,7 +101,7 @@ func TestExactMatch(t *testing.T) { mf := manifest("shouldBeExactMatch.css", "shouldBeExactMatch.css.map") fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams()) ref := make([]byte, fileStore.HashSize()) - trie, err := readManifest(mf, ref, fileStore, false, quitC) + trie, err := readManifest(mf, ref, fileStore, false, quitC, nil) if err != nil { t.Errorf("unexpected error making manifest: %v", err) } @@ -134,7 +134,7 @@ func TestAddFileWithManifestPath(t *testing.T) { } fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams()) ref := make([]byte, fileStore.HashSize()) - trie, err := readManifest(reader, ref, fileStore, false, nil) + trie, err := readManifest(reader, ref, fileStore, false, nil, NOOPDecrypt) if err != nil { t.Fatal(err) } @@ -161,7 +161,7 @@ func TestReadManifestOverSizeLimit(t *testing.T) { reader := &storage.LazyTestSectionReader{ SectionReader: io.NewSectionReader(bytes.NewReader(manifest), 0, int64(len(manifest))), } - _, err := readManifest(reader, storage.Address{}, nil, false, nil) + _, err := readManifest(reader, storage.Address{}, nil, false, nil, NOOPDecrypt) if err == nil { t.Fatal("got no error from readManifest") } diff --git a/swarm/api/storage.go b/swarm/api/storage.go index 3b52301a0186..8a48fe5bc004 100644 --- a/swarm/api/storage.go +++ b/swarm/api/storage.go @@ -63,11 +63,11 @@ func (s *Storage) Get(ctx context.Context, bzzpath string) (*Response, error) { if err != nil { return nil, err } - addr, err := s.api.Resolve(ctx, uri) + addr, err := s.api.Resolve(ctx, uri.Addr) if err != nil { return nil, err } - reader, mimeType, status, _, err := s.api.Get(ctx, addr, uri.Path) + reader, mimeType, status, _, err := s.api.Get(ctx, nil, addr, uri.Path) if err != nil { return nil, err } @@ -93,7 +93,7 @@ func (s *Storage) Modify(ctx context.Context, rootHash, path, contentHash, conte if err != nil { return "", err } - addr, err := s.api.Resolve(ctx, uri) + addr, err := s.api.Resolve(ctx, uri.Addr) if err != nil { return "", err } diff --git a/swarm/api/uri.go b/swarm/api/uri.go index 14965e0d9343..808517088a54 100644 --- a/swarm/api/uri.go +++ b/swarm/api/uri.go @@ -53,6 +53,19 @@ type URI struct { Path string } +func (u *URI) MarshalJSON() (out []byte, err error) { + return []byte(`"` + u.String() + `"`), nil +} + +func (u *URI) UnmarshalJSON(value []byte) error { + uri, err := Parse(string(value)) + if err != nil { + return err + } + *u = *uri + return nil +} + // Parse parses rawuri into a URI struct, where rawuri is expected to have one // of the following formats: // diff --git a/swarm/fuse/swarmfs_test.go b/swarm/fuse/swarmfs_test.go index d579d15a02b8..6efeb78d9bd5 100644 --- a/swarm/fuse/swarmfs_test.go +++ b/swarm/fuse/swarmfs_test.go @@ -1650,7 +1650,7 @@ func TestFUSE(t *testing.T) { if err != nil { t.Fatal(err) } - ta := &testAPI{api: api.NewAPI(fileStore, nil, nil)} + ta := &testAPI{api: api.NewAPI(fileStore, nil, nil, nil)} //run a short suite of tests //approx time: 28s diff --git a/swarm/network_test.go b/swarm/network_test.go index d2a03093389f..176c635d8262 100644 --- a/swarm/network_test.go +++ b/swarm/network_test.go @@ -445,7 +445,7 @@ func retrieve( 
log.Debug("api get: check file", "node", id.String(), "key", f.addr.String(), "total files found", atomic.LoadUint64(totalFoundCount)) - r, _, _, _, err := swarm.api.Get(context.TODO(), f.addr, "/") + r, _, _, _, err := swarm.api.Get(context.TODO(), api.NOOPDecrypt, f.addr, "/") if err != nil { errc <- fmt.Errorf("api get: node %s, key %s, kademlia %s: %v", id, f.addr, swarm.bzz.Hive, err) return diff --git a/swarm/sctx/sctx.go b/swarm/sctx/sctx.go index 8619f6e19c5a..bed2b1145821 100644 --- a/swarm/sctx/sctx.go +++ b/swarm/sctx/sctx.go @@ -1,7 +1,22 @@ package sctx +import "context" + type ContextKey int const ( HTTPRequestIDKey ContextKey = iota + requestHostKey ) + +func SetHost(ctx context.Context, domain string) context.Context { + return context.WithValue(ctx, requestHostKey, domain) +} + +func GetHost(ctx context.Context) string { + v, ok := ctx.Value(requestHostKey).(string) + if ok { + return v + } + return "" +} diff --git a/swarm/swarm.go b/swarm/swarm.go index f731ff33d7d8..a895bdfa55ec 100644 --- a/swarm/swarm.go +++ b/swarm/swarm.go @@ -85,14 +85,12 @@ type Swarm struct { type SwarmAPI struct { Api *api.API Backend chequebook.Backend - PrvKey *ecdsa.PrivateKey } func (self *Swarm) API() *SwarmAPI { return &SwarmAPI{ Api: self.api, Backend: self.backend, - PrvKey: self.privateKey, } } @@ -217,7 +215,7 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e pss.SetHandshakeController(self.ps, pss.NewHandshakeParams()) } - self.api = api.NewAPI(self.fileStore, self.dns, resourceHandler) + self.api = api.NewAPI(self.fileStore, self.dns, resourceHandler, self.privateKey) // Manifests for Smart Hosting log.Debug(fmt.Sprintf("-> Web3 virtual server API")) diff --git a/swarm/testutil/http.go b/swarm/testutil/http.go index 238f7830885c..7fd60fcc3d2b 100644 --- a/swarm/testutil/http.go +++ b/swarm/testutil/http.go @@ -77,7 +77,7 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer) *Tes t.Fatal(err) } - a := api.NewAPI(fileStore, nil, rh.Handler) + a := api.NewAPI(fileStore, nil, rh.Handler, nil) srv := httptest.NewServer(serverFunc(a)) return &TestSwarmServer{ Server: srv, From 2cdf6ee7e00d6127c372e7a28bb27a80ef495cb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felf=C3=B6ldi=20Zsolt?= Date: Wed, 15 Aug 2018 22:25:46 +0200 Subject: [PATCH 135/166] light: CHT and bloom trie indexers working in light mode (#16534) This PR enables the indexers to work in light client mode by downloading a part of these tries (the Merkle proofs of the last values of the last known section) in order to be able to add new values and recalculate subsequent hashes. It also adds CHT data to NodeInfo. 
--- core/chain_indexer.go | 30 +++++-- core/chain_indexer_test.go | 6 +- eth/backend.go | 2 +- eth/bloombits.go | 23 +++-- les/backend.go | 39 +++++---- les/distributor.go | 4 - les/handler.go | 34 ++++++-- les/helper_test.go | 6 +- les/odr.go | 18 ++-- les/odr_test.go | 3 +- les/request_test.go | 3 +- les/retrieve.go | 3 +- les/server.go | 4 +- light/lightchain.go | 14 +-- light/odr.go | 4 + light/postprocess.go | 170 ++++++++++++++++++++++++++++--------- 16 files changed, 251 insertions(+), 112 deletions(-) diff --git a/core/chain_indexer.go b/core/chain_indexer.go index 0b927116d066..11a7c96fa058 100644 --- a/core/chain_indexer.go +++ b/core/chain_indexer.go @@ -17,6 +17,7 @@ package core import ( + "context" "encoding/binary" "fmt" "sync" @@ -37,11 +38,11 @@ import ( type ChainIndexerBackend interface { // Reset initiates the processing of a new chain segment, potentially terminating // any partially completed operations (in case of a reorg). - Reset(section uint64, prevHead common.Hash) error + Reset(ctx context.Context, section uint64, prevHead common.Hash) error // Process crunches through the next header in the chain segment. The caller // will ensure a sequential order of headers. - Process(header *types.Header) + Process(ctx context.Context, header *types.Header) error // Commit finalizes the section metadata and stores it into the database. Commit() error @@ -71,9 +72,11 @@ type ChainIndexer struct { backend ChainIndexerBackend // Background processor generating the index data content children []*ChainIndexer // Child indexers to cascade chain updates to - active uint32 // Flag whether the event loop was started - update chan struct{} // Notification channel that headers should be processed - quit chan chan error // Quit channel to tear down running goroutines + active uint32 // Flag whether the event loop was started + update chan struct{} // Notification channel that headers should be processed + quit chan chan error // Quit channel to tear down running goroutines + ctx context.Context + ctxCancel func() sectionSize uint64 // Number of blocks in a single chain segment to process confirmsReq uint64 // Number of confirmations before processing a completed segment @@ -105,6 +108,8 @@ func NewChainIndexer(chainDb, indexDb ethdb.Database, backend ChainIndexerBacken } // Initialize database dependent fields and start the updater c.loadValidSections() + c.ctx, c.ctxCancel = context.WithCancel(context.Background()) + go c.updateLoop() return c @@ -138,6 +143,8 @@ func (c *ChainIndexer) Start(chain ChainIndexerChain) { func (c *ChainIndexer) Close() error { var errs []error + c.ctxCancel() + // Tear down the primary update loop errc := make(chan error) c.quit <- errc @@ -297,6 +304,12 @@ func (c *ChainIndexer) updateLoop() { c.lock.Unlock() newHead, err := c.processSection(section, oldHead) if err != nil { + select { + case <-c.ctx.Done(): + <-c.quit <- nil + return + default: + } c.log.Error("Section processing failed", "error", err) } c.lock.Lock() @@ -344,7 +357,7 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com // Reset and partial processing - if err := c.backend.Reset(section, lastHead); err != nil { + if err := c.backend.Reset(c.ctx, section, lastHead); err != nil { c.setValidSections(0) return common.Hash{}, err } @@ -360,11 +373,12 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com } else if header.ParentHash != lastHead { return common.Hash{}, fmt.Errorf("chain reorged during section processing") } - 
c.backend.Process(header)
+		if err := c.backend.Process(c.ctx, header); err != nil {
+			return common.Hash{}, err
+		}
 		lastHead = header.Hash()
 	}
 	if err := c.backend.Commit(); err != nil {
-		c.log.Error("Section commit failed", "error", err)
 		return common.Hash{}, err
 	}
 	return lastHead, nil
diff --git a/core/chain_indexer_test.go b/core/chain_indexer_test.go
index 550caf5567c8..a029dec62658 100644
--- a/core/chain_indexer_test.go
+++ b/core/chain_indexer_test.go
@@ -17,6 +17,7 @@
 package core
 
 import (
+	"context"
 	"fmt"
 	"math/big"
 	"math/rand"
@@ -210,13 +211,13 @@ func (b *testChainIndexBackend) reorg(headNum uint64) uint64 {
 	return b.stored * b.indexer.sectionSize
 }
 
-func (b *testChainIndexBackend) Reset(section uint64, prevHead common.Hash) error {
+func (b *testChainIndexBackend) Reset(ctx context.Context, section uint64, prevHead common.Hash) error {
 	b.section = section
 	b.headerCnt = 0
 	return nil
 }
 
-func (b *testChainIndexBackend) Process(header *types.Header) {
+func (b *testChainIndexBackend) Process(ctx context.Context, header *types.Header) error {
 	b.headerCnt++
 	if b.headerCnt > b.indexer.sectionSize {
 		b.t.Error("Processing too many headers")
@@ -227,6 +228,7 @@
 		b.t.Fatal("Unexpected call to Process")
 	case b.processCh <- header.Number.Uint64():
 	}
+	return nil
 }
 
 func (b *testChainIndexBackend) Commit() error {
diff --git a/eth/backend.go b/eth/backend.go
index 865534b1988b..6549cb8a3de0 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -130,7 +130,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
 		gasPrice:      config.GasPrice,
 		etherbase:     config.Etherbase,
 		bloomRequests: make(chan chan *bloombits.Retrieval),
-		bloomIndexer:  NewBloomIndexer(chainDb, params.BloomBitsBlocks),
+		bloomIndexer:  NewBloomIndexer(chainDb, params.BloomBitsBlocks, bloomConfirms),
 	}
 
 	log.Info("Initialising Ethereum protocol", "versions", ProtocolVersions, "network", config.NetworkId)
diff --git a/eth/bloombits.go b/eth/bloombits.go
index 954239d141d3..eb18565e2c22 100644
--- a/eth/bloombits.go
+++ b/eth/bloombits.go
@@ -17,6 +17,7 @@
 package eth
 
 import (
+	"context"
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -92,30 +93,28 @@ const (
 // BloomIndexer implements a core.ChainIndexer, building up a rotated bloom bits index
 // for the Ethereum header bloom filters, permitting blazing fast filtering.
 type BloomIndexer struct {
-	size uint64 // section size to generate bloombits for
-
-	db  ethdb.Database       // database instance to write index data and metadata into
-	gen *bloombits.Generator // generator to rotate the bloom bits creating the bloom index
-
-	section uint64      // Section is the section number being processed currently
-	head    common.Hash // Head is the hash of the last header processed
+	size    uint64               // section size to generate bloombits for
+	db      ethdb.Database       // database instance to write index data and metadata into
+	gen     *bloombits.Generator // generator to rotate the bloom bits creating the bloom index
+	section uint64               // Section is the section number being processed currently
+	head    common.Hash          // Head is the hash of the last header processed
 }
 
 // NewBloomIndexer returns a chain indexer that generates bloom bits data for the
 // canonical chain for fast logs filtering.
-func NewBloomIndexer(db ethdb.Database, size uint64) *core.ChainIndexer { +func NewBloomIndexer(db ethdb.Database, size, confReq uint64) *core.ChainIndexer { backend := &BloomIndexer{ db: db, size: size, } table := ethdb.NewTable(db, string(rawdb.BloomBitsIndexPrefix)) - return core.NewChainIndexer(db, table, backend, size, bloomConfirms, bloomThrottling, "bloombits") + return core.NewChainIndexer(db, table, backend, size, confReq, bloomThrottling, "bloombits") } // Reset implements core.ChainIndexerBackend, starting a new bloombits index // section. -func (b *BloomIndexer) Reset(section uint64, lastSectionHead common.Hash) error { +func (b *BloomIndexer) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error { gen, err := bloombits.NewGenerator(uint(b.size)) b.gen, b.section, b.head = gen, section, common.Hash{} return err @@ -123,16 +122,16 @@ func (b *BloomIndexer) Reset(section uint64, lastSectionHead common.Hash) error // Process implements core.ChainIndexerBackend, adding a new header's bloom into // the index. -func (b *BloomIndexer) Process(header *types.Header) { +func (b *BloomIndexer) Process(ctx context.Context, header *types.Header) error { b.gen.AddBloom(uint(header.Number.Uint64()-b.section*b.size), header.Bloom) b.head = header.Hash() + return nil } // Commit implements core.ChainIndexerBackend, finalizing the bloom section and // writing it out into the database. func (b *BloomIndexer) Commit() error { batch := b.db.NewBatch() - for i := 0; i < types.BloomBitLength; i++ { bits, err := b.gen.Bitset(uint(i)) if err != nil { diff --git a/les/backend.go b/les/backend.go index 178bc1e0e49b..9b8cc1828f5c 100644 --- a/les/backend.go +++ b/les/backend.go @@ -95,29 +95,35 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) { quitSync := make(chan struct{}) leth := &LightEthereum{ - config: config, - chainConfig: chainConfig, - chainDb: chainDb, - eventMux: ctx.EventMux, - peers: peers, - reqDist: newRequestDistributor(peers, quitSync), - accountManager: ctx.AccountManager, - engine: eth.CreateConsensusEngine(ctx, chainConfig, &config.Ethash, nil, chainDb), - shutdownChan: make(chan bool), - networkId: config.NetworkId, - bloomRequests: make(chan chan *bloombits.Retrieval), - bloomIndexer: eth.NewBloomIndexer(chainDb, light.BloomTrieFrequency), - chtIndexer: light.NewChtIndexer(chainDb, true), - bloomTrieIndexer: light.NewBloomTrieIndexer(chainDb, true), + config: config, + chainConfig: chainConfig, + chainDb: chainDb, + eventMux: ctx.EventMux, + peers: peers, + reqDist: newRequestDistributor(peers, quitSync), + accountManager: ctx.AccountManager, + engine: eth.CreateConsensusEngine(ctx, chainConfig, &config.Ethash, nil, chainDb), + shutdownChan: make(chan bool), + networkId: config.NetworkId, + bloomRequests: make(chan chan *bloombits.Retrieval), + bloomIndexer: eth.NewBloomIndexer(chainDb, light.BloomTrieFrequency, light.HelperTrieConfirmations), } leth.relay = NewLesTxRelay(peers, leth.reqDist) leth.serverPool = newServerPool(chainDb, quitSync, &leth.wg) leth.retriever = newRetrieveManager(peers, leth.reqDist, leth.serverPool) - leth.odr = NewLesOdr(chainDb, leth.chtIndexer, leth.bloomTrieIndexer, leth.bloomIndexer, leth.retriever) + leth.odr = NewLesOdr(chainDb, leth.retriever) + leth.chtIndexer = light.NewChtIndexer(chainDb, true, leth.odr) + leth.bloomTrieIndexer = light.NewBloomTrieIndexer(chainDb, true, leth.odr) + leth.odr.SetIndexers(leth.chtIndexer, leth.bloomTrieIndexer, leth.bloomIndexer) + // Note: NewLightChain 
adds the trusted checkpoint so it needs an ODR with
+	// indexers already set but not started yet
 	if leth.blockchain, err = light.NewLightChain(leth.odr, leth.chainConfig, leth.engine); err != nil {
 		return nil, err
 	}
+	// Note: AddChildIndexer starts the update process for the child
+	leth.bloomIndexer.AddChildIndexer(leth.bloomTrieIndexer)
+	leth.chtIndexer.Start(leth.blockchain)
 	leth.bloomIndexer.Start(leth.blockchain)
 
 	// Rewind the chain in case of an incompatible config upgrade.
 	if compat, ok := genesisErr.(*params.ConfigCompatError); ok {
@@ -242,9 +248,6 @@ func (s *LightEthereum) Stop() error {
 	if s.chtIndexer != nil {
 		s.chtIndexer.Close()
 	}
-	if s.bloomTrieIndexer != nil {
-		s.bloomTrieIndexer.Close()
-	}
 	s.blockchain.Stop()
 	s.protocolManager.Stop()
 	s.txPool.Stop()
diff --git a/les/distributor.go b/les/distributor.go
index 159fa4c73f73..d3f6b21d182e 100644
--- a/les/distributor.go
+++ b/les/distributor.go
@@ -20,14 +20,10 @@
 package les
 
 import (
 	"container/list"
-	"errors"
 	"sync"
 	"time"
 )
 
-// ErrNoPeers is returned if no peers capable of serving a queued request are available
-var ErrNoPeers = errors.New("no suitable peers available")
-
 // requestDistributor implements a mechanism that distributes requests to
 // suitable peers, obeying flow control rules and prioritizing them in creation
 // order (even when a resend is necessary).
diff --git a/les/handler.go b/les/handler.go
index 91a235bf0a91..ccb4a88448d5 100644
--- a/les/handler.go
+++ b/les/handler.go
@@ -1206,11 +1206,12 @@ func (pm *ProtocolManager) txStatus(hashes []common.Hash) []txStatus {
 // NodeInfo represents a short summary of the Ethereum sub-protocol metadata
 // known about the host peer.
 type NodeInfo struct {
-	Network    uint64              `json:"network"`    // Ethereum network ID (1=Frontier, 2=Morden, Ropsten=3, Rinkeby=4)
-	Difficulty *big.Int            `json:"difficulty"` // Total difficulty of the host's blockchain
-	Genesis    common.Hash         `json:"genesis"`    // SHA3 hash of the host's genesis block
-	Config     *params.ChainConfig `json:"config"`     // Chain configuration for the fork rules
-	Head       common.Hash         `json:"head"`       // SHA3 hash of the host's best owned block
+	Network    uint64                  `json:"network"`    // Ethereum network ID (1=Frontier, 2=Morden, Ropsten=3, Rinkeby=4)
+	Difficulty *big.Int                `json:"difficulty"` // Total difficulty of the host's blockchain
+	Genesis    common.Hash             `json:"genesis"`    // SHA3 hash of the host's genesis block
+	Config     *params.ChainConfig     `json:"config"`     // Chain configuration for the fork rules
+	Head       common.Hash             `json:"head"`       // SHA3 hash of the host's best owned block
+	CHT        light.TrustedCheckpoint `json:"cht"`        // Trusted CHT checkpoint for fast catchup
 }
 
 // NodeInfo retrieves some protocol metadata about the running host node.
@@ -1218,12 +1219,31 @@ func (self *ProtocolManager) NodeInfo() *NodeInfo { head := self.blockchain.CurrentHeader() hash := head.Hash() + var cht light.TrustedCheckpoint + + sections, _, sectionHead := self.odr.ChtIndexer().Sections() + sections2, _, sectionHead2 := self.odr.BloomTrieIndexer().Sections() + if sections2 < sections { + sections = sections2 + sectionHead = sectionHead2 + } + if sections > 0 { + sectionIndex := sections - 1 + cht = light.TrustedCheckpoint{ + SectionIdx: sectionIndex, + SectionHead: sectionHead, + CHTRoot: light.GetChtRoot(self.chainDb, sectionIndex, sectionHead), + BloomRoot: light.GetBloomTrieRoot(self.chainDb, sectionIndex, sectionHead), + } + } + return &NodeInfo{ Network: self.networkId, Difficulty: self.blockchain.GetTd(hash, head.Number.Uint64()), Genesis: self.blockchain.Genesis().Hash(), Config: self.blockchain.Config(), Head: hash, + CHT: cht, } } @@ -1258,7 +1278,7 @@ func (pc *peerConnection) RequestHeadersByHash(origin common.Hash, amount int, s } _, ok := <-pc.manager.reqDist.queue(rq) if !ok { - return ErrNoPeers + return light.ErrNoPeers } return nil } @@ -1282,7 +1302,7 @@ func (pc *peerConnection) RequestHeadersByNumber(origin uint64, amount int, skip } _, ok := <-pc.manager.reqDist.queue(rq) if !ok { - return ErrNoPeers + return light.ErrNoPeers } return nil } diff --git a/les/helper_test.go b/les/helper_test.go index 8fd01a39e048..50c97e06e1c1 100644 --- a/les/helper_test.go +++ b/les/helper_test.go @@ -156,12 +156,12 @@ func newTestProtocolManager(lightSync bool, blocks int, generator func(int, *cor } else { blockchain, _ := core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{}) - chtIndexer := light.NewChtIndexer(db, false) + chtIndexer := light.NewChtIndexer(db, false, nil) chtIndexer.Start(blockchain) - bbtIndexer := light.NewBloomTrieIndexer(db, false) + bbtIndexer := light.NewBloomTrieIndexer(db, false, nil) - bloomIndexer := eth.NewBloomIndexer(db, params.BloomBitsBlocks) + bloomIndexer := eth.NewBloomIndexer(db, params.BloomBitsBlocks, light.HelperTrieProcessConfirmations) bloomIndexer.AddChildIndexer(bbtIndexer) bloomIndexer.Start(blockchain) diff --git a/les/odr.go b/les/odr.go index f8412aaad73a..2ad28d5d9f12 100644 --- a/les/odr.go +++ b/les/odr.go @@ -33,14 +33,11 @@ type LesOdr struct { stop chan struct{} } -func NewLesOdr(db ethdb.Database, chtIndexer, bloomTrieIndexer, bloomIndexer *core.ChainIndexer, retriever *retrieveManager) *LesOdr { +func NewLesOdr(db ethdb.Database, retriever *retrieveManager) *LesOdr { return &LesOdr{ - db: db, - chtIndexer: chtIndexer, - bloomTrieIndexer: bloomTrieIndexer, - bloomIndexer: bloomIndexer, - retriever: retriever, - stop: make(chan struct{}), + db: db, + retriever: retriever, + stop: make(chan struct{}), } } @@ -54,6 +51,13 @@ func (odr *LesOdr) Database() ethdb.Database { return odr.db } +// SetIndexers adds the necessary chain indexers to the ODR backend +func (odr *LesOdr) SetIndexers(chtIndexer, bloomTrieIndexer, bloomIndexer *core.ChainIndexer) { + odr.chtIndexer = chtIndexer + odr.bloomTrieIndexer = bloomTrieIndexer + odr.bloomIndexer = bloomIndexer +} + // ChtIndexer returns the CHT chain indexer func (odr *LesOdr) ChtIndexer() *core.ChainIndexer { return odr.chtIndexer diff --git a/les/odr_test.go b/les/odr_test.go index 983f7262b06a..c7c25cbe4b36 100644 --- a/les/odr_test.go +++ b/les/odr_test.go @@ -167,7 +167,8 @@ func testOdr(t *testing.T, protocol int, expFail uint64, fn odrTestFn) { rm := newRetrieveManager(peers, dist, nil) db := ethdb.NewMemDatabase() ldb := 
ethdb.NewMemDatabase() - odr := NewLesOdr(ldb, light.NewChtIndexer(db, true), light.NewBloomTrieIndexer(db, true), eth.NewBloomIndexer(db, light.BloomTrieFrequency), rm) + odr := NewLesOdr(ldb, rm) + odr.SetIndexers(light.NewChtIndexer(db, true, nil), light.NewBloomTrieIndexer(db, true, nil), eth.NewBloomIndexer(db, light.BloomTrieFrequency, light.HelperTrieConfirmations)) pm := newTestProtocolManagerMust(t, false, 4, testChainGen, nil, nil, db) lpm := newTestProtocolManagerMust(t, true, 0, nil, peers, odr, ldb) _, err1, lpeer, err2 := newTestPeerPair("peer", protocol, pm, lpm) diff --git a/les/request_test.go b/les/request_test.go index ba2f603d8b4a..db576798b478 100644 --- a/les/request_test.go +++ b/les/request_test.go @@ -89,7 +89,8 @@ func testAccess(t *testing.T, protocol int, fn accessTestFn) { rm := newRetrieveManager(peers, dist, nil) db := ethdb.NewMemDatabase() ldb := ethdb.NewMemDatabase() - odr := NewLesOdr(ldb, light.NewChtIndexer(db, true), light.NewBloomTrieIndexer(db, true), eth.NewBloomIndexer(db, light.BloomTrieFrequency), rm) + odr := NewLesOdr(ldb, rm) + odr.SetIndexers(light.NewChtIndexer(db, true, nil), light.NewBloomTrieIndexer(db, true, nil), eth.NewBloomIndexer(db, light.BloomTrieFrequency, light.HelperTrieConfirmations)) pm := newTestProtocolManagerMust(t, false, 4, testChainGen, nil, nil, db) lpm := newTestProtocolManagerMust(t, true, 0, nil, peers, odr, ldb) diff --git a/les/retrieve.go b/les/retrieve.go index a9037a38ef7c..8ae36d82cd71 100644 --- a/les/retrieve.go +++ b/les/retrieve.go @@ -27,6 +27,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common/mclock" + "github.com/ethereum/go-ethereum/light" ) var ( @@ -207,7 +208,7 @@ func (r *sentReq) stateRequesting() reqStateFn { return r.stateNoMorePeers } // nothing to wait for, no more peers to ask, return with error - r.stop(ErrNoPeers) + r.stop(light.ErrNoPeers) // no need to go to stopped state because waiting() already returned false return nil } diff --git a/les/server.go b/les/server.go index fca6124c9c74..a934fbf26e6c 100644 --- a/les/server.go +++ b/les/server.go @@ -67,8 +67,8 @@ func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) { protocolManager: pm, quitSync: quitSync, lesTopics: lesTopics, - chtIndexer: light.NewChtIndexer(eth.ChainDb(), false), - bloomTrieIndexer: light.NewBloomTrieIndexer(eth.ChainDb(), false), + chtIndexer: light.NewChtIndexer(eth.ChainDb(), false, nil), + bloomTrieIndexer: light.NewBloomTrieIndexer(eth.ChainDb(), false, nil), } logger := log.New() diff --git a/light/lightchain.go b/light/lightchain.go index 30b9bd89a61c..b7e629e88b53 100644 --- a/light/lightchain.go +++ b/light/lightchain.go @@ -116,19 +116,19 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, engine consensus. 
} // addTrustedCheckpoint adds a trusted checkpoint to the blockchain -func (self *LightChain) addTrustedCheckpoint(cp trustedCheckpoint) { +func (self *LightChain) addTrustedCheckpoint(cp TrustedCheckpoint) { if self.odr.ChtIndexer() != nil { - StoreChtRoot(self.chainDb, cp.sectionIdx, cp.sectionHead, cp.chtRoot) - self.odr.ChtIndexer().AddKnownSectionHead(cp.sectionIdx, cp.sectionHead) + StoreChtRoot(self.chainDb, cp.SectionIdx, cp.SectionHead, cp.CHTRoot) + self.odr.ChtIndexer().AddKnownSectionHead(cp.SectionIdx, cp.SectionHead) } if self.odr.BloomTrieIndexer() != nil { - StoreBloomTrieRoot(self.chainDb, cp.sectionIdx, cp.sectionHead, cp.bloomTrieRoot) - self.odr.BloomTrieIndexer().AddKnownSectionHead(cp.sectionIdx, cp.sectionHead) + StoreBloomTrieRoot(self.chainDb, cp.SectionIdx, cp.SectionHead, cp.BloomRoot) + self.odr.BloomTrieIndexer().AddKnownSectionHead(cp.SectionIdx, cp.SectionHead) } if self.odr.BloomIndexer() != nil { - self.odr.BloomIndexer().AddKnownSectionHead(cp.sectionIdx, cp.sectionHead) + self.odr.BloomIndexer().AddKnownSectionHead(cp.SectionIdx, cp.SectionHead) } - log.Info("Added trusted checkpoint", "chain", cp.name, "block", (cp.sectionIdx+1)*CHTFrequencyClient-1, "hash", cp.sectionHead) + log.Info("Added trusted checkpoint", "chain", cp.name, "block", (cp.SectionIdx+1)*CHTFrequencyClient-1, "hash", cp.SectionHead) } func (self *LightChain) getProcInterrupt() bool { diff --git a/light/odr.go b/light/odr.go index 8f1e50b817d9..83c64055a2b1 100644 --- a/light/odr.go +++ b/light/odr.go @@ -20,6 +20,7 @@ package light import ( "context" + "errors" "math/big" "github.com/ethereum/go-ethereum/common" @@ -33,6 +34,9 @@ import ( // service is not required. var NoOdr = context.Background() +// ErrNoPeers is returned if no peers capable of serving a queued request are available +var ErrNoPeers = errors.New("no suitable peers available") + // OdrBackend is an interface to a backend service that handles ODR retrievals type type OdrBackend interface { Database() ethdb.Database diff --git a/light/postprocess.go b/light/postprocess.go index 2090a9d0444b..0b25e1d88141 100644 --- a/light/postprocess.go +++ b/light/postprocess.go @@ -17,8 +17,10 @@ package light import ( + "context" "encoding/binary" "errors" + "fmt" "math/big" "time" @@ -47,35 +49,35 @@ const ( HelperTrieProcessConfirmations = 256 // number of confirmations before a HelperTrie is generated ) -// trustedCheckpoint represents a set of post-processed trie roots (CHT and BloomTrie) associated with +// TrustedCheckpoint represents a set of post-processed trie roots (CHT and BloomTrie) associated with // the appropriate section index and head hash. It is used to start light syncing from this checkpoint // and avoid downloading the entire header chain while still being able to securely access old headers/logs. 
-type trustedCheckpoint struct { - name string - sectionIdx uint64 - sectionHead, chtRoot, bloomTrieRoot common.Hash +type TrustedCheckpoint struct { + name string + SectionIdx uint64 + SectionHead, CHTRoot, BloomRoot common.Hash } var ( - mainnetCheckpoint = trustedCheckpoint{ - name: "mainnet", - sectionIdx: 179, - sectionHead: common.HexToHash("ae778e455492db1183e566fa0c67f954d256fdd08618f6d5a393b0e24576d0ea"), - chtRoot: common.HexToHash("646b338f9ca74d936225338916be53710ec84020b89946004a8605f04c817f16"), - bloomTrieRoot: common.HexToHash("d0f978f5dbc86e5bf931d8dd5b2ecbebbda6dc78f8896af6a27b46a3ced0ac25"), + mainnetCheckpoint = TrustedCheckpoint{ + name: "mainnet", + SectionIdx: 179, + SectionHead: common.HexToHash("ae778e455492db1183e566fa0c67f954d256fdd08618f6d5a393b0e24576d0ea"), + CHTRoot: common.HexToHash("646b338f9ca74d936225338916be53710ec84020b89946004a8605f04c817f16"), + BloomRoot: common.HexToHash("d0f978f5dbc86e5bf931d8dd5b2ecbebbda6dc78f8896af6a27b46a3ced0ac25"), } - ropstenCheckpoint = trustedCheckpoint{ - name: "ropsten", - sectionIdx: 107, - sectionHead: common.HexToHash("e1988f95399debf45b873e065e5cd61b416ef2e2e5deec5a6f87c3127086e1ce"), - chtRoot: common.HexToHash("15cba18e4de0ab1e95e202625199ba30147aec8b0b70384b66ebea31ba6a18e0"), - bloomTrieRoot: common.HexToHash("e00fa6389b2e597d9df52172cd8e936879eed0fca4fa59db99e2c8ed682562f2"), + ropstenCheckpoint = TrustedCheckpoint{ + name: "ropsten", + SectionIdx: 107, + SectionHead: common.HexToHash("e1988f95399debf45b873e065e5cd61b416ef2e2e5deec5a6f87c3127086e1ce"), + CHTRoot: common.HexToHash("15cba18e4de0ab1e95e202625199ba30147aec8b0b70384b66ebea31ba6a18e0"), + BloomRoot: common.HexToHash("e00fa6389b2e597d9df52172cd8e936879eed0fca4fa59db99e2c8ed682562f2"), } ) // trustedCheckpoints associates each known checkpoint with the genesis hash of the chain it belongs to -var trustedCheckpoints = map[common.Hash]trustedCheckpoint{ +var trustedCheckpoints = map[common.Hash]TrustedCheckpoint{ params.MainnetGenesisHash: mainnetCheckpoint, params.TestnetGenesisHash: ropstenCheckpoint, } @@ -119,7 +121,8 @@ func StoreChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead, root common // ChtIndexerBackend implements core.ChainIndexerBackend type ChtIndexerBackend struct { - diskdb ethdb.Database + diskdb, trieTable ethdb.Database + odr OdrBackend triedb *trie.Database section, sectionSize uint64 lastHash common.Hash @@ -127,7 +130,7 @@ type ChtIndexerBackend struct { } // NewBloomTrieIndexer creates a BloomTrie chain indexer -func NewChtIndexer(db ethdb.Database, clientMode bool) *core.ChainIndexer { +func NewChtIndexer(db ethdb.Database, clientMode bool, odr OdrBackend) *core.ChainIndexer { var sectionSize, confirmReq uint64 if clientMode { sectionSize = CHTFrequencyClient @@ -137,28 +140,64 @@ func NewChtIndexer(db ethdb.Database, clientMode bool) *core.ChainIndexer { confirmReq = HelperTrieProcessConfirmations } idb := ethdb.NewTable(db, "chtIndex-") + trieTable := ethdb.NewTable(db, ChtTablePrefix) backend := &ChtIndexerBackend{ diskdb: db, - triedb: trie.NewDatabase(ethdb.NewTable(db, ChtTablePrefix)), + odr: odr, + trieTable: trieTable, + triedb: trie.NewDatabase(trieTable), sectionSize: sectionSize, } return core.NewChainIndexer(db, idb, backend, sectionSize, confirmReq, time.Millisecond*100, "cht") } +// fetchMissingNodes tries to retrieve the last entry of the latest trusted CHT from the +// ODR backend in order to be able to add new entries and calculate subsequent root hashes +func (c *ChtIndexerBackend) 
fetchMissingNodes(ctx context.Context, section uint64, root common.Hash) error { + batch := c.trieTable.NewBatch() + r := &ChtRequest{ChtRoot: root, ChtNum: section - 1, BlockNum: section*c.sectionSize - 1} + for { + err := c.odr.Retrieve(ctx, r) + switch err { + case nil: + r.Proof.Store(batch) + return batch.Write() + case ErrNoPeers: + // if there are no peers to serve, retry later + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(time.Second * 10): + // stay in the loop and try again + } + default: + return err + } + } +} + // Reset implements core.ChainIndexerBackend -func (c *ChtIndexerBackend) Reset(section uint64, lastSectionHead common.Hash) error { +func (c *ChtIndexerBackend) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error { var root common.Hash if section > 0 { root = GetChtRoot(c.diskdb, section-1, lastSectionHead) } var err error c.trie, err = trie.New(root, c.triedb) + + if err != nil && c.odr != nil { + err = c.fetchMissingNodes(ctx, section, root) + if err == nil { + c.trie, err = trie.New(root, c.triedb) + } + } + c.section = section return err } // Process implements core.ChainIndexerBackend -func (c *ChtIndexerBackend) Process(header *types.Header) { +func (c *ChtIndexerBackend) Process(ctx context.Context, header *types.Header) error { hash, num := header.Hash(), header.Number.Uint64() c.lastHash = hash @@ -170,6 +209,7 @@ func (c *ChtIndexerBackend) Process(header *types.Header) { binary.BigEndian.PutUint64(encNumber[:], num) data, _ := rlp.EncodeToBytes(ChtNode{hash, td}) c.trie.Update(encNumber[:], data) + return nil } // Commit implements core.ChainIndexerBackend @@ -181,16 +221,15 @@ func (c *ChtIndexerBackend) Commit() error { c.triedb.Commit(root, false) if ((c.section+1)*c.sectionSize)%CHTFrequencyClient == 0 { - log.Info("Storing CHT", "section", c.section*c.sectionSize/CHTFrequencyClient, "head", c.lastHash, "root", root) + log.Info("Storing CHT", "section", c.section*c.sectionSize/CHTFrequencyClient, "head", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root)) } StoreChtRoot(c.diskdb, c.section, c.lastHash, root) return nil } const ( - BloomTrieFrequency = 32768 - ethBloomBitsSection = 4096 - ethBloomBitsConfirmations = 256 + BloomTrieFrequency = 32768 + ethBloomBitsSection = 4096 ) var ( @@ -215,7 +254,8 @@ func StoreBloomTrieRoot(db ethdb.Database, sectionIdx uint64, sectionHead, root // BloomTrieIndexerBackend implements core.ChainIndexerBackend type BloomTrieIndexerBackend struct { - diskdb ethdb.Database + diskdb, trieTable ethdb.Database + odr OdrBackend triedb *trie.Database section, parentSectionSize, bloomTrieRatio uint64 trie *trie.Trie @@ -223,44 +263,98 @@ type BloomTrieIndexerBackend struct { } // NewBloomTrieIndexer creates a BloomTrie chain indexer -func NewBloomTrieIndexer(db ethdb.Database, clientMode bool) *core.ChainIndexer { +func NewBloomTrieIndexer(db ethdb.Database, clientMode bool, odr OdrBackend) *core.ChainIndexer { + trieTable := ethdb.NewTable(db, BloomTrieTablePrefix) backend := &BloomTrieIndexerBackend{ - diskdb: db, - triedb: trie.NewDatabase(ethdb.NewTable(db, BloomTrieTablePrefix)), + diskdb: db, + odr: odr, + trieTable: trieTable, + triedb: trie.NewDatabase(trieTable), } idb := ethdb.NewTable(db, "bltIndex-") - var confirmReq uint64 if clientMode { backend.parentSectionSize = BloomTrieFrequency - confirmReq = HelperTrieConfirmations } else { backend.parentSectionSize = ethBloomBitsSection - confirmReq = HelperTrieProcessConfirmations } backend.bloomTrieRatio 
= BloomTrieFrequency / backend.parentSectionSize backend.sectionHeads = make([]common.Hash, backend.bloomTrieRatio) - return core.NewChainIndexer(db, idb, backend, BloomTrieFrequency, confirmReq-ethBloomBitsConfirmations, time.Millisecond*100, "bloomtrie") + return core.NewChainIndexer(db, idb, backend, BloomTrieFrequency, 0, time.Millisecond*100, "bloomtrie") +} + +// fetchMissingNodes tries to retrieve the last entries of the latest trusted bloom trie from the +// ODR backend in order to be able to add new entries and calculate subsequent root hashes +func (b *BloomTrieIndexerBackend) fetchMissingNodes(ctx context.Context, section uint64, root common.Hash) error { + indexCh := make(chan uint, types.BloomBitLength) + type res struct { + nodes *NodeSet + err error + } + resCh := make(chan res, types.BloomBitLength) + for i := 0; i < 20; i++ { + go func() { + for bitIndex := range indexCh { + r := &BloomRequest{BloomTrieRoot: root, BloomTrieNum: section - 1, BitIdx: bitIndex, SectionIdxList: []uint64{section - 1}} + for { + if err := b.odr.Retrieve(ctx, r); err == ErrNoPeers { + // if there are no peers to serve, retry later + select { + case <-ctx.Done(): + resCh <- res{nil, ctx.Err()} + return + case <-time.After(time.Second * 10): + // stay in the loop and try again + } + } else { + resCh <- res{r.Proofs, err} + break + } + } + } + }() + } + + for i := uint(0); i < types.BloomBitLength; i++ { + indexCh <- i + } + close(indexCh) + batch := b.trieTable.NewBatch() + for i := uint(0); i < types.BloomBitLength; i++ { + res := <-resCh + if res.err != nil { + return res.err + } + res.nodes.Store(batch) + } + return batch.Write() } // Reset implements core.ChainIndexerBackend -func (b *BloomTrieIndexerBackend) Reset(section uint64, lastSectionHead common.Hash) error { +func (b *BloomTrieIndexerBackend) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error { var root common.Hash if section > 0 { root = GetBloomTrieRoot(b.diskdb, section-1, lastSectionHead) } var err error b.trie, err = trie.New(root, b.triedb) + if err != nil && b.odr != nil { + err = b.fetchMissingNodes(ctx, section, root) + if err == nil { + b.trie, err = trie.New(root, b.triedb) + } + } b.section = section return err } // Process implements core.ChainIndexerBackend -func (b *BloomTrieIndexerBackend) Process(header *types.Header) { +func (b *BloomTrieIndexerBackend) Process(ctx context.Context, header *types.Header) error { num := header.Number.Uint64() - b.section*BloomTrieFrequency if (num+1)%b.parentSectionSize == 0 { b.sectionHeads[num/b.parentSectionSize] = header.Hash() } + return nil } // Commit implements core.ChainIndexerBackend @@ -300,7 +394,7 @@ func (b *BloomTrieIndexerBackend) Commit() error { b.triedb.Commit(root, false) sectionHead := b.sectionHeads[b.bloomTrieRatio-1] - log.Info("Storing bloom trie", "section", b.section, "head", sectionHead, "root", root, "compression", float64(compSize)/float64(decompSize)) + log.Info("Storing bloom trie", "section", b.section, "head", fmt.Sprintf("%064x", sectionHead), "root", fmt.Sprintf("%064x", root), "compression", float64(compSize)/float64(decompSize)) StoreBloomTrieRoot(b.diskdb, b.section, sectionHead, root) return nil From b24fb76a3ae8b2a41bde9ed0744b4284e385e011 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 16 Aug 2018 09:41:16 +0300 Subject: [PATCH 136/166] cmd/puppeth: fix nil panic on disconnected stats gathering --- cmd/puppeth/wizard_netstats.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 
deletions(-) diff --git a/cmd/puppeth/wizard_netstats.go b/cmd/puppeth/wizard_netstats.go index 89b38e262b93..99ca11bb1776 100644 --- a/cmd/puppeth/wizard_netstats.go +++ b/cmd/puppeth/wizard_netstats.go @@ -82,7 +82,6 @@ func (w *wizard) gatherStats(server string, pubkey []byte, client *sshClient) *s logger.Info("Starting remote server health-check") stat := &serverStat{ - address: client.address, services: make(map[string]map[string]string), } if client == nil { @@ -94,6 +93,8 @@ func (w *wizard) gatherStats(server string, pubkey []byte, client *sshClient) *s } client = conn } + stat.address = client.address + // Client connected one way or another, run health-checks logger.Debug("Checking for nginx availability") if infos, err := checkNginx(client, w.network); err != nil { @@ -214,6 +215,9 @@ func (stats serverStats) render() { if len(stat.address) > len(separator[1]) { separator[1] = strings.Repeat("-", len(stat.address)) } + if len(stat.failure) > len(separator[1]) { + separator[1] = strings.Repeat("-", len(stat.failure)) + } for service, configs := range stat.services { if len(service) > len(separator[2]) { separator[2] = strings.Repeat("-", len(service)) @@ -250,7 +254,11 @@ func (stats serverStats) render() { sort.Strings(services) if len(services) == 0 { - table.Append([]string{server, stats[server].address, "", "", ""}) + if stats[server].failure != "" { + table.Append([]string{server, stats[server].failure, "", "", ""}) + } else { + table.Append([]string{server, stats[server].address, "", "", ""}) + } } for j, service := range services { // Add an empty line between all services From 3e21adc6488be41ac882c316486573374785cc82 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 9 Aug 2018 13:46:52 +0300 Subject: [PATCH 137/166] crypto/bn256: fix issues caused by Go 1.11 --- crypto/bn256/cloudflare/gfp_amd64.s | 2 +- crypto/bn256/cloudflare/gfp_decl.go | 7 +++ crypto/bn256/google/bn256.go | 40 ++++++++++------ crypto/bn256/google/curve.go | 10 +++- crypto/bn256/google/twist.go | 10 +++- vendor/golang.org/x/sys/cpu/cpu.go | 38 +++++++++++++++ vendor/golang.org/x/sys/cpu/cpu_arm.go | 7 +++ vendor/golang.org/x/sys/cpu/cpu_arm64.go | 7 +++ vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 16 +++++++ vendor/golang.org/x/sys/cpu/cpu_gccgo.c | 43 +++++++++++++++++ vendor/golang.org/x/sys/cpu/cpu_gccgo.go | 26 ++++++++++ vendor/golang.org/x/sys/cpu/cpu_mips64x.go | 9 ++++ vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 9 ++++ vendor/golang.org/x/sys/cpu/cpu_ppc64x.go | 9 ++++ vendor/golang.org/x/sys/cpu/cpu_s390x.go | 7 +++ vendor/golang.org/x/sys/cpu/cpu_x86.go | 55 ++++++++++++++++++++++ vendor/golang.org/x/sys/cpu/cpu_x86.s | 27 +++++++++++ vendor/vendor.json | 6 +++ 18 files changed, 311 insertions(+), 17 deletions(-) create mode 100644 vendor/golang.org/x/sys/cpu/cpu.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo.c create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mipsx.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.s diff --git a/crypto/bn256/cloudflare/gfp_amd64.s 
b/crypto/bn256/cloudflare/gfp_amd64.s index 3a785d200bcf..bdb4ffb78707 100644 --- a/crypto/bn256/cloudflare/gfp_amd64.s +++ b/crypto/bn256/cloudflare/gfp_amd64.s @@ -110,7 +110,7 @@ TEXT ·gfpMul(SB),0,$160-24 MOVQ b+16(FP), SI // Jump to a slightly different implementation if MULX isn't supported. - CMPB runtime·support_bmi2(SB), $0 + CMPB ·hasBMI2(SB), $0 JE nobmi2Mul mulBMI2(0(DI),8(DI),16(DI),24(DI), 0(SI)) diff --git a/crypto/bn256/cloudflare/gfp_decl.go b/crypto/bn256/cloudflare/gfp_decl.go index 6a8a4fddb611..fdea5c11a5be 100644 --- a/crypto/bn256/cloudflare/gfp_decl.go +++ b/crypto/bn256/cloudflare/gfp_decl.go @@ -5,6 +5,13 @@ package bn256 // This file contains forward declarations for the architecture-specific // assembly implementations of these functions, provided that they exist. +import ( + "golang.org/x/sys/cpu" +) + +//nolint:varcheck +var hasBMI2 = cpu.X86.HasBMI2 + // go:noescape func gfpNeg(c, a *gfP) diff --git a/crypto/bn256/google/bn256.go b/crypto/bn256/google/bn256.go index 5da83e033a98..e0402e51f0e2 100644 --- a/crypto/bn256/google/bn256.go +++ b/crypto/bn256/google/bn256.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package bn256 implements a particular bilinear group at the 128-bit security level. +// Package bn256 implements a particular bilinear group. // // Bilinear groups are the basis of many of the new cryptographic protocols // that have been proposed over the past decade. They consist of a triplet of @@ -14,6 +14,10 @@ // Barreto-Naehrig curve as described in // http://cryptojedi.org/papers/dclxvi-20100714.pdf. Its output is compatible // with the implementation described in that paper. +// +// (This package previously claimed to operate at a 128-bit security level. +// However, recent improvements in attacks mean that is no longer true. See +// https://moderncrypto.org/mail-archive/curves/2016/000740.html.) package bn256 import ( @@ -50,8 +54,8 @@ func RandomG1(r io.Reader) (*big.Int, *G1, error) { return k, new(G1).ScalarBaseMult(k), nil } -func (g *G1) String() string { - return "bn256.G1" + g.p.String() +func (e *G1) String() string { + return "bn256.G1" + e.p.String() } // CurvePoints returns p's curve points in big integer @@ -98,15 +102,19 @@ func (e *G1) Neg(a *G1) *G1 { } // Marshal converts n to a byte slice. -func (n *G1) Marshal() []byte { - n.p.MakeAffine(nil) - - xBytes := new(big.Int).Mod(n.p.x, P).Bytes() - yBytes := new(big.Int).Mod(n.p.y, P).Bytes() - +func (e *G1) Marshal() []byte { // Each value is a 256-bit number. const numBytes = 256 / 8 + if e.p.IsInfinity() { + return make([]byte, numBytes*2) + } + + e.p.MakeAffine(nil) + + xBytes := new(big.Int).Mod(e.p.x, P).Bytes() + yBytes := new(big.Int).Mod(e.p.y, P).Bytes() + ret := make([]byte, numBytes*2) copy(ret[1*numBytes-len(xBytes):], xBytes) copy(ret[2*numBytes-len(yBytes):], yBytes) @@ -175,8 +183,8 @@ func RandomG2(r io.Reader) (*big.Int, *G2, error) { return k, new(G2).ScalarBaseMult(k), nil } -func (g *G2) String() string { - return "bn256.G2" + g.p.String() +func (e *G2) String() string { + return "bn256.G2" + e.p.String() } // CurvePoints returns the curve points of p which includes the real @@ -216,6 +224,13 @@ func (e *G2) Add(a, b *G2) *G2 { // Marshal converts n into a byte slice. func (n *G2) Marshal() []byte { + // Each value is a 256-bit number. 
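	// (As with the G1.Marshal hunk above, the guard added next makes the
	// point at infinity encode as an all-zero, fixed-width byte slice instead
	// of reaching MakeAffine, which would otherwise have to invert z = 0.)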
+ const numBytes = 256 / 8 + + if n.p.IsInfinity() { + return make([]byte, numBytes*4) + } + n.p.MakeAffine(nil) xxBytes := new(big.Int).Mod(n.p.x.x, P).Bytes() @@ -223,9 +238,6 @@ func (n *G2) Marshal() []byte { yxBytes := new(big.Int).Mod(n.p.y.x, P).Bytes() yyBytes := new(big.Int).Mod(n.p.y.y, P).Bytes() - // Each value is a 256-bit number. - const numBytes = 256 / 8 - ret := make([]byte, numBytes*4) copy(ret[1*numBytes-len(xxBytes):], xxBytes) copy(ret[2*numBytes-len(xyBytes):], xyBytes) diff --git a/crypto/bn256/google/curve.go b/crypto/bn256/google/curve.go index 3e679fdc7e8f..819cb81da7ab 100644 --- a/crypto/bn256/google/curve.go +++ b/crypto/bn256/google/curve.go @@ -245,11 +245,19 @@ func (c *curvePoint) Mul(a *curvePoint, scalar *big.Int, pool *bnPool) *curvePoi return c } +// MakeAffine converts c to affine form and returns c. If c is ∞, then it sets +// c to 0 : 1 : 0. func (c *curvePoint) MakeAffine(pool *bnPool) *curvePoint { if words := c.z.Bits(); len(words) == 1 && words[0] == 1 { return c } - + if c.IsInfinity() { + c.x.SetInt64(0) + c.y.SetInt64(1) + c.z.SetInt64(0) + c.t.SetInt64(0) + return c + } zInv := pool.Get().ModInverse(c.z, P) t := pool.Get().Mul(c.y, zInv) t.Mod(t, P) diff --git a/crypto/bn256/google/twist.go b/crypto/bn256/google/twist.go index 1f5a4d9deb9b..43364ff5b7bd 100644 --- a/crypto/bn256/google/twist.go +++ b/crypto/bn256/google/twist.go @@ -225,11 +225,19 @@ func (c *twistPoint) Mul(a *twistPoint, scalar *big.Int, pool *bnPool) *twistPoi return c } +// MakeAffine converts c to affine form and returns c. If c is ∞, then it sets +// c to 0 : 1 : 0. func (c *twistPoint) MakeAffine(pool *bnPool) *twistPoint { if c.z.IsOne() { return c } - + if c.IsInfinity() { + c.x.SetZero() + c.y.SetOne() + c.z.SetZero() + c.t.SetZero() + return c + } zInv := newGFp2(pool).Invert(c.z, pool) t := newGFp2(pool).Mul(c.y, zInv, pool) zInv2 := newGFp2(pool).Square(zInv, pool) diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go new file mode 100644 index 000000000000..3d88f866739e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -0,0 +1,38 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cpu implements processor feature detection for +// various CPU architectures. +package cpu + +// CacheLinePad is used to pad structs to avoid false sharing. +type CacheLinePad struct{ _ [cacheLineSize]byte } + +// X86 contains the supported CPU features of the +// current X86/AMD64 platform. If the current platform +// is not X86/AMD64 then all feature flags are false. +// +// X86 is padded to avoid false sharing. Further the HasAVX +// and HasAVX2 are only set if the OS supports XMM and YMM +// registers in addition to the CPUID feature bit being set. +var X86 struct { + _ CacheLinePad + HasAES bool // AES hardware implementation (AES NI) + HasADX bool // Multi-precision add-carry instruction extensions + HasAVX bool // Advanced vector extension + HasAVX2 bool // Advanced vector extension 2 + HasBMI1 bool // Bit manipulation instruction set 1 + HasBMI2 bool // Bit manipulation instruction set 2 + HasERMS bool // Enhanced REP for MOVSB and STOSB + HasFMA bool // Fused-multiply-add instructions + HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. + HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM + HasPOPCNT bool // Hamming weight instruction POPCNT. 
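	// (These are the flags the gfp_decl.go hunk above consumes: caching
	// cpu.X86.HasBMI2 in a package variable lets gfp_amd64.s test a single
	// byte and branch to the MULX-free multiplication path when BMI2 is
	// missing.)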
+ HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64) + HasSSE3 bool // Streaming SIMD extension 3 + HasSSSE3 bool // Supplemental streaming SIMD extension 3 + HasSSE41 bool // Streaming SIMD extension 4 and 4.1 + HasSSE42 bool // Streaming SIMD extension 4 and 4.2 + _ CacheLinePad +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm.go b/vendor/golang.org/x/sys/cpu/cpu_arm.go new file mode 100644 index 000000000000..d93036f7522b --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm.go @@ -0,0 +1,7 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 32 diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go new file mode 100644 index 000000000000..1d2ab2902a76 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -0,0 +1,7 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go new file mode 100644 index 000000000000..f7cb46971cb0 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -0,0 +1,16 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 amd64p32 +// +build !gccgo + +package cpu + +// cpuid is implemented in cpu_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) + +// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. +func xgetbv() (eax, edx uint32) diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo.c new file mode 100644 index 000000000000..e363c7d13197 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo.c @@ -0,0 +1,43 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 amd64p32 +// +build gccgo + +#include +#include + +// Need to wrap __get_cpuid_count because it's declared as static. +int +gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf, + uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx) +{ + return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx); +} + +// xgetbv reads the contents of an XCR (Extended Control Register) +// specified in the ECX register into registers EDX:EAX. +// Currently, the only supported value for XCR is 0. +// +// TODO: Replace with a better alternative: +// +// #include +// +// #pragma GCC target("xsave") +// +// void gccgoXgetbv(uint32_t *eax, uint32_t *edx) { +// unsigned long long x = _xgetbv(0); +// *eax = x & 0xffffffff; +// *edx = (x >> 32) & 0xffffffff; +// } +// +// Note that _xgetbv is defined starting with GCC 8. +void +gccgoXgetbv(uint32_t *eax, uint32_t *edx) +{ + __asm(" xorl %%ecx, %%ecx\n" + " xgetbv" + : "=a"(*eax), "=d"(*edx)); +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo.go new file mode 100644 index 000000000000..ba49b91bd398 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo.go @@ -0,0 +1,26 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 amd64p32 +// +build gccgo + +package cpu + +//extern gccgoGetCpuidCount +func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32) + +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) { + var a, b, c, d uint32 + gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d) + return a, b, c, d +} + +//extern gccgoXgetbv +func gccgoXgetbv(eax, edx *uint32) + +func xgetbv() (eax, edx uint32) { + var a, d uint32 + gccgoXgetbv(&a, &d) + return a, d +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go new file mode 100644 index 000000000000..6165f121249a --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build mips64 mips64le + +package cpu + +const cacheLineSize = 32 diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go new file mode 100644 index 000000000000..1269eee88d00 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build mips mipsle + +package cpu + +const cacheLineSize = 32 diff --git a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go new file mode 100644 index 000000000000..d10759a524f2 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ppc64 ppc64le + +package cpu + +const cacheLineSize = 128 diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_s390x.go new file mode 100644 index 000000000000..684c4f005d09 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.go @@ -0,0 +1,7 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 256 diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go new file mode 100644 index 000000000000..71e288b06223 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -0,0 +1,55 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 amd64p32 + +package cpu + +const cacheLineSize = 64 + +func init() { + maxID, _, _, _ := cpuid(0, 0) + + if maxID < 1 { + return + } + + _, _, ecx1, edx1 := cpuid(1, 0) + X86.HasSSE2 = isSet(26, edx1) + + X86.HasSSE3 = isSet(0, ecx1) + X86.HasPCLMULQDQ = isSet(1, ecx1) + X86.HasSSSE3 = isSet(9, ecx1) + X86.HasFMA = isSet(12, ecx1) + X86.HasSSE41 = isSet(19, ecx1) + X86.HasSSE42 = isSet(20, ecx1) + X86.HasPOPCNT = isSet(23, ecx1) + X86.HasAES = isSet(25, ecx1) + X86.HasOSXSAVE = isSet(27, ecx1) + + osSupportsAVX := false + // For XGETBV, OSXSAVE bit is required and sufficient. + if X86.HasOSXSAVE { + eax, _ := xgetbv() + // Check if XMM and YMM registers have OS support. 
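	// (XGETBV with ECX=0 reads XCR0: bit 1 covers SSE/XMM state and bit 2
	// AVX/YMM state. Both must be OS-enabled before the CPUID AVX bits below
	// can be trusted.)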
+ osSupportsAVX = isSet(1, eax) && isSet(2, eax) + } + + X86.HasAVX = isSet(28, ecx1) && osSupportsAVX + + if maxID < 7 { + return + } + + _, ebx7, _, _ := cpuid(7, 0) + X86.HasBMI1 = isSet(3, ebx7) + X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX + X86.HasBMI2 = isSet(8, ebx7) + X86.HasERMS = isSet(9, ebx7) + X86.HasADX = isSet(19, ebx7) +} + +func isSet(bitpos uint, value uint32) bool { + return value&(1< Date: Thu, 16 Aug 2018 19:14:33 +0800 Subject: [PATCH 138/166] miner: regenerate mining work every 3 seconds (#17413) * miner: regenerate mining work every 3 seconds * miner: polish --- core/events.go | 3 - miner/worker.go | 332 ++++++++++++++++++++++++++----------------- miner/worker_test.go | 65 +++++++++ 3 files changed, 267 insertions(+), 133 deletions(-) diff --git a/core/events.go b/core/events.go index 8d200f2a29fc..710bdb589485 100644 --- a/core/events.go +++ b/core/events.go @@ -29,9 +29,6 @@ type PendingLogsEvent struct { Logs []*types.Log } -// PendingStateEvent is posted pre mining and notifies of pending state changes. -type PendingStateEvent struct{} - // NewMinedBlockEvent is posted when a block has been imported. type NewMinedBlockEvent struct{ Block *types.Block } diff --git a/miner/worker.go b/miner/worker.go index fae480c84cca..81a9eabfd5d8 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -40,19 +40,27 @@ import ( const ( // resultQueueSize is the size of channel listening to sealing result. resultQueueSize = 10 + // txChanSize is the size of channel listening to NewTxsEvent. // The number is referenced from the size of tx pool. txChanSize = 4096 + // chainHeadChanSize is the size of channel listening to ChainHeadEvent. chainHeadChanSize = 10 + // chainSideChanSize is the size of channel listening to ChainSideEvent. chainSideChanSize = 10 - miningLogAtDepth = 5 + + // miningLogAtDepth is the number of confirmations before logging successful mining. + miningLogAtDepth = 5 + + // blockRecommitInterval is the time interval to recreate the mining block with + // any newly arrived transactions. + blockRecommitInterval = 3 * time.Second ) -// Env is the worker's current environment and holds all of the current state information. -type Env struct { - config *params.ChainConfig +// environment is the worker's current environment and holds all of the current state information. 
+type environment struct { signer types.Signer state *state.StateDB // apply state changes here @@ -67,105 +75,6 @@ type Env struct { receipts []*types.Receipt } -func (env *Env) commitTransaction(tx *types.Transaction, bc *core.BlockChain, coinbase common.Address, gp *core.GasPool) (error, []*types.Log) { - snap := env.state.Snapshot() - - receipt, _, err := core.ApplyTransaction(env.config, bc, &coinbase, gp, env.state, env.header, tx, &env.header.GasUsed, vm.Config{}) - if err != nil { - env.state.RevertToSnapshot(snap) - return err, nil - } - env.txs = append(env.txs, tx) - env.receipts = append(env.receipts, receipt) - - return nil, receipt.Logs -} - -func (env *Env) commitTransactions(mux *event.TypeMux, txs *types.TransactionsByPriceAndNonce, bc *core.BlockChain, coinbase common.Address) { - if env.gasPool == nil { - env.gasPool = new(core.GasPool).AddGas(env.header.GasLimit) - } - - var coalescedLogs []*types.Log - - for { - // If we don't have enough gas for any further transactions then we're done - if env.gasPool.Gas() < params.TxGas { - log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas) - break - } - // Retrieve the next transaction and abort if all done - tx := txs.Peek() - if tx == nil { - break - } - // Error may be ignored here. The error has already been checked - // during transaction acceptance is the transaction pool. - // - // We use the eip155 signer regardless of the current hf. - from, _ := types.Sender(env.signer, tx) - // Check whether the tx is replay protected. If we're not in the EIP155 hf - // phase, start ignoring the sender until we do. - if tx.Protected() && !env.config.IsEIP155(env.header.Number) { - log.Trace("Ignoring reply protected transaction", "hash", tx.Hash(), "eip155", env.config.EIP155Block) - - txs.Pop() - continue - } - // Start executing the transaction - env.state.Prepare(tx.Hash(), common.Hash{}, env.tcount) - - err, logs := env.commitTransaction(tx, bc, coinbase, env.gasPool) - switch err { - case core.ErrGasLimitReached: - // Pop the current out-of-gas transaction without shifting in the next from the account - log.Trace("Gas limit exceeded for current block", "sender", from) - txs.Pop() - - case core.ErrNonceTooLow: - // New head notification data race between the transaction pool and miner, shift - log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce()) - txs.Shift() - - case core.ErrNonceTooHigh: - // Reorg notification data race between the transaction pool and miner, skip account = - log.Trace("Skipping account with hight nonce", "sender", from, "nonce", tx.Nonce()) - txs.Pop() - - case nil: - // Everything ok, collect the logs and shift in the next transaction from the same account - coalescedLogs = append(coalescedLogs, logs...) - env.tcount++ - txs.Shift() - - default: - // Strange error, discard the transaction and get the next in line (note, the - // nonce-too-high clause will prevent us from executing in vain). - log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err) - txs.Shift() - } - } - - if len(coalescedLogs) > 0 || env.tcount > 0 { - // make a copy, the state caches the logs and these logs get "upgraded" from pending to mined - // logs by filling in the block hash when the block was mined by the local miner. This can - // cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed. 
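The handshake driving all of this is small enough to demo on its own. A minimal, runnable sketch of the interrupt pattern follows; the package, names, and timings are illustrative, not part of the patch:

    package main

    import (
        "fmt"
        "sync/atomic"
        "time"
    )

    const (
        interruptNone int32 = iota
        interruptNewHead
        interruptResubmit
    )

    // mine simulates one commit cycle: it does a unit of work, then polls its
    // interrupt flag, much like the checks inside commitTransactions.
    func mine(interrupt *int32, id int, done chan<- string) {
        for step := 0; ; step++ {
            if s := atomic.LoadInt32(interrupt); s != interruptNone {
                done <- fmt.Sprintf("cycle %d aborted at step %d (signal %d)", id, step, s)
                return
            }
            time.Sleep(5 * time.Millisecond)
        }
    }

    func main() {
        done := make(chan string, 2)
        var prev *int32
        for id := 0; id < 2; id++ {
            if prev != nil {
                atomic.StoreInt32(prev, interruptResubmit) // abort the running cycle first
            }
            prev = new(int32) // each new cycle gets a fresh flag
            go mine(prev, id, done)
            time.Sleep(20 * time.Millisecond)
        }
        atomic.StoreInt32(prev, interruptNewHead)
        fmt.Println(<-done)
        fmt.Println(<-done)
    }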
- cpy := make([]*types.Log, len(coalescedLogs)) - for i, l := range coalescedLogs { - cpy[i] = new(types.Log) - *cpy[i] = *l - } - go func(logs []*types.Log, tcount int) { - if len(logs) > 0 { - mux.Post(core.PendingLogsEvent{Logs: logs}) - } - if tcount > 0 { - mux.Post(core.PendingStateEvent{}) - } - }(cpy, env.tcount) - } -} - // task contains all information for consensus engine sealing and result submitting. type task struct { receipts []*types.Receipt @@ -174,6 +83,17 @@ type task struct { createdAt time.Time } +const ( + commitInterruptNone int32 = iota + commitInterruptNewHead + commitInterruptResubmit +) + +type newWorkReq struct { + interrupt *int32 + noempty bool +} + // worker is the main object which takes care of submitting new work to consensus engine // and gathering the sealing result. type worker struct { @@ -192,12 +112,13 @@ type worker struct { chainSideSub event.Subscription // Channels - newWork chan struct{} - taskCh chan *task - resultCh chan *task - exitCh chan struct{} + newWorkCh chan *newWorkReq + taskCh chan *task + resultCh chan *task + startCh chan struct{} + exitCh chan struct{} - current *Env // An environment for current running cycle. + current *environment // An environment for current running cycle. possibleUncles map[common.Hash]*types.Block // A set of side blocks as the possible uncle blocks. unconfirmed *unconfirmedBlocks // A set of locally mined blocks pending canonicalness confirmations. @@ -230,10 +151,11 @@ func newWorker(config *params.ChainConfig, engine consensus.Engine, eth Backend, txsCh: make(chan core.NewTxsEvent, txChanSize), chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize), chainSideCh: make(chan core.ChainSideEvent, chainSideChanSize), - newWork: make(chan struct{}, 1), + newWorkCh: make(chan *newWorkReq), taskCh: make(chan *task), resultCh: make(chan *task, resultQueueSize), exitCh: make(chan struct{}), + startCh: make(chan struct{}, 1), } // Subscribe NewTxsEvent for tx pool worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh) @@ -242,11 +164,13 @@ func newWorker(config *params.ChainConfig, engine consensus.Engine, eth Backend, worker.chainSideSub = eth.BlockChain().SubscribeChainSideEvent(worker.chainSideCh) go worker.mainLoop() + go worker.newWorkLoop() go worker.resultLoop() go worker.taskLoop() // Submit first work to initialize pending state. - worker.newWork <- struct{}{} + worker.startCh <- struct{}{} + return worker } @@ -286,7 +210,7 @@ func (w *worker) pendingBlock() *types.Block { // start sets the running status as 1 and triggers new work submitting. func (w *worker) start() { atomic.StoreInt32(&w.running, 1) - w.newWork <- struct{}{} + w.startCh <- struct{}{} } // stop sets the running status as 0. @@ -313,6 +237,44 @@ func (w *worker) close() { } } +// newWorkLoop is a standalone goroutine to submit new mining work upon received events. +func (w *worker) newWorkLoop() { + var interrupt *int32 + + timer := time.NewTimer(0) + <-timer.C // discard the initial tick + + // recommit aborts in-flight transaction execution with given signal and resubmits a new one. 
+ recommit := func(noempty bool, s int32) { + if interrupt != nil { + atomic.StoreInt32(interrupt, s) + } + interrupt = new(int32) + w.newWorkCh <- &newWorkReq{interrupt: interrupt, noempty: noempty} + timer.Reset(blockRecommitInterval) + } + + for { + select { + case <-w.startCh: + recommit(false, commitInterruptNewHead) + + case <-w.chainHeadCh: + recommit(false, commitInterruptNewHead) + + case <-timer.C: + // If mining is running resubmit a new work cycle periodically to pull in + // higher priced transactions. Disable this overhead for pending blocks. + if w.isRunning() && (w.config.Clique == nil || w.config.Clique.Period > 0) { + recommit(true, commitInterruptResubmit) + } + + case <-w.exitCh: + return + } + } +} + // mainLoop is a standalone goroutine to regenerate the sealing task based on the received event. func (w *worker) mainLoop() { defer w.txsSub.Unsubscribe() @@ -321,13 +283,8 @@ func (w *worker) mainLoop() { for { select { - case <-w.newWork: - // Submit a work when the worker is created or started. - w.commitNewWork() - - case <-w.chainHeadCh: - // Resubmit a work for new cycle once worker receives chain head event. - w.commitNewWork() + case req := <-w.newWorkCh: + w.commitNewWork(req.interrupt, req.noempty) case ev := <-w.chainSideCh: if _, exist := w.possibleUncles[ev.Block.Hash()]; exist { @@ -364,9 +321,9 @@ func (w *worker) mainLoop() { // already included in the current mining block. These transactions will // be automatically eliminated. if !w.isRunning() && w.current != nil { - w.mu.Lock() + w.mu.RLock() coinbase := w.coinbase - w.mu.Unlock() + w.mu.RUnlock() txs := make(map[common.Address]types.Transactions) for _, tx := range ev.Txs { @@ -374,12 +331,12 @@ func (w *worker) mainLoop() { txs[acc] = append(txs[acc], tx) } txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs) - w.current.commitTransactions(w.mux, txset, w.chain, coinbase) + w.commitTransactions(txset, coinbase, nil) w.updateSnapshot() } else { // If we're mining, but nothing is being processed, wake on new transactions if w.config.Clique != nil && w.config.Clique.Period == 0 { - w.commitNewWork() + w.commitNewWork(nil, false) } } @@ -508,8 +465,7 @@ func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error { if err != nil { return err } - env := &Env{ - config: w.config, + env := &environment{ signer: types.NewEIP155Signer(w.config.ChainID), state: state, ancestors: mapset.NewSet(), @@ -534,7 +490,7 @@ func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error { } // commitUncle adds the given block to uncle block set, returns error if failed to add. 
-func (w *worker) commitUncle(env *Env, uncle *types.Header) error { +func (w *worker) commitUncle(env *environment, uncle *types.Header) error { hash := uncle.Hash() if env.uncles.Contains(hash) { return fmt.Errorf("uncle not unique") @@ -579,8 +535,120 @@ func (w *worker) updateSnapshot() { w.snapshotState = w.current.state.Copy() } +func (w *worker) commitTransaction(tx *types.Transaction, coinbase common.Address) ([]*types.Log, error) { + snap := w.current.state.Snapshot() + + receipt, _, err := core.ApplyTransaction(w.config, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.header, tx, &w.current.header.GasUsed, vm.Config{}) + if err != nil { + w.current.state.RevertToSnapshot(snap) + return nil, err + } + w.current.txs = append(w.current.txs, tx) + w.current.receipts = append(w.current.receipts, receipt) + + return receipt.Logs, nil +} + +func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coinbase common.Address, interrupt *int32) bool { + // Short circuit if current is nil + if w.current == nil { + return true + } + + if w.current.gasPool == nil { + w.current.gasPool = new(core.GasPool).AddGas(w.current.header.GasLimit) + } + + var coalescedLogs []*types.Log + + for { + // In the following three cases, we will interrupt the execution of the transaction. + // (1) new head block event arrival, the interrupt signal is 1 + // (2) worker start or restart, the interrupt signal is 1 + // (3) worker recreate the mining block with any newly arrived transactions, the interrupt signal is 2. + // For the first two cases, the semi-finished work will be discarded. + // For the third case, the semi-finished work will be submitted to the consensus engine. + // TODO(rjl493456442) give feedback to newWorkLoop to adjust resubmit interval if it is too short. + if interrupt != nil && atomic.LoadInt32(interrupt) != commitInterruptNone { + return atomic.LoadInt32(interrupt) == commitInterruptNewHead + } + // If we don't have enough gas for any further transactions then we're done + if w.current.gasPool.Gas() < params.TxGas { + log.Trace("Not enough gas for further transactions", "have", w.current.gasPool, "want", params.TxGas) + break + } + // Retrieve the next transaction and abort if all done + tx := txs.Peek() + if tx == nil { + break + } + // Error may be ignored here. The error has already been checked + // during transaction acceptance is the transaction pool. + // + // We use the eip155 signer regardless of the current hf. + from, _ := types.Sender(w.current.signer, tx) + // Check whether the tx is replay protected. If we're not in the EIP155 hf + // phase, start ignoring the sender until we do. 
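The check that follows is easy to get backwards, so here is a hedged standalone sketch of the same rule, assuming only the core/types package from this tree; filterReplayable is an illustrative name, not part of the patch:

    package example

    import "github.com/ethereum/go-ethereum/core/types"

    // filterReplayable mirrors the rule below: a transaction carrying an
    // EIP-155 chain ID ("protected") is only acceptable once the fork is
    // active for the current block.
    func filterReplayable(txs []*types.Transaction, eip155Active bool) []*types.Transaction {
        var kept []*types.Transaction
        for _, tx := range txs {
            if tx.Protected() && !eip155Active {
                continue // pre-fork: ignore the sender until EIP-155 activates
            }
            kept = append(kept, tx)
        }
        return kept
    }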
+		if tx.Protected() && !w.config.IsEIP155(w.current.header.Number) {
+			log.Trace("Ignoring replay protected transaction", "hash", tx.Hash(), "eip155", w.config.EIP155Block)
+
+			txs.Pop()
+			continue
+		}
+		// Start executing the transaction
+		w.current.state.Prepare(tx.Hash(), common.Hash{}, w.current.tcount)
+
+		logs, err := w.commitTransaction(tx, coinbase)
+		switch err {
+		case core.ErrGasLimitReached:
+			// Pop the current out-of-gas transaction without shifting in the next from the account
+			log.Trace("Gas limit exceeded for current block", "sender", from)
+			txs.Pop()
+
+		case core.ErrNonceTooLow:
+			// New head notification data race between the transaction pool and miner, shift
+			log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
+			txs.Shift()
+
+		case core.ErrNonceTooHigh:
+			// Reorg notification data race between the transaction pool and miner, skip account
+			log.Trace("Skipping account with high nonce", "sender", from, "nonce", tx.Nonce())
+			txs.Pop()
+
+		case nil:
+			// Everything ok, collect the logs and shift in the next transaction from the same account
+			coalescedLogs = append(coalescedLogs, logs...)
+			w.current.tcount++
+			txs.Shift()
+
+		default:
+			// Strange error, discard the transaction and get the next in line (note, the
+			// nonce-too-high clause will prevent us from executing in vain).
+			log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
+			txs.Shift()
+		}
+	}
+
+	if !w.isRunning() && len(coalescedLogs) > 0 {
+		// We don't push the pendingLogsEvent while we are mining. The reason is that
+		// when we are mining, the worker will regenerate a mining block every 3 seconds.
+		// To avoid posting duplicate pending logs, pending log pushing is disabled here.
+
+		// make a copy, the state caches the logs and these logs get "upgraded" from pending to mined
+		// logs by filling in the block hash when the block was mined by the local miner. This can
+		// cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed.
+		cpy := make([]*types.Log, len(coalescedLogs))
+		for i, l := range coalescedLogs {
+			cpy[i] = new(types.Log)
+			*cpy[i] = *l
+		}
+		go w.mux.Post(core.PendingLogsEvent{Logs: cpy})
+	}
+	return false
+}
+
 // commitNewWork generates several new sealing tasks based on the parent block.
-func (w *worker) commitNewWork() {
+func (w *worker) commitNewWork(interrupt *int32, noempty bool) {
 	w.mu.RLock()
 	defer w.mu.RUnlock()
@@ -666,9 +734,11 @@ func (w *worker) commitNewWork() {
 		delete(w.possibleUncles, hash)
 	}
 
-	// Create an empty block based on temporary copied state for sealing in advance without waiting block
-	// execution finished.
-	w.commit(uncles, nil, false, tstart)
+	if !noempty {
+		// Create an empty block based on temporary copied state for sealing in advance without waiting block
+		// execution finished.
+		w.commit(uncles, nil, false, tstart)
+	}
 
 	// Fill the block with all available pending transactions.
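The switch above relies on the iterator contract of types.TransactionsByPriceAndNonce: Peek returns the best-priced head transaction, Shift advances to the same sender's next nonce, and Pop abandons the sender for the round. A compact sketch of a consumer of that contract (drain is an illustrative name, not part of the patch):

    package example

    import "github.com/ethereum/go-ethereum/core/types"

    // drain consumes a price-and-nonce-ordered set the same way the loop
    // above does: Shift on success, Pop to drop a sender for the round.
    func drain(txs *types.TransactionsByPriceAndNonce, apply func(*types.Transaction) error) int {
        applied := 0
        for {
            tx := txs.Peek()
            if tx == nil {
                return applied // every sender exhausted
            }
            if err := apply(tx); err != nil {
                txs.Pop() // skip this sender's remaining transactions
                continue
            }
            applied++
            txs.Shift() // success: move to the sender's next nonce
        }
    }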
pending, err := w.eth.TxPool().Pending() @@ -682,7 +752,9 @@ func (w *worker) commitNewWork() { return } txs := types.NewTransactionsByPriceAndNonce(w.current.signer, pending) - env.commitTransactions(w.mux, txs, w.chain, w.coinbase) + if w.commitTransactions(txs, w.coinbase, interrupt) { + return + } w.commit(uncles, w.fullTaskHook, true, tstart) } diff --git a/miner/worker_test.go b/miner/worker_test.go index 408c47e3b38b..34bb7f5f3341 100644 --- a/miner/worker_test.go +++ b/miner/worker_test.go @@ -270,3 +270,68 @@ func TestStreamUncleBlock(t *testing.T) { t.Error("new task timeout") } } + +func TestRegenerateMiningBlockEthash(t *testing.T) { + testRegenerateMiningBlock(t, ethashChainConfig, ethash.NewFaker()) +} + +func TestRegenerateMiningBlockClique(t *testing.T) { + testRegenerateMiningBlock(t, cliqueChainConfig, clique.New(cliqueChainConfig.Clique, ethdb.NewMemDatabase())) +} + +func testRegenerateMiningBlock(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) { + defer engine.Close() + + w, b := newTestWorker(t, chainConfig, engine) + defer w.close() + + var taskCh = make(chan struct{}) + + taskIndex := 0 + w.newTaskHook = func(task *task) { + if task.block.NumberU64() == 1 { + if taskIndex == 2 { + receiptLen, balance := 2, big.NewInt(2000) + if len(task.receipts) != receiptLen { + t.Errorf("receipt number mismatch has %d, want %d", len(task.receipts), receiptLen) + } + if task.state.GetBalance(acc1Addr).Cmp(balance) != 0 { + t.Errorf("account balance mismatch has %d, want %d", task.state.GetBalance(acc1Addr), balance) + } + } + taskCh <- struct{}{} + taskIndex += 1 + } + } + w.skipSealHook = func(task *task) bool { + return true + } + w.fullTaskHook = func() { + time.Sleep(100 * time.Millisecond) + } + // Ensure worker has finished initialization + for { + b := w.pendingBlock() + if b != nil && b.NumberU64() == 1 { + break + } + } + + w.start() + // Ignore the first two works + for i := 0; i < 2; i += 1 { + select { + case <-taskCh: + case <-time.NewTimer(time.Second).C: + t.Error("new task timeout") + } + } + b.txPool.AddLocals(newTxs) + time.Sleep(3 * time.Second) + + select { + case <-taskCh: + case <-time.NewTimer(time.Second).C: + t.Error("new task timeout") + } +} From 62f5137a72c69ae8cffbc3526611af77448d25de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 16 Aug 2018 14:47:49 +0300 Subject: [PATCH 139/166] miner: add gas and fee details to mining logs --- miner/worker.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/miner/worker.go b/miner/worker.go index 81a9eabfd5d8..e7e279645805 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -780,8 +780,16 @@ func (w *worker) commit(uncles []*types.Header, interval func(), update bool, st select { case w.taskCh <- &task{receipts: receipts, state: s, block: block, createdAt: time.Now()}: w.unconfirmed.Shift(block.NumberU64() - 1) - log.Info("Commit new mining work", "number", block.Number(), "txs", w.current.tcount, "uncles", len(uncles), - "elapsed", common.PrettyDuration(time.Since(start))) + + feesWei := new(big.Int) + for _, tx := range block.Transactions() { + feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(tx.Gas()), tx.GasPrice())) + } + feesEth := new(big.Float).Quo(new(big.Float).SetInt(feesWei), new(big.Float).SetInt(big.NewInt(params.Ether))) + + log.Info("Commit new mining work", "number", block.Number(), "uncles", len(uncles), "txs", w.current.tcount, + "gas", block.GasUsed(), "fees", feesEth, "elapsed", 
common.PrettyDuration(time.Since(start))) + case <-w.exitCh: log.Info("Worker has exited") } From 60390878a5128e64228de6873b5a16adddbbd68b Mon Sep 17 00:00:00 2001 From: Sasuke1964 Date: Fri, 17 Aug 2018 02:38:02 -0500 Subject: [PATCH 140/166] accounts: fixed typo (#17421) --- accounts/accounts.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/accounts/accounts.go b/accounts/accounts.go index 76951e1a42a6..cb1eae281587 100644 --- a/accounts/accounts.go +++ b/accounts/accounts.go @@ -106,7 +106,7 @@ type Wallet interface { // or optionally with the aid of any location metadata from the embedded URL field. // // If the wallet requires additional authentication to sign the request (e.g. - // a password to decrypt the account, or a PIN code o verify the transaction), + // a password to decrypt the account, or a PIN code to verify the transaction), // an AuthNeededError instance will be returned, containing infos for the user // about which fields or actions are needed. The user may retry by providing // the needed details via SignTxWithPassphrase, or by other means (e.g. unlock From f44046a1c6889049dbf0f9448075a43f5b280b09 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Fri, 17 Aug 2018 10:33:39 +0200 Subject: [PATCH 141/166] build: do not require `ethereum-swarm` deb when installing `ethereum` (#17425) --- build/ci.go | 11 ----------- build/deb/ethereum/deb.control | 2 +- 2 files changed, 1 insertion(+), 12 deletions(-) diff --git a/build/ci.go b/build/ci.go index ff23e15fdbad..40252cbde1ff 100644 --- a/build/ci.go +++ b/build/ci.go @@ -644,17 +644,6 @@ func (meta debMetadata) ExeName(exe debExecutable) string { return exe.Package() } -// EthereumSwarmPackageName returns the name of the swarm package based on -// environment, e.g. "ethereum-swarm-unstable", or "ethereum-swarm". -// This is needed so that we make sure that "ethereum" package, -// depends on and installs "ethereum-swarm" -func (meta debMetadata) EthereumSwarmPackageName() string { - if isUnstableBuild(meta.Env) { - return debSwarm.Name + "-unstable" - } - return debSwarm.Name -} - // ExeConflicts returns the content of the Conflicts field // for executable packages. func (meta debMetadata) ExeConflicts(exe debExecutable) string { diff --git a/build/deb/ethereum/deb.control b/build/deb/ethereum/deb.control index e693d1d046ec..defb106fe367 100644 --- a/build/deb/ethereum/deb.control +++ b/build/deb/ethereum/deb.control @@ -10,7 +10,7 @@ Vcs-Browser: https://github.com/ethereum/go-ethereum Package: {{.Name}} Architecture: any -Depends: ${misc:Depends}, {{.EthereumSwarmPackageName}}, {{.ExeList}} +Depends: ${misc:Depends}, {{.ExeList}} Description: Meta-package to install geth, swarm, and other tools Meta-package to install geth, swarm and other tools From 2695fa2213fe5010a80970bca1078834662d5972 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 17 Aug 2018 12:21:53 +0200 Subject: [PATCH 142/166] les: fix crasher in NodeInfo when running as server (#17419) * les: fix crasher in NodeInfo when running as server The ProtocolManager computes CHT and Bloom trie roots by asking the indexers for their current head. It tried to get the indexers from LesOdr, but no LesOdr instance is created in server mode. Attempt to fix this by moving the indexers, protocol creation and NodeInfo to a new lesCommons struct which is embedded into both server and client. All this setup code should really be cleaned up, but this is just a hotfix so we have to do that some other time. 
* les: fix commons protocol maker --- les/backend.go | 48 ++++++++--------- les/commons.go | 106 +++++++++++++++++++++++++++++++++++++ les/handler.go | 128 ++++++++++----------------------------------- les/helper_test.go | 10 +--- les/server.go | 38 +++++++------- 5 files changed, 178 insertions(+), 152 deletions(-) create mode 100644 les/commons.go diff --git a/les/backend.go b/les/backend.go index 9b8cc1828f5c..d26c1470fe0d 100644 --- a/les/backend.go +++ b/les/backend.go @@ -34,7 +34,6 @@ import ( "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/filters" "github.com/ethereum/go-ethereum/eth/gasprice" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/light" @@ -47,26 +46,24 @@ import ( ) type LightEthereum struct { - config *eth.Config + lesCommons odr *LesOdr relay *LesTxRelay chainConfig *params.ChainConfig // Channel for shutting down the service shutdownChan chan bool + // Handlers - peers *peerSet - txPool *light.TxPool - blockchain *light.LightChain - protocolManager *ProtocolManager - serverPool *serverPool - reqDist *requestDistributor - retriever *retrieveManager - // DB interfaces - chainDb ethdb.Database // Block chain database - - bloomRequests chan chan *bloombits.Retrieval // Channel receiving bloom data retrieval requests - bloomIndexer, chtIndexer, bloomTrieIndexer *core.ChainIndexer + peers *peerSet + txPool *light.TxPool + blockchain *light.LightChain + serverPool *serverPool + reqDist *requestDistributor + retriever *retrieveManager + + bloomRequests chan chan *bloombits.Retrieval // Channel receiving bloom data retrieval requests + bloomIndexer *core.ChainIndexer ApiBackend *LesApiBackend @@ -95,9 +92,11 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) { quitSync := make(chan struct{}) leth := &LightEthereum{ - config: config, + lesCommons: lesCommons{ + chainDb: chainDb, + config: config, + }, chainConfig: chainConfig, - chainDb: chainDb, eventMux: ctx.EventMux, peers: peers, reqDist: newRequestDistributor(peers, quitSync), @@ -112,10 +111,12 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) { leth.relay = NewLesTxRelay(peers, leth.reqDist) leth.serverPool = newServerPool(chainDb, quitSync, &leth.wg) leth.retriever = newRetrieveManager(peers, leth.reqDist, leth.serverPool) + leth.odr = NewLesOdr(chainDb, leth.retriever) leth.chtIndexer = light.NewChtIndexer(chainDb, true, leth.odr) leth.bloomTrieIndexer = light.NewBloomTrieIndexer(chainDb, true, leth.odr) leth.odr.SetIndexers(leth.chtIndexer, leth.bloomTrieIndexer, leth.bloomIndexer) + // Note: NewLightChain adds the trusted checkpoint so it needs an ODR with // indexers already set but not started yet if leth.blockchain, err = light.NewLightChain(leth.odr, leth.chainConfig, leth.engine); err != nil { @@ -125,6 +126,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) { leth.bloomIndexer.AddChildIndexer(leth.bloomTrieIndexer) leth.chtIndexer.Start(leth.blockchain) leth.bloomIndexer.Start(leth.blockchain) + // Rewind the chain in case of an incompatible config upgrade. 
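	// (params.ConfigCompatError identifies the fork setting that changed and
	// carries a RewindTo height. The rewind itself, presumably via
	// SetHead(compat.RewindTo), falls outside the context shown in this hunk;
	// the effect is to drop blocks above that height so they can be
	// re-imported under the new chain config.)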
if compat, ok := genesisErr.(*params.ConfigCompatError); ok { log.Warn("Rewinding chain to upgrade configuration", "err", compat) @@ -133,7 +135,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) { } leth.txPool = light.NewTxPool(leth.chainConfig, leth.blockchain, leth.relay) - if leth.protocolManager, err = NewProtocolManager(leth.chainConfig, true, ClientProtocolVersions, config.NetworkId, leth.eventMux, leth.engine, leth.peers, leth.blockchain, nil, chainDb, leth.odr, leth.relay, leth.serverPool, quitSync, &leth.wg); err != nil { + if leth.protocolManager, err = NewProtocolManager(leth.chainConfig, true, config.NetworkId, leth.eventMux, leth.engine, leth.peers, leth.blockchain, nil, chainDb, leth.odr, leth.relay, leth.serverPool, quitSync, &leth.wg); err != nil { return nil, err } leth.ApiBackend = &LesApiBackend{leth, nil} @@ -215,14 +217,14 @@ func (s *LightEthereum) ResetWithGenesisBlock(gb *types.Block) { func (s *LightEthereum) BlockChain() *light.LightChain { return s.blockchain } func (s *LightEthereum) TxPool() *light.TxPool { return s.txPool } func (s *LightEthereum) Engine() consensus.Engine { return s.engine } -func (s *LightEthereum) LesVersion() int { return int(s.protocolManager.SubProtocols[0].Version) } +func (s *LightEthereum) LesVersion() int { return int(ClientProtocolVersions[0]) } func (s *LightEthereum) Downloader() *downloader.Downloader { return s.protocolManager.downloader } func (s *LightEthereum) EventMux() *event.TypeMux { return s.eventMux } // Protocols implements node.Service, returning all the currently configured // network protocols to start. func (s *LightEthereum) Protocols() []p2p.Protocol { - return s.protocolManager.SubProtocols + return s.makeProtocols(ClientProtocolVersions) } // Start implements node.Service, starting all internal goroutines needed by the @@ -242,12 +244,8 @@ func (s *LightEthereum) Start(srvr *p2p.Server) error { // Ethereum protocol. func (s *LightEthereum) Stop() error { s.odr.Stop() - if s.bloomIndexer != nil { - s.bloomIndexer.Close() - } - if s.chtIndexer != nil { - s.chtIndexer.Close() - } + s.bloomIndexer.Close() + s.chtIndexer.Close() s.blockchain.Stop() s.protocolManager.Stop() s.txPool.Stop() diff --git a/les/commons.go b/les/commons.go new file mode 100644 index 000000000000..251b7a5833c6 --- /dev/null +++ b/les/commons.go @@ -0,0 +1,106 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package les + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/light" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/params" +) + +// lesCommons contains fields needed by both server and client. +type lesCommons struct { + config *eth.Config + chainDb ethdb.Database + protocolManager *ProtocolManager + chtIndexer, bloomTrieIndexer *core.ChainIndexer +} + +// NodeInfo represents a short summary of the Ethereum sub-protocol metadata +// known about the host peer. +type NodeInfo struct { + Network uint64 `json:"network"` // Ethereum network ID (1=Frontier, 2=Morden, Ropsten=3, Rinkeby=4) + Difficulty *big.Int `json:"difficulty"` // Total difficulty of the host's blockchain + Genesis common.Hash `json:"genesis"` // SHA3 hash of the host's genesis block + Config *params.ChainConfig `json:"config"` // Chain configuration for the fork rules + Head common.Hash `json:"head"` // SHA3 hash of the host's best owned block + CHT light.TrustedCheckpoint `json:"cht"` // Trused CHT checkpoint for fast catchup +} + +// makeProtocols creates protocol descriptors for the given LES versions. +func (c *lesCommons) makeProtocols(versions []uint) []p2p.Protocol { + protos := make([]p2p.Protocol, len(versions)) + for i, version := range versions { + version := version + protos[i] = p2p.Protocol{ + Name: "les", + Version: version, + Length: ProtocolLengths[version], + NodeInfo: c.nodeInfo, + Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error { + return c.protocolManager.runPeer(version, p, rw) + }, + PeerInfo: func(id discover.NodeID) interface{} { + if p := c.protocolManager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil { + return p.Info() + } + return nil + }, + } + } + return protos +} + +// nodeInfo retrieves some protocol metadata about the running host node. 
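The refactor's core trick is plain struct embedding: both the client (LightEthereum) and the server (LesServer) embed lesCommons and so share its fields and methods. A toy, runnable model of the pattern, with all names illustrative:

    package main

    import "fmt"

    type commons struct{ networkID uint64 }

    func (c *commons) nodeInfo() string { return fmt.Sprintf("network %d", c.networkID) }

    type client struct{ commons } // plays the LightEthereum role
    type server struct{ commons } // plays the LesServer role

    func main() {
        c := client{commons{1}}
        s := server{commons{4}}
        fmt.Println(c.nodeInfo(), "/", s.nodeInfo()) // both inherit the method
    }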
+func (c *lesCommons) nodeInfo() interface{} { + var cht light.TrustedCheckpoint + sections, _, sectionHead := c.chtIndexer.Sections() + sections2, _, sectionHead2 := c.bloomTrieIndexer.Sections() + if sections2 < sections { + sections = sections2 + sectionHead = sectionHead2 + } + if sections > 0 { + sectionIndex := sections - 1 + cht = light.TrustedCheckpoint{ + SectionIdx: sectionIndex, + SectionHead: sectionHead, + CHTRoot: light.GetChtRoot(c.chainDb, sectionIndex, sectionHead), + BloomRoot: light.GetBloomTrieRoot(c.chainDb, sectionIndex, sectionHead), + } + } + + chain := c.protocolManager.blockchain + head := chain.CurrentHeader() + hash := head.Hash() + return &NodeInfo{ + Network: c.config.NetworkId, + Difficulty: chain.GetTd(hash, head.Number.Uint64()), + Genesis: chain.Genesis().Hash(), + Config: chain.Config(), + Head: chain.CurrentHeader().Hash(), + CHT: cht, + } +} diff --git a/les/handler.go b/les/handler.go index ccb4a88448d5..ca40eaabfa16 100644 --- a/les/handler.go +++ b/les/handler.go @@ -20,7 +20,6 @@ package les import ( "encoding/binary" "encoding/json" - "errors" "fmt" "math/big" "net" @@ -40,7 +39,6 @@ import ( "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discv5" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" @@ -65,10 +63,6 @@ const ( disableClientRemovePeer = false ) -// errIncompatibleConfig is returned if the requested protocols and configs are -// not compatible (low protocol version restrictions and high requirements). -var errIncompatibleConfig = errors.New("incompatible configuration") - func errResp(code errCode, format string, v ...interface{}) error { return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...)) } @@ -115,8 +109,6 @@ type ProtocolManager struct { peers *peerSet maxPeers int - SubProtocols []p2p.Protocol - eventMux *event.TypeMux // channels for fetcher, syncer, txsyncLoop @@ -131,7 +123,7 @@ type ProtocolManager struct { // NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable // with the ethereum network. 
-func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, protocolVersions []uint, networkId uint64, mux *event.TypeMux, engine consensus.Engine, peers *peerSet, blockchain BlockChain, txpool txPool, chainDb ethdb.Database, odr *LesOdr, txrelay *LesTxRelay, serverPool *serverPool, quitSync chan struct{}, wg *sync.WaitGroup) (*ProtocolManager, error) { +func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, networkId uint64, mux *event.TypeMux, engine consensus.Engine, peers *peerSet, blockchain BlockChain, txpool txPool, chainDb ethdb.Database, odr *LesOdr, txrelay *LesTxRelay, serverPool *serverPool, quitSync chan struct{}, wg *sync.WaitGroup) (*ProtocolManager, error) { // Create the protocol manager with the base fields manager := &ProtocolManager{ lightSync: lightSync, @@ -155,54 +147,6 @@ func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, protoco manager.reqDist = odr.retriever.dist } - // Initiate a sub-protocol for every implemented version we can handle - manager.SubProtocols = make([]p2p.Protocol, 0, len(protocolVersions)) - for _, version := range protocolVersions { - // Compatible, initialize the sub-protocol - version := version // Closure for the run - manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{ - Name: "les", - Version: version, - Length: ProtocolLengths[version], - Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error { - var entry *poolEntry - peer := manager.newPeer(int(version), networkId, p, rw) - if manager.serverPool != nil { - addr := p.RemoteAddr().(*net.TCPAddr) - entry = manager.serverPool.connect(peer, addr.IP, uint16(addr.Port)) - } - peer.poolEntry = entry - select { - case manager.newPeerCh <- peer: - manager.wg.Add(1) - defer manager.wg.Done() - err := manager.handle(peer) - if entry != nil { - manager.serverPool.disconnect(entry) - } - return err - case <-manager.quitSync: - if entry != nil { - manager.serverPool.disconnect(entry) - } - return p2p.DiscQuitting - } - }, - NodeInfo: func() interface{} { - return manager.NodeInfo() - }, - PeerInfo: func(id discover.NodeID) interface{} { - if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil { - return p.Info() - } - return nil - }, - }) - } - if len(manager.SubProtocols) == 0 { - return nil, errIncompatibleConfig - } - removePeer := manager.removePeer if disableClientRemovePeer { removePeer = func(id string) {} @@ -262,6 +206,32 @@ func (pm *ProtocolManager) Stop() { log.Info("Light Ethereum protocol stopped") } +// runPeer is the p2p protocol run function for the given version. 
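The select at the heart of the function below is the usual idiom for handing a peer to the main loop without wedging shutdown: whichever of the registration channel or the quit channel becomes ready first wins. A minimal standalone sketch, with illustrative types:

    package main

    import "fmt"

    type peer struct{ id int }

    // register hands p to the main loop, or aborts if quit closes first.
    func register(newPeerCh chan *peer, quit chan struct{}, p *peer) error {
        select {
        case newPeerCh <- p:
            return nil // main loop took ownership
        case <-quit:
            return fmt.Errorf("shutting down, peer %d rejected", p.id)
        }
    }

    func main() {
        newPeerCh := make(chan *peer, 1)
        quit := make(chan struct{})
        fmt.Println(register(newPeerCh, quit, &peer{1})) // accepted
        close(quit)
        fmt.Println(register(newPeerCh, quit, &peer{2})) // buffer full, quit wins
    }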
+func (pm *ProtocolManager) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) error { + var entry *poolEntry + peer := pm.newPeer(int(version), pm.networkId, p, rw) + if pm.serverPool != nil { + addr := p.RemoteAddr().(*net.TCPAddr) + entry = pm.serverPool.connect(peer, addr.IP, uint16(addr.Port)) + } + peer.poolEntry = entry + select { + case pm.newPeerCh <- peer: + pm.wg.Add(1) + defer pm.wg.Done() + err := pm.handle(peer) + if entry != nil { + pm.serverPool.disconnect(entry) + } + return err + case <-pm.quitSync: + if entry != nil { + pm.serverPool.disconnect(entry) + } + return p2p.DiscQuitting + } +} + func (pm *ProtocolManager) newPeer(pv int, nv uint64, p *p2p.Peer, rw p2p.MsgReadWriter) *peer { return newPeer(pv, nv, p, newMeteredMsgWriter(rw)) } @@ -1203,50 +1173,6 @@ func (pm *ProtocolManager) txStatus(hashes []common.Hash) []txStatus { return stats } -// NodeInfo represents a short summary of the Ethereum sub-protocol metadata -// known about the host peer. -type NodeInfo struct { - Network uint64 `json:"network"` // Ethereum network ID (1=Frontier, 2=Morden, Ropsten=3, Rinkeby=4) - Difficulty *big.Int `json:"difficulty"` // Total difficulty of the host's blockchain - Genesis common.Hash `json:"genesis"` // SHA3 hash of the host's genesis block - Config *params.ChainConfig `json:"config"` // Chain configuration for the fork rules - Head common.Hash `json:"head"` // SHA3 hash of the host's best owned block - CHT light.TrustedCheckpoint `json:"cht"` // Trused CHT checkpoint for fast catchup -} - -// NodeInfo retrieves some protocol metadata about the running host node. -func (self *ProtocolManager) NodeInfo() *NodeInfo { - head := self.blockchain.CurrentHeader() - hash := head.Hash() - - var cht light.TrustedCheckpoint - - sections, _, sectionHead := self.odr.ChtIndexer().Sections() - sections2, _, sectionHead2 := self.odr.BloomTrieIndexer().Sections() - if sections2 < sections { - sections = sections2 - sectionHead = sectionHead2 - } - if sections > 0 { - sectionIndex := sections - 1 - cht = light.TrustedCheckpoint{ - SectionIdx: sectionIndex, - SectionHead: sectionHead, - CHTRoot: light.GetChtRoot(self.chainDb, sectionIndex, sectionHead), - BloomRoot: light.GetBloomTrieRoot(self.chainDb, sectionIndex, sectionHead), - } - } - - return &NodeInfo{ - Network: self.networkId, - Difficulty: self.blockchain.GetTd(hash, head.Number.Uint64()), - Genesis: self.blockchain.Genesis().Hash(), - Config: self.blockchain.Config(), - Head: hash, - CHT: cht, - } -} - // downloaderPeerNotify implements peerSetNotify type downloaderPeerNotify ProtocolManager diff --git a/les/helper_test.go b/les/helper_test.go index 50c97e06e1c1..8817c20c7d33 100644 --- a/les/helper_test.go +++ b/les/helper_test.go @@ -172,18 +172,12 @@ func newTestProtocolManager(lightSync bool, blocks int, generator func(int, *cor chain = blockchain } - var protocolVersions []uint - if lightSync { - protocolVersions = ClientProtocolVersions - } else { - protocolVersions = ServerProtocolVersions - } - pm, err := NewProtocolManager(gspec.Config, lightSync, protocolVersions, NetworkId, evmux, engine, peers, chain, nil, db, odr, nil, nil, make(chan struct{}), new(sync.WaitGroup)) + pm, err := NewProtocolManager(gspec.Config, lightSync, NetworkId, evmux, engine, peers, chain, nil, db, odr, nil, nil, make(chan struct{}), new(sync.WaitGroup)) if err != nil { return nil, err } if !lightSync { - srv := &LesServer{protocolManager: pm} + srv := &LesServer{lesCommons: lesCommons{protocolManager: pm}} pm.server = srv srv.defParams = 
&flowcontrol.ServerParams{ diff --git a/les/server.go b/les/server.go index a934fbf26e6c..df98d1e3a877 100644 --- a/les/server.go +++ b/les/server.go @@ -38,21 +38,19 @@ import ( ) type LesServer struct { - config *eth.Config - protocolManager *ProtocolManager - fcManager *flowcontrol.ClientManager // nil if our node is client only - fcCostStats *requestCostStats - defParams *flowcontrol.ServerParams - lesTopics []discv5.Topic - privateKey *ecdsa.PrivateKey - quitSync chan struct{} - - chtIndexer, bloomTrieIndexer *core.ChainIndexer + lesCommons + + fcManager *flowcontrol.ClientManager // nil if our node is client only + fcCostStats *requestCostStats + defParams *flowcontrol.ServerParams + lesTopics []discv5.Topic + privateKey *ecdsa.PrivateKey + quitSync chan struct{} } func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) { quitSync := make(chan struct{}) - pm, err := NewProtocolManager(eth.BlockChain().Config(), false, ServerProtocolVersions, config.NetworkId, eth.EventMux(), eth.Engine(), newPeerSet(), eth.BlockChain(), eth.TxPool(), eth.ChainDb(), nil, nil, nil, quitSync, new(sync.WaitGroup)) + pm, err := NewProtocolManager(eth.BlockChain().Config(), false, config.NetworkId, eth.EventMux(), eth.Engine(), newPeerSet(), eth.BlockChain(), eth.TxPool(), eth.ChainDb(), nil, nil, nil, quitSync, new(sync.WaitGroup)) if err != nil { return nil, err } @@ -63,13 +61,17 @@ func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) { } srv := &LesServer{ - config: config, - protocolManager: pm, - quitSync: quitSync, - lesTopics: lesTopics, - chtIndexer: light.NewChtIndexer(eth.ChainDb(), false, nil), - bloomTrieIndexer: light.NewBloomTrieIndexer(eth.ChainDb(), false, nil), + lesCommons: lesCommons{ + config: config, + chainDb: eth.ChainDb(), + chtIndexer: light.NewChtIndexer(eth.ChainDb(), false, nil), + bloomTrieIndexer: light.NewBloomTrieIndexer(eth.ChainDb(), false, nil), + protocolManager: pm, + }, + quitSync: quitSync, + lesTopics: lesTopics, } + logger := log.New() chtV1SectionCount, _, _ := srv.chtIndexer.Sections() // indexer still uses LES/1 4k section size for backwards server compatibility @@ -104,7 +106,7 @@ func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) { } func (s *LesServer) Protocols() []p2p.Protocol { - return s.protocolManager.SubProtocols + return s.makeProtocols(ServerProtocolVersions) } // Start starts the LES server From 22cd3f70a68da31b1054fcfca74d187133df29b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 17 Aug 2018 15:47:14 +0300 Subject: [PATCH 143/166] miner: update mining log with correct fee calculation --- miner/worker.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/miner/worker.go b/miner/worker.go index e7e279645805..2f76f2a92cb6 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -782,8 +782,8 @@ func (w *worker) commit(uncles []*types.Header, interval func(), update bool, st w.unconfirmed.Shift(block.NumberU64() - 1) feesWei := new(big.Int) - for _, tx := range block.Transactions() { - feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(tx.Gas()), tx.GasPrice())) + for i, tx := range block.Transactions() { + feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), tx.GasPrice())) } feesEth := new(big.Float).Quo(new(big.Float).SetInt(feesWei), new(big.Float).SetInt(big.NewInt(params.Ether))) From 251c868008f30b2991bc6986e60a0e7bbdc78b05 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 17 Aug 2018 18:12:39 +0300 Subject: [PATCH 144/166] consensus/ethash: reduce notify test aggressiveness --- consensus/ethash/sealer_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/ethash/sealer_test.go b/consensus/ethash/sealer_test.go index 6d8a77049545..6c7157a5aeeb 100644 --- a/consensus/ethash/sealer_test.go +++ b/consensus/ethash/sealer_test.go @@ -70,7 +70,7 @@ func TestRemoteNotify(t *testing.T) { // issues in the notifications. func TestRemoteMultiNotify(t *testing.T) { // Start a simple webserver to capture notifications - sink := make(chan [3]string, 1024) + sink := make(chan [3]string, 64) server := &http.Server{ Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { From 0fd02fe9cff54a5edce87588ed780c76a95329fd Mon Sep 17 00:00:00 2001 From: hackyminer Date: Sat, 18 Aug 2018 06:07:20 +0900 Subject: [PATCH 145/166] console: fixed comment typo --- console/console.go | 2 +- console/console_test.go | 8 ++++---- console/prompter.go | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/console/console.go b/console/console.go index 56e03837ac44..3c397f800614 100644 --- a/console/console.go +++ b/console/console.go @@ -314,7 +314,7 @@ func (c *Console) Interactive() { input = "" // Current user input scheduler = make(chan string) // Channel to send the next prompt on and receive the input ) - // Start a goroutine to listen for promt requests and send back inputs + // Start a goroutine to listen for prompt requests and send back inputs go func() { for { // Read the next user input diff --git a/console/console_test.go b/console/console_test.go index 7b1629c032a1..26465ca6f450 100644 --- a/console/console_test.go +++ b/console/console_test.go @@ -201,7 +201,7 @@ func TestInteractive(t *testing.T) { go tester.console.Interactive() - // Wait for a promt and send a statement back + // Wait for a prompt and send a statement back select { case <-tester.input.scheduler: case <-time.After(time.Second): @@ -212,7 +212,7 @@ func TestInteractive(t *testing.T) { case <-time.After(time.Second): t.Fatalf("input feedback timeout") } - // Wait for the second promt and ensure first statement was evaluated + // Wait for the second prompt and ensure first statement was evaluated select { case <-tester.input.scheduler: case <-time.After(time.Second): @@ -249,7 +249,7 @@ func TestExecute(t *testing.T) { } // Tests that the JavaScript objects returned by statement executions are properly -// pretty printed instead of just displaing "[object]". +// pretty printed instead of just displaying "[object]". func TestPrettyPrint(t *testing.T) { tester := newTester(t, nil) defer tester.Close(t) @@ -300,7 +300,7 @@ func TestIndenting(t *testing.T) { }{ {`var a = 1;`, 0}, {`"some string"`, 0}, - {`"some string with (parentesis`, 0}, + {`"some string with (parenthesis`, 0}, {`"some string with newline ("`, 0}, {`function v(a,b) {}`, 0}, diff --git a/console/prompter.go b/console/prompter.go index c477b48178b3..9b90034db904 100644 --- a/console/prompter.go +++ b/console/prompter.go @@ -27,7 +27,7 @@ import ( // Only this reader may be used for input because it keeps an internal buffer. var Stdin = newTerminalPrompter() -// UserPrompter defines the methods needed by the console to promt the user for +// UserPrompter defines the methods needed by the console to prompt the user for // various types of inputs. 
 type UserPrompter interface {
 	// PromptInput displays the given prompt to the user and requests some textual

From d3488c1affee6843d75db77b36f504c73e5e02e7 Mon Sep 17 00:00:00 2001
From: Wuxiang
Date: Mon, 20 Aug 2018 20:07:21 +0800
Subject: [PATCH 146/166] p2p: fix typo (#17446)

---
 p2p/protocol.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/p2p/protocol.go b/p2p/protocol.go
index ee747ba23dda..948aeb494f87 100644
--- a/p2p/protocol.go
+++ b/p2p/protocol.go
@@ -35,7 +35,7 @@ type Protocol struct {
 	// by the protocol.
 	Length uint64

-	// Run is called in a new groutine when the protocol has been
+	// Run is called in a new goroutine when the protocol has been
 	// negotiated with a peer. It should read and write messages from
 	// rw. The Payload for each message must be fully consumed.
 	//

From c4078fc80527b2cf0c2fd3e8771e9db06a597152 Mon Sep 17 00:00:00 2001
From: Elad
Date: Mon, 20 Aug 2018 14:09:50 +0200
Subject: [PATCH 147/166] cmd/swarm: added swarm bootnodes (#17414)

---
 cmd/swarm/bootnodes.go | 77 ++++++++++++++++++++++++++++++++++++++++++
 cmd/swarm/config.go    |  8 -----
 cmd/swarm/main.go      | 51 ++++++++++++----------------
 swarm/api/config.go    |  2 --
 4 files changed, 98 insertions(+), 40 deletions(-)
 create mode 100644 cmd/swarm/bootnodes.go

diff --git a/cmd/swarm/bootnodes.go b/cmd/swarm/bootnodes.go
new file mode 100644
index 000000000000..cbba9970da0e
--- /dev/null
+++ b/cmd/swarm/bootnodes.go
@@ -0,0 +1,77 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+ +package main + +var SwarmBootnodes = []string{ + // Foundation Swarm Gateway Cluster + "enode://e5c6f9215c919a5450a7b8c14c22535607b69f2c8e1e7f6f430cb25d7a2c27cd1df4c4f18ad7c1d7e5162e271ffcd3f20b1a1467fb6e790e7d727f3b2193de97@52.232.7.187:30399", + "enode://9b2fe07e69ccc7db5fef15793dab7d7d2e697ed92132d6e9548218e68a34613a8671ad03a6658d862b468ed693cae8a0f8f8d37274e4a657ffb59ca84676e45b@52.232.7.187:30400", + "enode://76c1059162c93ef9df0f01097c824d17c492634df211ef4c806935b349082233b63b90c23970254b3b7138d630400f7cf9b71e80355a446a8b733296cb04169a@52.232.7.187:30401", + "enode://ce46bbe2a8263145d65252d52da06e000ad350ed09c876a71ea9544efa42f63c1e1b6cc56307373aaad8f9dd069c90d0ed2dd1530106200e16f4ca681dd8ae2d@52.232.7.187:30402", + "enode://f431e0d6008a6c35c6e670373d828390c8323e53da8158e7bfc43cf07e632cc9e472188be8df01decadea2d4a068f1428caba769b632554a8fb0607bc296988f@52.232.7.187:30403", + "enode://174720abfff83d7392f121108ae50ea54e04889afe020df883655c0f6cb95414db945a0228d8982fe000d86fc9f4b7669161adc89cd7cd56f78f01489ab2b99b@52.232.7.187:30404", + "enode://2ae89be4be61a689b6f9ecee4360a59e185e010ab750f14b63b4ae43d4180e872e18e3437d4386ce44875dc7cc6eb761acba06412fe3178f3dac1dab3b65703e@52.232.7.187:30405", + "enode://24abebe1c0e6d75d6052ce3219a87be8573fd6397b4cb51f0773b83abba9b3d872bfb273cdc07389715b87adfac02f5235f5241442c5089802cbd8d42e310fce@52.232.7.187:30406", + "enode://d08dfa46bfbbdbcaafbb6e34abee4786610f6c91e0b76d7881f0334ac10dda41d8c1f2b6eedffb4493293c335c0ad46776443b2208d1fbbb9e1a90b25ee4eef2@52.232.7.187:30407", + "enode://8d95eb0f837d27581a43668ed3b8783d69dc4e84aa3edd7a0897e026155c8f59c8702fdc0375ee7bac15757c9c78e1315d9b73e4ce59c936db52ea4ae2f501c7@52.232.7.187:30408", + "enode://a5967cc804aebd422baaaba9f06f27c9e695ccab335b61088130f8cbe64e3cdf78793868c7051dfc06eecfe844fad54bc7f6dfaed9db3c7ecef279cb829c25fb@52.232.7.187:30409", + "enode://5f00134d81a8f2ebcc46f8766f627f492893eda48138f811b7de2168308171968f01710bca6da05764e74f14bae41652f554e6321f1aed85fa3461e89d075dbf@52.232.7.187:30410", + "enode://b2142b79b01a5aa66a5e23cc35e78219a8e97bc2412a6698cee24ae02e87078b725d71730711bd62e25ff1aa8658c6633778af8ac14c63814a337c3dd0ebda9f@52.232.7.187:30411", + "enode://1ffa7651094867d6486ce3ef46d27a052c2cb968b618346c6df7040322c7efc3337547ba85d4cbba32e8b31c42c867202554735c06d4c664b9afada2ed0c4b3c@52.232.7.187:30412", + "enode://129e0c3d5f5df12273754f6f703d2424409fa4baa599e0b758c55600169313887855e75b082028d2302ec034b303898cd697cc7ae8256ba924ce927510da2c8d@52.232.7.187:30413", + "enode://419e2dc0d2f5b022cf16b0e28842658284909fa027a0fbbb5e2b755e7f846ea02a8f0b66a7534981edf6a7bcf8a14855344c6668e2cd4476ccd35a11537c9144@52.232.7.187:30414", + "enode://23d55ad900583231b91f2f62e3f72eb498b342afd58b682be3af052eed62b5651094471065981de33d8786f075f05e3cca499503b0ac8ae84b2a06e99f5b0723@52.232.7.187:30415", + "enode://bc56e4158c00e9f616d7ea533def20a89bef959df4e62a768ff238ff4e1e9223f57ecff969941c20921bad98749baae311c0fbebce53bf7bbb9d3dc903640990@52.232.7.187:30416", + "enode://433ce15199c409875e7e72fffd69fdafe746f17b20f0d5555281722a65fde6c80328fab600d37d8624509adc072c445ce0dad4a1c01cff6acf3132c11d429d4d@52.232.7.187:30417", + "enode://632ee95b8f0eac51ef89ceb29313fef3a60050181d66a6b125583b1a225a7694b252edc016efb58aa3b251da756cb73280842a022c658ed405223b2f58626343@52.232.7.187:30418", + "enode://4a0f9bcff7a4b9ee453fb298d0fb222592efe121512e30cd72fef631beb8c6a15153a1456eb073ee18551c0e003c569651a101892dc4124e90b933733a498bb5@52.232.7.187:30419", + 
"enode://f0d80fbc72d16df30e19aac3051eb56a7aff0c8367686702e01ea132d8b0b3ee00cadd6a859d2cca98ec68d3d574f8a8a87dba2347ec1e2818dc84bc3fa34fae@52.232.7.187:30420", + "enode://a199146906e4f9f2b94b195a8308d9a59a3564b92efaab898a4243fe4c2ad918b7a8e4853d9d901d94fad878270a2669d644591299c3d43de1b298c00b92b4a7@52.232.7.187:30421", + "enode://052036ea8736b37adbfb684d90ce43e11b3591b51f31489d7c726b03618dea4f73b1e659deb928e6bf40564edcdcf08351643f42db3d4ca1c2b5db95dad59e94@52.232.7.187:30422", + "enode://460e2b8c6da8f12fac96c836e7d108f4b7ec55a1c64631bb8992339e117e1c28328fee83af863196e20af1487a655d13e5ceba90e980e92502d5bac5834c1f71@52.232.7.187:30423", + "enode://6d2cdd13741b2e72e9031e1b93c6d9a4e68de2844aa4e939f6a8a8498a7c1d7e2ee4c64217e92a6df08c9a32c6764d173552810ef1bd2ecb356532d389dd2136@52.232.7.187:30424", + "enode://62105fc25ce2cd5b299647f47eaa9211502dc76f0e9f461df915782df7242ac3223e3db04356ae6ed2977ccac20f0b16864406e9ca514a40a004cb6a5d0402aa@52.232.7.187:30425", + "enode://e0e388fc520fd493c33f0ce16685e6f98fb6aec28f2edc14ee6b179594ee519a896425b0025bb6f0e182dd3e468443f19c70885fbc66560d000093a668a86aa8@52.232.7.187:30426", + "enode://63f3353a72521ea10022127a4fe6b4acbef197c3fe668fd9f4805542d8a6fcf79f6335fbab62d180a35e19b739483e740858b113fdd7c13a26ad7b4e318a5aef@52.232.7.187:30427", + "enode://33a42b927085678d4aefd4e70b861cfca6ef5f6c143696c4f755973fd29e64c9e658cad57a66a687a7a156da1e3688b1fbdd17bececff2ee009fff038fa5666b@52.232.7.187:30428", + "enode://259ab5ab5c1daee3eab7e3819ab3177b82d25c29e6c2444fdd3f956e356afae79a72840ccf2d0665fe82c81ebc3b3734da1178ac9fd5d62c67e674b69f86b6be@52.232.7.187:30429", + "enode://558bccad7445ce3fd8db116ed6ab4aed1324fdbdac2348417340c1764dc46d46bffe0728e5b7d5c36f12e794c289f18f57f08f085d2c65c9910a5c7a65b6a66a@52.232.7.187:30430", + "enode://abe60937a0657ffded718e3f84a32987286983be257bdd6004775c4b525747c2b598f4fac49c8de324de5ce75b22673fa541a7ce2d555fb7f8ca325744ae3577@52.232.7.187:30431", + "enode://bce6f0aaa5b230742680084df71d4f026b3eff7f564265599216a1b06b765303fdc9325de30ffd5dfdaf302ce4b14322891d2faea50ce2ca298d7409f5858339@52.232.7.187:30432", + "enode://21b957c4e03277d42be6660730ec1b93f540764f26c6abdb54d006611139c7081248486206dfbf64fcaffd62589e9c6b8ea77a5297e4b21a605f1bcf49483ed0@52.232.7.187:30433", + "enode://ff104e30e64f24c3d7328acee8b13354e5551bc8d60bb25ecbd9632d955c7e34bb2d969482d173355baad91c8282f8b592624eb3929151090da3b4448d4d58fb@52.232.7.187:30434", + "enode://c76e2b5f81a521bceaec1518926a21380a345df9cf463461562c6845795512497fb67679e155fc96a74350f8b78de8f4c135dd52b106dbbb9795452021d09ea5@52.232.7.187:30435", + "enode://3288fd860105164f3e9b69934c4eb18f7146cfab31b5a671f994e21a36e9287766e5f9f075aefbc404538c77f7c2eb2a4495020a7633a1c3970d94e9fa770aeb@52.232.7.187:30436", + "enode://6cea859c7396d46b20cfcaa80f9a11cd112f8684f2f782f7b4c0e1e0af9212113429522075101923b9b957603e6c32095a6a07b5e5e35183c521952ee108dfaf@52.232.7.187:30437", + "enode://f628ec56e4ca8317cc24cc4ac9b27b95edcce7b96e1c7f3b53e30de4a8580fe44f2f0694a513bdb0a431acaf2824074d6ace4690247bbc34c14f426af8c056ea@52.232.7.187:30438", + "enode://055ec8b26fc105c4f97970a1cce9773a5e34c03f511b839db742198a1c571e292c54aa799e9afb991cc8a560529b8cdf3e0c344bc6c282aff2f68eec59361ddf@52.232.7.187:30439", + "enode://48cb0d430c328974226aa33a931d8446cd5a8d40f3ead8f4ce7ad60faa1278192eb6d58bed91258d63e81f255fc107eec2425ce2ae8b22350dd556076e160610@52.232.7.187:30440", + "enode://3fadb7af7f770d5ffc6b073b8d42834bebb18ce1fe8a4fe270d2b799e7051327093960dc61d9a18870db288f7746a0e6ea2a013cd6ab0e5f97ca08199473aace@52.232.7.187:30441", + 
"enode://a5d7168024c9992769cf380ffa559a64b4f39a29d468f579559863814eb0ae0ed689ac0871a3a2b4c78b03297485ec322d578281131ef5d5c09a4beb6200a97a@52.232.7.187:30442", + "enode://9c57744c5b2c2d71abcbe80512652f9234d4ab041b768a2a886ab390fe6f184860f40e113290698652d7e20a8ac74d27ac8671db23eb475b6c5e6253e4693bf8@52.232.7.187:30443", + "enode://daca9ff0c3176045a0e0ed228dee00ec86bc0939b135dc6b1caa23745d20fd0332e1ee74ad04020e89df56c7146d831a91b89d15ca3df05ba7618769fefab376@52.232.7.187:30444", + "enode://a3f6af59428cb4b9acb198db15ef5554fa43c2b0c18e468a269722d64a27218963a2975eaf82750b6262e42192b5e3669ea51337b4cda62b33987981bc5e0c1a@52.232.7.187:30445", + "enode://fe571422fa4651c3354c85dac61911a6a6520dd3c0332967a49d4133ca30e16a8a4946fa73ca2cb5de77917ea701a905e1c3015b2f4defcd53132b61cc84127a@52.232.7.187:30446", + + // Mainframe + "enode://ee9a5a571ea6c8a59f9a8bb2c569c865e922b41c91d09b942e8c1d4dd2e1725bd2c26149da14de1f6321a2c6fdf1e07c503c3e093fb61696daebf74d6acd916b@54.186.219.160:30399", + "enode://a03f0562ecb8a992ad5242345535e73483cdc18ab934d36bf24b567d43447c2cea68f89f1d51d504dd13acc30f24ebce5a150bea2ccb1b722122ce4271dc199d@52.67.248.147:30399", + "enode://e2cbf9eafd85903d3b1c56743035284320695e0072bc8d7396e0542aa5e1c321b236f67eab66b79c2f15d4447fa4bbe74dd67d0467da23e7eb829f60ec8a812b@13.58.169.1:30399", + "enode://8b8c6bda6047f1cad9fab2db4d3d02b7aa26279902c32879f7bcd4a7d189fee77fdc36ee151ce6b84279b4792e72578fd529d2274d014132465758fbfee51cee@13.209.13.15:30399", + "enode://63f6a8818927e429585287cf2ca0cb9b11fa990b7b9b331c2962cdc6f21807a2473b26e8256225c26caff70d7218e59586d704d49061452c6852e382c885d03c@35.154.106.174:30399", + "enode://ed4bd3b794ed73f18e6dcc70c6624dfec63b5654f6ab54e8f40b16eff8afbd342d4230e099ddea40e84423f81b2d2ea79799dc345257b1fec6f6c422c9d008f7@52.213.20.99:30399", +} diff --git a/cmd/swarm/config.go b/cmd/swarm/config.go index 1183f8bc8169..ae4b5816e60e 100644 --- a/cmd/swarm/config.go +++ b/cmd/swarm/config.go @@ -233,10 +233,6 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con currentConfig.Cors = cors } - if ctx.GlobalIsSet(utils.BootnodesFlag.Name) { - currentConfig.BootNodes = ctx.GlobalString(utils.BootnodesFlag.Name) - } - if storePath := ctx.GlobalString(SwarmStorePath.Name); storePath != "" { currentConfig.LocalStoreParams.ChunkDbPath = storePath } @@ -334,10 +330,6 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) { currentConfig.Cors = cors } - if bootnodes := os.Getenv(SWARM_ENV_BOOTNODES); bootnodes != "" { - currentConfig.BootNodes = bootnodes - } - return currentConfig } diff --git a/cmd/swarm/main.go b/cmd/swarm/main.go index 76be60cb683c..637ae06e9637 100644 --- a/cmd/swarm/main.go +++ b/cmd/swarm/main.go @@ -37,7 +37,6 @@ import ( "github.com/ethereum/go-ethereum/internal/debug" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" - "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/swarm" bzzapi "github.com/ethereum/go-ethereum/swarm/api" @@ -67,14 +66,7 @@ OPTIONS: ` var ( - gitCommit string // Git SHA1 commit hash of the release (set via linker flags) - testbetBootNodes = []string{ - "enode://ec8ae764f7cb0417bdfb009b9d0f18ab3818a3a4e8e7c67dd5f18971a93510a2e6f43cd0b69a27e439a9629457ea804104f37c85e41eed057d3faabbf7744cdf@13.74.157.139:30429", - 
"enode://c2e1fceb3bf3be19dff71eec6cccf19f2dbf7567ee017d130240c670be8594bc9163353ca55dd8df7a4f161dd94b36d0615c17418b5a3cdcbb4e9d99dfa4de37@13.74.157.139:30430", - "enode://fe29b82319b734ce1ec68b84657d57145fee237387e63273989d354486731e59f78858e452ef800a020559da22dcca759536e6aa5517c53930d29ce0b1029286@13.74.157.139:30431", - "enode://1d7187e7bde45cf0bee489ce9852dd6d1a0d9aa67a33a6b8e6db8a4fbc6fcfa6f0f1a5419343671521b863b187d1c73bad3603bae66421d157ffef357669ddb8@13.74.157.139:30432", - "enode://0e4cba800f7b1ee73673afa6a4acead4018f0149d2e3216be3f133318fd165b324cd71b81fbe1e80deac8dbf56e57a49db7be67f8b9bc81bd2b7ee496434fb5d@13.74.157.139:30433", - } + gitCommit string // Git SHA1 commit hash of the release (set via linker flags) ) var ( @@ -619,6 +611,9 @@ func bzzd(ctx *cli.Context) error { if _, err := os.Stat(bzzconfig.Path); err == nil { cfg.DataDir = bzzconfig.Path } + + //optionally set the bootnodes before configuring the node + setSwarmBootstrapNodes(ctx, &cfg) //setup the ethereum node utils.SetNodeConfig(ctx, &cfg) stack, err := node.New(&cfg) @@ -643,16 +638,6 @@ func bzzd(ctx *cli.Context) error { stack.Stop() }() - // Add bootnodes as initial peers. - if bzzconfig.BootNodes != "" { - bootnodes := strings.Split(bzzconfig.BootNodes, ",") - injectBootnodes(stack.Server(), bootnodes) - } else { - if bzzconfig.NetworkID == 3 { - injectBootnodes(stack.Server(), testbetBootNodes) - } - } - stack.Wait() return nil } @@ -760,17 +745,6 @@ func getPassPhrase(prompt string, i int, passwords []string) string { return password } -func injectBootnodes(srv *p2p.Server, nodes []string) { - for _, url := range nodes { - n, err := discover.ParseNode(url) - if err != nil { - log.Error("Invalid swarm bootnode", "err", err) - continue - } - srv.AddPeer(n) - } -} - // addDefaultHelpSubcommand scans through defined CLI commands and adds // a basic help subcommand to each // if a help command is already defined, it will take precedence over the default. 
@@ -783,3 +757,21 @@ func addDefaultHelpSubcommands(commands []cli.Command) {
 		}
 	}
 }
+
+func setSwarmBootstrapNodes(ctx *cli.Context, cfg *node.Config) {
+	if ctx.GlobalIsSet(utils.BootnodesFlag.Name) || ctx.GlobalIsSet(utils.BootnodesV4Flag.Name) {
+		return
+	}
+
+	cfg.P2P.BootstrapNodes = []*discover.Node{}
+
+	for _, url := range SwarmBootnodes {
+		node, err := discover.ParseNode(url)
+		if err != nil {
+			log.Error("Bootstrap URL invalid", "enode", url, "err", err)
+			continue // skip unparseable bootnodes instead of appending a nil node
+		}
+		cfg.P2P.BootstrapNodes = append(cfg.P2P.BootstrapNodes, node)
+	}
+	log.Debug("added default swarm bootnodes", "length", len(cfg.P2P.BootstrapNodes))
+}
diff --git a/swarm/api/config.go b/swarm/api/config.go
index bdfffdd05f63..3044dc2e52e9 100644
--- a/swarm/api/config.go
+++ b/swarm/api/config.go
@@ -68,7 +68,6 @@ type Config struct {
 	SwapAPI    string
 	Cors       string
 	BzzAccount string
-	BootNodes  string
 	privateKey *ecdsa.PrivateKey
 }

@@ -93,7 +92,6 @@ func NewConfig() (c *Config) {
 		DeliverySkipCheck: false,
 		SyncUpdateDelay:   15 * time.Second,
 		SwapAPI:           "",
-		BootNodes:         "",
 	}

 	return

From a8aa89accb0bcd4f24cd430d8f100e0ff9e239d0 Mon Sep 17 00:00:00 2001
From: Anton Evangelatov
Date: Mon, 20 Aug 2018 14:10:30 +0200
Subject: [PATCH 148/166] swarm/storage: cleanup task - remove bigger chunks
 (#17424)

---
 swarm/storage/ldbstore.go | 67 ++++++++++++++++++++++++++-------------
 1 file changed, 45 insertions(+), 22 deletions(-)

diff --git a/swarm/storage/ldbstore.go b/swarm/storage/ldbstore.go
index 7920ee76749a..b95aa13b09f0 100644
--- a/swarm/storage/ldbstore.go
+++ b/swarm/storage/ldbstore.go
@@ -36,6 +36,7 @@ import (
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/log"
 	"github.com/ethereum/go-ethereum/swarm/storage/mock"
 	"github.com/syndtr/goleveldb/leveldb"
@@ -384,14 +385,13 @@ func (s *LDBStore) Import(in io.Reader) (int64, error) {
 }

 func (s *LDBStore) Cleanup() {
-	//Iterates over the database and checks that there are no faulty chunks
+	// Iterates over the database and checks that there are no chunks bigger than 4kb
+	var errorsFound, removed, total int
+
 	it := s.db.NewIterator()
-	startPosition := []byte{keyIndex}
-	it.Seek(startPosition)
-	var key []byte
-	var errorsFound, total int
-	for it.Valid() {
-		key = it.Key()
+	defer it.Release()
+	for ok := it.Seek([]byte{keyIndex}); ok; ok = it.Next() {
+		key := it.Key()
 		if (key == nil) || (key[0] != keyIndex) {
 			break
 		}
@@ -399,27 +399,50 @@
 		var index dpaDBIndex
 		err := decodeIndex(it.Value(), &index)
 		if err != nil {
-			it.Next()
+			log.Warn("Cannot decode")
+			errorsFound++
 			continue
 		}
-		data, err := s.db.Get(getDataKey(index.Idx, s.po(Address(key[1:]))))
+		hash := key[1:]
+		po := s.po(hash)
+		datakey := getDataKey(index.Idx, po)
+		data, err := s.db.Get(datakey)
 		if err != nil {
-			log.Warn(fmt.Sprintf("Chunk %x found but could not be accessed: %v", key[:], err))
-			s.delete(index.Idx, getIndexKey(key[1:]), s.po(Address(key[1:])))
-			errorsFound++
-		} else {
-			hasher := s.hashfunc()
-			hasher.Write(data[32:])
-			hash := hasher.Sum(nil)
-			if !bytes.Equal(hash, key[1:]) {
-				log.Warn(fmt.Sprintf("Found invalid chunk. Hash mismatch. hash=%x, key=%x", hash, key[:]))
-				s.delete(index.Idx, getIndexKey(key[1:]), s.po(Address(key[1:])))
+			found := false
+
+			// highest possible proximity is 255
+			for po = 1; po <= 255; po++ {
+				datakey = getDataKey(index.Idx, po)
+				data, err = s.db.Get(datakey)
+				if err == nil {
+					found = true
+					break
+				}
+			}
+
+			if !found {
+				log.Warn(fmt.Sprintf("Chunk %x found but could not be accessed with any po", key[:]))
+				errorsFound++
+				continue
+			}
 		}
-		it.Next()
+
+		c := &Chunk{}
+		ck := data[:32]
+		decodeData(data, c)
+
+		cs := int64(binary.LittleEndian.Uint64(c.SData[:8]))
+		log.Trace("chunk", "key", fmt.Sprintf("%x", key[:]), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(c.SData), "size", cs)
+
+		if len(c.SData) > chunk.DefaultSize+8 {
+			log.Warn("chunk for cleanup", "key", fmt.Sprintf("%x", key[:]), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(c.SData), "size", cs)
+			s.delete(index.Idx, getIndexKey(key[1:]), po)
+			removed++
+			errorsFound++
+		}
 	}
-	it.Release()
-	log.Warn(fmt.Sprintf("Found %v errors out of %v entries", errorsFound, total))
+
+	log.Warn(fmt.Sprintf("Found %v errors out of %v entries. Removed %v chunks.", errorsFound, total, removed))
 }

 func (s *LDBStore) ReIndex() {

From 55d050ccd81d726ce47cfb57ecd9de662129b7f1 Mon Sep 17 00:00:00 2001
From: Anton Evangelatov
Date: Mon, 20 Aug 2018 14:11:08 +0200
Subject: [PATCH 149/166] travis: remove brew update and osxfuse install
 (#17429)

---
 .travis.yml | 2 --
 1 file changed, 2 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index afa9ab503f21..3ae88aab6df1 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -30,8 +30,6 @@ matrix:
       go: 1.10.x
       script:
         - unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703
-        - brew update
-        - brew cask install osxfuse
         - go run build/ci.go install
         - go run build/ci.go test -coverage $TEST_PACKAGES

From c929030e280ecae5e5943fa277240ee7d09d7506 Mon Sep 17 00:00:00 2001
From: Aditya
Date: Mon, 20 Aug 2018 05:14:50 -0700
Subject: [PATCH 150/166] core/types: fix docs about protected Vs (#17436)

---
 core/types/transaction.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/types/transaction.go b/core/types/transaction.go
index 82af9335ff2a..9c6e77be982b 100644
--- a/core/types/transaction.go
+++ b/core/types/transaction.go
@@ -119,7 +119,7 @@ func isProtectedV(V *big.Int) bool {
 		v := V.Uint64()
 		return v != 27 && v != 28
 	}
-	// anything not 27 or 28 are considered unprotected
+	// anything not 27 or 28 is considered protected
 	return true
 }

From 1de9ada4016d7028d1d1529bc7d0676c98ddb5e4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Felf=C3=B6ldi=20Zsolt?=
Date: Mon, 20 Aug 2018 15:49:28 +0200
Subject: [PATCH 151/166] light: new CHTs (#17448)

---
 light/postprocess.go | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/light/postprocess.go b/light/postprocess.go
index 0b25e1d88141..42985aff054b 100644
--- a/light/postprocess.go
+++ b/light/postprocess.go
@@ -61,18 +61,18 @@ type TrustedCheckpoint struct {
 var (
 	mainnetCheckpoint = TrustedCheckpoint{
 		name:        "mainnet",
-		SectionIdx:  179,
-		SectionHead: common.HexToHash("ae778e455492db1183e566fa0c67f954d256fdd08618f6d5a393b0e24576d0ea"),
-		CHTRoot:     common.HexToHash("646b338f9ca74d936225338916be53710ec84020b89946004a8605f04c817f16"),
-		BloomRoot:   common.HexToHash("d0f978f5dbc86e5bf931d8dd5b2ecbebbda6dc78f8896af6a27b46a3ced0ac25"),
+		SectionIdx:  187,
+		SectionHead: common.HexToHash("e6baa034efa31562d71ff23676512dec6562c1ad0301e08843b907e81958c696"),
+		CHTRoot:     common.HexToHash("28001955219719cf06de1b08648969139d123a9835fc760547a1e4dabdabc15a"),
+		BloomRoot:   common.HexToHash("395ca2373fc662720ac6b58b3bbe71f68aa0f38b63b2d3553dd32ff3c51eebc4"),
 	}

 	ropstenCheckpoint = TrustedCheckpoint{
 		name:        "ropsten",
-		SectionIdx:  107,
-		SectionHead: common.HexToHash("e1988f95399debf45b873e065e5cd61b416ef2e2e5deec5a6f87c3127086e1ce"),
-		CHTRoot:     common.HexToHash("15cba18e4de0ab1e95e202625199ba30147aec8b0b70384b66ebea31ba6a18e0"),
-		BloomRoot:   common.HexToHash("e00fa6389b2e597d9df52172cd8e936879eed0fca4fa59db99e2c8ed682562f2"),
+		SectionIdx:  117,
+		SectionHead: common.HexToHash("9529b38631ae30783f56cbe4c3b9f07575b770ecba4f6e20a274b1e2f40fede1"),
+		CHTRoot:     common.HexToHash("6f48e9f101f1fac98e7d74fbbcc4fda138358271ffd974d40d2506f0308bb363"),
+		BloomRoot:   common.HexToHash("8242342e66e942c0cd893484e6736b9862ceb88b43ca344bb06a8285ac1b6d64"),
 	}
 )

From 7d38d53ae449c6ec06f7b0579f1a189b02222a60 Mon Sep 17 00:00:00 2001
From: Nilesh Trivedi
Date: Mon, 20 Aug 2018 19:24:38 +0530
Subject: [PATCH 152/166] cmd/puppeth: accept ssh identity in the server string
 (#17407)

* cmd/puppeth: Accept identityfile in the server string with fallback to id_rsa

* cmd/puppeth: code polishes + fix health check double ports

---
 cmd/puppeth/ssh.go            | 52 ++++++++++++++++++++---------------
 cmd/puppeth/wizard_network.go |  8 +++---
 2 files changed, 34 insertions(+), 26 deletions(-)

diff --git a/cmd/puppeth/ssh.go b/cmd/puppeth/ssh.go
index 158261ce0516..c507596065ba 100644
--- a/cmd/puppeth/ssh.go
+++ b/cmd/puppeth/ssh.go
@@ -45,33 +45,44 @@ type sshClient struct {

 // dial establishes an SSH connection to a remote node using the current user and
 // the user's configured private RSA key. If that fails, password authentication
-// is fallen back to. The caller may override the login user via user@server:port.
+// is fallen back to. server can be a string like user:identity@server:port.
func dial(server string, pubkey []byte) (*sshClient, error) { - // Figure out a label for the server and a logger - label := server - if strings.Contains(label, ":") { - label = label[:strings.Index(label, ":")] - } - login := "" + // Figure out username, identity, hostname and port + hostname := "" + hostport := server + username := "" + identity := "id_rsa" // default + if strings.Contains(server, "@") { - login = label[:strings.Index(label, "@")] - label = label[strings.Index(label, "@")+1:] - server = server[strings.Index(server, "@")+1:] + prefix := server[:strings.Index(server, "@")] + if strings.Contains(prefix, ":") { + username = prefix[:strings.Index(prefix, ":")] + identity = prefix[strings.Index(prefix, ":")+1:] + } else { + username = prefix + } + hostport = server[strings.Index(server, "@")+1:] } - logger := log.New("server", label) + if strings.Contains(hostport, ":") { + hostname = hostport[:strings.Index(hostport, ":")] + } else { + hostname = hostport + hostport += ":22" + } + logger := log.New("server", server) logger.Debug("Attempting to establish SSH connection") user, err := user.Current() if err != nil { return nil, err } - if login == "" { - login = user.Username + if username == "" { + username = user.Username } // Configure the supported authentication methods (private key and password) var auths []ssh.AuthMethod - path := filepath.Join(user.HomeDir, ".ssh", "id_rsa") + path := filepath.Join(user.HomeDir, ".ssh", identity) if buf, err := ioutil.ReadFile(path); err != nil { log.Warn("No SSH key, falling back to passwords", "path", path, "err", err) } else { @@ -94,14 +105,14 @@ func dial(server string, pubkey []byte) (*sshClient, error) { } } auths = append(auths, ssh.PasswordCallback(func() (string, error) { - fmt.Printf("What's the login password for %s at %s? (won't be echoed)\n> ", login, server) + fmt.Printf("What's the login password for %s at %s? 
(won't be echoed)\n> ", username, server)
 		blob, err := terminal.ReadPassword(int(os.Stdin.Fd()))
 		fmt.Println()
 		return string(blob), err
 	}))
 	// Resolve the IP address of the remote server
-	addr, err := net.LookupHost(label)
+	addr, err := net.LookupHost(hostname)
 	if err != nil {
 		return nil, err
 	}
@@ -109,10 +120,7 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
 		return nil, errors.New("no IPs associated with domain")
 	}
 	// Try to dial in to the remote server
-	logger.Trace("Dialing remote SSH server", "user", login)
-	if !strings.Contains(server, ":") {
-		server += ":22"
-	}
+	logger.Trace("Dialing remote SSH server", "user", username)
 	keycheck := func(hostname string, remote net.Addr, key ssh.PublicKey) error {
 		// If no public key is known for SSH, ask the user to confirm
 		if pubkey == nil {
@@ -139,13 +147,13 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
 		// We have a mismatch, forbid connecting
 		return errors.New("ssh key mismatch, readd the machine to update")
 	}
-	client, err := ssh.Dial("tcp", server, &ssh.ClientConfig{User: login, Auth: auths, HostKeyCallback: keycheck})
+	client, err := ssh.Dial("tcp", hostport, &ssh.ClientConfig{User: username, Auth: auths, HostKeyCallback: keycheck})
 	if err != nil {
 		return nil, err
 	}
 	// Connection established, return our utility wrapper
 	c := &sshClient{
-		server:  label,
+		server:  hostname,
 		address: addr[0],
 		pubkey:  pubkey,
 		client:  client,
diff --git a/cmd/puppeth/wizard_network.go b/cmd/puppeth/wizard_network.go
index d780c550b164..c0ddcc2a3c91 100644
--- a/cmd/puppeth/wizard_network.go
+++ b/cmd/puppeth/wizard_network.go
@@ -62,14 +62,14 @@ func (w *wizard) manageServers() {
 	}
 }

-// makeServer reads a single line from stdin and interprets it as a hostname to
-// connect to. It tries to establish a new SSH session and also executing some
-// baseline validations.
+// makeServer reads a single line from stdin and interprets it as
+// username:identity@hostname to connect to. It tries to establish a
+// new SSH session and also executes some baseline validations.
 //
 // If connection succeeds, the server is added to the wizard's configs!
 func (w *wizard) makeServer() string {
 	fmt.Println()
-	fmt.Println("Please enter remote server's address:")
+	fmt.Println("What is the remote server's address ([username[:identity]@]hostname[:port])?")

 	// Read and dial the server to ensure docker is present
 	input := w.readString()

From a6d45a5d00e0f37eab9eba43f1ec60429f0dc120 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?=
Date: Mon, 20 Aug 2018 18:05:06 +0300
Subject: [PATCH 153/166] crypto/bn256: add missing license file, release
 wrapper in BSD-3

---
 crypto/bn256/LICENSE            | 28 ++++++++++++++++++++++++++++
 crypto/bn256/bn256_fast.go      | 18 +++---------------
 crypto/bn256/bn256_fuzz.go      | 18 +++---------------
 crypto/bn256/bn256_slow.go      | 18 +++---------------
 crypto/bn256/cloudflare/LICENSE | 27 +++++++++++++++++++++++++++
 5 files changed, 64 insertions(+), 45 deletions(-)
 create mode 100644 crypto/bn256/LICENSE
 create mode 100644 crypto/bn256/cloudflare/LICENSE

diff --git a/crypto/bn256/LICENSE b/crypto/bn256/LICENSE
new file mode 100644
index 000000000000..634e0cb2c362
--- /dev/null
+++ b/crypto/bn256/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2018 Péter Szilágyi. All rights reserved.
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/crypto/bn256/bn256_fast.go b/crypto/bn256/bn256_fast.go index a8dfa8f67d1d..5c081493b0fb 100644 --- a/crypto/bn256/bn256_fast.go +++ b/crypto/bn256/bn256_fast.go @@ -1,18 +1,6 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . +// Copyright 2018 Péter Szilágyi. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. // +build amd64 arm64 diff --git a/crypto/bn256/bn256_fuzz.go b/crypto/bn256/bn256_fuzz.go index f360b0541fec..6aa142117045 100644 --- a/crypto/bn256/bn256_fuzz.go +++ b/crypto/bn256/bn256_fuzz.go @@ -1,18 +1,6 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. 
If not, see . +// Copyright 2018 Péter Szilágyi. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. // +build gofuzz diff --git a/crypto/bn256/bn256_slow.go b/crypto/bn256/bn256_slow.go index 61373763b89d..47df49d41763 100644 --- a/crypto/bn256/bn256_slow.go +++ b/crypto/bn256/bn256_slow.go @@ -1,18 +1,6 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . +// Copyright 2018 Péter Szilágyi. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. // +build !amd64,!arm64 diff --git a/crypto/bn256/cloudflare/LICENSE b/crypto/bn256/cloudflare/LICENSE new file mode 100644 index 000000000000..6a66aea5eafe --- /dev/null +++ b/crypto/bn256/cloudflare/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
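A note on the wrapper files relicensed above: the split between bn256_fast.go (+build amd64 arm64) and bn256_slow.go (+build !amd64,!arm64) means Go's build constraints compile exactly one backend into any given target, with bn256_fuzz.go gated behind the gofuzz tag in the same way. Below is a minimal sketch of that selection mechanism; the package and file names (impl, fast.go, slow.go) and the Backend function are hypothetical stand-ins, not the real bn256 sources.

// fast.go: compiled only on targets with the assembly-backed implementation.

// +build amd64 arm64

package impl

// Backend reports which implementation the build constraints selected.
func Backend() string { return "fast (assembly)" }

// slow.go: pure-Go fallback, compiled on every other architecture.

// +build !amd64,!arm64

package impl

// Backend reports which implementation the build constraints selected.
func Backend() string { return "slow (pure Go)" }

Because both files declare the same symbol under mutually exclusive constraints, importers simply call impl.Backend() and the toolchain picks the right variant per GOARCH, which is the behavior the fast/slow bn256 wrappers rely on.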
From 106d196ec4a6451efedc60ab15957f231fa85639 Mon Sep 17 00:00:00 2001
From: Martin Holst Swende
Date: Tue, 21 Aug 2018 09:48:53 +0200
Subject: [PATCH 154/166] eth: ensure from < to when tracing a chain

---
 eth/api_tracer.go | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/eth/api_tracer.go b/eth/api_tracer.go
--- a/eth/api_tracer.go
+++ b/eth/api_tracer.go
+	if from.Number().Cmp(to.Number()) >= 0 {
+		return nil, fmt.Errorf("end block (#%d) needs to come after start block (#%d)", end, start)
+	}
 	return api.traceChain(ctx, from, to, config)
 }

From dcd97c41fa2789f2e642cb52b8e18c5bd7bcc81d Mon Sep 17 00:00:00 2001
From: Anton Evangelatov
Date: Tue, 21 Aug 2018 10:34:10 +0200
Subject: [PATCH 155/166] swarm, swarm/network, swarm/pss: log error and fix
 logs (#17410)

* swarm, swarm/network, swarm/pss: log error and fix logs

* swarm/pss: log compressed publickey

---
 swarm/network/hive.go |  4 ++--
 swarm/pss/pss.go      |  5 +++--
 swarm/swarm.go        | 34 ++++++++++++++++------------------
 3 files changed, 21 insertions(+), 22 deletions(-)

diff --git a/swarm/network/hive.go b/swarm/network/hive.go
index a54a17d29deb..366021088365 100644
--- a/swarm/network/hive.go
+++ b/swarm/network/hive.go
@@ -102,10 +102,10 @@ func NewHive(params *HiveParams, overlay Overlay, store state.Store) *Hive {
 // server is used to connect to a peer based on its NodeID or enode URL
 // these are called on the p2p.Server which runs on the node
 func (h *Hive) Start(server *p2p.Server) error {
-	log.Info(fmt.Sprintf("%08x hive starting", h.BaseAddr()[:4]))
+	log.Info("Starting hive", "baseaddr", fmt.Sprintf("%x", h.BaseAddr()[:4]))
 	// if state store is specified, load peers to prepopulate the overlay address book
 	if h.Store != nil {
-		log.Info("detected an existing store. trying to load peers")
+		log.Info("Detected an existing store. trying to load peers")
 		if err := h.loadPeers(); err != nil {
 			log.Error(fmt.Sprintf("%08x hive encountered an error trying to load peers", h.BaseAddr()[:4]))
 			return err
diff --git a/swarm/pss/pss.go b/swarm/pss/pss.go
index 5c060b24863a..8459211ddb1a 100644
--- a/swarm/pss/pss.go
+++ b/swarm/pss/pss.go
@@ -232,12 +232,13 @@ func (p *Pss) Start(srv *p2p.Server) error {
 			}
 		}
 	}()
-	log.Debug("Started pss", "public key", common.ToHex(crypto.FromECDSAPub(p.PublicKey())))
+	log.Info("Started Pss")
+	log.Info("Loaded EC keys", "pubkey", common.ToHex(crypto.FromECDSAPub(p.PublicKey())), "secp256", common.ToHex(crypto.CompressPubkey(p.PublicKey())))
 	return nil
 }

 func (p *Pss) Stop() error {
-	log.Info("pss shutting down")
+	log.Info("Pss shutting down")
 	close(p.quitC)
 	return nil
 }
diff --git a/swarm/swarm.go b/swarm/swarm.go
index a895bdfa55ec..736cd37de86b 100644
--- a/swarm/swarm.go
+++ b/swarm/swarm.go
@@ -121,12 +121,10 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
 		backend:    backend,
 		privateKey: config.ShiftPrivateKey(),
 	}
-	log.Debug(fmt.Sprintf("Setting up Swarm service components"))
+	log.Debug("Setting up Swarm service components")

 	config.HiveParams.Discovery = true

-	log.Debug(fmt.Sprintf("-> swarm net store shared access layer to Swarm Chunk Store"))
-
 	nodeID, err := discover.HexID(config.NodeID)
 	if err != nil {
 		return nil, err
 	}
@@ -201,8 +199,7 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
 		resourceHandler,
 	}

-	// setup local store
-	log.Debug(fmt.Sprintf("Set up local storage"))
+	log.Debug("Setup local storage")

 	self.bzz = network.NewBzz(bzzconfig, to, stateStore, stream.Spec, self.streamer.Run)

@@ -216,11 +213,9 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
 	}
 	self.api = api.NewAPI(self.fileStore, self.dns, resourceHandler, self.privateKey)
-	// Manifests for Smart Hosting
-	log.Debug(fmt.Sprintf("-> Web3 virtual
server API")) self.sfs = fuse.NewSwarmFS(self.api) - log.Debug("-> Initializing Fuse file system") + log.Debug("Initialized FUSE filesystem") return self, nil } @@ -341,7 +336,7 @@ func (self *Swarm) Start(srv *p2p.Server) error { // update uaddr to correct enode newaddr := self.bzz.UpdateLocalAddr([]byte(srv.Self().String())) - log.Warn("Updated bzz local addr", "oaddr", fmt.Sprintf("%x", newaddr.OAddr), "uaddr", fmt.Sprintf("%s", newaddr.UAddr)) + log.Info("Updated bzz local addr", "oaddr", fmt.Sprintf("%x", newaddr.OAddr), "uaddr", fmt.Sprintf("%s", newaddr.UAddr)) // set chequebook if self.config.SwapEnabled { ctx := context.Background() // The initial setup has no deadline. @@ -354,18 +349,17 @@ func (self *Swarm) Start(srv *p2p.Server) error { log.Debug(fmt.Sprintf("SWAP disabled: no cheque book set")) } - log.Warn(fmt.Sprintf("Starting Swarm service")) + log.Info("Starting bzz service") err := self.bzz.Start(srv) if err != nil { log.Error("bzz failed", "err", err) return err } - log.Info(fmt.Sprintf("Swarm network started on bzz address: %x", self.bzz.Hive.Overlay.BaseAddr())) + log.Info("Swarm network started", "bzzaddr", fmt.Sprintf("%x", self.bzz.Hive.Overlay.BaseAddr())) if self.ps != nil { self.ps.Start(srv) - log.Info("Pss started") } // start swarm http proxy server @@ -373,13 +367,17 @@ func (self *Swarm) Start(srv *p2p.Server) error { addr := net.JoinHostPort(self.config.ListenAddr, self.config.Port) server := httpapi.NewServer(self.api, self.config.Cors) - go server.ListenAndServe(addr) - } - - log.Debug(fmt.Sprintf("Swarm http proxy started on port: %v", self.config.Port)) + if self.config.Cors != "" { + log.Debug("Swarm HTTP proxy CORS headers", "allowedOrigins", self.config.Cors) + } - if self.config.Cors != "" { - log.Debug(fmt.Sprintf("Swarm http proxy started with corsdomain: %v", self.config.Cors)) + log.Debug("Starting Swarm HTTP proxy", "port", self.config.Port) + go func() { + err := server.ListenAndServe(addr) + if err != nil { + log.Error("Could not start Swarm HTTP proxy", "err", err.Error()) + } + }() } self.periodicallyUpdateGauges() From c582667c9b88fabfca2908e1a6ef9b3a71daef37 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Tue, 21 Aug 2018 10:34:40 +0200 Subject: [PATCH 156/166] swarm/network: bump bzz protocol version (#17449) --- swarm/network/protocol.go | 2 +- swarm/network/protocol_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/swarm/network/protocol.go b/swarm/network/protocol.go index 7e7fee8efe02..7f7ca5eed579 100644 --- a/swarm/network/protocol.go +++ b/swarm/network/protocol.go @@ -44,7 +44,7 @@ const ( // BzzSpec is the spec of the generic swarm handshake var BzzSpec = &protocols.Spec{ Name: "bzz", - Version: 5, + Version: 6, MaxMsgSize: 10 * 1024 * 1024, Messages: []interface{}{ HandshakeMsg{}, diff --git a/swarm/network/protocol_test.go b/swarm/network/protocol_test.go index b74b72c68b8b..c052c536a4f3 100644 --- a/swarm/network/protocol_test.go +++ b/swarm/network/protocol_test.go @@ -31,7 +31,7 @@ import ( ) const ( - TestProtocolVersion = 5 + TestProtocolVersion = 6 TestProtocolNetworkID = 3 ) From 76301ca0517ef2d34e6e2d741b0b24cf0ff98642 Mon Sep 17 00:00:00 2001 From: Pierre Neter Date: Tue, 21 Aug 2018 17:36:38 +0700 Subject: [PATCH 157/166] eth: upgradedb subcommand was dropped (#17464) --- eth/backend.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth/backend.go b/eth/backend.go index 6549cb8a3de0..588b78256817 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -138,7 +138,7 @@ 
func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) { if !config.SkipBcVersionCheck { bcVersion := rawdb.ReadDatabaseVersion(chainDb) if bcVersion != core.BlockChainVersion && bcVersion != 0 { - return nil, fmt.Errorf("Blockchain DB version mismatch (%d / %d). Run geth upgradedb.\n", bcVersion, core.BlockChainVersion) + return nil, fmt.Errorf("Blockchain DB version mismatch (%d / %d).\n", bcVersion, core.BlockChainVersion) } rawdb.WriteDatabaseVersion(chainDb, core.BlockChainVersion) } From 355fc47d396298bccf93c37bdbba9b9e88864790 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felf=C3=B6ldi=20Zsolt?= Date: Tue, 21 Aug 2018 13:58:10 +0200 Subject: [PATCH 158/166] les: fix CHT field in nodeInfo (#17465) --- les/commons.go | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/les/commons.go b/les/commons.go index 251b7a5833c6..d8e9412951a0 100644 --- a/les/commons.go +++ b/les/commons.go @@ -76,18 +76,30 @@ func (c *lesCommons) makeProtocols(versions []uint) []p2p.Protocol { // nodeInfo retrieves some protocol metadata about the running host node. func (c *lesCommons) nodeInfo() interface{} { var cht light.TrustedCheckpoint - sections, _, sectionHead := c.chtIndexer.Sections() - sections2, _, sectionHead2 := c.bloomTrieIndexer.Sections() + sections, _, _ := c.chtIndexer.Sections() + sections2, _, _ := c.bloomTrieIndexer.Sections() + + if !c.protocolManager.lightSync { + // convert to client section size if running in server mode + sections /= light.CHTFrequencyClient / light.CHTFrequencyServer + } + if sections2 < sections { sections = sections2 - sectionHead = sectionHead2 } if sections > 0 { sectionIndex := sections - 1 + sectionHead := c.bloomTrieIndexer.SectionHead(sectionIndex) + var chtRoot common.Hash + if c.protocolManager.lightSync { + chtRoot = light.GetChtRoot(c.chainDb, sectionIndex, sectionHead) + } else { + chtRoot = light.GetChtV2Root(c.chainDb, sectionIndex, sectionHead) + } cht = light.TrustedCheckpoint{ SectionIdx: sectionIndex, SectionHead: sectionHead, - CHTRoot: light.GetChtRoot(c.chainDb, sectionIndex, sectionHead), + CHTRoot: chtRoot, BloomRoot: light.GetBloomTrieRoot(c.chainDb, sectionIndex, sectionHead), } } From 9f036647e4b3e7c3aa8941dc239f85326a5e5ecd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 21 Aug 2018 14:39:28 +0300 Subject: [PATCH 159/166] consensus/clique, light: light client snapshots on Rinkeby --- consensus/clique/clique.go | 31 ++++++++++++++-------------- consensus/clique/snapshot_test.go | 2 +- light/lightchain.go | 34 ++++++++++++++++++++----------- light/postprocess.go | 25 ++++++++++++----------- params/config.go | 1 + 5 files changed, 53 insertions(+), 40 deletions(-) diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index 59bb3d40b42b..08594470147b 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -387,22 +387,23 @@ func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash commo break } } - // If we're at block zero, make a snapshot - if number == 0 { - genesis := chain.GetHeaderByNumber(0) - if err := c.VerifyHeader(chain, genesis, false); err != nil { - return nil, err - } - signers := make([]common.Address, (len(genesis.Extra)-extraVanity-extraSeal)/common.AddressLength) - for i := 0; i < len(signers); i++ { - copy(signers[i][:], genesis.Extra[extraVanity+i*common.AddressLength:]) - } - snap = newSnapshot(c.config, c.signatures, 0, genesis.Hash(), signers) - if err := snap.store(c.db); err != nil { - 
return nil, err
+	// If we're at a checkpoint block, make a snapshot if it's known
+	if number%c.config.Epoch == 0 {
+		checkpoint := chain.GetHeaderByNumber(number)
+		if checkpoint != nil {
+			hash := checkpoint.Hash()
+
+			signers := make([]common.Address, (len(checkpoint.Extra)-extraVanity-extraSeal)/common.AddressLength)
+			for i := 0; i < len(signers); i++ {
+				copy(signers[i][:], checkpoint.Extra[extraVanity+i*common.AddressLength:])
+			}
+			snap = newSnapshot(c.config, c.signatures, number, hash, signers)
+			if err := snap.store(c.db); err != nil {
+				return nil, err
+			}
+			log.Info("Stored checkpoint snapshot to disk", "number", number, "hash", hash)
+			break
 		}
-		log.Trace("Stored genesis voting snapshot to disk")
-		break
 	}
 	// No snapshot for this header, gather the header and move backward
 	var header *types.Header
diff --git a/consensus/clique/snapshot_test.go b/consensus/clique/snapshot_test.go
index 5ac730c9e7c1..17719884f0e7 100644
--- a/consensus/clique/snapshot_test.go
+++ b/consensus/clique/snapshot_test.go
@@ -84,7 +84,7 @@ func (r *testerChainReader) GetHeaderByNumber(number uint64) *types.Header {
 	if number == 0 {
 		return rawdb.ReadHeader(r.db, rawdb.ReadCanonicalHash(r.db, 0), 0)
 	}
-	panic("not supported")
+	return nil
 }

 // Tests that voting is evaluated correctly for various simple and complex scenarios.
diff --git a/light/lightchain.go b/light/lightchain.go
index b7e629e88b53..b5afe1f0e693 100644
--- a/light/lightchain.go
+++ b/light/lightchain.go
@@ -464,22 +464,32 @@ func (self *LightChain) GetHeaderByNumberOdr(ctx context.Context, number uint64)
 func (self *LightChain) Config() *params.ChainConfig { return self.hc.Config() }

 func (self *LightChain) SyncCht(ctx context.Context) bool {
+	// If we don't have a CHT indexer, abort
 	if self.odr.ChtIndexer() == nil {
 		return false
 	}
-	headNum := self.CurrentHeader().Number.Uint64()
-	chtCount, _, _ := self.odr.ChtIndexer().Sections()
-	if headNum+1 < chtCount*CHTFrequencyClient {
-		num := chtCount*CHTFrequencyClient - 1
-		header, err := GetHeaderByNumber(ctx, self.odr, num)
-		if header != nil && err == nil {
-			self.mu.Lock()
-			if self.hc.CurrentHeader().Number.Uint64() < header.Number.Uint64() {
-				self.hc.SetCurrentHeader(header)
-			}
-			self.mu.Unlock()
-			return true
+	// Ensure the remote CHT head is ahead of us
+	head := self.CurrentHeader().Number.Uint64()
+	sections, _, _ := self.odr.ChtIndexer().Sections()
+
+	latest := sections*CHTFrequencyClient - 1
+	if clique := self.hc.Config().Clique; clique != nil {
+		latest -= latest % clique.Epoch // epoch snapshot for clique
+	}
+	if head >= latest {
+		return false
+	}
+	// Retrieve the latest useful header and update to it
+	if header, err := GetHeaderByNumber(ctx, self.odr, latest); header != nil && err == nil {
+		self.mu.Lock()
+		defer self.mu.Unlock()
+
+		// Ensure the chain didn't move past the latest block while retrieving it
+		if self.hc.CurrentHeader().Number.Uint64() < header.Number.Uint64() {
+			log.Info("Updated latest header based on CHT", "number", header.Number, "hash", header.Hash())
+			self.hc.SetCurrentHeader(header)
 		}
+		return true
 	}
 	return false
 }
diff --git a/light/postprocess.go b/light/postprocess.go
index 42985aff054b..f105d57b5576 100644
--- a/light/postprocess.go
+++ b/light/postprocess.go
@@ -58,28 +58,29 @@ type TrustedCheckpoint struct {
 	SectionHead, CHTRoot, BloomRoot common.Hash
 }

-var (
-	mainnetCheckpoint = TrustedCheckpoint{
+// trustedCheckpoints associates each known checkpoint with the genesis hash of the chain it belongs to
+var trustedCheckpoints =
map[common.Hash]TrustedCheckpoint{ + params.MainnetGenesisHash: { name: "mainnet", SectionIdx: 187, SectionHead: common.HexToHash("e6baa034efa31562d71ff23676512dec6562c1ad0301e08843b907e81958c696"), CHTRoot: common.HexToHash("28001955219719cf06de1b08648969139d123a9835fc760547a1e4dabdabc15a"), BloomRoot: common.HexToHash("395ca2373fc662720ac6b58b3bbe71f68aa0f38b63b2d3553dd32ff3c51eebc4"), - } - - ropstenCheckpoint = TrustedCheckpoint{ + }, + params.TestnetGenesisHash: { name: "ropsten", SectionIdx: 117, SectionHead: common.HexToHash("9529b38631ae30783f56cbe4c3b9f07575b770ecba4f6e20a274b1e2f40fede1"), CHTRoot: common.HexToHash("6f48e9f101f1fac98e7d74fbbcc4fda138358271ffd974d40d2506f0308bb363"), BloomRoot: common.HexToHash("8242342e66e942c0cd893484e6736b9862ceb88b43ca344bb06a8285ac1b6d64"), - } -) - -// trustedCheckpoints associates each known checkpoint with the genesis hash of the chain it belongs to -var trustedCheckpoints = map[common.Hash]TrustedCheckpoint{ - params.MainnetGenesisHash: mainnetCheckpoint, - params.TestnetGenesisHash: ropstenCheckpoint, + }, + params.RinkebyGenesisHash: { + name: "rinkeby", + SectionIdx: 85, + SectionHead: common.HexToHash("92cfa67afc4ad8ab0dcbc6fa49efd14b5b19402442e7317e6bc879d85f89d64d"), + CHTRoot: common.HexToHash("2802ec92cd7a54a75bca96afdc666ae7b99e5d96cf8192dcfb09588812f51564"), + BloomRoot: common.HexToHash("ebefeb31a9a42866d8cf2d2477704b4c3d7c20d0e4e9b5aaa77f396e016a1263"), + }, } var ( diff --git a/params/config.go b/params/config.go index b9e9bb8d6ebd..70a1edead40b 100644 --- a/params/config.go +++ b/params/config.go @@ -27,6 +27,7 @@ import ( var ( MainnetGenesisHash = common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3") TestnetGenesisHash = common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d") + RinkebyGenesisHash = common.HexToHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177") ) var ( From 86acdf1a5bb31196adbc92e4923e0398868ce876 Mon Sep 17 00:00:00 2001 From: Jeremy Schlatter Date: Tue, 21 Aug 2018 06:13:03 -0700 Subject: [PATCH 160/166] vendor: update rjeczalik/notify so that it compiles on go1.11 (#17467) --- vendor/github.com/rjeczalik/notify/README.md | 1 + .../github.com/rjeczalik/notify/appveyor.yml | 12 +- .../rjeczalik/notify/debug_debug.go | 4 +- .../rjeczalik/notify/debug_nodebug.go | 4 +- .../rjeczalik/notify/watcher_fsevents_cgo.go | 12 +- .../notify/watcher_fsevents_go1.10.go | 14 ++ .../notify/watcher_fsevents_go1.11.go | 9 ++ .../notify/watcher_notimplemented.go | 15 +++ .../rjeczalik/notify/watcher_readdcw.go | 123 +++++++++++------- .../rjeczalik/notify/watcher_stub.go | 22 +--- .../rjeczalik/notify/watcher_trigger.go | 3 +- vendor/vendor.json | 6 +- 12 files changed, 143 insertions(+), 82 deletions(-) create mode 100644 vendor/github.com/rjeczalik/notify/watcher_fsevents_go1.10.go create mode 100644 vendor/github.com/rjeczalik/notify/watcher_fsevents_go1.11.go create mode 100644 vendor/github.com/rjeczalik/notify/watcher_notimplemented.go diff --git a/vendor/github.com/rjeczalik/notify/README.md b/vendor/github.com/rjeczalik/notify/README.md index 02a5f3290895..ad743b2a2440 100644 --- a/vendor/github.com/rjeczalik/notify/README.md +++ b/vendor/github.com/rjeczalik/notify/README.md @@ -19,3 +19,4 @@ Filesystem event notification library on steroids. 
(under active development) - [github.com/cortesi/devd](https://github.com/cortesi/devd) - [github.com/cortesi/modd](https://github.com/cortesi/modd) - [github.com/syncthing/syncthing-inotify](https://github.com/syncthing/syncthing-inotify) +- [github.com/OrlovEvgeny/TinyJPG](https://github.com/OrlovEvgeny/TinyJPG) diff --git a/vendor/github.com/rjeczalik/notify/appveyor.yml b/vendor/github.com/rjeczalik/notify/appveyor.yml index a495bd7c7300..a0bdc37a389a 100644 --- a/vendor/github.com/rjeczalik/notify/appveyor.yml +++ b/vendor/github.com/rjeczalik/notify/appveyor.yml @@ -7,16 +7,20 @@ clone_folder: c:\projects\src\github.com\rjeczalik\notify environment: PATH: c:\projects\bin;%PATH% GOPATH: c:\projects - NOTIFY_TIMEOUT: 5s + NOTIFY_TIMEOUT: 10s + GOVERSION: 1.10.3 install: + - rmdir c:\go /s /q + - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.zip + - 7z x go%GOVERSION%.windows-amd64.zip -y -oC:\ > NUL + + - cd %APPVEYOR_BUILD_FOLDER% - go version - - go get -v -t ./... build_script: - - go tool vet -all . - go build ./... - - go test -v -timeout 60s -race ./... + - go test -v -timeout 120s -race ./... test: off diff --git a/vendor/github.com/rjeczalik/notify/debug_debug.go b/vendor/github.com/rjeczalik/notify/debug_debug.go index 6fca891ab305..9d234cedda4f 100644 --- a/vendor/github.com/rjeczalik/notify/debug_debug.go +++ b/vendor/github.com/rjeczalik/notify/debug_debug.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014-2015 The Notify Authors. All rights reserved. +// Copyright (c) 2014-2018 The Notify Authors. All rights reserved. // Use of this source code is governed by the MIT license that can be // found in the LICENSE file. @@ -6,4 +6,4 @@ package notify -var debugTag bool = true +var debugTag = true diff --git a/vendor/github.com/rjeczalik/notify/debug_nodebug.go b/vendor/github.com/rjeczalik/notify/debug_nodebug.go index be391a2769d9..9ebf880d880b 100644 --- a/vendor/github.com/rjeczalik/notify/debug_nodebug.go +++ b/vendor/github.com/rjeczalik/notify/debug_nodebug.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014-2015 The Notify Authors. All rights reserved. +// Copyright (c) 2014-2018 The Notify Authors. All rights reserved. // Use of this source code is governed by the MIT license that can be // found in the LICENSE file. @@ -6,4 +6,4 @@ package notify -var debugTag bool = false +var debugTag = false diff --git a/vendor/github.com/rjeczalik/notify/watcher_fsevents_cgo.go b/vendor/github.com/rjeczalik/notify/watcher_fsevents_cgo.go index a2b332a2e05d..95ee704444c9 100644 --- a/vendor/github.com/rjeczalik/notify/watcher_fsevents_cgo.go +++ b/vendor/github.com/rjeczalik/notify/watcher_fsevents_cgo.go @@ -48,7 +48,7 @@ var wg sync.WaitGroup // used to wait until the runloop starts // started and is ready via the wg. It also serves purpose of a dummy source, // thanks to it the runloop does not return as it also has at least one source // registered. 
-var source = C.CFRunLoopSourceCreate(nil, 0, &C.CFRunLoopSourceContext{
+var source = C.CFRunLoopSourceCreate(refZero, 0, &C.CFRunLoopSourceContext{
 	perform: (C.CFRunLoopPerformCallBack)(C.gosource),
 })
 
@@ -90,6 +90,10 @@ func gostream(_, info uintptr, n C.size_t, paths, flags, ids uintptr) {
 	if n == 0 {
 		return
 	}
+	fn := streamFuncs.get(info)
+	if fn == nil {
+		return
+	}
 	ev := make([]FSEvent, 0, int(n))
 	for i := uintptr(0); i < uintptr(n); i++ {
 		switch flags := *(*uint32)(unsafe.Pointer((flags + i*offflag))); {
@@ -104,7 +108,7 @@ func gostream(_, info uintptr, n C.size_t, paths, flags, ids uintptr) {
 		}
 	}
 
-	streamFuncs.get(info)(ev)
+	fn(ev)
 }
 
 // StreamFunc is a callback called when stream receives file events.
@@ -162,8 +166,8 @@ func (s *stream) Start() error {
 		return nil
 	}
 	wg.Wait()
-	p := C.CFStringCreateWithCStringNoCopy(nil, C.CString(s.path), C.kCFStringEncodingUTF8, nil)
-	path := C.CFArrayCreate(nil, (*unsafe.Pointer)(unsafe.Pointer(&p)), 1, nil)
+	p := C.CFStringCreateWithCStringNoCopy(refZero, C.CString(s.path), C.kCFStringEncodingUTF8, refZero)
+	path := C.CFArrayCreate(refZero, (*unsafe.Pointer)(unsafe.Pointer(&p)), 1, nil)
 	ctx := C.FSEventStreamContext{}
 	ref := C.EventStreamCreate(&ctx, C.uintptr_t(s.info), path, C.FSEventStreamEventId(atomic.LoadUint64(&since)), latency, flags)
 	if ref == nilstream {
diff --git a/vendor/github.com/rjeczalik/notify/watcher_fsevents_go1.10.go b/vendor/github.com/rjeczalik/notify/watcher_fsevents_go1.10.go
new file mode 100644
index 000000000000..dd2433d20a7b
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/watcher_fsevents_go1.10.go
@@ -0,0 +1,14 @@
+// Copyright (c) 2018 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+// +build darwin,!kqueue,cgo,!go1.11
+
+package notify
+
+/*
+ #include <CoreServices/CoreServices.h>
+*/
+import "C"
+
+var refZero = (*C.struct___CFAllocator)(nil)
diff --git a/vendor/github.com/rjeczalik/notify/watcher_fsevents_go1.11.go b/vendor/github.com/rjeczalik/notify/watcher_fsevents_go1.11.go
new file mode 100644
index 000000000000..92b406ce7ca5
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/watcher_fsevents_go1.11.go
@@ -0,0 +1,9 @@
+// Copyright (c) 2018 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+// +build darwin,!kqueue,go1.11
+
+package notify
+
+const refZero = 0
diff --git a/vendor/github.com/rjeczalik/notify/watcher_notimplemented.go b/vendor/github.com/rjeczalik/notify/watcher_notimplemented.go
new file mode 100644
index 000000000000..bb0672fd88bc
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/watcher_notimplemented.go
@@ -0,0 +1,15 @@
+// Copyright (c) 2014-2018 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+// +build !darwin,!linux,!freebsd,!dragonfly,!netbsd,!openbsd,!windows
+// +build !kqueue,!solaris
+
+package notify
+
+import "errors"
+
+// newWatcher stub.
+func newWatcher(chan<- EventInfo) watcher { + return watcherStub{errors.New("notify: not implemented")} +} diff --git a/vendor/github.com/rjeczalik/notify/watcher_readdcw.go b/vendor/github.com/rjeczalik/notify/watcher_readdcw.go index 1494fcd79993..b69811a690f9 100644 --- a/vendor/github.com/rjeczalik/notify/watcher_readdcw.go +++ b/vendor/github.com/rjeczalik/notify/watcher_readdcw.go @@ -1,4 +1,4 @@ -// Copyright (c) 2014-2015 The Notify Authors. All rights reserved. +// Copyright (c) 2014-2018 The Notify Authors. All rights reserved. // Use of this source code is governed by the MIT license that can be // found in the LICENSE file. @@ -22,7 +22,7 @@ import ( const readBufferSize = 4096 // Since all operations which go through the Windows completion routine are done -// asynchronously, filter may set one of the constants belor. They were defined +// asynchronously, filter may set one of the constants below. They were defined // in order to distinguish whether current folder should be re-registered in // ReadDirectoryChangesW function or some control operations need to be executed. const ( @@ -109,8 +109,13 @@ func (g *grip) register(cph syscall.Handle) (err error) { // buffer. Directory changes that occur between calls to this function are added // to the buffer and then, returned with the next call. func (g *grip) readDirChanges() error { + handle := syscall.Handle(atomic.LoadUintptr((*uintptr)(&g.handle))) + if handle == syscall.InvalidHandle { + return nil // Handle was closed. + } + return syscall.ReadDirectoryChanges( - g.handle, + handle, &g.buffer[0], uint32(unsafe.Sizeof(g.buffer)), g.recursive, @@ -220,12 +225,27 @@ func (wd *watched) updateGrip(idx int, cph syscall.Handle, reset bool, // returned from the operating system kernel. func (wd *watched) closeHandle() (err error) { for _, g := range wd.digrip { - if g != nil && g.handle != syscall.InvalidHandle { - switch suberr := syscall.CloseHandle(g.handle); { - case suberr == nil: - g.handle = syscall.InvalidHandle - case err == nil: - err = suberr + if g == nil { + continue + } + + for { + handle := syscall.Handle(atomic.LoadUintptr((*uintptr)(&g.handle))) + if handle == syscall.InvalidHandle { + break // Already closed. + } + + e := syscall.CloseHandle(handle) + if e != nil && err == nil { + err = e + } + + // Set invalid handle even when CloseHandle fails. This will leak + // the handle but, since we can't close it anyway, there won't be + // any difference. + if atomic.CompareAndSwapUintptr((*uintptr)(&g.handle), + (uintptr)(handle), (uintptr)(syscall.InvalidHandle)) { + break } } } @@ -272,50 +292,49 @@ func (r *readdcw) RecursiveWatch(path string, event Event) error { // watch inserts a directory to the group of watched folders. If watched folder // already exists, function tries to rewatch it with new filters(NOT VALID). Moreover, // watch starts the main event loop goroutine when called for the first time. 
-func (r *readdcw) watch(path string, event Event, recursive bool) (err error) { +func (r *readdcw) watch(path string, event Event, recursive bool) error { if event&^(All|fileNotifyChangeAll) != 0 { return errors.New("notify: unknown event") } + r.Lock() - wd, ok := r.m[path] - r.Unlock() - if !ok { - if err = r.lazyinit(); err != nil { - return - } - r.Lock() - defer r.Unlock() - if wd, ok = r.m[path]; ok { - dbgprint("watch: exists already") - return - } - if wd, err = newWatched(r.cph, uint32(event), recursive, path); err != nil { - return - } - r.m[path] = wd - dbgprint("watch: new watch added") - } else { - dbgprint("watch: exists already") + defer r.Unlock() + + if wd, ok := r.m[path]; ok { + dbgprint("watch: already exists") + wd.filter &^= stateUnwatch + return nil } + + if err := r.lazyinit(); err != nil { + return err + } + + wd, err := newWatched(r.cph, uint32(event), recursive, path) + if err != nil { + return err + } + + r.m[path] = wd + dbgprint("watch: new watch added") + return nil } -// lazyinit creates an I/O completion port and starts the main event processing -// loop. This method uses Double-Checked Locking optimization. +// lazyinit creates an I/O completion port and starts the main event loop. func (r *readdcw) lazyinit() (err error) { invalid := uintptr(syscall.InvalidHandle) + if atomic.LoadUintptr((*uintptr)(&r.cph)) == invalid { - r.Lock() - defer r.Unlock() - if atomic.LoadUintptr((*uintptr)(&r.cph)) == invalid { - cph := syscall.InvalidHandle - if cph, err = syscall.CreateIoCompletionPort(cph, 0, 0, 0); err != nil { - return - } - r.cph, r.start = cph, true - go r.loop() + cph := syscall.InvalidHandle + if cph, err = syscall.CreateIoCompletionPort(cph, 0, 0, 0); err != nil { + return } + + r.cph, r.start = cph, true + go r.loop() } + return } @@ -364,6 +383,7 @@ func (r *readdcw) loopstate(overEx *overlappedEx) { overEx.parent.parent.recreate(r.cph) case stateUnwatch: dbgprint("loopstate unwatch") + overEx.parent.parent.closeHandle() delete(r.m, syscall.UTF16ToString(overEx.parent.pathw)) case stateCPClose: default: @@ -495,27 +515,30 @@ func (r *readdcw) RecursiveUnwatch(path string) error { // TODO : pknap func (r *readdcw) unwatch(path string) (err error) { var wd *watched + r.Lock() defer r.Unlock() if wd, err = r.nonStateWatchedLocked(path); err != nil { return } + wd.filter |= stateUnwatch - if err = wd.closeHandle(); err != nil { - wd.filter &^= stateUnwatch - return - } + dbgprint("unwatch: set unwatch state") + if _, attrErr := syscall.GetFileAttributes(&wd.pathw[0]); attrErr != nil { for _, g := range wd.digrip { - if g != nil { - dbgprint("unwatch: posting") - if err = syscall.PostQueuedCompletionStatus(r.cph, 0, 0, (*syscall.Overlapped)(unsafe.Pointer(g.ovlapped))); err != nil { - wd.filter &^= stateUnwatch - return - } + if g == nil { + continue + } + + dbgprint("unwatch: posting") + if err = syscall.PostQueuedCompletionStatus(r.cph, 0, 0, (*syscall.Overlapped)(unsafe.Pointer(g.ovlapped))); err != nil { + wd.filter &^= stateUnwatch + return } } } + return } diff --git a/vendor/github.com/rjeczalik/notify/watcher_stub.go b/vendor/github.com/rjeczalik/notify/watcher_stub.go index 68b9c135b0c8..9b284ddc85f3 100644 --- a/vendor/github.com/rjeczalik/notify/watcher_stub.go +++ b/vendor/github.com/rjeczalik/notify/watcher_stub.go @@ -1,23 +1,13 @@ -// Copyright (c) 2014-2015 The Notify Authors. All rights reserved. +// Copyright (c) 2014-2018 The Notify Authors. All rights reserved. 
// Use of this source code is governed by the MIT license that can be // found in the LICENSE file. -// +build !darwin,!linux,!freebsd,!dragonfly,!netbsd,!openbsd,!windows -// +build !kqueue,!solaris - package notify -import "errors" - -type stub struct{ error } - -// newWatcher stub. -func newWatcher(chan<- EventInfo) watcher { - return stub{errors.New("notify: not implemented")} -} +type watcherStub struct{ error } // Following methods implement notify.watcher interface. -func (s stub) Watch(string, Event) error { return s } -func (s stub) Rewatch(string, Event, Event) error { return s } -func (s stub) Unwatch(string) (err error) { return s } -func (s stub) Close() error { return s } +func (s watcherStub) Watch(string, Event) error { return s } +func (s watcherStub) Rewatch(string, Event, Event) error { return s } +func (s watcherStub) Unwatch(string) (err error) { return s } +func (s watcherStub) Close() error { return s } diff --git a/vendor/github.com/rjeczalik/notify/watcher_trigger.go b/vendor/github.com/rjeczalik/notify/watcher_trigger.go index 78151f909bc9..1ebe04829ee6 100644 --- a/vendor/github.com/rjeczalik/notify/watcher_trigger.go +++ b/vendor/github.com/rjeczalik/notify/watcher_trigger.go @@ -106,7 +106,8 @@ func newWatcher(c chan<- EventInfo) watcher { } t.t = newTrigger(t.pthLkp) if err := t.t.Init(); err != nil { - panic(err) + t.Close() + return watcherStub{fmt.Errorf("failed setting up watcher: %v", err)} } go t.monitor() return t diff --git a/vendor/vendor.json b/vendor/vendor.json index 78886c7bb3a4..0bfc1ca0c183 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -370,10 +370,10 @@ "revisionTime": "2017-08-14T17:01:13Z" }, { - "checksumSHA1": "28UVHMmHx0iqO0XiJsjx+fwILyI=", + "checksumSHA1": "D8AVDI39CJ+jvw0HOotYU2gz54c=", "path": "github.com/rjeczalik/notify", - "revision": "c31e5f2cb22b3e4ef3f882f413847669bf2652b9", - "revisionTime": "2018-02-03T14:01:15Z" + "revision": "4e54e7fd043e865c50bda93359fb78813a8d165b", + "revisionTime": "2018-08-08T20:39:25Z" }, { "checksumSHA1": "5uqO4ITTDMklKi3uNaE/D9LQ5nM=", From a063fe9b2defdb595068483b4c5df41f1a3a4860 Mon Sep 17 00:00:00 2001 From: gary rong Date: Tue, 21 Aug 2018 22:33:04 +0800 Subject: [PATCH 161/166] miner: fix uncle iteration logic (#17469) --- miner/worker.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/miner/worker.go b/miner/worker.go index 2f76f2a92cb6..23cfaf22566b 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -308,7 +308,7 @@ func (w *worker) mainLoop() { return false } uncles = append(uncles, uncle.Header()) - return true + return false }) w.commit(uncles, nil, true, start) } @@ -522,7 +522,7 @@ func (w *worker) updateSnapshot() { return false } uncles = append(uncles, uncle.Header()) - return true + return false }) w.snapshotBlock = types.NewBlock( From 522cfc68ff496aee4205add982db049dc3092024 Mon Sep 17 00:00:00 2001 From: Geon Kim Date: Wed, 22 Aug 2018 03:13:33 +0900 Subject: [PATCH 162/166] swarm: fix typos (#17473) --- swarm/network/simulation/simulation_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/swarm/network/simulation/simulation_test.go b/swarm/network/simulation/simulation_test.go index 8576732c9f90..eed09bf508d5 100644 --- a/swarm/network/simulation/simulation_test.go +++ b/swarm/network/simulation/simulation_test.go @@ -63,7 +63,7 @@ func TestRun(t *testing.T) { } }) - t.Run("cancelation", func(t *testing.T) { + t.Run("cancellation", func(t *testing.T) { ctx, cancel := 
context.WithTimeout(context.Background(), 50*time.Millisecond)
 		defer cancel()
 
@@ -164,7 +164,7 @@ func TestDone(t *testing.T) {
 
 	select {
 	case <-time.After(timeout):
-		t.Error("done channel closing timmed out")
+		t.Error("done channel closing timed out")
 	case <-sim.Done():
 		if d := time.Since(start); d < sleep {
 			t.Errorf("done channel closed sooner then expected: %s", d)
@@ -172,7 +172,7 @@ func TestDone(t *testing.T) {
 	}
 }
 
-// a helper map for usual services that do not do anyting
+// a helper map for usual services that do not do anything
 var noopServiceFuncMap = map[string]ServiceFunc{
 	"noop": noopServiceFunc,
 }

From b2c644ffb5c283a171ddf3889693673939917541 Mon Sep 17 00:00:00 2001
From: gary rong
Date: Wed, 22 Aug 2018 03:56:54 +0800
Subject: [PATCH 163/166] cmd, eth, miner: make recommit configurable (#17444)

* cmd, eth, miner: make recommit configurable

* cmd, eth, les, miner: polish a bit

* miner: filter duplicate sealing work

* cmd: remove unnecessary conversion

* miner: avoid micro-optimization in favor of cleaner code
---
 cmd/geth/main.go            |   1 +
 cmd/geth/usage.go           |   1 +
 cmd/utils/flags.go          |  22 ++--
 eth/api.go                  |   6 ++
 eth/backend.go              |   8 +-
 eth/config.go               |  15 +--
 eth/gen_config.go           |  53 ++++++++--
 internal/web3ext/web3ext.go |   5 +
 les/backend.go              |   2 +-
 miner/miner.go              |  10 +-
 miner/worker.go             | 202 +++++++++++++++++++++++++++++-------
 miner/worker_test.go        | 106 ++++++++++++++++++-
 12 files changed, 360 insertions(+), 71 deletions(-)

diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index a0638605132e..2e87bb82007f 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -106,6 +106,7 @@ var (
 		utils.MinerLegacyEtherbaseFlag,
 		utils.MinerExtraDataFlag,
 		utils.MinerLegacyExtraDataFlag,
+		utils.MinerRecommitIntervalFlag,
 		utils.NATFlag,
 		utils.NoDiscoverFlag,
 		utils.DiscoveryV5Flag,
diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go
index 9e18f7047279..674c5d9015ca 100644
--- a/cmd/geth/usage.go
+++ b/cmd/geth/usage.go
@@ -190,6 +190,7 @@ var AppHelpFlagGroups = []flagGroup{
 			utils.MinerGasTargetFlag,
 			utils.MinerEtherbaseFlag,
 			utils.MinerExtraDataFlag,
+			utils.MinerRecommitIntervalFlag,
 		},
 	},
 	{
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index e3a8cc2eac9e..7317655836d5 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -335,12 +335,12 @@ var (
 	MinerGasPriceFlag = BigFlag{
 		Name:  "miner.gasprice",
 		Usage: "Minimal gas price for mining a transactions",
-		Value: eth.DefaultConfig.GasPrice,
+		Value: eth.DefaultConfig.MinerGasPrice,
 	}
 	MinerLegacyGasPriceFlag = BigFlag{
 		Name:  "gasprice",
 		Usage: "Minimal gas price for mining a transactions (deprecated, use --miner.gasprice)",
-		Value: eth.DefaultConfig.GasPrice,
+		Value: eth.DefaultConfig.MinerGasPrice,
 	}
 	MinerEtherbaseFlag = cli.StringFlag{
 		Name:  "miner.etherbase",
@@ -360,6 +360,11 @@ var (
 		Name:  "extradata",
 		Usage: "Block extra data set by the miner (default = client version, deprecated, use --miner.extradata)",
 	}
+	MinerRecommitIntervalFlag = cli.DurationFlag{
+		Name:  "miner.recommit",
+		Usage: "Time interval to recreate the block being mined.",
+		Value: 3 * time.Second,
+	}
 	// Account settings
 	UnlockedAccountFlag = cli.StringFlag{
 		Name:  "unlock",
@@ -1124,16 +1129,19 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
 		cfg.DocRoot = ctx.GlobalString(DocRootFlag.Name)
 	}
 	if ctx.GlobalIsSet(MinerLegacyExtraDataFlag.Name) {
-		cfg.ExtraData = []byte(ctx.GlobalString(MinerLegacyExtraDataFlag.Name))
+		cfg.MinerExtraData = []byte(ctx.GlobalString(MinerLegacyExtraDataFlag.Name))
	}
 	if ctx.GlobalIsSet(MinerExtraDataFlag.Name) {
- cfg.ExtraData = []byte(ctx.GlobalString(MinerExtraDataFlag.Name)) + cfg.MinerExtraData = []byte(ctx.GlobalString(MinerExtraDataFlag.Name)) } if ctx.GlobalIsSet(MinerLegacyGasPriceFlag.Name) { - cfg.GasPrice = GlobalBig(ctx, MinerLegacyGasPriceFlag.Name) + cfg.MinerGasPrice = GlobalBig(ctx, MinerLegacyGasPriceFlag.Name) } if ctx.GlobalIsSet(MinerGasPriceFlag.Name) { - cfg.GasPrice = GlobalBig(ctx, MinerGasPriceFlag.Name) + cfg.MinerGasPrice = GlobalBig(ctx, MinerGasPriceFlag.Name) + } + if ctx.GlobalIsSet(MinerRecommitIntervalFlag.Name) { + cfg.MinerRecommit = ctx.Duration(MinerRecommitIntervalFlag.Name) } if ctx.GlobalIsSet(VMEnableDebugFlag.Name) { // TODO(fjl): force-enable this in --dev mode @@ -1176,7 +1184,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.GlobalInt(DeveloperPeriodFlag.Name)), developer.Address) if !ctx.GlobalIsSet(MinerGasPriceFlag.Name) && !ctx.GlobalIsSet(MinerLegacyGasPriceFlag.Name) { - cfg.GasPrice = big.NewInt(1) + cfg.MinerGasPrice = big.NewInt(1) } } // TODO(fjl): move trie cache generations into config diff --git a/eth/api.go b/eth/api.go index c1fbcb6d406a..4b0ba8edbd84 100644 --- a/eth/api.go +++ b/eth/api.go @@ -25,6 +25,7 @@ import ( "math/big" "os" "strings" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" @@ -160,6 +161,11 @@ func (api *PrivateMinerAPI) SetEtherbase(etherbase common.Address) bool { return true } +// SetRecommitInterval updates the interval for miner sealing work recommitting. +func (api *PrivateMinerAPI) SetRecommitInterval(interval int) { + api.e.Miner().SetRecommitInterval(time.Duration(interval) * time.Millisecond) +} + // GetHashrate returns the current hashrate of the miner. 
func (api *PrivateMinerAPI) GetHashrate() uint64 { return api.e.miner.HashRate() diff --git a/eth/backend.go b/eth/backend.go index 588b78256817..648175acfdc3 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -127,7 +127,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) { engine: CreateConsensusEngine(ctx, chainConfig, &config.Ethash, config.MinerNotify, chainDb), shutdownChan: make(chan bool), networkID: config.NetworkId, - gasPrice: config.GasPrice, + gasPrice: config.MinerGasPrice, etherbase: config.Etherbase, bloomRequests: make(chan chan *bloombits.Retrieval), bloomIndexer: NewBloomIndexer(chainDb, params.BloomBitsBlocks, bloomConfirms), @@ -167,13 +167,13 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) { return nil, err } - eth.miner = miner.New(eth, eth.chainConfig, eth.EventMux(), eth.engine) - eth.miner.SetExtra(makeExtraData(config.ExtraData)) + eth.miner = miner.New(eth, eth.chainConfig, eth.EventMux(), eth.engine, config.MinerRecommit) + eth.miner.SetExtra(makeExtraData(config.MinerExtraData)) eth.APIBackend = &EthAPIBackend{eth, nil} gpoParams := config.GPO if gpoParams.Default == nil { - gpoParams.Default = config.GasPrice + gpoParams.Default = config.MinerGasPrice } eth.APIBackend.gpo = gasprice.NewOracle(eth.APIBackend, gpoParams) diff --git a/eth/config.go b/eth/config.go index 0c82f2923259..cbd02416bea2 100644 --- a/eth/config.go +++ b/eth/config.go @@ -48,7 +48,7 @@ var DefaultConfig = Config{ DatabaseCache: 768, TrieCache: 256, TrieTimeout: 60 * time.Minute, - GasPrice: big.NewInt(18 * params.Shannon), + MinerGasPrice: big.NewInt(18 * params.Shannon), TxPool: core.DefaultTxPoolConfig, GPO: gasprice.Config{ @@ -95,11 +95,12 @@ type Config struct { TrieTimeout time.Duration // Mining-related options - Etherbase common.Address `toml:",omitempty"` - MinerThreads int `toml:",omitempty"` - MinerNotify []string `toml:",omitempty"` - ExtraData []byte `toml:",omitempty"` - GasPrice *big.Int + Etherbase common.Address `toml:",omitempty"` + MinerThreads int `toml:",omitempty"` + MinerNotify []string `toml:",omitempty"` + MinerExtraData []byte `toml:",omitempty"` + MinerGasPrice *big.Int + MinerRecommit time.Duration // Ethash options Ethash ethash.Config @@ -118,5 +119,5 @@ type Config struct { } type configMarshaling struct { - ExtraData hexutil.Bytes + MinerExtraData hexutil.Bytes } diff --git a/eth/gen_config.go b/eth/gen_config.go index 4f2e82d9419c..62556be7e1d2 100644 --- a/eth/gen_config.go +++ b/eth/gen_config.go @@ -4,6 +4,7 @@ package eth import ( "math/big" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" @@ -15,20 +16,26 @@ import ( var _ = (*configMarshaling)(nil) +// MarshalTOML marshals as TOML. 
func (c Config) MarshalTOML() (interface{}, error) { type Config struct { Genesis *core.Genesis `toml:",omitempty"` NetworkId uint64 SyncMode downloader.SyncMode + NoPruning bool LightServ int `toml:",omitempty"` LightPeers int `toml:",omitempty"` SkipBcVersionCheck bool `toml:"-"` DatabaseHandles int `toml:"-"` DatabaseCache int + TrieCache int + TrieTimeout time.Duration Etherbase common.Address `toml:",omitempty"` MinerThreads int `toml:",omitempty"` - ExtraData hexutil.Bytes `toml:",omitempty"` - GasPrice *big.Int + MinerNotify []string `toml:",omitempty"` + MinerExtraData hexutil.Bytes `toml:",omitempty"` + MinerGasPrice *big.Int + MinerRecommit time.Duration Ethash ethash.Config TxPool core.TxPoolConfig GPO gasprice.Config @@ -39,15 +46,20 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.Genesis = c.Genesis enc.NetworkId = c.NetworkId enc.SyncMode = c.SyncMode + enc.NoPruning = c.NoPruning enc.LightServ = c.LightServ enc.LightPeers = c.LightPeers enc.SkipBcVersionCheck = c.SkipBcVersionCheck enc.DatabaseHandles = c.DatabaseHandles enc.DatabaseCache = c.DatabaseCache + enc.TrieCache = c.TrieCache + enc.TrieTimeout = c.TrieTimeout enc.Etherbase = c.Etherbase enc.MinerThreads = c.MinerThreads - enc.ExtraData = c.ExtraData - enc.GasPrice = c.GasPrice + enc.MinerNotify = c.MinerNotify + enc.MinerExtraData = c.MinerExtraData + enc.MinerGasPrice = c.MinerGasPrice + enc.MinerRecommit = c.MinerRecommit enc.Ethash = c.Ethash enc.TxPool = c.TxPool enc.GPO = c.GPO @@ -56,20 +68,26 @@ func (c Config) MarshalTOML() (interface{}, error) { return &enc, nil } +// UnmarshalTOML unmarshals from TOML. func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { type Config struct { Genesis *core.Genesis `toml:",omitempty"` NetworkId *uint64 SyncMode *downloader.SyncMode + NoPruning *bool LightServ *int `toml:",omitempty"` LightPeers *int `toml:",omitempty"` SkipBcVersionCheck *bool `toml:"-"` DatabaseHandles *int `toml:"-"` DatabaseCache *int + TrieCache *int + TrieTimeout *time.Duration Etherbase *common.Address `toml:",omitempty"` MinerThreads *int `toml:",omitempty"` - ExtraData *hexutil.Bytes `toml:",omitempty"` - GasPrice *big.Int + MinerNotify []string `toml:",omitempty"` + MinerExtraData *hexutil.Bytes `toml:",omitempty"` + MinerGasPrice *big.Int + MinerRecommit *time.Duration Ethash *ethash.Config TxPool *core.TxPoolConfig GPO *gasprice.Config @@ -89,6 +107,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.SyncMode != nil { c.SyncMode = *dec.SyncMode } + if dec.NoPruning != nil { + c.NoPruning = *dec.NoPruning + } if dec.LightServ != nil { c.LightServ = *dec.LightServ } @@ -104,17 +125,29 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.DatabaseCache != nil { c.DatabaseCache = *dec.DatabaseCache } + if dec.TrieCache != nil { + c.TrieCache = *dec.TrieCache + } + if dec.TrieTimeout != nil { + c.TrieTimeout = *dec.TrieTimeout + } if dec.Etherbase != nil { c.Etherbase = *dec.Etherbase } if dec.MinerThreads != nil { c.MinerThreads = *dec.MinerThreads } - if dec.ExtraData != nil { - c.ExtraData = *dec.ExtraData + if dec.MinerNotify != nil { + c.MinerNotify = dec.MinerNotify + } + if dec.MinerExtraData != nil { + c.MinerExtraData = *dec.MinerExtraData + } + if dec.MinerGasPrice != nil { + c.MinerGasPrice = dec.MinerGasPrice } - if dec.GasPrice != nil { - c.GasPrice = dec.GasPrice + if dec.MinerRecommit != nil { + c.MinerRecommit = *dec.MinerRecommit } if dec.Ethash != nil { c.Ethash = *dec.Ethash diff --git 
a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go index 000e3728ded9..f4eb47a12ac7 100644 --- a/internal/web3ext/web3ext.go +++ b/internal/web3ext/web3ext.go @@ -519,6 +519,11 @@ web3._extend({ params: 1, inputFormatter: [web3._extend.utils.fromDecimal] }), + new web3._extend.Method({ + name: 'setRecommitInterval', + call: 'miner_setRecommitInterval', + params: 1, + }), new web3._extend.Method({ name: 'getHashrate', call: 'miner_getHashrate' diff --git a/les/backend.go b/les/backend.go index d26c1470fe0d..00025ba63400 100644 --- a/les/backend.go +++ b/les/backend.go @@ -141,7 +141,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) { leth.ApiBackend = &LesApiBackend{leth, nil} gpoParams := config.GPO if gpoParams.Default == nil { - gpoParams.Default = config.GasPrice + gpoParams.Default = config.MinerGasPrice } leth.ApiBackend.gpo = gasprice.NewOracle(leth.ApiBackend, gpoParams) return leth, nil diff --git a/miner/miner.go b/miner/miner.go index e350e456e977..c5a0c9d62a1c 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -20,6 +20,7 @@ package miner import ( "fmt" "sync/atomic" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" @@ -51,13 +52,13 @@ type Miner struct { shouldStart int32 // should start indicates whether we should start after sync } -func New(eth Backend, config *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine) *Miner { +func New(eth Backend, config *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, recommit time.Duration) *Miner { miner := &Miner{ eth: eth, mux: mux, engine: engine, exitCh: make(chan struct{}), - worker: newWorker(config, engine, eth, mux), + worker: newWorker(config, engine, eth, mux, recommit), canStart: 1, } go miner.update() @@ -144,6 +145,11 @@ func (self *Miner) SetExtra(extra []byte) error { return nil } +// SetRecommitInterval sets the interval for sealing work resubmitting. +func (self *Miner) SetRecommitInterval(interval time.Duration) { + self.worker.setRecommitInterval(interval) +} + // Pending returns the currently pending block and associated state. func (self *Miner) Pending() (*types.Block, *state.StateDB) { return self.worker.pending() diff --git a/miner/worker.go b/miner/worker.go index 23cfaf22566b..c299ff9dc1fc 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -51,12 +51,27 @@ const ( // chainSideChanSize is the size of channel listening to ChainSideEvent. chainSideChanSize = 10 + // resubmitAdjustChanSize is the size of resubmitting interval adjustment channel. + resubmitAdjustChanSize = 10 + // miningLogAtDepth is the number of confirmations before logging successful mining. miningLogAtDepth = 5 - // blockRecommitInterval is the time interval to recreate the mining block with + // minRecommitInterval is the minimal time interval to recreate the mining block with + // any newly arrived transactions. + minRecommitInterval = 1 * time.Second + + // maxRecommitInterval is the maximum time interval to recreate the mining block with // any newly arrived transactions. - blockRecommitInterval = 3 * time.Second + maxRecommitInterval = 15 * time.Second + + // intervalAdjustRatio is the impact a single interval adjustment has on sealing work + // resubmitting interval. + intervalAdjustRatio = 0.1 + + // intervalAdjustBias is applied during the new resubmit interval calculation in favor of + // increasing upper limit or decreasing lower limit so that the limit can be reachable. 
+ intervalAdjustBias = 200 * 1000.0 * 1000.0 ) // environment is the worker's current environment and holds all of the current state information. @@ -89,11 +104,18 @@ const ( commitInterruptResubmit ) +// newWorkReq represents a request for new sealing work submitting with relative interrupt notifier. type newWorkReq struct { interrupt *int32 noempty bool } +// intervalAdjust represents a resubmitting interval adjustment. +type intervalAdjust struct { + ratio float64 + inc bool +} + // worker is the main object which takes care of submitting new work to consensus engine // and gathering the sealing result. type worker struct { @@ -112,11 +134,13 @@ type worker struct { chainSideSub event.Subscription // Channels - newWorkCh chan *newWorkReq - taskCh chan *task - resultCh chan *task - startCh chan struct{} - exitCh chan struct{} + newWorkCh chan *newWorkReq + taskCh chan *task + resultCh chan *task + startCh chan struct{} + exitCh chan struct{} + resubmitIntervalCh chan time.Duration + resubmitAdjustCh chan *intervalAdjust current *environment // An environment for current running cycle. possibleUncles map[common.Hash]*types.Block // A set of side blocks as the possible uncle blocks. @@ -132,30 +156,34 @@ type worker struct { // atomic status counters running int32 // The indicator whether the consensus engine is running or not. + newTxs int32 // New arrival transaction count since last sealing work submitting. // Test hooks - newTaskHook func(*task) // Method to call upon receiving a new sealing task - skipSealHook func(*task) bool // Method to decide whether skipping the sealing. - fullTaskHook func() // Method to call before pushing the full sealing task + newTaskHook func(*task) // Method to call upon receiving a new sealing task. + skipSealHook func(*task) bool // Method to decide whether skipping the sealing. + fullTaskHook func() // Method to call before pushing the full sealing task. + resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval. 
} -func newWorker(config *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux) *worker { +func newWorker(config *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, recommit time.Duration) *worker { worker := &worker{ - config: config, - engine: engine, - eth: eth, - mux: mux, - chain: eth.BlockChain(), - possibleUncles: make(map[common.Hash]*types.Block), - unconfirmed: newUnconfirmedBlocks(eth.BlockChain(), miningLogAtDepth), - txsCh: make(chan core.NewTxsEvent, txChanSize), - chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize), - chainSideCh: make(chan core.ChainSideEvent, chainSideChanSize), - newWorkCh: make(chan *newWorkReq), - taskCh: make(chan *task), - resultCh: make(chan *task, resultQueueSize), - exitCh: make(chan struct{}), - startCh: make(chan struct{}, 1), + config: config, + engine: engine, + eth: eth, + mux: mux, + chain: eth.BlockChain(), + possibleUncles: make(map[common.Hash]*types.Block), + unconfirmed: newUnconfirmedBlocks(eth.BlockChain(), miningLogAtDepth), + txsCh: make(chan core.NewTxsEvent, txChanSize), + chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize), + chainSideCh: make(chan core.ChainSideEvent, chainSideChanSize), + newWorkCh: make(chan *newWorkReq), + taskCh: make(chan *task), + resultCh: make(chan *task, resultQueueSize), + exitCh: make(chan struct{}), + startCh: make(chan struct{}, 1), + resubmitIntervalCh: make(chan time.Duration), + resubmitAdjustCh: make(chan *intervalAdjust, resubmitAdjustChanSize), } // Subscribe NewTxsEvent for tx pool worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh) @@ -163,8 +191,14 @@ func newWorker(config *params.ChainConfig, engine consensus.Engine, eth Backend, worker.chainHeadSub = eth.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh) worker.chainSideSub = eth.BlockChain().SubscribeChainSideEvent(worker.chainSideCh) + // Sanitize recommit interval if the user-specified one is too short. + if recommit < minRecommitInterval { + log.Warn("Sanitizing miner recommit interval", "provided", recommit, "updated", minRecommitInterval) + recommit = minRecommitInterval + } + go worker.mainLoop() - go worker.newWorkLoop() + go worker.newWorkLoop(recommit) go worker.resultLoop() go worker.taskLoop() @@ -188,6 +222,11 @@ func (w *worker) setExtra(extra []byte) { w.extra = extra } +// setRecommitInterval updates the interval for miner sealing work recommitting. +func (w *worker) setRecommitInterval(interval time.Duration) { + w.resubmitIntervalCh <- interval +} + // pending returns the pending state and corresponding block. func (w *worker) pending() (*types.Block, *state.StateDB) { // return a snapshot to avoid contention on currentMu mutex @@ -238,35 +277,94 @@ func (w *worker) close() { } // newWorkLoop is a standalone goroutine to submit new mining work upon received events. -func (w *worker) newWorkLoop() { - var interrupt *int32 +func (w *worker) newWorkLoop(recommit time.Duration) { + var ( + interrupt *int32 + minRecommit = recommit // minimal resubmit interval specified by user. + ) timer := time.NewTimer(0) <-timer.C // discard the initial tick - // recommit aborts in-flight transaction execution with given signal and resubmits a new one. - recommit := func(noempty bool, s int32) { + // commit aborts in-flight transaction execution with given signal and resubmits a new one. 
+ commit := func(noempty bool, s int32) { if interrupt != nil { atomic.StoreInt32(interrupt, s) } interrupt = new(int32) w.newWorkCh <- &newWorkReq{interrupt: interrupt, noempty: noempty} - timer.Reset(blockRecommitInterval) + timer.Reset(recommit) + atomic.StoreInt32(&w.newTxs, 0) + } + // recalcRecommit recalculates the resubmitting interval upon feedback. + recalcRecommit := func(target float64, inc bool) { + var ( + prev = float64(recommit.Nanoseconds()) + next float64 + ) + if inc { + next = prev*(1-intervalAdjustRatio) + intervalAdjustRatio*(target+intervalAdjustBias) + // Recap if interval is larger than the maximum time interval + if next > float64(maxRecommitInterval.Nanoseconds()) { + next = float64(maxRecommitInterval.Nanoseconds()) + } + } else { + next = prev*(1-intervalAdjustRatio) + intervalAdjustRatio*(target-intervalAdjustBias) + // Recap if interval is less than the user specified minimum + if next < float64(minRecommit.Nanoseconds()) { + next = float64(minRecommit.Nanoseconds()) + } + } + recommit = time.Duration(int64(next)) } for { select { case <-w.startCh: - recommit(false, commitInterruptNewHead) + commit(false, commitInterruptNewHead) case <-w.chainHeadCh: - recommit(false, commitInterruptNewHead) + commit(false, commitInterruptNewHead) case <-timer.C: // If mining is running resubmit a new work cycle periodically to pull in // higher priced transactions. Disable this overhead for pending blocks. if w.isRunning() && (w.config.Clique == nil || w.config.Clique.Period > 0) { - recommit(true, commitInterruptResubmit) + // Short circuit if no new transaction arrives. + if atomic.LoadInt32(&w.newTxs) == 0 { + timer.Reset(recommit) + continue + } + commit(true, commitInterruptResubmit) + } + + case interval := <-w.resubmitIntervalCh: + // Adjust resubmit interval explicitly by user. + if interval < minRecommitInterval { + log.Warn("Sanitizing miner recommit interval", "provided", interval, "updated", minRecommitInterval) + interval = minRecommitInterval + } + log.Info("Miner recommit interval update", "from", minRecommit, "to", interval) + minRecommit, recommit = interval, interval + + if w.resubmitHook != nil { + w.resubmitHook(minRecommit, recommit) + } + + case adjust := <-w.resubmitAdjustCh: + // Adjust resubmit interval by feedback. + if adjust.inc { + before := recommit + recalcRecommit(float64(recommit.Nanoseconds())/adjust.ratio, true) + log.Trace("Increase miner recommit interval", "from", before, "to", recommit) + } else { + before := recommit + recalcRecommit(float64(minRecommit.Nanoseconds()), false) + log.Trace("Decrease miner recommit interval", "from", before, "to", recommit) + } + + if w.resubmitHook != nil { + w.resubmitHook(minRecommit, recommit) } case <-w.exitCh: @@ -339,6 +437,7 @@ func (w *worker) mainLoop() { w.commitNewWork(nil, false) } } + atomic.AddInt32(&w.newTxs, int32(len(ev.Txs))) // System stopped case <-w.exitCh: @@ -383,7 +482,10 @@ func (w *worker) seal(t *task, stop <-chan struct{}) { // taskLoop is a standalone goroutine to fetch sealing task from the generator and // push them to consensus engine. func (w *worker) taskLoop() { - var stopCh chan struct{} + var ( + stopCh chan struct{} + prev common.Hash + ) // interrupt aborts the in-flight sealing task. interrupt := func() { @@ -398,8 +500,13 @@ func (w *worker) taskLoop() { if w.newTaskHook != nil { w.newTaskHook(task) } + // Reject duplicate sealing work due to resubmitting. 
+ if task.block.HashNoNonce() == prev { + continue + } interrupt() stopCh = make(chan struct{}) + prev = task.block.HashNoNonce() go w.seal(task, stopCh) case <-w.exitCh: interrupt() @@ -414,11 +521,15 @@ func (w *worker) resultLoop() { for { select { case result := <-w.resultCh: + // Short circuit when receiving empty result. if result == nil { continue } + // Short circuit when receiving duplicate result caused by resubmitting. block := result.block - + if w.chain.HasBlock(block.Hash(), block.NumberU64()) { + continue + } // Update the block hash in all logs since it is now available and not when the // receipt/log of individual transactions were created. for _, r := range result.receipts { @@ -568,8 +679,18 @@ func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coin // (3) worker recreate the mining block with any newly arrived transactions, the interrupt signal is 2. // For the first two cases, the semi-finished work will be discarded. // For the third case, the semi-finished work will be submitted to the consensus engine. - // TODO(rjl493456442) give feedback to newWorkLoop to adjust resubmit interval if it is too short. if interrupt != nil && atomic.LoadInt32(interrupt) != commitInterruptNone { + // Notify resubmit loop to increase resubmitting interval due to too frequent commits. + if atomic.LoadInt32(interrupt) == commitInterruptResubmit { + ratio := float64(w.current.header.GasLimit-w.current.gasPool.Gas()) / float64(w.current.header.GasLimit) + if ratio < 0.1 { + ratio = 0.1 + } + w.resubmitAdjustCh <- &intervalAdjust{ + ratio: ratio, + inc: true, + } + } return atomic.LoadInt32(interrupt) == commitInterruptNewHead } // If we don't have enough gas for any further transactions then we're done @@ -644,6 +765,11 @@ func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coin } go w.mux.Post(core.PendingLogsEvent{Logs: cpy}) } + // Notify resubmit loop to decrease resubmitting interval if current interval is larger + // than the user-specified one. 
+ if interrupt != nil { + w.resubmitAdjustCh <- &intervalAdjust{inc: false} + } return false } diff --git a/miner/worker_test.go b/miner/worker_test.go index 34bb7f5f3341..16708c18c689 100644 --- a/miner/worker_test.go +++ b/miner/worker_test.go @@ -119,7 +119,7 @@ func (b *testWorkerBackend) PostChainEvents(events []interface{}) { func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) (*worker, *testWorkerBackend) { backend := newTestWorkerBackend(t, chainConfig, engine) backend.txPool.AddLocals(pendingTxs) - w := newWorker(chainConfig, engine, backend, new(event.TypeMux)) + w := newWorker(chainConfig, engine, backend, new(event.TypeMux), time.Second) w.setEtherbase(testBankAddress) return w, backend } @@ -327,7 +327,7 @@ func testRegenerateMiningBlock(t *testing.T, chainConfig *params.ChainConfig, en } } b.txPool.AddLocals(newTxs) - time.Sleep(3 * time.Second) + time.Sleep(time.Second) select { case <-taskCh: @@ -335,3 +335,105 @@ func testRegenerateMiningBlock(t *testing.T, chainConfig *params.ChainConfig, en t.Error("new task timeout") } } + +func TestAdjustIntervalEthash(t *testing.T) { + testAdjustInterval(t, ethashChainConfig, ethash.NewFaker()) +} + +func TestAdjustIntervalClique(t *testing.T) { + testAdjustInterval(t, cliqueChainConfig, clique.New(cliqueChainConfig.Clique, ethdb.NewMemDatabase())) +} + +func testAdjustInterval(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) { + defer engine.Close() + + w, _ := newTestWorker(t, chainConfig, engine) + defer w.close() + + w.skipSealHook = func(task *task) bool { + return true + } + w.fullTaskHook = func() { + time.Sleep(100 * time.Millisecond) + } + var ( + progress = make(chan struct{}, 10) + result = make([]float64, 0, 10) + index = 0 + start = false + ) + w.resubmitHook = func(minInterval time.Duration, recommitInterval time.Duration) { + // Short circuit if interval checking hasn't started. 
+ if !start { + return + } + var wantMinInterval, wantRecommitInterval time.Duration + + switch index { + case 0: + wantMinInterval, wantRecommitInterval = 3*time.Second, 3*time.Second + case 1: + origin := float64(3 * time.Second.Nanoseconds()) + estimate := origin*(1-intervalAdjustRatio) + intervalAdjustRatio*(origin/0.8+intervalAdjustBias) + wantMinInterval, wantRecommitInterval = 3*time.Second, time.Duration(int(estimate))*time.Nanosecond + case 2: + estimate := result[index-1] + min := float64(3 * time.Second.Nanoseconds()) + estimate = estimate*(1-intervalAdjustRatio) + intervalAdjustRatio*(min-intervalAdjustBias) + wantMinInterval, wantRecommitInterval = 3*time.Second, time.Duration(int(estimate))*time.Nanosecond + case 3: + wantMinInterval, wantRecommitInterval = time.Second, time.Second + } + + // Check interval + if minInterval != wantMinInterval { + t.Errorf("resubmit min interval mismatch want %s has %s", wantMinInterval, minInterval) + } + if recommitInterval != wantRecommitInterval { + t.Errorf("resubmit interval mismatch want %s has %s", wantRecommitInterval, recommitInterval) + } + result = append(result, float64(recommitInterval.Nanoseconds())) + index += 1 + progress <- struct{}{} + } + // Ensure worker has finished initialization + for { + b := w.pendingBlock() + if b != nil && b.NumberU64() == 1 { + break + } + } + + w.start() + + time.Sleep(time.Second) + + start = true + w.setRecommitInterval(3 * time.Second) + select { + case <-progress: + case <-time.NewTimer(time.Second).C: + t.Error("interval reset timeout") + } + + w.resubmitAdjustCh <- &intervalAdjust{inc: true, ratio: 0.8} + select { + case <-progress: + case <-time.NewTimer(time.Second).C: + t.Error("interval reset timeout") + } + + w.resubmitAdjustCh <- &intervalAdjust{inc: false} + select { + case <-progress: + case <-time.NewTimer(time.Second).C: + t.Error("interval reset timeout") + } + + w.setRecommitInterval(500 * time.Millisecond) + select { + case <-progress: + case <-time.NewTimer(time.Second).C: + t.Error("interval reset timeout") + } +} From e0d0e64ce22111a2d5492fe6f6d6a0023477e51f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 21 Aug 2018 20:30:06 +0300 Subject: [PATCH 164/166] cmd, core, miner: add --txpool.locals and priority mining --- cmd/geth/main.go | 1 + cmd/geth/usage.go | 1 + cmd/utils/flags.go | 14 ++++++++++++++ core/tx_pool.go | 39 +++++++++++++++++++++++++++++++++++---- miner/worker.go | 23 +++++++++++++++++++---- 5 files changed, 70 insertions(+), 8 deletions(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 2e87bb82007f..4b86382bdbaa 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -72,6 +72,7 @@ var ( utils.EthashDatasetDirFlag, utils.EthashDatasetsInMemoryFlag, utils.EthashDatasetsOnDiskFlag, + utils.TxPoolLocalsFlag, utils.TxPoolNoLocalsFlag, utils.TxPoolJournalFlag, utils.TxPoolRejournalFlag, diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index 674c5d9015ca..1e27d0ae8d50 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -114,6 +114,7 @@ var AppHelpFlagGroups = []flagGroup{ { Name: "TRANSACTION POOL", Flags: []cli.Flag{ + utils.TxPoolLocalsFlag, utils.TxPoolNoLocalsFlag, utils.TxPoolJournalFlag, utils.TxPoolRejournalFlag, diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 7317655836d5..cfca7b4ab458 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -233,6 +233,10 @@ var ( Value: eth.DefaultConfig.Ethash.DatasetsOnDisk, } // Transaction pool settings + TxPoolLocalsFlag = cli.StringFlag{ + Name: 
"txpool.locals", + Usage: "Comma separated accounts to treat as locals (no flush, priority inclusion)", + } TxPoolNoLocalsFlag = cli.BoolFlag{ Name: "txpool.nolocals", Usage: "Disables price exemptions for locally submitted transactions", @@ -977,6 +981,16 @@ func setGPO(ctx *cli.Context, cfg *gasprice.Config) { } func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) { + if ctx.GlobalIsSet(TxPoolLocalsFlag.Name) { + locals := strings.Split(ctx.GlobalString(TxPoolLocalsFlag.Name), ",") + for _, account := range locals { + if trimmed := strings.TrimSpace(account); !common.IsHexAddress(trimmed) { + Fatalf("Invalid account in --txpool.locals: %s", trimmed) + } else { + cfg.Locals = append(cfg.Locals, common.HexToAddress(account)) + } + } + } if ctx.GlobalIsSet(TxPoolNoLocalsFlag.Name) { cfg.NoLocals = ctx.GlobalBool(TxPoolNoLocalsFlag.Name) } diff --git a/core/tx_pool.go b/core/tx_pool.go index 7007f85ddfd1..46ae2759bca2 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -123,9 +123,10 @@ type blockChain interface { // TxPoolConfig are the configuration parameters of the transaction pool. type TxPoolConfig struct { - NoLocals bool // Whether local transaction handling should be disabled - Journal string // Journal of local transactions to survive node restarts - Rejournal time.Duration // Time interval to regenerate the local transaction journal + Locals []common.Address // Addresses that should be treated by default as local + NoLocals bool // Whether local transaction handling should be disabled + Journal string // Journal of local transactions to survive node restarts + Rejournal time.Duration // Time interval to regenerate the local transaction journal PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce) @@ -231,6 +232,10 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block gasPrice: new(big.Int).SetUint64(config.PriceLimit), } pool.locals = newAccountSet(pool.signer) + for _, addr := range config.Locals { + log.Info("Setting new local account", "address", addr) + pool.locals.add(addr) + } pool.priced = newTxPricedList(pool.all) pool.reset(nil, chain.CurrentBlock().Header()) @@ -534,6 +539,14 @@ func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) { return pending, nil } +// Locals retrieves the accounts currently considered local by the pool. +func (pool *TxPool) Locals() []common.Address { + pool.mu.Lock() + defer pool.mu.Unlock() + + return pool.locals.flatten() +} + // local retrieves all currently known local transactions, groupped by origin // account and sorted by nonce. The returned transaction set is a copy and can be // freely modified by calling code. @@ -665,7 +678,10 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) { } // Mark local addresses and journal local transactions if local { - pool.locals.add(from) + if !pool.locals.contains(from) { + log.Info("Setting new local account", "address", from) + pool.locals.add(from) + } } pool.journalTx(from, tx) @@ -1138,6 +1154,7 @@ func (a addressesByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] } type accountSet struct { accounts map[common.Address]struct{} signer types.Signer + cache *[]common.Address } // newAccountSet creates a new address set with an associated signer for sender @@ -1167,6 +1184,20 @@ func (as *accountSet) containsTx(tx *types.Transaction) bool { // add inserts a new address into the set to track. 
func (as *accountSet) add(addr common.Address) { as.accounts[addr] = struct{}{} + as.cache = nil +} + +// flatten returns the list of addresses within this set, also caching it for later +// reuse. The returned slice should not be changed! +func (as *accountSet) flatten() []common.Address { + if as.cache == nil { + accounts := make([]common.Address, 0, len(as.accounts)) + for account := range as.accounts { + accounts = append(accounts, account) + } + as.cache = &accounts + } + return *as.cache } // txLookup is used internally by TxPool to track transactions while allowing lookup without diff --git a/miner/worker.go b/miner/worker.go index c299ff9dc1fc..8c3337ba4532 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -877,11 +877,26 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool) { w.updateSnapshot() return } - txs := types.NewTransactionsByPriceAndNonce(w.current.signer, pending) - if w.commitTransactions(txs, w.coinbase, interrupt) { - return + // Split the pending transactions into locals and remotes + localTxs, remoteTxs := make(map[common.Address]types.Transactions), pending + for _, account := range w.eth.TxPool().Locals() { + if txs := remoteTxs[account]; len(txs) > 0 { + delete(remoteTxs, account) + localTxs[account] = txs + } + } + if len(localTxs) > 0 { + txs := types.NewTransactionsByPriceAndNonce(w.current.signer, localTxs) + if w.commitTransactions(txs, w.coinbase, interrupt) { + return + } + } + if len(remoteTxs) > 0 { + txs := types.NewTransactionsByPriceAndNonce(w.current.signer, remoteTxs) + if w.commitTransactions(txs, w.coinbase, interrupt) { + return + } } - w.commit(uncles, w.fullTaskHook, true, tstart) } From af85d8e2b342aa6feff68cec59db9be3c1e85022 Mon Sep 17 00:00:00 2001 From: gary rong Date: Wed, 22 Aug 2018 14:47:50 +0800 Subject: [PATCH 165/166] cmd, eth: apply default miner recommit setting (#17479) --- cmd/utils/flags.go | 2 +- eth/config.go | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index cfca7b4ab458..c9a936dd5f16 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -367,7 +367,7 @@ var ( MinerRecommitIntervalFlag = cli.DurationFlag{ Name: "miner.recommit", Usage: "Time interval to recreate the block being mined.", - Value: 3 * time.Second, + Value: eth.DefaultConfig.MinerRecommit, } // Account settings UnlockedAccountFlag = cli.StringFlag{ diff --git a/eth/config.go b/eth/config.go index cbd02416bea2..1398a55145dd 100644 --- a/eth/config.go +++ b/eth/config.go @@ -49,6 +49,7 @@ var DefaultConfig = Config{ TrieCache: 256, TrieTimeout: 60 * time.Minute, MinerGasPrice: big.NewInt(18 * params.Shannon), + MinerRecommit: 3 * time.Second, TxPool: core.DefaultTxPoolConfig, GPO: gasprice.Config{ From 316fc7ecfc10d06603f1358c1f4c1020ec36dd2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 22 Aug 2018 11:39:00 +0300 Subject: [PATCH 166/166] params, swarm: release Geth v1.8.14 and Swarm v0.3.2 --- params/version.go | 8 ++++---- swarm/version/version.go | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/params/version.go b/params/version.go index 44c148e9c3c5..9cb027bd557d 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 8 // Minor version component of the current release - VersionPatch = 14 // Patch version component of the current release - VersionMeta = "unstable" // Version metadata to append 
to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 8 // Minor version component of the current release + VersionPatch = 14 // Patch version component of the current release + VersionMeta = "stable" // Version metadata to append to the version string ) // Version holds the textual version string. diff --git a/swarm/version/version.go b/swarm/version/version.go index bb9935e24a98..1f0c646197df 100644 --- a/swarm/version/version.go +++ b/swarm/version/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 0 // Major version component of the current release - VersionMinor = 3 // Minor version component of the current release - VersionPatch = 2 // Patch version component of the current release - VersionMeta = "unstable" // Version metadata to append to the version string + VersionMajor = 0 // Major version component of the current release + VersionMinor = 3 // Minor version component of the current release + VersionPatch = 2 // Patch version component of the current release + VersionMeta = "stable" // Version metadata to append to the version string ) // Version holds the textual version string.
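
The handle shutdown rework in watcher_readdcw.go ([PATCH 160/166] above) replaces a one-shot CloseHandle with an atomic load / close / compare-and-swap loop, so concurrent closers never touch a freed handle twice. The standalone Go sketch below isolates that pattern; closeOnce, the func-typed closer, and the ^uintptr(0) stand-in for syscall.InvalidHandle are illustrative assumptions, not symbols from the patch.

package main

import (
	"fmt"
	"sync/atomic"
)

// invalidHandle stands in for syscall.InvalidHandle (-1 as a uintptr).
const invalidHandle = ^uintptr(0)

// closeOnce mirrors the closeHandle loop from the patch: atomically load the
// handle, close it, then CAS it to the invalid value. Losing the CAS race
// means another goroutine swapped the handle, so reload and retry.
func closeOnce(handle *uintptr, closeFn func(uintptr) error) error {
	var err error
	for {
		h := atomic.LoadUintptr(handle)
		if h == invalidHandle {
			return err // already closed
		}
		if e := closeFn(h); e != nil && err == nil {
			err = e
		}
		// Mark invalid even when closing failed; the handle is unusable either way.
		if atomic.CompareAndSwapUintptr(handle, h, invalidHandle) {
			return err
		}
	}
}

func main() {
	h := uintptr(42)
	err := closeOnce(&h, func(x uintptr) error {
		fmt.Println("closing", x)
		return nil
	})
	fmt.Println(h == invalidHandle, err) // true <nil>
}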
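The one-line change in [PATCH 161/166] reads as cosmetic but is behavioral: an Each-style set iterator that treats a true return value as "stop iterating" would collect at most one uncle when the callback returned true after the first append. That iterator semantics is an assumption inferred from the fix, since the uncle set's type sits outside the diff; the self-contained sketch below models it.

package main

import "fmt"

// each models an Each-style iterator that stops as soon as the callback
// returns true (the semantics assumed for the uncle set in the patch).
func each(items []string, fn func(string) bool) {
	for _, it := range items {
		if fn(it) {
			return
		}
	}
}

func main() {
	hashes := []string{"u1", "u2"}

	var buggy, fixed []string
	each(hashes, func(h string) bool {
		buggy = append(buggy, h)
		return true // old code: accidentally stops after the first uncle
	})
	each(hashes, func(h string) bool {
		fixed = append(fixed, h)
		return false // patched code: keep iterating, collect every uncle
	})
	fmt.Println(buggy, fixed) // [u1] [u1 u2]
}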
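The recalcRecommit helper introduced in miner/worker.go ([PATCH 163/166] above) nudges the sealing resubmit interval as an exponential moving average of the previous interval and a bias-shifted target. The sketch below reproduces that arithmetic with the patch's own constants so one upward adjustment can be traced by hand; pulling the helper out as a free function, and the 0.8 gas-usage ratio fed in, are assumptions made for illustration.

package main

import (
	"fmt"
	"time"
)

const (
	intervalAdjustRatio = 0.1                   // weight of each new sample
	intervalAdjustBias  = 200 * 1000.0 * 1000.0 // 200ms, in nanoseconds
	minRecommitInterval = 1 * time.Second
	maxRecommitInterval = 15 * time.Second
)

// recalcRecommit applies the patch's feedback formula,
// next = prev*(1-ratio) + ratio*(target +/- bias),
// clamped to [minRecommit, maxRecommitInterval].
func recalcRecommit(minRecommit, recommit time.Duration, target float64, inc bool) time.Duration {
	prev := float64(recommit.Nanoseconds())
	var next float64
	if inc {
		next = prev*(1-intervalAdjustRatio) + intervalAdjustRatio*(target+intervalAdjustBias)
		if next > float64(maxRecommitInterval.Nanoseconds()) {
			next = float64(maxRecommitInterval.Nanoseconds())
		}
	} else {
		next = prev*(1-intervalAdjustRatio) + intervalAdjustRatio*(target-intervalAdjustBias)
		if next < float64(minRecommit.Nanoseconds()) {
			next = float64(minRecommit.Nanoseconds())
		}
	}
	return time.Duration(int64(next))
}

func main() {
	recommit := 3 * time.Second
	// A resubmit interrupt fired at 80% gas usage, so the worker asks for the
	// current interval scaled by 1/0.8, exactly as commitTransactions does.
	target := float64(recommit.Nanoseconds()) / 0.8
	fmt.Println(recalcRecommit(minRecommitInterval, recommit, target, true))
	// ~3.095s: 0.9*3s + 0.1*(3.75s + 200ms), give or take float rounding.
}

The companion miner_setRecommitInterval RPC wired up in eth/api.go and web3ext.go takes the interval in milliseconds and feeds the same resubmitIntervalCh, so an operator can override the adaptive value at runtime.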
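The --txpool.locals flag added in [PATCH 164/166] above accepts a comma-separated account list, trims each entry, and rejects anything that is not a hex address before seeding the pool's local set. A dependency-free sketch of that parsing rule follows; parseLocals and the regexp are stand-ins for the patch's common.IsHexAddress check, and the sample addresses are arbitrary.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// hexAddrRE accepts a 20-byte hex address with an optional 0x prefix,
// approximating common.IsHexAddress.
var hexAddrRE = regexp.MustCompile(`^(0x)?[0-9a-fA-F]{40}$`)

// parseLocals mirrors the setTxPool handling: split on commas, trim spaces,
// fail loudly on the first malformed account.
func parseLocals(flag string) ([]string, error) {
	var locals []string
	for _, account := range strings.Split(flag, ",") {
		trimmed := strings.TrimSpace(account)
		if !hexAddrRE.MatchString(trimmed) {
			return nil, fmt.Errorf("invalid account in --txpool.locals: %s", trimmed)
		}
		locals = append(locals, trimmed)
	}
	return locals, nil
}

func main() {
	locals, err := parseLocals("0x8888f1f195afa192cfee860698584c030f4c9db1, 0x970e8128ab834e8eac17ab8e3812f010678cf791")
	fmt.Println(locals, err)
	_, err = parseLocals("not-an-address")
	fmt.Println(err)
}

Addresses parsed this way end up in TxPoolConfig.Locals, which the pool marks local on startup: per the flag's usage string they are exempt from flushing, and after this patch commitNewWork commits their transactions ahead of remote ones.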
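[PATCH 166/166] flips VersionMeta from "unstable" to "stable" in both params and swarm for the 1.8.14 / 0.3.2 release. The sketch below shows how those components compose into the user-visible string; it mirrors the shape of the Version variable in params/version.go but is a standalone illustration rather than the exact upstream code.

package main

import "fmt"

const (
	VersionMajor = 1        // Major version component of the current release
	VersionMinor = 8        // Minor version component of the current release
	VersionPatch = 14       // Patch version component of the current release
	VersionMeta  = "stable" // Version metadata to append to the version string
)

// version joins the numeric components with dots and appends the metadata
// tag when present, yielding strings like "1.8.14-stable".
func version() string {
	v := fmt.Sprintf("%d.%d.%d", VersionMajor, VersionMinor, VersionPatch)
	if VersionMeta != "" {
		v += "-" + VersionMeta
	}
	return v
}

func main() {
	fmt.Println(version()) // 1.8.14-stable
}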